]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.5.4-201209231138.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.5.4-201209231138.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..cd023f2 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 @@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126 -linux
127 +lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131 @@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135 -media
136 mconf
137 +mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144 +mkpiggy
145 mkprep
146 mkregtable
147 mktables
148 @@ -188,6 +207,8 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152 +parse-events*
153 +pasyms.h
154 patches*
155 pca200e.bin
156 pca200e_ecd.bin2
157 @@ -197,6 +218,7 @@ perf-archive
158 piggyback
159 piggy.gzip
160 piggy.S
161 +pmu-*
162 pnmtologo
163 ppc_defs.h*
164 pss_boot.h
165 @@ -206,7 +228,10 @@ r200_reg_safe.h
166 r300_reg_safe.h
167 r420_reg_safe.h
168 r600_reg_safe.h
169 +realmode.lds
170 +realmode.relocs
171 recordmcount
172 +regdb.c
173 relocs
174 rlim_names.h
175 rn50_reg_safe.h
176 @@ -216,8 +241,11 @@ series
177 setup
178 setup.bin
179 setup.elf
180 +size_overflow_hash.h
181 sImage
182 +slabinfo
183 sm_tbl*
184 +sortextable
185 split-include
186 syscalltab.h
187 tables.c
188 @@ -227,6 +255,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192 +user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196 @@ -238,13 +267,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200 +vdsox32.lds
201 +vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208 +vmlinux.bin.bz2
209 vmlinux.lds
210 +vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214 @@ -252,9 +285,11 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218 +utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222 zImage*
223 zconf.hash.c
224 +zconf.lex.c
225 zoffset.h
226 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
227 index a92c5eb..7530459 100644
228 --- a/Documentation/kernel-parameters.txt
229 +++ b/Documentation/kernel-parameters.txt
230 @@ -2051,6 +2051,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
231 the specified number of seconds. This is to be used if
232 your oopses keep scrolling off the screen.
233
234 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
235 + virtualization environments that don't cope well with the
236 + expand down segment used by UDEREF on X86-32 or the frequent
237 + page table updates on X86-64.
238 +
239 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
240 +
241 pcbit= [HW,ISDN]
242
243 pcd. [PARIDE]
244 diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
245 index 13d6166..8c235b6 100644
246 --- a/Documentation/sysctl/fs.txt
247 +++ b/Documentation/sysctl/fs.txt
248 @@ -163,16 +163,22 @@ This value can be used to query and set the core dump mode for setuid
249 or otherwise protected/tainted binaries. The modes are
250
251 0 - (default) - traditional behaviour. Any process which has changed
252 - privilege levels or is execute only will not be dumped
253 + privilege levels or is execute only will not be dumped.
254 1 - (debug) - all processes dump core when possible. The core dump is
255 owned by the current user and no security is applied. This is
256 intended for system debugging situations only. Ptrace is unchecked.
257 + This is insecure as it allows regular users to examine the memory
258 + contents of privileged processes.
259 2 - (suidsafe) - any binary which normally would not be dumped is dumped
260 - readable by root only. This allows the end user to remove
261 - such a dump but not access it directly. For security reasons
262 - core dumps in this mode will not overwrite one another or
263 - other files. This mode is appropriate when administrators are
264 - attempting to debug problems in a normal environment.
265 + anyway, but only if the "core_pattern" kernel sysctl is set to
266 + either a pipe handler or a fully qualified path. (For more details
267 + on this limitation, see CVE-2006-2451.) This mode is appropriate
268 + when administrators are attempting to debug problems in a normal
269 + environment, and either have a core dump pipe handler that knows
270 + to treat privileged core dumps with care, or specific directory
271 + defined for catching core dumps. If a core dump happens without
272 + a pipe handler or fully qualified path, a message will be emitted
273 + to syslog warning about the lack of a correct setting.
274
275 ==============================================================
276
277 diff --git a/Makefile b/Makefile
278 index 6453ead..f5148e2 100644
279 --- a/Makefile
280 +++ b/Makefile
281 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
282
283 HOSTCC = gcc
284 HOSTCXX = g++
285 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
286 -HOSTCXXFLAGS = -O2
287 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
288 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
289 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
290
291 # Decide whether to build built-in, modular, or both.
292 # Normally, just do built-in.
293 @@ -404,8 +405,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
294 # Rules shared between *config targets and build targets
295
296 # Basic helpers built in scripts/
297 -PHONY += scripts_basic
298 -scripts_basic:
299 +PHONY += scripts_basic gcc-plugins
300 +scripts_basic: gcc-plugins
301 $(Q)$(MAKE) $(build)=scripts/basic
302 $(Q)rm -f .tmp_quiet_recordmcount
303
304 @@ -561,6 +562,60 @@ else
305 KBUILD_CFLAGS += -O2
306 endif
307
308 +ifndef DISABLE_PAX_PLUGINS
309 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
310 +ifneq ($(PLUGINCC),)
311 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
312 +ifndef CONFIG_UML
313 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
314 +endif
315 +endif
316 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
317 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
318 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
319 +endif
320 +ifdef CONFIG_KALLOCSTAT_PLUGIN
321 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
322 +endif
323 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
324 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
325 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
326 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
327 +endif
328 +ifdef CONFIG_CHECKER_PLUGIN
329 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
330 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
331 +endif
332 +endif
333 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
334 +ifdef CONFIG_PAX_SIZE_OVERFLOW
335 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
336 +endif
337 +ifdef CONFIG_PAX_LATENT_ENTROPY
338 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
339 +endif
340 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
341 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
342 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
343 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
344 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
345 +ifeq ($(KBUILD_EXTMOD),)
346 +gcc-plugins:
347 + $(Q)$(MAKE) $(build)=tools/gcc
348 +else
349 +gcc-plugins: ;
350 +endif
351 +else
352 +gcc-plugins:
353 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
354 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
355 +else
356 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
357 +endif
358 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
359 +endif
360 +endif
361 +
362 include $(srctree)/arch/$(SRCARCH)/Makefile
363
364 ifdef CONFIG_READABLE_ASM
365 @@ -715,7 +770,7 @@ export mod_strip_cmd
366
367
368 ifeq ($(KBUILD_EXTMOD),)
369 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
370 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
371
372 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
373 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
374 @@ -762,6 +817,8 @@ endif
375
376 # The actual objects are generated when descending,
377 # make sure no implicit rule kicks in
378 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
379 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
380 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
381
382 # Handle descending into subdirectories listed in $(vmlinux-dirs)
383 @@ -771,7 +828,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
384 # Error messages still appears in the original language
385
386 PHONY += $(vmlinux-dirs)
387 -$(vmlinux-dirs): prepare scripts
388 +$(vmlinux-dirs): gcc-plugins prepare scripts
389 $(Q)$(MAKE) $(build)=$@
390
391 # Store (new) KERNELRELASE string in include/config/kernel.release
392 @@ -815,6 +872,7 @@ prepare0: archprepare FORCE
393 $(Q)$(MAKE) $(build)=.
394
395 # All the preparing..
396 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
397 prepare: prepare0
398
399 # Generate some files
400 @@ -922,6 +980,8 @@ all: modules
401 # using awk while concatenating to the final file.
402
403 PHONY += modules
404 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
405 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
406 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
407 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
408 @$(kecho) ' Building modules, stage 2.';
409 @@ -937,7 +997,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
410
411 # Target to prepare building external modules
412 PHONY += modules_prepare
413 -modules_prepare: prepare scripts
414 +modules_prepare: gcc-plugins prepare scripts
415
416 # Target to install modules
417 PHONY += modules_install
418 @@ -994,7 +1054,7 @@ CLEAN_DIRS += $(MODVERDIR)
419 MRPROPER_DIRS += include/config usr/include include/generated \
420 arch/*/include/generated
421 MRPROPER_FILES += .config .config.old .version .old_version \
422 - include/linux/version.h \
423 + include/linux/version.h tools/gcc/size_overflow_hash.h\
424 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
425
426 # clean - Delete most, but leave enough to build external modules
427 @@ -1032,6 +1092,7 @@ distclean: mrproper
428 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
429 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
430 -o -name '.*.rej' \
431 + -o -name '*.so' \
432 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
433 -type f -print | xargs rm -f
434
435 @@ -1192,6 +1253,8 @@ PHONY += $(module-dirs) modules
436 $(module-dirs): crmodverdir $(objtree)/Module.symvers
437 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
438
439 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
440 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
441 modules: $(module-dirs)
442 @$(kecho) ' Building modules, stage 2.';
443 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
444 @@ -1326,17 +1389,21 @@ else
445 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
446 endif
447
448 -%.s: %.c prepare scripts FORCE
449 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
450 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
451 +%.s: %.c gcc-plugins prepare scripts FORCE
452 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
453 %.i: %.c prepare scripts FORCE
454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
455 -%.o: %.c prepare scripts FORCE
456 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458 +%.o: %.c gcc-plugins prepare scripts FORCE
459 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
460 %.lst: %.c prepare scripts FORCE
461 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
462 -%.s: %.S prepare scripts FORCE
463 +%.s: %.S gcc-plugins prepare scripts FORCE
464 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
465 -%.o: %.S prepare scripts FORCE
466 +%.o: %.S gcc-plugins prepare scripts FORCE
467 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
468 %.symtypes: %.c prepare scripts FORCE
469 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
470 @@ -1346,11 +1413,15 @@ endif
471 $(cmd_crmodverdir)
472 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
473 $(build)=$(build-dir)
474 -%/: prepare scripts FORCE
475 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
476 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
477 +%/: gcc-plugins prepare scripts FORCE
478 $(cmd_crmodverdir)
479 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
480 $(build)=$(build-dir)
481 -%.ko: prepare scripts FORCE
482 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
483 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
484 +%.ko: gcc-plugins prepare scripts FORCE
485 $(cmd_crmodverdir)
486 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
487 $(build)=$(build-dir) $(@:.ko=.o)
488 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
489 index 3bb7ffe..347a54c 100644
490 --- a/arch/alpha/include/asm/atomic.h
491 +++ b/arch/alpha/include/asm/atomic.h
492 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
493 #define atomic_dec(v) atomic_sub(1,(v))
494 #define atomic64_dec(v) atomic64_sub(1,(v))
495
496 +#define atomic64_read_unchecked(v) atomic64_read(v)
497 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
498 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
499 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
500 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
501 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
502 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
503 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
504 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
505 +
506 #define smp_mb__before_atomic_dec() smp_mb()
507 #define smp_mb__after_atomic_dec() smp_mb()
508 #define smp_mb__before_atomic_inc() smp_mb()
509 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
510 index ad368a9..fbe0f25 100644
511 --- a/arch/alpha/include/asm/cache.h
512 +++ b/arch/alpha/include/asm/cache.h
513 @@ -4,19 +4,19 @@
514 #ifndef __ARCH_ALPHA_CACHE_H
515 #define __ARCH_ALPHA_CACHE_H
516
517 +#include <linux/const.h>
518
519 /* Bytes per L1 (data) cache line. */
520 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
521 -# define L1_CACHE_BYTES 64
522 # define L1_CACHE_SHIFT 6
523 #else
524 /* Both EV4 and EV5 are write-through, read-allocate,
525 direct-mapped, physical.
526 */
527 -# define L1_CACHE_BYTES 32
528 # define L1_CACHE_SHIFT 5
529 #endif
530
531 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
532 #define SMP_CACHE_BYTES L1_CACHE_BYTES
533
534 #endif
535 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
536 index 968d999..d36b2df 100644
537 --- a/arch/alpha/include/asm/elf.h
538 +++ b/arch/alpha/include/asm/elf.h
539 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
540
541 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
542
543 +#ifdef CONFIG_PAX_ASLR
544 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
545 +
546 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
547 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
548 +#endif
549 +
550 /* $0 is set by ld.so to a pointer to a function which might be
551 registered using atexit. This provides a mean for the dynamic
552 linker to call DT_FINI functions for shared libraries that have
553 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
554 index bc2a0da..8ad11ee 100644
555 --- a/arch/alpha/include/asm/pgalloc.h
556 +++ b/arch/alpha/include/asm/pgalloc.h
557 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
558 pgd_set(pgd, pmd);
559 }
560
561 +static inline void
562 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
563 +{
564 + pgd_populate(mm, pgd, pmd);
565 +}
566 +
567 extern pgd_t *pgd_alloc(struct mm_struct *mm);
568
569 static inline void
570 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
571 index 81a4342..348b927 100644
572 --- a/arch/alpha/include/asm/pgtable.h
573 +++ b/arch/alpha/include/asm/pgtable.h
574 @@ -102,6 +102,17 @@ struct vm_area_struct;
575 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
576 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
577 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
578 +
579 +#ifdef CONFIG_PAX_PAGEEXEC
580 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
581 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
582 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
583 +#else
584 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
585 +# define PAGE_COPY_NOEXEC PAGE_COPY
586 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
587 +#endif
588 +
589 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
590
591 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
592 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
593 index 2fd00b7..cfd5069 100644
594 --- a/arch/alpha/kernel/module.c
595 +++ b/arch/alpha/kernel/module.c
596 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
597
598 /* The small sections were sorted to the end of the segment.
599 The following should definitely cover them. */
600 - gp = (u64)me->module_core + me->core_size - 0x8000;
601 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
602 got = sechdrs[me->arch.gotsecindex].sh_addr;
603
604 for (i = 0; i < n; i++) {
605 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
606 index 98a1036..fb54ccf 100644
607 --- a/arch/alpha/kernel/osf_sys.c
608 +++ b/arch/alpha/kernel/osf_sys.c
609 @@ -1312,7 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
610 /* At this point: (!vma || addr < vma->vm_end). */
611 if (limit - len < addr)
612 return -ENOMEM;
613 - if (!vma || addr + len <= vma->vm_start)
614 + if (check_heap_stack_gap(vma, addr, len))
615 return addr;
616 addr = vma->vm_end;
617 vma = vma->vm_next;
618 @@ -1348,6 +1348,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
619 merely specific addresses, but regions of memory -- perhaps
620 this feature should be incorporated into all ports? */
621
622 +#ifdef CONFIG_PAX_RANDMMAP
623 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
624 +#endif
625 +
626 if (addr) {
627 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
628 if (addr != (unsigned long) -ENOMEM)
629 @@ -1355,8 +1359,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
630 }
631
632 /* Next, try allocating at TASK_UNMAPPED_BASE. */
633 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
634 - len, limit);
635 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
636 +
637 if (addr != (unsigned long) -ENOMEM)
638 return addr;
639
640 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641 index 5eecab1..609abc0 100644
642 --- a/arch/alpha/mm/fault.c
643 +++ b/arch/alpha/mm/fault.c
644 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648 +#ifdef CONFIG_PAX_PAGEEXEC
649 +/*
650 + * PaX: decide what to do with offenders (regs->pc = fault address)
651 + *
652 + * returns 1 when task should be killed
653 + * 2 when patched PLT trampoline was detected
654 + * 3 when unpatched PLT trampoline was detected
655 + */
656 +static int pax_handle_fetch_fault(struct pt_regs *regs)
657 +{
658 +
659 +#ifdef CONFIG_PAX_EMUPLT
660 + int err;
661 +
662 + do { /* PaX: patched PLT emulation #1 */
663 + unsigned int ldah, ldq, jmp;
664 +
665 + err = get_user(ldah, (unsigned int *)regs->pc);
666 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668 +
669 + if (err)
670 + break;
671 +
672 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674 + jmp == 0x6BFB0000U)
675 + {
676 + unsigned long r27, addr;
677 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679 +
680 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681 + err = get_user(r27, (unsigned long *)addr);
682 + if (err)
683 + break;
684 +
685 + regs->r27 = r27;
686 + regs->pc = r27;
687 + return 2;
688 + }
689 + } while (0);
690 +
691 + do { /* PaX: patched PLT emulation #2 */
692 + unsigned int ldah, lda, br;
693 +
694 + err = get_user(ldah, (unsigned int *)regs->pc);
695 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
696 + err |= get_user(br, (unsigned int *)(regs->pc+8));
697 +
698 + if (err)
699 + break;
700 +
701 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
703 + (br & 0xFFE00000U) == 0xC3E00000U)
704 + {
705 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708 +
709 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711 + return 2;
712 + }
713 + } while (0);
714 +
715 + do { /* PaX: unpatched PLT emulation */
716 + unsigned int br;
717 +
718 + err = get_user(br, (unsigned int *)regs->pc);
719 +
720 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721 + unsigned int br2, ldq, nop, jmp;
722 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723 +
724 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725 + err = get_user(br2, (unsigned int *)addr);
726 + err |= get_user(ldq, (unsigned int *)(addr+4));
727 + err |= get_user(nop, (unsigned int *)(addr+8));
728 + err |= get_user(jmp, (unsigned int *)(addr+12));
729 + err |= get_user(resolver, (unsigned long *)(addr+16));
730 +
731 + if (err)
732 + break;
733 +
734 + if (br2 == 0xC3600000U &&
735 + ldq == 0xA77B000CU &&
736 + nop == 0x47FF041FU &&
737 + jmp == 0x6B7B0000U)
738 + {
739 + regs->r28 = regs->pc+4;
740 + regs->r27 = addr+16;
741 + regs->pc = resolver;
742 + return 3;
743 + }
744 + }
745 + } while (0);
746 +#endif
747 +
748 + return 1;
749 +}
750 +
751 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752 +{
753 + unsigned long i;
754 +
755 + printk(KERN_ERR "PAX: bytes at PC: ");
756 + for (i = 0; i < 5; i++) {
757 + unsigned int c;
758 + if (get_user(c, (unsigned int *)pc+i))
759 + printk(KERN_CONT "???????? ");
760 + else
761 + printk(KERN_CONT "%08x ", c);
762 + }
763 + printk("\n");
764 +}
765 +#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773 - if (!(vma->vm_flags & VM_EXEC))
774 + if (!(vma->vm_flags & VM_EXEC)) {
775 +
776 +#ifdef CONFIG_PAX_PAGEEXEC
777 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778 + goto bad_area;
779 +
780 + up_read(&mm->mmap_sem);
781 + switch (pax_handle_fetch_fault(regs)) {
782 +
783 +#ifdef CONFIG_PAX_EMUPLT
784 + case 2:
785 + case 3:
786 + return;
787 +#endif
788 +
789 + }
790 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791 + do_group_exit(SIGKILL);
792 +#else
793 goto bad_area;
794 +#endif
795 +
796 + }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
801 index c79f61f..9ac0642 100644
802 --- a/arch/arm/include/asm/atomic.h
803 +++ b/arch/arm/include/asm/atomic.h
804 @@ -17,17 +17,35 @@
805 #include <asm/barrier.h>
806 #include <asm/cmpxchg.h>
807
808 +#ifdef CONFIG_GENERIC_ATOMIC64
809 +#include <asm-generic/atomic64.h>
810 +#endif
811 +
812 #define ATOMIC_INIT(i) { (i) }
813
814 #ifdef __KERNEL__
815
816 +#define _ASM_EXTABLE(from, to) \
817 +" .pushsection __ex_table,\"a\"\n"\
818 +" .align 3\n" \
819 +" .long " #from ", " #to"\n" \
820 +" .popsection"
821 +
822 /*
823 * On ARM, ordinary assignment (str instruction) doesn't clear the local
824 * strex/ldrex monitor on some implementations. The reason we can use it for
825 * atomic_set() is the clrex or dummy strex done on every exception return.
826 */
827 #define atomic_read(v) (*(volatile int *)&(v)->counter)
828 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
829 +{
830 + return v->counter;
831 +}
832 #define atomic_set(v,i) (((v)->counter) = (i))
833 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
834 +{
835 + v->counter = i;
836 +}
837
838 #if __LINUX_ARM_ARCH__ >= 6
839
840 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
841 int result;
842
843 __asm__ __volatile__("@ atomic_add\n"
844 +"1: ldrex %1, [%3]\n"
845 +" adds %0, %1, %4\n"
846 +
847 +#ifdef CONFIG_PAX_REFCOUNT
848 +" bvc 3f\n"
849 +"2: bkpt 0xf103\n"
850 +"3:\n"
851 +#endif
852 +
853 +" strex %1, %0, [%3]\n"
854 +" teq %1, #0\n"
855 +" bne 1b"
856 +
857 +#ifdef CONFIG_PAX_REFCOUNT
858 +"\n4:\n"
859 + _ASM_EXTABLE(2b, 4b)
860 +#endif
861 +
862 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
863 + : "r" (&v->counter), "Ir" (i)
864 + : "cc");
865 +}
866 +
867 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
868 +{
869 + unsigned long tmp;
870 + int result;
871 +
872 + __asm__ __volatile__("@ atomic_add_unchecked\n"
873 "1: ldrex %0, [%3]\n"
874 " add %0, %0, %4\n"
875 " strex %1, %0, [%3]\n"
876 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
877 smp_mb();
878
879 __asm__ __volatile__("@ atomic_add_return\n"
880 +"1: ldrex %1, [%3]\n"
881 +" adds %0, %1, %4\n"
882 +
883 +#ifdef CONFIG_PAX_REFCOUNT
884 +" bvc 3f\n"
885 +" mov %0, %1\n"
886 +"2: bkpt 0xf103\n"
887 +"3:\n"
888 +#endif
889 +
890 +" strex %1, %0, [%3]\n"
891 +" teq %1, #0\n"
892 +" bne 1b"
893 +
894 +#ifdef CONFIG_PAX_REFCOUNT
895 +"\n4:\n"
896 + _ASM_EXTABLE(2b, 4b)
897 +#endif
898 +
899 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
900 + : "r" (&v->counter), "Ir" (i)
901 + : "cc");
902 +
903 + smp_mb();
904 +
905 + return result;
906 +}
907 +
908 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
909 +{
910 + unsigned long tmp;
911 + int result;
912 +
913 + smp_mb();
914 +
915 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
916 "1: ldrex %0, [%3]\n"
917 " add %0, %0, %4\n"
918 " strex %1, %0, [%3]\n"
919 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
920 int result;
921
922 __asm__ __volatile__("@ atomic_sub\n"
923 +"1: ldrex %1, [%3]\n"
924 +" subs %0, %1, %4\n"
925 +
926 +#ifdef CONFIG_PAX_REFCOUNT
927 +" bvc 3f\n"
928 +"2: bkpt 0xf103\n"
929 +"3:\n"
930 +#endif
931 +
932 +" strex %1, %0, [%3]\n"
933 +" teq %1, #0\n"
934 +" bne 1b"
935 +
936 +#ifdef CONFIG_PAX_REFCOUNT
937 +"\n4:\n"
938 + _ASM_EXTABLE(2b, 4b)
939 +#endif
940 +
941 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 + : "r" (&v->counter), "Ir" (i)
943 + : "cc");
944 +}
945 +
946 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
947 +{
948 + unsigned long tmp;
949 + int result;
950 +
951 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
952 "1: ldrex %0, [%3]\n"
953 " sub %0, %0, %4\n"
954 " strex %1, %0, [%3]\n"
955 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
956 smp_mb();
957
958 __asm__ __volatile__("@ atomic_sub_return\n"
959 -"1: ldrex %0, [%3]\n"
960 -" sub %0, %0, %4\n"
961 +"1: ldrex %1, [%3]\n"
962 +" subs %0, %1, %4\n"
963 +
964 +#ifdef CONFIG_PAX_REFCOUNT
965 +" bvc 3f\n"
966 +" mov %0, %1\n"
967 +"2: bkpt 0xf103\n"
968 +"3:\n"
969 +#endif
970 +
971 " strex %1, %0, [%3]\n"
972 " teq %1, #0\n"
973 " bne 1b"
974 +
975 +#ifdef CONFIG_PAX_REFCOUNT
976 +"\n4:\n"
977 + _ASM_EXTABLE(2b, 4b)
978 +#endif
979 +
980 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
981 : "r" (&v->counter), "Ir" (i)
982 : "cc");
983 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
984 return oldval;
985 }
986
987 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
988 +{
989 + unsigned long oldval, res;
990 +
991 + smp_mb();
992 +
993 + do {
994 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
995 + "ldrex %1, [%3]\n"
996 + "mov %0, #0\n"
997 + "teq %1, %4\n"
998 + "strexeq %0, %5, [%3]\n"
999 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1000 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1001 + : "cc");
1002 + } while (res);
1003 +
1004 + smp_mb();
1005 +
1006 + return oldval;
1007 +}
1008 +
1009 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1010 {
1011 unsigned long tmp, tmp2;
1012 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1013
1014 return val;
1015 }
1016 +
1017 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1018 +{
1019 + return atomic_add_return(i, v);
1020 +}
1021 +
1022 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1023 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1024 +{
1025 + (void) atomic_add_return(i, v);
1026 +}
1027
1028 static inline int atomic_sub_return(int i, atomic_t *v)
1029 {
1030 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1031 return val;
1032 }
1033 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1034 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1035 +{
1036 + (void) atomic_sub_return(i, v);
1037 +}
1038
1039 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1040 {
1041 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1042 return ret;
1043 }
1044
1045 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1046 +{
1047 + return atomic_cmpxchg(v, old, new);
1048 +}
1049 +
1050 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1051 {
1052 unsigned long flags;
1053 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 #endif /* __LINUX_ARM_ARCH__ */
1055
1056 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1057 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1058 +{
1059 + return xchg(&v->counter, new);
1060 +}
1061
1062 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1063 {
1064 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1065 }
1066
1067 #define atomic_inc(v) atomic_add(1, v)
1068 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1069 +{
1070 + atomic_add_unchecked(1, v);
1071 +}
1072 #define atomic_dec(v) atomic_sub(1, v)
1073 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1074 +{
1075 + atomic_sub_unchecked(1, v);
1076 +}
1077
1078 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1079 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1080 +{
1081 + return atomic_add_return_unchecked(1, v) == 0;
1082 +}
1083 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1084 #define atomic_inc_return(v) (atomic_add_return(1, v))
1085 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1086 +{
1087 + return atomic_add_return_unchecked(1, v);
1088 +}
1089 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1090 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1091
1092 @@ -241,6 +428,14 @@ typedef struct {
1093 u64 __aligned(8) counter;
1094 } atomic64_t;
1095
1096 +#ifdef CONFIG_PAX_REFCOUNT
1097 +typedef struct {
1098 + u64 __aligned(8) counter;
1099 +} atomic64_unchecked_t;
1100 +#else
1101 +typedef atomic64_t atomic64_unchecked_t;
1102 +#endif
1103 +
1104 #define ATOMIC64_INIT(i) { (i) }
1105
1106 static inline u64 atomic64_read(const atomic64_t *v)
1107 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1108 return result;
1109 }
1110
1111 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1112 +{
1113 + u64 result;
1114 +
1115 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1116 +" ldrexd %0, %H0, [%1]"
1117 + : "=&r" (result)
1118 + : "r" (&v->counter), "Qo" (v->counter)
1119 + );
1120 +
1121 + return result;
1122 +}
1123 +
1124 static inline void atomic64_set(atomic64_t *v, u64 i)
1125 {
1126 u64 tmp;
1127 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1128 : "cc");
1129 }
1130
1131 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1132 +{
1133 + u64 tmp;
1134 +
1135 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1136 +"1: ldrexd %0, %H0, [%2]\n"
1137 +" strexd %0, %3, %H3, [%2]\n"
1138 +" teq %0, #0\n"
1139 +" bne 1b"
1140 + : "=&r" (tmp), "=Qo" (v->counter)
1141 + : "r" (&v->counter), "r" (i)
1142 + : "cc");
1143 +}
1144 +
1145 static inline void atomic64_add(u64 i, atomic64_t *v)
1146 {
1147 u64 result;
1148 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1149 __asm__ __volatile__("@ atomic64_add\n"
1150 "1: ldrexd %0, %H0, [%3]\n"
1151 " adds %0, %0, %4\n"
1152 +" adcs %H0, %H0, %H4\n"
1153 +
1154 +#ifdef CONFIG_PAX_REFCOUNT
1155 +" bvc 3f\n"
1156 +"2: bkpt 0xf103\n"
1157 +"3:\n"
1158 +#endif
1159 +
1160 +" strexd %1, %0, %H0, [%3]\n"
1161 +" teq %1, #0\n"
1162 +" bne 1b"
1163 +
1164 +#ifdef CONFIG_PAX_REFCOUNT
1165 +"\n4:\n"
1166 + _ASM_EXTABLE(2b, 4b)
1167 +#endif
1168 +
1169 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1170 + : "r" (&v->counter), "r" (i)
1171 + : "cc");
1172 +}
1173 +
1174 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1175 +{
1176 + u64 result;
1177 + unsigned long tmp;
1178 +
1179 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1180 +"1: ldrexd %0, %H0, [%3]\n"
1181 +" adds %0, %0, %4\n"
1182 " adc %H0, %H0, %H4\n"
1183 " strexd %1, %0, %H0, [%3]\n"
1184 " teq %1, #0\n"
1185 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1186
1187 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1188 {
1189 - u64 result;
1190 - unsigned long tmp;
1191 + u64 result, tmp;
1192
1193 smp_mb();
1194
1195 __asm__ __volatile__("@ atomic64_add_return\n"
1196 +"1: ldrexd %1, %H1, [%3]\n"
1197 +" adds %0, %1, %4\n"
1198 +" adcs %H0, %H1, %H4\n"
1199 +
1200 +#ifdef CONFIG_PAX_REFCOUNT
1201 +" bvc 3f\n"
1202 +" mov %0, %1\n"
1203 +" mov %H0, %H1\n"
1204 +"2: bkpt 0xf103\n"
1205 +"3:\n"
1206 +#endif
1207 +
1208 +" strexd %1, %0, %H0, [%3]\n"
1209 +" teq %1, #0\n"
1210 +" bne 1b"
1211 +
1212 +#ifdef CONFIG_PAX_REFCOUNT
1213 +"\n4:\n"
1214 + _ASM_EXTABLE(2b, 4b)
1215 +#endif
1216 +
1217 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1218 + : "r" (&v->counter), "r" (i)
1219 + : "cc");
1220 +
1221 + smp_mb();
1222 +
1223 + return result;
1224 +}
1225 +
1226 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1227 +{
1228 + u64 result;
1229 + unsigned long tmp;
1230 +
1231 + smp_mb();
1232 +
1233 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1234 "1: ldrexd %0, %H0, [%3]\n"
1235 " adds %0, %0, %4\n"
1236 " adc %H0, %H0, %H4\n"
1237 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1238 __asm__ __volatile__("@ atomic64_sub\n"
1239 "1: ldrexd %0, %H0, [%3]\n"
1240 " subs %0, %0, %4\n"
1241 +" sbcs %H0, %H0, %H4\n"
1242 +
1243 +#ifdef CONFIG_PAX_REFCOUNT
1244 +" bvc 3f\n"
1245 +"2: bkpt 0xf103\n"
1246 +"3:\n"
1247 +#endif
1248 +
1249 +" strexd %1, %0, %H0, [%3]\n"
1250 +" teq %1, #0\n"
1251 +" bne 1b"
1252 +
1253 +#ifdef CONFIG_PAX_REFCOUNT
1254 +"\n4:\n"
1255 + _ASM_EXTABLE(2b, 4b)
1256 +#endif
1257 +
1258 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1259 + : "r" (&v->counter), "r" (i)
1260 + : "cc");
1261 +}
1262 +
1263 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1264 +{
1265 + u64 result;
1266 + unsigned long tmp;
1267 +
1268 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1269 +"1: ldrexd %0, %H0, [%3]\n"
1270 +" subs %0, %0, %4\n"
1271 " sbc %H0, %H0, %H4\n"
1272 " strexd %1, %0, %H0, [%3]\n"
1273 " teq %1, #0\n"
1274 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1275
1276 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1277 {
1278 - u64 result;
1279 - unsigned long tmp;
1280 + u64 result, tmp;
1281
1282 smp_mb();
1283
1284 __asm__ __volatile__("@ atomic64_sub_return\n"
1285 -"1: ldrexd %0, %H0, [%3]\n"
1286 -" subs %0, %0, %4\n"
1287 -" sbc %H0, %H0, %H4\n"
1288 +"1: ldrexd %1, %H1, [%3]\n"
1289 +" subs %0, %1, %4\n"
1290 +" sbcs %H0, %H1, %H4\n"
1291 +
1292 +#ifdef CONFIG_PAX_REFCOUNT
1293 +" bvc 3f\n"
1294 +" mov %0, %1\n"
1295 +" mov %H0, %H1\n"
1296 +"2: bkpt 0xf103\n"
1297 +"3:\n"
1298 +#endif
1299 +
1300 " strexd %1, %0, %H0, [%3]\n"
1301 " teq %1, #0\n"
1302 " bne 1b"
1303 +
1304 +#ifdef CONFIG_PAX_REFCOUNT
1305 +"\n4:\n"
1306 + _ASM_EXTABLE(2b, 4b)
1307 +#endif
1308 +
1309 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1310 : "r" (&v->counter), "r" (i)
1311 : "cc");
1312 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1313 return oldval;
1314 }
1315
1316 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1317 +{
1318 + u64 oldval;
1319 + unsigned long res;
1320 +
1321 + smp_mb();
1322 +
1323 + do {
1324 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1325 + "ldrexd %1, %H1, [%3]\n"
1326 + "mov %0, #0\n"
1327 + "teq %1, %4\n"
1328 + "teqeq %H1, %H4\n"
1329 + "strexdeq %0, %5, %H5, [%3]"
1330 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1331 + : "r" (&ptr->counter), "r" (old), "r" (new)
1332 + : "cc");
1333 + } while (res);
1334 +
1335 + smp_mb();
1336 +
1337 + return oldval;
1338 +}
1339 +
1340 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1341 {
1342 u64 result;
1343 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1344
1345 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1346 {
1347 - u64 result;
1348 - unsigned long tmp;
1349 + u64 result, tmp;
1350
1351 smp_mb();
1352
1353 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1354 -"1: ldrexd %0, %H0, [%3]\n"
1355 -" subs %0, %0, #1\n"
1356 -" sbc %H0, %H0, #0\n"
1357 +"1: ldrexd %1, %H1, [%3]\n"
1358 +" subs %0, %1, #1\n"
1359 +" sbcs %H0, %H1, #0\n"
1360 +
1361 +#ifdef CONFIG_PAX_REFCOUNT
1362 +" bvc 3f\n"
1363 +" mov %0, %1\n"
1364 +" mov %H0, %H1\n"
1365 +"2: bkpt 0xf103\n"
1366 +"3:\n"
1367 +#endif
1368 +
1369 " teq %H0, #0\n"
1370 -" bmi 2f\n"
1371 +" bmi 4f\n"
1372 " strexd %1, %0, %H0, [%3]\n"
1373 " teq %1, #0\n"
1374 " bne 1b\n"
1375 -"2:"
1376 +"4:\n"
1377 +
1378 +#ifdef CONFIG_PAX_REFCOUNT
1379 + _ASM_EXTABLE(2b, 4b)
1380 +#endif
1381 +
1382 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1383 : "r" (&v->counter)
1384 : "cc");
1385 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1386 " teq %0, %5\n"
1387 " teqeq %H0, %H5\n"
1388 " moveq %1, #0\n"
1389 -" beq 2f\n"
1390 +" beq 4f\n"
1391 " adds %0, %0, %6\n"
1392 -" adc %H0, %H0, %H6\n"
1393 +" adcs %H0, %H0, %H6\n"
1394 +
1395 +#ifdef CONFIG_PAX_REFCOUNT
1396 +" bvc 3f\n"
1397 +"2: bkpt 0xf103\n"
1398 +"3:\n"
1399 +#endif
1400 +
1401 " strexd %2, %0, %H0, [%4]\n"
1402 " teq %2, #0\n"
1403 " bne 1b\n"
1404 -"2:"
1405 +"4:\n"
1406 +
1407 +#ifdef CONFIG_PAX_REFCOUNT
1408 + _ASM_EXTABLE(2b, 4b)
1409 +#endif
1410 +
1411 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1412 : "r" (&v->counter), "r" (u), "r" (a)
1413 : "cc");
1414 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1415
1416 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1417 #define atomic64_inc(v) atomic64_add(1LL, (v))
1418 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1419 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1420 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1421 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1422 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1423 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1424 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1425 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1426 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1427 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1428 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1429 index 75fe66b..2255c86 100644
1430 --- a/arch/arm/include/asm/cache.h
1431 +++ b/arch/arm/include/asm/cache.h
1432 @@ -4,8 +4,10 @@
1433 #ifndef __ASMARM_CACHE_H
1434 #define __ASMARM_CACHE_H
1435
1436 +#include <linux/const.h>
1437 +
1438 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1439 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1440 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1441
1442 /*
1443 * Memory returned by kmalloc() may be used for DMA, so we must make
1444 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1445 index e4448e1..7bc86b7 100644
1446 --- a/arch/arm/include/asm/cacheflush.h
1447 +++ b/arch/arm/include/asm/cacheflush.h
1448 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1449 void (*dma_unmap_area)(const void *, size_t, int);
1450
1451 void (*dma_flush_range)(const void *, const void *);
1452 -};
1453 +} __no_const;
1454
1455 /*
1456 * Select the calling method
1457 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1458 index 7eb18c1..e38b6d2 100644
1459 --- a/arch/arm/include/asm/cmpxchg.h
1460 +++ b/arch/arm/include/asm/cmpxchg.h
1461 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1462
1463 #define xchg(ptr,x) \
1464 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1465 +#define xchg_unchecked(ptr,x) \
1466 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1467
1468 #include <asm-generic/cmpxchg-local.h>
1469
1470 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1471 index 38050b1..9d90e8b 100644
1472 --- a/arch/arm/include/asm/elf.h
1473 +++ b/arch/arm/include/asm/elf.h
1474 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1475 the loader. We need to make sure that it is out of the way of the program
1476 that it will "exec", and that there is sufficient room for the brk. */
1477
1478 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1479 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1480 +
1481 +#ifdef CONFIG_PAX_ASLR
1482 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1483 +
1484 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1485 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1486 +#endif
1487
1488 /* When the program starts, a1 contains a pointer to a function to be
1489 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1490 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1491 extern void elf_set_personality(const struct elf32_hdr *);
1492 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1493
1494 -struct mm_struct;
1495 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1496 -#define arch_randomize_brk arch_randomize_brk
1497 -
1498 #endif
1499 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1500 index e51b1e8..32a3113 100644
1501 --- a/arch/arm/include/asm/kmap_types.h
1502 +++ b/arch/arm/include/asm/kmap_types.h
1503 @@ -21,6 +21,7 @@ enum km_type {
1504 KM_L1_CACHE,
1505 KM_L2_CACHE,
1506 KM_KDB,
1507 + KM_CLEARPAGE,
1508 KM_TYPE_NR
1509 };
1510
1511 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1512 index 53426c6..c7baff3 100644
1513 --- a/arch/arm/include/asm/outercache.h
1514 +++ b/arch/arm/include/asm/outercache.h
1515 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1516 #endif
1517 void (*set_debug)(unsigned long);
1518 void (*resume)(void);
1519 -};
1520 +} __no_const;
1521
1522 #ifdef CONFIG_OUTER_CACHE
1523
1524 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1525 index ecf9019..b71d9a1 100644
1526 --- a/arch/arm/include/asm/page.h
1527 +++ b/arch/arm/include/asm/page.h
1528 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1529 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1530 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1531 unsigned long vaddr, struct vm_area_struct *vma);
1532 -};
1533 +} __no_const;
1534
1535 #ifdef MULTI_USER
1536 extern struct cpu_user_fns cpu_user;
1537 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1538 index 943504f..bf8d667 100644
1539 --- a/arch/arm/include/asm/pgalloc.h
1540 +++ b/arch/arm/include/asm/pgalloc.h
1541 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1542 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1543 }
1544
1545 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1546 +{
1547 + pud_populate(mm, pud, pmd);
1548 +}
1549 +
1550 #else /* !CONFIG_ARM_LPAE */
1551
1552 /*
1553 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1554 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1555 #define pmd_free(mm, pmd) do { } while (0)
1556 #define pud_populate(mm,pmd,pte) BUG()
1557 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1558
1559 #endif /* CONFIG_ARM_LPAE */
1560
1561 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1562 index af7b0bd..6750a8c 100644
1563 --- a/arch/arm/include/asm/thread_info.h
1564 +++ b/arch/arm/include/asm/thread_info.h
1565 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1566 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1567 #define TIF_SYSCALL_TRACE 8
1568 #define TIF_SYSCALL_AUDIT 9
1569 +
1570 +/* within 8 bits of TIF_SYSCALL_TRACE
1571 + to meet flexible second operand requirements
1572 +*/
1573 +#define TIF_GRSEC_SETXID 10
1574 +
1575 #define TIF_POLLING_NRFLAG 16
1576 #define TIF_USING_IWMMXT 17
1577 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1578 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1579 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
1580 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1581 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1582 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1583
1584 /* Checks for any syscall work in entry-common.S */
1585 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1586 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1587 + _TIF_GRSEC_SETXID)
1588
1589 /*
1590 * Change these and you break ASM code in entry-common.S
1591 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1592 index 71f6536..602f279 100644
1593 --- a/arch/arm/include/asm/uaccess.h
1594 +++ b/arch/arm/include/asm/uaccess.h
1595 @@ -22,6 +22,8 @@
1596 #define VERIFY_READ 0
1597 #define VERIFY_WRITE 1
1598
1599 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1600 +
1601 /*
1602 * The exception table consists of pairs of addresses: the first is the
1603 * address of an instruction that is allowed to fault, and the second is
1604 @@ -387,8 +389,23 @@ do { \
1605
1606
1607 #ifdef CONFIG_MMU
1608 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1609 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1610 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1611 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1612 +
1613 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1614 +{
1615 + if (!__builtin_constant_p(n))
1616 + check_object_size(to, n, false);
1617 + return ___copy_from_user(to, from, n);
1618 +}
1619 +
1620 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1621 +{
1622 + if (!__builtin_constant_p(n))
1623 + check_object_size(from, n, true);
1624 + return ___copy_to_user(to, from, n);
1625 +}
1626 +
1627 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1628 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1629 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1630 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1631
1632 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1633 {
1634 + if ((long)n < 0)
1635 + return n;
1636 +
1637 if (access_ok(VERIFY_READ, from, n))
1638 n = __copy_from_user(to, from, n);
1639 else /* security hole - plug it */
1640 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1641
1642 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1643 {
1644 + if ((long)n < 0)
1645 + return n;
1646 +
1647 if (access_ok(VERIFY_WRITE, to, n))
1648 n = __copy_to_user(to, from, n);
1649 return n;
1650 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1651 index b57c75e..ed2d6b2 100644
1652 --- a/arch/arm/kernel/armksyms.c
1653 +++ b/arch/arm/kernel/armksyms.c
1654 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1655 #ifdef CONFIG_MMU
1656 EXPORT_SYMBOL(copy_page);
1657
1658 -EXPORT_SYMBOL(__copy_from_user);
1659 -EXPORT_SYMBOL(__copy_to_user);
1660 +EXPORT_SYMBOL(___copy_from_user);
1661 +EXPORT_SYMBOL(___copy_to_user);
1662 EXPORT_SYMBOL(__clear_user);
1663
1664 EXPORT_SYMBOL(__get_user_1);
1665 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1666 index 693b744..e684262 100644
1667 --- a/arch/arm/kernel/process.c
1668 +++ b/arch/arm/kernel/process.c
1669 @@ -28,7 +28,6 @@
1670 #include <linux/tick.h>
1671 #include <linux/utsname.h>
1672 #include <linux/uaccess.h>
1673 -#include <linux/random.h>
1674 #include <linux/hw_breakpoint.h>
1675 #include <linux/cpuidle.h>
1676
1677 @@ -256,9 +255,10 @@ void machine_power_off(void)
1678 machine_shutdown();
1679 if (pm_power_off)
1680 pm_power_off();
1681 + BUG();
1682 }
1683
1684 -void machine_restart(char *cmd)
1685 +__noreturn void machine_restart(char *cmd)
1686 {
1687 machine_shutdown();
1688
1689 @@ -501,12 +501,6 @@ unsigned long get_wchan(struct task_struct *p)
1690 return 0;
1691 }
1692
1693 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1694 -{
1695 - unsigned long range_end = mm->brk + 0x02000000;
1696 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1697 -}
1698 -
1699 #ifdef CONFIG_MMU
1700 /*
1701 * The vectors page is always readable from user space for the
1702 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1703 index 14e3826..d832d89 100644
1704 --- a/arch/arm/kernel/ptrace.c
1705 +++ b/arch/arm/kernel/ptrace.c
1706 @@ -907,10 +907,19 @@ long arch_ptrace(struct task_struct *child, long request,
1707 return ret;
1708 }
1709
1710 +#ifdef CONFIG_GRKERNSEC_SETXID
1711 +extern void gr_delayed_cred_worker(void);
1712 +#endif
1713 +
1714 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1715 {
1716 unsigned long ip;
1717
1718 +#ifdef CONFIG_GRKERNSEC_SETXID
1719 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1720 + gr_delayed_cred_worker();
1721 +#endif
1722 +
1723 if (why)
1724 audit_syscall_exit(regs);
1725 else
1726 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1727 index e15d83b..8c466dd 100644
1728 --- a/arch/arm/kernel/setup.c
1729 +++ b/arch/arm/kernel/setup.c
1730 @@ -112,13 +112,13 @@ struct processor processor __read_mostly;
1731 struct cpu_tlb_fns cpu_tlb __read_mostly;
1732 #endif
1733 #ifdef MULTI_USER
1734 -struct cpu_user_fns cpu_user __read_mostly;
1735 +struct cpu_user_fns cpu_user __read_only;
1736 #endif
1737 #ifdef MULTI_CACHE
1738 -struct cpu_cache_fns cpu_cache __read_mostly;
1739 +struct cpu_cache_fns cpu_cache __read_only;
1740 #endif
1741 #ifdef CONFIG_OUTER_CACHE
1742 -struct outer_cache_fns outer_cache __read_mostly;
1743 +struct outer_cache_fns outer_cache __read_only;
1744 EXPORT_SYMBOL(outer_cache);
1745 #endif
1746
1747 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1748 index c7cae6b..e1e523c 100644
1749 --- a/arch/arm/kernel/traps.c
1750 +++ b/arch/arm/kernel/traps.c
1751 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1752
1753 static DEFINE_RAW_SPINLOCK(die_lock);
1754
1755 +extern void gr_handle_kernel_exploit(void);
1756 +
1757 /*
1758 * This function is protected against re-entrancy.
1759 */
1760 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1761 panic("Fatal exception in interrupt");
1762 if (panic_on_oops)
1763 panic("Fatal exception");
1764 +
1765 + gr_handle_kernel_exploit();
1766 +
1767 if (ret != NOTIFY_STOP)
1768 do_exit(SIGSEGV);
1769 }
1770 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1771 index 66a477a..bee61d3 100644
1772 --- a/arch/arm/lib/copy_from_user.S
1773 +++ b/arch/arm/lib/copy_from_user.S
1774 @@ -16,7 +16,7 @@
1775 /*
1776 * Prototype:
1777 *
1778 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1779 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1780 *
1781 * Purpose:
1782 *
1783 @@ -84,11 +84,11 @@
1784
1785 .text
1786
1787 -ENTRY(__copy_from_user)
1788 +ENTRY(___copy_from_user)
1789
1790 #include "copy_template.S"
1791
1792 -ENDPROC(__copy_from_user)
1793 +ENDPROC(___copy_from_user)
1794
1795 .pushsection .fixup,"ax"
1796 .align 0
1797 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1798 index 6ee2f67..d1cce76 100644
1799 --- a/arch/arm/lib/copy_page.S
1800 +++ b/arch/arm/lib/copy_page.S
1801 @@ -10,6 +10,7 @@
1802 * ASM optimised string functions
1803 */
1804 #include <linux/linkage.h>
1805 +#include <linux/const.h>
1806 #include <asm/assembler.h>
1807 #include <asm/asm-offsets.h>
1808 #include <asm/cache.h>
1809 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1810 index d066df6..df28194 100644
1811 --- a/arch/arm/lib/copy_to_user.S
1812 +++ b/arch/arm/lib/copy_to_user.S
1813 @@ -16,7 +16,7 @@
1814 /*
1815 * Prototype:
1816 *
1817 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1818 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1819 *
1820 * Purpose:
1821 *
1822 @@ -88,11 +88,11 @@
1823 .text
1824
1825 ENTRY(__copy_to_user_std)
1826 -WEAK(__copy_to_user)
1827 +WEAK(___copy_to_user)
1828
1829 #include "copy_template.S"
1830
1831 -ENDPROC(__copy_to_user)
1832 +ENDPROC(___copy_to_user)
1833 ENDPROC(__copy_to_user_std)
1834
1835 .pushsection .fixup,"ax"
1836 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1837 index 025f742..8432b08 100644
1838 --- a/arch/arm/lib/uaccess_with_memcpy.c
1839 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1840 @@ -104,7 +104,7 @@ out:
1841 }
1842
1843 unsigned long
1844 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1845 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1846 {
1847 /*
1848 * This test is stubbed out of the main function above to keep
1849 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
1850 index f261cd2..4ae63fb 100644
1851 --- a/arch/arm/mach-kirkwood/common.c
1852 +++ b/arch/arm/mach-kirkwood/common.c
1853 @@ -128,7 +128,7 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
1854 clk_gate_ops.disable(hw);
1855 }
1856
1857 -static struct clk_ops clk_gate_fn_ops;
1858 +static clk_ops_no_const clk_gate_fn_ops;
1859
1860 static struct clk __init *clk_register_gate_fn(struct device *dev,
1861 const char *name,
1862 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1863 index 2c5d0ed..7d9099c 100644
1864 --- a/arch/arm/mach-omap2/board-n8x0.c
1865 +++ b/arch/arm/mach-omap2/board-n8x0.c
1866 @@ -594,7 +594,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1867 }
1868 #endif
1869
1870 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1871 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1872 .late_init = n8x0_menelaus_late_init,
1873 };
1874
1875 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1876 index c3bd834..e81ef02 100644
1877 --- a/arch/arm/mm/fault.c
1878 +++ b/arch/arm/mm/fault.c
1879 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1880 }
1881 #endif
1882
1883 +#ifdef CONFIG_PAX_PAGEEXEC
1884 + if (fsr & FSR_LNX_PF) {
1885 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1886 + do_group_exit(SIGKILL);
1887 + }
1888 +#endif
1889 +
1890 tsk->thread.address = addr;
1891 tsk->thread.error_code = fsr;
1892 tsk->thread.trap_no = 14;
1893 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1894 }
1895 #endif /* CONFIG_MMU */
1896
1897 +#ifdef CONFIG_PAX_PAGEEXEC
1898 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1899 +{
1900 + long i;
1901 +
1902 + printk(KERN_ERR "PAX: bytes at PC: ");
1903 + for (i = 0; i < 20; i++) {
1904 + unsigned char c;
1905 + if (get_user(c, (__force unsigned char __user *)pc+i))
1906 + printk(KERN_CONT "?? ");
1907 + else
1908 + printk(KERN_CONT "%02x ", c);
1909 + }
1910 + printk("\n");
1911 +
1912 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1913 + for (i = -1; i < 20; i++) {
1914 + unsigned long c;
1915 + if (get_user(c, (__force unsigned long __user *)sp+i))
1916 + printk(KERN_CONT "???????? ");
1917 + else
1918 + printk(KERN_CONT "%08lx ", c);
1919 + }
1920 + printk("\n");
1921 +}
1922 +#endif
1923 +
1924 /*
1925 * First Level Translation Fault Handler
1926 *
1927 @@ -574,6 +608,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1928 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1929 struct siginfo info;
1930
1931 +#ifdef CONFIG_PAX_REFCOUNT
1932 + if (fsr_fs(ifsr) == 2) {
1933 + unsigned int bkpt;
1934 +
1935 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1936 + current->thread.error_code = ifsr;
1937 + current->thread.trap_no = 0;
1938 + pax_report_refcount_overflow(regs);
1939 + fixup_exception(regs);
1940 + return;
1941 + }
1942 + }
1943 +#endif
1944 +
1945 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1946 return;
1947
1948 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1949 index ce8cb19..3ec539d 100644
1950 --- a/arch/arm/mm/mmap.c
1951 +++ b/arch/arm/mm/mmap.c
1952 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1953 if (len > TASK_SIZE)
1954 return -ENOMEM;
1955
1956 +#ifdef CONFIG_PAX_RANDMMAP
1957 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1958 +#endif
1959 +
1960 if (addr) {
1961 if (do_align)
1962 addr = COLOUR_ALIGN(addr, pgoff);
1963 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1964 addr = PAGE_ALIGN(addr);
1965
1966 vma = find_vma(mm, addr);
1967 - if (TASK_SIZE - len >= addr &&
1968 - (!vma || addr + len <= vma->vm_start))
1969 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1970 return addr;
1971 }
1972 if (len > mm->cached_hole_size) {
1973 - start_addr = addr = mm->free_area_cache;
1974 + start_addr = addr = mm->free_area_cache;
1975 } else {
1976 - start_addr = addr = mm->mmap_base;
1977 - mm->cached_hole_size = 0;
1978 + start_addr = addr = mm->mmap_base;
1979 + mm->cached_hole_size = 0;
1980 }
1981
1982 full_search:
1983 @@ -124,14 +127,14 @@ full_search:
1984 * Start a new search - just in case we missed
1985 * some holes.
1986 */
1987 - if (start_addr != TASK_UNMAPPED_BASE) {
1988 - start_addr = addr = TASK_UNMAPPED_BASE;
1989 + if (start_addr != mm->mmap_base) {
1990 + start_addr = addr = mm->mmap_base;
1991 mm->cached_hole_size = 0;
1992 goto full_search;
1993 }
1994 return -ENOMEM;
1995 }
1996 - if (!vma || addr + len <= vma->vm_start) {
1997 + if (check_heap_stack_gap(vma, addr, len)) {
1998 /*
1999 * Remember the place where we stopped the search:
2000 */
2001 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2002
2003 if (mmap_is_legacy()) {
2004 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2005 +
2006 +#ifdef CONFIG_PAX_RANDMMAP
2007 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2008 + mm->mmap_base += mm->delta_mmap;
2009 +#endif
2010 +
2011 mm->get_unmapped_area = arch_get_unmapped_area;
2012 mm->unmap_area = arch_unmap_area;
2013 } else {
2014 mm->mmap_base = mmap_base(random_factor);
2015 +
2016 +#ifdef CONFIG_PAX_RANDMMAP
2017 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2018 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2019 +#endif
2020 +
2021 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2022 mm->unmap_area = arch_unmap_area_topdown;
2023 }
2024 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2025 index fd556f7..af2e7d2 100644
2026 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2027 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2028 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2029 value in bridge_virt_base */
2030 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2031 const int win);
2032 -};
2033 +} __no_const;
2034
2035 /*
2036 * Information needed to setup one address mapping.
2037 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2038 index 71a6827..e7fbc23 100644
2039 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2040 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2041 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
2042 int (*started)(unsigned ch);
2043 int (*flush)(unsigned ch);
2044 int (*stop)(unsigned ch);
2045 -};
2046 +} __no_const;
2047
2048 extern void *samsung_dmadev_get_ops(void);
2049 extern void *s3c_dma_get_ops(void);
2050 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2051 index 5f28cae..3d23723 100644
2052 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2053 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2054 @@ -14,7 +14,7 @@
2055 struct s5p_ehci_platdata {
2056 int (*phy_init)(struct platform_device *pdev, int type);
2057 int (*phy_exit)(struct platform_device *pdev, int type);
2058 -};
2059 +} __no_const;
2060
2061 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2062
2063 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2064 index c3a58a1..78fbf54 100644
2065 --- a/arch/avr32/include/asm/cache.h
2066 +++ b/arch/avr32/include/asm/cache.h
2067 @@ -1,8 +1,10 @@
2068 #ifndef __ASM_AVR32_CACHE_H
2069 #define __ASM_AVR32_CACHE_H
2070
2071 +#include <linux/const.h>
2072 +
2073 #define L1_CACHE_SHIFT 5
2074 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2075 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2076
2077 /*
2078 * Memory returned by kmalloc() may be used for DMA, so we must make
2079 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2080 index 3b3159b..425ea94 100644
2081 --- a/arch/avr32/include/asm/elf.h
2082 +++ b/arch/avr32/include/asm/elf.h
2083 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2084 the loader. We need to make sure that it is out of the way of the program
2085 that it will "exec", and that there is sufficient room for the brk. */
2086
2087 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2088 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2089
2090 +#ifdef CONFIG_PAX_ASLR
2091 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2092 +
2093 +#define PAX_DELTA_MMAP_LEN 15
2094 +#define PAX_DELTA_STACK_LEN 15
2095 +#endif
2096
2097 /* This yields a mask that user programs can use to figure out what
2098 instruction set this CPU supports. This could be done in user space,
2099 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2100 index b7f5c68..556135c 100644
2101 --- a/arch/avr32/include/asm/kmap_types.h
2102 +++ b/arch/avr32/include/asm/kmap_types.h
2103 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2104 D(11) KM_IRQ1,
2105 D(12) KM_SOFTIRQ0,
2106 D(13) KM_SOFTIRQ1,
2107 -D(14) KM_TYPE_NR
2108 +D(14) KM_CLEARPAGE,
2109 +D(15) KM_TYPE_NR
2110 };
2111
2112 #undef D
2113 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2114 index f7040a1..db9f300 100644
2115 --- a/arch/avr32/mm/fault.c
2116 +++ b/arch/avr32/mm/fault.c
2117 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2118
2119 int exception_trace = 1;
2120
2121 +#ifdef CONFIG_PAX_PAGEEXEC
2122 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2123 +{
2124 + unsigned long i;
2125 +
2126 + printk(KERN_ERR "PAX: bytes at PC: ");
2127 + for (i = 0; i < 20; i++) {
2128 + unsigned char c;
2129 + if (get_user(c, (unsigned char *)pc+i))
2130 + printk(KERN_CONT "???????? ");
2131 + else
2132 + printk(KERN_CONT "%02x ", c);
2133 + }
2134 + printk("\n");
2135 +}
2136 +#endif
2137 +
2138 /*
2139 * This routine handles page faults. It determines the address and the
2140 * problem, and then passes it off to one of the appropriate routines.
2141 @@ -156,6 +173,16 @@ bad_area:
2142 up_read(&mm->mmap_sem);
2143
2144 if (user_mode(regs)) {
2145 +
2146 +#ifdef CONFIG_PAX_PAGEEXEC
2147 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2148 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2149 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2150 + do_group_exit(SIGKILL);
2151 + }
2152 + }
2153 +#endif
2154 +
2155 if (exception_trace && printk_ratelimit())
2156 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2157 "sp %08lx ecr %lu\n",
2158 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2159 index 568885a..f8008df 100644
2160 --- a/arch/blackfin/include/asm/cache.h
2161 +++ b/arch/blackfin/include/asm/cache.h
2162 @@ -7,6 +7,7 @@
2163 #ifndef __ARCH_BLACKFIN_CACHE_H
2164 #define __ARCH_BLACKFIN_CACHE_H
2165
2166 +#include <linux/const.h>
2167 #include <linux/linkage.h> /* for asmlinkage */
2168
2169 /*
2170 @@ -14,7 +15,7 @@
2171 * Blackfin loads 32 bytes for cache
2172 */
2173 #define L1_CACHE_SHIFT 5
2174 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2175 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2176 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2177
2178 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2179 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2180 index aea2718..3639a60 100644
2181 --- a/arch/cris/include/arch-v10/arch/cache.h
2182 +++ b/arch/cris/include/arch-v10/arch/cache.h
2183 @@ -1,8 +1,9 @@
2184 #ifndef _ASM_ARCH_CACHE_H
2185 #define _ASM_ARCH_CACHE_H
2186
2187 +#include <linux/const.h>
2188 /* Etrax 100LX have 32-byte cache-lines. */
2189 -#define L1_CACHE_BYTES 32
2190 #define L1_CACHE_SHIFT 5
2191 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2192
2193 #endif /* _ASM_ARCH_CACHE_H */
2194 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2195 index 7caf25d..ee65ac5 100644
2196 --- a/arch/cris/include/arch-v32/arch/cache.h
2197 +++ b/arch/cris/include/arch-v32/arch/cache.h
2198 @@ -1,11 +1,12 @@
2199 #ifndef _ASM_CRIS_ARCH_CACHE_H
2200 #define _ASM_CRIS_ARCH_CACHE_H
2201
2202 +#include <linux/const.h>
2203 #include <arch/hwregs/dma.h>
2204
2205 /* A cache-line is 32 bytes. */
2206 -#define L1_CACHE_BYTES 32
2207 #define L1_CACHE_SHIFT 5
2208 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2209
2210 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2211
2212 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2213 index b86329d..6709906 100644
2214 --- a/arch/frv/include/asm/atomic.h
2215 +++ b/arch/frv/include/asm/atomic.h
2216 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2217 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2218 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2219
2220 +#define atomic64_read_unchecked(v) atomic64_read(v)
2221 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2222 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2223 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2224 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2225 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2226 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2227 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2228 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2229 +
2230 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2231 {
2232 int c, old;
2233 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2234 index 2797163..c2a401d 100644
2235 --- a/arch/frv/include/asm/cache.h
2236 +++ b/arch/frv/include/asm/cache.h
2237 @@ -12,10 +12,11 @@
2238 #ifndef __ASM_CACHE_H
2239 #define __ASM_CACHE_H
2240
2241 +#include <linux/const.h>
2242
2243 /* bytes per L1 cache line */
2244 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2245 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2246 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2247
2248 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2249 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2250 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2251 index f8e16b2..c73ff79 100644
2252 --- a/arch/frv/include/asm/kmap_types.h
2253 +++ b/arch/frv/include/asm/kmap_types.h
2254 @@ -23,6 +23,7 @@ enum km_type {
2255 KM_IRQ1,
2256 KM_SOFTIRQ0,
2257 KM_SOFTIRQ1,
2258 + KM_CLEARPAGE,
2259 KM_TYPE_NR
2260 };
2261
2262 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2263 index 385fd30..6c3d97e 100644
2264 --- a/arch/frv/mm/elf-fdpic.c
2265 +++ b/arch/frv/mm/elf-fdpic.c
2266 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2267 if (addr) {
2268 addr = PAGE_ALIGN(addr);
2269 vma = find_vma(current->mm, addr);
2270 - if (TASK_SIZE - len >= addr &&
2271 - (!vma || addr + len <= vma->vm_start))
2272 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2273 goto success;
2274 }
2275
2276 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2277 for (; vma; vma = vma->vm_next) {
2278 if (addr > limit)
2279 break;
2280 - if (addr + len <= vma->vm_start)
2281 + if (check_heap_stack_gap(vma, addr, len))
2282 goto success;
2283 addr = vma->vm_end;
2284 }
2285 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2286 for (; vma; vma = vma->vm_next) {
2287 if (addr > limit)
2288 break;
2289 - if (addr + len <= vma->vm_start)
2290 + if (check_heap_stack_gap(vma, addr, len))
2291 goto success;
2292 addr = vma->vm_end;
2293 }
2294 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2295 index c635028..6d9445a 100644
2296 --- a/arch/h8300/include/asm/cache.h
2297 +++ b/arch/h8300/include/asm/cache.h
2298 @@ -1,8 +1,10 @@
2299 #ifndef __ARCH_H8300_CACHE_H
2300 #define __ARCH_H8300_CACHE_H
2301
2302 +#include <linux/const.h>
2303 +
2304 /* bytes per L1 cache line */
2305 -#define L1_CACHE_BYTES 4
2306 +#define L1_CACHE_BYTES _AC(4,UL)
2307
2308 /* m68k-elf-gcc 2.95.2 doesn't like these */
2309
2310 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2311 index 0f01de2..d37d309 100644
2312 --- a/arch/hexagon/include/asm/cache.h
2313 +++ b/arch/hexagon/include/asm/cache.h
2314 @@ -21,9 +21,11 @@
2315 #ifndef __ASM_CACHE_H
2316 #define __ASM_CACHE_H
2317
2318 +#include <linux/const.h>
2319 +
2320 /* Bytes per L1 cache line */
2321 -#define L1_CACHE_SHIFT (5)
2322 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2323 +#define L1_CACHE_SHIFT 5
2324 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2325
2326 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2327 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2328 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2329 index 6e6fe18..a6ae668 100644
2330 --- a/arch/ia64/include/asm/atomic.h
2331 +++ b/arch/ia64/include/asm/atomic.h
2332 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2333 #define atomic64_inc(v) atomic64_add(1, (v))
2334 #define atomic64_dec(v) atomic64_sub(1, (v))
2335
2336 +#define atomic64_read_unchecked(v) atomic64_read(v)
2337 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2338 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2339 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2340 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2341 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2342 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2343 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2344 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2345 +
2346 /* Atomic operations are already serializing */
2347 #define smp_mb__before_atomic_dec() barrier()
2348 #define smp_mb__after_atomic_dec() barrier()
2349 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2350 index 988254a..e1ee885 100644
2351 --- a/arch/ia64/include/asm/cache.h
2352 +++ b/arch/ia64/include/asm/cache.h
2353 @@ -1,6 +1,7 @@
2354 #ifndef _ASM_IA64_CACHE_H
2355 #define _ASM_IA64_CACHE_H
2356
2357 +#include <linux/const.h>
2358
2359 /*
2360 * Copyright (C) 1998-2000 Hewlett-Packard Co
2361 @@ -9,7 +10,7 @@
2362
2363 /* Bytes per L1 (data) cache line. */
2364 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2365 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2366 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2367
2368 #ifdef CONFIG_SMP
2369 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2370 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2371 index b5298eb..67c6e62 100644
2372 --- a/arch/ia64/include/asm/elf.h
2373 +++ b/arch/ia64/include/asm/elf.h
2374 @@ -42,6 +42,13 @@
2375 */
2376 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2377
2378 +#ifdef CONFIG_PAX_ASLR
2379 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2380 +
2381 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2382 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2383 +#endif
2384 +
2385 #define PT_IA_64_UNWIND 0x70000001
2386
2387 /* IA-64 relocations: */
2388 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2389 index 96a8d92..617a1cf 100644
2390 --- a/arch/ia64/include/asm/pgalloc.h
2391 +++ b/arch/ia64/include/asm/pgalloc.h
2392 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2393 pgd_val(*pgd_entry) = __pa(pud);
2394 }
2395
2396 +static inline void
2397 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2398 +{
2399 + pgd_populate(mm, pgd_entry, pud);
2400 +}
2401 +
2402 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2403 {
2404 return quicklist_alloc(0, GFP_KERNEL, NULL);
2405 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2406 pud_val(*pud_entry) = __pa(pmd);
2407 }
2408
2409 +static inline void
2410 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2411 +{
2412 + pud_populate(mm, pud_entry, pmd);
2413 +}
2414 +
2415 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2416 {
2417 return quicklist_alloc(0, GFP_KERNEL, NULL);
2418 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2419 index 815810c..d60bd4c 100644
2420 --- a/arch/ia64/include/asm/pgtable.h
2421 +++ b/arch/ia64/include/asm/pgtable.h
2422 @@ -12,7 +12,7 @@
2423 * David Mosberger-Tang <davidm@hpl.hp.com>
2424 */
2425
2426 -
2427 +#include <linux/const.h>
2428 #include <asm/mman.h>
2429 #include <asm/page.h>
2430 #include <asm/processor.h>
2431 @@ -142,6 +142,17 @@
2432 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2433 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2434 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2435 +
2436 +#ifdef CONFIG_PAX_PAGEEXEC
2437 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2438 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2439 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2440 +#else
2441 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2442 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2443 +# define PAGE_COPY_NOEXEC PAGE_COPY
2444 +#endif
2445 +
2446 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2447 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2448 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2449 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2450 index 54ff557..70c88b7 100644
2451 --- a/arch/ia64/include/asm/spinlock.h
2452 +++ b/arch/ia64/include/asm/spinlock.h
2453 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2454 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2455
2456 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2457 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2458 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2459 }
2460
2461 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2462 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2463 index 449c8c0..432a3d2 100644
2464 --- a/arch/ia64/include/asm/uaccess.h
2465 +++ b/arch/ia64/include/asm/uaccess.h
2466 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2467 const void *__cu_from = (from); \
2468 long __cu_len = (n); \
2469 \
2470 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2471 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2472 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2473 __cu_len; \
2474 })
2475 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2476 long __cu_len = (n); \
2477 \
2478 __chk_user_ptr(__cu_from); \
2479 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2480 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2481 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2482 __cu_len; \
2483 })
2484 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2485 index 24603be..948052d 100644
2486 --- a/arch/ia64/kernel/module.c
2487 +++ b/arch/ia64/kernel/module.c
2488 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2489 void
2490 module_free (struct module *mod, void *module_region)
2491 {
2492 - if (mod && mod->arch.init_unw_table &&
2493 - module_region == mod->module_init) {
2494 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2495 unw_remove_unwind_table(mod->arch.init_unw_table);
2496 mod->arch.init_unw_table = NULL;
2497 }
2498 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2499 }
2500
2501 static inline int
2502 +in_init_rx (const struct module *mod, uint64_t addr)
2503 +{
2504 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2505 +}
2506 +
2507 +static inline int
2508 +in_init_rw (const struct module *mod, uint64_t addr)
2509 +{
2510 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2511 +}
2512 +
2513 +static inline int
2514 in_init (const struct module *mod, uint64_t addr)
2515 {
2516 - return addr - (uint64_t) mod->module_init < mod->init_size;
2517 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2518 +}
2519 +
2520 +static inline int
2521 +in_core_rx (const struct module *mod, uint64_t addr)
2522 +{
2523 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2524 +}
2525 +
2526 +static inline int
2527 +in_core_rw (const struct module *mod, uint64_t addr)
2528 +{
2529 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2530 }
2531
2532 static inline int
2533 in_core (const struct module *mod, uint64_t addr)
2534 {
2535 - return addr - (uint64_t) mod->module_core < mod->core_size;
2536 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2537 }
2538
2539 static inline int
2540 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2541 break;
2542
2543 case RV_BDREL:
2544 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2545 + if (in_init_rx(mod, val))
2546 + val -= (uint64_t) mod->module_init_rx;
2547 + else if (in_init_rw(mod, val))
2548 + val -= (uint64_t) mod->module_init_rw;
2549 + else if (in_core_rx(mod, val))
2550 + val -= (uint64_t) mod->module_core_rx;
2551 + else if (in_core_rw(mod, val))
2552 + val -= (uint64_t) mod->module_core_rw;
2553 break;
2554
2555 case RV_LTV:
2556 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2557 * addresses have been selected...
2558 */
2559 uint64_t gp;
2560 - if (mod->core_size > MAX_LTOFF)
2561 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2562 /*
2563 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2564 * at the end of the module.
2565 */
2566 - gp = mod->core_size - MAX_LTOFF / 2;
2567 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2568 else
2569 - gp = mod->core_size / 2;
2570 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2571 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2572 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2573 mod->arch.gp = gp;
2574 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2575 }
2576 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2577 index d9439ef..b9a4303 100644
2578 --- a/arch/ia64/kernel/sys_ia64.c
2579 +++ b/arch/ia64/kernel/sys_ia64.c
2580 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2581 if (REGION_NUMBER(addr) == RGN_HPAGE)
2582 addr = 0;
2583 #endif
2584 +
2585 +#ifdef CONFIG_PAX_RANDMMAP
2586 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2587 + addr = mm->free_area_cache;
2588 + else
2589 +#endif
2590 +
2591 if (!addr)
2592 addr = mm->free_area_cache;
2593
2594 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2595 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2596 /* At this point: (!vma || addr < vma->vm_end). */
2597 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2598 - if (start_addr != TASK_UNMAPPED_BASE) {
2599 + if (start_addr != mm->mmap_base) {
2600 /* Start a new search --- just in case we missed some holes. */
2601 - addr = TASK_UNMAPPED_BASE;
2602 + addr = mm->mmap_base;
2603 goto full_search;
2604 }
2605 return -ENOMEM;
2606 }
2607 - if (!vma || addr + len <= vma->vm_start) {
2608 + if (check_heap_stack_gap(vma, addr, len)) {
2609 /* Remember the address where we stopped this search: */
2610 mm->free_area_cache = addr + len;
2611 return addr;
2612 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2613 index 0ccb28f..8992469 100644
2614 --- a/arch/ia64/kernel/vmlinux.lds.S
2615 +++ b/arch/ia64/kernel/vmlinux.lds.S
2616 @@ -198,7 +198,7 @@ SECTIONS {
2617 /* Per-cpu data: */
2618 . = ALIGN(PERCPU_PAGE_SIZE);
2619 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2620 - __phys_per_cpu_start = __per_cpu_load;
2621 + __phys_per_cpu_start = per_cpu_load;
2622 /*
2623 * ensure percpu data fits
2624 * into percpu page size
2625 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2626 index 02d29c2..ea893df 100644
2627 --- a/arch/ia64/mm/fault.c
2628 +++ b/arch/ia64/mm/fault.c
2629 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2630 return pte_present(pte);
2631 }
2632
2633 +#ifdef CONFIG_PAX_PAGEEXEC
2634 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2635 +{
2636 + unsigned long i;
2637 +
2638 + printk(KERN_ERR "PAX: bytes at PC: ");
2639 + for (i = 0; i < 8; i++) {
2640 + unsigned int c;
2641 + if (get_user(c, (unsigned int *)pc+i))
2642 + printk(KERN_CONT "???????? ");
2643 + else
2644 + printk(KERN_CONT "%08x ", c);
2645 + }
2646 + printk("\n");
2647 +}
2648 +#endif
2649 +
2650 void __kprobes
2651 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2652 {
2653 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2654 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2655 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2656
2657 - if ((vma->vm_flags & mask) != mask)
2658 + if ((vma->vm_flags & mask) != mask) {
2659 +
2660 +#ifdef CONFIG_PAX_PAGEEXEC
2661 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2662 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2663 + goto bad_area;
2664 +
2665 + up_read(&mm->mmap_sem);
2666 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2667 + do_group_exit(SIGKILL);
2668 + }
2669 +#endif
2670 +
2671 goto bad_area;
2672
2673 + }
2674 +
2675 /*
2676 * If for any reason at all we couldn't handle the fault, make
2677 * sure we exit gracefully rather than endlessly redo the
2678 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2679 index 5ca674b..e0e1b70 100644
2680 --- a/arch/ia64/mm/hugetlbpage.c
2681 +++ b/arch/ia64/mm/hugetlbpage.c
2682 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2683 /* At this point: (!vmm || addr < vmm->vm_end). */
2684 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2685 return -ENOMEM;
2686 - if (!vmm || (addr + len) <= vmm->vm_start)
2687 + if (check_heap_stack_gap(vmm, addr, len))
2688 return addr;
2689 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2690 }
2691 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2692 index 0eab454..bd794f2 100644
2693 --- a/arch/ia64/mm/init.c
2694 +++ b/arch/ia64/mm/init.c
2695 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2696 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2697 vma->vm_end = vma->vm_start + PAGE_SIZE;
2698 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2699 +
2700 +#ifdef CONFIG_PAX_PAGEEXEC
2701 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2702 + vma->vm_flags &= ~VM_EXEC;
2703 +
2704 +#ifdef CONFIG_PAX_MPROTECT
2705 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2706 + vma->vm_flags &= ~VM_MAYEXEC;
2707 +#endif
2708 +
2709 + }
2710 +#endif
2711 +
2712 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2713 down_write(&current->mm->mmap_sem);
2714 if (insert_vm_struct(current->mm, vma)) {
2715 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2716 index 40b3ee9..8c2c112 100644
2717 --- a/arch/m32r/include/asm/cache.h
2718 +++ b/arch/m32r/include/asm/cache.h
2719 @@ -1,8 +1,10 @@
2720 #ifndef _ASM_M32R_CACHE_H
2721 #define _ASM_M32R_CACHE_H
2722
2723 +#include <linux/const.h>
2724 +
2725 /* L1 cache line size */
2726 #define L1_CACHE_SHIFT 4
2727 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2728 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2729
2730 #endif /* _ASM_M32R_CACHE_H */
2731 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2732 index 82abd15..d95ae5d 100644
2733 --- a/arch/m32r/lib/usercopy.c
2734 +++ b/arch/m32r/lib/usercopy.c
2735 @@ -14,6 +14,9 @@
2736 unsigned long
2737 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2738 {
2739 + if ((long)n < 0)
2740 + return n;
2741 +
2742 prefetch(from);
2743 if (access_ok(VERIFY_WRITE, to, n))
2744 __copy_user(to,from,n);
2745 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2746 unsigned long
2747 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2748 {
2749 + if ((long)n < 0)
2750 + return n;
2751 +
2752 prefetchw(to);
2753 if (access_ok(VERIFY_READ, from, n))
2754 __copy_user_zeroing(to,from,n);
2755 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2756 index 0395c51..5f26031 100644
2757 --- a/arch/m68k/include/asm/cache.h
2758 +++ b/arch/m68k/include/asm/cache.h
2759 @@ -4,9 +4,11 @@
2760 #ifndef __ARCH_M68K_CACHE_H
2761 #define __ARCH_M68K_CACHE_H
2762
2763 +#include <linux/const.h>
2764 +
2765 /* bytes per L1 cache line */
2766 #define L1_CACHE_SHIFT 4
2767 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2768 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2769
2770 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2771
2772 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2773 index 4efe96a..60e8699 100644
2774 --- a/arch/microblaze/include/asm/cache.h
2775 +++ b/arch/microblaze/include/asm/cache.h
2776 @@ -13,11 +13,12 @@
2777 #ifndef _ASM_MICROBLAZE_CACHE_H
2778 #define _ASM_MICROBLAZE_CACHE_H
2779
2780 +#include <linux/const.h>
2781 #include <asm/registers.h>
2782
2783 #define L1_CACHE_SHIFT 5
2784 /* word-granular cache in microblaze */
2785 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2786 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2787
2788 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2789
2790 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2791 index 3f4c5cb..3439c6e 100644
2792 --- a/arch/mips/include/asm/atomic.h
2793 +++ b/arch/mips/include/asm/atomic.h
2794 @@ -21,6 +21,10 @@
2795 #include <asm/cmpxchg.h>
2796 #include <asm/war.h>
2797
2798 +#ifdef CONFIG_GENERIC_ATOMIC64
2799 +#include <asm-generic/atomic64.h>
2800 +#endif
2801 +
2802 #define ATOMIC_INIT(i) { (i) }
2803
2804 /*
2805 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2806 */
2807 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2808
2809 +#define atomic64_read_unchecked(v) atomic64_read(v)
2810 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2811 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2812 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2813 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2814 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2815 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2816 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2817 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2818 +
2819 #endif /* CONFIG_64BIT */
2820
2821 /*
2822 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2823 index b4db69f..8f3b093 100644
2824 --- a/arch/mips/include/asm/cache.h
2825 +++ b/arch/mips/include/asm/cache.h
2826 @@ -9,10 +9,11 @@
2827 #ifndef _ASM_CACHE_H
2828 #define _ASM_CACHE_H
2829
2830 +#include <linux/const.h>
2831 #include <kmalloc.h>
2832
2833 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2834 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2835 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2836
2837 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2838 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2839 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2840 index 455c0ac..ad65fbe 100644
2841 --- a/arch/mips/include/asm/elf.h
2842 +++ b/arch/mips/include/asm/elf.h
2843 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2844 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2845 #endif
2846
2847 +#ifdef CONFIG_PAX_ASLR
2848 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2849 +
2850 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2851 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2852 +#endif
2853 +
2854 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2855 struct linux_binprm;
2856 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2857 int uses_interp);
2858
2859 -struct mm_struct;
2860 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2861 -#define arch_randomize_brk arch_randomize_brk
2862 -
2863 #endif /* _ASM_ELF_H */
2864 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2865 index c1f6afa..38cc6e9 100644
2866 --- a/arch/mips/include/asm/exec.h
2867 +++ b/arch/mips/include/asm/exec.h
2868 @@ -12,6 +12,6 @@
2869 #ifndef _ASM_EXEC_H
2870 #define _ASM_EXEC_H
2871
2872 -extern unsigned long arch_align_stack(unsigned long sp);
2873 +#define arch_align_stack(x) ((x) & ~0xfUL)
2874
2875 #endif /* _ASM_EXEC_H */
2876 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2877 index da9bd7d..91aa7ab 100644
2878 --- a/arch/mips/include/asm/page.h
2879 +++ b/arch/mips/include/asm/page.h
2880 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2881 #ifdef CONFIG_CPU_MIPS32
2882 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2883 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2884 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2885 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2886 #else
2887 typedef struct { unsigned long long pte; } pte_t;
2888 #define pte_val(x) ((x).pte)
2889 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2890 index 881d18b..cea38bc 100644
2891 --- a/arch/mips/include/asm/pgalloc.h
2892 +++ b/arch/mips/include/asm/pgalloc.h
2893 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2894 {
2895 set_pud(pud, __pud((unsigned long)pmd));
2896 }
2897 +
2898 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2899 +{
2900 + pud_populate(mm, pud, pmd);
2901 +}
2902 #endif
2903
2904 /*
2905 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2906 index ca97e0e..cd08920 100644
2907 --- a/arch/mips/include/asm/thread_info.h
2908 +++ b/arch/mips/include/asm/thread_info.h
2909 @@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2910 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2911 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2912 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2913 +/* li takes a 32bit immediate */
2914 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2915 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2916
2917 #ifdef CONFIG_MIPS32_O32
2918 @@ -134,15 +136,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2919 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2920 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2921 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2922 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2923 +
2924 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2925
2926 /* work to do in syscall_trace_leave() */
2927 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2928 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do on interrupt/exception return */
2931 #define _TIF_WORK_MASK (0x0000ffef & \
2932 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2933 /* work to do on any return to u-space */
2934 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2935 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2936
2937 #endif /* __KERNEL__ */
2938
2939 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2940 index 9fdd8bc..4bd7f1a 100644
2941 --- a/arch/mips/kernel/binfmt_elfn32.c
2942 +++ b/arch/mips/kernel/binfmt_elfn32.c
2943 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2944 #undef ELF_ET_DYN_BASE
2945 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2946
2947 +#ifdef CONFIG_PAX_ASLR
2948 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2949 +
2950 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2952 +#endif
2953 +
2954 #include <asm/processor.h>
2955 #include <linux/module.h>
2956 #include <linux/elfcore.h>
2957 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2958 index ff44823..97f8906 100644
2959 --- a/arch/mips/kernel/binfmt_elfo32.c
2960 +++ b/arch/mips/kernel/binfmt_elfo32.c
2961 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2962 #undef ELF_ET_DYN_BASE
2963 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2964
2965 +#ifdef CONFIG_PAX_ASLR
2966 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2967 +
2968 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2969 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2970 +#endif
2971 +
2972 #include <asm/processor.h>
2973
2974 /*
2975 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2976 index e9a5fd7..378809a 100644
2977 --- a/arch/mips/kernel/process.c
2978 +++ b/arch/mips/kernel/process.c
2979 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2980 out:
2981 return pc;
2982 }
2983 -
2984 -/*
2985 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2986 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2987 - */
2988 -unsigned long arch_align_stack(unsigned long sp)
2989 -{
2990 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2991 - sp -= get_random_int() & ~PAGE_MASK;
2992 -
2993 - return sp & ALMASK;
2994 -}
2995 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2996 index 4812c6d..2069554 100644
2997 --- a/arch/mips/kernel/ptrace.c
2998 +++ b/arch/mips/kernel/ptrace.c
2999 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
3000 return arch;
3001 }
3002
3003 +#ifdef CONFIG_GRKERNSEC_SETXID
3004 +extern void gr_delayed_cred_worker(void);
3005 +#endif
3006 +
3007 /*
3008 * Notification of system call entry/exit
3009 * - triggered by current->work.syscall_trace
3010 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3011 /* do the secure computing check first */
3012 secure_computing_strict(regs->regs[2]);
3013
3014 +#ifdef CONFIG_GRKERNSEC_SETXID
3015 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3016 + gr_delayed_cred_worker();
3017 +#endif
3018 +
3019 if (!(current->ptrace & PT_PTRACED))
3020 goto out;
3021
3022 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3023 index a632bc1..0b77c7c 100644
3024 --- a/arch/mips/kernel/scall32-o32.S
3025 +++ b/arch/mips/kernel/scall32-o32.S
3026 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3027
3028 stack_done:
3029 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3030 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3031 + li t1, _TIF_SYSCALL_WORK
3032 and t0, t1
3033 bnez t0, syscall_trace_entry # -> yes
3034
3035 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3036 index 3b5a5e9..e1ee86d 100644
3037 --- a/arch/mips/kernel/scall64-64.S
3038 +++ b/arch/mips/kernel/scall64-64.S
3039 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3040
3041 sd a3, PT_R26(sp) # save a3 for syscall restarting
3042
3043 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3044 + li t1, _TIF_SYSCALL_WORK
3045 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3046 and t0, t1, t0
3047 bnez t0, syscall_trace_entry
3048 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3049 index 6be6f70..1859577 100644
3050 --- a/arch/mips/kernel/scall64-n32.S
3051 +++ b/arch/mips/kernel/scall64-n32.S
3052 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3053
3054 sd a3, PT_R26(sp) # save a3 for syscall restarting
3055
3056 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3057 + li t1, _TIF_SYSCALL_WORK
3058 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3059 and t0, t1, t0
3060 bnez t0, n32_syscall_trace_entry
3061 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3062 index 5422855..74e63a3 100644
3063 --- a/arch/mips/kernel/scall64-o32.S
3064 +++ b/arch/mips/kernel/scall64-o32.S
3065 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3066 PTR 4b, bad_stack
3067 .previous
3068
3069 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3070 + li t1, _TIF_SYSCALL_WORK
3071 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3072 and t0, t1, t0
3073 bnez t0, trace_a_syscall
3074 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3075 index c14f6df..537e729 100644
3076 --- a/arch/mips/mm/fault.c
3077 +++ b/arch/mips/mm/fault.c
3078 @@ -27,6 +27,23 @@
3079 #include <asm/highmem.h> /* For VMALLOC_END */
3080 #include <linux/kdebug.h>
3081
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3084 +{
3085 + unsigned long i;
3086 +
3087 + printk(KERN_ERR "PAX: bytes at PC: ");
3088 + for (i = 0; i < 5; i++) {
3089 + unsigned int c;
3090 + if (get_user(c, (unsigned int *)pc+i))
3091 + printk(KERN_CONT "???????? ");
3092 + else
3093 + printk(KERN_CONT "%08x ", c);
3094 + }
3095 + printk("\n");
3096 +}
3097 +#endif
3098 +
3099 /*
3100 * This routine handles page faults. It determines the address,
3101 * and the problem, and then passes it off to one of the appropriate
3102 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3103 index 302d779..7d35bf8 100644
3104 --- a/arch/mips/mm/mmap.c
3105 +++ b/arch/mips/mm/mmap.c
3106 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3107 do_color_align = 1;
3108
3109 /* requesting a specific address */
3110 +
3111 +#ifdef CONFIG_PAX_RANDMMAP
3112 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3113 +#endif
3114 +
3115 if (addr) {
3116 if (do_color_align)
3117 addr = COLOUR_ALIGN(addr, pgoff);
3118 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3119 addr = PAGE_ALIGN(addr);
3120
3121 vma = find_vma(mm, addr);
3122 - if (TASK_SIZE - len >= addr &&
3123 - (!vma || addr + len <= vma->vm_start))
3124 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3125 return addr;
3126 }
3127
3128 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3129 /* At this point: (!vma || addr < vma->vm_end). */
3130 if (TASK_SIZE - len < addr)
3131 return -ENOMEM;
3132 - if (!vma || addr + len <= vma->vm_start)
3133 + if (check_heap_stack_gap(vmm, addr, len))
3134 return addr;
3135 addr = vma->vm_end;
3136 if (do_color_align)
3137 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3138 /* make sure it can fit in the remaining address space */
3139 if (likely(addr > len)) {
3140 vma = find_vma(mm, addr - len);
3141 - if (!vma || addr <= vma->vm_start) {
3142 + if (check_heap_stack_gap(vmm, addr - len, len))
3143 /* cache the address as a hint for next time */
3144 return mm->free_area_cache = addr - len;
3145 }
3146 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3147 * return with success:
3148 */
3149 vma = find_vma(mm, addr);
3150 - if (likely(!vma || addr + len <= vma->vm_start)) {
3151 + if (check_heap_stack_gap(vmm, addr, len)) {
3152 /* cache the address as a hint for next time */
3153 return mm->free_area_cache = addr;
3154 }
3155 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3156 mm->unmap_area = arch_unmap_area_topdown;
3157 }
3158 }
3159 -
3160 -static inline unsigned long brk_rnd(void)
3161 -{
3162 - unsigned long rnd = get_random_int();
3163 -
3164 - rnd = rnd << PAGE_SHIFT;
3165 - /* 8MB for 32bit, 256MB for 64bit */
3166 - if (TASK_IS_32BIT_ADDR)
3167 - rnd = rnd & 0x7ffffful;
3168 - else
3169 - rnd = rnd & 0xffffffful;
3170 -
3171 - return rnd;
3172 -}
3173 -
3174 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3175 -{
3176 - unsigned long base = mm->brk;
3177 - unsigned long ret;
3178 -
3179 - ret = PAGE_ALIGN(base + brk_rnd());
3180 -
3181 - if (ret < mm->brk)
3182 - return mm->brk;
3183 -
3184 - return ret;
3185 -}
3186 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3187 index 967d144..db12197 100644
3188 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3189 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3190 @@ -11,12 +11,14 @@
3191 #ifndef _ASM_PROC_CACHE_H
3192 #define _ASM_PROC_CACHE_H
3193
3194 +#include <linux/const.h>
3195 +
3196 /* L1 cache */
3197
3198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3200 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3202 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3204
3205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3206 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3207 index bcb5df2..84fabd2 100644
3208 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3209 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3210 @@ -16,13 +16,15 @@
3211 #ifndef _ASM_PROC_CACHE_H
3212 #define _ASM_PROC_CACHE_H
3213
3214 +#include <linux/const.h>
3215 +
3216 /*
3217 * L1 cache
3218 */
3219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3221 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3223 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3225
3226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3227 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3228 index 4ce7a01..449202a 100644
3229 --- a/arch/openrisc/include/asm/cache.h
3230 +++ b/arch/openrisc/include/asm/cache.h
3231 @@ -19,11 +19,13 @@
3232 #ifndef __ASM_OPENRISC_CACHE_H
3233 #define __ASM_OPENRISC_CACHE_H
3234
3235 +#include <linux/const.h>
3236 +
3237 /* FIXME: How can we replace these with values from the CPU...
3238 * they shouldn't be hard-coded!
3239 */
3240
3241 -#define L1_CACHE_BYTES 16
3242 #define L1_CACHE_SHIFT 4
3243 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3244
3245 #endif /* __ASM_OPENRISC_CACHE_H */
3246 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3247 index af9cf30..2aae9b2 100644
3248 --- a/arch/parisc/include/asm/atomic.h
3249 +++ b/arch/parisc/include/asm/atomic.h
3250 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3251
3252 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3253
3254 +#define atomic64_read_unchecked(v) atomic64_read(v)
3255 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3256 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3257 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3258 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3259 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3260 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3261 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3262 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3263 +
3264 #endif /* !CONFIG_64BIT */
3265
3266
3267 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3268 index 47f11c7..3420df2 100644
3269 --- a/arch/parisc/include/asm/cache.h
3270 +++ b/arch/parisc/include/asm/cache.h
3271 @@ -5,6 +5,7 @@
3272 #ifndef __ARCH_PARISC_CACHE_H
3273 #define __ARCH_PARISC_CACHE_H
3274
3275 +#include <linux/const.h>
3276
3277 /*
3278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3279 @@ -15,13 +16,13 @@
3280 * just ruin performance.
3281 */
3282 #ifdef CONFIG_PA20
3283 -#define L1_CACHE_BYTES 64
3284 #define L1_CACHE_SHIFT 6
3285 #else
3286 -#define L1_CACHE_BYTES 32
3287 #define L1_CACHE_SHIFT 5
3288 #endif
3289
3290 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3291 +
3292 #ifndef __ASSEMBLY__
3293
3294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3295 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3296 index 19f6cb1..6c78cf2 100644
3297 --- a/arch/parisc/include/asm/elf.h
3298 +++ b/arch/parisc/include/asm/elf.h
3299 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3300
3301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3302
3303 +#ifdef CONFIG_PAX_ASLR
3304 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3305 +
3306 +#define PAX_DELTA_MMAP_LEN 16
3307 +#define PAX_DELTA_STACK_LEN 16
3308 +#endif
3309 +
3310 /* This yields a mask that user programs can use to figure out what
3311 instruction set this CPU supports. This could be done in user space,
3312 but it's not easy, and we've already done it here. */
3313 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3314 index fc987a1..6e068ef 100644
3315 --- a/arch/parisc/include/asm/pgalloc.h
3316 +++ b/arch/parisc/include/asm/pgalloc.h
3317 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3319 }
3320
3321 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 +{
3323 + pgd_populate(mm, pgd, pmd);
3324 +}
3325 +
3326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3327 {
3328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3329 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3331 #define pmd_free(mm, x) do { } while (0)
3332 #define pgd_populate(mm, pmd, pte) BUG()
3333 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3334
3335 #endif
3336
3337 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3338 index ee99f23..802b0a1 100644
3339 --- a/arch/parisc/include/asm/pgtable.h
3340 +++ b/arch/parisc/include/asm/pgtable.h
3341 @@ -212,6 +212,17 @@ struct vm_area_struct;
3342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3343 #define PAGE_COPY PAGE_EXECREAD
3344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3345 +
3346 +#ifdef CONFIG_PAX_PAGEEXEC
3347 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3348 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3349 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3350 +#else
3351 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3352 +# define PAGE_COPY_NOEXEC PAGE_COPY
3353 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3354 +#endif
3355 +
3356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3359 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3360 index 4ba2c93..f5e3974 100644
3361 --- a/arch/parisc/include/asm/uaccess.h
3362 +++ b/arch/parisc/include/asm/uaccess.h
3363 @@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3364 const void __user *from,
3365 unsigned long n)
3366 {
3367 - int sz = __compiletime_object_size(to);
3368 + size_t sz = __compiletime_object_size(to);
3369 int ret = -EFAULT;
3370
3371 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3372 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3373 ret = __copy_from_user(to, from, n);
3374 else
3375 copy_from_user_overflow();
3376 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3377 index 5e34ccf..672bc9c 100644
3378 --- a/arch/parisc/kernel/module.c
3379 +++ b/arch/parisc/kernel/module.c
3380 @@ -98,16 +98,38 @@
3381
3382 /* three functions to determine where in the module core
3383 * or init pieces the location is */
3384 +static inline int in_init_rx(struct module *me, void *loc)
3385 +{
3386 + return (loc >= me->module_init_rx &&
3387 + loc < (me->module_init_rx + me->init_size_rx));
3388 +}
3389 +
3390 +static inline int in_init_rw(struct module *me, void *loc)
3391 +{
3392 + return (loc >= me->module_init_rw &&
3393 + loc < (me->module_init_rw + me->init_size_rw));
3394 +}
3395 +
3396 static inline int in_init(struct module *me, void *loc)
3397 {
3398 - return (loc >= me->module_init &&
3399 - loc <= (me->module_init + me->init_size));
3400 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3401 +}
3402 +
3403 +static inline int in_core_rx(struct module *me, void *loc)
3404 +{
3405 + return (loc >= me->module_core_rx &&
3406 + loc < (me->module_core_rx + me->core_size_rx));
3407 +}
3408 +
3409 +static inline int in_core_rw(struct module *me, void *loc)
3410 +{
3411 + return (loc >= me->module_core_rw &&
3412 + loc < (me->module_core_rw + me->core_size_rw));
3413 }
3414
3415 static inline int in_core(struct module *me, void *loc)
3416 {
3417 - return (loc >= me->module_core &&
3418 - loc <= (me->module_core + me->core_size));
3419 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3420 }
3421
3422 static inline int in_local(struct module *me, void *loc)
3423 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3424 }
3425
3426 /* align things a bit */
3427 - me->core_size = ALIGN(me->core_size, 16);
3428 - me->arch.got_offset = me->core_size;
3429 - me->core_size += gots * sizeof(struct got_entry);
3430 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3431 + me->arch.got_offset = me->core_size_rw;
3432 + me->core_size_rw += gots * sizeof(struct got_entry);
3433
3434 - me->core_size = ALIGN(me->core_size, 16);
3435 - me->arch.fdesc_offset = me->core_size;
3436 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3437 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3438 + me->arch.fdesc_offset = me->core_size_rw;
3439 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3440
3441 me->arch.got_max = gots;
3442 me->arch.fdesc_max = fdescs;
3443 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3444
3445 BUG_ON(value == 0);
3446
3447 - got = me->module_core + me->arch.got_offset;
3448 + got = me->module_core_rw + me->arch.got_offset;
3449 for (i = 0; got[i].addr; i++)
3450 if (got[i].addr == value)
3451 goto out;
3452 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3453 #ifdef CONFIG_64BIT
3454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3455 {
3456 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3457 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3458
3459 if (!value) {
3460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3461 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3462
3463 /* Create new one */
3464 fdesc->addr = value;
3465 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3466 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3467 return (Elf_Addr)fdesc;
3468 }
3469 #endif /* CONFIG_64BIT */
3470 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3471
3472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3473 end = table + sechdrs[me->arch.unwind_section].sh_size;
3474 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3475 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3476
3477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3478 me->arch.unwind_section, table, end, gp);
3479 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3480 index c9b9322..02d8940 100644
3481 --- a/arch/parisc/kernel/sys_parisc.c
3482 +++ b/arch/parisc/kernel/sys_parisc.c
3483 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3484 /* At this point: (!vma || addr < vma->vm_end). */
3485 if (TASK_SIZE - len < addr)
3486 return -ENOMEM;
3487 - if (!vma || addr + len <= vma->vm_start)
3488 + if (check_heap_stack_gap(vma, addr, len))
3489 return addr;
3490 addr = vma->vm_end;
3491 }
3492 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3493 /* At this point: (!vma || addr < vma->vm_end). */
3494 if (TASK_SIZE - len < addr)
3495 return -ENOMEM;
3496 - if (!vma || addr + len <= vma->vm_start)
3497 + if (check_heap_stack_gap(vma, addr, len))
3498 return addr;
3499 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3500 if (addr < vma->vm_end) /* handle wraparound */
3501 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3502 if (flags & MAP_FIXED)
3503 return addr;
3504 if (!addr)
3505 - addr = TASK_UNMAPPED_BASE;
3506 + addr = current->mm->mmap_base;
3507
3508 if (filp) {
3509 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3510 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3511 index 45ba99f..8e22c33 100644
3512 --- a/arch/parisc/kernel/traps.c
3513 +++ b/arch/parisc/kernel/traps.c
3514 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3515
3516 down_read(&current->mm->mmap_sem);
3517 vma = find_vma(current->mm,regs->iaoq[0]);
3518 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3519 - && (vma->vm_flags & VM_EXEC)) {
3520 -
3521 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3522 fault_address = regs->iaoq[0];
3523 fault_space = regs->iasq[0];
3524
3525 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3526 index 18162ce..94de376 100644
3527 --- a/arch/parisc/mm/fault.c
3528 +++ b/arch/parisc/mm/fault.c
3529 @@ -15,6 +15,7 @@
3530 #include <linux/sched.h>
3531 #include <linux/interrupt.h>
3532 #include <linux/module.h>
3533 +#include <linux/unistd.h>
3534
3535 #include <asm/uaccess.h>
3536 #include <asm/traps.h>
3537 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3538 static unsigned long
3539 parisc_acctyp(unsigned long code, unsigned int inst)
3540 {
3541 - if (code == 6 || code == 16)
3542 + if (code == 6 || code == 7 || code == 16)
3543 return VM_EXEC;
3544
3545 switch (inst & 0xf0000000) {
3546 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3547 }
3548 #endif
3549
3550 +#ifdef CONFIG_PAX_PAGEEXEC
3551 +/*
3552 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3553 + *
3554 + * returns 1 when task should be killed
3555 + * 2 when rt_sigreturn trampoline was detected
3556 + * 3 when unpatched PLT trampoline was detected
3557 + */
3558 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3559 +{
3560 +
3561 +#ifdef CONFIG_PAX_EMUPLT
3562 + int err;
3563 +
3564 + do { /* PaX: unpatched PLT emulation */
3565 + unsigned int bl, depwi;
3566 +
3567 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3568 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3569 +
3570 + if (err)
3571 + break;
3572 +
3573 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3574 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3575 +
3576 + err = get_user(ldw, (unsigned int *)addr);
3577 + err |= get_user(bv, (unsigned int *)(addr+4));
3578 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3579 +
3580 + if (err)
3581 + break;
3582 +
3583 + if (ldw == 0x0E801096U &&
3584 + bv == 0xEAC0C000U &&
3585 + ldw2 == 0x0E881095U)
3586 + {
3587 + unsigned int resolver, map;
3588 +
3589 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3590 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3591 + if (err)
3592 + break;
3593 +
3594 + regs->gr[20] = instruction_pointer(regs)+8;
3595 + regs->gr[21] = map;
3596 + regs->gr[22] = resolver;
3597 + regs->iaoq[0] = resolver | 3UL;
3598 + regs->iaoq[1] = regs->iaoq[0] + 4;
3599 + return 3;
3600 + }
3601 + }
3602 + } while (0);
3603 +#endif
3604 +
3605 +#ifdef CONFIG_PAX_EMUTRAMP
3606 +
3607 +#ifndef CONFIG_PAX_EMUSIGRT
3608 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3609 + return 1;
3610 +#endif
3611 +
3612 + do { /* PaX: rt_sigreturn emulation */
3613 + unsigned int ldi1, ldi2, bel, nop;
3614 +
3615 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3616 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3617 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3618 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3619 +
3620 + if (err)
3621 + break;
3622 +
3623 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3624 + ldi2 == 0x3414015AU &&
3625 + bel == 0xE4008200U &&
3626 + nop == 0x08000240U)
3627 + {
3628 + regs->gr[25] = (ldi1 & 2) >> 1;
3629 + regs->gr[20] = __NR_rt_sigreturn;
3630 + regs->gr[31] = regs->iaoq[1] + 16;
3631 + regs->sr[0] = regs->iasq[1];
3632 + regs->iaoq[0] = 0x100UL;
3633 + regs->iaoq[1] = regs->iaoq[0] + 4;
3634 + regs->iasq[0] = regs->sr[2];
3635 + regs->iasq[1] = regs->sr[2];
3636 + return 2;
3637 + }
3638 + } while (0);
3639 +#endif
3640 +
3641 + return 1;
3642 +}
3643 +
3644 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3645 +{
3646 + unsigned long i;
3647 +
3648 + printk(KERN_ERR "PAX: bytes at PC: ");
3649 + for (i = 0; i < 5; i++) {
3650 + unsigned int c;
3651 + if (get_user(c, (unsigned int *)pc+i))
3652 + printk(KERN_CONT "???????? ");
3653 + else
3654 + printk(KERN_CONT "%08x ", c);
3655 + }
3656 + printk("\n");
3657 +}
3658 +#endif
3659 +
3660 int fixup_exception(struct pt_regs *regs)
3661 {
3662 const struct exception_table_entry *fix;
3663 @@ -192,8 +303,33 @@ good_area:
3664
3665 acc_type = parisc_acctyp(code,regs->iir);
3666
3667 - if ((vma->vm_flags & acc_type) != acc_type)
3668 + if ((vma->vm_flags & acc_type) != acc_type) {
3669 +
3670 +#ifdef CONFIG_PAX_PAGEEXEC
3671 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3672 + (address & ~3UL) == instruction_pointer(regs))
3673 + {
3674 + up_read(&mm->mmap_sem);
3675 + switch (pax_handle_fetch_fault(regs)) {
3676 +
3677 +#ifdef CONFIG_PAX_EMUPLT
3678 + case 3:
3679 + return;
3680 +#endif
3681 +
3682 +#ifdef CONFIG_PAX_EMUTRAMP
3683 + case 2:
3684 + return;
3685 +#endif
3686 +
3687 + }
3688 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3689 + do_group_exit(SIGKILL);
3690 + }
3691 +#endif
3692 +
3693 goto bad_area;
3694 + }
3695
3696 /*
3697 * If for any reason at all we couldn't handle the fault, make
3698 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3699 index da29032..f76c24c 100644
3700 --- a/arch/powerpc/include/asm/atomic.h
3701 +++ b/arch/powerpc/include/asm/atomic.h
3702 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3703 return t1;
3704 }
3705
3706 +#define atomic64_read_unchecked(v) atomic64_read(v)
3707 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3708 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3709 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3710 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3711 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3712 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3713 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3714 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3715 +
3716 #endif /* __powerpc64__ */
3717
3718 #endif /* __KERNEL__ */
3719 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3720 index 9e495c9..b6878e5 100644
3721 --- a/arch/powerpc/include/asm/cache.h
3722 +++ b/arch/powerpc/include/asm/cache.h
3723 @@ -3,6 +3,7 @@
3724
3725 #ifdef __KERNEL__
3726
3727 +#include <linux/const.h>
3728
3729 /* bytes per L1 cache line */
3730 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3731 @@ -22,7 +23,7 @@
3732 #define L1_CACHE_SHIFT 7
3733 #endif
3734
3735 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3736 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3737
3738 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3739
3740 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3741 index 3bf9cca..e7457d0 100644
3742 --- a/arch/powerpc/include/asm/elf.h
3743 +++ b/arch/powerpc/include/asm/elf.h
3744 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3745 the loader. We need to make sure that it is out of the way of the program
3746 that it will "exec", and that there is sufficient room for the brk. */
3747
3748 -extern unsigned long randomize_et_dyn(unsigned long base);
3749 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3750 +#define ELF_ET_DYN_BASE (0x20000000)
3751 +
3752 +#ifdef CONFIG_PAX_ASLR
3753 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3754 +
3755 +#ifdef __powerpc64__
3756 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3757 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3758 +#else
3759 +#define PAX_DELTA_MMAP_LEN 15
3760 +#define PAX_DELTA_STACK_LEN 15
3761 +#endif
3762 +#endif
3763
3764 /*
3765 * Our registers are always unsigned longs, whether we're a 32 bit
3766 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3767 (0x7ff >> (PAGE_SHIFT - 12)) : \
3768 (0x3ffff >> (PAGE_SHIFT - 12)))
3769
3770 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3771 -#define arch_randomize_brk arch_randomize_brk
3772 -
3773 #endif /* __KERNEL__ */
3774
3775 /*
3776 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3777 index 8196e9c..d83a9f3 100644
3778 --- a/arch/powerpc/include/asm/exec.h
3779 +++ b/arch/powerpc/include/asm/exec.h
3780 @@ -4,6 +4,6 @@
3781 #ifndef _ASM_POWERPC_EXEC_H
3782 #define _ASM_POWERPC_EXEC_H
3783
3784 -extern unsigned long arch_align_stack(unsigned long sp);
3785 +#define arch_align_stack(x) ((x) & ~0xfUL)
3786
3787 #endif /* _ASM_POWERPC_EXEC_H */
3788 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3789 index bca8fdc..61e9580 100644
3790 --- a/arch/powerpc/include/asm/kmap_types.h
3791 +++ b/arch/powerpc/include/asm/kmap_types.h
3792 @@ -27,6 +27,7 @@ enum km_type {
3793 KM_PPC_SYNC_PAGE,
3794 KM_PPC_SYNC_ICACHE,
3795 KM_KDB,
3796 + KM_CLEARPAGE,
3797 KM_TYPE_NR
3798 };
3799
3800 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3801 index d4a7f64..451de1c 100644
3802 --- a/arch/powerpc/include/asm/mman.h
3803 +++ b/arch/powerpc/include/asm/mman.h
3804 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3805 }
3806 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3807
3808 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3809 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3810 {
3811 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3812 }
3813 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3814 index f072e97..b436dee 100644
3815 --- a/arch/powerpc/include/asm/page.h
3816 +++ b/arch/powerpc/include/asm/page.h
3817 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3818 * and needs to be executable. This means the whole heap ends
3819 * up being executable.
3820 */
3821 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3822 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3823 +#define VM_DATA_DEFAULT_FLAGS32 \
3824 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3825 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3826
3827 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3828 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3829 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3830 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3831 #endif
3832
3833 +#define ktla_ktva(addr) (addr)
3834 +#define ktva_ktla(addr) (addr)
3835 +
3836 /*
3837 * Use the top bit of the higher-level page table entries to indicate whether
3838 * the entries we point to contain hugepages. This works because we know that
3839 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3840 index fed85e6..da5c71b 100644
3841 --- a/arch/powerpc/include/asm/page_64.h
3842 +++ b/arch/powerpc/include/asm/page_64.h
3843 @@ -146,15 +146,18 @@ do { \
3844 * stack by default, so in the absence of a PT_GNU_STACK program header
3845 * we turn execute permission off.
3846 */
3847 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3848 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3849 +#define VM_STACK_DEFAULT_FLAGS32 \
3850 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3851 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3852
3853 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3854 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3855
3856 +#ifndef CONFIG_PAX_PAGEEXEC
3857 #define VM_STACK_DEFAULT_FLAGS \
3858 (is_32bit_task() ? \
3859 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3860 +#endif
3861
3862 #include <asm-generic/getorder.h>
3863
3864 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3865 index 292725c..f87ae14 100644
3866 --- a/arch/powerpc/include/asm/pgalloc-64.h
3867 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3868 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3869 #ifndef CONFIG_PPC_64K_PAGES
3870
3871 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3872 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3873
3874 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3875 {
3876 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3877 pud_set(pud, (unsigned long)pmd);
3878 }
3879
3880 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 +{
3882 + pud_populate(mm, pud, pmd);
3883 +}
3884 +
3885 #define pmd_populate(mm, pmd, pte_page) \
3886 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3887 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3888 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3889 #else /* CONFIG_PPC_64K_PAGES */
3890
3891 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3892 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3893
3894 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3895 pte_t *pte)
3896 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3897 index 2e0e411..7899c68 100644
3898 --- a/arch/powerpc/include/asm/pgtable.h
3899 +++ b/arch/powerpc/include/asm/pgtable.h
3900 @@ -2,6 +2,7 @@
3901 #define _ASM_POWERPC_PGTABLE_H
3902 #ifdef __KERNEL__
3903
3904 +#include <linux/const.h>
3905 #ifndef __ASSEMBLY__
3906 #include <asm/processor.h> /* For TASK_SIZE */
3907 #include <asm/mmu.h>
3908 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3909 index 4aad413..85d86bf 100644
3910 --- a/arch/powerpc/include/asm/pte-hash32.h
3911 +++ b/arch/powerpc/include/asm/pte-hash32.h
3912 @@ -21,6 +21,7 @@
3913 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3914 #define _PAGE_USER 0x004 /* usermode access allowed */
3915 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3916 +#define _PAGE_EXEC _PAGE_GUARDED
3917 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3918 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3919 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3920 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3921 index 360585d..c3930ef 100644
3922 --- a/arch/powerpc/include/asm/reg.h
3923 +++ b/arch/powerpc/include/asm/reg.h
3924 @@ -212,6 +212,7 @@
3925 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3926 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3927 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3928 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3929 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3930 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3931 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3932 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3933 index 68831e9..379c695 100644
3934 --- a/arch/powerpc/include/asm/thread_info.h
3935 +++ b/arch/powerpc/include/asm/thread_info.h
3936 @@ -91,12 +91,14 @@ static inline struct thread_info *current_thread_info(void)
3937 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3938 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3939 #define TIF_SINGLESTEP 8 /* singlestepping active */
3940 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3941 #define TIF_SECCOMP 10 /* secure computing */
3942 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3943 #define TIF_NOERROR 12 /* Force successful syscall return */
3944 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3945 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3946 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3947 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3948 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3949
3950 /* as above, but as bit values */
3951 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3952 @@ -113,8 +115,10 @@ static inline struct thread_info *current_thread_info(void)
3953 #define _TIF_NOERROR (1<<TIF_NOERROR)
3954 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3955 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3956 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3957 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3958 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3959 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3960 + _TIF_GRSEC_SETXID)
3961
3962 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3963 _TIF_NOTIFY_RESUME)
3964 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3965 index 17bb40c..353c98b 100644
3966 --- a/arch/powerpc/include/asm/uaccess.h
3967 +++ b/arch/powerpc/include/asm/uaccess.h
3968 @@ -13,6 +13,8 @@
3969 #define VERIFY_READ 0
3970 #define VERIFY_WRITE 1
3971
3972 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3973 +
3974 /*
3975 * The fs value determines whether argument validity checking should be
3976 * performed or not. If get_fs() == USER_DS, checking is performed, with
3977 @@ -329,52 +331,6 @@ do { \
3978 extern unsigned long __copy_tofrom_user(void __user *to,
3979 const void __user *from, unsigned long size);
3980
3981 -#ifndef __powerpc64__
3982 -
3983 -static inline unsigned long copy_from_user(void *to,
3984 - const void __user *from, unsigned long n)
3985 -{
3986 - unsigned long over;
3987 -
3988 - if (access_ok(VERIFY_READ, from, n))
3989 - return __copy_tofrom_user((__force void __user *)to, from, n);
3990 - if ((unsigned long)from < TASK_SIZE) {
3991 - over = (unsigned long)from + n - TASK_SIZE;
3992 - return __copy_tofrom_user((__force void __user *)to, from,
3993 - n - over) + over;
3994 - }
3995 - return n;
3996 -}
3997 -
3998 -static inline unsigned long copy_to_user(void __user *to,
3999 - const void *from, unsigned long n)
4000 -{
4001 - unsigned long over;
4002 -
4003 - if (access_ok(VERIFY_WRITE, to, n))
4004 - return __copy_tofrom_user(to, (__force void __user *)from, n);
4005 - if ((unsigned long)to < TASK_SIZE) {
4006 - over = (unsigned long)to + n - TASK_SIZE;
4007 - return __copy_tofrom_user(to, (__force void __user *)from,
4008 - n - over) + over;
4009 - }
4010 - return n;
4011 -}
4012 -
4013 -#else /* __powerpc64__ */
4014 -
4015 -#define __copy_in_user(to, from, size) \
4016 - __copy_tofrom_user((to), (from), (size))
4017 -
4018 -extern unsigned long copy_from_user(void *to, const void __user *from,
4019 - unsigned long n);
4020 -extern unsigned long copy_to_user(void __user *to, const void *from,
4021 - unsigned long n);
4022 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
4023 - unsigned long n);
4024 -
4025 -#endif /* __powerpc64__ */
4026 -
4027 static inline unsigned long __copy_from_user_inatomic(void *to,
4028 const void __user *from, unsigned long n)
4029 {
4030 @@ -398,6 +354,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4031 if (ret == 0)
4032 return 0;
4033 }
4034 +
4035 + if (!__builtin_constant_p(n))
4036 + check_object_size(to, n, false);
4037 +
4038 return __copy_tofrom_user((__force void __user *)to, from, n);
4039 }
4040
4041 @@ -424,6 +384,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4042 if (ret == 0)
4043 return 0;
4044 }
4045 +
4046 + if (!__builtin_constant_p(n))
4047 + check_object_size(from, n, true);
4048 +
4049 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4050 }
4051
4052 @@ -441,6 +405,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4053 return __copy_to_user_inatomic(to, from, size);
4054 }
4055
4056 +#ifndef __powerpc64__
4057 +
4058 +static inline unsigned long __must_check copy_from_user(void *to,
4059 + const void __user *from, unsigned long n)
4060 +{
4061 + unsigned long over;
4062 +
4063 + if ((long)n < 0)
4064 + return n;
4065 +
4066 + if (access_ok(VERIFY_READ, from, n)) {
4067 + if (!__builtin_constant_p(n))
4068 + check_object_size(to, n, false);
4069 + return __copy_tofrom_user((__force void __user *)to, from, n);
4070 + }
4071 + if ((unsigned long)from < TASK_SIZE) {
4072 + over = (unsigned long)from + n - TASK_SIZE;
4073 + if (!__builtin_constant_p(n - over))
4074 + check_object_size(to, n - over, false);
4075 + return __copy_tofrom_user((__force void __user *)to, from,
4076 + n - over) + over;
4077 + }
4078 + return n;
4079 +}
4080 +
4081 +static inline unsigned long __must_check copy_to_user(void __user *to,
4082 + const void *from, unsigned long n)
4083 +{
4084 + unsigned long over;
4085 +
4086 + if ((long)n < 0)
4087 + return n;
4088 +
4089 + if (access_ok(VERIFY_WRITE, to, n)) {
4090 + if (!__builtin_constant_p(n))
4091 + check_object_size(from, n, true);
4092 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4093 + }
4094 + if ((unsigned long)to < TASK_SIZE) {
4095 + over = (unsigned long)to + n - TASK_SIZE;
4096 + if (!__builtin_constant_p(n))
4097 + check_object_size(from, n - over, true);
4098 + return __copy_tofrom_user(to, (__force void __user *)from,
4099 + n - over) + over;
4100 + }
4101 + return n;
4102 +}
4103 +
4104 +#else /* __powerpc64__ */
4105 +
4106 +#define __copy_in_user(to, from, size) \
4107 + __copy_tofrom_user((to), (from), (size))
4108 +
4109 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4110 +{
4111 + if ((long)n < 0 || n > INT_MAX)
4112 + return n;
4113 +
4114 + if (!__builtin_constant_p(n))
4115 + check_object_size(to, n, false);
4116 +
4117 + if (likely(access_ok(VERIFY_READ, from, n)))
4118 + n = __copy_from_user(to, from, n);
4119 + else
4120 + memset(to, 0, n);
4121 + return n;
4122 +}
4123 +
4124 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4125 +{
4126 + if ((long)n < 0 || n > INT_MAX)
4127 + return n;
4128 +
4129 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4130 + if (!__builtin_constant_p(n))
4131 + check_object_size(from, n, true);
4132 + n = __copy_to_user(to, from, n);
4133 + }
4134 + return n;
4135 +}
4136 +
4137 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4138 + unsigned long n);
4139 +
4140 +#endif /* __powerpc64__ */
4141 +
4142 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4143
4144 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4145 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4146 index 7215cc2..a9730c1 100644
4147 --- a/arch/powerpc/kernel/exceptions-64e.S
4148 +++ b/arch/powerpc/kernel/exceptions-64e.S
4149 @@ -661,6 +661,7 @@ storage_fault_common:
4150 std r14,_DAR(r1)
4151 std r15,_DSISR(r1)
4152 addi r3,r1,STACK_FRAME_OVERHEAD
4153 + bl .save_nvgprs
4154 mr r4,r14
4155 mr r5,r15
4156 ld r14,PACA_EXGEN+EX_R14(r13)
4157 @@ -669,8 +670,7 @@ storage_fault_common:
4158 cmpdi r3,0
4159 bne- 1f
4160 b .ret_from_except_lite
4161 -1: bl .save_nvgprs
4162 - mr r5,r3
4163 +1: mr r5,r3
4164 addi r3,r1,STACK_FRAME_OVERHEAD
4165 ld r4,_DAR(r1)
4166 bl .bad_page_fault
4167 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4168 index 1c06d29..c2a339b 100644
4169 --- a/arch/powerpc/kernel/exceptions-64s.S
4170 +++ b/arch/powerpc/kernel/exceptions-64s.S
4171 @@ -888,10 +888,10 @@ handle_page_fault:
4172 11: ld r4,_DAR(r1)
4173 ld r5,_DSISR(r1)
4174 addi r3,r1,STACK_FRAME_OVERHEAD
4175 + bl .save_nvgprs
4176 bl .do_page_fault
4177 cmpdi r3,0
4178 beq+ 12f
4179 - bl .save_nvgprs
4180 mr r5,r3
4181 addi r3,r1,STACK_FRAME_OVERHEAD
4182 lwz r4,_DAR(r1)
4183 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4184 index 2e3200c..72095ce 100644
4185 --- a/arch/powerpc/kernel/module_32.c
4186 +++ b/arch/powerpc/kernel/module_32.c
4187 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4188 me->arch.core_plt_section = i;
4189 }
4190 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4191 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4192 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4193 return -ENOEXEC;
4194 }
4195
4196 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4197
4198 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4199 /* Init, or core PLT? */
4200 - if (location >= mod->module_core
4201 - && location < mod->module_core + mod->core_size)
4202 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4203 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4204 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4205 - else
4206 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4207 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4208 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4209 + else {
4210 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4211 + return ~0UL;
4212 + }
4213
4214 /* Find this entry, or if that fails, the next avail. entry */
4215 while (entry->jump[0]) {
4216 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4217 index 1a1f2dd..f4d1bb4 100644
4218 --- a/arch/powerpc/kernel/process.c
4219 +++ b/arch/powerpc/kernel/process.c
4220 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4221 * Lookup NIP late so we have the best change of getting the
4222 * above info out without failing
4223 */
4224 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4225 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4226 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4227 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4228 #endif
4229 show_stack(current, (unsigned long *) regs->gpr[1]);
4230 if (!user_mode(regs))
4231 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4232 newsp = stack[0];
4233 ip = stack[STACK_FRAME_LR_SAVE];
4234 if (!firstframe || ip != lr) {
4235 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4236 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4237 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4238 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4239 - printk(" (%pS)",
4240 + printk(" (%pA)",
4241 (void *)current->ret_stack[curr_frame].ret);
4242 curr_frame--;
4243 }
4244 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4245 struct pt_regs *regs = (struct pt_regs *)
4246 (sp + STACK_FRAME_OVERHEAD);
4247 lr = regs->link;
4248 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4249 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4250 regs->trap, (void *)regs->nip, (void *)lr);
4251 firstframe = 1;
4252 }
4253 @@ -1246,58 +1246,3 @@ void __ppc64_runlatch_off(void)
4254 mtspr(SPRN_CTRLT, ctrl);
4255 }
4256 #endif /* CONFIG_PPC64 */
4257 -
4258 -unsigned long arch_align_stack(unsigned long sp)
4259 -{
4260 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4261 - sp -= get_random_int() & ~PAGE_MASK;
4262 - return sp & ~0xf;
4263 -}
4264 -
4265 -static inline unsigned long brk_rnd(void)
4266 -{
4267 - unsigned long rnd = 0;
4268 -
4269 - /* 8MB for 32bit, 1GB for 64bit */
4270 - if (is_32bit_task())
4271 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4272 - else
4273 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4274 -
4275 - return rnd << PAGE_SHIFT;
4276 -}
4277 -
4278 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4279 -{
4280 - unsigned long base = mm->brk;
4281 - unsigned long ret;
4282 -
4283 -#ifdef CONFIG_PPC_STD_MMU_64
4284 - /*
4285 - * If we are using 1TB segments and we are allowed to randomise
4286 - * the heap, we can put it above 1TB so it is backed by a 1TB
4287 - * segment. Otherwise the heap will be in the bottom 1TB
4288 - * which always uses 256MB segments and this may result in a
4289 - * performance penalty.
4290 - */
4291 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4292 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4293 -#endif
4294 -
4295 - ret = PAGE_ALIGN(base + brk_rnd());
4296 -
4297 - if (ret < mm->brk)
4298 - return mm->brk;
4299 -
4300 - return ret;
4301 -}
4302 -
4303 -unsigned long randomize_et_dyn(unsigned long base)
4304 -{
4305 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4306 -
4307 - if (ret < base)
4308 - return base;
4309 -
4310 - return ret;
4311 -}
4312 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4313 index c10fc28..c4ef063 100644
4314 --- a/arch/powerpc/kernel/ptrace.c
4315 +++ b/arch/powerpc/kernel/ptrace.c
4316 @@ -1660,6 +1660,10 @@ long arch_ptrace(struct task_struct *child, long request,
4317 return ret;
4318 }
4319
4320 +#ifdef CONFIG_GRKERNSEC_SETXID
4321 +extern void gr_delayed_cred_worker(void);
4322 +#endif
4323 +
4324 /*
4325 * We must return the syscall number to actually look up in the table.
4326 * This can be -1L to skip running any syscall at all.
4327 @@ -1670,6 +1674,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4328
4329 secure_computing_strict(regs->gpr[0]);
4330
4331 +#ifdef CONFIG_GRKERNSEC_SETXID
4332 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4333 + gr_delayed_cred_worker();
4334 +#endif
4335 +
4336 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4337 tracehook_report_syscall_entry(regs))
4338 /*
4339 @@ -1704,6 +1713,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4340 {
4341 int step;
4342
4343 +#ifdef CONFIG_GRKERNSEC_SETXID
4344 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4345 + gr_delayed_cred_worker();
4346 +#endif
4347 +
4348 audit_syscall_exit(regs);
4349
4350 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4351 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4352 index 8b4c049..dcd6ef3 100644
4353 --- a/arch/powerpc/kernel/signal_32.c
4354 +++ b/arch/powerpc/kernel/signal_32.c
4355 @@ -852,7 +852,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4356 /* Save user registers on the stack */
4357 frame = &rt_sf->uc.uc_mcontext;
4358 addr = frame;
4359 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4360 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4361 if (save_user_regs(regs, frame, 0, 1))
4362 goto badframe;
4363 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4364 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4365 index d183f87..1867f1a 100644
4366 --- a/arch/powerpc/kernel/signal_64.c
4367 +++ b/arch/powerpc/kernel/signal_64.c
4368 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4369 current->thread.fpscr.val = 0;
4370
4371 /* Set up to return from userspace. */
4372 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4373 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4374 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4375 } else {
4376 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4377 diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
4378 index f2496f2..4e3cc47 100644
4379 --- a/arch/powerpc/kernel/syscalls.c
4380 +++ b/arch/powerpc/kernel/syscalls.c
4381 @@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
4382 long ret;
4383
4384 if (personality(current->personality) == PER_LINUX32
4385 - && personality == PER_LINUX)
4386 - personality = PER_LINUX32;
4387 + && personality(personality) == PER_LINUX)
4388 + personality = (personality & ~PER_MASK) | PER_LINUX32;
4389 ret = sys_personality(personality);
4390 - if (ret == PER_LINUX32)
4391 - ret = PER_LINUX;
4392 + if (personality(ret) == PER_LINUX32)
4393 + ret = (ret & ~PER_MASK) | PER_LINUX;
4394 return ret;
4395 }
4396 #endif
4397 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4398 index ae0843f..f16372c 100644
4399 --- a/arch/powerpc/kernel/traps.c
4400 +++ b/arch/powerpc/kernel/traps.c
4401 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4402 return flags;
4403 }
4404
4405 +extern void gr_handle_kernel_exploit(void);
4406 +
4407 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4408 int signr)
4409 {
4410 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4411 panic("Fatal exception in interrupt");
4412 if (panic_on_oops)
4413 panic("Fatal exception");
4414 +
4415 + gr_handle_kernel_exploit();
4416 +
4417 do_exit(signr);
4418 }
4419
4420 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4421 index 9eb5b9b..e45498a 100644
4422 --- a/arch/powerpc/kernel/vdso.c
4423 +++ b/arch/powerpc/kernel/vdso.c
4424 @@ -34,6 +34,7 @@
4425 #include <asm/firmware.h>
4426 #include <asm/vdso.h>
4427 #include <asm/vdso_datapage.h>
4428 +#include <asm/mman.h>
4429
4430 #include "setup.h"
4431
4432 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4433 vdso_base = VDSO32_MBASE;
4434 #endif
4435
4436 - current->mm->context.vdso_base = 0;
4437 + current->mm->context.vdso_base = ~0UL;
4438
4439 /* vDSO has a problem and was disabled, just don't "enable" it for the
4440 * process
4441 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4442 vdso_base = get_unmapped_area(NULL, vdso_base,
4443 (vdso_pages << PAGE_SHIFT) +
4444 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4445 - 0, 0);
4446 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4447 if (IS_ERR_VALUE(vdso_base)) {
4448 rc = vdso_base;
4449 goto fail_mmapsem;
4450 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4451 index 5eea6f3..5d10396 100644
4452 --- a/arch/powerpc/lib/usercopy_64.c
4453 +++ b/arch/powerpc/lib/usercopy_64.c
4454 @@ -9,22 +9,6 @@
4455 #include <linux/module.h>
4456 #include <asm/uaccess.h>
4457
4458 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4459 -{
4460 - if (likely(access_ok(VERIFY_READ, from, n)))
4461 - n = __copy_from_user(to, from, n);
4462 - else
4463 - memset(to, 0, n);
4464 - return n;
4465 -}
4466 -
4467 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4468 -{
4469 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4470 - n = __copy_to_user(to, from, n);
4471 - return n;
4472 -}
4473 -
4474 unsigned long copy_in_user(void __user *to, const void __user *from,
4475 unsigned long n)
4476 {
4477 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4478 return n;
4479 }
4480
4481 -EXPORT_SYMBOL(copy_from_user);
4482 -EXPORT_SYMBOL(copy_to_user);
4483 EXPORT_SYMBOL(copy_in_user);
4484
4485 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4486 index 08ffcf5..a0ab912 100644
4487 --- a/arch/powerpc/mm/fault.c
4488 +++ b/arch/powerpc/mm/fault.c
4489 @@ -32,6 +32,10 @@
4490 #include <linux/perf_event.h>
4491 #include <linux/magic.h>
4492 #include <linux/ratelimit.h>
4493 +#include <linux/slab.h>
4494 +#include <linux/pagemap.h>
4495 +#include <linux/compiler.h>
4496 +#include <linux/unistd.h>
4497
4498 #include <asm/firmware.h>
4499 #include <asm/page.h>
4500 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4501 }
4502 #endif
4503
4504 +#ifdef CONFIG_PAX_PAGEEXEC
4505 +/*
4506 + * PaX: decide what to do with offenders (regs->nip = fault address)
4507 + *
4508 + * returns 1 when task should be killed
4509 + */
4510 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4511 +{
4512 + return 1;
4513 +}
4514 +
4515 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4516 +{
4517 + unsigned long i;
4518 +
4519 + printk(KERN_ERR "PAX: bytes at PC: ");
4520 + for (i = 0; i < 5; i++) {
4521 + unsigned int c;
4522 + if (get_user(c, (unsigned int __user *)pc+i))
4523 + printk(KERN_CONT "???????? ");
4524 + else
4525 + printk(KERN_CONT "%08x ", c);
4526 + }
4527 + printk("\n");
4528 +}
4529 +#endif
4530 +
4531 /*
4532 * Check whether the instruction at regs->nip is a store using
4533 * an update addressing form which will update r1.
4534 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4535 * indicate errors in DSISR but can validly be set in SRR1.
4536 */
4537 if (trap == 0x400)
4538 - error_code &= 0x48200000;
4539 + error_code &= 0x58200000;
4540 else
4541 is_write = error_code & DSISR_ISSTORE;
4542 #else
4543 @@ -366,7 +397,7 @@ good_area:
4544 * "undefined". Of those that can be set, this is the only
4545 * one which seems bad.
4546 */
4547 - if (error_code & 0x10000000)
4548 + if (error_code & DSISR_GUARDED)
4549 /* Guarded storage error. */
4550 goto bad_area;
4551 #endif /* CONFIG_8xx */
4552 @@ -381,7 +412,7 @@ good_area:
4553 * processors use the same I/D cache coherency mechanism
4554 * as embedded.
4555 */
4556 - if (error_code & DSISR_PROTFAULT)
4557 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4558 goto bad_area;
4559 #endif /* CONFIG_PPC_STD_MMU */
4560
4561 @@ -463,6 +494,23 @@ bad_area:
4562 bad_area_nosemaphore:
4563 /* User mode accesses cause a SIGSEGV */
4564 if (user_mode(regs)) {
4565 +
4566 +#ifdef CONFIG_PAX_PAGEEXEC
4567 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4568 +#ifdef CONFIG_PPC_STD_MMU
4569 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4570 +#else
4571 + if (is_exec && regs->nip == address) {
4572 +#endif
4573 + switch (pax_handle_fetch_fault(regs)) {
4574 + }
4575 +
4576 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4577 + do_group_exit(SIGKILL);
4578 + }
4579 + }
4580 +#endif
4581 +
4582 _exception(SIGSEGV, regs, code, address);
4583 return 0;
4584 }
4585 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4586 index 67a42ed..1c7210c 100644
4587 --- a/arch/powerpc/mm/mmap_64.c
4588 +++ b/arch/powerpc/mm/mmap_64.c
4589 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4590 */
4591 if (mmap_is_legacy()) {
4592 mm->mmap_base = TASK_UNMAPPED_BASE;
4593 +
4594 +#ifdef CONFIG_PAX_RANDMMAP
4595 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4596 + mm->mmap_base += mm->delta_mmap;
4597 +#endif
4598 +
4599 mm->get_unmapped_area = arch_get_unmapped_area;
4600 mm->unmap_area = arch_unmap_area;
4601 } else {
4602 mm->mmap_base = mmap_base();
4603 +
4604 +#ifdef CONFIG_PAX_RANDMMAP
4605 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4606 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4607 +#endif
4608 +
4609 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4610 mm->unmap_area = arch_unmap_area_topdown;
4611 }
4612 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4613 index 73709f7..6b90313 100644
4614 --- a/arch/powerpc/mm/slice.c
4615 +++ b/arch/powerpc/mm/slice.c
4616 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4617 if ((mm->task_size - len) < addr)
4618 return 0;
4619 vma = find_vma(mm, addr);
4620 - return (!vma || (addr + len) <= vma->vm_start);
4621 + return check_heap_stack_gap(vma, addr, len);
4622 }
4623
4624 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4625 @@ -256,7 +256,7 @@ full_search:
4626 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4627 continue;
4628 }
4629 - if (!vma || addr + len <= vma->vm_start) {
4630 + if (check_heap_stack_gap(vma, addr, len)) {
4631 /*
4632 * Remember the place where we stopped the search:
4633 */
4634 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4635 }
4636 }
4637
4638 - addr = mm->mmap_base;
4639 - while (addr > len) {
4640 + if (mm->mmap_base < len)
4641 + addr = -ENOMEM;
4642 + else
4643 + addr = mm->mmap_base - len;
4644 +
4645 + while (!IS_ERR_VALUE(addr)) {
4646 /* Go down by chunk size */
4647 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4648 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4649
4650 /* Check for hit with different page size */
4651 mask = slice_range_to_mask(addr, len);
4652 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4653 * return with success:
4654 */
4655 vma = find_vma(mm, addr);
4656 - if (!vma || (addr + len) <= vma->vm_start) {
4657 + if (check_heap_stack_gap(vma, addr, len)) {
4658 /* remember the address as a hint for next time */
4659 if (use_cache)
4660 mm->free_area_cache = addr;
4661 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4662 mm->cached_hole_size = vma->vm_start - addr;
4663
4664 /* try just below the current vma->vm_start */
4665 - addr = vma->vm_start;
4666 + addr = skip_heap_stack_gap(vma, len);
4667 }
4668
4669 /*
4670 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4671 if (fixed && addr > (mm->task_size - len))
4672 return -EINVAL;
4673
4674 +#ifdef CONFIG_PAX_RANDMMAP
4675 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4676 + addr = 0;
4677 +#endif
4678 +
4679 /* If hint, make sure it matches our alignment restrictions */
4680 if (!fixed && addr) {
4681 addr = _ALIGN_UP(addr, 1ul << pshift);
4682 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4683 index 748347b..81bc6c7 100644
4684 --- a/arch/s390/include/asm/atomic.h
4685 +++ b/arch/s390/include/asm/atomic.h
4686 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4687 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4688 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4689
4690 +#define atomic64_read_unchecked(v) atomic64_read(v)
4691 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4692 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4693 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4694 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4695 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4696 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4697 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4698 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4699 +
4700 #define smp_mb__before_atomic_dec() smp_mb()
4701 #define smp_mb__after_atomic_dec() smp_mb()
4702 #define smp_mb__before_atomic_inc() smp_mb()
4703 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4704 index 2a30d5a..5e5586f 100644
4705 --- a/arch/s390/include/asm/cache.h
4706 +++ b/arch/s390/include/asm/cache.h
4707 @@ -11,8 +11,10 @@
4708 #ifndef __ARCH_S390_CACHE_H
4709 #define __ARCH_S390_CACHE_H
4710
4711 -#define L1_CACHE_BYTES 256
4712 +#include <linux/const.h>
4713 +
4714 #define L1_CACHE_SHIFT 8
4715 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4716 #define NET_SKB_PAD 32
4717
4718 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4719 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4720 index 06151e6..598f9a5 100644
4721 --- a/arch/s390/include/asm/elf.h
4722 +++ b/arch/s390/include/asm/elf.h
4723 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4724 the loader. We need to make sure that it is out of the way of the program
4725 that it will "exec", and that there is sufficient room for the brk. */
4726
4727 -extern unsigned long randomize_et_dyn(unsigned long base);
4728 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4729 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4730 +
4731 +#ifdef CONFIG_PAX_ASLR
4732 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4733 +
4734 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4735 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4736 +#endif
4737
4738 /* This yields a mask that user programs can use to figure out what
4739 instruction set this CPU supports. */
4740 @@ -182,7 +188,8 @@ extern char elf_platform[];
4741 #define ELF_PLATFORM (elf_platform)
4742
4743 #ifndef CONFIG_64BIT
4744 -#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
4745 +#define SET_PERSONALITY(ex) \
4746 + set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
4747 #else /* CONFIG_64BIT */
4748 #define SET_PERSONALITY(ex) \
4749 do { \
4750 @@ -210,7 +217,4 @@ struct linux_binprm;
4751 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4752 int arch_setup_additional_pages(struct linux_binprm *, int);
4753
4754 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4755 -#define arch_randomize_brk arch_randomize_brk
4756 -
4757 #endif
4758 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4759 index c4a93d6..4d2a9b4 100644
4760 --- a/arch/s390/include/asm/exec.h
4761 +++ b/arch/s390/include/asm/exec.h
4762 @@ -7,6 +7,6 @@
4763 #ifndef __ASM_EXEC_H
4764 #define __ASM_EXEC_H
4765
4766 -extern unsigned long arch_align_stack(unsigned long sp);
4767 +#define arch_align_stack(x) ((x) & ~0xfUL)
4768
4769 #endif /* __ASM_EXEC_H */
4770 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4771 index 1f3a79b..44d7f9c 100644
4772 --- a/arch/s390/include/asm/uaccess.h
4773 +++ b/arch/s390/include/asm/uaccess.h
4774 @@ -241,6 +241,10 @@ static inline unsigned long __must_check
4775 copy_to_user(void __user *to, const void *from, unsigned long n)
4776 {
4777 might_fault();
4778 +
4779 + if ((long)n < 0)
4780 + return n;
4781 +
4782 if (access_ok(VERIFY_WRITE, to, n))
4783 n = __copy_to_user(to, from, n);
4784 return n;
4785 @@ -266,6 +270,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4786 static inline unsigned long __must_check
4787 __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789 + if ((long)n < 0)
4790 + return n;
4791 +
4792 if (__builtin_constant_p(n) && (n <= 256))
4793 return uaccess.copy_from_user_small(n, from, to);
4794 else
4795 @@ -297,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4796 static inline unsigned long __must_check
4797 copy_from_user(void *to, const void __user *from, unsigned long n)
4798 {
4799 - unsigned int sz = __compiletime_object_size(to);
4800 + size_t sz = __compiletime_object_size(to);
4801
4802 might_fault();
4803 - if (unlikely(sz != -1 && sz < n)) {
4804 +
4805 + if ((long)n < 0)
4806 + return n;
4807 +
4808 + if (unlikely(sz != (size_t)-1 && sz < n)) {
4809 copy_from_user_overflow();
4810 return n;
4811 }
4812 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4813 index dfcb343..eda788a 100644
4814 --- a/arch/s390/kernel/module.c
4815 +++ b/arch/s390/kernel/module.c
4816 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4817
4818 /* Increase core size by size of got & plt and set start
4819 offsets for got and plt. */
4820 - me->core_size = ALIGN(me->core_size, 4);
4821 - me->arch.got_offset = me->core_size;
4822 - me->core_size += me->arch.got_size;
4823 - me->arch.plt_offset = me->core_size;
4824 - me->core_size += me->arch.plt_size;
4825 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4826 + me->arch.got_offset = me->core_size_rw;
4827 + me->core_size_rw += me->arch.got_size;
4828 + me->arch.plt_offset = me->core_size_rx;
4829 + me->core_size_rx += me->arch.plt_size;
4830 return 0;
4831 }
4832
4833 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4834 if (info->got_initialized == 0) {
4835 Elf_Addr *gotent;
4836
4837 - gotent = me->module_core + me->arch.got_offset +
4838 + gotent = me->module_core_rw + me->arch.got_offset +
4839 info->got_offset;
4840 *gotent = val;
4841 info->got_initialized = 1;
4842 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4843 else if (r_type == R_390_GOTENT ||
4844 r_type == R_390_GOTPLTENT)
4845 *(unsigned int *) loc =
4846 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4847 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4848 else if (r_type == R_390_GOT64 ||
4849 r_type == R_390_GOTPLT64)
4850 *(unsigned long *) loc = val;
4851 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4852 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4853 if (info->plt_initialized == 0) {
4854 unsigned int *ip;
4855 - ip = me->module_core + me->arch.plt_offset +
4856 + ip = me->module_core_rx + me->arch.plt_offset +
4857 info->plt_offset;
4858 #ifndef CONFIG_64BIT
4859 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4860 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4861 val - loc + 0xffffUL < 0x1ffffeUL) ||
4862 (r_type == R_390_PLT32DBL &&
4863 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4864 - val = (Elf_Addr) me->module_core +
4865 + val = (Elf_Addr) me->module_core_rx +
4866 me->arch.plt_offset +
4867 info->plt_offset;
4868 val += rela->r_addend - loc;
4869 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4870 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4871 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4872 val = val + rela->r_addend -
4873 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4874 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4875 if (r_type == R_390_GOTOFF16)
4876 *(unsigned short *) loc = val;
4877 else if (r_type == R_390_GOTOFF32)
4878 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4879 break;
4880 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4881 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4882 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4883 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4884 rela->r_addend - loc;
4885 if (r_type == R_390_GOTPC)
4886 *(unsigned int *) loc = val;
4887 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4888 index 60055ce..ee4b252 100644
4889 --- a/arch/s390/kernel/process.c
4890 +++ b/arch/s390/kernel/process.c
4891 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4892 }
4893 return 0;
4894 }
4895 -
4896 -unsigned long arch_align_stack(unsigned long sp)
4897 -{
4898 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4899 - sp -= get_random_int() & ~PAGE_MASK;
4900 - return sp & ~0xf;
4901 -}
4902 -
4903 -static inline unsigned long brk_rnd(void)
4904 -{
4905 - /* 8MB for 32bit, 1GB for 64bit */
4906 - if (is_32bit_task())
4907 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4908 - else
4909 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4910 -}
4911 -
4912 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4913 -{
4914 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4915 -
4916 - if (ret < mm->brk)
4917 - return mm->brk;
4918 - return ret;
4919 -}
4920 -
4921 -unsigned long randomize_et_dyn(unsigned long base)
4922 -{
4923 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4924 -
4925 - if (!(current->flags & PF_RANDOMIZE))
4926 - return base;
4927 - if (ret < base)
4928 - return base;
4929 - return ret;
4930 -}
4931 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4932 index a64fe53..5c66963 100644
4933 --- a/arch/s390/mm/mmap.c
4934 +++ b/arch/s390/mm/mmap.c
4935 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4936 */
4937 if (mmap_is_legacy()) {
4938 mm->mmap_base = TASK_UNMAPPED_BASE;
4939 +
4940 +#ifdef CONFIG_PAX_RANDMMAP
4941 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4942 + mm->mmap_base += mm->delta_mmap;
4943 +#endif
4944 +
4945 mm->get_unmapped_area = arch_get_unmapped_area;
4946 mm->unmap_area = arch_unmap_area;
4947 } else {
4948 mm->mmap_base = mmap_base();
4949 +
4950 +#ifdef CONFIG_PAX_RANDMMAP
4951 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4952 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4953 +#endif
4954 +
4955 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4956 mm->unmap_area = arch_unmap_area_topdown;
4957 }
4958 @@ -174,10 +186,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4959 */
4960 if (mmap_is_legacy()) {
4961 mm->mmap_base = TASK_UNMAPPED_BASE;
4962 +
4963 +#ifdef CONFIG_PAX_RANDMMAP
4964 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4965 + mm->mmap_base += mm->delta_mmap;
4966 +#endif
4967 +
4968 mm->get_unmapped_area = s390_get_unmapped_area;
4969 mm->unmap_area = arch_unmap_area;
4970 } else {
4971 mm->mmap_base = mmap_base();
4972 +
4973 +#ifdef CONFIG_PAX_RANDMMAP
4974 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4975 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4976 +#endif
4977 +
4978 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4979 mm->unmap_area = arch_unmap_area_topdown;
4980 }
4981 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4982 index ae3d59f..f65f075 100644
4983 --- a/arch/score/include/asm/cache.h
4984 +++ b/arch/score/include/asm/cache.h
4985 @@ -1,7 +1,9 @@
4986 #ifndef _ASM_SCORE_CACHE_H
4987 #define _ASM_SCORE_CACHE_H
4988
4989 +#include <linux/const.h>
4990 +
4991 #define L1_CACHE_SHIFT 4
4992 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4993 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4994
4995 #endif /* _ASM_SCORE_CACHE_H */
4996 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4997 index f9f3cd5..58ff438 100644
4998 --- a/arch/score/include/asm/exec.h
4999 +++ b/arch/score/include/asm/exec.h
5000 @@ -1,6 +1,6 @@
5001 #ifndef _ASM_SCORE_EXEC_H
5002 #define _ASM_SCORE_EXEC_H
5003
5004 -extern unsigned long arch_align_stack(unsigned long sp);
5005 +#define arch_align_stack(x) (x)
5006
5007 #endif /* _ASM_SCORE_EXEC_H */
5008 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5009 index 2707023..1c2a3b7 100644
5010 --- a/arch/score/kernel/process.c
5011 +++ b/arch/score/kernel/process.c
5012 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
5013
5014 return task_pt_regs(task)->cp0_epc;
5015 }
5016 -
5017 -unsigned long arch_align_stack(unsigned long sp)
5018 -{
5019 - return sp;
5020 -}
5021 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5022 index ef9e555..331bd29 100644
5023 --- a/arch/sh/include/asm/cache.h
5024 +++ b/arch/sh/include/asm/cache.h
5025 @@ -9,10 +9,11 @@
5026 #define __ASM_SH_CACHE_H
5027 #ifdef __KERNEL__
5028
5029 +#include <linux/const.h>
5030 #include <linux/init.h>
5031 #include <cpu/cache.h>
5032
5033 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5034 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5035
5036 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5037
5038 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5039 index afeb710..d1d1289 100644
5040 --- a/arch/sh/mm/mmap.c
5041 +++ b/arch/sh/mm/mmap.c
5042 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5043 addr = PAGE_ALIGN(addr);
5044
5045 vma = find_vma(mm, addr);
5046 - if (TASK_SIZE - len >= addr &&
5047 - (!vma || addr + len <= vma->vm_start))
5048 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5049 return addr;
5050 }
5051
5052 @@ -106,7 +105,7 @@ full_search:
5053 }
5054 return -ENOMEM;
5055 }
5056 - if (likely(!vma || addr + len <= vma->vm_start)) {
5057 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5058 /*
5059 * Remember the place where we stopped the search:
5060 */
5061 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5062 addr = PAGE_ALIGN(addr);
5063
5064 vma = find_vma(mm, addr);
5065 - if (TASK_SIZE - len >= addr &&
5066 - (!vma || addr + len <= vma->vm_start))
5067 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5068 return addr;
5069 }
5070
5071 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5072 /* make sure it can fit in the remaining address space */
5073 if (likely(addr > len)) {
5074 vma = find_vma(mm, addr-len);
5075 - if (!vma || addr <= vma->vm_start) {
5076 + if (check_heap_stack_gap(vma, addr - len, len)) {
5077 /* remember the address as a hint for next time */
5078 return (mm->free_area_cache = addr-len);
5079 }
5080 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5081 if (unlikely(mm->mmap_base < len))
5082 goto bottomup;
5083
5084 - addr = mm->mmap_base-len;
5085 - if (do_colour_align)
5086 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5087 + addr = mm->mmap_base - len;
5088
5089 do {
5090 + if (do_colour_align)
5091 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5092 /*
5093 * Lookup failure means no vma is above this address,
5094 * else if new region fits below vma->vm_start,
5095 * return with success:
5096 */
5097 vma = find_vma(mm, addr);
5098 - if (likely(!vma || addr+len <= vma->vm_start)) {
5099 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5100 /* remember the address as a hint for next time */
5101 return (mm->free_area_cache = addr);
5102 }
5103 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5104 mm->cached_hole_size = vma->vm_start - addr;
5105
5106 /* try just below the current vma->vm_start */
5107 - addr = vma->vm_start-len;
5108 - if (do_colour_align)
5109 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5110 - } while (likely(len < vma->vm_start));
5111 + addr = skip_heap_stack_gap(vma, len);
5112 + } while (!IS_ERR_VALUE(addr));
5113
5114 bottomup:
5115 /*
5116 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5117 index ce35a1c..2e7b8f9 100644
5118 --- a/arch/sparc/include/asm/atomic_64.h
5119 +++ b/arch/sparc/include/asm/atomic_64.h
5120 @@ -14,18 +14,40 @@
5121 #define ATOMIC64_INIT(i) { (i) }
5122
5123 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5124 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5125 +{
5126 + return v->counter;
5127 +}
5128 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5129 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5130 +{
5131 + return v->counter;
5132 +}
5133
5134 #define atomic_set(v, i) (((v)->counter) = i)
5135 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5136 +{
5137 + v->counter = i;
5138 +}
5139 #define atomic64_set(v, i) (((v)->counter) = i)
5140 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5141 +{
5142 + v->counter = i;
5143 +}
5144
5145 extern void atomic_add(int, atomic_t *);
5146 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5147 extern void atomic64_add(long, atomic64_t *);
5148 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5149 extern void atomic_sub(int, atomic_t *);
5150 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5151 extern void atomic64_sub(long, atomic64_t *);
5152 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5153
5154 extern int atomic_add_ret(int, atomic_t *);
5155 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5156 extern long atomic64_add_ret(long, atomic64_t *);
5157 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5158 extern int atomic_sub_ret(int, atomic_t *);
5159 extern long atomic64_sub_ret(long, atomic64_t *);
5160
5161 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5162 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5163
5164 #define atomic_inc_return(v) atomic_add_ret(1, v)
5165 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5166 +{
5167 + return atomic_add_ret_unchecked(1, v);
5168 +}
5169 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5170 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5171 +{
5172 + return atomic64_add_ret_unchecked(1, v);
5173 +}
5174
5175 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5176 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5177
5178 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5179 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5180 +{
5181 + return atomic_add_ret_unchecked(i, v);
5182 +}
5183 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5184 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5185 +{
5186 + return atomic64_add_ret_unchecked(i, v);
5187 +}
5188
5189 /*
5190 * atomic_inc_and_test - increment and test
5191 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5192 * other cases.
5193 */
5194 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5195 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5196 +{
5197 + return atomic_inc_return_unchecked(v) == 0;
5198 +}
5199 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5200
5201 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5202 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5203 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5204
5205 #define atomic_inc(v) atomic_add(1, v)
5206 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5207 +{
5208 + atomic_add_unchecked(1, v);
5209 +}
5210 #define atomic64_inc(v) atomic64_add(1, v)
5211 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5212 +{
5213 + atomic64_add_unchecked(1, v);
5214 +}
5215
5216 #define atomic_dec(v) atomic_sub(1, v)
5217 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5218 +{
5219 + atomic_sub_unchecked(1, v);
5220 +}
5221 #define atomic64_dec(v) atomic64_sub(1, v)
5222 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5223 +{
5224 + atomic64_sub_unchecked(1, v);
5225 +}
5226
5227 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5228 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5229
5230 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5231 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5232 +{
5233 + return cmpxchg(&v->counter, old, new);
5234 +}
5235 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5236 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5237 +{
5238 + return xchg(&v->counter, new);
5239 +}
5240
5241 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5242 {
5243 - int c, old;
5244 + int c, old, new;
5245 c = atomic_read(v);
5246 for (;;) {
5247 - if (unlikely(c == (u)))
5248 + if (unlikely(c == u))
5249 break;
5250 - old = atomic_cmpxchg((v), c, c + (a));
5251 +
5252 + asm volatile("addcc %2, %0, %0\n"
5253 +
5254 +#ifdef CONFIG_PAX_REFCOUNT
5255 + "tvs %%icc, 6\n"
5256 +#endif
5257 +
5258 + : "=r" (new)
5259 + : "0" (c), "ir" (a)
5260 + : "cc");
5261 +
5262 + old = atomic_cmpxchg(v, c, new);
5263 if (likely(old == c))
5264 break;
5265 c = old;
5266 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5267 #define atomic64_cmpxchg(v, o, n) \
5268 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5269 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5270 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5271 +{
5272 + return xchg(&v->counter, new);
5273 +}
5274
5275 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5276 {
5277 - long c, old;
5278 + long c, old, new;
5279 c = atomic64_read(v);
5280 for (;;) {
5281 - if (unlikely(c == (u)))
5282 + if (unlikely(c == u))
5283 break;
5284 - old = atomic64_cmpxchg((v), c, c + (a));
5285 +
5286 + asm volatile("addcc %2, %0, %0\n"
5287 +
5288 +#ifdef CONFIG_PAX_REFCOUNT
5289 + "tvs %%xcc, 6\n"
5290 +#endif
5291 +
5292 + : "=r" (new)
5293 + : "0" (c), "ir" (a)
5294 + : "cc");
5295 +
5296 + old = atomic64_cmpxchg(v, c, new);
5297 if (likely(old == c))
5298 break;
5299 c = old;
5300 }
5301 - return c != (u);
5302 + return c != u;
5303 }
5304
5305 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5306 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5307 index 5bb6991..5c2132e 100644
5308 --- a/arch/sparc/include/asm/cache.h
5309 +++ b/arch/sparc/include/asm/cache.h
5310 @@ -7,10 +7,12 @@
5311 #ifndef _SPARC_CACHE_H
5312 #define _SPARC_CACHE_H
5313
5314 +#include <linux/const.h>
5315 +
5316 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5317
5318 #define L1_CACHE_SHIFT 5
5319 -#define L1_CACHE_BYTES 32
5320 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5321
5322 #ifdef CONFIG_SPARC32
5323 #define SMP_CACHE_BYTES_SHIFT 5
5324 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5325 index 2d4d755..81b6662 100644
5326 --- a/arch/sparc/include/asm/elf_32.h
5327 +++ b/arch/sparc/include/asm/elf_32.h
5328 @@ -114,6 +114,13 @@ typedef struct {
5329
5330 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5331
5332 +#ifdef CONFIG_PAX_ASLR
5333 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5334 +
5335 +#define PAX_DELTA_MMAP_LEN 16
5336 +#define PAX_DELTA_STACK_LEN 16
5337 +#endif
5338 +
5339 /* This yields a mask that user programs can use to figure out what
5340 instruction set this cpu supports. This can NOT be done in userspace
5341 on Sparc. */
5342 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5343 index 7df8b7f..4946269 100644
5344 --- a/arch/sparc/include/asm/elf_64.h
5345 +++ b/arch/sparc/include/asm/elf_64.h
5346 @@ -180,6 +180,13 @@ typedef struct {
5347 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5348 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5349
5350 +#ifdef CONFIG_PAX_ASLR
5351 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5352 +
5353 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5354 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5355 +#endif
5356 +
5357 extern unsigned long sparc64_elf_hwcap;
5358 #define ELF_HWCAP sparc64_elf_hwcap
5359
5360 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5361 index e5b169b46..e90b4fa 100644
5362 --- a/arch/sparc/include/asm/pgalloc_32.h
5363 +++ b/arch/sparc/include/asm/pgalloc_32.h
5364 @@ -46,6 +46,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
5365 }
5366
5367 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5368 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
5371 unsigned long address)
5372 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5373 index 40b2d7a..22a665b 100644
5374 --- a/arch/sparc/include/asm/pgalloc_64.h
5375 +++ b/arch/sparc/include/asm/pgalloc_64.h
5376 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5377 }
5378
5379 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5380 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5381
5382 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5383 {
5384 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5385 index cbbbed5..97f72f9 100644
5386 --- a/arch/sparc/include/asm/pgtable_32.h
5387 +++ b/arch/sparc/include/asm/pgtable_32.h
5388 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
5389 #define PAGE_SHARED SRMMU_PAGE_SHARED
5390 #define PAGE_COPY SRMMU_PAGE_COPY
5391 #define PAGE_READONLY SRMMU_PAGE_RDONLY
5392 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
5393 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
5394 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
5395 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
5396
5397 /* Top-level page directory */
5398 @@ -61,18 +64,18 @@ extern unsigned long ptr_in_current_pgd;
5399
5400 /* xwr */
5401 #define __P000 PAGE_NONE
5402 -#define __P001 PAGE_READONLY
5403 -#define __P010 PAGE_COPY
5404 -#define __P011 PAGE_COPY
5405 +#define __P001 PAGE_READONLY_NOEXEC
5406 +#define __P010 PAGE_COPY_NOEXEC
5407 +#define __P011 PAGE_COPY_NOEXEC
5408 #define __P100 PAGE_READONLY
5409 #define __P101 PAGE_READONLY
5410 #define __P110 PAGE_COPY
5411 #define __P111 PAGE_COPY
5412
5413 #define __S000 PAGE_NONE
5414 -#define __S001 PAGE_READONLY
5415 -#define __S010 PAGE_SHARED
5416 -#define __S011 PAGE_SHARED
5417 +#define __S001 PAGE_READONLY_NOEXEC
5418 +#define __S010 PAGE_SHARED_NOEXEC
5419 +#define __S011 PAGE_SHARED_NOEXEC
5420 #define __S100 PAGE_READONLY
5421 #define __S101 PAGE_READONLY
5422 #define __S110 PAGE_SHARED
5423 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5424 index 79da178..c2eede8 100644
5425 --- a/arch/sparc/include/asm/pgtsrmmu.h
5426 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5427 @@ -115,6 +115,11 @@
5428 SRMMU_EXEC | SRMMU_REF)
5429 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5430 SRMMU_EXEC | SRMMU_REF)
5431 +
5432 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5433 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5434 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5435 +
5436 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5437 SRMMU_DIRTY | SRMMU_REF)
5438
5439 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5440 index 9689176..63c18ea 100644
5441 --- a/arch/sparc/include/asm/spinlock_64.h
5442 +++ b/arch/sparc/include/asm/spinlock_64.h
5443 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5444
5445 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5446
5447 -static void inline arch_read_lock(arch_rwlock_t *lock)
5448 +static inline void arch_read_lock(arch_rwlock_t *lock)
5449 {
5450 unsigned long tmp1, tmp2;
5451
5452 __asm__ __volatile__ (
5453 "1: ldsw [%2], %0\n"
5454 " brlz,pn %0, 2f\n"
5455 -"4: add %0, 1, %1\n"
5456 +"4: addcc %0, 1, %1\n"
5457 +
5458 +#ifdef CONFIG_PAX_REFCOUNT
5459 +" tvs %%icc, 6\n"
5460 +#endif
5461 +
5462 " cas [%2], %0, %1\n"
5463 " cmp %0, %1\n"
5464 " bne,pn %%icc, 1b\n"
5465 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5466 " .previous"
5467 : "=&r" (tmp1), "=&r" (tmp2)
5468 : "r" (lock)
5469 - : "memory");
5470 + : "memory", "cc");
5471 }
5472
5473 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5474 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5475 {
5476 int tmp1, tmp2;
5477
5478 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 "1: ldsw [%2], %0\n"
5480 " brlz,a,pn %0, 2f\n"
5481 " mov 0, %0\n"
5482 -" add %0, 1, %1\n"
5483 +" addcc %0, 1, %1\n"
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 +" tvs %%icc, 6\n"
5487 +#endif
5488 +
5489 " cas [%2], %0, %1\n"
5490 " cmp %0, %1\n"
5491 " bne,pn %%icc, 1b\n"
5492 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5493 return tmp1;
5494 }
5495
5496 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5497 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5498 {
5499 unsigned long tmp1, tmp2;
5500
5501 __asm__ __volatile__(
5502 "1: lduw [%2], %0\n"
5503 -" sub %0, 1, %1\n"
5504 +" subcc %0, 1, %1\n"
5505 +
5506 +#ifdef CONFIG_PAX_REFCOUNT
5507 +" tvs %%icc, 6\n"
5508 +#endif
5509 +
5510 " cas [%2], %0, %1\n"
5511 " cmp %0, %1\n"
5512 " bne,pn %%xcc, 1b\n"
5513 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5514 : "memory");
5515 }
5516
5517 -static void inline arch_write_lock(arch_rwlock_t *lock)
5518 +static inline void arch_write_lock(arch_rwlock_t *lock)
5519 {
5520 unsigned long mask, tmp1, tmp2;
5521
5522 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5523 : "memory");
5524 }
5525
5526 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5527 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5528 {
5529 __asm__ __volatile__(
5530 " stw %%g0, [%0]"
5531 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5532 : "memory");
5533 }
5534
5535 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5536 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5537 {
5538 unsigned long mask, tmp1, tmp2, result;
5539
5540 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5541 index e6cd224..3a71793 100644
5542 --- a/arch/sparc/include/asm/thread_info_32.h
5543 +++ b/arch/sparc/include/asm/thread_info_32.h
5544 @@ -49,6 +49,8 @@ struct thread_info {
5545 unsigned long w_saved;
5546
5547 struct restart_block restart_block;
5548 +
5549 + unsigned long lowest_stack;
5550 };
5551
5552 /*
5553 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5554 index cfa8c38..13f30d3 100644
5555 --- a/arch/sparc/include/asm/thread_info_64.h
5556 +++ b/arch/sparc/include/asm/thread_info_64.h
5557 @@ -63,6 +63,8 @@ struct thread_info {
5558 struct pt_regs *kern_una_regs;
5559 unsigned int kern_una_insn;
5560
5561 + unsigned long lowest_stack;
5562 +
5563 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5564 };
5565
5566 @@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5567 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5568 /* flag bit 6 is available */
5569 #define TIF_32BIT 7 /* 32-bit binary */
5570 -/* flag bit 8 is available */
5571 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5572 #define TIF_SECCOMP 9 /* secure computing */
5573 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5574 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5575 +
5576 /* NOTE: Thread flags >= 12 should be ones we have no interest
5577 * in using in assembly, else we can't use the mask as
5578 * an immediate value in instructions such as andcc.
5579 @@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5580 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5581 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5582 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5583 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5584
5585 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5586 _TIF_DO_NOTIFY_RESUME_MASK | \
5587 _TIF_NEED_RESCHED)
5588 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5589
5590 +#define _TIF_WORK_SYSCALL \
5591 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5592 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5593 +
5594 +
5595 /*
5596 * Thread-synchronous status.
5597 *
5598 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5599 index 0167d26..9acd8ed 100644
5600 --- a/arch/sparc/include/asm/uaccess.h
5601 +++ b/arch/sparc/include/asm/uaccess.h
5602 @@ -1,5 +1,13 @@
5603 #ifndef ___ASM_SPARC_UACCESS_H
5604 #define ___ASM_SPARC_UACCESS_H
5605 +
5606 +#ifdef __KERNEL__
5607 +#ifndef __ASSEMBLY__
5608 +#include <linux/types.h>
5609 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5610 +#endif
5611 +#endif
5612 +
5613 #if defined(__sparc__) && defined(__arch64__)
5614 #include <asm/uaccess_64.h>
5615 #else
5616 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5617 index 53a28dd..50c38c3 100644
5618 --- a/arch/sparc/include/asm/uaccess_32.h
5619 +++ b/arch/sparc/include/asm/uaccess_32.h
5620 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5621
5622 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5623 {
5624 - if (n && __access_ok((unsigned long) to, n))
5625 + if ((long)n < 0)
5626 + return n;
5627 +
5628 + if (n && __access_ok((unsigned long) to, n)) {
5629 + if (!__builtin_constant_p(n))
5630 + check_object_size(from, n, true);
5631 return __copy_user(to, (__force void __user *) from, n);
5632 - else
5633 + } else
5634 return n;
5635 }
5636
5637 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5638 {
5639 + if ((long)n < 0)
5640 + return n;
5641 +
5642 + if (!__builtin_constant_p(n))
5643 + check_object_size(from, n, true);
5644 +
5645 return __copy_user(to, (__force void __user *) from, n);
5646 }
5647
5648 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5649 {
5650 - if (n && __access_ok((unsigned long) from, n))
5651 + if ((long)n < 0)
5652 + return n;
5653 +
5654 + if (n && __access_ok((unsigned long) from, n)) {
5655 + if (!__builtin_constant_p(n))
5656 + check_object_size(to, n, false);
5657 return __copy_user((__force void __user *) to, from, n);
5658 - else
5659 + } else
5660 return n;
5661 }
5662
5663 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5664 {
5665 + if ((long)n < 0)
5666 + return n;
5667 +
5668 return __copy_user((__force void __user *) to, from, n);
5669 }
5670
5671 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5672 index 7c831d8..d440ca7 100644
5673 --- a/arch/sparc/include/asm/uaccess_64.h
5674 +++ b/arch/sparc/include/asm/uaccess_64.h
5675 @@ -10,6 +10,7 @@
5676 #include <linux/compiler.h>
5677 #include <linux/string.h>
5678 #include <linux/thread_info.h>
5679 +#include <linux/kernel.h>
5680 #include <asm/asi.h>
5681 #include <asm/spitfire.h>
5682 #include <asm-generic/uaccess-unaligned.h>
5683 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5684 static inline unsigned long __must_check
5685 copy_from_user(void *to, const void __user *from, unsigned long size)
5686 {
5687 - unsigned long ret = ___copy_from_user(to, from, size);
5688 + unsigned long ret;
5689
5690 + if ((long)size < 0 || size > INT_MAX)
5691 + return size;
5692 +
5693 + if (!__builtin_constant_p(size))
5694 + check_object_size(to, size, false);
5695 +
5696 + ret = ___copy_from_user(to, from, size);
5697 if (unlikely(ret))
5698 ret = copy_from_user_fixup(to, from, size);
5699
5700 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5701 static inline unsigned long __must_check
5702 copy_to_user(void __user *to, const void *from, unsigned long size)
5703 {
5704 - unsigned long ret = ___copy_to_user(to, from, size);
5705 + unsigned long ret;
5706
5707 + if ((long)size < 0 || size > INT_MAX)
5708 + return size;
5709 +
5710 + if (!__builtin_constant_p(size))
5711 + check_object_size(from, size, true);
5712 +
5713 + ret = ___copy_to_user(to, from, size);
5714 if (unlikely(ret))
5715 ret = copy_to_user_fixup(to, from, size);
5716 return ret;
5717 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5718 index 6cf591b..b49e65a 100644
5719 --- a/arch/sparc/kernel/Makefile
5720 +++ b/arch/sparc/kernel/Makefile
5721 @@ -3,7 +3,7 @@
5722 #
5723
5724 asflags-y := -ansi
5725 -ccflags-y := -Werror
5726 +#ccflags-y := -Werror
5727
5728 extra-y := head_$(BITS).o
5729
5730 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5731 index cb36e82..1c1462f 100644
5732 --- a/arch/sparc/kernel/process_32.c
5733 +++ b/arch/sparc/kernel/process_32.c
5734 @@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
5735
5736 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5737 r->psr, r->pc, r->npc, r->y, print_tainted());
5738 - printk("PC: <%pS>\n", (void *) r->pc);
5739 + printk("PC: <%pA>\n", (void *) r->pc);
5740 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5741 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5742 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5743 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5744 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5745 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5746 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5747 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5748
5749 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5750 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5751 @@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5752 rw = (struct reg_window32 *) fp;
5753 pc = rw->ins[7];
5754 printk("[%08lx : ", pc);
5755 - printk("%pS ] ", (void *) pc);
5756 + printk("%pA ] ", (void *) pc);
5757 fp = rw->ins[6];
5758 } while (++count < 16);
5759 printk("\n");
5760 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5761 index aff0c72..9067b39 100644
5762 --- a/arch/sparc/kernel/process_64.c
5763 +++ b/arch/sparc/kernel/process_64.c
5764 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5765 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5766 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5767 if (regs->tstate & TSTATE_PRIV)
5768 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5769 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5770 }
5771
5772 void show_regs(struct pt_regs *regs)
5773 {
5774 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5775 regs->tpc, regs->tnpc, regs->y, print_tainted());
5776 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5777 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5778 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5779 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5780 regs->u_regs[3]);
5781 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5782 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5783 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5784 regs->u_regs[15]);
5785 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5786 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5787 show_regwindow(regs);
5788 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5789 }
5790 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5791 ((tp && tp->task) ? tp->task->pid : -1));
5792
5793 if (gp->tstate & TSTATE_PRIV) {
5794 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5795 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5796 (void *) gp->tpc,
5797 (void *) gp->o7,
5798 (void *) gp->i7,
5799 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5800 index 484daba..0674139 100644
5801 --- a/arch/sparc/kernel/ptrace_64.c
5802 +++ b/arch/sparc/kernel/ptrace_64.c
5803 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5804 return ret;
5805 }
5806
5807 +#ifdef CONFIG_GRKERNSEC_SETXID
5808 +extern void gr_delayed_cred_worker(void);
5809 +#endif
5810 +
5811 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5812 {
5813 int ret = 0;
5814 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5815 /* do the secure computing check first */
5816 secure_computing_strict(regs->u_regs[UREG_G1]);
5817
5818 +#ifdef CONFIG_GRKERNSEC_SETXID
5819 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5820 + gr_delayed_cred_worker();
5821 +#endif
5822 +
5823 if (test_thread_flag(TIF_SYSCALL_TRACE))
5824 ret = tracehook_report_syscall_entry(regs);
5825
5826 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5827
5828 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5829 {
5830 +#ifdef CONFIG_GRKERNSEC_SETXID
5831 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5832 + gr_delayed_cred_worker();
5833 +#endif
5834 +
5835 audit_syscall_exit(regs);
5836
5837 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5838 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5839 index 0c9b31b..7cb7aee 100644
5840 --- a/arch/sparc/kernel/sys_sparc_32.c
5841 +++ b/arch/sparc/kernel/sys_sparc_32.c
5842 @@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5843 if (len > TASK_SIZE - PAGE_SIZE)
5844 return -ENOMEM;
5845 if (!addr)
5846 - addr = TASK_UNMAPPED_BASE;
5847 + addr = current->mm->mmap_base;
5848
5849 if (flags & MAP_SHARED)
5850 addr = COLOUR_ALIGN(addr);
5851 @@ -65,7 +65,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5852 /* At this point: (!vmm || addr < vmm->vm_end). */
5853 if (TASK_SIZE - PAGE_SIZE - len < addr)
5854 return -ENOMEM;
5855 - if (!vmm || addr + len <= vmm->vm_start)
5856 + if (check_heap_stack_gap(vmm, addr, len))
5857 return addr;
5858 addr = vmm->vm_end;
5859 if (flags & MAP_SHARED)
5860 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5861 index 275f74f..81bf5b8 100644
5862 --- a/arch/sparc/kernel/sys_sparc_64.c
5863 +++ b/arch/sparc/kernel/sys_sparc_64.c
5864 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5865 /* We do not accept a shared mapping if it would violate
5866 * cache aliasing constraints.
5867 */
5868 - if ((flags & MAP_SHARED) &&
5869 + if ((filp || (flags & MAP_SHARED)) &&
5870 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5871 return -EINVAL;
5872 return addr;
5873 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5874 if (filp || (flags & MAP_SHARED))
5875 do_color_align = 1;
5876
5877 +#ifdef CONFIG_PAX_RANDMMAP
5878 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5879 +#endif
5880 +
5881 if (addr) {
5882 if (do_color_align)
5883 addr = COLOUR_ALIGN(addr, pgoff);
5884 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5885 addr = PAGE_ALIGN(addr);
5886
5887 vma = find_vma(mm, addr);
5888 - if (task_size - len >= addr &&
5889 - (!vma || addr + len <= vma->vm_start))
5890 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5891 return addr;
5892 }
5893
5894 if (len > mm->cached_hole_size) {
5895 - start_addr = addr = mm->free_area_cache;
5896 + start_addr = addr = mm->free_area_cache;
5897 } else {
5898 - start_addr = addr = TASK_UNMAPPED_BASE;
5899 + start_addr = addr = mm->mmap_base;
5900 mm->cached_hole_size = 0;
5901 }
5902
5903 @@ -174,14 +177,14 @@ full_search:
5904 vma = find_vma(mm, VA_EXCLUDE_END);
5905 }
5906 if (unlikely(task_size < addr)) {
5907 - if (start_addr != TASK_UNMAPPED_BASE) {
5908 - start_addr = addr = TASK_UNMAPPED_BASE;
5909 + if (start_addr != mm->mmap_base) {
5910 + start_addr = addr = mm->mmap_base;
5911 mm->cached_hole_size = 0;
5912 goto full_search;
5913 }
5914 return -ENOMEM;
5915 }
5916 - if (likely(!vma || addr + len <= vma->vm_start)) {
5917 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5918 /*
5919 * Remember the place where we stopped the search:
5920 */
5921 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5922 /* We do not accept a shared mapping if it would violate
5923 * cache aliasing constraints.
5924 */
5925 - if ((flags & MAP_SHARED) &&
5926 + if ((filp || (flags & MAP_SHARED)) &&
5927 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5928 return -EINVAL;
5929 return addr;
5930 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5931 addr = PAGE_ALIGN(addr);
5932
5933 vma = find_vma(mm, addr);
5934 - if (task_size - len >= addr &&
5935 - (!vma || addr + len <= vma->vm_start))
5936 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5937 return addr;
5938 }
5939
5940 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5941 /* make sure it can fit in the remaining address space */
5942 if (likely(addr > len)) {
5943 vma = find_vma(mm, addr-len);
5944 - if (!vma || addr <= vma->vm_start) {
5945 + if (check_heap_stack_gap(vma, addr - len, len)) {
5946 /* remember the address as a hint for next time */
5947 return (mm->free_area_cache = addr-len);
5948 }
5949 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5950 if (unlikely(mm->mmap_base < len))
5951 goto bottomup;
5952
5953 - addr = mm->mmap_base-len;
5954 - if (do_color_align)
5955 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 + addr = mm->mmap_base - len;
5957
5958 do {
5959 + if (do_color_align)
5960 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5961 /*
5962 * Lookup failure means no vma is above this address,
5963 * else if new region fits below vma->vm_start,
5964 * return with success:
5965 */
5966 vma = find_vma(mm, addr);
5967 - if (likely(!vma || addr+len <= vma->vm_start)) {
5968 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5969 /* remember the address as a hint for next time */
5970 return (mm->free_area_cache = addr);
5971 }
5972 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5973 mm->cached_hole_size = vma->vm_start - addr;
5974
5975 /* try just below the current vma->vm_start */
5976 - addr = vma->vm_start-len;
5977 - if (do_color_align)
5978 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5979 - } while (likely(len < vma->vm_start));
5980 + addr = skip_heap_stack_gap(vma, len);
5981 + } while (!IS_ERR_VALUE(addr));
5982
5983 bottomup:
5984 /*
5985 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5986 gap == RLIM_INFINITY ||
5987 sysctl_legacy_va_layout) {
5988 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5989 +
5990 +#ifdef CONFIG_PAX_RANDMMAP
5991 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5992 + mm->mmap_base += mm->delta_mmap;
5993 +#endif
5994 +
5995 mm->get_unmapped_area = arch_get_unmapped_area;
5996 mm->unmap_area = arch_unmap_area;
5997 } else {
5998 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5999 gap = (task_size / 6 * 5);
6000
6001 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6002 +
6003 +#ifdef CONFIG_PAX_RANDMMAP
6004 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6005 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6006 +#endif
6007 +
6008 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6009 mm->unmap_area = arch_unmap_area_topdown;
6010 }
6011 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6012 index 1d7e274..b39c527 100644
6013 --- a/arch/sparc/kernel/syscalls.S
6014 +++ b/arch/sparc/kernel/syscalls.S
6015 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6016 #endif
6017 .align 32
6018 1: ldx [%g6 + TI_FLAGS], %l5
6019 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6020 + andcc %l5, _TIF_WORK_SYSCALL, %g0
6021 be,pt %icc, rtrap
6022 nop
6023 call syscall_trace_leave
6024 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
6025
6026 srl %i5, 0, %o5 ! IEU1
6027 srl %i2, 0, %o2 ! IEU0 Group
6028 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6029 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6030 bne,pn %icc, linux_syscall_trace32 ! CTI
6031 mov %i0, %l5 ! IEU1
6032 call %l7 ! CTI Group brk forced
6033 @@ -202,7 +202,7 @@ linux_sparc_syscall:
6034
6035 mov %i3, %o3 ! IEU1
6036 mov %i4, %o4 ! IEU0 Group
6037 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6038 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6039 bne,pn %icc, linux_syscall_trace ! CTI Group
6040 mov %i0, %l5 ! IEU0
6041 2: call %l7 ! CTI Group brk forced
6042 @@ -226,7 +226,7 @@ ret_sys_call:
6043
6044 cmp %o0, -ERESTART_RESTARTBLOCK
6045 bgeu,pn %xcc, 1f
6046 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6047 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6048 80:
6049 /* System call success, clear Carry condition code. */
6050 andn %g3, %g2, %g3
6051 @@ -241,7 +241,7 @@ ret_sys_call:
6052 /* System call failure, set Carry condition code.
6053 * Also, get abs(errno) to return to the process.
6054 */
6055 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6056 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6057 sub %g0, %o0, %o0
6058 or %g3, %g2, %g3
6059 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6060 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6061 index a5785ea..405c5f7 100644
6062 --- a/arch/sparc/kernel/traps_32.c
6063 +++ b/arch/sparc/kernel/traps_32.c
6064 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6065 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6066 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6067
6068 +extern void gr_handle_kernel_exploit(void);
6069 +
6070 void die_if_kernel(char *str, struct pt_regs *regs)
6071 {
6072 static int die_counter;
6073 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6074 count++ < 30 &&
6075 (((unsigned long) rw) >= PAGE_OFFSET) &&
6076 !(((unsigned long) rw) & 0x7)) {
6077 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6078 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6079 (void *) rw->ins[7]);
6080 rw = (struct reg_window32 *)rw->ins[6];
6081 }
6082 }
6083 printk("Instruction DUMP:");
6084 instruction_dump ((unsigned long *) regs->pc);
6085 - if(regs->psr & PSR_PS)
6086 + if(regs->psr & PSR_PS) {
6087 + gr_handle_kernel_exploit();
6088 do_exit(SIGKILL);
6089 + }
6090 do_exit(SIGSEGV);
6091 }
6092
6093 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6094 index 3b05e66..6ea2917 100644
6095 --- a/arch/sparc/kernel/traps_64.c
6096 +++ b/arch/sparc/kernel/traps_64.c
6097 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6098 i + 1,
6099 p->trapstack[i].tstate, p->trapstack[i].tpc,
6100 p->trapstack[i].tnpc, p->trapstack[i].tt);
6101 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6102 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6103 }
6104 }
6105
6106 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6107
6108 lvl -= 0x100;
6109 if (regs->tstate & TSTATE_PRIV) {
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + if (lvl == 6)
6113 + pax_report_refcount_overflow(regs);
6114 +#endif
6115 +
6116 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6117 die_if_kernel(buffer, regs);
6118 }
6119 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6120 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6121 {
6122 char buffer[32];
6123 -
6124 +
6125 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6126 0, lvl, SIGTRAP) == NOTIFY_STOP)
6127 return;
6128
6129 +#ifdef CONFIG_PAX_REFCOUNT
6130 + if (lvl == 6)
6131 + pax_report_refcount_overflow(regs);
6132 +#endif
6133 +
6134 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6135
6136 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6137 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6138 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6139 printk("%s" "ERROR(%d): ",
6140 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6141 - printk("TPC<%pS>\n", (void *) regs->tpc);
6142 + printk("TPC<%pA>\n", (void *) regs->tpc);
6143 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6144 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6145 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6146 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6147 smp_processor_id(),
6148 (type & 0x1) ? 'I' : 'D',
6149 regs->tpc);
6150 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6151 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6152 panic("Irrecoverable Cheetah+ parity error.");
6153 }
6154
6155 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6156 smp_processor_id(),
6157 (type & 0x1) ? 'I' : 'D',
6158 regs->tpc);
6159 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6160 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6161 }
6162
6163 struct sun4v_error_entry {
6164 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6165
6166 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6167 regs->tpc, tl);
6168 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6169 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6170 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6171 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6172 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6173 (void *) regs->u_regs[UREG_I7]);
6174 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6175 "pte[%lx] error[%lx]\n",
6176 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6177
6178 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6179 regs->tpc, tl);
6180 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6181 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6182 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6183 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6184 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6185 (void *) regs->u_regs[UREG_I7]);
6186 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6187 "pte[%lx] error[%lx]\n",
6188 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6189 fp = (unsigned long)sf->fp + STACK_BIAS;
6190 }
6191
6192 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6193 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6195 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6196 int index = tsk->curr_ret_stack;
6197 if (tsk->ret_stack && index >= graph) {
6198 pc = tsk->ret_stack[index - graph].ret;
6199 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6200 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6201 graph++;
6202 }
6203 }
6204 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6205 return (struct reg_window *) (fp + STACK_BIAS);
6206 }
6207
6208 +extern void gr_handle_kernel_exploit(void);
6209 +
6210 void die_if_kernel(char *str, struct pt_regs *regs)
6211 {
6212 static int die_counter;
6213 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6214 while (rw &&
6215 count++ < 30 &&
6216 kstack_valid(tp, (unsigned long) rw)) {
6217 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6218 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6219 (void *) rw->ins[7]);
6220
6221 rw = kernel_stack_up(rw);
6222 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6223 }
6224 user_instruction_dump ((unsigned int __user *) regs->tpc);
6225 }
6226 - if (regs->tstate & TSTATE_PRIV)
6227 + if (regs->tstate & TSTATE_PRIV) {
6228 + gr_handle_kernel_exploit();
6229 do_exit(SIGKILL);
6230 + }
6231 do_exit(SIGSEGV);
6232 }
6233 EXPORT_SYMBOL(die_if_kernel);
6234 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6235 index f81d038..e7a4680 100644
6236 --- a/arch/sparc/kernel/unaligned_64.c
6237 +++ b/arch/sparc/kernel/unaligned_64.c
6238 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs *regs)
6239 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6240
6241 if (__ratelimit(&ratelimit)) {
6242 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6243 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6244 regs->tpc, (void *) regs->tpc);
6245 }
6246 }
6247 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6248 index dff4096..bd9a388 100644
6249 --- a/arch/sparc/lib/Makefile
6250 +++ b/arch/sparc/lib/Makefile
6251 @@ -2,7 +2,7 @@
6252 #
6253
6254 asflags-y := -ansi -DST_DIV0=0x02
6255 -ccflags-y := -Werror
6256 +#ccflags-y := -Werror
6257
6258 lib-$(CONFIG_SPARC32) += ashrdi3.o
6259 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6260 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6261 index 4d502da..527c48d 100644
6262 --- a/arch/sparc/lib/atomic_64.S
6263 +++ b/arch/sparc/lib/atomic_64.S
6264 @@ -17,7 +17,12 @@
6265 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6266 BACKOFF_SETUP(%o2)
6267 1: lduw [%o1], %g1
6268 - add %g1, %o0, %g7
6269 + addcc %g1, %o0, %g7
6270 +
6271 +#ifdef CONFIG_PAX_REFCOUNT
6272 + tvs %icc, 6
6273 +#endif
6274 +
6275 cas [%o1], %g1, %g7
6276 cmp %g1, %g7
6277 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6278 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6279 2: BACKOFF_SPIN(%o2, %o3, 1b)
6280 ENDPROC(atomic_add)
6281
6282 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6283 + BACKOFF_SETUP(%o2)
6284 +1: lduw [%o1], %g1
6285 + add %g1, %o0, %g7
6286 + cas [%o1], %g1, %g7
6287 + cmp %g1, %g7
6288 + bne,pn %icc, 2f
6289 + nop
6290 + retl
6291 + nop
6292 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6293 +ENDPROC(atomic_add_unchecked)
6294 +
6295 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6296 BACKOFF_SETUP(%o2)
6297 1: lduw [%o1], %g1
6298 - sub %g1, %o0, %g7
6299 + subcc %g1, %o0, %g7
6300 +
6301 +#ifdef CONFIG_PAX_REFCOUNT
6302 + tvs %icc, 6
6303 +#endif
6304 +
6305 cas [%o1], %g1, %g7
6306 cmp %g1, %g7
6307 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6308 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6309 2: BACKOFF_SPIN(%o2, %o3, 1b)
6310 ENDPROC(atomic_sub)
6311
6312 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6313 + BACKOFF_SETUP(%o2)
6314 +1: lduw [%o1], %g1
6315 + sub %g1, %o0, %g7
6316 + cas [%o1], %g1, %g7
6317 + cmp %g1, %g7
6318 + bne,pn %icc, 2f
6319 + nop
6320 + retl
6321 + nop
6322 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6323 +ENDPROC(atomic_sub_unchecked)
6324 +
6325 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6326 BACKOFF_SETUP(%o2)
6327 1: lduw [%o1], %g1
6328 - add %g1, %o0, %g7
6329 + addcc %g1, %o0, %g7
6330 +
6331 +#ifdef CONFIG_PAX_REFCOUNT
6332 + tvs %icc, 6
6333 +#endif
6334 +
6335 cas [%o1], %g1, %g7
6336 cmp %g1, %g7
6337 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6338 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6339 2: BACKOFF_SPIN(%o2, %o3, 1b)
6340 ENDPROC(atomic_add_ret)
6341
6342 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6343 + BACKOFF_SETUP(%o2)
6344 +1: lduw [%o1], %g1
6345 + addcc %g1, %o0, %g7
6346 + cas [%o1], %g1, %g7
6347 + cmp %g1, %g7
6348 + bne,pn %icc, 2f
6349 + add %g7, %o0, %g7
6350 + sra %g7, 0, %o0
6351 + retl
6352 + nop
6353 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6354 +ENDPROC(atomic_add_ret_unchecked)
6355 +
6356 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6357 BACKOFF_SETUP(%o2)
6358 1: lduw [%o1], %g1
6359 - sub %g1, %o0, %g7
6360 + subcc %g1, %o0, %g7
6361 +
6362 +#ifdef CONFIG_PAX_REFCOUNT
6363 + tvs %icc, 6
6364 +#endif
6365 +
6366 cas [%o1], %g1, %g7
6367 cmp %g1, %g7
6368 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6369 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
6370 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6371 BACKOFF_SETUP(%o2)
6372 1: ldx [%o1], %g1
6373 - add %g1, %o0, %g7
6374 + addcc %g1, %o0, %g7
6375 +
6376 +#ifdef CONFIG_PAX_REFCOUNT
6377 + tvs %xcc, 6
6378 +#endif
6379 +
6380 casx [%o1], %g1, %g7
6381 cmp %g1, %g7
6382 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6383 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6384 2: BACKOFF_SPIN(%o2, %o3, 1b)
6385 ENDPROC(atomic64_add)
6386
6387 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6388 + BACKOFF_SETUP(%o2)
6389 +1: ldx [%o1], %g1
6390 + addcc %g1, %o0, %g7
6391 + casx [%o1], %g1, %g7
6392 + cmp %g1, %g7
6393 + bne,pn %xcc, 2f
6394 + nop
6395 + retl
6396 + nop
6397 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6398 +ENDPROC(atomic64_add_unchecked)
6399 +
6400 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403 - sub %g1, %o0, %g7
6404 + subcc %g1, %o0, %g7
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + tvs %xcc, 6
6408 +#endif
6409 +
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6414 2: BACKOFF_SPIN(%o2, %o3, 1b)
6415 ENDPROC(atomic64_sub)
6416
6417 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6418 + BACKOFF_SETUP(%o2)
6419 +1: ldx [%o1], %g1
6420 + subcc %g1, %o0, %g7
6421 + casx [%o1], %g1, %g7
6422 + cmp %g1, %g7
6423 + bne,pn %xcc, 2f
6424 + nop
6425 + retl
6426 + nop
6427 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6428 +ENDPROC(atomic64_sub_unchecked)
6429 +
6430 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6431 BACKOFF_SETUP(%o2)
6432 1: ldx [%o1], %g1
6433 - add %g1, %o0, %g7
6434 + addcc %g1, %o0, %g7
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + tvs %xcc, 6
6438 +#endif
6439 +
6440 casx [%o1], %g1, %g7
6441 cmp %g1, %g7
6442 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6443 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6444 2: BACKOFF_SPIN(%o2, %o3, 1b)
6445 ENDPROC(atomic64_add_ret)
6446
6447 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6448 + BACKOFF_SETUP(%o2)
6449 +1: ldx [%o1], %g1
6450 + addcc %g1, %o0, %g7
6451 + casx [%o1], %g1, %g7
6452 + cmp %g1, %g7
6453 + bne,pn %xcc, 2f
6454 + add %g7, %o0, %g7
6455 + mov %g7, %o0
6456 + retl
6457 + nop
6458 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6459 +ENDPROC(atomic64_add_ret_unchecked)
6460 +
6461 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6462 BACKOFF_SETUP(%o2)
6463 1: ldx [%o1], %g1
6464 - sub %g1, %o0, %g7
6465 + subcc %g1, %o0, %g7
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + tvs %xcc, 6
6469 +#endif
6470 +
6471 casx [%o1], %g1, %g7
6472 cmp %g1, %g7
6473 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6474 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6475 index 3b31218..345c609 100644
6476 --- a/arch/sparc/lib/ksyms.c
6477 +++ b/arch/sparc/lib/ksyms.c
6478 @@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
6479
6480 /* Atomic counter implementation. */
6481 EXPORT_SYMBOL(atomic_add);
6482 +EXPORT_SYMBOL(atomic_add_unchecked);
6483 EXPORT_SYMBOL(atomic_add_ret);
6484 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6485 EXPORT_SYMBOL(atomic_sub);
6486 +EXPORT_SYMBOL(atomic_sub_unchecked);
6487 EXPORT_SYMBOL(atomic_sub_ret);
6488 EXPORT_SYMBOL(atomic64_add);
6489 +EXPORT_SYMBOL(atomic64_add_unchecked);
6490 EXPORT_SYMBOL(atomic64_add_ret);
6491 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6492 EXPORT_SYMBOL(atomic64_sub);
6493 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6494 EXPORT_SYMBOL(atomic64_sub_ret);
6495
6496 /* Atomic bit operations. */
6497 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6498 index 30c3ecc..736f015 100644
6499 --- a/arch/sparc/mm/Makefile
6500 +++ b/arch/sparc/mm/Makefile
6501 @@ -2,7 +2,7 @@
6502 #
6503
6504 asflags-y := -ansi
6505 -ccflags-y := -Werror
6506 +#ccflags-y := -Werror
6507
6508 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6509 obj-y += fault_$(BITS).o
6510 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6511 index f46cf6b..7235ec9 100644
6512 --- a/arch/sparc/mm/fault_32.c
6513 +++ b/arch/sparc/mm/fault_32.c
6514 @@ -21,6 +21,9 @@
6515 #include <linux/perf_event.h>
6516 #include <linux/interrupt.h>
6517 #include <linux/kdebug.h>
6518 +#include <linux/slab.h>
6519 +#include <linux/pagemap.h>
6520 +#include <linux/compiler.h>
6521
6522 #include <asm/page.h>
6523 #include <asm/pgtable.h>
6524 @@ -177,6 +180,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6525 return safe_compute_effective_address(regs, insn);
6526 }
6527
6528 +#ifdef CONFIG_PAX_PAGEEXEC
6529 +#ifdef CONFIG_PAX_DLRESOLVE
6530 +static void pax_emuplt_close(struct vm_area_struct *vma)
6531 +{
6532 + vma->vm_mm->call_dl_resolve = 0UL;
6533 +}
6534 +
6535 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6536 +{
6537 + unsigned int *kaddr;
6538 +
6539 + vmf->page = alloc_page(GFP_HIGHUSER);
6540 + if (!vmf->page)
6541 + return VM_FAULT_OOM;
6542 +
6543 + kaddr = kmap(vmf->page);
6544 + memset(kaddr, 0, PAGE_SIZE);
6545 + kaddr[0] = 0x9DE3BFA8U; /* save */
6546 + flush_dcache_page(vmf->page);
6547 + kunmap(vmf->page);
6548 + return VM_FAULT_MAJOR;
6549 +}
6550 +
6551 +static const struct vm_operations_struct pax_vm_ops = {
6552 + .close = pax_emuplt_close,
6553 + .fault = pax_emuplt_fault
6554 +};
6555 +
6556 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6557 +{
6558 + int ret;
6559 +
6560 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6561 + vma->vm_mm = current->mm;
6562 + vma->vm_start = addr;
6563 + vma->vm_end = addr + PAGE_SIZE;
6564 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6565 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6566 + vma->vm_ops = &pax_vm_ops;
6567 +
6568 + ret = insert_vm_struct(current->mm, vma);
6569 + if (ret)
6570 + return ret;
6571 +
6572 + ++current->mm->total_vm;
6573 + return 0;
6574 +}
6575 +#endif
6576 +
6577 +/*
6578 + * PaX: decide what to do with offenders (regs->pc = fault address)
6579 + *
6580 + * returns 1 when task should be killed
6581 + * 2 when patched PLT trampoline was detected
6582 + * 3 when unpatched PLT trampoline was detected
6583 + */
6584 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6585 +{
6586 +
6587 +#ifdef CONFIG_PAX_EMUPLT
6588 + int err;
6589 +
6590 + do { /* PaX: patched PLT emulation #1 */
6591 + unsigned int sethi1, sethi2, jmpl;
6592 +
6593 + err = get_user(sethi1, (unsigned int *)regs->pc);
6594 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6595 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6596 +
6597 + if (err)
6598 + break;
6599 +
6600 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6601 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6602 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6603 + {
6604 + unsigned int addr;
6605 +
6606 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6607 + addr = regs->u_regs[UREG_G1];
6608 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6609 + regs->pc = addr;
6610 + regs->npc = addr+4;
6611 + return 2;
6612 + }
6613 + } while (0);
6614 +
6615 + do { /* PaX: patched PLT emulation #2 */
6616 + unsigned int ba;
6617 +
6618 + err = get_user(ba, (unsigned int *)regs->pc);
6619 +
6620 + if (err)
6621 + break;
6622 +
6623 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6624 + unsigned int addr;
6625 +
6626 + if ((ba & 0xFFC00000U) == 0x30800000U)
6627 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6628 + else
6629 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6630 + regs->pc = addr;
6631 + regs->npc = addr+4;
6632 + return 2;
6633 + }
6634 + } while (0);
6635 +
6636 + do { /* PaX: patched PLT emulation #3 */
6637 + unsigned int sethi, bajmpl, nop;
6638 +
6639 + err = get_user(sethi, (unsigned int *)regs->pc);
6640 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
6641 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6642 +
6643 + if (err)
6644 + break;
6645 +
6646 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6647 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6648 + nop == 0x01000000U)
6649 + {
6650 + unsigned int addr;
6651 +
6652 + addr = (sethi & 0x003FFFFFU) << 10;
6653 + regs->u_regs[UREG_G1] = addr;
6654 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6655 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6656 + else
6657 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6658 + regs->pc = addr;
6659 + regs->npc = addr+4;
6660 + return 2;
6661 + }
6662 + } while (0);
6663 +
6664 + do { /* PaX: unpatched PLT emulation step 1 */
6665 + unsigned int sethi, ba, nop;
6666 +
6667 + err = get_user(sethi, (unsigned int *)regs->pc);
6668 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6669 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6670 +
6671 + if (err)
6672 + break;
6673 +
6674 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6675 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6676 + nop == 0x01000000U)
6677 + {
6678 + unsigned int addr, save, call;
6679 +
6680 + if ((ba & 0xFFC00000U) == 0x30800000U)
6681 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6682 + else
6683 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6684 +
6685 + err = get_user(save, (unsigned int *)addr);
6686 + err |= get_user(call, (unsigned int *)(addr+4));
6687 + err |= get_user(nop, (unsigned int *)(addr+8));
6688 + if (err)
6689 + break;
6690 +
6691 +#ifdef CONFIG_PAX_DLRESOLVE
6692 + if (save == 0x9DE3BFA8U &&
6693 + (call & 0xC0000000U) == 0x40000000U &&
6694 + nop == 0x01000000U)
6695 + {
6696 + struct vm_area_struct *vma;
6697 + unsigned long call_dl_resolve;
6698 +
6699 + down_read(&current->mm->mmap_sem);
6700 + call_dl_resolve = current->mm->call_dl_resolve;
6701 + up_read(&current->mm->mmap_sem);
6702 + if (likely(call_dl_resolve))
6703 + goto emulate;
6704 +
6705 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6706 +
6707 + down_write(&current->mm->mmap_sem);
6708 + if (current->mm->call_dl_resolve) {
6709 + call_dl_resolve = current->mm->call_dl_resolve;
6710 + up_write(&current->mm->mmap_sem);
6711 + if (vma)
6712 + kmem_cache_free(vm_area_cachep, vma);
6713 + goto emulate;
6714 + }
6715 +
6716 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6717 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6718 + up_write(&current->mm->mmap_sem);
6719 + if (vma)
6720 + kmem_cache_free(vm_area_cachep, vma);
6721 + return 1;
6722 + }
6723 +
6724 + if (pax_insert_vma(vma, call_dl_resolve)) {
6725 + up_write(&current->mm->mmap_sem);
6726 + kmem_cache_free(vm_area_cachep, vma);
6727 + return 1;
6728 + }
6729 +
6730 + current->mm->call_dl_resolve = call_dl_resolve;
6731 + up_write(&current->mm->mmap_sem);
6732 +
6733 +emulate:
6734 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6735 + regs->pc = call_dl_resolve;
6736 + regs->npc = addr+4;
6737 + return 3;
6738 + }
6739 +#endif
6740 +
6741 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6742 + if ((save & 0xFFC00000U) == 0x05000000U &&
6743 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6744 + nop == 0x01000000U)
6745 + {
6746 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6747 + regs->u_regs[UREG_G2] = addr + 4;
6748 + addr = (save & 0x003FFFFFU) << 10;
6749 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6750 + regs->pc = addr;
6751 + regs->npc = addr+4;
6752 + return 3;
6753 + }
6754 + }
6755 + } while (0);
6756 +
6757 + do { /* PaX: unpatched PLT emulation step 2 */
6758 + unsigned int save, call, nop;
6759 +
6760 + err = get_user(save, (unsigned int *)(regs->pc-4));
6761 + err |= get_user(call, (unsigned int *)regs->pc);
6762 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6763 + if (err)
6764 + break;
6765 +
6766 + if (save == 0x9DE3BFA8U &&
6767 + (call & 0xC0000000U) == 0x40000000U &&
6768 + nop == 0x01000000U)
6769 + {
6770 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6771 +
6772 + regs->u_regs[UREG_RETPC] = regs->pc;
6773 + regs->pc = dl_resolve;
6774 + regs->npc = dl_resolve+4;
6775 + return 3;
6776 + }
6777 + } while (0);
6778 +#endif
6779 +
6780 + return 1;
6781 +}
6782 +
6783 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6784 +{
6785 + unsigned long i;
6786 +
6787 + printk(KERN_ERR "PAX: bytes at PC: ");
6788 + for (i = 0; i < 8; i++) {
6789 + unsigned int c;
6790 + if (get_user(c, (unsigned int *)pc+i))
6791 + printk(KERN_CONT "???????? ");
6792 + else
6793 + printk(KERN_CONT "%08x ", c);
6794 + }
6795 + printk("\n");
6796 +}
6797 +#endif
6798 +
6799 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6800 int text_fault)
6801 {
6802 @@ -248,6 +522,24 @@ good_area:
6803 if (!(vma->vm_flags & VM_WRITE))
6804 goto bad_area;
6805 } else {
6806 +
6807 +#ifdef CONFIG_PAX_PAGEEXEC
6808 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6809 + up_read(&mm->mmap_sem);
6810 + switch (pax_handle_fetch_fault(regs)) {
6811 +
6812 +#ifdef CONFIG_PAX_EMUPLT
6813 + case 2:
6814 + case 3:
6815 + return;
6816 +#endif
6817 +
6818 + }
6819 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6820 + do_group_exit(SIGKILL);
6821 + }
6822 +#endif
6823 +
6824 /* Allow reads even for write-only mappings */
6825 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
6826 goto bad_area;
6827 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6828 index 1fe0429..8dd5dd5 100644
6829 --- a/arch/sparc/mm/fault_64.c
6830 +++ b/arch/sparc/mm/fault_64.c
6831 @@ -21,6 +21,9 @@
6832 #include <linux/kprobes.h>
6833 #include <linux/kdebug.h>
6834 #include <linux/percpu.h>
6835 +#include <linux/slab.h>
6836 +#include <linux/pagemap.h>
6837 +#include <linux/compiler.h>
6838
6839 #include <asm/page.h>
6840 #include <asm/pgtable.h>
6841 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6842 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6843 regs->tpc);
6844 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6845 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6846 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6847 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6848 dump_stack();
6849 unhandled_fault(regs->tpc, current, regs);
6850 @@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6851 show_regs(regs);
6852 }
6853
6854 +#ifdef CONFIG_PAX_PAGEEXEC
6855 +#ifdef CONFIG_PAX_DLRESOLVE
6856 +static void pax_emuplt_close(struct vm_area_struct *vma)
6857 +{
6858 + vma->vm_mm->call_dl_resolve = 0UL;
6859 +}
6860 +
6861 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6862 +{
6863 + unsigned int *kaddr;
6864 +
6865 + vmf->page = alloc_page(GFP_HIGHUSER);
6866 + if (!vmf->page)
6867 + return VM_FAULT_OOM;
6868 +
6869 + kaddr = kmap(vmf->page);
6870 + memset(kaddr, 0, PAGE_SIZE);
6871 + kaddr[0] = 0x9DE3BFA8U; /* save */
6872 + flush_dcache_page(vmf->page);
6873 + kunmap(vmf->page);
6874 + return VM_FAULT_MAJOR;
6875 +}
6876 +
6877 +static const struct vm_operations_struct pax_vm_ops = {
6878 + .close = pax_emuplt_close,
6879 + .fault = pax_emuplt_fault
6880 +};
6881 +
6882 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6883 +{
6884 + int ret;
6885 +
6886 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6887 + vma->vm_mm = current->mm;
6888 + vma->vm_start = addr;
6889 + vma->vm_end = addr + PAGE_SIZE;
6890 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6891 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6892 + vma->vm_ops = &pax_vm_ops;
6893 +
6894 + ret = insert_vm_struct(current->mm, vma);
6895 + if (ret)
6896 + return ret;
6897 +
6898 + ++current->mm->total_vm;
6899 + return 0;
6900 +}
6901 +#endif
6902 +
6903 +/*
6904 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6905 + *
6906 + * returns 1 when task should be killed
6907 + * 2 when patched PLT trampoline was detected
6908 + * 3 when unpatched PLT trampoline was detected
6909 + */
6910 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6911 +{
6912 +
6913 +#ifdef CONFIG_PAX_EMUPLT
6914 + int err;
6915 +
6916 + do { /* PaX: patched PLT emulation #1 */
6917 + unsigned int sethi1, sethi2, jmpl;
6918 +
6919 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6920 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6921 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6922 +
6923 + if (err)
6924 + break;
6925 +
6926 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6927 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6928 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6929 + {
6930 + unsigned long addr;
6931 +
6932 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6933 + addr = regs->u_regs[UREG_G1];
6934 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6935 +
6936 + if (test_thread_flag(TIF_32BIT))
6937 + addr &= 0xFFFFFFFFUL;
6938 +
6939 + regs->tpc = addr;
6940 + regs->tnpc = addr+4;
6941 + return 2;
6942 + }
6943 + } while (0);
6944 +
6945 + do { /* PaX: patched PLT emulation #2 */
6946 + unsigned int ba;
6947 +
6948 + err = get_user(ba, (unsigned int *)regs->tpc);
6949 +
6950 + if (err)
6951 + break;
6952 +
6953 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6954 + unsigned long addr;
6955 +
6956 + if ((ba & 0xFFC00000U) == 0x30800000U)
6957 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6958 + else
6959 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6960 +
6961 + if (test_thread_flag(TIF_32BIT))
6962 + addr &= 0xFFFFFFFFUL;
6963 +
6964 + regs->tpc = addr;
6965 + regs->tnpc = addr+4;
6966 + return 2;
6967 + }
6968 + } while (0);
6969 +
6970 + do { /* PaX: patched PLT emulation #3 */
6971 + unsigned int sethi, bajmpl, nop;
6972 +
6973 + err = get_user(sethi, (unsigned int *)regs->tpc);
6974 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
6975 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6976 +
6977 + if (err)
6978 + break;
6979 +
6980 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6982 + nop == 0x01000000U)
6983 + {
6984 + unsigned long addr;
6985 +
6986 + addr = (sethi & 0x003FFFFFU) << 10;
6987 + regs->u_regs[UREG_G1] = addr;
6988 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6989 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6990 + else
6991 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6992 +
6993 + if (test_thread_flag(TIF_32BIT))
6994 + addr &= 0xFFFFFFFFUL;
6995 +
6996 + regs->tpc = addr;
6997 + regs->tnpc = addr+4;
6998 + return 2;
6999 + }
7000 + } while (0);
7001 +
7002 + do { /* PaX: patched PLT emulation #4 */
7003 + unsigned int sethi, mov1, call, mov2;
7004 +
7005 + err = get_user(sethi, (unsigned int *)regs->tpc);
7006 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7007 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7008 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7009 +
7010 + if (err)
7011 + break;
7012 +
7013 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7014 + mov1 == 0x8210000FU &&
7015 + (call & 0xC0000000U) == 0x40000000U &&
7016 + mov2 == 0x9E100001U)
7017 + {
7018 + unsigned long addr;
7019 +
7020 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7021 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7022 +
7023 + if (test_thread_flag(TIF_32BIT))
7024 + addr &= 0xFFFFFFFFUL;
7025 +
7026 + regs->tpc = addr;
7027 + regs->tnpc = addr+4;
7028 + return 2;
7029 + }
7030 + } while (0);
7031 +
7032 + do { /* PaX: patched PLT emulation #5 */
7033 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7034 +
7035 + err = get_user(sethi, (unsigned int *)regs->tpc);
7036 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7037 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7038 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7039 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7040 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7041 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7042 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7043 +
7044 + if (err)
7045 + break;
7046 +
7047 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7048 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7049 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7050 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7051 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7052 + sllx == 0x83287020U &&
7053 + jmpl == 0x81C04005U &&
7054 + nop == 0x01000000U)
7055 + {
7056 + unsigned long addr;
7057 +
7058 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7059 + regs->u_regs[UREG_G1] <<= 32;
7060 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7061 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7062 + regs->tpc = addr;
7063 + regs->tnpc = addr+4;
7064 + return 2;
7065 + }
7066 + } while (0);
7067 +
7068 + do { /* PaX: patched PLT emulation #6 */
7069 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7070 +
7071 + err = get_user(sethi, (unsigned int *)regs->tpc);
7072 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7073 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7074 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7075 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7076 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7077 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7078 +
7079 + if (err)
7080 + break;
7081 +
7082 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7083 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7084 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7085 + sllx == 0x83287020U &&
7086 + (or & 0xFFFFE000U) == 0x8A116000U &&
7087 + jmpl == 0x81C04005U &&
7088 + nop == 0x01000000U)
7089 + {
7090 + unsigned long addr;
7091 +
7092 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7093 + regs->u_regs[UREG_G1] <<= 32;
7094 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7095 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7096 + regs->tpc = addr;
7097 + regs->tnpc = addr+4;
7098 + return 2;
7099 + }
7100 + } while (0);
7101 +
7102 + do { /* PaX: unpatched PLT emulation step 1 */
7103 + unsigned int sethi, ba, nop;
7104 +
7105 + err = get_user(sethi, (unsigned int *)regs->tpc);
7106 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7107 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7108 +
7109 + if (err)
7110 + break;
7111 +
7112 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7113 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7114 + nop == 0x01000000U)
7115 + {
7116 + unsigned long addr;
7117 + unsigned int save, call;
7118 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7119 +
7120 + if ((ba & 0xFFC00000U) == 0x30800000U)
7121 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7122 + else
7123 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7124 +
7125 + if (test_thread_flag(TIF_32BIT))
7126 + addr &= 0xFFFFFFFFUL;
7127 +
7128 + err = get_user(save, (unsigned int *)addr);
7129 + err |= get_user(call, (unsigned int *)(addr+4));
7130 + err |= get_user(nop, (unsigned int *)(addr+8));
7131 + if (err)
7132 + break;
7133 +
7134 +#ifdef CONFIG_PAX_DLRESOLVE
7135 + if (save == 0x9DE3BFA8U &&
7136 + (call & 0xC0000000U) == 0x40000000U &&
7137 + nop == 0x01000000U)
7138 + {
7139 + struct vm_area_struct *vma;
7140 + unsigned long call_dl_resolve;
7141 +
7142 + down_read(&current->mm->mmap_sem);
7143 + call_dl_resolve = current->mm->call_dl_resolve;
7144 + up_read(&current->mm->mmap_sem);
7145 + if (likely(call_dl_resolve))
7146 + goto emulate;
7147 +
7148 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7149 +
7150 + down_write(&current->mm->mmap_sem);
7151 + if (current->mm->call_dl_resolve) {
7152 + call_dl_resolve = current->mm->call_dl_resolve;
7153 + up_write(&current->mm->mmap_sem);
7154 + if (vma)
7155 + kmem_cache_free(vm_area_cachep, vma);
7156 + goto emulate;
7157 + }
7158 +
7159 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7160 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7161 + up_write(&current->mm->mmap_sem);
7162 + if (vma)
7163 + kmem_cache_free(vm_area_cachep, vma);
7164 + return 1;
7165 + }
7166 +
7167 + if (pax_insert_vma(vma, call_dl_resolve)) {
7168 + up_write(&current->mm->mmap_sem);
7169 + kmem_cache_free(vm_area_cachep, vma);
7170 + return 1;
7171 + }
7172 +
7173 + current->mm->call_dl_resolve = call_dl_resolve;
7174 + up_write(&current->mm->mmap_sem);
7175 +
7176 +emulate:
7177 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7178 + regs->tpc = call_dl_resolve;
7179 + regs->tnpc = addr+4;
7180 + return 3;
7181 + }
7182 +#endif
7183 +
7184 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7185 + if ((save & 0xFFC00000U) == 0x05000000U &&
7186 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7187 + nop == 0x01000000U)
7188 + {
7189 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7190 + regs->u_regs[UREG_G2] = addr + 4;
7191 + addr = (save & 0x003FFFFFU) << 10;
7192 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7193 +
7194 + if (test_thread_flag(TIF_32BIT))
7195 + addr &= 0xFFFFFFFFUL;
7196 +
7197 + regs->tpc = addr;
7198 + regs->tnpc = addr+4;
7199 + return 3;
7200 + }
7201 +
7202 + /* PaX: 64-bit PLT stub */
7203 + err = get_user(sethi1, (unsigned int *)addr);
7204 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7205 + err |= get_user(or1, (unsigned int *)(addr+8));
7206 + err |= get_user(or2, (unsigned int *)(addr+12));
7207 + err |= get_user(sllx, (unsigned int *)(addr+16));
7208 + err |= get_user(add, (unsigned int *)(addr+20));
7209 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7210 + err |= get_user(nop, (unsigned int *)(addr+28));
7211 + if (err)
7212 + break;
7213 +
7214 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7215 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7216 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7217 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7218 + sllx == 0x89293020U &&
7219 + add == 0x8A010005U &&
7220 + jmpl == 0x89C14000U &&
7221 + nop == 0x01000000U)
7222 + {
7223 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7224 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7225 + regs->u_regs[UREG_G4] <<= 32;
7226 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7227 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7228 + regs->u_regs[UREG_G4] = addr + 24;
7229 + addr = regs->u_regs[UREG_G5];
7230 + regs->tpc = addr;
7231 + regs->tnpc = addr+4;
7232 + return 3;
7233 + }
7234 + }
7235 + } while (0);
7236 +
7237 +#ifdef CONFIG_PAX_DLRESOLVE
7238 + do { /* PaX: unpatched PLT emulation step 2 */
7239 + unsigned int save, call, nop;
7240 +
7241 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7242 + err |= get_user(call, (unsigned int *)regs->tpc);
7243 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7244 + if (err)
7245 + break;
7246 +
7247 + if (save == 0x9DE3BFA8U &&
7248 + (call & 0xC0000000U) == 0x40000000U &&
7249 + nop == 0x01000000U)
7250 + {
7251 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7252 +
7253 + if (test_thread_flag(TIF_32BIT))
7254 + dl_resolve &= 0xFFFFFFFFUL;
7255 +
7256 + regs->u_regs[UREG_RETPC] = regs->tpc;
7257 + regs->tpc = dl_resolve;
7258 + regs->tnpc = dl_resolve+4;
7259 + return 3;
7260 + }
7261 + } while (0);
7262 +#endif
7263 +
7264 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7265 + unsigned int sethi, ba, nop;
7266 +
7267 + err = get_user(sethi, (unsigned int *)regs->tpc);
7268 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7269 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7270 +
7271 + if (err)
7272 + break;
7273 +
7274 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7275 + (ba & 0xFFF00000U) == 0x30600000U &&
7276 + nop == 0x01000000U)
7277 + {
7278 + unsigned long addr;
7279 +
7280 + addr = (sethi & 0x003FFFFFU) << 10;
7281 + regs->u_regs[UREG_G1] = addr;
7282 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7283 +
7284 + if (test_thread_flag(TIF_32BIT))
7285 + addr &= 0xFFFFFFFFUL;
7286 +
7287 + regs->tpc = addr;
7288 + regs->tnpc = addr+4;
7289 + return 2;
7290 + }
7291 + } while (0);
7292 +
7293 +#endif
7294 +
7295 + return 1;
7296 +}
7297 +
7298 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7299 +{
7300 + unsigned long i;
7301 +
7302 + printk(KERN_ERR "PAX: bytes at PC: ");
7303 + for (i = 0; i < 8; i++) {
7304 + unsigned int c;
7305 + if (get_user(c, (unsigned int *)pc+i))
7306 + printk(KERN_CONT "???????? ");
7307 + else
7308 + printk(KERN_CONT "%08x ", c);
7309 + }
7310 + printk("\n");
7311 +}
7312 +#endif
7313 +
7314 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7315 {
7316 struct mm_struct *mm = current->mm;
7317 @@ -343,6 +806,29 @@ retry:
7318 if (!vma)
7319 goto bad_area;
7320
7321 +#ifdef CONFIG_PAX_PAGEEXEC
7322 + /* PaX: detect ITLB misses on non-exec pages */
7323 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7324 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7325 + {
7326 + if (address != regs->tpc)
7327 + goto good_area;
7328 +
7329 + up_read(&mm->mmap_sem);
7330 + switch (pax_handle_fetch_fault(regs)) {
7331 +
7332 +#ifdef CONFIG_PAX_EMUPLT
7333 + case 2:
7334 + case 3:
7335 + return;
7336 +#endif
7337 +
7338 + }
7339 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7340 + do_group_exit(SIGKILL);
7341 + }
7342 +#endif
7343 +
7344 /* Pure DTLB misses do not tell us whether the fault causing
7345 * load/store/atomic was a write or not, it only says that there
7346 * was no match. So in such a case we (carefully) read the
7347 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7348 index 07e1453..0a7d9e9 100644
7349 --- a/arch/sparc/mm/hugetlbpage.c
7350 +++ b/arch/sparc/mm/hugetlbpage.c
7351 @@ -67,7 +67,7 @@ full_search:
7352 }
7353 return -ENOMEM;
7354 }
7355 - if (likely(!vma || addr + len <= vma->vm_start)) {
7356 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7357 /*
7358 * Remember the place where we stopped the search:
7359 */
7360 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7361 /* make sure it can fit in the remaining address space */
7362 if (likely(addr > len)) {
7363 vma = find_vma(mm, addr-len);
7364 - if (!vma || addr <= vma->vm_start) {
7365 + if (check_heap_stack_gap(vma, addr - len, len)) {
7366 /* remember the address as a hint for next time */
7367 return (mm->free_area_cache = addr-len);
7368 }
7369 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7370 if (unlikely(mm->mmap_base < len))
7371 goto bottomup;
7372
7373 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7374 + addr = mm->mmap_base - len;
7375
7376 do {
7377 + addr &= HPAGE_MASK;
7378 /*
7379 * Lookup failure means no vma is above this address,
7380 * else if new region fits below vma->vm_start,
7381 * return with success:
7382 */
7383 vma = find_vma(mm, addr);
7384 - if (likely(!vma || addr+len <= vma->vm_start)) {
7385 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7386 /* remember the address as a hint for next time */
7387 return (mm->free_area_cache = addr);
7388 }
7389 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7390 mm->cached_hole_size = vma->vm_start - addr;
7391
7392 /* try just below the current vma->vm_start */
7393 - addr = (vma->vm_start-len) & HPAGE_MASK;
7394 - } while (likely(len < vma->vm_start));
7395 + addr = skip_heap_stack_gap(vma, len);
7396 + } while (!IS_ERR_VALUE(addr));
7397
7398 bottomup:
7399 /*
7400 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7401 if (addr) {
7402 addr = ALIGN(addr, HPAGE_SIZE);
7403 vma = find_vma(mm, addr);
7404 - if (task_size - len >= addr &&
7405 - (!vma || addr + len <= vma->vm_start))
7406 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7407 return addr;
7408 }
7409 if (mm->get_unmapped_area == arch_get_unmapped_area)
7410 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7411 index f4500c6..889656c 100644
7412 --- a/arch/tile/include/asm/atomic_64.h
7413 +++ b/arch/tile/include/asm/atomic_64.h
7414 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7415
7416 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7417
7418 +#define atomic64_read_unchecked(v) atomic64_read(v)
7419 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7420 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7421 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7422 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7423 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7424 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7425 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7426 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7427 +
7428 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7429 #define smp_mb__before_atomic_dec() smp_mb()
7430 #define smp_mb__after_atomic_dec() smp_mb()
7431 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7432 index 392e533..536b092 100644
7433 --- a/arch/tile/include/asm/cache.h
7434 +++ b/arch/tile/include/asm/cache.h
7435 @@ -15,11 +15,12 @@
7436 #ifndef _ASM_TILE_CACHE_H
7437 #define _ASM_TILE_CACHE_H
7438
7439 +#include <linux/const.h>
7440 #include <arch/chip.h>
7441
7442 /* bytes per L1 data cache line */
7443 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7444 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7445 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7446
7447 /* bytes per L2 cache line */
7448 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7449 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7450 index 9ab078a..d6635c2 100644
7451 --- a/arch/tile/include/asm/uaccess.h
7452 +++ b/arch/tile/include/asm/uaccess.h
7453 @@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7454 const void __user *from,
7455 unsigned long n)
7456 {
7457 - int sz = __compiletime_object_size(to);
7458 + size_t sz = __compiletime_object_size(to);
7459
7460 - if (likely(sz == -1 || sz >= n))
7461 + if (likely(sz == (size_t)-1 || sz >= n))
7462 n = _copy_from_user(to, from, n);
7463 else
7464 copy_from_user_overflow();
7465 diff --git a/arch/um/Makefile b/arch/um/Makefile
7466 index 0970910..13adb57a 100644
7467 --- a/arch/um/Makefile
7468 +++ b/arch/um/Makefile
7469 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7470 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7471 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7472
7473 +ifdef CONSTIFY_PLUGIN
7474 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7475 +endif
7476 +
7477 #This will adjust *FLAGS accordingly to the platform.
7478 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7479
7480 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7481 index 19e1bdd..3665b77 100644
7482 --- a/arch/um/include/asm/cache.h
7483 +++ b/arch/um/include/asm/cache.h
7484 @@ -1,6 +1,7 @@
7485 #ifndef __UM_CACHE_H
7486 #define __UM_CACHE_H
7487
7488 +#include <linux/const.h>
7489
7490 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7491 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7492 @@ -12,6 +13,6 @@
7493 # define L1_CACHE_SHIFT 5
7494 #endif
7495
7496 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7497 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7498
7499 #endif
7500 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7501 index 6c03acd..a5e0215 100644
7502 --- a/arch/um/include/asm/kmap_types.h
7503 +++ b/arch/um/include/asm/kmap_types.h
7504 @@ -23,6 +23,7 @@ enum km_type {
7505 KM_IRQ1,
7506 KM_SOFTIRQ0,
7507 KM_SOFTIRQ1,
7508 + KM_CLEARPAGE,
7509 KM_TYPE_NR
7510 };
7511
7512 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7513 index 7cfc3ce..cbd1a58 100644
7514 --- a/arch/um/include/asm/page.h
7515 +++ b/arch/um/include/asm/page.h
7516 @@ -14,6 +14,9 @@
7517 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7518 #define PAGE_MASK (~(PAGE_SIZE-1))
7519
7520 +#define ktla_ktva(addr) (addr)
7521 +#define ktva_ktla(addr) (addr)
7522 +
7523 #ifndef __ASSEMBLY__
7524
7525 struct page;
7526 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7527 index 0032f92..cd151e0 100644
7528 --- a/arch/um/include/asm/pgtable-3level.h
7529 +++ b/arch/um/include/asm/pgtable-3level.h
7530 @@ -58,6 +58,7 @@
7531 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7532 #define pud_populate(mm, pud, pmd) \
7533 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7534 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7535
7536 #ifdef CONFIG_64BIT
7537 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7538 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7539 index ccb9a9d..cc425bb 100644
7540 --- a/arch/um/kernel/process.c
7541 +++ b/arch/um/kernel/process.c
7542 @@ -407,22 +407,6 @@ int singlestepping(void * t)
7543 return 2;
7544 }
7545
7546 -/*
7547 - * Only x86 and x86_64 have an arch_align_stack().
7548 - * All other arches have "#define arch_align_stack(x) (x)"
7549 - * in their asm/system.h
7550 - * As this is included in UML from asm-um/system-generic.h,
7551 - * we can use it to behave as the subarch does.
7552 - */
7553 -#ifndef arch_align_stack
7554 -unsigned long arch_align_stack(unsigned long sp)
7555 -{
7556 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7557 - sp -= get_random_int() % 8192;
7558 - return sp & ~0xf;
7559 -}
7560 -#endif
7561 -
7562 unsigned long get_wchan(struct task_struct *p)
7563 {
7564 unsigned long stack_page, sp, ip;
7565 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7566 index ad8f795..2c7eec6 100644
7567 --- a/arch/unicore32/include/asm/cache.h
7568 +++ b/arch/unicore32/include/asm/cache.h
7569 @@ -12,8 +12,10 @@
7570 #ifndef __UNICORE_CACHE_H__
7571 #define __UNICORE_CACHE_H__
7572
7573 -#define L1_CACHE_SHIFT (5)
7574 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7575 +#include <linux/const.h>
7576 +
7577 +#define L1_CACHE_SHIFT 5
7578 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7579
7580 /*
7581 * Memory returned by kmalloc() may be used for DMA, so we must make
7582 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7583 index c70684f..698fa4b 100644
7584 --- a/arch/x86/Kconfig
7585 +++ b/arch/x86/Kconfig
7586 @@ -218,7 +218,7 @@ config X86_HT
7587
7588 config X86_32_LAZY_GS
7589 def_bool y
7590 - depends on X86_32 && !CC_STACKPROTECTOR
7591 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7592
7593 config ARCH_HWEIGHT_CFLAGS
7594 string
7595 @@ -1047,7 +1047,7 @@ choice
7596
7597 config NOHIGHMEM
7598 bool "off"
7599 - depends on !X86_NUMAQ
7600 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7601 ---help---
7602 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7603 However, the address space of 32-bit x86 processors is only 4
7604 @@ -1084,7 +1084,7 @@ config NOHIGHMEM
7605
7606 config HIGHMEM4G
7607 bool "4GB"
7608 - depends on !X86_NUMAQ
7609 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7610 ---help---
7611 Select this if you have a 32-bit processor and between 1 and 4
7612 gigabytes of physical RAM.
7613 @@ -1138,7 +1138,7 @@ config PAGE_OFFSET
7614 hex
7615 default 0xB0000000 if VMSPLIT_3G_OPT
7616 default 0x80000000 if VMSPLIT_2G
7617 - default 0x78000000 if VMSPLIT_2G_OPT
7618 + default 0x70000000 if VMSPLIT_2G_OPT
7619 default 0x40000000 if VMSPLIT_1G
7620 default 0xC0000000
7621 depends on X86_32
7622 @@ -1526,6 +1526,7 @@ config SECCOMP
7623
7624 config CC_STACKPROTECTOR
7625 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7626 + depends on X86_64 || !PAX_MEMORY_UDEREF
7627 ---help---
7628 This option turns on the -fstack-protector GCC feature. This
7629 feature puts, at the beginning of functions, a canary value on
7630 @@ -1583,6 +1584,7 @@ config KEXEC_JUMP
7631 config PHYSICAL_START
7632 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7633 default "0x1000000"
7634 + range 0x400000 0x40000000
7635 ---help---
7636 This gives the physical address where the kernel is loaded.
7637
7638 @@ -1646,6 +1648,7 @@ config X86_NEED_RELOCS
7639 config PHYSICAL_ALIGN
7640 hex "Alignment value to which kernel should be aligned" if X86_32
7641 default "0x1000000"
7642 + range 0x400000 0x1000000 if PAX_KERNEXEC
7643 range 0x2000 0x1000000
7644 ---help---
7645 This value puts the alignment restrictions on physical address
7646 @@ -1677,9 +1680,10 @@ config HOTPLUG_CPU
7647 Say N if you want to disable CPU hotplug.
7648
7649 config COMPAT_VDSO
7650 - def_bool y
7651 + def_bool n
7652 prompt "Compat VDSO support"
7653 depends on X86_32 || IA32_EMULATION
7654 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7655 ---help---
7656 Map the 32-bit VDSO to the predictable old-style address too.
7657
7658 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7659 index 706e12e..62e4feb 100644
7660 --- a/arch/x86/Kconfig.cpu
7661 +++ b/arch/x86/Kconfig.cpu
7662 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7663
7664 config X86_F00F_BUG
7665 def_bool y
7666 - depends on M586MMX || M586TSC || M586 || M486 || M386
7667 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7668
7669 config X86_INVD_BUG
7670 def_bool y
7671 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7672
7673 config X86_ALIGNMENT_16
7674 def_bool y
7675 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7676 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7677
7678 config X86_INTEL_USERCOPY
7679 def_bool y
7680 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7681 # generates cmov.
7682 config X86_CMOV
7683 def_bool y
7684 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7685 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7686
7687 config X86_MINIMUM_CPU_FAMILY
7688 int
7689 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7690 index e46c214..ab62fd1 100644
7691 --- a/arch/x86/Kconfig.debug
7692 +++ b/arch/x86/Kconfig.debug
7693 @@ -84,7 +84,7 @@ config X86_PTDUMP
7694 config DEBUG_RODATA
7695 bool "Write protect kernel read-only data structures"
7696 default y
7697 - depends on DEBUG_KERNEL
7698 + depends on DEBUG_KERNEL && BROKEN
7699 ---help---
7700 Mark the kernel read-only data as write-protected in the pagetables,
7701 in order to catch accidental (and incorrect) writes to such const
7702 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7703
7704 config DEBUG_SET_MODULE_RONX
7705 bool "Set loadable kernel module data as NX and text as RO"
7706 - depends on MODULES
7707 + depends on MODULES && BROKEN
7708 ---help---
7709 This option helps catch unintended modifications to loadable
7710 kernel module's text and read-only data. It also prevents execution
7711 @@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7712
7713 config DEBUG_STRICT_USER_COPY_CHECKS
7714 bool "Strict copy size checks"
7715 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7716 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7717 ---help---
7718 Enabling this option turns a certain set of sanity checks for user
7719 copy operations into compile time failures.
7720 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7721 index 1f25214..39422b3 100644
7722 --- a/arch/x86/Makefile
7723 +++ b/arch/x86/Makefile
7724 @@ -46,6 +46,7 @@ else
7725 UTS_MACHINE := x86_64
7726 CHECKFLAGS += -D__x86_64__ -m64
7727
7728 + biarch := $(call cc-option,-m64)
7729 KBUILD_AFLAGS += -m64
7730 KBUILD_CFLAGS += -m64
7731
7732 @@ -222,3 +223,12 @@ define archhelp
7733 echo ' FDARGS="..." arguments for the booted kernel'
7734 echo ' FDINITRD=file initrd for the booted kernel'
7735 endef
7736 +
7737 +define OLD_LD
7738 +
7739 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7740 +*** Please upgrade your binutils to 2.18 or newer
7741 +endef
7742 +
7743 +archprepare:
7744 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7745 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7746 index 5a747dd..ff7b12c 100644
7747 --- a/arch/x86/boot/Makefile
7748 +++ b/arch/x86/boot/Makefile
7749 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7750 $(call cc-option, -fno-stack-protector) \
7751 $(call cc-option, -mpreferred-stack-boundary=2)
7752 KBUILD_CFLAGS += $(call cc-option, -m32)
7753 +ifdef CONSTIFY_PLUGIN
7754 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7755 +endif
7756 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7757 GCOV_PROFILE := n
7758
7759 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7760 index 878e4b9..20537ab 100644
7761 --- a/arch/x86/boot/bitops.h
7762 +++ b/arch/x86/boot/bitops.h
7763 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7764 u8 v;
7765 const u32 *p = (const u32 *)addr;
7766
7767 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7768 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7769 return v;
7770 }
7771
7772 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7773
7774 static inline void set_bit(int nr, void *addr)
7775 {
7776 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7777 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7778 }
7779
7780 #endif /* BOOT_BITOPS_H */
7781 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7782 index 18997e5..83d9c67 100644
7783 --- a/arch/x86/boot/boot.h
7784 +++ b/arch/x86/boot/boot.h
7785 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7786 static inline u16 ds(void)
7787 {
7788 u16 seg;
7789 - asm("movw %%ds,%0" : "=rm" (seg));
7790 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7791 return seg;
7792 }
7793
7794 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7795 static inline int memcmp(const void *s1, const void *s2, size_t len)
7796 {
7797 u8 diff;
7798 - asm("repe; cmpsb; setnz %0"
7799 + asm volatile("repe; cmpsb; setnz %0"
7800 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7801 return diff;
7802 }
7803 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7804 index e398bb5..3a382ca 100644
7805 --- a/arch/x86/boot/compressed/Makefile
7806 +++ b/arch/x86/boot/compressed/Makefile
7807 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7808 KBUILD_CFLAGS += $(cflags-y)
7809 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7810 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7811 +ifdef CONSTIFY_PLUGIN
7812 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7813 +endif
7814
7815 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7816 GCOV_PROFILE := n
7817 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7818 index 4e85f5f..39fa641 100644
7819 --- a/arch/x86/boot/compressed/eboot.c
7820 +++ b/arch/x86/boot/compressed/eboot.c
7821 @@ -142,7 +142,6 @@ again:
7822 *addr = max_addr;
7823 }
7824
7825 -free_pool:
7826 efi_call_phys1(sys_table->boottime->free_pool, map);
7827
7828 fail:
7829 @@ -206,7 +205,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7830 if (i == map_size / desc_size)
7831 status = EFI_NOT_FOUND;
7832
7833 -free_pool:
7834 efi_call_phys1(sys_table->boottime->free_pool, map);
7835 fail:
7836 return status;
7837 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7838 index c85e3ac..6f5aa80 100644
7839 --- a/arch/x86/boot/compressed/head_32.S
7840 +++ b/arch/x86/boot/compressed/head_32.S
7841 @@ -106,7 +106,7 @@ preferred_addr:
7842 notl %eax
7843 andl %eax, %ebx
7844 #else
7845 - movl $LOAD_PHYSICAL_ADDR, %ebx
7846 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7847 #endif
7848
7849 /* Target address to relocate to for decompression */
7850 @@ -192,7 +192,7 @@ relocated:
7851 * and where it was actually loaded.
7852 */
7853 movl %ebp, %ebx
7854 - subl $LOAD_PHYSICAL_ADDR, %ebx
7855 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7856 jz 2f /* Nothing to be done if loaded at compiled addr. */
7857 /*
7858 * Process relocations.
7859 @@ -200,8 +200,7 @@ relocated:
7860
7861 1: subl $4, %edi
7862 movl (%edi), %ecx
7863 - testl %ecx, %ecx
7864 - jz 2f
7865 + jecxz 2f
7866 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7867 jmp 1b
7868 2:
7869 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7870 index 87e03a1..0d94c76 100644
7871 --- a/arch/x86/boot/compressed/head_64.S
7872 +++ b/arch/x86/boot/compressed/head_64.S
7873 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7874 notl %eax
7875 andl %eax, %ebx
7876 #else
7877 - movl $LOAD_PHYSICAL_ADDR, %ebx
7878 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7879 #endif
7880
7881 /* Target address to relocate to for decompression */
7882 @@ -263,7 +263,7 @@ preferred_addr:
7883 notq %rax
7884 andq %rax, %rbp
7885 #else
7886 - movq $LOAD_PHYSICAL_ADDR, %rbp
7887 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7888 #endif
7889
7890 /* Target address to relocate to for decompression */
7891 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7892 index 7116dcb..d9ae1d7 100644
7893 --- a/arch/x86/boot/compressed/misc.c
7894 +++ b/arch/x86/boot/compressed/misc.c
7895 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7896 case PT_LOAD:
7897 #ifdef CONFIG_RELOCATABLE
7898 dest = output;
7899 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7900 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7901 #else
7902 dest = (void *)(phdr->p_paddr);
7903 #endif
7904 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7905 error("Destination address too large");
7906 #endif
7907 #ifndef CONFIG_RELOCATABLE
7908 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7909 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7910 error("Wrong destination address");
7911 #endif
7912
7913 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7914 index 4d3ff03..e4972ff 100644
7915 --- a/arch/x86/boot/cpucheck.c
7916 +++ b/arch/x86/boot/cpucheck.c
7917 @@ -74,7 +74,7 @@ static int has_fpu(void)
7918 u16 fcw = -1, fsw = -1;
7919 u32 cr0;
7920
7921 - asm("movl %%cr0,%0" : "=r" (cr0));
7922 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7923 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7924 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7925 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7926 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7927 {
7928 u32 f0, f1;
7929
7930 - asm("pushfl ; "
7931 + asm volatile("pushfl ; "
7932 "pushfl ; "
7933 "popl %0 ; "
7934 "movl %0,%1 ; "
7935 @@ -115,7 +115,7 @@ static void get_flags(void)
7936 set_bit(X86_FEATURE_FPU, cpu.flags);
7937
7938 if (has_eflag(X86_EFLAGS_ID)) {
7939 - asm("cpuid"
7940 + asm volatile("cpuid"
7941 : "=a" (max_intel_level),
7942 "=b" (cpu_vendor[0]),
7943 "=d" (cpu_vendor[1]),
7944 @@ -124,7 +124,7 @@ static void get_flags(void)
7945
7946 if (max_intel_level >= 0x00000001 &&
7947 max_intel_level <= 0x0000ffff) {
7948 - asm("cpuid"
7949 + asm volatile("cpuid"
7950 : "=a" (tfms),
7951 "=c" (cpu.flags[4]),
7952 "=d" (cpu.flags[0])
7953 @@ -136,7 +136,7 @@ static void get_flags(void)
7954 cpu.model += ((tfms >> 16) & 0xf) << 4;
7955 }
7956
7957 - asm("cpuid"
7958 + asm volatile("cpuid"
7959 : "=a" (max_amd_level)
7960 : "a" (0x80000000)
7961 : "ebx", "ecx", "edx");
7962 @@ -144,7 +144,7 @@ static void get_flags(void)
7963 if (max_amd_level >= 0x80000001 &&
7964 max_amd_level <= 0x8000ffff) {
7965 u32 eax = 0x80000001;
7966 - asm("cpuid"
7967 + asm volatile("cpuid"
7968 : "+a" (eax),
7969 "=c" (cpu.flags[6]),
7970 "=d" (cpu.flags[1])
7971 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7972 u32 ecx = MSR_K7_HWCR;
7973 u32 eax, edx;
7974
7975 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7976 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7977 eax &= ~(1 << 15);
7978 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7979 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7980
7981 get_flags(); /* Make sure it really did something */
7982 err = check_flags();
7983 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7984 u32 ecx = MSR_VIA_FCR;
7985 u32 eax, edx;
7986
7987 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7988 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7989 eax |= (1<<1)|(1<<7);
7990 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7991 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7992
7993 set_bit(X86_FEATURE_CX8, cpu.flags);
7994 err = check_flags();
7995 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7996 u32 eax, edx;
7997 u32 level = 1;
7998
7999 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8000 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8001 - asm("cpuid"
8002 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8003 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8004 + asm volatile("cpuid"
8005 : "+a" (level), "=d" (cpu.flags[0])
8006 : : "ecx", "ebx");
8007 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8008 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8009
8010 err = check_flags();
8011 }
8012 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8013 index efe5acf..22a3784 100644
8014 --- a/arch/x86/boot/header.S
8015 +++ b/arch/x86/boot/header.S
8016 @@ -391,10 +391,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8017 # single linked list of
8018 # struct setup_data
8019
8020 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8021 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8022
8023 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8024 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8025 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
8026 +#else
8027 #define VO_INIT_SIZE (VO__end - VO__text)
8028 +#endif
8029 #if ZO_INIT_SIZE > VO_INIT_SIZE
8030 #define INIT_SIZE ZO_INIT_SIZE
8031 #else
8032 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8033 index db75d07..8e6d0af 100644
8034 --- a/arch/x86/boot/memory.c
8035 +++ b/arch/x86/boot/memory.c
8036 @@ -19,7 +19,7 @@
8037
8038 static int detect_memory_e820(void)
8039 {
8040 - int count = 0;
8041 + unsigned int count = 0;
8042 struct biosregs ireg, oreg;
8043 struct e820entry *desc = boot_params.e820_map;
8044 static struct e820entry buf; /* static so it is zeroed */
8045 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8046 index 11e8c6e..fdbb1ed 100644
8047 --- a/arch/x86/boot/video-vesa.c
8048 +++ b/arch/x86/boot/video-vesa.c
8049 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8050
8051 boot_params.screen_info.vesapm_seg = oreg.es;
8052 boot_params.screen_info.vesapm_off = oreg.di;
8053 + boot_params.screen_info.vesapm_size = oreg.cx;
8054 }
8055
8056 /*
8057 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8058 index 43eda28..5ab5fdb 100644
8059 --- a/arch/x86/boot/video.c
8060 +++ b/arch/x86/boot/video.c
8061 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8062 static unsigned int get_entry(void)
8063 {
8064 char entry_buf[4];
8065 - int i, len = 0;
8066 + unsigned int i, len = 0;
8067 int key;
8068 unsigned int v;
8069
8070 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8071 index 5b577d5..3c1fed4 100644
8072 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8073 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8074 @@ -8,6 +8,8 @@
8075 * including this sentence is retained in full.
8076 */
8077
8078 +#include <asm/alternative-asm.h>
8079 +
8080 .extern crypto_ft_tab
8081 .extern crypto_it_tab
8082 .extern crypto_fl_tab
8083 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8084 je B192; \
8085 leaq 32(r9),r9;
8086
8087 +#define ret pax_force_retaddr 0, 1; ret
8088 +
8089 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8090 movq r1,r2; \
8091 movq r3,r4; \
8092 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8093 index 3470624..201259d 100644
8094 --- a/arch/x86/crypto/aesni-intel_asm.S
8095 +++ b/arch/x86/crypto/aesni-intel_asm.S
8096 @@ -31,6 +31,7 @@
8097
8098 #include <linux/linkage.h>
8099 #include <asm/inst.h>
8100 +#include <asm/alternative-asm.h>
8101
8102 #ifdef __x86_64__
8103 .data
8104 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8105 pop %r14
8106 pop %r13
8107 pop %r12
8108 + pax_force_retaddr 0, 1
8109 ret
8110 +ENDPROC(aesni_gcm_dec)
8111
8112
8113 /*****************************************************************************
8114 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8115 pop %r14
8116 pop %r13
8117 pop %r12
8118 + pax_force_retaddr 0, 1
8119 ret
8120 +ENDPROC(aesni_gcm_enc)
8121
8122 #endif
8123
8124 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8125 pxor %xmm1, %xmm0
8126 movaps %xmm0, (TKEYP)
8127 add $0x10, TKEYP
8128 + pax_force_retaddr_bts
8129 ret
8130
8131 .align 4
8132 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8133 shufps $0b01001110, %xmm2, %xmm1
8134 movaps %xmm1, 0x10(TKEYP)
8135 add $0x20, TKEYP
8136 + pax_force_retaddr_bts
8137 ret
8138
8139 .align 4
8140 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8141
8142 movaps %xmm0, (TKEYP)
8143 add $0x10, TKEYP
8144 + pax_force_retaddr_bts
8145 ret
8146
8147 .align 4
8148 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8149 pxor %xmm1, %xmm2
8150 movaps %xmm2, (TKEYP)
8151 add $0x10, TKEYP
8152 + pax_force_retaddr_bts
8153 ret
8154
8155 /*
8156 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8157 #ifndef __x86_64__
8158 popl KEYP
8159 #endif
8160 + pax_force_retaddr 0, 1
8161 ret
8162 +ENDPROC(aesni_set_key)
8163
8164 /*
8165 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8166 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8167 popl KLEN
8168 popl KEYP
8169 #endif
8170 + pax_force_retaddr 0, 1
8171 ret
8172 +ENDPROC(aesni_enc)
8173
8174 /*
8175 * _aesni_enc1: internal ABI
8176 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8177 AESENC KEY STATE
8178 movaps 0x70(TKEYP), KEY
8179 AESENCLAST KEY STATE
8180 + pax_force_retaddr_bts
8181 ret
8182
8183 /*
8184 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8185 AESENCLAST KEY STATE2
8186 AESENCLAST KEY STATE3
8187 AESENCLAST KEY STATE4
8188 + pax_force_retaddr_bts
8189 ret
8190
8191 /*
8192 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8193 popl KLEN
8194 popl KEYP
8195 #endif
8196 + pax_force_retaddr 0, 1
8197 ret
8198 +ENDPROC(aesni_dec)
8199
8200 /*
8201 * _aesni_dec1: internal ABI
8202 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8203 AESDEC KEY STATE
8204 movaps 0x70(TKEYP), KEY
8205 AESDECLAST KEY STATE
8206 + pax_force_retaddr_bts
8207 ret
8208
8209 /*
8210 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8211 AESDECLAST KEY STATE2
8212 AESDECLAST KEY STATE3
8213 AESDECLAST KEY STATE4
8214 + pax_force_retaddr_bts
8215 ret
8216
8217 /*
8218 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8219 popl KEYP
8220 popl LEN
8221 #endif
8222 + pax_force_retaddr 0, 1
8223 ret
8224 +ENDPROC(aesni_ecb_enc)
8225
8226 /*
8227 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8228 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8229 popl KEYP
8230 popl LEN
8231 #endif
8232 + pax_force_retaddr 0, 1
8233 ret
8234 +ENDPROC(aesni_ecb_dec)
8235
8236 /*
8237 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8238 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8239 popl LEN
8240 popl IVP
8241 #endif
8242 + pax_force_retaddr 0, 1
8243 ret
8244 +ENDPROC(aesni_cbc_enc)
8245
8246 /*
8247 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8248 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8249 popl LEN
8250 popl IVP
8251 #endif
8252 + pax_force_retaddr 0, 1
8253 ret
8254 +ENDPROC(aesni_cbc_dec)
8255
8256 #ifdef __x86_64__
8257 .align 16
8258 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
8259 mov $1, TCTR_LOW
8260 MOVQ_R64_XMM TCTR_LOW INC
8261 MOVQ_R64_XMM CTR TCTR_LOW
8262 + pax_force_retaddr_bts
8263 ret
8264
8265 /*
8266 @@ -2554,6 +2582,7 @@ _aesni_inc:
8267 .Linc_low:
8268 movaps CTR, IV
8269 PSHUFB_XMM BSWAP_MASK IV
8270 + pax_force_retaddr_bts
8271 ret
8272
8273 /*
8274 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8275 .Lctr_enc_ret:
8276 movups IV, (IVP)
8277 .Lctr_enc_just_ret:
8278 + pax_force_retaddr 0, 1
8279 ret
8280 +ENDPROC(aesni_ctr_enc)
8281 #endif
8282 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8283 index 391d245..67f35c2 100644
8284 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8285 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8286 @@ -20,6 +20,8 @@
8287 *
8288 */
8289
8290 +#include <asm/alternative-asm.h>
8291 +
8292 .file "blowfish-x86_64-asm.S"
8293 .text
8294
8295 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8296 jnz __enc_xor;
8297
8298 write_block();
8299 + pax_force_retaddr 0, 1
8300 ret;
8301 __enc_xor:
8302 xor_block();
8303 + pax_force_retaddr 0, 1
8304 ret;
8305
8306 .align 8
8307 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8308
8309 movq %r11, %rbp;
8310
8311 + pax_force_retaddr 0, 1
8312 ret;
8313
8314 /**********************************************************************
8315 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8316
8317 popq %rbx;
8318 popq %rbp;
8319 + pax_force_retaddr 0, 1
8320 ret;
8321
8322 __enc_xor4:
8323 @@ -349,6 +355,7 @@ __enc_xor4:
8324
8325 popq %rbx;
8326 popq %rbp;
8327 + pax_force_retaddr 0, 1
8328 ret;
8329
8330 .align 8
8331 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8332 popq %rbx;
8333 popq %rbp;
8334
8335 + pax_force_retaddr 0, 1
8336 ret;
8337
8338 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8339 index 0b33743..7a56206 100644
8340 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8341 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8342 @@ -20,6 +20,8 @@
8343 *
8344 */
8345
8346 +#include <asm/alternative-asm.h>
8347 +
8348 .file "camellia-x86_64-asm_64.S"
8349 .text
8350
8351 @@ -229,12 +231,14 @@ __enc_done:
8352 enc_outunpack(mov, RT1);
8353
8354 movq RRBP, %rbp;
8355 + pax_force_retaddr 0, 1
8356 ret;
8357
8358 __enc_xor:
8359 enc_outunpack(xor, RT1);
8360
8361 movq RRBP, %rbp;
8362 + pax_force_retaddr 0, 1
8363 ret;
8364
8365 .global camellia_dec_blk;
8366 @@ -275,6 +279,7 @@ __dec_rounds16:
8367 dec_outunpack();
8368
8369 movq RRBP, %rbp;
8370 + pax_force_retaddr 0, 1
8371 ret;
8372
8373 /**********************************************************************
8374 @@ -468,6 +473,7 @@ __enc2_done:
8375
8376 movq RRBP, %rbp;
8377 popq %rbx;
8378 + pax_force_retaddr 0, 1
8379 ret;
8380
8381 __enc2_xor:
8382 @@ -475,6 +481,7 @@ __enc2_xor:
8383
8384 movq RRBP, %rbp;
8385 popq %rbx;
8386 + pax_force_retaddr 0, 1
8387 ret;
8388
8389 .global camellia_dec_blk_2way;
8390 @@ -517,4 +524,5 @@ __dec2_rounds16:
8391
8392 movq RRBP, %rbp;
8393 movq RXOR, %rbx;
8394 + pax_force_retaddr 0, 1
8395 ret;
8396 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8397 index 6214a9b..1f4fc9a 100644
8398 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8399 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8400 @@ -1,3 +1,5 @@
8401 +#include <asm/alternative-asm.h>
8402 +
8403 # enter ECRYPT_encrypt_bytes
8404 .text
8405 .p2align 5
8406 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8407 add %r11,%rsp
8408 mov %rdi,%rax
8409 mov %rsi,%rdx
8410 + pax_force_retaddr 0, 1
8411 ret
8412 # bytesatleast65:
8413 ._bytesatleast65:
8414 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8415 add %r11,%rsp
8416 mov %rdi,%rax
8417 mov %rsi,%rdx
8418 + pax_force_retaddr
8419 ret
8420 # enter ECRYPT_ivsetup
8421 .text
8422 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8423 add %r11,%rsp
8424 mov %rdi,%rax
8425 mov %rsi,%rdx
8426 + pax_force_retaddr
8427 ret
8428 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8429 index 3ee1ff0..cbc568b 100644
8430 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8431 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8432 @@ -24,6 +24,8 @@
8433 *
8434 */
8435
8436 +#include <asm/alternative-asm.h>
8437 +
8438 .file "serpent-sse2-x86_64-asm_64.S"
8439 .text
8440
8441 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8442 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8443 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8444
8445 + pax_force_retaddr
8446 ret;
8447
8448 __enc_xor8:
8449 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8450 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8451
8452 + pax_force_retaddr
8453 ret;
8454
8455 .align 8
8456 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8457 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8458 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8459
8460 + pax_force_retaddr
8461 ret;
8462 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8463 index b2c2f57..8470cab 100644
8464 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8465 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8466 @@ -28,6 +28,8 @@
8467 * (at your option) any later version.
8468 */
8469
8470 +#include <asm/alternative-asm.h>
8471 +
8472 #define CTX %rdi // arg1
8473 #define BUF %rsi // arg2
8474 #define CNT %rdx // arg3
8475 @@ -104,6 +106,7 @@
8476 pop %r12
8477 pop %rbp
8478 pop %rbx
8479 + pax_force_retaddr 0, 1
8480 ret
8481
8482 .size \name, .-\name
8483 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8484 index 5b012a2..36d5364 100644
8485 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8486 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8487 @@ -20,6 +20,8 @@
8488 *
8489 */
8490
8491 +#include <asm/alternative-asm.h>
8492 +
8493 .file "twofish-x86_64-asm-3way.S"
8494 .text
8495
8496 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8497 popq %r13;
8498 popq %r14;
8499 popq %r15;
8500 + pax_force_retaddr 0, 1
8501 ret;
8502
8503 __enc_xor3:
8504 @@ -271,6 +274,7 @@ __enc_xor3:
8505 popq %r13;
8506 popq %r14;
8507 popq %r15;
8508 + pax_force_retaddr 0, 1
8509 ret;
8510
8511 .global twofish_dec_blk_3way
8512 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8513 popq %r13;
8514 popq %r14;
8515 popq %r15;
8516 + pax_force_retaddr 0, 1
8517 ret;
8518
8519 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8520 index 7bcf3fc..f53832f 100644
8521 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8522 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8523 @@ -21,6 +21,7 @@
8524 .text
8525
8526 #include <asm/asm-offsets.h>
8527 +#include <asm/alternative-asm.h>
8528
8529 #define a_offset 0
8530 #define b_offset 4
8531 @@ -268,6 +269,7 @@ twofish_enc_blk:
8532
8533 popq R1
8534 movq $1,%rax
8535 + pax_force_retaddr 0, 1
8536 ret
8537
8538 twofish_dec_blk:
8539 @@ -319,4 +321,5 @@ twofish_dec_blk:
8540
8541 popq R1
8542 movq $1,%rax
8543 + pax_force_retaddr 0, 1
8544 ret
8545 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8546 index 07b3a68..bd2a388 100644
8547 --- a/arch/x86/ia32/ia32_aout.c
8548 +++ b/arch/x86/ia32/ia32_aout.c
8549 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8550 unsigned long dump_start, dump_size;
8551 struct user32 dump;
8552
8553 + memset(&dump, 0, sizeof(dump));
8554 +
8555 fs = get_fs();
8556 set_fs(KERNEL_DS);
8557 has_dumped = 1;
8558 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8559 index 673ac9b..7a8c5df 100644
8560 --- a/arch/x86/ia32/ia32_signal.c
8561 +++ b/arch/x86/ia32/ia32_signal.c
8562 @@ -162,7 +162,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8563 }
8564 seg = get_fs();
8565 set_fs(KERNEL_DS);
8566 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8567 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8568 set_fs(seg);
8569 if (ret >= 0 && uoss_ptr) {
8570 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8571 @@ -361,7 +361,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8572 */
8573 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8574 size_t frame_size,
8575 - void **fpstate)
8576 + void __user **fpstate)
8577 {
8578 unsigned long sp;
8579
8580 @@ -382,7 +382,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8581
8582 if (used_math()) {
8583 sp = sp - sig_xstate_ia32_size;
8584 - *fpstate = (struct _fpstate_ia32 *) sp;
8585 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8586 if (save_i387_xstate_ia32(*fpstate) < 0)
8587 return (void __user *) -1L;
8588 }
8589 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8590 sp -= frame_size;
8591 /* Align the stack pointer according to the i386 ABI,
8592 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8593 - sp = ((sp + 4) & -16ul) - 4;
8594 + sp = ((sp - 12) & -16ul) - 4;
8595 return (void __user *) sp;
8596 }
8597
8598 @@ -448,7 +448,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8599 * These are actually not used anymore, but left because some
8600 * gdb versions depend on them as a marker.
8601 */
8602 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8603 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8604 } put_user_catch(err);
8605
8606 if (err)
8607 @@ -490,7 +490,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8608 0xb8,
8609 __NR_ia32_rt_sigreturn,
8610 0x80cd,
8611 - 0,
8612 + 0
8613 };
8614
8615 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8616 @@ -520,16 +520,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8617
8618 if (ka->sa.sa_flags & SA_RESTORER)
8619 restorer = ka->sa.sa_restorer;
8620 + else if (current->mm->context.vdso)
8621 + /* Return stub is in 32bit vsyscall page */
8622 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8623 else
8624 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8625 - rt_sigreturn);
8626 + restorer = &frame->retcode;
8627 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8628
8629 /*
8630 * Not actually used anymore, but left because some gdb
8631 * versions need it.
8632 */
8633 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8634 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8635 } put_user_catch(err);
8636
8637 if (err)
8638 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8639 index 20e5f7b..f33c779 100644
8640 --- a/arch/x86/ia32/ia32entry.S
8641 +++ b/arch/x86/ia32/ia32entry.S
8642 @@ -14,8 +14,10 @@
8643 #include <asm/segment.h>
8644 #include <asm/irqflags.h>
8645 #include <asm/asm.h>
8646 +#include <asm/pgtable.h>
8647 #include <linux/linkage.h>
8648 #include <linux/err.h>
8649 +#include <asm/alternative-asm.h>
8650
8651 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8652 #include <linux/elf-em.h>
8653 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
8654 ENDPROC(native_irq_enable_sysexit)
8655 #endif
8656
8657 + .macro pax_enter_kernel_user
8658 + pax_set_fptr_mask
8659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8660 + call pax_enter_kernel_user
8661 +#endif
8662 + .endm
8663 +
8664 + .macro pax_exit_kernel_user
8665 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8666 + call pax_exit_kernel_user
8667 +#endif
8668 +#ifdef CONFIG_PAX_RANDKSTACK
8669 + pushq %rax
8670 + pushq %r11
8671 + call pax_randomize_kstack
8672 + popq %r11
8673 + popq %rax
8674 +#endif
8675 + .endm
8676 +
8677 +.macro pax_erase_kstack
8678 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8679 + call pax_erase_kstack
8680 +#endif
8681 +.endm
8682 +
8683 /*
8684 * 32bit SYSENTER instruction entry.
8685 *
8686 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
8687 CFI_REGISTER rsp,rbp
8688 SWAPGS_UNSAFE_STACK
8689 movq PER_CPU_VAR(kernel_stack), %rsp
8690 - addq $(KERNEL_STACK_OFFSET),%rsp
8691 - /*
8692 - * No need to follow this irqs on/off section: the syscall
8693 - * disabled irqs, here we enable it straight after entry:
8694 - */
8695 - ENABLE_INTERRUPTS(CLBR_NONE)
8696 movl %ebp,%ebp /* zero extension */
8697 pushq_cfi $__USER32_DS
8698 /*CFI_REL_OFFSET ss,0*/
8699 @@ -134,22 +156,42 @@ ENTRY(ia32_sysenter_target)
8700 CFI_REL_OFFSET rsp,0
8701 pushfq_cfi
8702 /*CFI_REL_OFFSET rflags,0*/
8703 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8704 - CFI_REGISTER rip,r10
8705 + orl $X86_EFLAGS_IF,(%rsp)
8706 + GET_THREAD_INFO(%r11)
8707 + movl TI_sysenter_return(%r11), %r11d
8708 + CFI_REGISTER rip,r11
8709 pushq_cfi $__USER32_CS
8710 /*CFI_REL_OFFSET cs,0*/
8711 movl %eax, %eax
8712 - pushq_cfi %r10
8713 + pushq_cfi %r11
8714 CFI_REL_OFFSET rip,0
8715 pushq_cfi %rax
8716 cld
8717 SAVE_ARGS 0,1,0
8718 + pax_enter_kernel_user
8719 +
8720 +#ifdef CONFIG_PAX_RANDKSTACK
8721 + pax_erase_kstack
8722 +#endif
8723 +
8724 + /*
8725 + * No need to follow this irqs on/off section: the syscall
8726 + * disabled irqs, here we enable it straight after entry:
8727 + */
8728 + ENABLE_INTERRUPTS(CLBR_NONE)
8729 /* no need to do an access_ok check here because rbp has been
8730 32bit zero extended */
8731 +
8732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8733 + mov $PAX_USER_SHADOW_BASE,%r11
8734 + add %r11,%rbp
8735 +#endif
8736 +
8737 1: movl (%rbp),%ebp
8738 _ASM_EXTABLE(1b,ia32_badarg)
8739 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8740 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8741 + GET_THREAD_INFO(%r11)
8742 + orl $TS_COMPAT,TI_status(%r11)
8743 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8744 CFI_REMEMBER_STATE
8745 jnz sysenter_tracesys
8746 cmpq $(IA32_NR_syscalls-1),%rax
8747 @@ -159,12 +201,15 @@ sysenter_do_call:
8748 sysenter_dispatch:
8749 call *ia32_sys_call_table(,%rax,8)
8750 movq %rax,RAX-ARGOFFSET(%rsp)
8751 + GET_THREAD_INFO(%r11)
8752 DISABLE_INTERRUPTS(CLBR_NONE)
8753 TRACE_IRQS_OFF
8754 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8755 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8756 jnz sysexit_audit
8757 sysexit_from_sys_call:
8758 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8759 + pax_exit_kernel_user
8760 + pax_erase_kstack
8761 + andl $~TS_COMPAT,TI_status(%r11)
8762 /* clear IF, that popfq doesn't enable interrupts early */
8763 andl $~0x200,EFLAGS-R11(%rsp)
8764 movl RIP-R11(%rsp),%edx /* User %eip */
8765 @@ -190,6 +235,9 @@ sysexit_from_sys_call:
8766 movl %eax,%esi /* 2nd arg: syscall number */
8767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8768 call __audit_syscall_entry
8769 +
8770 + pax_erase_kstack
8771 +
8772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8773 cmpq $(IA32_NR_syscalls-1),%rax
8774 ja ia32_badsys
8775 @@ -201,7 +249,7 @@ sysexit_from_sys_call:
8776 .endm
8777
8778 .macro auditsys_exit exit
8779 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8780 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8781 jnz ia32_ret_from_sys_call
8782 TRACE_IRQS_ON
8783 sti
8784 @@ -212,11 +260,12 @@ sysexit_from_sys_call:
8785 1: setbe %al /* 1 if error, 0 if not */
8786 movzbl %al,%edi /* zero-extend that into %edi */
8787 call __audit_syscall_exit
8788 + GET_THREAD_INFO(%r11)
8789 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8790 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8791 cli
8792 TRACE_IRQS_OFF
8793 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8794 + testl %edi,TI_flags(%r11)
8795 jz \exit
8796 CLEAR_RREGS -ARGOFFSET
8797 jmp int_with_check
8798 @@ -234,7 +283,7 @@ sysexit_audit:
8799
8800 sysenter_tracesys:
8801 #ifdef CONFIG_AUDITSYSCALL
8802 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8803 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8804 jz sysenter_auditsys
8805 #endif
8806 SAVE_REST
8807 @@ -246,6 +295,9 @@ sysenter_tracesys:
8808 RESTORE_REST
8809 cmpq $(IA32_NR_syscalls-1),%rax
8810 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
8811 +
8812 + pax_erase_kstack
8813 +
8814 jmp sysenter_do_call
8815 CFI_ENDPROC
8816 ENDPROC(ia32_sysenter_target)
8817 @@ -273,19 +325,25 @@ ENDPROC(ia32_sysenter_target)
8818 ENTRY(ia32_cstar_target)
8819 CFI_STARTPROC32 simple
8820 CFI_SIGNAL_FRAME
8821 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8822 + CFI_DEF_CFA rsp,0
8823 CFI_REGISTER rip,rcx
8824 /*CFI_REGISTER rflags,r11*/
8825 SWAPGS_UNSAFE_STACK
8826 movl %esp,%r8d
8827 CFI_REGISTER rsp,r8
8828 movq PER_CPU_VAR(kernel_stack),%rsp
8829 + SAVE_ARGS 8*6,0,0
8830 + pax_enter_kernel_user
8831 +
8832 +#ifdef CONFIG_PAX_RANDKSTACK
8833 + pax_erase_kstack
8834 +#endif
8835 +
8836 /*
8837 * No need to follow this irqs on/off section: the syscall
8838 * disabled irqs and here we enable it straight after entry:
8839 */
8840 ENABLE_INTERRUPTS(CLBR_NONE)
8841 - SAVE_ARGS 8,0,0
8842 movl %eax,%eax /* zero extension */
8843 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8844 movq %rcx,RIP-ARGOFFSET(%rsp)
8845 @@ -301,10 +359,17 @@ ENTRY(ia32_cstar_target)
8846 /* no need to do an access_ok check here because r8 has been
8847 32bit zero extended */
8848 /* hardware stack frame is complete now */
8849 +
8850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8851 + mov $PAX_USER_SHADOW_BASE,%r11
8852 + add %r11,%r8
8853 +#endif
8854 +
8855 1: movl (%r8),%r9d
8856 _ASM_EXTABLE(1b,ia32_badarg)
8857 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8858 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8859 + GET_THREAD_INFO(%r11)
8860 + orl $TS_COMPAT,TI_status(%r11)
8861 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8862 CFI_REMEMBER_STATE
8863 jnz cstar_tracesys
8864 cmpq $IA32_NR_syscalls-1,%rax
8865 @@ -314,12 +379,15 @@ cstar_do_call:
8866 cstar_dispatch:
8867 call *ia32_sys_call_table(,%rax,8)
8868 movq %rax,RAX-ARGOFFSET(%rsp)
8869 + GET_THREAD_INFO(%r11)
8870 DISABLE_INTERRUPTS(CLBR_NONE)
8871 TRACE_IRQS_OFF
8872 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8873 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8874 jnz sysretl_audit
8875 sysretl_from_sys_call:
8876 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8877 + pax_exit_kernel_user
8878 + pax_erase_kstack
8879 + andl $~TS_COMPAT,TI_status(%r11)
8880 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8881 movl RIP-ARGOFFSET(%rsp),%ecx
8882 CFI_REGISTER rip,rcx
8883 @@ -347,7 +415,7 @@ sysretl_audit:
8884
8885 cstar_tracesys:
8886 #ifdef CONFIG_AUDITSYSCALL
8887 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8888 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8889 jz cstar_auditsys
8890 #endif
8891 xchgl %r9d,%ebp
8892 @@ -361,6 +429,9 @@ cstar_tracesys:
8893 xchgl %ebp,%r9d
8894 cmpq $(IA32_NR_syscalls-1),%rax
8895 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
8896 +
8897 + pax_erase_kstack
8898 +
8899 jmp cstar_do_call
8900 END(ia32_cstar_target)
8901
8902 @@ -401,19 +472,26 @@ ENTRY(ia32_syscall)
8903 CFI_REL_OFFSET rip,RIP-RIP
8904 PARAVIRT_ADJUST_EXCEPTION_FRAME
8905 SWAPGS
8906 - /*
8907 - * No need to follow this irqs on/off section: the syscall
8908 - * disabled irqs and here we enable it straight after entry:
8909 - */
8910 - ENABLE_INTERRUPTS(CLBR_NONE)
8911 movl %eax,%eax
8912 pushq_cfi %rax
8913 cld
8914 /* note the registers are not zero extended to the sf.
8915 this could be a problem. */
8916 SAVE_ARGS 0,1,0
8917 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8918 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8919 + pax_enter_kernel_user
8920 +
8921 +#ifdef CONFIG_PAX_RANDKSTACK
8922 + pax_erase_kstack
8923 +#endif
8924 +
8925 + /*
8926 + * No need to follow this irqs on/off section: the syscall
8927 + * disabled irqs and here we enable it straight after entry:
8928 + */
8929 + ENABLE_INTERRUPTS(CLBR_NONE)
8930 + GET_THREAD_INFO(%r11)
8931 + orl $TS_COMPAT,TI_status(%r11)
8932 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8933 jnz ia32_tracesys
8934 cmpq $(IA32_NR_syscalls-1),%rax
8935 ja ia32_badsys
8936 @@ -436,6 +514,9 @@ ia32_tracesys:
8937 RESTORE_REST
8938 cmpq $(IA32_NR_syscalls-1),%rax
8939 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
8940 +
8941 + pax_erase_kstack
8942 +
8943 jmp ia32_do_call
8944 END(ia32_syscall)
8945
8946 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8947 index 4540bec..714d913 100644
8948 --- a/arch/x86/ia32/sys_ia32.c
8949 +++ b/arch/x86/ia32/sys_ia32.c
8950 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8951 */
8952 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8953 {
8954 - typeof(ubuf->st_uid) uid = 0;
8955 - typeof(ubuf->st_gid) gid = 0;
8956 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8957 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8958 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
8959 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
8960 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8961 @@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
8962 return ret;
8963 }
8964
8965 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8966 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8967 int options)
8968 {
8969 return compat_sys_wait4(pid, stat_addr, options, NULL);
8970 @@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8971 mm_segment_t old_fs = get_fs();
8972
8973 set_fs(KERNEL_DS);
8974 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8975 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8976 set_fs(old_fs);
8977 if (put_compat_timespec(&t, interval))
8978 return -EFAULT;
8979 @@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8980 mm_segment_t old_fs = get_fs();
8981
8982 set_fs(KERNEL_DS);
8983 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8984 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8985 set_fs(old_fs);
8986 if (!ret) {
8987 switch (_NSIG_WORDS) {
8988 @@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8989 if (copy_siginfo_from_user32(&info, uinfo))
8990 return -EFAULT;
8991 set_fs(KERNEL_DS);
8992 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8993 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8994 set_fs(old_fs);
8995 return ret;
8996 }
8997 @@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8998 return -EFAULT;
8999
9000 set_fs(KERNEL_DS);
9001 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9002 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9003 count);
9004 set_fs(old_fs);
9005
9006 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9007 index 952bd01..7692c6f 100644
9008 --- a/arch/x86/include/asm/alternative-asm.h
9009 +++ b/arch/x86/include/asm/alternative-asm.h
9010 @@ -15,6 +15,45 @@
9011 .endm
9012 #endif
9013
9014 +#ifdef KERNEXEC_PLUGIN
9015 + .macro pax_force_retaddr_bts rip=0
9016 + btsq $63,\rip(%rsp)
9017 + .endm
9018 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9019 + .macro pax_force_retaddr rip=0, reload=0
9020 + btsq $63,\rip(%rsp)
9021 + .endm
9022 + .macro pax_force_fptr ptr
9023 + btsq $63,\ptr
9024 + .endm
9025 + .macro pax_set_fptr_mask
9026 + .endm
9027 +#endif
9028 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9029 + .macro pax_force_retaddr rip=0, reload=0
9030 + .if \reload
9031 + pax_set_fptr_mask
9032 + .endif
9033 + orq %r10,\rip(%rsp)
9034 + .endm
9035 + .macro pax_force_fptr ptr
9036 + orq %r10,\ptr
9037 + .endm
9038 + .macro pax_set_fptr_mask
9039 + movabs $0x8000000000000000,%r10
9040 + .endm
9041 +#endif
9042 +#else
9043 + .macro pax_force_retaddr rip=0, reload=0
9044 + .endm
9045 + .macro pax_force_fptr ptr
9046 + .endm
9047 + .macro pax_force_retaddr_bts rip=0
9048 + .endm
9049 + .macro pax_set_fptr_mask
9050 + .endm
9051 +#endif
9052 +
9053 .macro altinstruction_entry orig alt feature orig_len alt_len
9054 .long \orig - .
9055 .long \alt - .
9056 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9057 index 49331be..9706065 100644
9058 --- a/arch/x86/include/asm/alternative.h
9059 +++ b/arch/x86/include/asm/alternative.h
9060 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9061 ".section .discard,\"aw\",@progbits\n" \
9062 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9063 ".previous\n" \
9064 - ".section .altinstr_replacement, \"ax\"\n" \
9065 + ".section .altinstr_replacement, \"a\"\n" \
9066 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9067 ".previous"
9068
9069 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9070 index eaff479..1eff9b5 100644
9071 --- a/arch/x86/include/asm/apic.h
9072 +++ b/arch/x86/include/asm/apic.h
9073 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9074
9075 #ifdef CONFIG_X86_LOCAL_APIC
9076
9077 -extern unsigned int apic_verbosity;
9078 +extern int apic_verbosity;
9079 extern int local_apic_timer_c2_ok;
9080
9081 extern int disable_apic;
9082 @@ -390,7 +390,7 @@ struct apic {
9083 */
9084 int (*x86_32_numa_cpu_node)(int cpu);
9085 #endif
9086 -};
9087 +} __do_const;
9088
9089 /*
9090 * Pointer to the local APIC driver in use on this system (there's
9091 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9092 index 20370c6..a2eb9b0 100644
9093 --- a/arch/x86/include/asm/apm.h
9094 +++ b/arch/x86/include/asm/apm.h
9095 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9096 __asm__ __volatile__(APM_DO_ZERO_SEGS
9097 "pushl %%edi\n\t"
9098 "pushl %%ebp\n\t"
9099 - "lcall *%%cs:apm_bios_entry\n\t"
9100 + "lcall *%%ss:apm_bios_entry\n\t"
9101 "setc %%al\n\t"
9102 "popl %%ebp\n\t"
9103 "popl %%edi\n\t"
9104 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9105 __asm__ __volatile__(APM_DO_ZERO_SEGS
9106 "pushl %%edi\n\t"
9107 "pushl %%ebp\n\t"
9108 - "lcall *%%cs:apm_bios_entry\n\t"
9109 + "lcall *%%ss:apm_bios_entry\n\t"
9110 "setc %%bl\n\t"
9111 "popl %%ebp\n\t"
9112 "popl %%edi\n\t"
9113 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9114 index 58cb6d4..a4b806c 100644
9115 --- a/arch/x86/include/asm/atomic.h
9116 +++ b/arch/x86/include/asm/atomic.h
9117 @@ -22,7 +22,18 @@
9118 */
9119 static inline int atomic_read(const atomic_t *v)
9120 {
9121 - return (*(volatile int *)&(v)->counter);
9122 + return (*(volatile const int *)&(v)->counter);
9123 +}
9124 +
9125 +/**
9126 + * atomic_read_unchecked - read atomic variable
9127 + * @v: pointer of type atomic_unchecked_t
9128 + *
9129 + * Atomically reads the value of @v.
9130 + */
9131 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9132 +{
9133 + return (*(volatile const int *)&(v)->counter);
9134 }
9135
9136 /**
9137 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9138 }
9139
9140 /**
9141 + * atomic_set_unchecked - set atomic variable
9142 + * @v: pointer of type atomic_unchecked_t
9143 + * @i: required value
9144 + *
9145 + * Atomically sets the value of @v to @i.
9146 + */
9147 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9148 +{
9149 + v->counter = i;
9150 +}
9151 +
9152 +/**
9153 * atomic_add - add integer to atomic variable
9154 * @i: integer value to add
9155 * @v: pointer of type atomic_t
9156 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9157 */
9158 static inline void atomic_add(int i, atomic_t *v)
9159 {
9160 - asm volatile(LOCK_PREFIX "addl %1,%0"
9161 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9162 +
9163 +#ifdef CONFIG_PAX_REFCOUNT
9164 + "jno 0f\n"
9165 + LOCK_PREFIX "subl %1,%0\n"
9166 + "int $4\n0:\n"
9167 + _ASM_EXTABLE(0b, 0b)
9168 +#endif
9169 +
9170 + : "+m" (v->counter)
9171 + : "ir" (i));
9172 +}
9173 +
9174 +/**
9175 + * atomic_add_unchecked - add integer to atomic variable
9176 + * @i: integer value to add
9177 + * @v: pointer of type atomic_unchecked_t
9178 + *
9179 + * Atomically adds @i to @v.
9180 + */
9181 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9182 +{
9183 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9184 : "+m" (v->counter)
9185 : "ir" (i));
9186 }
9187 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9188 */
9189 static inline void atomic_sub(int i, atomic_t *v)
9190 {
9191 - asm volatile(LOCK_PREFIX "subl %1,%0"
9192 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9193 +
9194 +#ifdef CONFIG_PAX_REFCOUNT
9195 + "jno 0f\n"
9196 + LOCK_PREFIX "addl %1,%0\n"
9197 + "int $4\n0:\n"
9198 + _ASM_EXTABLE(0b, 0b)
9199 +#endif
9200 +
9201 + : "+m" (v->counter)
9202 + : "ir" (i));
9203 +}
9204 +
9205 +/**
9206 + * atomic_sub_unchecked - subtract integer from atomic variable
9207 + * @i: integer value to subtract
9208 + * @v: pointer of type atomic_unchecked_t
9209 + *
9210 + * Atomically subtracts @i from @v.
9211 + */
9212 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9213 +{
9214 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9215 : "+m" (v->counter)
9216 : "ir" (i));
9217 }
9218 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9219 {
9220 unsigned char c;
9221
9222 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9223 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9224 +
9225 +#ifdef CONFIG_PAX_REFCOUNT
9226 + "jno 0f\n"
9227 + LOCK_PREFIX "addl %2,%0\n"
9228 + "int $4\n0:\n"
9229 + _ASM_EXTABLE(0b, 0b)
9230 +#endif
9231 +
9232 + "sete %1\n"
9233 : "+m" (v->counter), "=qm" (c)
9234 : "ir" (i) : "memory");
9235 return c;
9236 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9237 */
9238 static inline void atomic_inc(atomic_t *v)
9239 {
9240 - asm volatile(LOCK_PREFIX "incl %0"
9241 + asm volatile(LOCK_PREFIX "incl %0\n"
9242 +
9243 +#ifdef CONFIG_PAX_REFCOUNT
9244 + "jno 0f\n"
9245 + LOCK_PREFIX "decl %0\n"
9246 + "int $4\n0:\n"
9247 + _ASM_EXTABLE(0b, 0b)
9248 +#endif
9249 +
9250 + : "+m" (v->counter));
9251 +}
9252 +
9253 +/**
9254 + * atomic_inc_unchecked - increment atomic variable
9255 + * @v: pointer of type atomic_unchecked_t
9256 + *
9257 + * Atomically increments @v by 1.
9258 + */
9259 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9260 +{
9261 + asm volatile(LOCK_PREFIX "incl %0\n"
9262 : "+m" (v->counter));
9263 }
9264
9265 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9266 */
9267 static inline void atomic_dec(atomic_t *v)
9268 {
9269 - asm volatile(LOCK_PREFIX "decl %0"
9270 + asm volatile(LOCK_PREFIX "decl %0\n"
9271 +
9272 +#ifdef CONFIG_PAX_REFCOUNT
9273 + "jno 0f\n"
9274 + LOCK_PREFIX "incl %0\n"
9275 + "int $4\n0:\n"
9276 + _ASM_EXTABLE(0b, 0b)
9277 +#endif
9278 +
9279 + : "+m" (v->counter));
9280 +}
9281 +
9282 +/**
9283 + * atomic_dec_unchecked - decrement atomic variable
9284 + * @v: pointer of type atomic_unchecked_t
9285 + *
9286 + * Atomically decrements @v by 1.
9287 + */
9288 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9289 +{
9290 + asm volatile(LOCK_PREFIX "decl %0\n"
9291 : "+m" (v->counter));
9292 }
9293
9294 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9295 {
9296 unsigned char c;
9297
9298 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9299 + asm volatile(LOCK_PREFIX "decl %0\n"
9300 +
9301 +#ifdef CONFIG_PAX_REFCOUNT
9302 + "jno 0f\n"
9303 + LOCK_PREFIX "incl %0\n"
9304 + "int $4\n0:\n"
9305 + _ASM_EXTABLE(0b, 0b)
9306 +#endif
9307 +
9308 + "sete %1\n"
9309 : "+m" (v->counter), "=qm" (c)
9310 : : "memory");
9311 return c != 0;
9312 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9313 {
9314 unsigned char c;
9315
9316 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9317 + asm volatile(LOCK_PREFIX "incl %0\n"
9318 +
9319 +#ifdef CONFIG_PAX_REFCOUNT
9320 + "jno 0f\n"
9321 + LOCK_PREFIX "decl %0\n"
9322 + "int $4\n0:\n"
9323 + _ASM_EXTABLE(0b, 0b)
9324 +#endif
9325 +
9326 + "sete %1\n"
9327 + : "+m" (v->counter), "=qm" (c)
9328 + : : "memory");
9329 + return c != 0;
9330 +}
9331 +
9332 +/**
9333 + * atomic_inc_and_test_unchecked - increment and test
9334 + * @v: pointer of type atomic_unchecked_t
9335 + *
9336 + * Atomically increments @v by 1
9337 + * and returns true if the result is zero, or false for all
9338 + * other cases.
9339 + */
9340 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9341 +{
9342 + unsigned char c;
9343 +
9344 + asm volatile(LOCK_PREFIX "incl %0\n"
9345 + "sete %1\n"
9346 : "+m" (v->counter), "=qm" (c)
9347 : : "memory");
9348 return c != 0;
9349 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9350 {
9351 unsigned char c;
9352
9353 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9354 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9355 +
9356 +#ifdef CONFIG_PAX_REFCOUNT
9357 + "jno 0f\n"
9358 + LOCK_PREFIX "subl %2,%0\n"
9359 + "int $4\n0:\n"
9360 + _ASM_EXTABLE(0b, 0b)
9361 +#endif
9362 +
9363 + "sets %1\n"
9364 : "+m" (v->counter), "=qm" (c)
9365 : "ir" (i) : "memory");
9366 return c;
9367 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9368 goto no_xadd;
9369 #endif
9370 /* Modern 486+ processor */
9371 - return i + xadd(&v->counter, i);
9372 + return i + xadd_check_overflow(&v->counter, i);
9373
9374 #ifdef CONFIG_M386
9375 no_xadd: /* Legacy 386 processor */
9376 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9377 }
9378
9379 /**
9380 + * atomic_add_return_unchecked - add integer and return
9381 + * @i: integer value to add
9382 + * @v: pointer of type atomic_unchecked_t
9383 + *
9384 + * Atomically adds @i to @v and returns @i + @v
9385 + */
9386 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9387 +{
9388 +#ifdef CONFIG_M386
9389 + int __i;
9390 + unsigned long flags;
9391 + if (unlikely(boot_cpu_data.x86 <= 3))
9392 + goto no_xadd;
9393 +#endif
9394 + /* Modern 486+ processor */
9395 + return i + xadd(&v->counter, i);
9396 +
9397 +#ifdef CONFIG_M386
9398 +no_xadd: /* Legacy 386 processor */
9399 + raw_local_irq_save(flags);
9400 + __i = atomic_read_unchecked(v);
9401 + atomic_set_unchecked(v, i + __i);
9402 + raw_local_irq_restore(flags);
9403 + return i + __i;
9404 +#endif
9405 +}
9406 +
9407 +/**
9408 * atomic_sub_return - subtract integer and return
9409 * @v: pointer of type atomic_t
9410 * @i: integer value to subtract
9411 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9412 }
9413
9414 #define atomic_inc_return(v) (atomic_add_return(1, v))
9415 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9416 +{
9417 + return atomic_add_return_unchecked(1, v);
9418 +}
9419 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9420
9421 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9422 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9423 return cmpxchg(&v->counter, old, new);
9424 }
9425
9426 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9427 +{
9428 + return cmpxchg(&v->counter, old, new);
9429 +}
9430 +
9431 static inline int atomic_xchg(atomic_t *v, int new)
9432 {
9433 return xchg(&v->counter, new);
9434 }
9435
9436 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9437 +{
9438 + return xchg(&v->counter, new);
9439 +}
9440 +
9441 /**
9442 * __atomic_add_unless - add unless the number is already a given value
9443 * @v: pointer of type atomic_t
9444 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9445 */
9446 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9447 {
9448 - int c, old;
9449 + int c, old, new;
9450 c = atomic_read(v);
9451 for (;;) {
9452 - if (unlikely(c == (u)))
9453 + if (unlikely(c == u))
9454 break;
9455 - old = atomic_cmpxchg((v), c, c + (a));
9456 +
9457 + asm volatile("addl %2,%0\n"
9458 +
9459 +#ifdef CONFIG_PAX_REFCOUNT
9460 + "jno 0f\n"
9461 + "subl %2,%0\n"
9462 + "int $4\n0:\n"
9463 + _ASM_EXTABLE(0b, 0b)
9464 +#endif
9465 +
9466 + : "=r" (new)
9467 + : "0" (c), "ir" (a));
9468 +
9469 + old = atomic_cmpxchg(v, c, new);
9470 if (likely(old == c))
9471 break;
9472 c = old;
9473 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9474 return c;
9475 }
9476
9477 +/**
9478 + * atomic_inc_not_zero_hint - increment if not null
9479 + * @v: pointer of type atomic_t
9480 + * @hint: probable value of the atomic before the increment
9481 + *
9482 + * This version of atomic_inc_not_zero() gives a hint of probable
9483 + * value of the atomic. This helps processor to not read the memory
9484 + * before doing the atomic read/modify/write cycle, lowering
9485 + * number of bus transactions on some arches.
9486 + *
9487 + * Returns: 0 if increment was not done, 1 otherwise.
9488 + */
9489 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9490 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9491 +{
9492 + int val, c = hint, new;
9493 +
9494 + /* sanity test, should be removed by compiler if hint is a constant */
9495 + if (!hint)
9496 + return __atomic_add_unless(v, 1, 0);
9497 +
9498 + do {
9499 + asm volatile("incl %0\n"
9500 +
9501 +#ifdef CONFIG_PAX_REFCOUNT
9502 + "jno 0f\n"
9503 + "decl %0\n"
9504 + "int $4\n0:\n"
9505 + _ASM_EXTABLE(0b, 0b)
9506 +#endif
9507 +
9508 + : "=r" (new)
9509 + : "0" (c));
9510 +
9511 + val = atomic_cmpxchg(v, c, new);
9512 + if (val == c)
9513 + return 1;
9514 + c = val;
9515 + } while (c);
9516 +
9517 + return 0;
9518 +}
9519
9520 /*
9521 * atomic_dec_if_positive - decrement by 1 if old value positive
9522 @@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9523 #endif
9524
9525 /* These are x86-specific, used by some header files */
9526 -#define atomic_clear_mask(mask, addr) \
9527 - asm volatile(LOCK_PREFIX "andl %0,%1" \
9528 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
9529 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9530 +{
9531 + asm volatile(LOCK_PREFIX "andl %1,%0"
9532 + : "+m" (v->counter)
9533 + : "r" (~(mask))
9534 + : "memory");
9535 +}
9536
9537 -#define atomic_set_mask(mask, addr) \
9538 - asm volatile(LOCK_PREFIX "orl %0,%1" \
9539 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9540 - : "memory")
9541 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9542 +{
9543 + asm volatile(LOCK_PREFIX "andl %1,%0"
9544 + : "+m" (v->counter)
9545 + : "r" (~(mask))
9546 + : "memory");
9547 +}
9548 +
9549 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9550 +{
9551 + asm volatile(LOCK_PREFIX "orl %1,%0"
9552 + : "+m" (v->counter)
9553 + : "r" (mask)
9554 + : "memory");
9555 +}
9556 +
9557 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9558 +{
9559 + asm volatile(LOCK_PREFIX "orl %1,%0"
9560 + : "+m" (v->counter)
9561 + : "r" (mask)
9562 + : "memory");
9563 +}
9564
9565 /* Atomic operations are already serializing on x86 */
9566 #define smp_mb__before_atomic_dec() barrier()
9567 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9568 index b154de7..aadebd8 100644
9569 --- a/arch/x86/include/asm/atomic64_32.h
9570 +++ b/arch/x86/include/asm/atomic64_32.h
9571 @@ -12,6 +12,14 @@ typedef struct {
9572 u64 __aligned(8) counter;
9573 } atomic64_t;
9574
9575 +#ifdef CONFIG_PAX_REFCOUNT
9576 +typedef struct {
9577 + u64 __aligned(8) counter;
9578 +} atomic64_unchecked_t;
9579 +#else
9580 +typedef atomic64_t atomic64_unchecked_t;
9581 +#endif
9582 +
9583 #define ATOMIC64_INIT(val) { (val) }
9584
9585 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9586 @@ -37,21 +45,31 @@ typedef struct {
9587 ATOMIC64_DECL_ONE(sym##_386)
9588
9589 ATOMIC64_DECL_ONE(add_386);
9590 +ATOMIC64_DECL_ONE(add_unchecked_386);
9591 ATOMIC64_DECL_ONE(sub_386);
9592 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9593 ATOMIC64_DECL_ONE(inc_386);
9594 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9595 ATOMIC64_DECL_ONE(dec_386);
9596 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9597 #endif
9598
9599 #define alternative_atomic64(f, out, in...) \
9600 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9601
9602 ATOMIC64_DECL(read);
9603 +ATOMIC64_DECL(read_unchecked);
9604 ATOMIC64_DECL(set);
9605 +ATOMIC64_DECL(set_unchecked);
9606 ATOMIC64_DECL(xchg);
9607 ATOMIC64_DECL(add_return);
9608 +ATOMIC64_DECL(add_return_unchecked);
9609 ATOMIC64_DECL(sub_return);
9610 +ATOMIC64_DECL(sub_return_unchecked);
9611 ATOMIC64_DECL(inc_return);
9612 +ATOMIC64_DECL(inc_return_unchecked);
9613 ATOMIC64_DECL(dec_return);
9614 +ATOMIC64_DECL(dec_return_unchecked);
9615 ATOMIC64_DECL(dec_if_positive);
9616 ATOMIC64_DECL(inc_not_zero);
9617 ATOMIC64_DECL(add_unless);
9618 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9619 }
9620
9621 /**
9622 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9623 + * @p: pointer to type atomic64_unchecked_t
9624 + * @o: expected value
9625 + * @n: new value
9626 + *
9627 + * Atomically sets @v to @n if it was equal to @o and returns
9628 + * the old value.
9629 + */
9630 +
9631 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9632 +{
9633 + return cmpxchg64(&v->counter, o, n);
9634 +}
9635 +
9636 +/**
9637 * atomic64_xchg - xchg atomic64 variable
9638 * @v: pointer to type atomic64_t
9639 * @n: value to assign
9640 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9641 }
9642
9643 /**
9644 + * atomic64_set_unchecked - set atomic64 variable
9645 + * @v: pointer to type atomic64_unchecked_t
9646 + * @n: value to assign
9647 + *
9648 + * Atomically sets the value of @v to @n.
9649 + */
9650 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9651 +{
9652 + unsigned high = (unsigned)(i >> 32);
9653 + unsigned low = (unsigned)i;
9654 + alternative_atomic64(set, /* no output */,
9655 + "S" (v), "b" (low), "c" (high)
9656 + : "eax", "edx", "memory");
9657 +}
9658 +
9659 +/**
9660 * atomic64_read - read atomic64 variable
9661 * @v: pointer to type atomic64_t
9662 *
9663 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9664 }
9665
9666 /**
9667 + * atomic64_read_unchecked - read atomic64 variable
9668 + * @v: pointer to type atomic64_unchecked_t
9669 + *
9670 + * Atomically reads the value of @v and returns it.
9671 + */
9672 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9673 +{
9674 + long long r;
9675 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9676 + return r;
9677 + }
9678 +
9679 +/**
9680 * atomic64_add_return - add and return
9681 * @i: integer value to add
9682 * @v: pointer to type atomic64_t
9683 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9684 return i;
9685 }
9686
9687 +/**
9688 + * atomic64_add_return_unchecked - add and return
9689 + * @i: integer value to add
9690 + * @v: pointer to type atomic64_unchecked_t
9691 + *
9692 + * Atomically adds @i to @v and returns @i + *@v
9693 + */
9694 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9695 +{
9696 + alternative_atomic64(add_return_unchecked,
9697 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9698 + ASM_NO_INPUT_CLOBBER("memory"));
9699 + return i;
9700 +}
9701 +
9702 /*
9703 * Other variants with different arithmetic operators:
9704 */
9705 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9706 return a;
9707 }
9708
9709 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9710 +{
9711 + long long a;
9712 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9713 + "S" (v) : "memory", "ecx");
9714 + return a;
9715 +}
9716 +
9717 static inline long long atomic64_dec_return(atomic64_t *v)
9718 {
9719 long long a;
9720 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9721 }
9722
9723 /**
9724 + * atomic64_add_unchecked - add integer to atomic64 variable
9725 + * @i: integer value to add
9726 + * @v: pointer to type atomic64_unchecked_t
9727 + *
9728 + * Atomically adds @i to @v.
9729 + */
9730 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9731 +{
9732 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9733 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9734 + ASM_NO_INPUT_CLOBBER("memory"));
9735 + return i;
9736 +}
9737 +
9738 +/**
9739 * atomic64_sub - subtract the atomic64 variable
9740 * @i: integer value to subtract
9741 * @v: pointer to type atomic64_t
9742 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9743 index 0e1cbfc..5623683 100644
9744 --- a/arch/x86/include/asm/atomic64_64.h
9745 +++ b/arch/x86/include/asm/atomic64_64.h
9746 @@ -18,7 +18,19 @@
9747 */
9748 static inline long atomic64_read(const atomic64_t *v)
9749 {
9750 - return (*(volatile long *)&(v)->counter);
9751 + return (*(volatile const long *)&(v)->counter);
9752 +}
9753 +
9754 +/**
9755 + * atomic64_read_unchecked - read atomic64 variable
9756 + * @v: pointer of type atomic64_unchecked_t
9757 + *
9758 + * Atomically reads the value of @v.
9759 + * Doesn't imply a read memory barrier.
9760 + */
9761 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9762 +{
9763 + return (*(volatile const long *)&(v)->counter);
9764 }
9765
9766 /**
9767 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9768 }
9769
9770 /**
9771 + * atomic64_set_unchecked - set atomic64 variable
9772 + * @v: pointer to type atomic64_unchecked_t
9773 + * @i: required value
9774 + *
9775 + * Atomically sets the value of @v to @i.
9776 + */
9777 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9778 +{
9779 + v->counter = i;
9780 +}
9781 +
9782 +/**
9783 * atomic64_add - add integer to atomic64 variable
9784 * @i: integer value to add
9785 * @v: pointer to type atomic64_t
9786 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9787 */
9788 static inline void atomic64_add(long i, atomic64_t *v)
9789 {
9790 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9791 +
9792 +#ifdef CONFIG_PAX_REFCOUNT
9793 + "jno 0f\n"
9794 + LOCK_PREFIX "subq %1,%0\n"
9795 + "int $4\n0:\n"
9796 + _ASM_EXTABLE(0b, 0b)
9797 +#endif
9798 +
9799 + : "=m" (v->counter)
9800 + : "er" (i), "m" (v->counter));
9801 +}
9802 +
9803 +/**
9804 + * atomic64_add_unchecked - add integer to atomic64 variable
9805 + * @i: integer value to add
9806 + * @v: pointer to type atomic64_unchecked_t
9807 + *
9808 + * Atomically adds @i to @v.
9809 + */
9810 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9811 +{
9812 asm volatile(LOCK_PREFIX "addq %1,%0"
9813 : "=m" (v->counter)
9814 : "er" (i), "m" (v->counter));
9815 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9816 */
9817 static inline void atomic64_sub(long i, atomic64_t *v)
9818 {
9819 - asm volatile(LOCK_PREFIX "subq %1,%0"
9820 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9821 +
9822 +#ifdef CONFIG_PAX_REFCOUNT
9823 + "jno 0f\n"
9824 + LOCK_PREFIX "addq %1,%0\n"
9825 + "int $4\n0:\n"
9826 + _ASM_EXTABLE(0b, 0b)
9827 +#endif
9828 +
9829 + : "=m" (v->counter)
9830 + : "er" (i), "m" (v->counter));
9831 +}
9832 +
9833 +/**
9834 + * atomic64_sub_unchecked - subtract the atomic64 variable
9835 + * @i: integer value to subtract
9836 + * @v: pointer to type atomic64_unchecked_t
9837 + *
9838 + * Atomically subtracts @i from @v.
9839 + */
9840 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9841 +{
9842 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845 }
9846 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9847 {
9848 unsigned char c;
9849
9850 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9851 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9852 +
9853 +#ifdef CONFIG_PAX_REFCOUNT
9854 + "jno 0f\n"
9855 + LOCK_PREFIX "addq %2,%0\n"
9856 + "int $4\n0:\n"
9857 + _ASM_EXTABLE(0b, 0b)
9858 +#endif
9859 +
9860 + "sete %1\n"
9861 : "=m" (v->counter), "=qm" (c)
9862 : "er" (i), "m" (v->counter) : "memory");
9863 return c;
9864 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9865 */
9866 static inline void atomic64_inc(atomic64_t *v)
9867 {
9868 + asm volatile(LOCK_PREFIX "incq %0\n"
9869 +
9870 +#ifdef CONFIG_PAX_REFCOUNT
9871 + "jno 0f\n"
9872 + LOCK_PREFIX "decq %0\n"
9873 + "int $4\n0:\n"
9874 + _ASM_EXTABLE(0b, 0b)
9875 +#endif
9876 +
9877 + : "=m" (v->counter)
9878 + : "m" (v->counter));
9879 +}
9880 +
9881 +/**
9882 + * atomic64_inc_unchecked - increment atomic64 variable
9883 + * @v: pointer to type atomic64_unchecked_t
9884 + *
9885 + * Atomically increments @v by 1.
9886 + */
9887 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9888 +{
9889 asm volatile(LOCK_PREFIX "incq %0"
9890 : "=m" (v->counter)
9891 : "m" (v->counter));
9892 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9893 */
9894 static inline void atomic64_dec(atomic64_t *v)
9895 {
9896 - asm volatile(LOCK_PREFIX "decq %0"
9897 + asm volatile(LOCK_PREFIX "decq %0\n"
9898 +
9899 +#ifdef CONFIG_PAX_REFCOUNT
9900 + "jno 0f\n"
9901 + LOCK_PREFIX "incq %0\n"
9902 + "int $4\n0:\n"
9903 + _ASM_EXTABLE(0b, 0b)
9904 +#endif
9905 +
9906 + : "=m" (v->counter)
9907 + : "m" (v->counter));
9908 +}
9909 +
9910 +/**
9911 + * atomic64_dec_unchecked - decrement atomic64 variable
9912 + * @v: pointer to type atomic64_t
9913 + *
9914 + * Atomically decrements @v by 1.
9915 + */
9916 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9917 +{
9918 + asm volatile(LOCK_PREFIX "decq %0\n"
9919 : "=m" (v->counter)
9920 : "m" (v->counter));
9921 }
9922 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9923 {
9924 unsigned char c;
9925
9926 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9927 + asm volatile(LOCK_PREFIX "decq %0\n"
9928 +
9929 +#ifdef CONFIG_PAX_REFCOUNT
9930 + "jno 0f\n"
9931 + LOCK_PREFIX "incq %0\n"
9932 + "int $4\n0:\n"
9933 + _ASM_EXTABLE(0b, 0b)
9934 +#endif
9935 +
9936 + "sete %1\n"
9937 : "=m" (v->counter), "=qm" (c)
9938 : "m" (v->counter) : "memory");
9939 return c != 0;
9940 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9941 {
9942 unsigned char c;
9943
9944 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9945 + asm volatile(LOCK_PREFIX "incq %0\n"
9946 +
9947 +#ifdef CONFIG_PAX_REFCOUNT
9948 + "jno 0f\n"
9949 + LOCK_PREFIX "decq %0\n"
9950 + "int $4\n0:\n"
9951 + _ASM_EXTABLE(0b, 0b)
9952 +#endif
9953 +
9954 + "sete %1\n"
9955 : "=m" (v->counter), "=qm" (c)
9956 : "m" (v->counter) : "memory");
9957 return c != 0;
9958 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9959 {
9960 unsigned char c;
9961
9962 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9963 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9964 +
9965 +#ifdef CONFIG_PAX_REFCOUNT
9966 + "jno 0f\n"
9967 + LOCK_PREFIX "subq %2,%0\n"
9968 + "int $4\n0:\n"
9969 + _ASM_EXTABLE(0b, 0b)
9970 +#endif
9971 +
9972 + "sets %1\n"
9973 : "=m" (v->counter), "=qm" (c)
9974 : "er" (i), "m" (v->counter) : "memory");
9975 return c;
9976 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9977 */
9978 static inline long atomic64_add_return(long i, atomic64_t *v)
9979 {
9980 + return i + xadd_check_overflow(&v->counter, i);
9981 +}
9982 +
9983 +/**
9984 + * atomic64_add_return_unchecked - add and return
9985 + * @i: integer value to add
9986 + * @v: pointer to type atomic64_unchecked_t
9987 + *
9988 + * Atomically adds @i to @v and returns @i + @v
9989 + */
9990 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9991 +{
9992 return i + xadd(&v->counter, i);
9993 }
9994
9995 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9996 }
9997
9998 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9999 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10000 +{
10001 + return atomic64_add_return_unchecked(1, v);
10002 +}
10003 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10004
10005 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10006 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10007 return cmpxchg(&v->counter, old, new);
10008 }
10009
10010 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10011 +{
10012 + return cmpxchg(&v->counter, old, new);
10013 +}
10014 +
10015 static inline long atomic64_xchg(atomic64_t *v, long new)
10016 {
10017 return xchg(&v->counter, new);
10018 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10019 */
10020 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10021 {
10022 - long c, old;
10023 + long c, old, new;
10024 c = atomic64_read(v);
10025 for (;;) {
10026 - if (unlikely(c == (u)))
10027 + if (unlikely(c == u))
10028 break;
10029 - old = atomic64_cmpxchg((v), c, c + (a));
10030 +
10031 + asm volatile("add %2,%0\n"
10032 +
10033 +#ifdef CONFIG_PAX_REFCOUNT
10034 + "jno 0f\n"
10035 + "sub %2,%0\n"
10036 + "int $4\n0:\n"
10037 + _ASM_EXTABLE(0b, 0b)
10038 +#endif
10039 +
10040 + : "=r" (new)
10041 + : "0" (c), "ir" (a));
10042 +
10043 + old = atomic64_cmpxchg(v, c, new);
10044 if (likely(old == c))
10045 break;
10046 c = old;
10047 }
10048 - return c != (u);
10049 + return c != u;
10050 }
10051
10052 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10053 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10054 index a6983b2..63f48a2 100644
10055 --- a/arch/x86/include/asm/bitops.h
10056 +++ b/arch/x86/include/asm/bitops.h
10057 @@ -40,7 +40,7 @@
10058 * a mask operation on a byte.
10059 */
10060 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10061 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10062 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10063 #define CONST_MASK(nr) (1 << ((nr) & 7))
10064
10065 /**
10066 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10067 index b13fe63..0dab13a 100644
10068 --- a/arch/x86/include/asm/boot.h
10069 +++ b/arch/x86/include/asm/boot.h
10070 @@ -11,10 +11,15 @@
10071 #include <asm/pgtable_types.h>
10072
10073 /* Physical address where kernel should be loaded. */
10074 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10075 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10078
10079 +#ifndef __ASSEMBLY__
10080 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10081 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10082 +#endif
10083 +
10084 /* Minimum kernel alignment, as a power of two */
10085 #ifdef CONFIG_X86_64
10086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10087 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10088 index 48f99f1..d78ebf9 100644
10089 --- a/arch/x86/include/asm/cache.h
10090 +++ b/arch/x86/include/asm/cache.h
10091 @@ -5,12 +5,13 @@
10092
10093 /* L1 cache line size */
10094 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10095 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10096 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10097
10098 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10099 +#define __read_only __attribute__((__section__(".data..read_only")))
10100
10101 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10102 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10103 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10104
10105 #ifdef CONFIG_X86_VSMP
10106 #ifdef CONFIG_SMP
10107 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10108 index 9863ee3..4a1f8e1 100644
10109 --- a/arch/x86/include/asm/cacheflush.h
10110 +++ b/arch/x86/include/asm/cacheflush.h
10111 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10112 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10113
10114 if (pg_flags == _PGMT_DEFAULT)
10115 - return -1;
10116 + return ~0UL;
10117 else if (pg_flags == _PGMT_WC)
10118 return _PAGE_CACHE_WC;
10119 else if (pg_flags == _PGMT_UC_MINUS)
10120 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10121 index 46fc474..b02b0f9 100644
10122 --- a/arch/x86/include/asm/checksum_32.h
10123 +++ b/arch/x86/include/asm/checksum_32.h
10124 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10125 int len, __wsum sum,
10126 int *src_err_ptr, int *dst_err_ptr);
10127
10128 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10129 + int len, __wsum sum,
10130 + int *src_err_ptr, int *dst_err_ptr);
10131 +
10132 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10133 + int len, __wsum sum,
10134 + int *src_err_ptr, int *dst_err_ptr);
10135 +
10136 /*
10137 * Note: when you get a NULL pointer exception here this means someone
10138 * passed in an incorrect kernel address to one of these functions.
10139 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10140 int *err_ptr)
10141 {
10142 might_sleep();
10143 - return csum_partial_copy_generic((__force void *)src, dst,
10144 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10145 len, sum, err_ptr, NULL);
10146 }
10147
10148 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10149 {
10150 might_sleep();
10151 if (access_ok(VERIFY_WRITE, dst, len))
10152 - return csum_partial_copy_generic(src, (__force void *)dst,
10153 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10154 len, sum, NULL, err_ptr);
10155
10156 if (len)
10157 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10158 index 99480e5..d81165b 100644
10159 --- a/arch/x86/include/asm/cmpxchg.h
10160 +++ b/arch/x86/include/asm/cmpxchg.h
10161 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10162 __compiletime_error("Bad argument size for cmpxchg");
10163 extern void __xadd_wrong_size(void)
10164 __compiletime_error("Bad argument size for xadd");
10165 +extern void __xadd_check_overflow_wrong_size(void)
10166 + __compiletime_error("Bad argument size for xadd_check_overflow");
10167 extern void __add_wrong_size(void)
10168 __compiletime_error("Bad argument size for add");
10169 +extern void __add_check_overflow_wrong_size(void)
10170 + __compiletime_error("Bad argument size for add_check_overflow");
10171
10172 /*
10173 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10174 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10175 __ret; \
10176 })
10177
10178 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10179 + ({ \
10180 + __typeof__ (*(ptr)) __ret = (arg); \
10181 + switch (sizeof(*(ptr))) { \
10182 + case __X86_CASE_L: \
10183 + asm volatile (lock #op "l %0, %1\n" \
10184 + "jno 0f\n" \
10185 + "mov %0,%1\n" \
10186 + "int $4\n0:\n" \
10187 + _ASM_EXTABLE(0b, 0b) \
10188 + : "+r" (__ret), "+m" (*(ptr)) \
10189 + : : "memory", "cc"); \
10190 + break; \
10191 + case __X86_CASE_Q: \
10192 + asm volatile (lock #op "q %q0, %1\n" \
10193 + "jno 0f\n" \
10194 + "mov %0,%1\n" \
10195 + "int $4\n0:\n" \
10196 + _ASM_EXTABLE(0b, 0b) \
10197 + : "+r" (__ret), "+m" (*(ptr)) \
10198 + : : "memory", "cc"); \
10199 + break; \
10200 + default: \
10201 + __ ## op ## _check_overflow_wrong_size(); \
10202 + } \
10203 + __ret; \
10204 + })
10205 +
10206 /*
10207 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10208 * Since this is generally used to protect other memory information, we
10209 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10210 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10211 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10212
10213 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10214 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10215 +
10216 #define __add(ptr, inc, lock) \
10217 ({ \
10218 __typeof__ (*(ptr)) __ret = (inc); \
10219 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10220 index f91e80f..7731066 100644
10221 --- a/arch/x86/include/asm/cpufeature.h
10222 +++ b/arch/x86/include/asm/cpufeature.h
10223 @@ -202,11 +202,12 @@
10224 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
10225 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
10226 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
10227 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
10228 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
10229 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
10230 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
10231 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
10232 #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
10233 +#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
10234
10235 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
10236
10237 @@ -371,7 +372,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10238 ".section .discard,\"aw\",@progbits\n"
10239 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10240 ".previous\n"
10241 - ".section .altinstr_replacement,\"ax\"\n"
10242 + ".section .altinstr_replacement,\"a\"\n"
10243 "3: movb $1,%0\n"
10244 "4:\n"
10245 ".previous\n"
10246 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10247 index 8bf1c06..f723dfd 100644
10248 --- a/arch/x86/include/asm/desc.h
10249 +++ b/arch/x86/include/asm/desc.h
10250 @@ -4,6 +4,7 @@
10251 #include <asm/desc_defs.h>
10252 #include <asm/ldt.h>
10253 #include <asm/mmu.h>
10254 +#include <asm/pgtable.h>
10255
10256 #include <linux/smp.h>
10257 #include <linux/percpu.h>
10258 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10259
10260 desc->type = (info->read_exec_only ^ 1) << 1;
10261 desc->type |= info->contents << 2;
10262 + desc->type |= info->seg_not_present ^ 1;
10263
10264 desc->s = 1;
10265 desc->dpl = 0x3;
10266 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10267 }
10268
10269 extern struct desc_ptr idt_descr;
10270 -extern gate_desc idt_table[];
10271 extern struct desc_ptr nmi_idt_descr;
10272 -extern gate_desc nmi_idt_table[];
10273 -
10274 -struct gdt_page {
10275 - struct desc_struct gdt[GDT_ENTRIES];
10276 -} __attribute__((aligned(PAGE_SIZE)));
10277 -
10278 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10279 +extern gate_desc idt_table[256];
10280 +extern gate_desc nmi_idt_table[256];
10281
10282 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10283 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10284 {
10285 - return per_cpu(gdt_page, cpu).gdt;
10286 + return cpu_gdt_table[cpu];
10287 }
10288
10289 #ifdef CONFIG_X86_64
10290 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10291 unsigned long base, unsigned dpl, unsigned flags,
10292 unsigned short seg)
10293 {
10294 - gate->a = (seg << 16) | (base & 0xffff);
10295 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10296 + gate->gate.offset_low = base;
10297 + gate->gate.seg = seg;
10298 + gate->gate.reserved = 0;
10299 + gate->gate.type = type;
10300 + gate->gate.s = 0;
10301 + gate->gate.dpl = dpl;
10302 + gate->gate.p = 1;
10303 + gate->gate.offset_high = base >> 16;
10304 }
10305
10306 #endif
10307 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10308
10309 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10310 {
10311 + pax_open_kernel();
10312 memcpy(&idt[entry], gate, sizeof(*gate));
10313 + pax_close_kernel();
10314 }
10315
10316 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10317 {
10318 + pax_open_kernel();
10319 memcpy(&ldt[entry], desc, 8);
10320 + pax_close_kernel();
10321 }
10322
10323 static inline void
10324 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10325 default: size = sizeof(*gdt); break;
10326 }
10327
10328 + pax_open_kernel();
10329 memcpy(&gdt[entry], desc, size);
10330 + pax_close_kernel();
10331 }
10332
10333 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10334 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10335
10336 static inline void native_load_tr_desc(void)
10337 {
10338 + pax_open_kernel();
10339 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10340 + pax_close_kernel();
10341 }
10342
10343 static inline void native_load_gdt(const struct desc_ptr *dtr)
10344 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10345 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10346 unsigned int i;
10347
10348 + pax_open_kernel();
10349 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10350 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10351 + pax_close_kernel();
10352 }
10353
10354 #define _LDT_empty(info) \
10355 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10356 }
10357
10358 #ifdef CONFIG_X86_64
10359 -static inline void set_nmi_gate(int gate, void *addr)
10360 +static inline void set_nmi_gate(int gate, const void *addr)
10361 {
10362 gate_desc s;
10363
10364 @@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10365 }
10366 #endif
10367
10368 -static inline void _set_gate(int gate, unsigned type, void *addr,
10369 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10370 unsigned dpl, unsigned ist, unsigned seg)
10371 {
10372 gate_desc s;
10373 @@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10374 * Pentium F0 0F bugfix can have resulted in the mapped
10375 * IDT being write-protected.
10376 */
10377 -static inline void set_intr_gate(unsigned int n, void *addr)
10378 +static inline void set_intr_gate(unsigned int n, const void *addr)
10379 {
10380 BUG_ON((unsigned)n > 0xFF);
10381 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10382 @@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10383 /*
10384 * This routine sets up an interrupt gate at directory privilege level 3.
10385 */
10386 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10387 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10388 {
10389 BUG_ON((unsigned)n > 0xFF);
10390 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10391 }
10392
10393 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10394 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10398 }
10399
10400 -static inline void set_trap_gate(unsigned int n, void *addr)
10401 +static inline void set_trap_gate(unsigned int n, const void *addr)
10402 {
10403 BUG_ON((unsigned)n > 0xFF);
10404 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10405 @@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10406 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10407 {
10408 BUG_ON((unsigned)n > 0xFF);
10409 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10410 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10411 }
10412
10413 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10414 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10415 {
10416 BUG_ON((unsigned)n > 0xFF);
10417 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10418 }
10419
10420 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10421 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10422 {
10423 BUG_ON((unsigned)n > 0xFF);
10424 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10425 }
10426
10427 +#ifdef CONFIG_X86_32
10428 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10429 +{
10430 + struct desc_struct d;
10431 +
10432 + if (likely(limit))
10433 + limit = (limit - 1UL) >> PAGE_SHIFT;
10434 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10435 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10436 +}
10437 +#endif
10438 +
10439 #endif /* _ASM_X86_DESC_H */
10440 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10441 index 278441f..b95a174 100644
10442 --- a/arch/x86/include/asm/desc_defs.h
10443 +++ b/arch/x86/include/asm/desc_defs.h
10444 @@ -31,6 +31,12 @@ struct desc_struct {
10445 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10446 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10447 };
10448 + struct {
10449 + u16 offset_low;
10450 + u16 seg;
10451 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10452 + unsigned offset_high: 16;
10453 + } gate;
10454 };
10455 } __attribute__((packed));
10456
10457 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10458 index 3778256..c5d4fce 100644
10459 --- a/arch/x86/include/asm/e820.h
10460 +++ b/arch/x86/include/asm/e820.h
10461 @@ -69,7 +69,7 @@ struct e820map {
10462 #define ISA_START_ADDRESS 0xa0000
10463 #define ISA_END_ADDRESS 0x100000
10464
10465 -#define BIOS_BEGIN 0x000a0000
10466 +#define BIOS_BEGIN 0x000c0000
10467 #define BIOS_END 0x00100000
10468
10469 #define BIOS_ROM_BASE 0xffe00000
10470 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10471 index 5939f44..f8845f6 100644
10472 --- a/arch/x86/include/asm/elf.h
10473 +++ b/arch/x86/include/asm/elf.h
10474 @@ -243,7 +243,25 @@ extern int force_personality32;
10475 the loader. We need to make sure that it is out of the way of the program
10476 that it will "exec", and that there is sufficient room for the brk. */
10477
10478 +#ifdef CONFIG_PAX_SEGMEXEC
10479 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10480 +#else
10481 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10482 +#endif
10483 +
10484 +#ifdef CONFIG_PAX_ASLR
10485 +#ifdef CONFIG_X86_32
10486 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10487 +
10488 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10489 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10490 +#else
10491 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10492 +
10493 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10494 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10495 +#endif
10496 +#endif
10497
10498 /* This yields a mask that user programs can use to figure out what
10499 instruction set this CPU supports. This could be done in user space,
10500 @@ -296,16 +314,12 @@ do { \
10501
10502 #define ARCH_DLINFO \
10503 do { \
10504 - if (vdso_enabled) \
10505 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10506 - (unsigned long)current->mm->context.vdso); \
10507 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10508 } while (0)
10509
10510 #define ARCH_DLINFO_X32 \
10511 do { \
10512 - if (vdso_enabled) \
10513 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10514 - (unsigned long)current->mm->context.vdso); \
10515 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10516 } while (0)
10517
10518 #define AT_SYSINFO 32
10519 @@ -320,7 +334,7 @@ else \
10520
10521 #endif /* !CONFIG_X86_32 */
10522
10523 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10524 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10525
10526 #define VDSO_ENTRY \
10527 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10528 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10529 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10530 #define compat_arch_setup_additional_pages syscall32_setup_pages
10531
10532 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10533 -#define arch_randomize_brk arch_randomize_brk
10534 -
10535 /*
10536 * True on X86_32 or when emulating IA32 on X86_64
10537 */
10538 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10539 index cc70c1c..d96d011 100644
10540 --- a/arch/x86/include/asm/emergency-restart.h
10541 +++ b/arch/x86/include/asm/emergency-restart.h
10542 @@ -15,6 +15,6 @@ enum reboot_type {
10543
10544 extern enum reboot_type reboot_type;
10545
10546 -extern void machine_emergency_restart(void);
10547 +extern void machine_emergency_restart(void) __noreturn;
10548
10549 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10550 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10551 index 75f4c6d..ee3eb8f 100644
10552 --- a/arch/x86/include/asm/fpu-internal.h
10553 +++ b/arch/x86/include/asm/fpu-internal.h
10554 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10555 {
10556 int err;
10557
10558 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10560 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10561 +#endif
10562 +
10563 /* See comment in fxsave() below. */
10564 #ifdef CONFIG_AS_FXSAVEQ
10565 asm volatile("1: fxrstorq %[fx]\n\t"
10566 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10567 {
10568 int err;
10569
10570 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10571 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10572 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10573 +#endif
10574 +
10575 /*
10576 * Clear the bytes not touched by the fxsave and reserved
10577 * for the SW usage.
10578 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10579 "emms\n\t" /* clear stack tags */
10580 "fildl %P[addr]", /* set F?P to defined value */
10581 X86_FEATURE_FXSAVE_LEAK,
10582 - [addr] "m" (tsk->thread.fpu.has_fpu));
10583 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10584
10585 return fpu_restore_checking(&tsk->thread.fpu);
10586 }
10587 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10588 index 71ecbcb..bac10b7 100644
10589 --- a/arch/x86/include/asm/futex.h
10590 +++ b/arch/x86/include/asm/futex.h
10591 @@ -11,16 +11,18 @@
10592 #include <asm/processor.h>
10593
10594 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10595 + typecheck(u32 __user *, uaddr); \
10596 asm volatile("1:\t" insn "\n" \
10597 "2:\t.section .fixup,\"ax\"\n" \
10598 "3:\tmov\t%3, %1\n" \
10599 "\tjmp\t2b\n" \
10600 "\t.previous\n" \
10601 _ASM_EXTABLE(1b, 3b) \
10602 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10603 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10604 : "i" (-EFAULT), "0" (oparg), "1" (0))
10605
10606 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10607 + typecheck(u32 __user *, uaddr); \
10608 asm volatile("1:\tmovl %2, %0\n" \
10609 "\tmovl\t%0, %3\n" \
10610 "\t" insn "\n" \
10611 @@ -33,7 +35,7 @@
10612 _ASM_EXTABLE(1b, 4b) \
10613 _ASM_EXTABLE(2b, 4b) \
10614 : "=&a" (oldval), "=&r" (ret), \
10615 - "+m" (*uaddr), "=&r" (tem) \
10616 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10617 : "r" (oparg), "i" (-EFAULT), "1" (0))
10618
10619 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10620 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10621
10622 switch (op) {
10623 case FUTEX_OP_SET:
10624 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10625 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10626 break;
10627 case FUTEX_OP_ADD:
10628 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10629 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10630 uaddr, oparg);
10631 break;
10632 case FUTEX_OP_OR:
10633 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10634 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10635 return -EFAULT;
10636
10637 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10638 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10639 "2:\t.section .fixup, \"ax\"\n"
10640 "3:\tmov %3, %0\n"
10641 "\tjmp 2b\n"
10642 "\t.previous\n"
10643 _ASM_EXTABLE(1b, 3b)
10644 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10645 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10646 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10647 : "memory"
10648 );
10649 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10650 index eb92a6e..b98b2f4 100644
10651 --- a/arch/x86/include/asm/hw_irq.h
10652 +++ b/arch/x86/include/asm/hw_irq.h
10653 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10654 extern void enable_IO_APIC(void);
10655
10656 /* Statistics */
10657 -extern atomic_t irq_err_count;
10658 -extern atomic_t irq_mis_count;
10659 +extern atomic_unchecked_t irq_err_count;
10660 +extern atomic_unchecked_t irq_mis_count;
10661
10662 /* EISA */
10663 extern void eisa_set_level_irq(unsigned int irq);
10664 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10665 index d8e8eef..99f81ae 100644
10666 --- a/arch/x86/include/asm/io.h
10667 +++ b/arch/x86/include/asm/io.h
10668 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10669
10670 #include <linux/vmalloc.h>
10671
10672 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10673 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10674 +{
10675 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10676 +}
10677 +
10678 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10679 +{
10680 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10681 +}
10682 +
10683 /*
10684 * Convert a virtual cached pointer to an uncached pointer
10685 */
10686 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10687 index bba3cf8..06bc8da 100644
10688 --- a/arch/x86/include/asm/irqflags.h
10689 +++ b/arch/x86/include/asm/irqflags.h
10690 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10691 sti; \
10692 sysexit
10693
10694 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10695 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10696 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10697 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10698 +
10699 #else
10700 #define INTERRUPT_RETURN iret
10701 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10702 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10703 index 5478825..839e88c 100644
10704 --- a/arch/x86/include/asm/kprobes.h
10705 +++ b/arch/x86/include/asm/kprobes.h
10706 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10707 #define RELATIVEJUMP_SIZE 5
10708 #define RELATIVECALL_OPCODE 0xe8
10709 #define RELATIVE_ADDR_SIZE 4
10710 -#define MAX_STACK_SIZE 64
10711 -#define MIN_STACK_SIZE(ADDR) \
10712 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10713 - THREAD_SIZE - (unsigned long)(ADDR))) \
10714 - ? (MAX_STACK_SIZE) \
10715 - : (((unsigned long)current_thread_info()) + \
10716 - THREAD_SIZE - (unsigned long)(ADDR)))
10717 +#define MAX_STACK_SIZE 64UL
10718 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10719
10720 #define flush_insn_slot(p) do { } while (0)
10721
10722 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10723 index db7c1f2..92f130a 100644
10724 --- a/arch/x86/include/asm/kvm_host.h
10725 +++ b/arch/x86/include/asm/kvm_host.h
10726 @@ -680,7 +680,7 @@ struct kvm_x86_ops {
10727 int (*check_intercept)(struct kvm_vcpu *vcpu,
10728 struct x86_instruction_info *info,
10729 enum x86_intercept_stage stage);
10730 -};
10731 +} __do_const;
10732
10733 struct kvm_arch_async_pf {
10734 u32 token;
10735 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10736 index c8bed0d..e5721fa 100644
10737 --- a/arch/x86/include/asm/local.h
10738 +++ b/arch/x86/include/asm/local.h
10739 @@ -17,26 +17,58 @@ typedef struct {
10740
10741 static inline void local_inc(local_t *l)
10742 {
10743 - asm volatile(_ASM_INC "%0"
10744 + asm volatile(_ASM_INC "%0\n"
10745 +
10746 +#ifdef CONFIG_PAX_REFCOUNT
10747 + "jno 0f\n"
10748 + _ASM_DEC "%0\n"
10749 + "int $4\n0:\n"
10750 + _ASM_EXTABLE(0b, 0b)
10751 +#endif
10752 +
10753 : "+m" (l->a.counter));
10754 }
10755
10756 static inline void local_dec(local_t *l)
10757 {
10758 - asm volatile(_ASM_DEC "%0"
10759 + asm volatile(_ASM_DEC "%0\n"
10760 +
10761 +#ifdef CONFIG_PAX_REFCOUNT
10762 + "jno 0f\n"
10763 + _ASM_INC "%0\n"
10764 + "int $4\n0:\n"
10765 + _ASM_EXTABLE(0b, 0b)
10766 +#endif
10767 +
10768 : "+m" (l->a.counter));
10769 }
10770
10771 static inline void local_add(long i, local_t *l)
10772 {
10773 - asm volatile(_ASM_ADD "%1,%0"
10774 + asm volatile(_ASM_ADD "%1,%0\n"
10775 +
10776 +#ifdef CONFIG_PAX_REFCOUNT
10777 + "jno 0f\n"
10778 + _ASM_SUB "%1,%0\n"
10779 + "int $4\n0:\n"
10780 + _ASM_EXTABLE(0b, 0b)
10781 +#endif
10782 +
10783 : "+m" (l->a.counter)
10784 : "ir" (i));
10785 }
10786
10787 static inline void local_sub(long i, local_t *l)
10788 {
10789 - asm volatile(_ASM_SUB "%1,%0"
10790 + asm volatile(_ASM_SUB "%1,%0\n"
10791 +
10792 +#ifdef CONFIG_PAX_REFCOUNT
10793 + "jno 0f\n"
10794 + _ASM_ADD "%1,%0\n"
10795 + "int $4\n0:\n"
10796 + _ASM_EXTABLE(0b, 0b)
10797 +#endif
10798 +
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10803 {
10804 unsigned char c;
10805
10806 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10807 + asm volatile(_ASM_SUB "%2,%0\n"
10808 +
10809 +#ifdef CONFIG_PAX_REFCOUNT
10810 + "jno 0f\n"
10811 + _ASM_ADD "%2,%0\n"
10812 + "int $4\n0:\n"
10813 + _ASM_EXTABLE(0b, 0b)
10814 +#endif
10815 +
10816 + "sete %1\n"
10817 : "+m" (l->a.counter), "=qm" (c)
10818 : "ir" (i) : "memory");
10819 return c;
10820 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10821 {
10822 unsigned char c;
10823
10824 - asm volatile(_ASM_DEC "%0; sete %1"
10825 + asm volatile(_ASM_DEC "%0\n"
10826 +
10827 +#ifdef CONFIG_PAX_REFCOUNT
10828 + "jno 0f\n"
10829 + _ASM_INC "%0\n"
10830 + "int $4\n0:\n"
10831 + _ASM_EXTABLE(0b, 0b)
10832 +#endif
10833 +
10834 + "sete %1\n"
10835 : "+m" (l->a.counter), "=qm" (c)
10836 : : "memory");
10837 return c != 0;
10838 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10839 {
10840 unsigned char c;
10841
10842 - asm volatile(_ASM_INC "%0; sete %1"
10843 + asm volatile(_ASM_INC "%0\n"
10844 +
10845 +#ifdef CONFIG_PAX_REFCOUNT
10846 + "jno 0f\n"
10847 + _ASM_DEC "%0\n"
10848 + "int $4\n0:\n"
10849 + _ASM_EXTABLE(0b, 0b)
10850 +#endif
10851 +
10852 + "sete %1\n"
10853 : "+m" (l->a.counter), "=qm" (c)
10854 : : "memory");
10855 return c != 0;
10856 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10857 {
10858 unsigned char c;
10859
10860 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10861 + asm volatile(_ASM_ADD "%2,%0\n"
10862 +
10863 +#ifdef CONFIG_PAX_REFCOUNT
10864 + "jno 0f\n"
10865 + _ASM_SUB "%2,%0\n"
10866 + "int $4\n0:\n"
10867 + _ASM_EXTABLE(0b, 0b)
10868 +#endif
10869 +
10870 + "sets %1\n"
10871 : "+m" (l->a.counter), "=qm" (c)
10872 : "ir" (i) : "memory");
10873 return c;
10874 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10875 #endif
10876 /* Modern 486+ processor */
10877 __i = i;
10878 - asm volatile(_ASM_XADD "%0, %1;"
10879 + asm volatile(_ASM_XADD "%0, %1\n"
10880 +
10881 +#ifdef CONFIG_PAX_REFCOUNT
10882 + "jno 0f\n"
10883 + _ASM_MOV "%0,%1\n"
10884 + "int $4\n0:\n"
10885 + _ASM_EXTABLE(0b, 0b)
10886 +#endif
10887 +
10888 : "+r" (i), "+m" (l->a.counter)
10889 : : "memory");
10890 return i + __i;
10891 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10892 index 593e51d..fa69c9a 100644
10893 --- a/arch/x86/include/asm/mman.h
10894 +++ b/arch/x86/include/asm/mman.h
10895 @@ -5,4 +5,14 @@
10896
10897 #include <asm-generic/mman.h>
10898
10899 +#ifdef __KERNEL__
10900 +#ifndef __ASSEMBLY__
10901 +#ifdef CONFIG_X86_32
10902 +#define arch_mmap_check i386_mmap_check
10903 +int i386_mmap_check(unsigned long addr, unsigned long len,
10904 + unsigned long flags);
10905 +#endif
10906 +#endif
10907 +#endif
10908 +
10909 #endif /* _ASM_X86_MMAN_H */
10910 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10911 index 5f55e69..e20bfb1 100644
10912 --- a/arch/x86/include/asm/mmu.h
10913 +++ b/arch/x86/include/asm/mmu.h
10914 @@ -9,7 +9,7 @@
10915 * we put the segment information here.
10916 */
10917 typedef struct {
10918 - void *ldt;
10919 + struct desc_struct *ldt;
10920 int size;
10921
10922 #ifdef CONFIG_X86_64
10923 @@ -18,7 +18,19 @@ typedef struct {
10924 #endif
10925
10926 struct mutex lock;
10927 - void *vdso;
10928 + unsigned long vdso;
10929 +
10930 +#ifdef CONFIG_X86_32
10931 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10932 + unsigned long user_cs_base;
10933 + unsigned long user_cs_limit;
10934 +
10935 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10936 + cpumask_t cpu_user_cs_mask;
10937 +#endif
10938 +
10939 +#endif
10940 +#endif
10941 } mm_context_t;
10942
10943 #ifdef CONFIG_SMP
10944 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10945 index cdbf367..adb37ac 100644
10946 --- a/arch/x86/include/asm/mmu_context.h
10947 +++ b/arch/x86/include/asm/mmu_context.h
10948 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10949
10950 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10951 {
10952 +
10953 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10954 + unsigned int i;
10955 + pgd_t *pgd;
10956 +
10957 + pax_open_kernel();
10958 + pgd = get_cpu_pgd(smp_processor_id());
10959 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10960 + set_pgd_batched(pgd+i, native_make_pgd(0));
10961 + pax_close_kernel();
10962 +#endif
10963 +
10964 #ifdef CONFIG_SMP
10965 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10966 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10967 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10968 struct task_struct *tsk)
10969 {
10970 unsigned cpu = smp_processor_id();
10971 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10972 + int tlbstate = TLBSTATE_OK;
10973 +#endif
10974
10975 if (likely(prev != next)) {
10976 #ifdef CONFIG_SMP
10977 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10978 + tlbstate = this_cpu_read(cpu_tlbstate.state);
10979 +#endif
10980 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10981 this_cpu_write(cpu_tlbstate.active_mm, next);
10982 #endif
10983 cpumask_set_cpu(cpu, mm_cpumask(next));
10984
10985 /* Re-load page tables */
10986 +#ifdef CONFIG_PAX_PER_CPU_PGD
10987 + pax_open_kernel();
10988 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10989 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10990 + pax_close_kernel();
10991 + load_cr3(get_cpu_pgd(cpu));
10992 +#else
10993 load_cr3(next->pgd);
10994 +#endif
10995
10996 /* stop flush ipis for the previous mm */
10997 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10998 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10999 */
11000 if (unlikely(prev->context.ldt != next->context.ldt))
11001 load_LDT_nolock(&next->context);
11002 - }
11003 +
11004 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11005 + if (!(__supported_pte_mask & _PAGE_NX)) {
11006 + smp_mb__before_clear_bit();
11007 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11008 + smp_mb__after_clear_bit();
11009 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11010 + }
11011 +#endif
11012 +
11013 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11014 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11015 + prev->context.user_cs_limit != next->context.user_cs_limit))
11016 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11017 #ifdef CONFIG_SMP
11018 + else if (unlikely(tlbstate != TLBSTATE_OK))
11019 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11020 +#endif
11021 +#endif
11022 +
11023 + }
11024 else {
11025 +
11026 +#ifdef CONFIG_PAX_PER_CPU_PGD
11027 + pax_open_kernel();
11028 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11029 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11030 + pax_close_kernel();
11031 + load_cr3(get_cpu_pgd(cpu));
11032 +#endif
11033 +
11034 +#ifdef CONFIG_SMP
11035 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11036 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
11037
11038 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11039 * tlb flush IPI delivery. We must reload CR3
11040 * to make sure to use no freed page tables.
11041 */
11042 +
11043 +#ifndef CONFIG_PAX_PER_CPU_PGD
11044 load_cr3(next->pgd);
11045 +#endif
11046 +
11047 load_LDT_nolock(&next->context);
11048 +
11049 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11050 + if (!(__supported_pte_mask & _PAGE_NX))
11051 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11052 +#endif
11053 +
11054 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11055 +#ifdef CONFIG_PAX_PAGEEXEC
11056 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11057 +#endif
11058 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11059 +#endif
11060 +
11061 }
11062 +#endif
11063 }
11064 -#endif
11065 }
11066
11067 #define activate_mm(prev, next) \
11068 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11069 index 9eae775..c914fea 100644
11070 --- a/arch/x86/include/asm/module.h
11071 +++ b/arch/x86/include/asm/module.h
11072 @@ -5,6 +5,7 @@
11073
11074 #ifdef CONFIG_X86_64
11075 /* X86_64 does not define MODULE_PROC_FAMILY */
11076 +#define MODULE_PROC_FAMILY ""
11077 #elif defined CONFIG_M386
11078 #define MODULE_PROC_FAMILY "386 "
11079 #elif defined CONFIG_M486
11080 @@ -59,8 +60,20 @@
11081 #error unknown processor family
11082 #endif
11083
11084 -#ifdef CONFIG_X86_32
11085 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11086 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11087 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11088 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11089 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11090 +#else
11091 +#define MODULE_PAX_KERNEXEC ""
11092 #endif
11093
11094 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11095 +#define MODULE_PAX_UDEREF "UDEREF "
11096 +#else
11097 +#define MODULE_PAX_UDEREF ""
11098 +#endif
11099 +
11100 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11101 +
11102 #endif /* _ASM_X86_MODULE_H */
11103 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11104 index 320f7bb..e89f8f8 100644
11105 --- a/arch/x86/include/asm/page_64_types.h
11106 +++ b/arch/x86/include/asm/page_64_types.h
11107 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11108
11109 /* duplicated to the one in bootmem.h */
11110 extern unsigned long max_pfn;
11111 -extern unsigned long phys_base;
11112 +extern const unsigned long phys_base;
11113
11114 extern unsigned long __phys_addr(unsigned long);
11115 #define __phys_reloc_hide(x) (x)
11116 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11117 index 6cbbabf..11b3aed 100644
11118 --- a/arch/x86/include/asm/paravirt.h
11119 +++ b/arch/x86/include/asm/paravirt.h
11120 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11121 val);
11122 }
11123
11124 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11125 +{
11126 + pgdval_t val = native_pgd_val(pgd);
11127 +
11128 + if (sizeof(pgdval_t) > sizeof(long))
11129 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11130 + val, (u64)val >> 32);
11131 + else
11132 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11133 + val);
11134 +}
11135 +
11136 static inline void pgd_clear(pgd_t *pgdp)
11137 {
11138 set_pgd(pgdp, __pgd(0));
11139 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11140 pv_mmu_ops.set_fixmap(idx, phys, flags);
11141 }
11142
11143 +#ifdef CONFIG_PAX_KERNEXEC
11144 +static inline unsigned long pax_open_kernel(void)
11145 +{
11146 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11147 +}
11148 +
11149 +static inline unsigned long pax_close_kernel(void)
11150 +{
11151 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11152 +}
11153 +#else
11154 +static inline unsigned long pax_open_kernel(void) { return 0; }
11155 +static inline unsigned long pax_close_kernel(void) { return 0; }
11156 +#endif
11157 +
11158 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11159
11160 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11161 @@ -965,7 +992,7 @@ extern void default_banner(void);
11162
11163 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11164 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11165 -#define PARA_INDIRECT(addr) *%cs:addr
11166 +#define PARA_INDIRECT(addr) *%ss:addr
11167 #endif
11168
11169 #define INTERRUPT_RETURN \
11170 @@ -1040,6 +1067,21 @@ extern void default_banner(void);
11171 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11172 CLBR_NONE, \
11173 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11174 +
11175 +#define GET_CR0_INTO_RDI \
11176 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11177 + mov %rax,%rdi
11178 +
11179 +#define SET_RDI_INTO_CR0 \
11180 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11181 +
11182 +#define GET_CR3_INTO_RDI \
11183 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11184 + mov %rax,%rdi
11185 +
11186 +#define SET_RDI_INTO_CR3 \
11187 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11188 +
11189 #endif /* CONFIG_X86_32 */
11190
11191 #endif /* __ASSEMBLY__ */
11192 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11193 index 8e8b9a4..f07d725 100644
11194 --- a/arch/x86/include/asm/paravirt_types.h
11195 +++ b/arch/x86/include/asm/paravirt_types.h
11196 @@ -84,20 +84,20 @@ struct pv_init_ops {
11197 */
11198 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11199 unsigned long addr, unsigned len);
11200 -};
11201 +} __no_const;
11202
11203
11204 struct pv_lazy_ops {
11205 /* Set deferred update mode, used for batching operations. */
11206 void (*enter)(void);
11207 void (*leave)(void);
11208 -};
11209 +} __no_const;
11210
11211 struct pv_time_ops {
11212 unsigned long long (*sched_clock)(void);
11213 unsigned long long (*steal_clock)(int cpu);
11214 unsigned long (*get_tsc_khz)(void);
11215 -};
11216 +} __no_const;
11217
11218 struct pv_cpu_ops {
11219 /* hooks for various privileged instructions */
11220 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11221
11222 void (*start_context_switch)(struct task_struct *prev);
11223 void (*end_context_switch)(struct task_struct *next);
11224 -};
11225 +} __no_const;
11226
11227 struct pv_irq_ops {
11228 /*
11229 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11230 unsigned long start_eip,
11231 unsigned long start_esp);
11232 #endif
11233 -};
11234 +} __no_const;
11235
11236 struct pv_mmu_ops {
11237 unsigned long (*read_cr2)(void);
11238 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11239 struct paravirt_callee_save make_pud;
11240
11241 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11242 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11243 #endif /* PAGETABLE_LEVELS == 4 */
11244 #endif /* PAGETABLE_LEVELS >= 3 */
11245
11246 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11247 an mfn. We can tell which is which from the index. */
11248 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11249 phys_addr_t phys, pgprot_t flags);
11250 +
11251 +#ifdef CONFIG_PAX_KERNEXEC
11252 + unsigned long (*pax_open_kernel)(void);
11253 + unsigned long (*pax_close_kernel)(void);
11254 +#endif
11255 +
11256 };
11257
11258 struct arch_spinlock;
11259 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11260 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11261 int (*spin_trylock)(struct arch_spinlock *lock);
11262 void (*spin_unlock)(struct arch_spinlock *lock);
11263 -};
11264 +} __no_const;
11265
11266 /* This contains all the paravirt structures: we get a convenient
11267 * number for each function using the offset which we use to indicate
11268 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11269 index b4389a4..7024269 100644
11270 --- a/arch/x86/include/asm/pgalloc.h
11271 +++ b/arch/x86/include/asm/pgalloc.h
11272 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11273 pmd_t *pmd, pte_t *pte)
11274 {
11275 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11276 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11277 +}
11278 +
11279 +static inline void pmd_populate_user(struct mm_struct *mm,
11280 + pmd_t *pmd, pte_t *pte)
11281 +{
11282 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11283 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11284 }
11285
11286 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11287
11288 #ifdef CONFIG_X86_PAE
11289 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11290 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11291 +{
11292 + pud_populate(mm, pudp, pmd);
11293 +}
11294 #else /* !CONFIG_X86_PAE */
11295 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11296 {
11297 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11298 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11299 }
11300 +
11301 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11302 +{
11303 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11304 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11305 +}
11306 #endif /* CONFIG_X86_PAE */
11307
11308 #if PAGETABLE_LEVELS > 3
11309 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11310 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11311 }
11312
11313 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11314 +{
11315 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11316 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11317 +}
11318 +
11319 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11320 {
11321 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11322 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11323 index 98391db..8f6984e 100644
11324 --- a/arch/x86/include/asm/pgtable-2level.h
11325 +++ b/arch/x86/include/asm/pgtable-2level.h
11326 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11327
11328 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11329 {
11330 + pax_open_kernel();
11331 *pmdp = pmd;
11332 + pax_close_kernel();
11333 }
11334
11335 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11336 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11337 index cb00ccc..17e9054 100644
11338 --- a/arch/x86/include/asm/pgtable-3level.h
11339 +++ b/arch/x86/include/asm/pgtable-3level.h
11340 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11341
11342 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11343 {
11344 + pax_open_kernel();
11345 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11346 + pax_close_kernel();
11347 }
11348
11349 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11350 {
11351 + pax_open_kernel();
11352 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11353 + pax_close_kernel();
11354 }
11355
11356 /*
11357 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11358 index 49afb3f..91a8c63 100644
11359 --- a/arch/x86/include/asm/pgtable.h
11360 +++ b/arch/x86/include/asm/pgtable.h
11361 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11362
11363 #ifndef __PAGETABLE_PUD_FOLDED
11364 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11365 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11366 #define pgd_clear(pgd) native_pgd_clear(pgd)
11367 #endif
11368
11369 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11370
11371 #define arch_end_context_switch(prev) do {} while(0)
11372
11373 +#define pax_open_kernel() native_pax_open_kernel()
11374 +#define pax_close_kernel() native_pax_close_kernel()
11375 #endif /* CONFIG_PARAVIRT */
11376
11377 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11378 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11379 +
11380 +#ifdef CONFIG_PAX_KERNEXEC
11381 +static inline unsigned long native_pax_open_kernel(void)
11382 +{
11383 + unsigned long cr0;
11384 +
11385 + preempt_disable();
11386 + barrier();
11387 + cr0 = read_cr0() ^ X86_CR0_WP;
11388 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11389 + write_cr0(cr0);
11390 + return cr0 ^ X86_CR0_WP;
11391 +}
11392 +
11393 +static inline unsigned long native_pax_close_kernel(void)
11394 +{
11395 + unsigned long cr0;
11396 +
11397 + cr0 = read_cr0() ^ X86_CR0_WP;
11398 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11399 + write_cr0(cr0);
11400 + barrier();
11401 + preempt_enable_no_resched();
11402 + return cr0 ^ X86_CR0_WP;
11403 +}
11404 +#else
11405 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11406 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11407 +#endif
11408 +
11409 /*
11410 * The following only work if pte_present() is true.
11411 * Undefined behaviour if not..
11412 */
11413 +static inline int pte_user(pte_t pte)
11414 +{
11415 + return pte_val(pte) & _PAGE_USER;
11416 +}
11417 +
11418 static inline int pte_dirty(pte_t pte)
11419 {
11420 return pte_flags(pte) & _PAGE_DIRTY;
11421 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11422 return pte_clear_flags(pte, _PAGE_RW);
11423 }
11424
11425 +static inline pte_t pte_mkread(pte_t pte)
11426 +{
11427 + return __pte(pte_val(pte) | _PAGE_USER);
11428 +}
11429 +
11430 static inline pte_t pte_mkexec(pte_t pte)
11431 {
11432 - return pte_clear_flags(pte, _PAGE_NX);
11433 +#ifdef CONFIG_X86_PAE
11434 + if (__supported_pte_mask & _PAGE_NX)
11435 + return pte_clear_flags(pte, _PAGE_NX);
11436 + else
11437 +#endif
11438 + return pte_set_flags(pte, _PAGE_USER);
11439 +}
11440 +
11441 +static inline pte_t pte_exprotect(pte_t pte)
11442 +{
11443 +#ifdef CONFIG_X86_PAE
11444 + if (__supported_pte_mask & _PAGE_NX)
11445 + return pte_set_flags(pte, _PAGE_NX);
11446 + else
11447 +#endif
11448 + return pte_clear_flags(pte, _PAGE_USER);
11449 }
11450
11451 static inline pte_t pte_mkdirty(pte_t pte)
11452 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11453 #endif
11454
11455 #ifndef __ASSEMBLY__
11456 +
11457 +#ifdef CONFIG_PAX_PER_CPU_PGD
11458 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11459 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11460 +{
11461 + return cpu_pgd[cpu];
11462 +}
11463 +#endif
11464 +
11465 #include <linux/mm_types.h>
11466
11467 static inline int pte_none(pte_t pte)
11468 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11469
11470 static inline int pgd_bad(pgd_t pgd)
11471 {
11472 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11473 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11474 }
11475
11476 static inline int pgd_none(pgd_t pgd)
11477 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11478 * pgd_offset() returns a (pgd_t *)
11479 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11480 */
11481 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11482 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11483 +
11484 +#ifdef CONFIG_PAX_PER_CPU_PGD
11485 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11486 +#endif
11487 +
11488 /*
11489 * a shortcut which implies the use of the kernel's pgd, instead
11490 * of a process's
11491 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11492 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11493 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11494
11495 +#ifdef CONFIG_X86_32
11496 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11497 +#else
11498 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11499 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11500 +
11501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11502 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11503 +#else
11504 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11505 +#endif
11506 +
11507 +#endif
11508 +
11509 #ifndef __ASSEMBLY__
11510
11511 extern int direct_gbpages;
11512 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11513 * dst and src can be on the same page, but the range must not overlap,
11514 * and must not cross a page boundary.
11515 */
11516 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11517 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11518 {
11519 - memcpy(dst, src, count * sizeof(pgd_t));
11520 + pax_open_kernel();
11521 + while (count--)
11522 + *dst++ = *src++;
11523 + pax_close_kernel();
11524 }
11525
11526 +#ifdef CONFIG_PAX_PER_CPU_PGD
11527 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11528 +#endif
11529 +
11530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11531 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11532 +#else
11533 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11534 +#endif
11535
11536 #include <asm-generic/pgtable.h>
11537 #endif /* __ASSEMBLY__ */
11538 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11539 index 0c92113..34a77c6 100644
11540 --- a/arch/x86/include/asm/pgtable_32.h
11541 +++ b/arch/x86/include/asm/pgtable_32.h
11542 @@ -25,9 +25,6 @@
11543 struct mm_struct;
11544 struct vm_area_struct;
11545
11546 -extern pgd_t swapper_pg_dir[1024];
11547 -extern pgd_t initial_page_table[1024];
11548 -
11549 static inline void pgtable_cache_init(void) { }
11550 static inline void check_pgt_cache(void) { }
11551 void paging_init(void);
11552 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11553 # include <asm/pgtable-2level.h>
11554 #endif
11555
11556 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11557 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11558 +#ifdef CONFIG_X86_PAE
11559 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11560 +#endif
11561 +
11562 #if defined(CONFIG_HIGHPTE)
11563 #define pte_offset_map(dir, address) \
11564 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11565 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11566 /* Clear a kernel PTE and flush it from the TLB */
11567 #define kpte_clear_flush(ptep, vaddr) \
11568 do { \
11569 + pax_open_kernel(); \
11570 pte_clear(&init_mm, (vaddr), (ptep)); \
11571 + pax_close_kernel(); \
11572 __flush_tlb_one((vaddr)); \
11573 } while (0)
11574
11575 @@ -74,6 +79,9 @@ do { \
11576
11577 #endif /* !__ASSEMBLY__ */
11578
11579 +#define HAVE_ARCH_UNMAPPED_AREA
11580 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11581 +
11582 /*
11583 * kern_addr_valid() is (1) for FLATMEM and (0) for
11584 * SPARSEMEM and DISCONTIGMEM
11585 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11586 index ed5903b..c7fe163 100644
11587 --- a/arch/x86/include/asm/pgtable_32_types.h
11588 +++ b/arch/x86/include/asm/pgtable_32_types.h
11589 @@ -8,7 +8,7 @@
11590 */
11591 #ifdef CONFIG_X86_PAE
11592 # include <asm/pgtable-3level_types.h>
11593 -# define PMD_SIZE (1UL << PMD_SHIFT)
11594 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11595 # define PMD_MASK (~(PMD_SIZE - 1))
11596 #else
11597 # include <asm/pgtable-2level_types.h>
11598 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11599 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11600 #endif
11601
11602 +#ifdef CONFIG_PAX_KERNEXEC
11603 +#ifndef __ASSEMBLY__
11604 +extern unsigned char MODULES_EXEC_VADDR[];
11605 +extern unsigned char MODULES_EXEC_END[];
11606 +#endif
11607 +#include <asm/boot.h>
11608 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11609 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11610 +#else
11611 +#define ktla_ktva(addr) (addr)
11612 +#define ktva_ktla(addr) (addr)
11613 +#endif
11614 +
11615 #define MODULES_VADDR VMALLOC_START
11616 #define MODULES_END VMALLOC_END
11617 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11618 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11619 index 975f709..9f779c9 100644
11620 --- a/arch/x86/include/asm/pgtable_64.h
11621 +++ b/arch/x86/include/asm/pgtable_64.h
11622 @@ -16,10 +16,14 @@
11623
11624 extern pud_t level3_kernel_pgt[512];
11625 extern pud_t level3_ident_pgt[512];
11626 +extern pud_t level3_vmalloc_start_pgt[512];
11627 +extern pud_t level3_vmalloc_end_pgt[512];
11628 +extern pud_t level3_vmemmap_pgt[512];
11629 +extern pud_t level2_vmemmap_pgt[512];
11630 extern pmd_t level2_kernel_pgt[512];
11631 extern pmd_t level2_fixmap_pgt[512];
11632 -extern pmd_t level2_ident_pgt[512];
11633 -extern pgd_t init_level4_pgt[];
11634 +extern pmd_t level2_ident_pgt[512*2];
11635 +extern pgd_t init_level4_pgt[512];
11636
11637 #define swapper_pg_dir init_level4_pgt
11638
11639 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11640
11641 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11642 {
11643 + pax_open_kernel();
11644 *pmdp = pmd;
11645 + pax_close_kernel();
11646 }
11647
11648 static inline void native_pmd_clear(pmd_t *pmd)
11649 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11650
11651 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11652 {
11653 + pax_open_kernel();
11654 *pudp = pud;
11655 + pax_close_kernel();
11656 }
11657
11658 static inline void native_pud_clear(pud_t *pud)
11659 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11660
11661 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11662 {
11663 + pax_open_kernel();
11664 + *pgdp = pgd;
11665 + pax_close_kernel();
11666 +}
11667 +
11668 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11669 +{
11670 *pgdp = pgd;
11671 }
11672
11673 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11674 index 766ea16..5b96cb3 100644
11675 --- a/arch/x86/include/asm/pgtable_64_types.h
11676 +++ b/arch/x86/include/asm/pgtable_64_types.h
11677 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11678 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11679 #define MODULES_END _AC(0xffffffffff000000, UL)
11680 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11681 +#define MODULES_EXEC_VADDR MODULES_VADDR
11682 +#define MODULES_EXEC_END MODULES_END
11683 +
11684 +#define ktla_ktva(addr) (addr)
11685 +#define ktva_ktla(addr) (addr)
11686
11687 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11688 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11689 index 013286a..8b42f4f 100644
11690 --- a/arch/x86/include/asm/pgtable_types.h
11691 +++ b/arch/x86/include/asm/pgtable_types.h
11692 @@ -16,13 +16,12 @@
11693 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11694 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11695 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11696 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11697 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11698 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11699 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11700 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11701 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11702 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11703 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11704 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11705 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11706 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11707
11708 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11709 @@ -40,7 +39,6 @@
11710 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11711 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11712 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11713 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11714 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11715 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11716 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11717 @@ -57,8 +55,10 @@
11718
11719 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11720 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11721 -#else
11722 +#elif defined(CONFIG_KMEMCHECK)
11723 #define _PAGE_NX (_AT(pteval_t, 0))
11724 +#else
11725 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11726 #endif
11727
11728 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11729 @@ -96,6 +96,9 @@
11730 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11731 _PAGE_ACCESSED)
11732
11733 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11734 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11735 +
11736 #define __PAGE_KERNEL_EXEC \
11737 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11738 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11739 @@ -106,7 +109,7 @@
11740 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11741 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11742 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11743 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11744 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11745 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11746 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11747 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11748 @@ -168,8 +171,8 @@
11749 * bits are combined, this will alow user to access the high address mapped
11750 * VDSO in the presence of CONFIG_COMPAT_VDSO
11751 */
11752 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11753 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11754 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11755 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11756 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11757 #endif
11758
11759 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11760 {
11761 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11762 }
11763 +#endif
11764
11765 +#if PAGETABLE_LEVELS == 3
11766 +#include <asm-generic/pgtable-nopud.h>
11767 +#endif
11768 +
11769 +#if PAGETABLE_LEVELS == 2
11770 +#include <asm-generic/pgtable-nopmd.h>
11771 +#endif
11772 +
11773 +#ifndef __ASSEMBLY__
11774 #if PAGETABLE_LEVELS > 3
11775 typedef struct { pudval_t pud; } pud_t;
11776
11777 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11778 return pud.pud;
11779 }
11780 #else
11781 -#include <asm-generic/pgtable-nopud.h>
11782 -
11783 static inline pudval_t native_pud_val(pud_t pud)
11784 {
11785 return native_pgd_val(pud.pgd);
11786 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11787 return pmd.pmd;
11788 }
11789 #else
11790 -#include <asm-generic/pgtable-nopmd.h>
11791 -
11792 static inline pmdval_t native_pmd_val(pmd_t pmd)
11793 {
11794 return native_pgd_val(pmd.pud.pgd);
11795 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11796
11797 extern pteval_t __supported_pte_mask;
11798 extern void set_nx(void);
11799 -extern int nx_enabled;
11800
11801 #define pgprot_writecombine pgprot_writecombine
11802 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11803 diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
11804 index f8ab3ea..67889db 100644
11805 --- a/arch/x86/include/asm/processor-flags.h
11806 +++ b/arch/x86/include/asm/processor-flags.h
11807 @@ -63,6 +63,7 @@
11808 #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
11809 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
11810 #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
11811 +#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
11812
11813 /*
11814 * x86-64 Task Priority Register, CR8
11815 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11816 index 39bc577..538233f 100644
11817 --- a/arch/x86/include/asm/processor.h
11818 +++ b/arch/x86/include/asm/processor.h
11819 @@ -276,7 +276,7 @@ struct tss_struct {
11820
11821 } ____cacheline_aligned;
11822
11823 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11824 +extern struct tss_struct init_tss[NR_CPUS];
11825
11826 /*
11827 * Save the original ist values for checking stack pointers during debugging
11828 @@ -809,11 +809,18 @@ static inline void spin_lock_prefetch(const void *x)
11829 */
11830 #define TASK_SIZE PAGE_OFFSET
11831 #define TASK_SIZE_MAX TASK_SIZE
11832 +
11833 +#ifdef CONFIG_PAX_SEGMEXEC
11834 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11835 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11836 +#else
11837 #define STACK_TOP TASK_SIZE
11838 -#define STACK_TOP_MAX STACK_TOP
11839 +#endif
11840 +
11841 +#define STACK_TOP_MAX TASK_SIZE
11842
11843 #define INIT_THREAD { \
11844 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11845 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11846 .vm86_info = NULL, \
11847 .sysenter_cs = __KERNEL_CS, \
11848 .io_bitmap_ptr = NULL, \
11849 @@ -827,7 +834,7 @@ static inline void spin_lock_prefetch(const void *x)
11850 */
11851 #define INIT_TSS { \
11852 .x86_tss = { \
11853 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11854 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11855 .ss0 = __KERNEL_DS, \
11856 .ss1 = __KERNEL_CS, \
11857 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11858 @@ -838,11 +845,7 @@ static inline void spin_lock_prefetch(const void *x)
11859 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11860
11861 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11862 -#define KSTK_TOP(info) \
11863 -({ \
11864 - unsigned long *__ptr = (unsigned long *)(info); \
11865 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11866 -})
11867 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11868
11869 /*
11870 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11871 @@ -857,7 +860,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11872 #define task_pt_regs(task) \
11873 ({ \
11874 struct pt_regs *__regs__; \
11875 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11876 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11877 __regs__ - 1; \
11878 })
11879
11880 @@ -867,13 +870,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11881 /*
11882 * User space process size. 47bits minus one guard page.
11883 */
11884 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11885 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11886
11887 /* This decides where the kernel will search for a free chunk of vm
11888 * space during mmap's.
11889 */
11890 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11891 - 0xc0000000 : 0xFFFFe000)
11892 + 0xc0000000 : 0xFFFFf000)
11893
11894 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11895 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11896 @@ -884,11 +887,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11897 #define STACK_TOP_MAX TASK_SIZE_MAX
11898
11899 #define INIT_THREAD { \
11900 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11901 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11902 }
11903
11904 #define INIT_TSS { \
11905 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11906 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11907 }
11908
11909 /*
11910 @@ -916,6 +919,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11911 */
11912 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11913
11914 +#ifdef CONFIG_PAX_SEGMEXEC
11915 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11916 +#endif
11917 +
11918 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11919
11920 /* Get/set a process' ability to use the timestamp counter instruction */
11921 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11922 #define cpu_has_amd_erratum(x) (false)
11923 #endif /* CONFIG_CPU_SUP_AMD */
11924
11925 -extern unsigned long arch_align_stack(unsigned long sp);
11926 +#define arch_align_stack(x) ((x) & ~0xfUL)
11927 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11928
11929 void default_idle(void);
11930 bool set_pm_idle_to_default(void);
11931
11932 -void stop_this_cpu(void *dummy);
11933 +void stop_this_cpu(void *dummy) __noreturn;
11934
11935 #endif /* _ASM_X86_PROCESSOR_H */
11936 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11937 index dcfde52..dbfea06 100644
11938 --- a/arch/x86/include/asm/ptrace.h
11939 +++ b/arch/x86/include/asm/ptrace.h
11940 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11941 }
11942
11943 /*
11944 - * user_mode_vm(regs) determines whether a register set came from user mode.
11945 + * user_mode(regs) determines whether a register set came from user mode.
11946 * This is true if V8086 mode was enabled OR if the register set was from
11947 * protected mode with RPL-3 CS value. This tricky test checks that with
11948 * one comparison. Many places in the kernel can bypass this full check
11949 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11950 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11951 + * be used.
11952 */
11953 -static inline int user_mode(struct pt_regs *regs)
11954 +static inline int user_mode_novm(struct pt_regs *regs)
11955 {
11956 #ifdef CONFIG_X86_32
11957 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11958 #else
11959 - return !!(regs->cs & 3);
11960 + return !!(regs->cs & SEGMENT_RPL_MASK);
11961 #endif
11962 }
11963
11964 -static inline int user_mode_vm(struct pt_regs *regs)
11965 +static inline int user_mode(struct pt_regs *regs)
11966 {
11967 #ifdef CONFIG_X86_32
11968 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11969 USER_RPL;
11970 #else
11971 - return user_mode(regs);
11972 + return user_mode_novm(regs);
11973 #endif
11974 }
11975
11976 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11977 #ifdef CONFIG_X86_64
11978 static inline bool user_64bit_mode(struct pt_regs *regs)
11979 {
11980 + unsigned long cs = regs->cs & 0xffff;
11981 #ifndef CONFIG_PARAVIRT
11982 /*
11983 * On non-paravirt systems, this is the only long mode CPL 3
11984 * selector. We do not allow long mode selectors in the LDT.
11985 */
11986 - return regs->cs == __USER_CS;
11987 + return cs == __USER_CS;
11988 #else
11989 /* Headers are too twisted for this to go in paravirt.h. */
11990 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11991 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11992 #endif
11993 }
11994 #endif
11995 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
11996 index fce3f4a..3f69f2a 100644
11997 --- a/arch/x86/include/asm/realmode.h
11998 +++ b/arch/x86/include/asm/realmode.h
11999 @@ -30,7 +30,7 @@ struct real_mode_header {
12000 struct trampoline_header {
12001 #ifdef CONFIG_X86_32
12002 u32 start;
12003 - u16 gdt_pad;
12004 + u16 boot_cs;
12005 u16 gdt_limit;
12006 u32 gdt_base;
12007 #else
12008 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12009 index 92f29706..d0a1a53 100644
12010 --- a/arch/x86/include/asm/reboot.h
12011 +++ b/arch/x86/include/asm/reboot.h
12012 @@ -6,19 +6,19 @@
12013 struct pt_regs;
12014
12015 struct machine_ops {
12016 - void (*restart)(char *cmd);
12017 - void (*halt)(void);
12018 - void (*power_off)(void);
12019 + void (* __noreturn restart)(char *cmd);
12020 + void (* __noreturn halt)(void);
12021 + void (* __noreturn power_off)(void);
12022 void (*shutdown)(void);
12023 void (*crash_shutdown)(struct pt_regs *);
12024 - void (*emergency_restart)(void);
12025 -};
12026 + void (* __noreturn emergency_restart)(void);
12027 +} __no_const;
12028
12029 extern struct machine_ops machine_ops;
12030
12031 void native_machine_crash_shutdown(struct pt_regs *regs);
12032 void native_machine_shutdown(void);
12033 -void machine_real_restart(unsigned int type);
12034 +void __noreturn machine_real_restart(unsigned int type);
12035 /* These must match dispatch_table in reboot_32.S */
12036 #define MRR_BIOS 0
12037 #define MRR_APM 1
12038 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12039 index 2dbe4a7..ce1db00 100644
12040 --- a/arch/x86/include/asm/rwsem.h
12041 +++ b/arch/x86/include/asm/rwsem.h
12042 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12043 {
12044 asm volatile("# beginning down_read\n\t"
12045 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12046 +
12047 +#ifdef CONFIG_PAX_REFCOUNT
12048 + "jno 0f\n"
12049 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12050 + "int $4\n0:\n"
12051 + _ASM_EXTABLE(0b, 0b)
12052 +#endif
12053 +
12054 /* adds 0x00000001 */
12055 " jns 1f\n"
12056 " call call_rwsem_down_read_failed\n"
12057 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12058 "1:\n\t"
12059 " mov %1,%2\n\t"
12060 " add %3,%2\n\t"
12061 +
12062 +#ifdef CONFIG_PAX_REFCOUNT
12063 + "jno 0f\n"
12064 + "sub %3,%2\n"
12065 + "int $4\n0:\n"
12066 + _ASM_EXTABLE(0b, 0b)
12067 +#endif
12068 +
12069 " jle 2f\n\t"
12070 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12071 " jnz 1b\n\t"
12072 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12073 long tmp;
12074 asm volatile("# beginning down_write\n\t"
12075 LOCK_PREFIX " xadd %1,(%2)\n\t"
12076 +
12077 +#ifdef CONFIG_PAX_REFCOUNT
12078 + "jno 0f\n"
12079 + "mov %1,(%2)\n"
12080 + "int $4\n0:\n"
12081 + _ASM_EXTABLE(0b, 0b)
12082 +#endif
12083 +
12084 /* adds 0xffff0001, returns the old value */
12085 " test %1,%1\n\t"
12086 /* was the count 0 before? */
12087 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12088 long tmp;
12089 asm volatile("# beginning __up_read\n\t"
12090 LOCK_PREFIX " xadd %1,(%2)\n\t"
12091 +
12092 +#ifdef CONFIG_PAX_REFCOUNT
12093 + "jno 0f\n"
12094 + "mov %1,(%2)\n"
12095 + "int $4\n0:\n"
12096 + _ASM_EXTABLE(0b, 0b)
12097 +#endif
12098 +
12099 /* subtracts 1, returns the old value */
12100 " jns 1f\n\t"
12101 " call call_rwsem_wake\n" /* expects old value in %edx */
12102 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12103 long tmp;
12104 asm volatile("# beginning __up_write\n\t"
12105 LOCK_PREFIX " xadd %1,(%2)\n\t"
12106 +
12107 +#ifdef CONFIG_PAX_REFCOUNT
12108 + "jno 0f\n"
12109 + "mov %1,(%2)\n"
12110 + "int $4\n0:\n"
12111 + _ASM_EXTABLE(0b, 0b)
12112 +#endif
12113 +
12114 /* subtracts 0xffff0001, returns the old value */
12115 " jns 1f\n\t"
12116 " call call_rwsem_wake\n" /* expects old value in %edx */
12117 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12118 {
12119 asm volatile("# beginning __downgrade_write\n\t"
12120 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12121 +
12122 +#ifdef CONFIG_PAX_REFCOUNT
12123 + "jno 0f\n"
12124 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12125 + "int $4\n0:\n"
12126 + _ASM_EXTABLE(0b, 0b)
12127 +#endif
12128 +
12129 /*
12130 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12131 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12132 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12133 */
12134 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12135 {
12136 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12137 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12138 +
12139 +#ifdef CONFIG_PAX_REFCOUNT
12140 + "jno 0f\n"
12141 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12142 + "int $4\n0:\n"
12143 + _ASM_EXTABLE(0b, 0b)
12144 +#endif
12145 +
12146 : "+m" (sem->count)
12147 : "er" (delta));
12148 }
12149 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12150 */
12151 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12152 {
12153 - return delta + xadd(&sem->count, delta);
12154 + return delta + xadd_check_overflow(&sem->count, delta);
12155 }
12156
12157 #endif /* __KERNEL__ */
12158 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12159 index c48a950..c6d7468 100644
12160 --- a/arch/x86/include/asm/segment.h
12161 +++ b/arch/x86/include/asm/segment.h
12162 @@ -64,10 +64,15 @@
12163 * 26 - ESPFIX small SS
12164 * 27 - per-cpu [ offset to per-cpu data area ]
12165 * 28 - stack_canary-20 [ for stack protector ]
12166 - * 29 - unused
12167 - * 30 - unused
12168 + * 29 - PCI BIOS CS
12169 + * 30 - PCI BIOS DS
12170 * 31 - TSS for double fault handler
12171 */
12172 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12173 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12174 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12175 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12176 +
12177 #define GDT_ENTRY_TLS_MIN 6
12178 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12179
12180 @@ -79,6 +84,8 @@
12181
12182 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12183
12184 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12185 +
12186 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12187
12188 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12189 @@ -104,6 +111,12 @@
12190 #define __KERNEL_STACK_CANARY 0
12191 #endif
12192
12193 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12194 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12195 +
12196 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12197 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12198 +
12199 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12200
12201 /*
12202 @@ -141,7 +154,7 @@
12203 */
12204
12205 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12206 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12207 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12208
12209
12210 #else
12211 @@ -165,6 +178,8 @@
12212 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12213 #define __USER32_DS __USER_DS
12214
12215 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12216 +
12217 #define GDT_ENTRY_TSS 8 /* needs two entries */
12218 #define GDT_ENTRY_LDT 10 /* needs two entries */
12219 #define GDT_ENTRY_TLS_MIN 12
12220 @@ -185,6 +200,7 @@
12221 #endif
12222
12223 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12224 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12225 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12226 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12227 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12228 @@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
12229 {
12230 unsigned long __limit;
12231 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12232 - return __limit + 1;
12233 + return __limit;
12234 }
12235
12236 #endif /* !__ASSEMBLY__ */
12237 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12238 index f483945..64a7851 100644
12239 --- a/arch/x86/include/asm/smp.h
12240 +++ b/arch/x86/include/asm/smp.h
12241 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12242 /* cpus sharing the last level cache: */
12243 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12244 DECLARE_PER_CPU(u16, cpu_llc_id);
12245 -DECLARE_PER_CPU(int, cpu_number);
12246 +DECLARE_PER_CPU(unsigned int, cpu_number);
12247
12248 static inline struct cpumask *cpu_sibling_mask(int cpu)
12249 {
12250 @@ -79,7 +79,7 @@ struct smp_ops {
12251
12252 void (*send_call_func_ipi)(const struct cpumask *mask);
12253 void (*send_call_func_single_ipi)(int cpu);
12254 -};
12255 +} __no_const;
12256
12257 /* Globals due to paravirt */
12258 extern void set_cpu_sibling_map(int cpu);
12259 @@ -195,14 +195,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12260 extern int safe_smp_processor_id(void);
12261
12262 #elif defined(CONFIG_X86_64_SMP)
12263 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12264 -
12265 -#define stack_smp_processor_id() \
12266 -({ \
12267 - struct thread_info *ti; \
12268 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12269 - ti->cpu; \
12270 -})
12271 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12272 +#define stack_smp_processor_id() raw_smp_processor_id()
12273 #define safe_smp_processor_id() smp_processor_id()
12274
12275 #endif
12276 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12277 index b315a33..8849ab0 100644
12278 --- a/arch/x86/include/asm/spinlock.h
12279 +++ b/arch/x86/include/asm/spinlock.h
12280 @@ -173,6 +173,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12281 static inline void arch_read_lock(arch_rwlock_t *rw)
12282 {
12283 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12284 +
12285 +#ifdef CONFIG_PAX_REFCOUNT
12286 + "jno 0f\n"
12287 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12288 + "int $4\n0:\n"
12289 + _ASM_EXTABLE(0b, 0b)
12290 +#endif
12291 +
12292 "jns 1f\n"
12293 "call __read_lock_failed\n\t"
12294 "1:\n"
12295 @@ -182,6 +190,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12296 static inline void arch_write_lock(arch_rwlock_t *rw)
12297 {
12298 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12299 +
12300 +#ifdef CONFIG_PAX_REFCOUNT
12301 + "jno 0f\n"
12302 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12303 + "int $4\n0:\n"
12304 + _ASM_EXTABLE(0b, 0b)
12305 +#endif
12306 +
12307 "jz 1f\n"
12308 "call __write_lock_failed\n\t"
12309 "1:\n"
12310 @@ -211,13 +227,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12311
12312 static inline void arch_read_unlock(arch_rwlock_t *rw)
12313 {
12314 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12315 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12316 +
12317 +#ifdef CONFIG_PAX_REFCOUNT
12318 + "jno 0f\n"
12319 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12320 + "int $4\n0:\n"
12321 + _ASM_EXTABLE(0b, 0b)
12322 +#endif
12323 +
12324 :"+m" (rw->lock) : : "memory");
12325 }
12326
12327 static inline void arch_write_unlock(arch_rwlock_t *rw)
12328 {
12329 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12330 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12331 +
12332 +#ifdef CONFIG_PAX_REFCOUNT
12333 + "jno 0f\n"
12334 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12335 + "int $4\n0:\n"
12336 + _ASM_EXTABLE(0b, 0b)
12337 +#endif
12338 +
12339 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12340 }
12341
12342 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12343 index 6a99859..03cb807 100644
12344 --- a/arch/x86/include/asm/stackprotector.h
12345 +++ b/arch/x86/include/asm/stackprotector.h
12346 @@ -47,7 +47,7 @@
12347 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12348 */
12349 #define GDT_STACK_CANARY_INIT \
12350 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12351 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12352
12353 /*
12354 * Initialize the stackprotector canary value.
12355 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12356
12357 static inline void load_stack_canary_segment(void)
12358 {
12359 -#ifdef CONFIG_X86_32
12360 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12361 asm volatile ("mov %0, %%gs" : : "r" (0));
12362 #endif
12363 }
12364 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12365 index 70bbe39..4ae2bd4 100644
12366 --- a/arch/x86/include/asm/stacktrace.h
12367 +++ b/arch/x86/include/asm/stacktrace.h
12368 @@ -11,28 +11,20 @@
12369
12370 extern int kstack_depth_to_print;
12371
12372 -struct thread_info;
12373 +struct task_struct;
12374 struct stacktrace_ops;
12375
12376 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12377 - unsigned long *stack,
12378 - unsigned long bp,
12379 - const struct stacktrace_ops *ops,
12380 - void *data,
12381 - unsigned long *end,
12382 - int *graph);
12383 +typedef unsigned long walk_stack_t(struct task_struct *task,
12384 + void *stack_start,
12385 + unsigned long *stack,
12386 + unsigned long bp,
12387 + const struct stacktrace_ops *ops,
12388 + void *data,
12389 + unsigned long *end,
12390 + int *graph);
12391
12392 -extern unsigned long
12393 -print_context_stack(struct thread_info *tinfo,
12394 - unsigned long *stack, unsigned long bp,
12395 - const struct stacktrace_ops *ops, void *data,
12396 - unsigned long *end, int *graph);
12397 -
12398 -extern unsigned long
12399 -print_context_stack_bp(struct thread_info *tinfo,
12400 - unsigned long *stack, unsigned long bp,
12401 - const struct stacktrace_ops *ops, void *data,
12402 - unsigned long *end, int *graph);
12403 +extern walk_stack_t print_context_stack;
12404 +extern walk_stack_t print_context_stack_bp;
12405
12406 /* Generic stack tracer with callbacks */
12407
12408 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12409 void (*address)(void *data, unsigned long address, int reliable);
12410 /* On negative return stop dumping */
12411 int (*stack)(void *data, char *name);
12412 - walk_stack_t walk_stack;
12413 + walk_stack_t *walk_stack;
12414 };
12415
12416 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12417 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12418 index 4ec45b3..a4f0a8a 100644
12419 --- a/arch/x86/include/asm/switch_to.h
12420 +++ b/arch/x86/include/asm/switch_to.h
12421 @@ -108,7 +108,7 @@ do { \
12422 "call __switch_to\n\t" \
12423 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12424 __switch_canary \
12425 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12426 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12427 "movq %%rax,%%rdi\n\t" \
12428 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12429 "jnz ret_from_fork\n\t" \
12430 @@ -119,7 +119,7 @@ do { \
12431 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12432 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12433 [_tif_fork] "i" (_TIF_FORK), \
12434 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12435 + [thread_info] "m" (current_tinfo), \
12436 [current_task] "m" (current_task) \
12437 __switch_canary_iparam \
12438 : "memory", "cc" __EXTRA_CLOBBER)
12439 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12440 index 3fda9db4..4ca1c61 100644
12441 --- a/arch/x86/include/asm/sys_ia32.h
12442 +++ b/arch/x86/include/asm/sys_ia32.h
12443 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12444 struct old_sigaction32 __user *);
12445 asmlinkage long sys32_alarm(unsigned int);
12446
12447 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12448 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12449 asmlinkage long sys32_sysfs(int, u32, u32);
12450
12451 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12452 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12453 index 89f794f..1422765 100644
12454 --- a/arch/x86/include/asm/thread_info.h
12455 +++ b/arch/x86/include/asm/thread_info.h
12456 @@ -10,6 +10,7 @@
12457 #include <linux/compiler.h>
12458 #include <asm/page.h>
12459 #include <asm/types.h>
12460 +#include <asm/percpu.h>
12461
12462 /*
12463 * low level task data that entry.S needs immediate access to
12464 @@ -24,7 +25,6 @@ struct exec_domain;
12465 #include <linux/atomic.h>
12466
12467 struct thread_info {
12468 - struct task_struct *task; /* main task structure */
12469 struct exec_domain *exec_domain; /* execution domain */
12470 __u32 flags; /* low level flags */
12471 __u32 status; /* thread synchronous flags */
12472 @@ -34,19 +34,13 @@ struct thread_info {
12473 mm_segment_t addr_limit;
12474 struct restart_block restart_block;
12475 void __user *sysenter_return;
12476 -#ifdef CONFIG_X86_32
12477 - unsigned long previous_esp; /* ESP of the previous stack in
12478 - case of nested (IRQ) stacks
12479 - */
12480 - __u8 supervisor_stack[0];
12481 -#endif
12482 + unsigned long lowest_stack;
12483 unsigned int sig_on_uaccess_error:1;
12484 unsigned int uaccess_err:1; /* uaccess failed */
12485 };
12486
12487 -#define INIT_THREAD_INFO(tsk) \
12488 +#define INIT_THREAD_INFO \
12489 { \
12490 - .task = &tsk, \
12491 .exec_domain = &default_exec_domain, \
12492 .flags = 0, \
12493 .cpu = 0, \
12494 @@ -57,7 +51,7 @@ struct thread_info {
12495 }, \
12496 }
12497
12498 -#define init_thread_info (init_thread_union.thread_info)
12499 +#define init_thread_info (init_thread_union.stack)
12500 #define init_stack (init_thread_union.stack)
12501
12502 #else /* !__ASSEMBLY__ */
12503 @@ -98,6 +92,7 @@ struct thread_info {
12504 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12505 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12506 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12507 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12508
12509 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12510 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12511 @@ -122,16 +117,18 @@ struct thread_info {
12512 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12513 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12514 #define _TIF_X32 (1 << TIF_X32)
12515 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12516
12517 /* work to do in syscall_trace_enter() */
12518 #define _TIF_WORK_SYSCALL_ENTRY \
12519 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12520 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12521 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12522 + _TIF_GRSEC_SETXID)
12523
12524 /* work to do in syscall_trace_leave() */
12525 #define _TIF_WORK_SYSCALL_EXIT \
12526 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12527 - _TIF_SYSCALL_TRACEPOINT)
12528 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12529
12530 /* work to do on interrupt/exception return */
12531 #define _TIF_WORK_MASK \
12532 @@ -141,7 +138,8 @@ struct thread_info {
12533
12534 /* work to do on any return to user space */
12535 #define _TIF_ALLWORK_MASK \
12536 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12537 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12538 + _TIF_GRSEC_SETXID)
12539
12540 /* Only used for 64 bit */
12541 #define _TIF_DO_NOTIFY_MASK \
12542 @@ -157,45 +155,40 @@ struct thread_info {
12543
12544 #define PREEMPT_ACTIVE 0x10000000
12545
12546 -#ifdef CONFIG_X86_32
12547 -
12548 -#define STACK_WARN (THREAD_SIZE/8)
12549 -/*
12550 - * macros/functions for gaining access to the thread information structure
12551 - *
12552 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12553 - */
12554 -#ifndef __ASSEMBLY__
12555 -
12556 -
12557 -/* how to get the current stack pointer from C */
12558 -register unsigned long current_stack_pointer asm("esp") __used;
12559 -
12560 -/* how to get the thread information struct from C */
12561 -static inline struct thread_info *current_thread_info(void)
12562 -{
12563 - return (struct thread_info *)
12564 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12565 -}
12566 -
12567 -#else /* !__ASSEMBLY__ */
12568 -
12569 +#ifdef __ASSEMBLY__
12570 /* how to get the thread information struct from ASM */
12571 #define GET_THREAD_INFO(reg) \
12572 - movl $-THREAD_SIZE, reg; \
12573 - andl %esp, reg
12574 + mov PER_CPU_VAR(current_tinfo), reg
12575
12576 /* use this one if reg already contains %esp */
12577 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12578 - andl $-THREAD_SIZE, reg
12579 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12580 +#else
12581 +/* how to get the thread information struct from C */
12582 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12583 +
12584 +static __always_inline struct thread_info *current_thread_info(void)
12585 +{
12586 + return this_cpu_read_stable(current_tinfo);
12587 +}
12588 +#endif
12589 +
12590 +#ifdef CONFIG_X86_32
12591 +
12592 +#define STACK_WARN (THREAD_SIZE/8)
12593 +/*
12594 + * macros/functions for gaining access to the thread information structure
12595 + *
12596 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12597 + */
12598 +#ifndef __ASSEMBLY__
12599 +
12600 +/* how to get the current stack pointer from C */
12601 +register unsigned long current_stack_pointer asm("esp") __used;
12602
12603 #endif
12604
12605 #else /* X86_32 */
12606
12607 -#include <asm/percpu.h>
12608 -#define KERNEL_STACK_OFFSET (5*8)
12609 -
12610 /*
12611 * macros/functions for gaining access to the thread information structure
12612 * preempt_count needs to be 1 initially, until the scheduler is functional.
12613 @@ -203,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
12614 #ifndef __ASSEMBLY__
12615 DECLARE_PER_CPU(unsigned long, kernel_stack);
12616
12617 -static inline struct thread_info *current_thread_info(void)
12618 -{
12619 - struct thread_info *ti;
12620 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
12621 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12622 - return ti;
12623 -}
12624 -
12625 -#else /* !__ASSEMBLY__ */
12626 -
12627 -/* how to get the thread information struct from ASM */
12628 -#define GET_THREAD_INFO(reg) \
12629 - movq PER_CPU_VAR(kernel_stack),reg ; \
12630 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12631 -
12632 -/*
12633 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12634 - * a certain register (to be used in assembler memory operands).
12635 - */
12636 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12637 -
12638 +/* how to get the current stack pointer from C */
12639 +register unsigned long current_stack_pointer asm("rsp") __used;
12640 #endif
12641
12642 #endif /* !X86_32 */
12643 @@ -284,5 +258,12 @@ static inline bool is_ia32_task(void)
12644 extern void arch_task_cache_init(void);
12645 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12646 extern void arch_release_task_struct(struct task_struct *tsk);
12647 +
12648 +#define __HAVE_THREAD_FUNCTIONS
12649 +#define task_thread_info(task) (&(task)->tinfo)
12650 +#define task_stack_page(task) ((task)->stack)
12651 +#define setup_thread_stack(p, org) do {} while (0)
12652 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12653 +
12654 #endif
12655 #endif /* _ASM_X86_THREAD_INFO_H */
12656 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12657 index e1f3a17..1ab364d 100644
12658 --- a/arch/x86/include/asm/uaccess.h
12659 +++ b/arch/x86/include/asm/uaccess.h
12660 @@ -7,12 +7,15 @@
12661 #include <linux/compiler.h>
12662 #include <linux/thread_info.h>
12663 #include <linux/string.h>
12664 +#include <linux/sched.h>
12665 #include <asm/asm.h>
12666 #include <asm/page.h>
12667
12668 #define VERIFY_READ 0
12669 #define VERIFY_WRITE 1
12670
12671 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12672 +
12673 /*
12674 * The fs value determines whether argument validity checking should be
12675 * performed or not. If get_fs() == USER_DS, checking is performed, with
12676 @@ -28,7 +31,12 @@
12677
12678 #define get_ds() (KERNEL_DS)
12679 #define get_fs() (current_thread_info()->addr_limit)
12680 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12681 +void __set_fs(mm_segment_t x);
12682 +void set_fs(mm_segment_t x);
12683 +#else
12684 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12685 +#endif
12686
12687 #define segment_eq(a, b) ((a).seg == (b).seg)
12688
12689 @@ -76,8 +84,33 @@
12690 * checks that the pointer is in the user space range - after calling
12691 * this function, memory access functions may still return -EFAULT.
12692 */
12693 -#define access_ok(type, addr, size) \
12694 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12695 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12696 +#define access_ok(type, addr, size) \
12697 +({ \
12698 + long __size = size; \
12699 + unsigned long __addr = (unsigned long)addr; \
12700 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12701 + unsigned long __end_ao = __addr + __size - 1; \
12702 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
12703 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12704 + while(__addr_ao <= __end_ao) { \
12705 + char __c_ao; \
12706 + __addr_ao += PAGE_SIZE; \
12707 + if (__size > PAGE_SIZE) \
12708 + cond_resched(); \
12709 + if (__get_user(__c_ao, (char __user *)__addr)) \
12710 + break; \
12711 + if (type != VERIFY_WRITE) { \
12712 + __addr = __addr_ao; \
12713 + continue; \
12714 + } \
12715 + if (__put_user(__c_ao, (char __user *)__addr)) \
12716 + break; \
12717 + __addr = __addr_ao; \
12718 + } \
12719 + } \
12720 + __ret_ao; \
12721 +})
12722
12723 /*
12724 * The exception table consists of pairs of addresses relative to the
12725 @@ -188,12 +221,20 @@ extern int __get_user_bad(void);
12726 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12727 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12728
12729 -
12730 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12731 +#define __copyuser_seg "gs;"
12732 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12733 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12734 +#else
12735 +#define __copyuser_seg
12736 +#define __COPYUSER_SET_ES
12737 +#define __COPYUSER_RESTORE_ES
12738 +#endif
12739
12740 #ifdef CONFIG_X86_32
12741 #define __put_user_asm_u64(x, addr, err, errret) \
12742 - asm volatile("1: movl %%eax,0(%2)\n" \
12743 - "2: movl %%edx,4(%2)\n" \
12744 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12745 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12746 "3:\n" \
12747 ".section .fixup,\"ax\"\n" \
12748 "4: movl %3,%0\n" \
12749 @@ -205,8 +246,8 @@ extern int __get_user_bad(void);
12750 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12751
12752 #define __put_user_asm_ex_u64(x, addr) \
12753 - asm volatile("1: movl %%eax,0(%1)\n" \
12754 - "2: movl %%edx,4(%1)\n" \
12755 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12756 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12757 "3:\n" \
12758 _ASM_EXTABLE_EX(1b, 2b) \
12759 _ASM_EXTABLE_EX(2b, 3b) \
12760 @@ -258,7 +299,7 @@ extern void __put_user_8(void);
12761 __typeof__(*(ptr)) __pu_val; \
12762 __chk_user_ptr(ptr); \
12763 might_fault(); \
12764 - __pu_val = x; \
12765 + __pu_val = (x); \
12766 switch (sizeof(*(ptr))) { \
12767 case 1: \
12768 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12769 @@ -379,7 +420,7 @@ do { \
12770 } while (0)
12771
12772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12773 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12774 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12775 "2:\n" \
12776 ".section .fixup,\"ax\"\n" \
12777 "3: mov %3,%0\n" \
12778 @@ -387,7 +428,7 @@ do { \
12779 " jmp 2b\n" \
12780 ".previous\n" \
12781 _ASM_EXTABLE(1b, 3b) \
12782 - : "=r" (err), ltype(x) \
12783 + : "=r" (err), ltype (x) \
12784 : "m" (__m(addr)), "i" (errret), "0" (err))
12785
12786 #define __get_user_size_ex(x, ptr, size) \
12787 @@ -412,7 +453,7 @@ do { \
12788 } while (0)
12789
12790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12791 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12792 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12793 "2:\n" \
12794 _ASM_EXTABLE_EX(1b, 2b) \
12795 : ltype(x) : "m" (__m(addr)))
12796 @@ -429,13 +470,24 @@ do { \
12797 int __gu_err; \
12798 unsigned long __gu_val; \
12799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12800 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12801 + (x) = (__typeof__(*(ptr)))__gu_val; \
12802 __gu_err; \
12803 })
12804
12805 /* FIXME: this hack is definitely wrong -AK */
12806 struct __large_struct { unsigned long buf[100]; };
12807 -#define __m(x) (*(struct __large_struct __user *)(x))
12808 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12809 +#define ____m(x) \
12810 +({ \
12811 + unsigned long ____x = (unsigned long)(x); \
12812 + if (____x < PAX_USER_SHADOW_BASE) \
12813 + ____x += PAX_USER_SHADOW_BASE; \
12814 + (void __user *)____x; \
12815 +})
12816 +#else
12817 +#define ____m(x) (x)
12818 +#endif
12819 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12820
12821 /*
12822 * Tell gcc we read from memory instead of writing: this is because
12823 @@ -443,7 +495,7 @@ struct __large_struct { unsigned long buf[100]; };
12824 * aliasing issues.
12825 */
12826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12827 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12828 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12829 "2:\n" \
12830 ".section .fixup,\"ax\"\n" \
12831 "3: mov %3,%0\n" \
12832 @@ -451,10 +503,10 @@ struct __large_struct { unsigned long buf[100]; };
12833 ".previous\n" \
12834 _ASM_EXTABLE(1b, 3b) \
12835 : "=r"(err) \
12836 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12837 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12838
12839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12840 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12841 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12842 "2:\n" \
12843 _ASM_EXTABLE_EX(1b, 2b) \
12844 : : ltype(x), "m" (__m(addr)))
12845 @@ -493,8 +545,12 @@ struct __large_struct { unsigned long buf[100]; };
12846 * On error, the variable @x is set to zero.
12847 */
12848
12849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12850 +#define __get_user(x, ptr) get_user((x), (ptr))
12851 +#else
12852 #define __get_user(x, ptr) \
12853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12854 +#endif
12855
12856 /**
12857 * __put_user: - Write a simple value into user space, with less checking.
12858 @@ -516,8 +572,12 @@ struct __large_struct { unsigned long buf[100]; };
12859 * Returns zero on success, or -EFAULT on error.
12860 */
12861
12862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12863 +#define __put_user(x, ptr) put_user((x), (ptr))
12864 +#else
12865 #define __put_user(x, ptr) \
12866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12867 +#endif
12868
12869 #define __get_user_unaligned __get_user
12870 #define __put_user_unaligned __put_user
12871 @@ -535,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; };
12872 #define get_user_ex(x, ptr) do { \
12873 unsigned long __gue_val; \
12874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12875 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12876 + (x) = (__typeof__(*(ptr)))__gue_val; \
12877 } while (0)
12878
12879 #ifdef CONFIG_X86_WP_WORKS_OK
12880 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12881 index 576e39b..ccd0a39 100644
12882 --- a/arch/x86/include/asm/uaccess_32.h
12883 +++ b/arch/x86/include/asm/uaccess_32.h
12884 @@ -11,15 +11,15 @@
12885 #include <asm/page.h>
12886
12887 unsigned long __must_check __copy_to_user_ll
12888 - (void __user *to, const void *from, unsigned long n);
12889 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll
12891 - (void *to, const void __user *from, unsigned long n);
12892 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nozero
12894 - (void *to, const void __user *from, unsigned long n);
12895 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896 unsigned long __must_check __copy_from_user_ll_nocache
12897 - (void *to, const void __user *from, unsigned long n);
12898 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12899 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900 - (void *to, const void __user *from, unsigned long n);
12901 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12902
12903 /**
12904 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12905 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12906 static __always_inline unsigned long __must_check
12907 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12908 {
12909 + if ((long)n < 0)
12910 + return n;
12911 +
12912 if (__builtin_constant_p(n)) {
12913 unsigned long ret;
12914
12915 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12916 return ret;
12917 }
12918 }
12919 + if (!__builtin_constant_p(n))
12920 + check_object_size(from, n, true);
12921 return __copy_to_user_ll(to, from, n);
12922 }
12923
12924 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12925 __copy_to_user(void __user *to, const void *from, unsigned long n)
12926 {
12927 might_fault();
12928 +
12929 return __copy_to_user_inatomic(to, from, n);
12930 }
12931
12932 static __always_inline unsigned long
12933 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12934 {
12935 + if ((long)n < 0)
12936 + return n;
12937 +
12938 /* Avoid zeroing the tail if the copy fails..
12939 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12940 * but as the zeroing behaviour is only significant when n is not
12941 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12942 __copy_from_user(void *to, const void __user *from, unsigned long n)
12943 {
12944 might_fault();
12945 +
12946 + if ((long)n < 0)
12947 + return n;
12948 +
12949 if (__builtin_constant_p(n)) {
12950 unsigned long ret;
12951
12952 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12953 return ret;
12954 }
12955 }
12956 + if (!__builtin_constant_p(n))
12957 + check_object_size(to, n, false);
12958 return __copy_from_user_ll(to, from, n);
12959 }
12960
12961 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12962 const void __user *from, unsigned long n)
12963 {
12964 might_fault();
12965 +
12966 + if ((long)n < 0)
12967 + return n;
12968 +
12969 if (__builtin_constant_p(n)) {
12970 unsigned long ret;
12971
12972 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12973 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12974 unsigned long n)
12975 {
12976 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12977 + if ((long)n < 0)
12978 + return n;
12979 +
12980 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12981 }
12982
12983 -unsigned long __must_check copy_to_user(void __user *to,
12984 - const void *from, unsigned long n);
12985 -unsigned long __must_check _copy_from_user(void *to,
12986 - const void __user *from,
12987 - unsigned long n);
12988 -
12989 +extern void copy_to_user_overflow(void)
12990 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12991 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12992 +#else
12993 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12994 +#endif
12995 +;
12996
12997 extern void copy_from_user_overflow(void)
12998 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12999 @@ -199,21 +222,65 @@ extern void copy_from_user_overflow(void)
13000 #endif
13001 ;
13002
13003 -static inline unsigned long __must_check copy_from_user(void *to,
13004 - const void __user *from,
13005 - unsigned long n)
13006 +/**
13007 + * copy_to_user: - Copy a block of data into user space.
13008 + * @to: Destination address, in user space.
13009 + * @from: Source address, in kernel space.
13010 + * @n: Number of bytes to copy.
13011 + *
13012 + * Context: User context only. This function may sleep.
13013 + *
13014 + * Copy data from kernel space to user space.
13015 + *
13016 + * Returns number of bytes that could not be copied.
13017 + * On success, this will be zero.
13018 + */
13019 +static inline unsigned long __must_check
13020 +copy_to_user(void __user *to, const void *from, unsigned long n)
13021 {
13022 - int sz = __compiletime_object_size(to);
13023 + size_t sz = __compiletime_object_size(from);
13024
13025 - if (likely(sz == -1 || sz >= n))
13026 - n = _copy_from_user(to, from, n);
13027 - else
13028 + if (unlikely(sz != (size_t)-1 && sz < n))
13029 + copy_to_user_overflow();
13030 + else if (access_ok(VERIFY_WRITE, to, n))
13031 + n = __copy_to_user(to, from, n);
13032 + return n;
13033 +}
13034 +
13035 +/**
13036 + * copy_from_user: - Copy a block of data from user space.
13037 + * @to: Destination address, in kernel space.
13038 + * @from: Source address, in user space.
13039 + * @n: Number of bytes to copy.
13040 + *
13041 + * Context: User context only. This function may sleep.
13042 + *
13043 + * Copy data from user space to kernel space.
13044 + *
13045 + * Returns number of bytes that could not be copied.
13046 + * On success, this will be zero.
13047 + *
13048 + * If some data could not be copied, this function will pad the copied
13049 + * data to the requested size using zero bytes.
13050 + */
13051 +static inline unsigned long __must_check
13052 +copy_from_user(void *to, const void __user *from, unsigned long n)
13053 +{
13054 + size_t sz = __compiletime_object_size(to);
13055 +
13056 + if (unlikely(sz != (size_t)-1 && sz < n))
13057 copy_from_user_overflow();
13058 -
13059 + else if (access_ok(VERIFY_READ, from, n))
13060 + n = __copy_from_user(to, from, n);
13061 + else if ((long)n > 0) {
13062 + if (!__builtin_constant_p(n))
13063 + check_object_size(to, n, false);
13064 + memset(to, 0, n);
13065 + }
13066 return n;
13067 }
13068
13069 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13070 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13071 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13072 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13073
13074 #endif /* _ASM_X86_UACCESS_32_H */
13075 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13076 index 8e796fb..468c55a 100644
13077 --- a/arch/x86/include/asm/uaccess_64.h
13078 +++ b/arch/x86/include/asm/uaccess_64.h
13079 @@ -10,6 +10,9 @@
13080 #include <asm/alternative.h>
13081 #include <asm/cpufeature.h>
13082 #include <asm/page.h>
13083 +#include <asm/pgtable.h>
13084 +
13085 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13086
13087 /*
13088 * Copy To/From Userspace
13089 @@ -17,12 +20,12 @@
13090
13091 /* Handles exceptions in both to and from, but doesn't do access_ok */
13092 __must_check unsigned long
13093 -copy_user_generic_string(void *to, const void *from, unsigned len);
13094 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13095 __must_check unsigned long
13096 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13097 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13098
13099 -static __always_inline __must_check unsigned long
13100 -copy_user_generic(void *to, const void *from, unsigned len)
13101 +static __always_inline __must_check __size_overflow(3) unsigned long
13102 +copy_user_generic(void *to, const void *from, unsigned long len)
13103 {
13104 unsigned ret;
13105
13106 @@ -32,142 +35,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13107 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13108 "=d" (len)),
13109 "1" (to), "2" (from), "3" (len)
13110 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13111 + : "memory", "rcx", "r8", "r9", "r11");
13112 return ret;
13113 }
13114
13115 +static __always_inline __must_check unsigned long
13116 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13117 +static __always_inline __must_check unsigned long
13118 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13119 __must_check unsigned long
13120 -_copy_to_user(void __user *to, const void *from, unsigned len);
13121 -__must_check unsigned long
13122 -_copy_from_user(void *to, const void __user *from, unsigned len);
13123 -__must_check unsigned long
13124 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13125 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13126 +
13127 +extern void copy_to_user_overflow(void)
13128 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13129 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13130 +#else
13131 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13132 +#endif
13133 +;
13134 +
13135 +extern void copy_from_user_overflow(void)
13136 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13137 + __compiletime_error("copy_from_user() buffer size is not provably correct")
13138 +#else
13139 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
13140 +#endif
13141 +;
13142
13143 static inline unsigned long __must_check copy_from_user(void *to,
13144 const void __user *from,
13145 unsigned long n)
13146 {
13147 - int sz = __compiletime_object_size(to);
13148 -
13149 might_fault();
13150 - if (likely(sz == -1 || sz >= n))
13151 - n = _copy_from_user(to, from, n);
13152 -#ifdef CONFIG_DEBUG_VM
13153 - else
13154 - WARN(1, "Buffer overflow detected!\n");
13155 -#endif
13156 +
13157 + if (access_ok(VERIFY_READ, from, n))
13158 + n = __copy_from_user(to, from, n);
13159 + else if (n < INT_MAX) {
13160 + if (!__builtin_constant_p(n))
13161 + check_object_size(to, n, false);
13162 + memset(to, 0, n);
13163 + }
13164 return n;
13165 }
13166
13167 static __always_inline __must_check
13168 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13169 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13170 {
13171 might_fault();
13172
13173 - return _copy_to_user(dst, src, size);
13174 + if (access_ok(VERIFY_WRITE, dst, size))
13175 + size = __copy_to_user(dst, src, size);
13176 + return size;
13177 }
13178
13179 static __always_inline __must_check
13180 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13181 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13182 {
13183 - int ret = 0;
13184 + size_t sz = __compiletime_object_size(dst);
13185 + unsigned ret = 0;
13186
13187 might_fault();
13188 - if (!__builtin_constant_p(size))
13189 - return copy_user_generic(dst, (__force void *)src, size);
13190 +
13191 + if (size > INT_MAX)
13192 + return size;
13193 +
13194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13195 + if (!__access_ok(VERIFY_READ, src, size))
13196 + return size;
13197 +#endif
13198 +
13199 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13200 + copy_from_user_overflow();
13201 + return size;
13202 + }
13203 +
13204 + if (!__builtin_constant_p(size)) {
13205 + check_object_size(dst, size, false);
13206 +
13207 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13208 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13209 + src += PAX_USER_SHADOW_BASE;
13210 +#endif
13211 +
13212 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13213 + }
13214 switch (size) {
13215 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13216 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13217 ret, "b", "b", "=q", 1);
13218 return ret;
13219 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13220 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13221 ret, "w", "w", "=r", 2);
13222 return ret;
13223 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13224 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13225 ret, "l", "k", "=r", 4);
13226 return ret;
13227 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13228 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13229 ret, "q", "", "=r", 8);
13230 return ret;
13231 case 10:
13232 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13233 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13234 ret, "q", "", "=r", 10);
13235 if (unlikely(ret))
13236 return ret;
13237 __get_user_asm(*(u16 *)(8 + (char *)dst),
13238 - (u16 __user *)(8 + (char __user *)src),
13239 + (const u16 __user *)(8 + (const char __user *)src),
13240 ret, "w", "w", "=r", 2);
13241 return ret;
13242 case 16:
13243 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13244 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13245 ret, "q", "", "=r", 16);
13246 if (unlikely(ret))
13247 return ret;
13248 __get_user_asm(*(u64 *)(8 + (char *)dst),
13249 - (u64 __user *)(8 + (char __user *)src),
13250 + (const u64 __user *)(8 + (const char __user *)src),
13251 ret, "q", "", "=r", 8);
13252 return ret;
13253 default:
13254 - return copy_user_generic(dst, (__force void *)src, size);
13255 +
13256 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13257 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13258 + src += PAX_USER_SHADOW_BASE;
13259 +#endif
13260 +
13261 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13262 }
13263 }
13264
13265 static __always_inline __must_check
13266 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13267 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13268 {
13269 - int ret = 0;
13270 + size_t sz = __compiletime_object_size(src);
13271 + unsigned ret = 0;
13272
13273 might_fault();
13274 - if (!__builtin_constant_p(size))
13275 - return copy_user_generic((__force void *)dst, src, size);
13276 +
13277 + if (size > INT_MAX)
13278 + return size;
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 + if (!__access_ok(VERIFY_WRITE, dst, size))
13282 + return size;
13283 +#endif
13284 +
13285 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13286 + copy_to_user_overflow();
13287 + return size;
13288 + }
13289 +
13290 + if (!__builtin_constant_p(size)) {
13291 + check_object_size(src, size, true);
13292 +
13293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13294 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13295 + dst += PAX_USER_SHADOW_BASE;
13296 +#endif
13297 +
13298 + return copy_user_generic((__force_kernel void *)dst, src, size);
13299 + }
13300 switch (size) {
13301 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13302 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13303 ret, "b", "b", "iq", 1);
13304 return ret;
13305 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13306 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13307 ret, "w", "w", "ir", 2);
13308 return ret;
13309 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13310 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13311 ret, "l", "k", "ir", 4);
13312 return ret;
13313 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13314 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13315 ret, "q", "", "er", 8);
13316 return ret;
13317 case 10:
13318 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13319 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13320 ret, "q", "", "er", 10);
13321 if (unlikely(ret))
13322 return ret;
13323 asm("":::"memory");
13324 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13325 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13326 ret, "w", "w", "ir", 2);
13327 return ret;
13328 case 16:
13329 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13330 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13331 ret, "q", "", "er", 16);
13332 if (unlikely(ret))
13333 return ret;
13334 asm("":::"memory");
13335 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13336 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13337 ret, "q", "", "er", 8);
13338 return ret;
13339 default:
13340 - return copy_user_generic((__force void *)dst, src, size);
13341 +
13342 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13343 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13344 + dst += PAX_USER_SHADOW_BASE;
13345 +#endif
13346 +
13347 + return copy_user_generic((__force_kernel void *)dst, src, size);
13348 }
13349 }
13350
13351 static __always_inline __must_check
13352 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13353 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13354 {
13355 - int ret = 0;
13356 + unsigned ret = 0;
13357
13358 might_fault();
13359 - if (!__builtin_constant_p(size))
13360 - return copy_user_generic((__force void *)dst,
13361 - (__force void *)src, size);
13362 +
13363 + if (size > INT_MAX)
13364 + return size;
13365 +
13366 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13367 + if (!__access_ok(VERIFY_READ, src, size))
13368 + return size;
13369 + if (!__access_ok(VERIFY_WRITE, dst, size))
13370 + return size;
13371 +#endif
13372 +
13373 + if (!__builtin_constant_p(size)) {
13374 +
13375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13376 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13377 + src += PAX_USER_SHADOW_BASE;
13378 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13379 + dst += PAX_USER_SHADOW_BASE;
13380 +#endif
13381 +
13382 + return copy_user_generic((__force_kernel void *)dst,
13383 + (__force_kernel const void *)src, size);
13384 + }
13385 switch (size) {
13386 case 1: {
13387 u8 tmp;
13388 - __get_user_asm(tmp, (u8 __user *)src,
13389 + __get_user_asm(tmp, (const u8 __user *)src,
13390 ret, "b", "b", "=q", 1);
13391 if (likely(!ret))
13392 __put_user_asm(tmp, (u8 __user *)dst,
13393 @@ -176,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13394 }
13395 case 2: {
13396 u16 tmp;
13397 - __get_user_asm(tmp, (u16 __user *)src,
13398 + __get_user_asm(tmp, (const u16 __user *)src,
13399 ret, "w", "w", "=r", 2);
13400 if (likely(!ret))
13401 __put_user_asm(tmp, (u16 __user *)dst,
13402 @@ -186,7 +285,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13403
13404 case 4: {
13405 u32 tmp;
13406 - __get_user_asm(tmp, (u32 __user *)src,
13407 + __get_user_asm(tmp, (const u32 __user *)src,
13408 ret, "l", "k", "=r", 4);
13409 if (likely(!ret))
13410 __put_user_asm(tmp, (u32 __user *)dst,
13411 @@ -195,7 +294,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13412 }
13413 case 8: {
13414 u64 tmp;
13415 - __get_user_asm(tmp, (u64 __user *)src,
13416 + __get_user_asm(tmp, (const u64 __user *)src,
13417 ret, "q", "", "=r", 8);
13418 if (likely(!ret))
13419 __put_user_asm(tmp, (u64 __user *)dst,
13420 @@ -203,44 +302,89 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13421 return ret;
13422 }
13423 default:
13424 - return copy_user_generic((__force void *)dst,
13425 - (__force void *)src, size);
13426 +
13427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13428 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13429 + src += PAX_USER_SHADOW_BASE;
13430 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13431 + dst += PAX_USER_SHADOW_BASE;
13432 +#endif
13433 +
13434 + return copy_user_generic((__force_kernel void *)dst,
13435 + (__force_kernel const void *)src, size);
13436 }
13437 }
13438
13439 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13440 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13441 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13442 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13443
13444 static __must_check __always_inline int
13445 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13446 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13447 {
13448 - return copy_user_generic(dst, (__force const void *)src, size);
13449 + if (size > INT_MAX)
13450 + return size;
13451 +
13452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13453 + if (!__access_ok(VERIFY_READ, src, size))
13454 + return size;
13455 +
13456 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13457 + src += PAX_USER_SHADOW_BASE;
13458 +#endif
13459 +
13460 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13461 }
13462
13463 -static __must_check __always_inline int
13464 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13465 +static __must_check __always_inline unsigned long
13466 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13467 {
13468 - return copy_user_generic((__force void *)dst, src, size);
13469 + if (size > INT_MAX)
13470 + return size;
13471 +
13472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13473 + if (!__access_ok(VERIFY_WRITE, dst, size))
13474 + return size;
13475 +
13476 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13477 + dst += PAX_USER_SHADOW_BASE;
13478 +#endif
13479 +
13480 + return copy_user_generic((__force_kernel void *)dst, src, size);
13481 }
13482
13483 -extern long __copy_user_nocache(void *dst, const void __user *src,
13484 - unsigned size, int zerorest);
13485 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13486 + unsigned long size, int zerorest) __size_overflow(3);
13487
13488 -static inline int
13489 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13490 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13491 {
13492 might_sleep();
13493 +
13494 + if (size > INT_MAX)
13495 + return size;
13496 +
13497 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13498 + if (!__access_ok(VERIFY_READ, src, size))
13499 + return size;
13500 +#endif
13501 +
13502 return __copy_user_nocache(dst, src, size, 1);
13503 }
13504
13505 -static inline int
13506 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13507 - unsigned size)
13508 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509 + unsigned long size)
13510 {
13511 + if (size > INT_MAX)
13512 + return size;
13513 +
13514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13515 + if (!__access_ok(VERIFY_READ, src, size))
13516 + return size;
13517 +#endif
13518 +
13519 return __copy_user_nocache(dst, src, size, 0);
13520 }
13521
13522 -unsigned long
13523 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13524 +extern unsigned long
13525 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13526
13527 #endif /* _ASM_X86_UACCESS_64_H */
13528 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13529 index bb05228..d763d5b 100644
13530 --- a/arch/x86/include/asm/vdso.h
13531 +++ b/arch/x86/include/asm/vdso.h
13532 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13533 #define VDSO32_SYMBOL(base, name) \
13534 ({ \
13535 extern const char VDSO32_##name[]; \
13536 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13537 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13538 })
13539 #endif
13540
13541 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
13542 index 5b238981..77fdd78 100644
13543 --- a/arch/x86/include/asm/word-at-a-time.h
13544 +++ b/arch/x86/include/asm/word-at-a-time.h
13545 @@ -11,7 +11,7 @@
13546 * and shift, for example.
13547 */
13548 struct word_at_a_time {
13549 - const unsigned long one_bits, high_bits;
13550 + unsigned long one_bits, high_bits;
13551 };
13552
13553 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
13554 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13555 index c090af1..7e7bf16 100644
13556 --- a/arch/x86/include/asm/x86_init.h
13557 +++ b/arch/x86/include/asm/x86_init.h
13558 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13559 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13560 void (*find_smp_config)(void);
13561 void (*get_smp_config)(unsigned int early);
13562 -};
13563 +} __no_const;
13564
13565 /**
13566 * struct x86_init_resources - platform specific resource related ops
13567 @@ -43,7 +43,7 @@ struct x86_init_resources {
13568 void (*probe_roms)(void);
13569 void (*reserve_resources)(void);
13570 char *(*memory_setup)(void);
13571 -};
13572 +} __no_const;
13573
13574 /**
13575 * struct x86_init_irqs - platform specific interrupt setup
13576 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13577 void (*pre_vector_init)(void);
13578 void (*intr_init)(void);
13579 void (*trap_init)(void);
13580 -};
13581 +} __no_const;
13582
13583 /**
13584 * struct x86_init_oem - oem platform specific customizing functions
13585 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13586 struct x86_init_oem {
13587 void (*arch_setup)(void);
13588 void (*banner)(void);
13589 -};
13590 +} __no_const;
13591
13592 /**
13593 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13594 @@ -77,7 +77,7 @@ struct x86_init_oem {
13595 */
13596 struct x86_init_mapping {
13597 void (*pagetable_reserve)(u64 start, u64 end);
13598 -};
13599 +} __no_const;
13600
13601 /**
13602 * struct x86_init_paging - platform specific paging functions
13603 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13604 struct x86_init_paging {
13605 void (*pagetable_setup_start)(pgd_t *base);
13606 void (*pagetable_setup_done)(pgd_t *base);
13607 -};
13608 +} __no_const;
13609
13610 /**
13611 * struct x86_init_timers - platform specific timer setup
13612 @@ -102,7 +102,7 @@ struct x86_init_timers {
13613 void (*tsc_pre_init)(void);
13614 void (*timer_init)(void);
13615 void (*wallclock_init)(void);
13616 -};
13617 +} __no_const;
13618
13619 /**
13620 * struct x86_init_iommu - platform specific iommu setup
13621 @@ -110,7 +110,7 @@ struct x86_init_timers {
13622 */
13623 struct x86_init_iommu {
13624 int (*iommu_init)(void);
13625 -};
13626 +} __no_const;
13627
13628 /**
13629 * struct x86_init_pci - platform specific pci init functions
13630 @@ -124,7 +124,7 @@ struct x86_init_pci {
13631 int (*init)(void);
13632 void (*init_irq)(void);
13633 void (*fixup_irqs)(void);
13634 -};
13635 +} __no_const;
13636
13637 /**
13638 * struct x86_init_ops - functions for platform specific setup
13639 @@ -140,7 +140,7 @@ struct x86_init_ops {
13640 struct x86_init_timers timers;
13641 struct x86_init_iommu iommu;
13642 struct x86_init_pci pci;
13643 -};
13644 +} __no_const;
13645
13646 /**
13647 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13648 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13649 void (*setup_percpu_clockev)(void);
13650 void (*early_percpu_clock_init)(void);
13651 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13652 -};
13653 +} __no_const;
13654
13655 /**
13656 * struct x86_platform_ops - platform specific runtime functions
13657 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13658 int (*i8042_detect)(void);
13659 void (*save_sched_clock_state)(void);
13660 void (*restore_sched_clock_state)(void);
13661 -};
13662 +} __no_const;
13663
13664 struct pci_dev;
13665
13666 @@ -186,14 +186,14 @@ struct x86_msi_ops {
13667 void (*teardown_msi_irq)(unsigned int irq);
13668 void (*teardown_msi_irqs)(struct pci_dev *dev);
13669 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13670 -};
13671 +} __no_const;
13672
13673 struct x86_io_apic_ops {
13674 void (*init) (void);
13675 unsigned int (*read) (unsigned int apic, unsigned int reg);
13676 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
13677 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
13678 -};
13679 +} __no_const;
13680
13681 extern struct x86_init_ops x86_init;
13682 extern struct x86_cpuinit_ops x86_cpuinit;
13683 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13684 index 8a1b6f9..a29c4e4 100644
13685 --- a/arch/x86/include/asm/xsave.h
13686 +++ b/arch/x86/include/asm/xsave.h
13687 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13688 {
13689 int err;
13690
13691 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13692 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13693 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13694 +#endif
13695 +
13696 /*
13697 * Clear the xsave header first, so that reserved fields are
13698 * initialized to zero.
13699 @@ -93,10 +98,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13700 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13701 {
13702 int err;
13703 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13704 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13705 u32 lmask = mask;
13706 u32 hmask = mask >> 32;
13707
13708 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13709 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13710 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13711 +#endif
13712 +
13713 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13714 "2:\n"
13715 ".section .fixup,\"ax\"\n"
13716 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13717 index 95bf99de..de9235c 100644
13718 --- a/arch/x86/kernel/acpi/sleep.c
13719 +++ b/arch/x86/kernel/acpi/sleep.c
13720 @@ -73,8 +73,12 @@ int acpi_suspend_lowlevel(void)
13721 #else /* CONFIG_64BIT */
13722 #ifdef CONFIG_SMP
13723 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13724 +
13725 + pax_open_kernel();
13726 early_gdt_descr.address =
13727 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13728 + pax_close_kernel();
13729 +
13730 initial_gs = per_cpu_offset(smp_processor_id());
13731 #endif
13732 initial_code = (unsigned long)wakeup_long64;
13733 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13734 index 7261083..5c12053 100644
13735 --- a/arch/x86/kernel/acpi/wakeup_32.S
13736 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13737 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13738 # and restore the stack ... but you need gdt for this to work
13739 movl saved_context_esp, %esp
13740
13741 - movl %cs:saved_magic, %eax
13742 - cmpl $0x12345678, %eax
13743 + cmpl $0x12345678, saved_magic
13744 jne bogus_magic
13745
13746 # jump to place where we left off
13747 - movl saved_eip, %eax
13748 - jmp *%eax
13749 + jmp *(saved_eip)
13750
13751 bogus_magic:
13752 jmp bogus_magic
13753 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13754 index 73ef56c..0238021 100644
13755 --- a/arch/x86/kernel/alternative.c
13756 +++ b/arch/x86/kernel/alternative.c
13757 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13758 */
13759 for (a = start; a < end; a++) {
13760 instr = (u8 *)&a->instr_offset + a->instr_offset;
13761 +
13762 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13763 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13764 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13765 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13766 +#endif
13767 +
13768 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13769 BUG_ON(a->replacementlen > a->instrlen);
13770 BUG_ON(a->instrlen > sizeof(insnbuf));
13771 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13772 for (poff = start; poff < end; poff++) {
13773 u8 *ptr = (u8 *)poff + *poff;
13774
13775 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13776 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13777 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13778 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13779 +#endif
13780 +
13781 if (!*poff || ptr < text || ptr >= text_end)
13782 continue;
13783 /* turn DS segment override prefix into lock prefix */
13784 - if (*ptr == 0x3e)
13785 + if (*ktla_ktva(ptr) == 0x3e)
13786 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13787 };
13788 mutex_unlock(&text_mutex);
13789 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13790 for (poff = start; poff < end; poff++) {
13791 u8 *ptr = (u8 *)poff + *poff;
13792
13793 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13794 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13795 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13796 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13797 +#endif
13798 +
13799 if (!*poff || ptr < text || ptr >= text_end)
13800 continue;
13801 /* turn lock prefix into DS segment override prefix */
13802 - if (*ptr == 0xf0)
13803 + if (*ktla_ktva(ptr) == 0xf0)
13804 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13805 };
13806 mutex_unlock(&text_mutex);
13807 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13808
13809 BUG_ON(p->len > MAX_PATCH_LEN);
13810 /* prep the buffer with the original instructions */
13811 - memcpy(insnbuf, p->instr, p->len);
13812 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13813 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13814 (unsigned long)p->instr, p->len);
13815
13816 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13817 if (smp_alt_once)
13818 free_init_pages("SMP alternatives",
13819 (unsigned long)__smp_locks,
13820 - (unsigned long)__smp_locks_end);
13821 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13822
13823 restart_nmi();
13824 }
13825 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13826 * instructions. And on the local CPU you need to be protected again NMI or MCE
13827 * handlers seeing an inconsistent instruction while you patch.
13828 */
13829 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13830 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13831 size_t len)
13832 {
13833 unsigned long flags;
13834 local_irq_save(flags);
13835 - memcpy(addr, opcode, len);
13836 +
13837 + pax_open_kernel();
13838 + memcpy(ktla_ktva(addr), opcode, len);
13839 sync_core();
13840 + pax_close_kernel();
13841 +
13842 local_irq_restore(flags);
13843 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13844 that causes hangs on some VIA CPUs. */
13845 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13846 */
13847 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13848 {
13849 - unsigned long flags;
13850 - char *vaddr;
13851 + unsigned char *vaddr = ktla_ktva(addr);
13852 struct page *pages[2];
13853 - int i;
13854 + size_t i;
13855
13856 if (!core_kernel_text((unsigned long)addr)) {
13857 - pages[0] = vmalloc_to_page(addr);
13858 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13859 + pages[0] = vmalloc_to_page(vaddr);
13860 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13861 } else {
13862 - pages[0] = virt_to_page(addr);
13863 + pages[0] = virt_to_page(vaddr);
13864 WARN_ON(!PageReserved(pages[0]));
13865 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13866 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13867 }
13868 BUG_ON(!pages[0]);
13869 - local_irq_save(flags);
13870 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13871 - if (pages[1])
13872 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13873 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13874 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13875 - clear_fixmap(FIX_TEXT_POKE0);
13876 - if (pages[1])
13877 - clear_fixmap(FIX_TEXT_POKE1);
13878 - local_flush_tlb();
13879 - sync_core();
13880 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13881 - that causes hangs on some VIA CPUs. */
13882 + text_poke_early(addr, opcode, len);
13883 for (i = 0; i < len; i++)
13884 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13885 - local_irq_restore(flags);
13886 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13887 return addr;
13888 }
13889
13890 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13891 index 39a222e..85a7767 100644
13892 --- a/arch/x86/kernel/apic/apic.c
13893 +++ b/arch/x86/kernel/apic/apic.c
13894 @@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
13895 /*
13896 * Debug level, exported for io_apic.c
13897 */
13898 -unsigned int apic_verbosity;
13899 +int apic_verbosity;
13900
13901 int pic_mode;
13902
13903 @@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13904 apic_write(APIC_ESR, 0);
13905 v1 = apic_read(APIC_ESR);
13906 ack_APIC_irq();
13907 - atomic_inc(&irq_err_count);
13908 + atomic_inc_unchecked(&irq_err_count);
13909
13910 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13911 smp_processor_id(), v0 , v1);
13912 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13913 index 5f0ff59..f9e01bc 100644
13914 --- a/arch/x86/kernel/apic/io_apic.c
13915 +++ b/arch/x86/kernel/apic/io_apic.c
13916 @@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13917 }
13918 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13919
13920 -void lock_vector_lock(void)
13921 +void lock_vector_lock(void) __acquires(vector_lock)
13922 {
13923 /* Used to the online set of cpus does not change
13924 * during assign_irq_vector.
13925 @@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
13926 raw_spin_lock(&vector_lock);
13927 }
13928
13929 -void unlock_vector_lock(void)
13930 +void unlock_vector_lock(void) __releases(vector_lock)
13931 {
13932 raw_spin_unlock(&vector_lock);
13933 }
13934 @@ -2369,7 +2369,7 @@ static void ack_apic_edge(struct irq_data *data)
13935 ack_APIC_irq();
13936 }
13937
13938 -atomic_t irq_mis_count;
13939 +atomic_unchecked_t irq_mis_count;
13940
13941 #ifdef CONFIG_GENERIC_PENDING_IRQ
13942 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
13943 @@ -2510,7 +2510,7 @@ static void ack_apic_level(struct irq_data *data)
13944 * at the cpu.
13945 */
13946 if (!(v & (1 << (i & 0x1f)))) {
13947 - atomic_inc(&irq_mis_count);
13948 + atomic_inc_unchecked(&irq_mis_count);
13949
13950 eoi_ioapic_irq(irq, cfg);
13951 }
13952 diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
13953 index 3fe9866..6abf259 100644
13954 --- a/arch/x86/kernel/apic/probe_64.c
13955 +++ b/arch/x86/kernel/apic/probe_64.c
13956 @@ -50,7 +50,7 @@ void __init default_setup_apic_routing(void)
13957
13958 if (is_vsmp_box()) {
13959 /* need to update phys_pkg_id */
13960 - apic->phys_pkg_id = apicid_phys_pkg_id;
13961 + *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
13962 }
13963 }
13964
13965 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13966 index 07b0c0d..1df6f42 100644
13967 --- a/arch/x86/kernel/apm_32.c
13968 +++ b/arch/x86/kernel/apm_32.c
13969 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13970 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13971 * even though they are called in protected mode.
13972 */
13973 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13974 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13975 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13976
13977 static const char driver_version[] = "1.16ac"; /* no spaces */
13978 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13979 BUG_ON(cpu != 0);
13980 gdt = get_cpu_gdt_table(cpu);
13981 save_desc_40 = gdt[0x40 / 8];
13982 +
13983 + pax_open_kernel();
13984 gdt[0x40 / 8] = bad_bios_desc;
13985 + pax_close_kernel();
13986
13987 apm_irq_save(flags);
13988 APM_DO_SAVE_SEGS;
13989 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13990 &call->esi);
13991 APM_DO_RESTORE_SEGS;
13992 apm_irq_restore(flags);
13993 +
13994 + pax_open_kernel();
13995 gdt[0x40 / 8] = save_desc_40;
13996 + pax_close_kernel();
13997 +
13998 put_cpu();
13999
14000 return call->eax & 0xff;
14001 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14002 BUG_ON(cpu != 0);
14003 gdt = get_cpu_gdt_table(cpu);
14004 save_desc_40 = gdt[0x40 / 8];
14005 +
14006 + pax_open_kernel();
14007 gdt[0x40 / 8] = bad_bios_desc;
14008 + pax_close_kernel();
14009
14010 apm_irq_save(flags);
14011 APM_DO_SAVE_SEGS;
14012 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14013 &call->eax);
14014 APM_DO_RESTORE_SEGS;
14015 apm_irq_restore(flags);
14016 +
14017 + pax_open_kernel();
14018 gdt[0x40 / 8] = save_desc_40;
14019 + pax_close_kernel();
14020 +
14021 put_cpu();
14022 return error;
14023 }
14024 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14025 * code to that CPU.
14026 */
14027 gdt = get_cpu_gdt_table(0);
14028 +
14029 + pax_open_kernel();
14030 set_desc_base(&gdt[APM_CS >> 3],
14031 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14032 set_desc_base(&gdt[APM_CS_16 >> 3],
14033 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14034 set_desc_base(&gdt[APM_DS >> 3],
14035 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14036 + pax_close_kernel();
14037
14038 proc_create("apm", 0, NULL, &apm_file_ops);
14039
14040 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14041 index 68de2dc..1f3c720 100644
14042 --- a/arch/x86/kernel/asm-offsets.c
14043 +++ b/arch/x86/kernel/asm-offsets.c
14044 @@ -33,6 +33,8 @@ void common(void) {
14045 OFFSET(TI_status, thread_info, status);
14046 OFFSET(TI_addr_limit, thread_info, addr_limit);
14047 OFFSET(TI_preempt_count, thread_info, preempt_count);
14048 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14049 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14050
14051 BLANK();
14052 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14053 @@ -53,8 +55,26 @@ void common(void) {
14054 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14055 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14056 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14057 +
14058 +#ifdef CONFIG_PAX_KERNEXEC
14059 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14060 #endif
14061
14062 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14063 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14064 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14065 +#ifdef CONFIG_X86_64
14066 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14067 +#endif
14068 +#endif
14069 +
14070 +#endif
14071 +
14072 + BLANK();
14073 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14074 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14075 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14076 +
14077 #ifdef CONFIG_XEN
14078 BLANK();
14079 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14080 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14081 index 1b4754f..fbb4227 100644
14082 --- a/arch/x86/kernel/asm-offsets_64.c
14083 +++ b/arch/x86/kernel/asm-offsets_64.c
14084 @@ -76,6 +76,7 @@ int main(void)
14085 BLANK();
14086 #undef ENTRY
14087
14088 + DEFINE(TSS_size, sizeof(struct tss_struct));
14089 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14090 BLANK();
14091
14092 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14093 index 6ab6aa2..8f71507 100644
14094 --- a/arch/x86/kernel/cpu/Makefile
14095 +++ b/arch/x86/kernel/cpu/Makefile
14096 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14097 CFLAGS_REMOVE_perf_event.o = -pg
14098 endif
14099
14100 -# Make sure load_percpu_segment has no stackprotector
14101 -nostackp := $(call cc-option, -fno-stack-protector)
14102 -CFLAGS_common.o := $(nostackp)
14103 -
14104 obj-y := intel_cacheinfo.o scattered.o topology.o
14105 obj-y += proc.o capflags.o powerflags.o common.o
14106 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14107 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14108 index 146bb62..ac9c74a 100644
14109 --- a/arch/x86/kernel/cpu/amd.c
14110 +++ b/arch/x86/kernel/cpu/amd.c
14111 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14112 unsigned int size)
14113 {
14114 /* AMD errata T13 (order #21922) */
14115 - if ((c->x86 == 6)) {
14116 + if (c->x86 == 6) {
14117 /* Duron Rev A0 */
14118 if (c->x86_model == 3 && c->x86_mask == 0)
14119 size = 64;
14120 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14121 index 6b9333b..4c3083a 100644
14122 --- a/arch/x86/kernel/cpu/common.c
14123 +++ b/arch/x86/kernel/cpu/common.c
14124 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14125
14126 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14127
14128 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14129 -#ifdef CONFIG_X86_64
14130 - /*
14131 - * We need valid kernel segments for data and code in long mode too
14132 - * IRET will check the segment types kkeil 2000/10/28
14133 - * Also sysret mandates a special GDT layout
14134 - *
14135 - * TLS descriptors are currently at a different place compared to i386.
14136 - * Hopefully nobody expects them at a fixed place (Wine?)
14137 - */
14138 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14139 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14140 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14141 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14142 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14143 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14144 -#else
14145 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14146 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14147 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14148 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14149 - /*
14150 - * Segments used for calling PnP BIOS have byte granularity.
14151 - * They code segments and data segments have fixed 64k limits,
14152 - * the transfer segment sizes are set at run time.
14153 - */
14154 - /* 32-bit code */
14155 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14156 - /* 16-bit code */
14157 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14158 - /* 16-bit data */
14159 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14160 - /* 16-bit data */
14161 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14162 - /* 16-bit data */
14163 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14164 - /*
14165 - * The APM segments have byte granularity and their bases
14166 - * are set at run time. All have 64k limits.
14167 - */
14168 - /* 32-bit code */
14169 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14170 - /* 16-bit code */
14171 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14172 - /* data */
14173 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14174 -
14175 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14176 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14177 - GDT_STACK_CANARY_INIT
14178 -#endif
14179 -} };
14180 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14181 -
14182 static int __init x86_xsave_setup(char *s)
14183 {
14184 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14185 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14186 {
14187 struct desc_ptr gdt_descr;
14188
14189 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14190 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14191 gdt_descr.size = GDT_SIZE - 1;
14192 load_gdt(&gdt_descr);
14193 /* Reload the per-cpu base */
14194 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14195 /* Filter out anything that depends on CPUID levels we don't have */
14196 filter_cpuid_features(c, true);
14197
14198 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14199 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14200 +#endif
14201 +
14202 /* If the model name is still unset, do table lookup. */
14203 if (!c->x86_model_id[0]) {
14204 const char *p;
14205 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14206 }
14207 __setup("clearcpuid=", setup_disablecpuid);
14208
14209 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14210 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14211 +
14212 #ifdef CONFIG_X86_64
14213 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14214 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14215 - (unsigned long) nmi_idt_table };
14216 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14217
14218 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14219 irq_stack_union) __aligned(PAGE_SIZE);
14220 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14221 EXPORT_PER_CPU_SYMBOL(current_task);
14222
14223 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14224 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14225 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14226 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14227
14228 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14229 @@ -1132,7 +1084,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14230 {
14231 memset(regs, 0, sizeof(struct pt_regs));
14232 regs->fs = __KERNEL_PERCPU;
14233 - regs->gs = __KERNEL_STACK_CANARY;
14234 + savesegment(gs, regs->gs);
14235
14236 return regs;
14237 }
14238 @@ -1187,7 +1139,7 @@ void __cpuinit cpu_init(void)
14239 int i;
14240
14241 cpu = stack_smp_processor_id();
14242 - t = &per_cpu(init_tss, cpu);
14243 + t = init_tss + cpu;
14244 oist = &per_cpu(orig_ist, cpu);
14245
14246 #ifdef CONFIG_NUMA
14247 @@ -1213,7 +1165,7 @@ void __cpuinit cpu_init(void)
14248 switch_to_new_gdt(cpu);
14249 loadsegment(fs, 0);
14250
14251 - load_idt((const struct desc_ptr *)&idt_descr);
14252 + load_idt(&idt_descr);
14253
14254 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14255 syscall_init();
14256 @@ -1222,7 +1174,6 @@ void __cpuinit cpu_init(void)
14257 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14258 barrier();
14259
14260 - x86_configure_nx();
14261 if (cpu != 0)
14262 enable_x2apic();
14263
14264 @@ -1278,7 +1229,7 @@ void __cpuinit cpu_init(void)
14265 {
14266 int cpu = smp_processor_id();
14267 struct task_struct *curr = current;
14268 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14269 + struct tss_struct *t = init_tss + cpu;
14270 struct thread_struct *thread = &curr->thread;
14271
14272 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14273 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14274 index 3e6ff6c..54b4992 100644
14275 --- a/arch/x86/kernel/cpu/intel.c
14276 +++ b/arch/x86/kernel/cpu/intel.c
14277 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14278 * Update the IDT descriptor and reload the IDT so that
14279 * it uses the read-only mapped virtual address.
14280 */
14281 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14282 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14283 load_idt(&idt_descr);
14284 }
14285 #endif
14286 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14287 index c46ed49..5dc0a53 100644
14288 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14289 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14290 @@ -42,6 +42,7 @@
14291 #include <asm/processor.h>
14292 #include <asm/mce.h>
14293 #include <asm/msr.h>
14294 +#include <asm/local.h>
14295
14296 #include "mce-internal.h"
14297
14298 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14299 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14300 m->cs, m->ip);
14301
14302 - if (m->cs == __KERNEL_CS)
14303 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14304 print_symbol("{%s}", m->ip);
14305 pr_cont("\n");
14306 }
14307 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14308
14309 #define PANIC_TIMEOUT 5 /* 5 seconds */
14310
14311 -static atomic_t mce_paniced;
14312 +static atomic_unchecked_t mce_paniced;
14313
14314 static int fake_panic;
14315 -static atomic_t mce_fake_paniced;
14316 +static atomic_unchecked_t mce_fake_paniced;
14317
14318 /* Panic in progress. Enable interrupts and wait for final IPI */
14319 static void wait_for_panic(void)
14320 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14321 /*
14322 * Make sure only one CPU runs in machine check panic
14323 */
14324 - if (atomic_inc_return(&mce_paniced) > 1)
14325 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14326 wait_for_panic();
14327 barrier();
14328
14329 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14330 console_verbose();
14331 } else {
14332 /* Don't log too much for fake panic */
14333 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14334 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14335 return;
14336 }
14337 /* First print corrected ones that are still unlogged */
14338 @@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
14339 * might have been modified by someone else.
14340 */
14341 rmb();
14342 - if (atomic_read(&mce_paniced))
14343 + if (atomic_read_unchecked(&mce_paniced))
14344 wait_for_panic();
14345 if (!monarch_timeout)
14346 goto out;
14347 @@ -1581,7 +1582,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14348 }
14349
14350 /* Call the installed machine check handler for this CPU setup. */
14351 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14352 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14353 unexpected_machine_check;
14354
14355 /*
14356 @@ -1604,7 +1605,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14357 return;
14358 }
14359
14360 + pax_open_kernel();
14361 machine_check_vector = do_machine_check;
14362 + pax_close_kernel();
14363
14364 __mcheck_cpu_init_generic();
14365 __mcheck_cpu_init_vendor(c);
14366 @@ -1618,7 +1621,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14367 */
14368
14369 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14370 -static int mce_chrdev_open_count; /* #times opened */
14371 +static local_t mce_chrdev_open_count; /* #times opened */
14372 static int mce_chrdev_open_exclu; /* already open exclusive? */
14373
14374 static int mce_chrdev_open(struct inode *inode, struct file *file)
14375 @@ -1626,7 +1629,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14376 spin_lock(&mce_chrdev_state_lock);
14377
14378 if (mce_chrdev_open_exclu ||
14379 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14380 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14381 spin_unlock(&mce_chrdev_state_lock);
14382
14383 return -EBUSY;
14384 @@ -1634,7 +1637,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14385
14386 if (file->f_flags & O_EXCL)
14387 mce_chrdev_open_exclu = 1;
14388 - mce_chrdev_open_count++;
14389 + local_inc(&mce_chrdev_open_count);
14390
14391 spin_unlock(&mce_chrdev_state_lock);
14392
14393 @@ -1645,7 +1648,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14394 {
14395 spin_lock(&mce_chrdev_state_lock);
14396
14397 - mce_chrdev_open_count--;
14398 + local_dec(&mce_chrdev_open_count);
14399 mce_chrdev_open_exclu = 0;
14400
14401 spin_unlock(&mce_chrdev_state_lock);
14402 @@ -2370,7 +2373,7 @@ struct dentry *mce_get_debugfs_dir(void)
14403 static void mce_reset(void)
14404 {
14405 cpu_missing = 0;
14406 - atomic_set(&mce_fake_paniced, 0);
14407 + atomic_set_unchecked(&mce_fake_paniced, 0);
14408 atomic_set(&mce_executing, 0);
14409 atomic_set(&mce_callin, 0);
14410 atomic_set(&global_nwo, 0);
14411 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14412 index 2d5454c..51987eb 100644
14413 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14414 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14415 @@ -11,6 +11,7 @@
14416 #include <asm/processor.h>
14417 #include <asm/mce.h>
14418 #include <asm/msr.h>
14419 +#include <asm/pgtable.h>
14420
14421 /* By default disabled */
14422 int mce_p5_enabled __read_mostly;
14423 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14424 if (!cpu_has(c, X86_FEATURE_MCE))
14425 return;
14426
14427 + pax_open_kernel();
14428 machine_check_vector = pentium_machine_check;
14429 + pax_close_kernel();
14430 /* Make sure the vector pointer is visible before we enable MCEs: */
14431 wmb();
14432
14433 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14434 index 2d7998f..17c9de1 100644
14435 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14436 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14437 @@ -10,6 +10,7 @@
14438 #include <asm/processor.h>
14439 #include <asm/mce.h>
14440 #include <asm/msr.h>
14441 +#include <asm/pgtable.h>
14442
14443 /* Machine check handler for WinChip C6: */
14444 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14445 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14446 {
14447 u32 lo, hi;
14448
14449 + pax_open_kernel();
14450 machine_check_vector = winchip_machine_check;
14451 + pax_close_kernel();
14452 /* Make sure the vector pointer is visible before we enable MCEs: */
14453 wmb();
14454
14455 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14456 index 6b96110..0da73eb 100644
14457 --- a/arch/x86/kernel/cpu/mtrr/main.c
14458 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14459 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14460 u64 size_or_mask, size_and_mask;
14461 static bool mtrr_aps_delayed_init;
14462
14463 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14464 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14465
14466 const struct mtrr_ops *mtrr_if;
14467
14468 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14469 index df5e41f..816c719 100644
14470 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14471 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14472 @@ -25,7 +25,7 @@ struct mtrr_ops {
14473 int (*validate_add_page)(unsigned long base, unsigned long size,
14474 unsigned int type);
14475 int (*have_wrcomb)(void);
14476 -};
14477 +} __do_const;
14478
14479 extern int generic_get_free_region(unsigned long base, unsigned long size,
14480 int replace_reg);
14481 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14482 index c4706cf..264b0f7 100644
14483 --- a/arch/x86/kernel/cpu/perf_event.c
14484 +++ b/arch/x86/kernel/cpu/perf_event.c
14485 @@ -1837,7 +1837,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14486 break;
14487
14488 perf_callchain_store(entry, frame.return_address);
14489 - fp = frame.next_frame;
14490 + fp = (const void __force_user *)frame.next_frame;
14491 }
14492 }
14493
14494 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
14495 index 187c294..28a069c 100644
14496 --- a/arch/x86/kernel/cpu/perf_event_intel.c
14497 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
14498 @@ -1811,10 +1811,10 @@ __init int intel_pmu_init(void)
14499 * v2 and above have a perf capabilities MSR
14500 */
14501 if (version > 1) {
14502 - u64 capabilities;
14503 + u64 capabilities = x86_pmu.intel_cap.capabilities;
14504
14505 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
14506 - x86_pmu.intel_cap.capabilities = capabilities;
14507 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
14508 + x86_pmu.intel_cap.capabilities = capabilities;
14509 }
14510
14511 intel_ds_init();
14512 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14513 index 13ad899..f642b9a 100644
14514 --- a/arch/x86/kernel/crash.c
14515 +++ b/arch/x86/kernel/crash.c
14516 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14517 {
14518 #ifdef CONFIG_X86_32
14519 struct pt_regs fixed_regs;
14520 -#endif
14521
14522 -#ifdef CONFIG_X86_32
14523 - if (!user_mode_vm(regs)) {
14524 + if (!user_mode(regs)) {
14525 crash_fixup_ss_esp(&fixed_regs, regs);
14526 regs = &fixed_regs;
14527 }
14528 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14529 index 37250fe..bf2ec74 100644
14530 --- a/arch/x86/kernel/doublefault_32.c
14531 +++ b/arch/x86/kernel/doublefault_32.c
14532 @@ -11,7 +11,7 @@
14533
14534 #define DOUBLEFAULT_STACKSIZE (1024)
14535 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14536 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14537 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14538
14539 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14540
14541 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14542 unsigned long gdt, tss;
14543
14544 store_gdt(&gdt_desc);
14545 - gdt = gdt_desc.address;
14546 + gdt = (unsigned long)gdt_desc.address;
14547
14548 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14549
14550 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14551 /* 0x2 bit is always set */
14552 .flags = X86_EFLAGS_SF | 0x2,
14553 .sp = STACK_START,
14554 - .es = __USER_DS,
14555 + .es = __KERNEL_DS,
14556 .cs = __KERNEL_CS,
14557 .ss = __KERNEL_DS,
14558 - .ds = __USER_DS,
14559 + .ds = __KERNEL_DS,
14560 .fs = __KERNEL_PERCPU,
14561
14562 .__cr3 = __pa_nodebug(swapper_pg_dir),
14563 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14564 index 571246d..81f335c 100644
14565 --- a/arch/x86/kernel/dumpstack.c
14566 +++ b/arch/x86/kernel/dumpstack.c
14567 @@ -2,6 +2,9 @@
14568 * Copyright (C) 1991, 1992 Linus Torvalds
14569 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14570 */
14571 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14572 +#define __INCLUDED_BY_HIDESYM 1
14573 +#endif
14574 #include <linux/kallsyms.h>
14575 #include <linux/kprobes.h>
14576 #include <linux/uaccess.h>
14577 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14578 static void
14579 print_ftrace_graph_addr(unsigned long addr, void *data,
14580 const struct stacktrace_ops *ops,
14581 - struct thread_info *tinfo, int *graph)
14582 + struct task_struct *task, int *graph)
14583 {
14584 - struct task_struct *task;
14585 unsigned long ret_addr;
14586 int index;
14587
14588 if (addr != (unsigned long)return_to_handler)
14589 return;
14590
14591 - task = tinfo->task;
14592 index = task->curr_ret_stack;
14593
14594 if (!task->ret_stack || index < *graph)
14595 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14596 static inline void
14597 print_ftrace_graph_addr(unsigned long addr, void *data,
14598 const struct stacktrace_ops *ops,
14599 - struct thread_info *tinfo, int *graph)
14600 + struct task_struct *task, int *graph)
14601 { }
14602 #endif
14603
14604 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14605 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14606 */
14607
14608 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14609 - void *p, unsigned int size, void *end)
14610 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14611 {
14612 - void *t = tinfo;
14613 if (end) {
14614 if (p < end && p >= (end-THREAD_SIZE))
14615 return 1;
14616 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14617 }
14618
14619 unsigned long
14620 -print_context_stack(struct thread_info *tinfo,
14621 +print_context_stack(struct task_struct *task, void *stack_start,
14622 unsigned long *stack, unsigned long bp,
14623 const struct stacktrace_ops *ops, void *data,
14624 unsigned long *end, int *graph)
14625 {
14626 struct stack_frame *frame = (struct stack_frame *)bp;
14627
14628 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14629 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14630 unsigned long addr;
14631
14632 addr = *stack;
14633 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14634 } else {
14635 ops->address(data, addr, 0);
14636 }
14637 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14638 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14639 }
14640 stack++;
14641 }
14642 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14643 EXPORT_SYMBOL_GPL(print_context_stack);
14644
14645 unsigned long
14646 -print_context_stack_bp(struct thread_info *tinfo,
14647 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14648 unsigned long *stack, unsigned long bp,
14649 const struct stacktrace_ops *ops, void *data,
14650 unsigned long *end, int *graph)
14651 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14652 struct stack_frame *frame = (struct stack_frame *)bp;
14653 unsigned long *ret_addr = &frame->return_address;
14654
14655 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14656 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14657 unsigned long addr = *ret_addr;
14658
14659 if (!__kernel_text_address(addr))
14660 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14661 ops->address(data, addr, 1);
14662 frame = frame->next_frame;
14663 ret_addr = &frame->return_address;
14664 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14665 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14666 }
14667
14668 return (unsigned long)frame;
14669 @@ -189,7 +188,7 @@ void dump_stack(void)
14670
14671 bp = stack_frame(current, NULL);
14672 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14673 - current->pid, current->comm, print_tainted(),
14674 + task_pid_nr(current), current->comm, print_tainted(),
14675 init_utsname()->release,
14676 (int)strcspn(init_utsname()->version, " "),
14677 init_utsname()->version);
14678 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14679 }
14680 EXPORT_SYMBOL_GPL(oops_begin);
14681
14682 +extern void gr_handle_kernel_exploit(void);
14683 +
14684 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 {
14686 if (regs && kexec_should_crash(current))
14687 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14688 panic("Fatal exception in interrupt");
14689 if (panic_on_oops)
14690 panic("Fatal exception");
14691 - do_exit(signr);
14692 +
14693 + gr_handle_kernel_exploit();
14694 +
14695 + do_group_exit(signr);
14696 }
14697
14698 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14699 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14700
14701 show_regs(regs);
14702 #ifdef CONFIG_X86_32
14703 - if (user_mode_vm(regs)) {
14704 + if (user_mode(regs)) {
14705 sp = regs->sp;
14706 ss = regs->ss & 0xffff;
14707 } else {
14708 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14709 unsigned long flags = oops_begin();
14710 int sig = SIGSEGV;
14711
14712 - if (!user_mode_vm(regs))
14713 + if (!user_mode(regs))
14714 report_bug(regs->ip, regs);
14715
14716 if (__die(str, regs, err))
14717 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14718 index e0b1d78..a8ade5e 100644
14719 --- a/arch/x86/kernel/dumpstack_32.c
14720 +++ b/arch/x86/kernel/dumpstack_32.c
14721 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14722 bp = stack_frame(task, regs);
14723
14724 for (;;) {
14725 - struct thread_info *context;
14726 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14727
14728 - context = (struct thread_info *)
14729 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14730 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14731 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14732
14733 - stack = (unsigned long *)context->previous_esp;
14734 - if (!stack)
14735 + if (stack_start == task_stack_page(task))
14736 break;
14737 + stack = *(unsigned long **)stack_start;
14738 if (ops->stack(data, "IRQ") < 0)
14739 break;
14740 touch_nmi_watchdog();
14741 @@ -87,7 +85,7 @@ void show_regs(struct pt_regs *regs)
14742 int i;
14743
14744 print_modules();
14745 - __show_regs(regs, !user_mode_vm(regs));
14746 + __show_regs(regs, !user_mode(regs));
14747
14748 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14749 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14750 @@ -96,21 +94,22 @@ void show_regs(struct pt_regs *regs)
14751 * When in-kernel, we also print out the stack and code at the
14752 * time of the fault..
14753 */
14754 - if (!user_mode_vm(regs)) {
14755 + if (!user_mode(regs)) {
14756 unsigned int code_prologue = code_bytes * 43 / 64;
14757 unsigned int code_len = code_bytes;
14758 unsigned char c;
14759 u8 *ip;
14760 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14761
14762 printk(KERN_EMERG "Stack:\n");
14763 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14764
14765 printk(KERN_EMERG "Code: ");
14766
14767 - ip = (u8 *)regs->ip - code_prologue;
14768 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14769 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14770 /* try starting at IP */
14771 - ip = (u8 *)regs->ip;
14772 + ip = (u8 *)regs->ip + cs_base;
14773 code_len = code_len - code_prologue + 1;
14774 }
14775 for (i = 0; i < code_len; i++, ip++) {
14776 @@ -119,7 +118,7 @@ void show_regs(struct pt_regs *regs)
14777 printk(KERN_CONT " Bad EIP value.");
14778 break;
14779 }
14780 - if (ip == (u8 *)regs->ip)
14781 + if (ip == (u8 *)regs->ip + cs_base)
14782 printk(KERN_CONT "<%02x> ", c);
14783 else
14784 printk(KERN_CONT "%02x ", c);
14785 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14786 {
14787 unsigned short ud2;
14788
14789 + ip = ktla_ktva(ip);
14790 if (ip < PAGE_OFFSET)
14791 return 0;
14792 if (probe_kernel_address((unsigned short *)ip, ud2))
14793 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14794
14795 return ud2 == 0x0b0f;
14796 }
14797 +
14798 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14799 +void pax_check_alloca(unsigned long size)
14800 +{
14801 + unsigned long sp = (unsigned long)&sp, stack_left;
14802 +
14803 + /* all kernel stacks are of the same size */
14804 + stack_left = sp & (THREAD_SIZE - 1);
14805 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14806 +}
14807 +EXPORT_SYMBOL(pax_check_alloca);
14808 +#endif
14809 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14810 index 791b761..2ab6e33 100644
14811 --- a/arch/x86/kernel/dumpstack_64.c
14812 +++ b/arch/x86/kernel/dumpstack_64.c
14813 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14814 unsigned long *irq_stack_end =
14815 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14816 unsigned used = 0;
14817 - struct thread_info *tinfo;
14818 int graph = 0;
14819 unsigned long dummy;
14820 + void *stack_start;
14821
14822 if (!task)
14823 task = current;
14824 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14825 * current stack address. If the stacks consist of nested
14826 * exceptions
14827 */
14828 - tinfo = task_thread_info(task);
14829 for (;;) {
14830 char *id;
14831 unsigned long *estack_end;
14832 +
14833 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14834 &used, &id);
14835
14836 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14837 if (ops->stack(data, id) < 0)
14838 break;
14839
14840 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14841 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14842 data, estack_end, &graph);
14843 ops->stack(data, "<EOE>");
14844 /*
14845 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14846 * second-to-last pointer (index -2 to end) in the
14847 * exception stack:
14848 */
14849 + if ((u16)estack_end[-1] != __KERNEL_DS)
14850 + goto out;
14851 stack = (unsigned long *) estack_end[-2];
14852 continue;
14853 }
14854 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14855 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14856 if (ops->stack(data, "IRQ") < 0)
14857 break;
14858 - bp = ops->walk_stack(tinfo, stack, bp,
14859 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14860 ops, data, irq_stack_end, &graph);
14861 /*
14862 * We link to the next stack (which would be
14863 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14864 /*
14865 * This handles the process stack:
14866 */
14867 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14868 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14869 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14870 +out:
14871 put_cpu();
14872 }
14873 EXPORT_SYMBOL(dump_trace);
14874 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14875
14876 return ud2 == 0x0b0f;
14877 }
14878 +
14879 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14880 +void pax_check_alloca(unsigned long size)
14881 +{
14882 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14883 + unsigned cpu, used;
14884 + char *id;
14885 +
14886 + /* check the process stack first */
14887 + stack_start = (unsigned long)task_stack_page(current);
14888 + stack_end = stack_start + THREAD_SIZE;
14889 + if (likely(stack_start <= sp && sp < stack_end)) {
14890 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14891 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14892 + return;
14893 + }
14894 +
14895 + cpu = get_cpu();
14896 +
14897 + /* check the irq stacks */
14898 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14899 + stack_start = stack_end - IRQ_STACK_SIZE;
14900 + if (stack_start <= sp && sp < stack_end) {
14901 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14902 + put_cpu();
14903 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14904 + return;
14905 + }
14906 +
14907 + /* check the exception stacks */
14908 + used = 0;
14909 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14910 + stack_start = stack_end - EXCEPTION_STKSZ;
14911 + if (stack_end && stack_start <= sp && sp < stack_end) {
14912 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14913 + put_cpu();
14914 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14915 + return;
14916 + }
14917 +
14918 + put_cpu();
14919 +
14920 + /* unknown stack */
14921 + BUG();
14922 +}
14923 +EXPORT_SYMBOL(pax_check_alloca);
14924 +#endif
14925 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14926 index 9b9f18b..9fcaa04 100644
14927 --- a/arch/x86/kernel/early_printk.c
14928 +++ b/arch/x86/kernel/early_printk.c
14929 @@ -7,6 +7,7 @@
14930 #include <linux/pci_regs.h>
14931 #include <linux/pci_ids.h>
14932 #include <linux/errno.h>
14933 +#include <linux/sched.h>
14934 #include <asm/io.h>
14935 #include <asm/processor.h>
14936 #include <asm/fcntl.h>
14937 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14938 index 623f288..8bdd78a 100644
14939 --- a/arch/x86/kernel/entry_32.S
14940 +++ b/arch/x86/kernel/entry_32.S
14941 @@ -176,13 +176,153 @@
14942 /*CFI_REL_OFFSET gs, PT_GS*/
14943 .endm
14944 .macro SET_KERNEL_GS reg
14945 +
14946 +#ifdef CONFIG_CC_STACKPROTECTOR
14947 movl $(__KERNEL_STACK_CANARY), \reg
14948 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14949 + movl $(__USER_DS), \reg
14950 +#else
14951 + xorl \reg, \reg
14952 +#endif
14953 +
14954 movl \reg, %gs
14955 .endm
14956
14957 #endif /* CONFIG_X86_32_LAZY_GS */
14958
14959 -.macro SAVE_ALL
14960 +.macro pax_enter_kernel
14961 +#ifdef CONFIG_PAX_KERNEXEC
14962 + call pax_enter_kernel
14963 +#endif
14964 +.endm
14965 +
14966 +.macro pax_exit_kernel
14967 +#ifdef CONFIG_PAX_KERNEXEC
14968 + call pax_exit_kernel
14969 +#endif
14970 +.endm
14971 +
14972 +#ifdef CONFIG_PAX_KERNEXEC
14973 +ENTRY(pax_enter_kernel)
14974 +#ifdef CONFIG_PARAVIRT
14975 + pushl %eax
14976 + pushl %ecx
14977 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14978 + mov %eax, %esi
14979 +#else
14980 + mov %cr0, %esi
14981 +#endif
14982 + bts $16, %esi
14983 + jnc 1f
14984 + mov %cs, %esi
14985 + cmp $__KERNEL_CS, %esi
14986 + jz 3f
14987 + ljmp $__KERNEL_CS, $3f
14988 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14989 +2:
14990 +#ifdef CONFIG_PARAVIRT
14991 + mov %esi, %eax
14992 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14993 +#else
14994 + mov %esi, %cr0
14995 +#endif
14996 +3:
14997 +#ifdef CONFIG_PARAVIRT
14998 + popl %ecx
14999 + popl %eax
15000 +#endif
15001 + ret
15002 +ENDPROC(pax_enter_kernel)
15003 +
15004 +ENTRY(pax_exit_kernel)
15005 +#ifdef CONFIG_PARAVIRT
15006 + pushl %eax
15007 + pushl %ecx
15008 +#endif
15009 + mov %cs, %esi
15010 + cmp $__KERNEXEC_KERNEL_CS, %esi
15011 + jnz 2f
15012 +#ifdef CONFIG_PARAVIRT
15013 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15014 + mov %eax, %esi
15015 +#else
15016 + mov %cr0, %esi
15017 +#endif
15018 + btr $16, %esi
15019 + ljmp $__KERNEL_CS, $1f
15020 +1:
15021 +#ifdef CONFIG_PARAVIRT
15022 + mov %esi, %eax
15023 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15024 +#else
15025 + mov %esi, %cr0
15026 +#endif
15027 +2:
15028 +#ifdef CONFIG_PARAVIRT
15029 + popl %ecx
15030 + popl %eax
15031 +#endif
15032 + ret
15033 +ENDPROC(pax_exit_kernel)
15034 +#endif
15035 +
15036 +.macro pax_erase_kstack
15037 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15038 + call pax_erase_kstack
15039 +#endif
15040 +.endm
15041 +
15042 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15043 +/*
15044 + * ebp: thread_info
15045 + */
15046 +ENTRY(pax_erase_kstack)
15047 + pushl %edi
15048 + pushl %ecx
15049 + pushl %eax
15050 +
15051 + mov TI_lowest_stack(%ebp), %edi
15052 + mov $-0xBEEF, %eax
15053 + std
15054 +
15055 +1: mov %edi, %ecx
15056 + and $THREAD_SIZE_asm - 1, %ecx
15057 + shr $2, %ecx
15058 + repne scasl
15059 + jecxz 2f
15060 +
15061 + cmp $2*16, %ecx
15062 + jc 2f
15063 +
15064 + mov $2*16, %ecx
15065 + repe scasl
15066 + jecxz 2f
15067 + jne 1b
15068 +
15069 +2: cld
15070 + mov %esp, %ecx
15071 + sub %edi, %ecx
15072 +
15073 + cmp $THREAD_SIZE_asm, %ecx
15074 + jb 3f
15075 + ud2
15076 +3:
15077 +
15078 + shr $2, %ecx
15079 + rep stosl
15080 +
15081 + mov TI_task_thread_sp0(%ebp), %edi
15082 + sub $128, %edi
15083 + mov %edi, TI_lowest_stack(%ebp)
15084 +
15085 + popl %eax
15086 + popl %ecx
15087 + popl %edi
15088 + ret
15089 +ENDPROC(pax_erase_kstack)
15090 +#endif
15091 +
15092 +.macro __SAVE_ALL _DS
15093 cld
15094 PUSH_GS
15095 pushl_cfi %fs
15096 @@ -205,7 +345,7 @@
15097 CFI_REL_OFFSET ecx, 0
15098 pushl_cfi %ebx
15099 CFI_REL_OFFSET ebx, 0
15100 - movl $(__USER_DS), %edx
15101 + movl $\_DS, %edx
15102 movl %edx, %ds
15103 movl %edx, %es
15104 movl $(__KERNEL_PERCPU), %edx
15105 @@ -213,6 +353,15 @@
15106 SET_KERNEL_GS %edx
15107 .endm
15108
15109 +.macro SAVE_ALL
15110 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15111 + __SAVE_ALL __KERNEL_DS
15112 + pax_enter_kernel
15113 +#else
15114 + __SAVE_ALL __USER_DS
15115 +#endif
15116 +.endm
15117 +
15118 .macro RESTORE_INT_REGS
15119 popl_cfi %ebx
15120 CFI_RESTORE ebx
15121 @@ -296,7 +445,7 @@ ENTRY(ret_from_fork)
15122 popfl_cfi
15123 jmp syscall_exit
15124 CFI_ENDPROC
15125 -END(ret_from_fork)
15126 +ENDPROC(ret_from_fork)
15127
15128 /*
15129 * Interrupt exit functions should be protected against kprobes
15130 @@ -329,7 +478,15 @@ ret_from_intr:
15131 andl $SEGMENT_RPL_MASK, %eax
15132 #endif
15133 cmpl $USER_RPL, %eax
15134 +
15135 +#ifdef CONFIG_PAX_KERNEXEC
15136 + jae resume_userspace
15137 +
15138 + pax_exit_kernel
15139 + jmp resume_kernel
15140 +#else
15141 jb resume_kernel # not returning to v8086 or userspace
15142 +#endif
15143
15144 ENTRY(resume_userspace)
15145 LOCKDEP_SYS_EXIT
15146 @@ -341,8 +498,8 @@ ENTRY(resume_userspace)
15147 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15148 # int/exception return?
15149 jne work_pending
15150 - jmp restore_all
15151 -END(ret_from_exception)
15152 + jmp restore_all_pax
15153 +ENDPROC(ret_from_exception)
15154
15155 #ifdef CONFIG_PREEMPT
15156 ENTRY(resume_kernel)
15157 @@ -357,7 +514,7 @@ need_resched:
15158 jz restore_all
15159 call preempt_schedule_irq
15160 jmp need_resched
15161 -END(resume_kernel)
15162 +ENDPROC(resume_kernel)
15163 #endif
15164 CFI_ENDPROC
15165 /*
15166 @@ -391,28 +548,43 @@ sysenter_past_esp:
15167 /*CFI_REL_OFFSET cs, 0*/
15168 /*
15169 * Push current_thread_info()->sysenter_return to the stack.
15170 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15171 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15172 */
15173 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15174 + pushl_cfi $0
15175 CFI_REL_OFFSET eip, 0
15176
15177 pushl_cfi %eax
15178 SAVE_ALL
15179 + GET_THREAD_INFO(%ebp)
15180 + movl TI_sysenter_return(%ebp),%ebp
15181 + movl %ebp,PT_EIP(%esp)
15182 ENABLE_INTERRUPTS(CLBR_NONE)
15183
15184 /*
15185 * Load the potential sixth argument from user stack.
15186 * Careful about security.
15187 */
15188 + movl PT_OLDESP(%esp),%ebp
15189 +
15190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15191 + mov PT_OLDSS(%esp),%ds
15192 +1: movl %ds:(%ebp),%ebp
15193 + push %ss
15194 + pop %ds
15195 +#else
15196 cmpl $__PAGE_OFFSET-3,%ebp
15197 jae syscall_fault
15198 1: movl (%ebp),%ebp
15199 +#endif
15200 +
15201 movl %ebp,PT_EBP(%esp)
15202 _ASM_EXTABLE(1b,syscall_fault)
15203
15204 GET_THREAD_INFO(%ebp)
15205
15206 +#ifdef CONFIG_PAX_RANDKSTACK
15207 + pax_erase_kstack
15208 +#endif
15209 +
15210 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15211 jnz sysenter_audit
15212 sysenter_do_call:
15213 @@ -427,12 +599,24 @@ sysenter_do_call:
15214 testl $_TIF_ALLWORK_MASK, %ecx
15215 jne sysexit_audit
15216 sysenter_exit:
15217 +
15218 +#ifdef CONFIG_PAX_RANDKSTACK
15219 + pushl_cfi %eax
15220 + movl %esp, %eax
15221 + call pax_randomize_kstack
15222 + popl_cfi %eax
15223 +#endif
15224 +
15225 + pax_erase_kstack
15226 +
15227 /* if something modifies registers it must also disable sysexit */
15228 movl PT_EIP(%esp), %edx
15229 movl PT_OLDESP(%esp), %ecx
15230 xorl %ebp,%ebp
15231 TRACE_IRQS_ON
15232 1: mov PT_FS(%esp), %fs
15233 +2: mov PT_DS(%esp), %ds
15234 +3: mov PT_ES(%esp), %es
15235 PTGS_TO_GS
15236 ENABLE_INTERRUPTS_SYSEXIT
15237
15238 @@ -449,6 +633,9 @@ sysenter_audit:
15239 movl %eax,%edx /* 2nd arg: syscall number */
15240 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15241 call __audit_syscall_entry
15242 +
15243 + pax_erase_kstack
15244 +
15245 pushl_cfi %ebx
15246 movl PT_EAX(%esp),%eax /* reload syscall number */
15247 jmp sysenter_do_call
15248 @@ -474,10 +661,16 @@ sysexit_audit:
15249
15250 CFI_ENDPROC
15251 .pushsection .fixup,"ax"
15252 -2: movl $0,PT_FS(%esp)
15253 +4: movl $0,PT_FS(%esp)
15254 + jmp 1b
15255 +5: movl $0,PT_DS(%esp)
15256 + jmp 1b
15257 +6: movl $0,PT_ES(%esp)
15258 jmp 1b
15259 .popsection
15260 - _ASM_EXTABLE(1b,2b)
15261 + _ASM_EXTABLE(1b,4b)
15262 + _ASM_EXTABLE(2b,5b)
15263 + _ASM_EXTABLE(3b,6b)
15264 PTGS_TO_GS_EX
15265 ENDPROC(ia32_sysenter_target)
15266
15267 @@ -491,6 +684,11 @@ ENTRY(system_call)
15268 pushl_cfi %eax # save orig_eax
15269 SAVE_ALL
15270 GET_THREAD_INFO(%ebp)
15271 +
15272 +#ifdef CONFIG_PAX_RANDKSTACK
15273 + pax_erase_kstack
15274 +#endif
15275 +
15276 # system call tracing in operation / emulation
15277 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15278 jnz syscall_trace_entry
15279 @@ -509,6 +707,15 @@ syscall_exit:
15280 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15281 jne syscall_exit_work
15282
15283 +restore_all_pax:
15284 +
15285 +#ifdef CONFIG_PAX_RANDKSTACK
15286 + movl %esp, %eax
15287 + call pax_randomize_kstack
15288 +#endif
15289 +
15290 + pax_erase_kstack
15291 +
15292 restore_all:
15293 TRACE_IRQS_IRET
15294 restore_all_notrace:
15295 @@ -565,14 +772,34 @@ ldt_ss:
15296 * compensating for the offset by changing to the ESPFIX segment with
15297 * a base address that matches for the difference.
15298 */
15299 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15300 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15301 mov %esp, %edx /* load kernel esp */
15302 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15303 mov %dx, %ax /* eax: new kernel esp */
15304 sub %eax, %edx /* offset (low word is 0) */
15305 +#ifdef CONFIG_SMP
15306 + movl PER_CPU_VAR(cpu_number), %ebx
15307 + shll $PAGE_SHIFT_asm, %ebx
15308 + addl $cpu_gdt_table, %ebx
15309 +#else
15310 + movl $cpu_gdt_table, %ebx
15311 +#endif
15312 shr $16, %edx
15313 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15314 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15315 +
15316 +#ifdef CONFIG_PAX_KERNEXEC
15317 + mov %cr0, %esi
15318 + btr $16, %esi
15319 + mov %esi, %cr0
15320 +#endif
15321 +
15322 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15323 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15324 +
15325 +#ifdef CONFIG_PAX_KERNEXEC
15326 + bts $16, %esi
15327 + mov %esi, %cr0
15328 +#endif
15329 +
15330 pushl_cfi $__ESPFIX_SS
15331 pushl_cfi %eax /* new kernel esp */
15332 /* Disable interrupts, but do not irqtrace this section: we
15333 @@ -601,35 +828,23 @@ work_resched:
15334 movl TI_flags(%ebp), %ecx
15335 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15336 # than syscall tracing?
15337 - jz restore_all
15338 + jz restore_all_pax
15339 testb $_TIF_NEED_RESCHED, %cl
15340 jnz work_resched
15341
15342 work_notifysig: # deal with pending signals and
15343 # notify-resume requests
15344 + movl %esp, %eax
15345 #ifdef CONFIG_VM86
15346 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15347 - movl %esp, %eax
15348 - jne work_notifysig_v86 # returning to kernel-space or
15349 + jz 1f # returning to kernel-space or
15350 # vm86-space
15351 - TRACE_IRQS_ON
15352 - ENABLE_INTERRUPTS(CLBR_NONE)
15353 - movb PT_CS(%esp), %bl
15354 - andb $SEGMENT_RPL_MASK, %bl
15355 - cmpb $USER_RPL, %bl
15356 - jb resume_kernel
15357 - xorl %edx, %edx
15358 - call do_notify_resume
15359 - jmp resume_userspace
15360
15361 - ALIGN
15362 -work_notifysig_v86:
15363 pushl_cfi %ecx # save ti_flags for do_notify_resume
15364 call save_v86_state # %eax contains pt_regs pointer
15365 popl_cfi %ecx
15366 movl %eax, %esp
15367 -#else
15368 - movl %esp, %eax
15369 +1:
15370 #endif
15371 TRACE_IRQS_ON
15372 ENABLE_INTERRUPTS(CLBR_NONE)
15373 @@ -640,7 +855,7 @@ work_notifysig_v86:
15374 xorl %edx, %edx
15375 call do_notify_resume
15376 jmp resume_userspace
15377 -END(work_pending)
15378 +ENDPROC(work_pending)
15379
15380 # perform syscall exit tracing
15381 ALIGN
15382 @@ -648,11 +863,14 @@ syscall_trace_entry:
15383 movl $-ENOSYS,PT_EAX(%esp)
15384 movl %esp, %eax
15385 call syscall_trace_enter
15386 +
15387 + pax_erase_kstack
15388 +
15389 /* What it returned is what we'll actually use. */
15390 cmpl $(NR_syscalls), %eax
15391 jnae syscall_call
15392 jmp syscall_exit
15393 -END(syscall_trace_entry)
15394 +ENDPROC(syscall_trace_entry)
15395
15396 # perform syscall exit tracing
15397 ALIGN
15398 @@ -665,20 +883,24 @@ syscall_exit_work:
15399 movl %esp, %eax
15400 call syscall_trace_leave
15401 jmp resume_userspace
15402 -END(syscall_exit_work)
15403 +ENDPROC(syscall_exit_work)
15404 CFI_ENDPROC
15405
15406 RING0_INT_FRAME # can't unwind into user space anyway
15407 syscall_fault:
15408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15409 + push %ss
15410 + pop %ds
15411 +#endif
15412 GET_THREAD_INFO(%ebp)
15413 movl $-EFAULT,PT_EAX(%esp)
15414 jmp resume_userspace
15415 -END(syscall_fault)
15416 +ENDPROC(syscall_fault)
15417
15418 syscall_badsys:
15419 movl $-ENOSYS,PT_EAX(%esp)
15420 jmp resume_userspace
15421 -END(syscall_badsys)
15422 +ENDPROC(syscall_badsys)
15423 CFI_ENDPROC
15424 /*
15425 * End of kprobes section
15426 @@ -750,6 +972,36 @@ ENTRY(ptregs_clone)
15427 CFI_ENDPROC
15428 ENDPROC(ptregs_clone)
15429
15430 + ALIGN;
15431 +ENTRY(kernel_execve)
15432 + CFI_STARTPROC
15433 + pushl_cfi %ebp
15434 + sub $PT_OLDSS+4,%esp
15435 + pushl_cfi %edi
15436 + pushl_cfi %ecx
15437 + pushl_cfi %eax
15438 + lea 3*4(%esp),%edi
15439 + mov $PT_OLDSS/4+1,%ecx
15440 + xorl %eax,%eax
15441 + rep stosl
15442 + popl_cfi %eax
15443 + popl_cfi %ecx
15444 + popl_cfi %edi
15445 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15446 + pushl_cfi %esp
15447 + call sys_execve
15448 + add $4,%esp
15449 + CFI_ADJUST_CFA_OFFSET -4
15450 + GET_THREAD_INFO(%ebp)
15451 + test %eax,%eax
15452 + jz syscall_exit
15453 + add $PT_OLDSS+4,%esp
15454 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15455 + popl_cfi %ebp
15456 + ret
15457 + CFI_ENDPROC
15458 +ENDPROC(kernel_execve)
15459 +
15460 .macro FIXUP_ESPFIX_STACK
15461 /*
15462 * Switch back for ESPFIX stack to the normal zerobased stack
15463 @@ -759,8 +1011,15 @@ ENDPROC(ptregs_clone)
15464 * normal stack and adjusts ESP with the matching offset.
15465 */
15466 /* fixup the stack */
15467 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15468 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15469 +#ifdef CONFIG_SMP
15470 + movl PER_CPU_VAR(cpu_number), %ebx
15471 + shll $PAGE_SHIFT_asm, %ebx
15472 + addl $cpu_gdt_table, %ebx
15473 +#else
15474 + movl $cpu_gdt_table, %ebx
15475 +#endif
15476 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15477 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15478 shl $16, %eax
15479 addl %esp, %eax /* the adjusted stack pointer */
15480 pushl_cfi $__KERNEL_DS
15481 @@ -813,7 +1072,7 @@ vector=vector+1
15482 .endr
15483 2: jmp common_interrupt
15484 .endr
15485 -END(irq_entries_start)
15486 +ENDPROC(irq_entries_start)
15487
15488 .previous
15489 END(interrupt)
15490 @@ -861,7 +1120,7 @@ ENTRY(coprocessor_error)
15491 pushl_cfi $do_coprocessor_error
15492 jmp error_code
15493 CFI_ENDPROC
15494 -END(coprocessor_error)
15495 +ENDPROC(coprocessor_error)
15496
15497 ENTRY(simd_coprocessor_error)
15498 RING0_INT_FRAME
15499 @@ -882,7 +1141,7 @@ ENTRY(simd_coprocessor_error)
15500 #endif
15501 jmp error_code
15502 CFI_ENDPROC
15503 -END(simd_coprocessor_error)
15504 +ENDPROC(simd_coprocessor_error)
15505
15506 ENTRY(device_not_available)
15507 RING0_INT_FRAME
15508 @@ -890,18 +1149,18 @@ ENTRY(device_not_available)
15509 pushl_cfi $do_device_not_available
15510 jmp error_code
15511 CFI_ENDPROC
15512 -END(device_not_available)
15513 +ENDPROC(device_not_available)
15514
15515 #ifdef CONFIG_PARAVIRT
15516 ENTRY(native_iret)
15517 iret
15518 _ASM_EXTABLE(native_iret, iret_exc)
15519 -END(native_iret)
15520 +ENDPROC(native_iret)
15521
15522 ENTRY(native_irq_enable_sysexit)
15523 sti
15524 sysexit
15525 -END(native_irq_enable_sysexit)
15526 +ENDPROC(native_irq_enable_sysexit)
15527 #endif
15528
15529 ENTRY(overflow)
15530 @@ -910,7 +1169,7 @@ ENTRY(overflow)
15531 pushl_cfi $do_overflow
15532 jmp error_code
15533 CFI_ENDPROC
15534 -END(overflow)
15535 +ENDPROC(overflow)
15536
15537 ENTRY(bounds)
15538 RING0_INT_FRAME
15539 @@ -918,7 +1177,7 @@ ENTRY(bounds)
15540 pushl_cfi $do_bounds
15541 jmp error_code
15542 CFI_ENDPROC
15543 -END(bounds)
15544 +ENDPROC(bounds)
15545
15546 ENTRY(invalid_op)
15547 RING0_INT_FRAME
15548 @@ -926,7 +1185,7 @@ ENTRY(invalid_op)
15549 pushl_cfi $do_invalid_op
15550 jmp error_code
15551 CFI_ENDPROC
15552 -END(invalid_op)
15553 +ENDPROC(invalid_op)
15554
15555 ENTRY(coprocessor_segment_overrun)
15556 RING0_INT_FRAME
15557 @@ -934,35 +1193,35 @@ ENTRY(coprocessor_segment_overrun)
15558 pushl_cfi $do_coprocessor_segment_overrun
15559 jmp error_code
15560 CFI_ENDPROC
15561 -END(coprocessor_segment_overrun)
15562 +ENDPROC(coprocessor_segment_overrun)
15563
15564 ENTRY(invalid_TSS)
15565 RING0_EC_FRAME
15566 pushl_cfi $do_invalid_TSS
15567 jmp error_code
15568 CFI_ENDPROC
15569 -END(invalid_TSS)
15570 +ENDPROC(invalid_TSS)
15571
15572 ENTRY(segment_not_present)
15573 RING0_EC_FRAME
15574 pushl_cfi $do_segment_not_present
15575 jmp error_code
15576 CFI_ENDPROC
15577 -END(segment_not_present)
15578 +ENDPROC(segment_not_present)
15579
15580 ENTRY(stack_segment)
15581 RING0_EC_FRAME
15582 pushl_cfi $do_stack_segment
15583 jmp error_code
15584 CFI_ENDPROC
15585 -END(stack_segment)
15586 +ENDPROC(stack_segment)
15587
15588 ENTRY(alignment_check)
15589 RING0_EC_FRAME
15590 pushl_cfi $do_alignment_check
15591 jmp error_code
15592 CFI_ENDPROC
15593 -END(alignment_check)
15594 +ENDPROC(alignment_check)
15595
15596 ENTRY(divide_error)
15597 RING0_INT_FRAME
15598 @@ -970,7 +1229,7 @@ ENTRY(divide_error)
15599 pushl_cfi $do_divide_error
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(divide_error)
15603 +ENDPROC(divide_error)
15604
15605 #ifdef CONFIG_X86_MCE
15606 ENTRY(machine_check)
15607 @@ -979,7 +1238,7 @@ ENTRY(machine_check)
15608 pushl_cfi machine_check_vector
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(machine_check)
15612 +ENDPROC(machine_check)
15613 #endif
15614
15615 ENTRY(spurious_interrupt_bug)
15616 @@ -988,7 +1247,7 @@ ENTRY(spurious_interrupt_bug)
15617 pushl_cfi $do_spurious_interrupt_bug
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(spurious_interrupt_bug)
15621 +ENDPROC(spurious_interrupt_bug)
15622 /*
15623 * End of kprobes section
15624 */
15625 @@ -1100,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15626
15627 ENTRY(mcount)
15628 ret
15629 -END(mcount)
15630 +ENDPROC(mcount)
15631
15632 ENTRY(ftrace_caller)
15633 cmpl $0, function_trace_stop
15634 @@ -1129,7 +1388,7 @@ ftrace_graph_call:
15635 .globl ftrace_stub
15636 ftrace_stub:
15637 ret
15638 -END(ftrace_caller)
15639 +ENDPROC(ftrace_caller)
15640
15641 #else /* ! CONFIG_DYNAMIC_FTRACE */
15642
15643 @@ -1165,7 +1424,7 @@ trace:
15644 popl %ecx
15645 popl %eax
15646 jmp ftrace_stub
15647 -END(mcount)
15648 +ENDPROC(mcount)
15649 #endif /* CONFIG_DYNAMIC_FTRACE */
15650 #endif /* CONFIG_FUNCTION_TRACER */
15651
15652 @@ -1186,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15653 popl %ecx
15654 popl %eax
15655 ret
15656 -END(ftrace_graph_caller)
15657 +ENDPROC(ftrace_graph_caller)
15658
15659 .globl return_to_handler
15660 return_to_handler:
15661 @@ -1241,15 +1500,18 @@ error_code:
15662 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15663 REG_TO_PTGS %ecx
15664 SET_KERNEL_GS %ecx
15665 - movl $(__USER_DS), %ecx
15666 + movl $(__KERNEL_DS), %ecx
15667 movl %ecx, %ds
15668 movl %ecx, %es
15669 +
15670 + pax_enter_kernel
15671 +
15672 TRACE_IRQS_OFF
15673 movl %esp,%eax # pt_regs pointer
15674 call *%edi
15675 jmp ret_from_exception
15676 CFI_ENDPROC
15677 -END(page_fault)
15678 +ENDPROC(page_fault)
15679
15680 /*
15681 * Debug traps and NMI can happen at the one SYSENTER instruction
15682 @@ -1291,7 +1553,7 @@ debug_stack_correct:
15683 call do_debug
15684 jmp ret_from_exception
15685 CFI_ENDPROC
15686 -END(debug)
15687 +ENDPROC(debug)
15688
15689 /*
15690 * NMI is doubly nasty. It can happen _while_ we're handling
15691 @@ -1328,6 +1590,9 @@ nmi_stack_correct:
15692 xorl %edx,%edx # zero error code
15693 movl %esp,%eax # pt_regs pointer
15694 call do_nmi
15695 +
15696 + pax_exit_kernel
15697 +
15698 jmp restore_all_notrace
15699 CFI_ENDPROC
15700
15701 @@ -1364,12 +1629,15 @@ nmi_espfix_stack:
15702 FIXUP_ESPFIX_STACK # %eax == %esp
15703 xorl %edx,%edx # zero error code
15704 call do_nmi
15705 +
15706 + pax_exit_kernel
15707 +
15708 RESTORE_REGS
15709 lss 12+4(%esp), %esp # back to espfix stack
15710 CFI_ADJUST_CFA_OFFSET -24
15711 jmp irq_return
15712 CFI_ENDPROC
15713 -END(nmi)
15714 +ENDPROC(nmi)
15715
15716 ENTRY(int3)
15717 RING0_INT_FRAME
15718 @@ -1381,14 +1649,14 @@ ENTRY(int3)
15719 call do_int3
15720 jmp ret_from_exception
15721 CFI_ENDPROC
15722 -END(int3)
15723 +ENDPROC(int3)
15724
15725 ENTRY(general_protection)
15726 RING0_EC_FRAME
15727 pushl_cfi $do_general_protection
15728 jmp error_code
15729 CFI_ENDPROC
15730 -END(general_protection)
15731 +ENDPROC(general_protection)
15732
15733 #ifdef CONFIG_KVM_GUEST
15734 ENTRY(async_page_fault)
15735 @@ -1396,7 +1664,7 @@ ENTRY(async_page_fault)
15736 pushl_cfi $do_async_page_fault
15737 jmp error_code
15738 CFI_ENDPROC
15739 -END(async_page_fault)
15740 +ENDPROC(async_page_fault)
15741 #endif
15742
15743 /*
15744 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15745 index 7d65133..c888d5f 100644
15746 --- a/arch/x86/kernel/entry_64.S
15747 +++ b/arch/x86/kernel/entry_64.S
15748 @@ -57,6 +57,8 @@
15749 #include <asm/percpu.h>
15750 #include <asm/asm.h>
15751 #include <linux/err.h>
15752 +#include <asm/pgtable.h>
15753 +#include <asm/alternative-asm.h>
15754
15755 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15756 #include <linux/elf-em.h>
15757 @@ -70,8 +72,9 @@
15758 #ifdef CONFIG_FUNCTION_TRACER
15759 #ifdef CONFIG_DYNAMIC_FTRACE
15760 ENTRY(mcount)
15761 + pax_force_retaddr
15762 retq
15763 -END(mcount)
15764 +ENDPROC(mcount)
15765
15766 ENTRY(ftrace_caller)
15767 cmpl $0, function_trace_stop
15768 @@ -94,8 +97,9 @@ GLOBAL(ftrace_graph_call)
15769 #endif
15770
15771 GLOBAL(ftrace_stub)
15772 + pax_force_retaddr
15773 retq
15774 -END(ftrace_caller)
15775 +ENDPROC(ftrace_caller)
15776
15777 #else /* ! CONFIG_DYNAMIC_FTRACE */
15778 ENTRY(mcount)
15779 @@ -114,6 +118,7 @@ ENTRY(mcount)
15780 #endif
15781
15782 GLOBAL(ftrace_stub)
15783 + pax_force_retaddr
15784 retq
15785
15786 trace:
15787 @@ -123,12 +128,13 @@ trace:
15788 movq 8(%rbp), %rsi
15789 subq $MCOUNT_INSN_SIZE, %rdi
15790
15791 + pax_force_fptr ftrace_trace_function
15792 call *ftrace_trace_function
15793
15794 MCOUNT_RESTORE_FRAME
15795
15796 jmp ftrace_stub
15797 -END(mcount)
15798 +ENDPROC(mcount)
15799 #endif /* CONFIG_DYNAMIC_FTRACE */
15800 #endif /* CONFIG_FUNCTION_TRACER */
15801
15802 @@ -148,8 +154,9 @@ ENTRY(ftrace_graph_caller)
15803
15804 MCOUNT_RESTORE_FRAME
15805
15806 + pax_force_retaddr
15807 retq
15808 -END(ftrace_graph_caller)
15809 +ENDPROC(ftrace_graph_caller)
15810
15811 GLOBAL(return_to_handler)
15812 subq $24, %rsp
15813 @@ -165,6 +172,7 @@ GLOBAL(return_to_handler)
15814 movq 8(%rsp), %rdx
15815 movq (%rsp), %rax
15816 addq $24, %rsp
15817 + pax_force_fptr %rdi
15818 jmp *%rdi
15819 #endif
15820
15821 @@ -180,6 +188,280 @@ ENTRY(native_usergs_sysret64)
15822 ENDPROC(native_usergs_sysret64)
15823 #endif /* CONFIG_PARAVIRT */
15824
15825 + .macro ljmpq sel, off
15826 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15827 + .byte 0x48; ljmp *1234f(%rip)
15828 + .pushsection .rodata
15829 + .align 16
15830 + 1234: .quad \off; .word \sel
15831 + .popsection
15832 +#else
15833 + pushq $\sel
15834 + pushq $\off
15835 + lretq
15836 +#endif
15837 + .endm
15838 +
15839 + .macro pax_enter_kernel
15840 + pax_set_fptr_mask
15841 +#ifdef CONFIG_PAX_KERNEXEC
15842 + call pax_enter_kernel
15843 +#endif
15844 + .endm
15845 +
15846 + .macro pax_exit_kernel
15847 +#ifdef CONFIG_PAX_KERNEXEC
15848 + call pax_exit_kernel
15849 +#endif
15850 + .endm
15851 +
15852 +#ifdef CONFIG_PAX_KERNEXEC
15853 +ENTRY(pax_enter_kernel)
15854 + pushq %rdi
15855 +
15856 +#ifdef CONFIG_PARAVIRT
15857 + PV_SAVE_REGS(CLBR_RDI)
15858 +#endif
15859 +
15860 + GET_CR0_INTO_RDI
15861 + bts $16,%rdi
15862 + jnc 3f
15863 + mov %cs,%edi
15864 + cmp $__KERNEL_CS,%edi
15865 + jnz 2f
15866 +1:
15867 +
15868 +#ifdef CONFIG_PARAVIRT
15869 + PV_RESTORE_REGS(CLBR_RDI)
15870 +#endif
15871 +
15872 + popq %rdi
15873 + pax_force_retaddr
15874 + retq
15875 +
15876 +2: ljmpq __KERNEL_CS,1f
15877 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15878 +4: SET_RDI_INTO_CR0
15879 + jmp 1b
15880 +ENDPROC(pax_enter_kernel)
15881 +
15882 +ENTRY(pax_exit_kernel)
15883 + pushq %rdi
15884 +
15885 +#ifdef CONFIG_PARAVIRT
15886 + PV_SAVE_REGS(CLBR_RDI)
15887 +#endif
15888 +
15889 + mov %cs,%rdi
15890 + cmp $__KERNEXEC_KERNEL_CS,%edi
15891 + jz 2f
15892 +1:
15893 +
15894 +#ifdef CONFIG_PARAVIRT
15895 + PV_RESTORE_REGS(CLBR_RDI);
15896 +#endif
15897 +
15898 + popq %rdi
15899 + pax_force_retaddr
15900 + retq
15901 +
15902 +2: GET_CR0_INTO_RDI
15903 + btr $16,%rdi
15904 + ljmpq __KERNEL_CS,3f
15905 +3: SET_RDI_INTO_CR0
15906 + jmp 1b
15907 +#ifdef CONFIG_PARAVIRT
15908 + PV_RESTORE_REGS(CLBR_RDI);
15909 +#endif
15910 +
15911 + popq %rdi
15912 + pax_force_retaddr
15913 + retq
15914 +ENDPROC(pax_exit_kernel)
15915 +#endif
15916 +
15917 + .macro pax_enter_kernel_user
15918 + pax_set_fptr_mask
15919 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15920 + call pax_enter_kernel_user
15921 +#endif
15922 + .endm
15923 +
15924 + .macro pax_exit_kernel_user
15925 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15926 + call pax_exit_kernel_user
15927 +#endif
15928 +#ifdef CONFIG_PAX_RANDKSTACK
15929 + pushq %rax
15930 + call pax_randomize_kstack
15931 + popq %rax
15932 +#endif
15933 + .endm
15934 +
15935 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15936 +ENTRY(pax_enter_kernel_user)
15937 + pushq %rdi
15938 + pushq %rbx
15939 +
15940 +#ifdef CONFIG_PARAVIRT
15941 + PV_SAVE_REGS(CLBR_RDI)
15942 +#endif
15943 +
15944 + GET_CR3_INTO_RDI
15945 + mov %rdi,%rbx
15946 + add $__START_KERNEL_map,%rbx
15947 + sub phys_base(%rip),%rbx
15948 +
15949 +#ifdef CONFIG_PARAVIRT
15950 + pushq %rdi
15951 + cmpl $0, pv_info+PARAVIRT_enabled
15952 + jz 1f
15953 + i = 0
15954 + .rept USER_PGD_PTRS
15955 + mov i*8(%rbx),%rsi
15956 + mov $0,%sil
15957 + lea i*8(%rbx),%rdi
15958 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15959 + i = i + 1
15960 + .endr
15961 + jmp 2f
15962 +1:
15963 +#endif
15964 +
15965 + i = 0
15966 + .rept USER_PGD_PTRS
15967 + movb $0,i*8(%rbx)
15968 + i = i + 1
15969 + .endr
15970 +
15971 +#ifdef CONFIG_PARAVIRT
15972 +2: popq %rdi
15973 +#endif
15974 + SET_RDI_INTO_CR3
15975 +
15976 +#ifdef CONFIG_PAX_KERNEXEC
15977 + GET_CR0_INTO_RDI
15978 + bts $16,%rdi
15979 + SET_RDI_INTO_CR0
15980 +#endif
15981 +
15982 +#ifdef CONFIG_PARAVIRT
15983 + PV_RESTORE_REGS(CLBR_RDI)
15984 +#endif
15985 +
15986 + popq %rbx
15987 + popq %rdi
15988 + pax_force_retaddr
15989 + retq
15990 +ENDPROC(pax_enter_kernel_user)
15991 +
15992 +ENTRY(pax_exit_kernel_user)
15993 + push %rdi
15994 +
15995 +#ifdef CONFIG_PARAVIRT
15996 + pushq %rbx
15997 + PV_SAVE_REGS(CLBR_RDI)
15998 +#endif
15999 +
16000 +#ifdef CONFIG_PAX_KERNEXEC
16001 + GET_CR0_INTO_RDI
16002 + btr $16,%rdi
16003 + SET_RDI_INTO_CR0
16004 +#endif
16005 +
16006 + GET_CR3_INTO_RDI
16007 + add $__START_KERNEL_map,%rdi
16008 + sub phys_base(%rip),%rdi
16009 +
16010 +#ifdef CONFIG_PARAVIRT
16011 + cmpl $0, pv_info+PARAVIRT_enabled
16012 + jz 1f
16013 + mov %rdi,%rbx
16014 + i = 0
16015 + .rept USER_PGD_PTRS
16016 + mov i*8(%rbx),%rsi
16017 + mov $0x67,%sil
16018 + lea i*8(%rbx),%rdi
16019 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16020 + i = i + 1
16021 + .endr
16022 + jmp 2f
16023 +1:
16024 +#endif
16025 +
16026 + i = 0
16027 + .rept USER_PGD_PTRS
16028 + movb $0x67,i*8(%rdi)
16029 + i = i + 1
16030 + .endr
16031 +
16032 +#ifdef CONFIG_PARAVIRT
16033 +2: PV_RESTORE_REGS(CLBR_RDI)
16034 + popq %rbx
16035 +#endif
16036 +
16037 + popq %rdi
16038 + pax_force_retaddr
16039 + retq
16040 +ENDPROC(pax_exit_kernel_user)
16041 +#endif
16042 +
16043 +.macro pax_erase_kstack
16044 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16045 + call pax_erase_kstack
16046 +#endif
16047 +.endm
16048 +
16049 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16050 +ENTRY(pax_erase_kstack)
16051 + pushq %rdi
16052 + pushq %rcx
16053 + pushq %rax
16054 + pushq %r11
16055 +
16056 + GET_THREAD_INFO(%r11)
16057 + mov TI_lowest_stack(%r11), %rdi
16058 + mov $-0xBEEF, %rax
16059 + std
16060 +
16061 +1: mov %edi, %ecx
16062 + and $THREAD_SIZE_asm - 1, %ecx
16063 + shr $3, %ecx
16064 + repne scasq
16065 + jecxz 2f
16066 +
16067 + cmp $2*8, %ecx
16068 + jc 2f
16069 +
16070 + mov $2*8, %ecx
16071 + repe scasq
16072 + jecxz 2f
16073 + jne 1b
16074 +
16075 +2: cld
16076 + mov %esp, %ecx
16077 + sub %edi, %ecx
16078 +
16079 + cmp $THREAD_SIZE_asm, %rcx
16080 + jb 3f
16081 + ud2
16082 +3:
16083 +
16084 + shr $3, %ecx
16085 + rep stosq
16086 +
16087 + mov TI_task_thread_sp0(%r11), %rdi
16088 + sub $256, %rdi
16089 + mov %rdi, TI_lowest_stack(%r11)
16090 +
16091 + popq %r11
16092 + popq %rax
16093 + popq %rcx
16094 + popq %rdi
16095 + pax_force_retaddr
16096 + ret
16097 +ENDPROC(pax_erase_kstack)
16098 +#endif
16099
16100 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16101 #ifdef CONFIG_TRACE_IRQFLAGS
16102 @@ -271,8 +553,8 @@ ENDPROC(native_usergs_sysret64)
16103 .endm
16104
16105 .macro UNFAKE_STACK_FRAME
16106 - addq $8*6, %rsp
16107 - CFI_ADJUST_CFA_OFFSET -(6*8)
16108 + addq $8*6 + ARG_SKIP, %rsp
16109 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16110 .endm
16111
16112 /*
16113 @@ -359,7 +641,7 @@ ENDPROC(native_usergs_sysret64)
16114 movq %rsp, %rsi
16115
16116 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16117 - testl $3, CS-RBP(%rsi)
16118 + testb $3, CS-RBP(%rsi)
16119 je 1f
16120 SWAPGS
16121 /*
16122 @@ -394,9 +676,10 @@ ENTRY(save_rest)
16123 movq_cfi r15, R15+16
16124 movq %r11, 8(%rsp) /* return address */
16125 FIXUP_TOP_OF_STACK %r11, 16
16126 + pax_force_retaddr
16127 ret
16128 CFI_ENDPROC
16129 -END(save_rest)
16130 +ENDPROC(save_rest)
16131
16132 /* save complete stack frame */
16133 .pushsection .kprobes.text, "ax"
16134 @@ -425,9 +708,10 @@ ENTRY(save_paranoid)
16135 js 1f /* negative -> in kernel */
16136 SWAPGS
16137 xorl %ebx,%ebx
16138 -1: ret
16139 +1: pax_force_retaddr_bts
16140 + ret
16141 CFI_ENDPROC
16142 -END(save_paranoid)
16143 +ENDPROC(save_paranoid)
16144 .popsection
16145
16146 /*
16147 @@ -449,7 +733,7 @@ ENTRY(ret_from_fork)
16148
16149 RESTORE_REST
16150
16151 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16152 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16153 jz retint_restore_args
16154
16155 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16156 @@ -459,7 +743,7 @@ ENTRY(ret_from_fork)
16157 jmp ret_from_sys_call # go to the SYSRET fastpath
16158
16159 CFI_ENDPROC
16160 -END(ret_from_fork)
16161 +ENDPROC(ret_from_fork)
16162
16163 /*
16164 * System call entry. Up to 6 arguments in registers are supported.
16165 @@ -495,7 +779,7 @@ END(ret_from_fork)
16166 ENTRY(system_call)
16167 CFI_STARTPROC simple
16168 CFI_SIGNAL_FRAME
16169 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16170 + CFI_DEF_CFA rsp,0
16171 CFI_REGISTER rip,rcx
16172 /*CFI_REGISTER rflags,r11*/
16173 SWAPGS_UNSAFE_STACK
16174 @@ -508,16 +792,23 @@ GLOBAL(system_call_after_swapgs)
16175
16176 movq %rsp,PER_CPU_VAR(old_rsp)
16177 movq PER_CPU_VAR(kernel_stack),%rsp
16178 + SAVE_ARGS 8*6,0
16179 + pax_enter_kernel_user
16180 +
16181 +#ifdef CONFIG_PAX_RANDKSTACK
16182 + pax_erase_kstack
16183 +#endif
16184 +
16185 /*
16186 * No need to follow this irqs off/on section - it's straight
16187 * and short:
16188 */
16189 ENABLE_INTERRUPTS(CLBR_NONE)
16190 - SAVE_ARGS 8,0
16191 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16192 movq %rcx,RIP-ARGOFFSET(%rsp)
16193 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16194 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16195 + GET_THREAD_INFO(%rcx)
16196 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16197 jnz tracesys
16198 system_call_fastpath:
16199 #if __SYSCALL_MASK == ~0
16200 @@ -527,7 +818,7 @@ system_call_fastpath:
16201 cmpl $__NR_syscall_max,%eax
16202 #endif
16203 ja badsys
16204 - movq %r10,%rcx
16205 + movq R10-ARGOFFSET(%rsp),%rcx
16206 call *sys_call_table(,%rax,8) # XXX: rip relative
16207 movq %rax,RAX-ARGOFFSET(%rsp)
16208 /*
16209 @@ -541,10 +832,13 @@ sysret_check:
16210 LOCKDEP_SYS_EXIT
16211 DISABLE_INTERRUPTS(CLBR_NONE)
16212 TRACE_IRQS_OFF
16213 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16214 + GET_THREAD_INFO(%rcx)
16215 + movl TI_flags(%rcx),%edx
16216 andl %edi,%edx
16217 jnz sysret_careful
16218 CFI_REMEMBER_STATE
16219 + pax_exit_kernel_user
16220 + pax_erase_kstack
16221 /*
16222 * sysretq will re-enable interrupts:
16223 */
16224 @@ -596,14 +890,18 @@ badsys:
16225 * jump back to the normal fast path.
16226 */
16227 auditsys:
16228 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16229 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16230 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16231 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16232 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16233 movq %rax,%rsi /* 2nd arg: syscall number */
16234 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16235 call __audit_syscall_entry
16236 +
16237 + pax_erase_kstack
16238 +
16239 LOAD_ARGS 0 /* reload call-clobbered registers */
16240 + pax_set_fptr_mask
16241 jmp system_call_fastpath
16242
16243 /*
16244 @@ -624,7 +922,7 @@ sysret_audit:
16245 /* Do syscall tracing */
16246 tracesys:
16247 #ifdef CONFIG_AUDITSYSCALL
16248 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16249 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16250 jz auditsys
16251 #endif
16252 SAVE_REST
16253 @@ -632,12 +930,16 @@ tracesys:
16254 FIXUP_TOP_OF_STACK %rdi
16255 movq %rsp,%rdi
16256 call syscall_trace_enter
16257 +
16258 + pax_erase_kstack
16259 +
16260 /*
16261 * Reload arg registers from stack in case ptrace changed them.
16262 * We don't reload %rax because syscall_trace_enter() returned
16263 * the value it wants us to use in the table lookup.
16264 */
16265 LOAD_ARGS ARGOFFSET, 1
16266 + pax_set_fptr_mask
16267 RESTORE_REST
16268 #if __SYSCALL_MASK == ~0
16269 cmpq $__NR_syscall_max,%rax
16270 @@ -646,7 +948,7 @@ tracesys:
16271 cmpl $__NR_syscall_max,%eax
16272 #endif
16273 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16274 - movq %r10,%rcx /* fixup for C */
16275 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16276 call *sys_call_table(,%rax,8)
16277 movq %rax,RAX-ARGOFFSET(%rsp)
16278 /* Use IRET because user could have changed frame */
16279 @@ -667,7 +969,9 @@ GLOBAL(int_with_check)
16280 andl %edi,%edx
16281 jnz int_careful
16282 andl $~TS_COMPAT,TI_status(%rcx)
16283 - jmp retint_swapgs
16284 + pax_exit_kernel_user
16285 + pax_erase_kstack
16286 + jmp retint_swapgs_pax
16287
16288 /* Either reschedule or signal or syscall exit tracking needed. */
16289 /* First do a reschedule test. */
16290 @@ -713,7 +1017,7 @@ int_restore_rest:
16291 TRACE_IRQS_OFF
16292 jmp int_with_check
16293 CFI_ENDPROC
16294 -END(system_call)
16295 +ENDPROC(system_call)
16296
16297 /*
16298 * Certain special system calls that need to save a complete full stack frame.
16299 @@ -729,7 +1033,7 @@ ENTRY(\label)
16300 call \func
16301 jmp ptregscall_common
16302 CFI_ENDPROC
16303 -END(\label)
16304 +ENDPROC(\label)
16305 .endm
16306
16307 PTREGSCALL stub_clone, sys_clone, %r8
16308 @@ -747,9 +1051,10 @@ ENTRY(ptregscall_common)
16309 movq_cfi_restore R12+8, r12
16310 movq_cfi_restore RBP+8, rbp
16311 movq_cfi_restore RBX+8, rbx
16312 + pax_force_retaddr
16313 ret $REST_SKIP /* pop extended registers */
16314 CFI_ENDPROC
16315 -END(ptregscall_common)
16316 +ENDPROC(ptregscall_common)
16317
16318 ENTRY(stub_execve)
16319 CFI_STARTPROC
16320 @@ -764,7 +1069,7 @@ ENTRY(stub_execve)
16321 RESTORE_REST
16322 jmp int_ret_from_sys_call
16323 CFI_ENDPROC
16324 -END(stub_execve)
16325 +ENDPROC(stub_execve)
16326
16327 /*
16328 * sigreturn is special because it needs to restore all registers on return.
16329 @@ -782,7 +1087,7 @@ ENTRY(stub_rt_sigreturn)
16330 RESTORE_REST
16331 jmp int_ret_from_sys_call
16332 CFI_ENDPROC
16333 -END(stub_rt_sigreturn)
16334 +ENDPROC(stub_rt_sigreturn)
16335
16336 #ifdef CONFIG_X86_X32_ABI
16337 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16338 @@ -851,7 +1156,7 @@ vector=vector+1
16339 2: jmp common_interrupt
16340 .endr
16341 CFI_ENDPROC
16342 -END(irq_entries_start)
16343 +ENDPROC(irq_entries_start)
16344
16345 .previous
16346 END(interrupt)
16347 @@ -871,6 +1176,16 @@ END(interrupt)
16348 subq $ORIG_RAX-RBP, %rsp
16349 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16350 SAVE_ARGS_IRQ
16351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16352 + testb $3, CS(%rdi)
16353 + jnz 1f
16354 + pax_enter_kernel
16355 + jmp 2f
16356 +1: pax_enter_kernel_user
16357 +2:
16358 +#else
16359 + pax_enter_kernel
16360 +#endif
16361 call \func
16362 .endm
16363
16364 @@ -902,7 +1217,7 @@ ret_from_intr:
16365
16366 exit_intr:
16367 GET_THREAD_INFO(%rcx)
16368 - testl $3,CS-ARGOFFSET(%rsp)
16369 + testb $3,CS-ARGOFFSET(%rsp)
16370 je retint_kernel
16371
16372 /* Interrupt came from user space */
16373 @@ -924,12 +1239,16 @@ retint_swapgs: /* return to user-space */
16374 * The iretq could re-enable interrupts:
16375 */
16376 DISABLE_INTERRUPTS(CLBR_ANY)
16377 + pax_exit_kernel_user
16378 +retint_swapgs_pax:
16379 TRACE_IRQS_IRETQ
16380 SWAPGS
16381 jmp restore_args
16382
16383 retint_restore_args: /* return to kernel space */
16384 DISABLE_INTERRUPTS(CLBR_ANY)
16385 + pax_exit_kernel
16386 + pax_force_retaddr RIP-ARGOFFSET
16387 /*
16388 * The iretq could re-enable interrupts:
16389 */
16390 @@ -1012,7 +1331,7 @@ ENTRY(retint_kernel)
16391 #endif
16392
16393 CFI_ENDPROC
16394 -END(common_interrupt)
16395 +ENDPROC(common_interrupt)
16396 /*
16397 * End of kprobes section
16398 */
16399 @@ -1029,7 +1348,7 @@ ENTRY(\sym)
16400 interrupt \do_sym
16401 jmp ret_from_intr
16402 CFI_ENDPROC
16403 -END(\sym)
16404 +ENDPROC(\sym)
16405 .endm
16406
16407 #ifdef CONFIG_SMP
16408 @@ -1102,12 +1421,22 @@ ENTRY(\sym)
16409 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16410 call error_entry
16411 DEFAULT_FRAME 0
16412 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16413 + testb $3, CS(%rsp)
16414 + jnz 1f
16415 + pax_enter_kernel
16416 + jmp 2f
16417 +1: pax_enter_kernel_user
16418 +2:
16419 +#else
16420 + pax_enter_kernel
16421 +#endif
16422 movq %rsp,%rdi /* pt_regs pointer */
16423 xorl %esi,%esi /* no error code */
16424 call \do_sym
16425 jmp error_exit /* %ebx: no swapgs flag */
16426 CFI_ENDPROC
16427 -END(\sym)
16428 +ENDPROC(\sym)
16429 .endm
16430
16431 .macro paranoidzeroentry sym do_sym
16432 @@ -1119,15 +1448,25 @@ ENTRY(\sym)
16433 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16434 call save_paranoid
16435 TRACE_IRQS_OFF
16436 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16437 + testb $3, CS(%rsp)
16438 + jnz 1f
16439 + pax_enter_kernel
16440 + jmp 2f
16441 +1: pax_enter_kernel_user
16442 +2:
16443 +#else
16444 + pax_enter_kernel
16445 +#endif
16446 movq %rsp,%rdi /* pt_regs pointer */
16447 xorl %esi,%esi /* no error code */
16448 call \do_sym
16449 jmp paranoid_exit /* %ebx: no swapgs flag */
16450 CFI_ENDPROC
16451 -END(\sym)
16452 +ENDPROC(\sym)
16453 .endm
16454
16455 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16456 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16457 .macro paranoidzeroentry_ist sym do_sym ist
16458 ENTRY(\sym)
16459 INTR_FRAME
16460 @@ -1137,14 +1476,30 @@ ENTRY(\sym)
16461 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16462 call save_paranoid
16463 TRACE_IRQS_OFF_DEBUG
16464 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16465 + testb $3, CS(%rsp)
16466 + jnz 1f
16467 + pax_enter_kernel
16468 + jmp 2f
16469 +1: pax_enter_kernel_user
16470 +2:
16471 +#else
16472 + pax_enter_kernel
16473 +#endif
16474 movq %rsp,%rdi /* pt_regs pointer */
16475 xorl %esi,%esi /* no error code */
16476 +#ifdef CONFIG_SMP
16477 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16478 + lea init_tss(%r12), %r12
16479 +#else
16480 + lea init_tss(%rip), %r12
16481 +#endif
16482 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16483 call \do_sym
16484 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16485 jmp paranoid_exit /* %ebx: no swapgs flag */
16486 CFI_ENDPROC
16487 -END(\sym)
16488 +ENDPROC(\sym)
16489 .endm
16490
16491 .macro errorentry sym do_sym
16492 @@ -1155,13 +1510,23 @@ ENTRY(\sym)
16493 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16494 call error_entry
16495 DEFAULT_FRAME 0
16496 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16497 + testb $3, CS(%rsp)
16498 + jnz 1f
16499 + pax_enter_kernel
16500 + jmp 2f
16501 +1: pax_enter_kernel_user
16502 +2:
16503 +#else
16504 + pax_enter_kernel
16505 +#endif
16506 movq %rsp,%rdi /* pt_regs pointer */
16507 movq ORIG_RAX(%rsp),%rsi /* get error code */
16508 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16509 call \do_sym
16510 jmp error_exit /* %ebx: no swapgs flag */
16511 CFI_ENDPROC
16512 -END(\sym)
16513 +ENDPROC(\sym)
16514 .endm
16515
16516 /* error code is on the stack already */
16517 @@ -1174,13 +1539,23 @@ ENTRY(\sym)
16518 call save_paranoid
16519 DEFAULT_FRAME 0
16520 TRACE_IRQS_OFF
16521 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16522 + testb $3, CS(%rsp)
16523 + jnz 1f
16524 + pax_enter_kernel
16525 + jmp 2f
16526 +1: pax_enter_kernel_user
16527 +2:
16528 +#else
16529 + pax_enter_kernel
16530 +#endif
16531 movq %rsp,%rdi /* pt_regs pointer */
16532 movq ORIG_RAX(%rsp),%rsi /* get error code */
16533 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16534 call \do_sym
16535 jmp paranoid_exit /* %ebx: no swapgs flag */
16536 CFI_ENDPROC
16537 -END(\sym)
16538 +ENDPROC(\sym)
16539 .endm
16540
16541 zeroentry divide_error do_divide_error
16542 @@ -1210,9 +1585,10 @@ gs_change:
16543 2: mfence /* workaround */
16544 SWAPGS
16545 popfq_cfi
16546 + pax_force_retaddr
16547 ret
16548 CFI_ENDPROC
16549 -END(native_load_gs_index)
16550 +ENDPROC(native_load_gs_index)
16551
16552 _ASM_EXTABLE(gs_change,bad_gs)
16553 .section .fixup,"ax"
16554 @@ -1231,13 +1607,14 @@ ENTRY(kernel_thread_helper)
16555 * Here we are in the child and the registers are set as they were
16556 * at kernel_thread() invocation in the parent.
16557 */
16558 + pax_force_fptr %rsi
16559 call *%rsi
16560 # exit
16561 mov %eax, %edi
16562 call do_exit
16563 ud2 # padding for call trace
16564 CFI_ENDPROC
16565 -END(kernel_thread_helper)
16566 +ENDPROC(kernel_thread_helper)
16567
16568 /*
16569 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16570 @@ -1264,11 +1641,11 @@ ENTRY(kernel_execve)
16571 RESTORE_REST
16572 testq %rax,%rax
16573 je int_ret_from_sys_call
16574 - RESTORE_ARGS
16575 UNFAKE_STACK_FRAME
16576 + pax_force_retaddr
16577 ret
16578 CFI_ENDPROC
16579 -END(kernel_execve)
16580 +ENDPROC(kernel_execve)
16581
16582 /* Call softirq on interrupt stack. Interrupts are off. */
16583 ENTRY(call_softirq)
16584 @@ -1286,9 +1663,10 @@ ENTRY(call_softirq)
16585 CFI_DEF_CFA_REGISTER rsp
16586 CFI_ADJUST_CFA_OFFSET -8
16587 decl PER_CPU_VAR(irq_count)
16588 + pax_force_retaddr
16589 ret
16590 CFI_ENDPROC
16591 -END(call_softirq)
16592 +ENDPROC(call_softirq)
16593
16594 #ifdef CONFIG_XEN
16595 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16596 @@ -1326,7 +1704,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16597 decl PER_CPU_VAR(irq_count)
16598 jmp error_exit
16599 CFI_ENDPROC
16600 -END(xen_do_hypervisor_callback)
16601 +ENDPROC(xen_do_hypervisor_callback)
16602
16603 /*
16604 * Hypervisor uses this for application faults while it executes.
16605 @@ -1385,7 +1763,7 @@ ENTRY(xen_failsafe_callback)
16606 SAVE_ALL
16607 jmp error_exit
16608 CFI_ENDPROC
16609 -END(xen_failsafe_callback)
16610 +ENDPROC(xen_failsafe_callback)
16611
16612 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16613 xen_hvm_callback_vector xen_evtchn_do_upcall
16614 @@ -1434,16 +1812,31 @@ ENTRY(paranoid_exit)
16615 TRACE_IRQS_OFF_DEBUG
16616 testl %ebx,%ebx /* swapgs needed? */
16617 jnz paranoid_restore
16618 - testl $3,CS(%rsp)
16619 + testb $3,CS(%rsp)
16620 jnz paranoid_userspace
16621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16622 + pax_exit_kernel
16623 + TRACE_IRQS_IRETQ 0
16624 + SWAPGS_UNSAFE_STACK
16625 + RESTORE_ALL 8
16626 + pax_force_retaddr_bts
16627 + jmp irq_return
16628 +#endif
16629 paranoid_swapgs:
16630 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16631 + pax_exit_kernel_user
16632 +#else
16633 + pax_exit_kernel
16634 +#endif
16635 TRACE_IRQS_IRETQ 0
16636 SWAPGS_UNSAFE_STACK
16637 RESTORE_ALL 8
16638 jmp irq_return
16639 paranoid_restore:
16640 + pax_exit_kernel
16641 TRACE_IRQS_IRETQ_DEBUG 0
16642 RESTORE_ALL 8
16643 + pax_force_retaddr_bts
16644 jmp irq_return
16645 paranoid_userspace:
16646 GET_THREAD_INFO(%rcx)
16647 @@ -1472,7 +1865,7 @@ paranoid_schedule:
16648 TRACE_IRQS_OFF
16649 jmp paranoid_userspace
16650 CFI_ENDPROC
16651 -END(paranoid_exit)
16652 +ENDPROC(paranoid_exit)
16653
16654 /*
16655 * Exception entry point. This expects an error code/orig_rax on the stack.
16656 @@ -1499,12 +1892,13 @@ ENTRY(error_entry)
16657 movq_cfi r14, R14+8
16658 movq_cfi r15, R15+8
16659 xorl %ebx,%ebx
16660 - testl $3,CS+8(%rsp)
16661 + testb $3,CS+8(%rsp)
16662 je error_kernelspace
16663 error_swapgs:
16664 SWAPGS
16665 error_sti:
16666 TRACE_IRQS_OFF
16667 + pax_force_retaddr_bts
16668 ret
16669
16670 /*
16671 @@ -1531,7 +1925,7 @@ bstep_iret:
16672 movq %rcx,RIP+8(%rsp)
16673 jmp error_swapgs
16674 CFI_ENDPROC
16675 -END(error_entry)
16676 +ENDPROC(error_entry)
16677
16678
16679 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16680 @@ -1551,7 +1945,7 @@ ENTRY(error_exit)
16681 jnz retint_careful
16682 jmp retint_swapgs
16683 CFI_ENDPROC
16684 -END(error_exit)
16685 +ENDPROC(error_exit)
16686
16687 /*
16688 * Test if a given stack is an NMI stack or not.
16689 @@ -1609,9 +2003,11 @@ ENTRY(nmi)
16690 * If %cs was not the kernel segment, then the NMI triggered in user
16691 * space, which means it is definitely not nested.
16692 */
16693 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16694 + je 1f
16695 cmpl $__KERNEL_CS, 16(%rsp)
16696 jne first_nmi
16697 -
16698 +1:
16699 /*
16700 * Check the special variable on the stack to see if NMIs are
16701 * executing.
16702 @@ -1758,6 +2154,16 @@ end_repeat_nmi:
16703 */
16704 call save_paranoid
16705 DEFAULT_FRAME 0
16706 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16707 + testb $3, CS(%rsp)
16708 + jnz 1f
16709 + pax_enter_kernel
16710 + jmp 2f
16711 +1: pax_enter_kernel_user
16712 +2:
16713 +#else
16714 + pax_enter_kernel
16715 +#endif
16716 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16717 movq %rsp,%rdi
16718 movq $-1,%rsi
16719 @@ -1765,21 +2171,32 @@ end_repeat_nmi:
16720 testl %ebx,%ebx /* swapgs needed? */
16721 jnz nmi_restore
16722 nmi_swapgs:
16723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16724 + pax_exit_kernel_user
16725 +#else
16726 + pax_exit_kernel
16727 +#endif
16728 SWAPGS_UNSAFE_STACK
16729 + RESTORE_ALL 8
16730 + /* Clear the NMI executing stack variable */
16731 + movq $0, 10*8(%rsp)
16732 + jmp irq_return
16733 nmi_restore:
16734 + pax_exit_kernel
16735 RESTORE_ALL 8
16736 + pax_force_retaddr_bts
16737 /* Clear the NMI executing stack variable */
16738 movq $0, 10*8(%rsp)
16739 jmp irq_return
16740 CFI_ENDPROC
16741 -END(nmi)
16742 +ENDPROC(nmi)
16743
16744 ENTRY(ignore_sysret)
16745 CFI_STARTPROC
16746 mov $-ENOSYS,%eax
16747 sysret
16748 CFI_ENDPROC
16749 -END(ignore_sysret)
16750 +ENDPROC(ignore_sysret)
16751
16752 /*
16753 * End of kprobes section
16754 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16755 index c3a7cb4..3ad00dc 100644
16756 --- a/arch/x86/kernel/ftrace.c
16757 +++ b/arch/x86/kernel/ftrace.c
16758 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
16759 {
16760 unsigned char replaced[MCOUNT_INSN_SIZE];
16761
16762 + ip = ktla_ktva(ip);
16763 +
16764 /*
16765 * Note: Due to modules and __init, code can
16766 * disappear and change, we need to protect against faulting
16767 @@ -212,7 +214,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16768 unsigned char old[MCOUNT_INSN_SIZE], *new;
16769 int ret;
16770
16771 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16772 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16773 new = ftrace_call_replace(ip, (unsigned long)func);
16774
16775 /* See comment above by declaration of modifying_ftrace_code */
16776 @@ -605,6 +607,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16777 {
16778 unsigned char code[MCOUNT_INSN_SIZE];
16779
16780 + ip = ktla_ktva(ip);
16781 +
16782 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16783 return -EFAULT;
16784
16785 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16786 index c18f59d..9c0c9f6 100644
16787 --- a/arch/x86/kernel/head32.c
16788 +++ b/arch/x86/kernel/head32.c
16789 @@ -18,6 +18,7 @@
16790 #include <asm/io_apic.h>
16791 #include <asm/bios_ebda.h>
16792 #include <asm/tlbflush.h>
16793 +#include <asm/boot.h>
16794
16795 static void __init i386_default_early_setup(void)
16796 {
16797 @@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
16798
16799 void __init i386_start_kernel(void)
16800 {
16801 - memblock_reserve(__pa_symbol(&_text),
16802 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16803 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16804
16805 #ifdef CONFIG_BLK_DEV_INITRD
16806 /* Reserve INITRD */
16807 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16808 index d42ab17..cb1b997 100644
16809 --- a/arch/x86/kernel/head_32.S
16810 +++ b/arch/x86/kernel/head_32.S
16811 @@ -26,6 +26,12 @@
16812 /* Physical address */
16813 #define pa(X) ((X) - __PAGE_OFFSET)
16814
16815 +#ifdef CONFIG_PAX_KERNEXEC
16816 +#define ta(X) (X)
16817 +#else
16818 +#define ta(X) ((X) - __PAGE_OFFSET)
16819 +#endif
16820 +
16821 /*
16822 * References to members of the new_cpu_data structure.
16823 */
16824 @@ -55,11 +61,7 @@
16825 * and small than max_low_pfn, otherwise will waste some page table entries
16826 */
16827
16828 -#if PTRS_PER_PMD > 1
16829 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16830 -#else
16831 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16832 -#endif
16833 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16834
16835 /* Number of possible pages in the lowmem region */
16836 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16837 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16838 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16839
16840 /*
16841 + * Real beginning of normal "text" segment
16842 + */
16843 +ENTRY(stext)
16844 +ENTRY(_stext)
16845 +
16846 +/*
16847 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16848 * %esi points to the real-mode code as a 32-bit pointer.
16849 * CS and DS must be 4 GB flat segments, but we don't depend on
16850 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16851 * can.
16852 */
16853 __HEAD
16854 +
16855 +#ifdef CONFIG_PAX_KERNEXEC
16856 + jmp startup_32
16857 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16858 +.fill PAGE_SIZE-5,1,0xcc
16859 +#endif
16860 +
16861 ENTRY(startup_32)
16862 movl pa(stack_start),%ecx
16863
16864 @@ -106,6 +121,57 @@ ENTRY(startup_32)
16865 2:
16866 leal -__PAGE_OFFSET(%ecx),%esp
16867
16868 +#ifdef CONFIG_SMP
16869 + movl $pa(cpu_gdt_table),%edi
16870 + movl $__per_cpu_load,%eax
16871 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16872 + rorl $16,%eax
16873 + movb %al,__KERNEL_PERCPU + 4(%edi)
16874 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16875 + movl $__per_cpu_end - 1,%eax
16876 + subl $__per_cpu_start,%eax
16877 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16878 +#endif
16879 +
16880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16881 + movl $NR_CPUS,%ecx
16882 + movl $pa(cpu_gdt_table),%edi
16883 +1:
16884 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16885 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16886 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16887 + addl $PAGE_SIZE_asm,%edi
16888 + loop 1b
16889 +#endif
16890 +
16891 +#ifdef CONFIG_PAX_KERNEXEC
16892 + movl $pa(boot_gdt),%edi
16893 + movl $__LOAD_PHYSICAL_ADDR,%eax
16894 + movw %ax,__BOOT_CS + 2(%edi)
16895 + rorl $16,%eax
16896 + movb %al,__BOOT_CS + 4(%edi)
16897 + movb %ah,__BOOT_CS + 7(%edi)
16898 + rorl $16,%eax
16899 +
16900 + ljmp $(__BOOT_CS),$1f
16901 +1:
16902 +
16903 + movl $NR_CPUS,%ecx
16904 + movl $pa(cpu_gdt_table),%edi
16905 + addl $__PAGE_OFFSET,%eax
16906 +1:
16907 + movw %ax,__KERNEL_CS + 2(%edi)
16908 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16909 + rorl $16,%eax
16910 + movb %al,__KERNEL_CS + 4(%edi)
16911 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16912 + movb %ah,__KERNEL_CS + 7(%edi)
16913 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16914 + rorl $16,%eax
16915 + addl $PAGE_SIZE_asm,%edi
16916 + loop 1b
16917 +#endif
16918 +
16919 /*
16920 * Clear BSS first so that there are no surprises...
16921 */
16922 @@ -196,8 +262,11 @@ ENTRY(startup_32)
16923 movl %eax, pa(max_pfn_mapped)
16924
16925 /* Do early initialization of the fixmap area */
16926 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16927 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16928 +#ifdef CONFIG_COMPAT_VDSO
16929 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16930 +#else
16931 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16932 +#endif
16933 #else /* Not PAE */
16934
16935 page_pde_offset = (__PAGE_OFFSET >> 20);
16936 @@ -227,8 +296,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16937 movl %eax, pa(max_pfn_mapped)
16938
16939 /* Do early initialization of the fixmap area */
16940 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16941 - movl %eax,pa(initial_page_table+0xffc)
16942 +#ifdef CONFIG_COMPAT_VDSO
16943 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16944 +#else
16945 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16946 +#endif
16947 #endif
16948
16949 #ifdef CONFIG_PARAVIRT
16950 @@ -242,9 +314,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16951 cmpl $num_subarch_entries, %eax
16952 jae bad_subarch
16953
16954 - movl pa(subarch_entries)(,%eax,4), %eax
16955 - subl $__PAGE_OFFSET, %eax
16956 - jmp *%eax
16957 + jmp *pa(subarch_entries)(,%eax,4)
16958
16959 bad_subarch:
16960 WEAK(lguest_entry)
16961 @@ -256,10 +326,10 @@ WEAK(xen_entry)
16962 __INITDATA
16963
16964 subarch_entries:
16965 - .long default_entry /* normal x86/PC */
16966 - .long lguest_entry /* lguest hypervisor */
16967 - .long xen_entry /* Xen hypervisor */
16968 - .long default_entry /* Moorestown MID */
16969 + .long ta(default_entry) /* normal x86/PC */
16970 + .long ta(lguest_entry) /* lguest hypervisor */
16971 + .long ta(xen_entry) /* Xen hypervisor */
16972 + .long ta(default_entry) /* Moorestown MID */
16973 num_subarch_entries = (. - subarch_entries) / 4
16974 .previous
16975 #else
16976 @@ -310,6 +380,7 @@ default_entry:
16977 orl %edx,%eax
16978 movl %eax,%cr4
16979
16980 +#ifdef CONFIG_X86_PAE
16981 testb $X86_CR4_PAE, %al # check if PAE is enabled
16982 jz 6f
16983
16984 @@ -338,6 +409,9 @@ default_entry:
16985 /* Make changes effective */
16986 wrmsr
16987
16988 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16989 +#endif
16990 +
16991 6:
16992
16993 /*
16994 @@ -436,14 +510,20 @@ is386: movl $2,%ecx # set MP
16995 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16996 movl %eax,%ss # after changing gdt.
16997
16998 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16999 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17000 movl %eax,%ds
17001 movl %eax,%es
17002
17003 movl $(__KERNEL_PERCPU), %eax
17004 movl %eax,%fs # set this cpu's percpu
17005
17006 +#ifdef CONFIG_CC_STACKPROTECTOR
17007 movl $(__KERNEL_STACK_CANARY),%eax
17008 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17009 + movl $(__USER_DS),%eax
17010 +#else
17011 + xorl %eax,%eax
17012 +#endif
17013 movl %eax,%gs
17014
17015 xorl %eax,%eax # Clear LDT
17016 @@ -520,8 +600,11 @@ setup_once:
17017 * relocation. Manually set base address in stack canary
17018 * segment descriptor.
17019 */
17020 - movl $gdt_page,%eax
17021 + movl $cpu_gdt_table,%eax
17022 movl $stack_canary,%ecx
17023 +#ifdef CONFIG_SMP
17024 + addl $__per_cpu_load,%ecx
17025 +#endif
17026 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17027 shrl $16, %ecx
17028 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17029 @@ -552,7 +635,7 @@ ENDPROC(early_idt_handlers)
17030 /* This is global to keep gas from relaxing the jumps */
17031 ENTRY(early_idt_handler)
17032 cld
17033 - cmpl $2,%ss:early_recursion_flag
17034 + cmpl $1,%ss:early_recursion_flag
17035 je hlt_loop
17036 incl %ss:early_recursion_flag
17037
17038 @@ -590,8 +673,8 @@ ENTRY(early_idt_handler)
17039 pushl (20+6*4)(%esp) /* trapno */
17040 pushl $fault_msg
17041 call printk
17042 -#endif
17043 call dump_stack
17044 +#endif
17045 hlt_loop:
17046 hlt
17047 jmp hlt_loop
17048 @@ -610,8 +693,11 @@ ENDPROC(early_idt_handler)
17049 /* This is the default interrupt "handler" :-) */
17050 ALIGN
17051 ignore_int:
17052 - cld
17053 #ifdef CONFIG_PRINTK
17054 + cmpl $2,%ss:early_recursion_flag
17055 + je hlt_loop
17056 + incl %ss:early_recursion_flag
17057 + cld
17058 pushl %eax
17059 pushl %ecx
17060 pushl %edx
17061 @@ -620,9 +706,6 @@ ignore_int:
17062 movl $(__KERNEL_DS),%eax
17063 movl %eax,%ds
17064 movl %eax,%es
17065 - cmpl $2,early_recursion_flag
17066 - je hlt_loop
17067 - incl early_recursion_flag
17068 pushl 16(%esp)
17069 pushl 24(%esp)
17070 pushl 32(%esp)
17071 @@ -656,29 +739,43 @@ ENTRY(setup_once_ref)
17072 /*
17073 * BSS section
17074 */
17075 -__PAGE_ALIGNED_BSS
17076 - .align PAGE_SIZE
17077 #ifdef CONFIG_X86_PAE
17078 +.section .initial_pg_pmd,"a",@progbits
17079 initial_pg_pmd:
17080 .fill 1024*KPMDS,4,0
17081 #else
17082 +.section .initial_page_table,"a",@progbits
17083 ENTRY(initial_page_table)
17084 .fill 1024,4,0
17085 #endif
17086 +.section .initial_pg_fixmap,"a",@progbits
17087 initial_pg_fixmap:
17088 .fill 1024,4,0
17089 +.section .empty_zero_page,"a",@progbits
17090 ENTRY(empty_zero_page)
17091 .fill 4096,1,0
17092 +.section .swapper_pg_dir,"a",@progbits
17093 ENTRY(swapper_pg_dir)
17094 +#ifdef CONFIG_X86_PAE
17095 + .fill 4,8,0
17096 +#else
17097 .fill 1024,4,0
17098 +#endif
17099 +
17100 +/*
17101 + * The IDT has to be page-aligned to simplify the Pentium
17102 + * F0 0F bug workaround.. We have a special link segment
17103 + * for this.
17104 + */
17105 +.section .idt,"a",@progbits
17106 +ENTRY(idt_table)
17107 + .fill 256,8,0
17108
17109 /*
17110 * This starts the data section.
17111 */
17112 #ifdef CONFIG_X86_PAE
17113 -__PAGE_ALIGNED_DATA
17114 - /* Page-aligned for the benefit of paravirt? */
17115 - .align PAGE_SIZE
17116 +.section .initial_page_table,"a",@progbits
17117 ENTRY(initial_page_table)
17118 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17119 # if KPMDS == 3
17120 @@ -697,12 +794,20 @@ ENTRY(initial_page_table)
17121 # error "Kernel PMDs should be 1, 2 or 3"
17122 # endif
17123 .align PAGE_SIZE /* needs to be page-sized too */
17124 +
17125 +#ifdef CONFIG_PAX_PER_CPU_PGD
17126 +ENTRY(cpu_pgd)
17127 + .rept NR_CPUS
17128 + .fill 4,8,0
17129 + .endr
17130 +#endif
17131 +
17132 #endif
17133
17134 .data
17135 .balign 4
17136 ENTRY(stack_start)
17137 - .long init_thread_union+THREAD_SIZE
17138 + .long init_thread_union+THREAD_SIZE-8
17139
17140 __INITRODATA
17141 int_msg:
17142 @@ -730,7 +835,7 @@ fault_msg:
17143 * segment size, and 32-bit linear address value:
17144 */
17145
17146 - .data
17147 +.section .rodata,"a",@progbits
17148 .globl boot_gdt_descr
17149 .globl idt_descr
17150
17151 @@ -739,7 +844,7 @@ fault_msg:
17152 .word 0 # 32 bit align gdt_desc.address
17153 boot_gdt_descr:
17154 .word __BOOT_DS+7
17155 - .long boot_gdt - __PAGE_OFFSET
17156 + .long pa(boot_gdt)
17157
17158 .word 0 # 32-bit align idt_desc.address
17159 idt_descr:
17160 @@ -750,7 +855,7 @@ idt_descr:
17161 .word 0 # 32 bit align gdt_desc.address
17162 ENTRY(early_gdt_descr)
17163 .word GDT_ENTRIES*8-1
17164 - .long gdt_page /* Overwritten for secondary CPUs */
17165 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17166
17167 /*
17168 * The boot_gdt must mirror the equivalent in setup.S and is
17169 @@ -759,5 +864,65 @@ ENTRY(early_gdt_descr)
17170 .align L1_CACHE_BYTES
17171 ENTRY(boot_gdt)
17172 .fill GDT_ENTRY_BOOT_CS,8,0
17173 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17174 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17175 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17176 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17177 +
17178 + .align PAGE_SIZE_asm
17179 +ENTRY(cpu_gdt_table)
17180 + .rept NR_CPUS
17181 + .quad 0x0000000000000000 /* NULL descriptor */
17182 + .quad 0x0000000000000000 /* 0x0b reserved */
17183 + .quad 0x0000000000000000 /* 0x13 reserved */
17184 + .quad 0x0000000000000000 /* 0x1b reserved */
17185 +
17186 +#ifdef CONFIG_PAX_KERNEXEC
17187 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17188 +#else
17189 + .quad 0x0000000000000000 /* 0x20 unused */
17190 +#endif
17191 +
17192 + .quad 0x0000000000000000 /* 0x28 unused */
17193 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17194 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17195 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17196 + .quad 0x0000000000000000 /* 0x4b reserved */
17197 + .quad 0x0000000000000000 /* 0x53 reserved */
17198 + .quad 0x0000000000000000 /* 0x5b reserved */
17199 +
17200 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17201 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17202 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17203 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17204 +
17205 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17206 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17207 +
17208 + /*
17209 + * Segments used for calling PnP BIOS have byte granularity.
17210 + * The code segments and data segments have fixed 64k limits,
17211 + * the transfer segment sizes are set at run time.
17212 + */
17213 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17214 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17215 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17216 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17217 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17218 +
17219 + /*
17220 + * The APM segments have byte granularity and their bases
17221 + * are set at run time. All have 64k limits.
17222 + */
17223 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17224 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17225 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17226 +
17227 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17228 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17229 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17230 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17231 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17232 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17233 +
17234 + /* Be sure this is zeroed to avoid false validations in Xen */
17235 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17236 + .endr
17237 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17238 index 94bf9cc..400455a 100644
17239 --- a/arch/x86/kernel/head_64.S
17240 +++ b/arch/x86/kernel/head_64.S
17241 @@ -20,6 +20,8 @@
17242 #include <asm/processor-flags.h>
17243 #include <asm/percpu.h>
17244 #include <asm/nops.h>
17245 +#include <asm/cpufeature.h>
17246 +#include <asm/alternative-asm.h>
17247
17248 #ifdef CONFIG_PARAVIRT
17249 #include <asm/asm-offsets.h>
17250 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17251 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17252 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17253 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17254 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17255 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17256 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17257 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17258 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17259 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17260
17261 .text
17262 __HEAD
17263 @@ -88,35 +96,23 @@ startup_64:
17264 */
17265 addq %rbp, init_level4_pgt + 0(%rip)
17266 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17267 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17268 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17269 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17270 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17271
17272 addq %rbp, level3_ident_pgt + 0(%rip)
17273 +#ifndef CONFIG_XEN
17274 + addq %rbp, level3_ident_pgt + 8(%rip)
17275 +#endif
17276
17277 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17278 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17279 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17280 +
17281 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17282 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17283
17284 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17285 -
17286 - /* Add an Identity mapping if I am above 1G */
17287 - leaq _text(%rip), %rdi
17288 - andq $PMD_PAGE_MASK, %rdi
17289 -
17290 - movq %rdi, %rax
17291 - shrq $PUD_SHIFT, %rax
17292 - andq $(PTRS_PER_PUD - 1), %rax
17293 - jz ident_complete
17294 -
17295 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17296 - leaq level3_ident_pgt(%rip), %rbx
17297 - movq %rdx, 0(%rbx, %rax, 8)
17298 -
17299 - movq %rdi, %rax
17300 - shrq $PMD_SHIFT, %rax
17301 - andq $(PTRS_PER_PMD - 1), %rax
17302 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17303 - leaq level2_spare_pgt(%rip), %rbx
17304 - movq %rdx, 0(%rbx, %rax, 8)
17305 -ident_complete:
17306 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17307
17308 /*
17309 * Fixup the kernel text+data virtual addresses. Note that
17310 @@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
17311 * after the boot processor executes this code.
17312 */
17313
17314 - /* Enable PAE mode and PGE */
17315 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17316 + /* Enable PAE mode and PSE/PGE */
17317 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17318 movq %rax, %cr4
17319
17320 /* Setup early boot stage 4 level pagetables. */
17321 @@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
17322 movl $MSR_EFER, %ecx
17323 rdmsr
17324 btsl $_EFER_SCE, %eax /* Enable System Call */
17325 - btl $20,%edi /* No Execute supported? */
17326 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17327 jnc 1f
17328 btsl $_EFER_NX, %eax
17329 + leaq init_level4_pgt(%rip), %rdi
17330 +#ifndef CONFIG_EFI
17331 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17332 +#endif
17333 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17334 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17335 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17336 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17337 1: wrmsr /* Make changes effective */
17338
17339 /* Setup cr0 */
17340 @@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
17341 * jump. In addition we need to ensure %cs is set so we make this
17342 * a far return.
17343 */
17344 + pax_set_fptr_mask
17345 movq initial_code(%rip),%rax
17346 pushq $0 # fake return address to stop unwinder
17347 pushq $__KERNEL_CS # set correct cs
17348 @@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
17349 bad_address:
17350 jmp bad_address
17351
17352 - .section ".init.text","ax"
17353 + __INIT
17354 .globl early_idt_handlers
17355 early_idt_handlers:
17356 # 104(%rsp) %rflags
17357 @@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
17358 addq $16,%rsp # drop vector number and error code
17359 decl early_recursion_flag(%rip)
17360 INTERRUPT_RETURN
17361 + .previous
17362
17363 + __INITDATA
17364 .balign 4
17365 early_recursion_flag:
17366 .long 0
17367 + .previous
17368
17369 + .section .rodata,"a",@progbits
17370 #ifdef CONFIG_EARLY_PRINTK
17371 early_idt_msg:
17372 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17373 @@ -360,6 +369,7 @@ early_idt_ripmsg:
17374 #endif /* CONFIG_EARLY_PRINTK */
17375 .previous
17376
17377 + .section .rodata,"a",@progbits
17378 #define NEXT_PAGE(name) \
17379 .balign PAGE_SIZE; \
17380 ENTRY(name)
17381 @@ -372,7 +382,6 @@ ENTRY(name)
17382 i = i + 1 ; \
17383 .endr
17384
17385 - .data
17386 /*
17387 * This default setting generates an ident mapping at address 0x100000
17388 * and a mapping for the kernel that precisely maps virtual address
17389 @@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
17390 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17391 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17392 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17393 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17394 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17395 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17396 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17397 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17398 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17399 .org init_level4_pgt + L4_START_KERNEL*8, 0
17400 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17401 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17402
17403 +#ifdef CONFIG_PAX_PER_CPU_PGD
17404 +NEXT_PAGE(cpu_pgd)
17405 + .rept NR_CPUS
17406 + .fill 512,8,0
17407 + .endr
17408 +#endif
17409 +
17410 NEXT_PAGE(level3_ident_pgt)
17411 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17412 +#ifdef CONFIG_XEN
17413 .fill 511,8,0
17414 +#else
17415 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17416 + .fill 510,8,0
17417 +#endif
17418 +
17419 +NEXT_PAGE(level3_vmalloc_start_pgt)
17420 + .fill 512,8,0
17421 +
17422 +NEXT_PAGE(level3_vmalloc_end_pgt)
17423 + .fill 512,8,0
17424 +
17425 +NEXT_PAGE(level3_vmemmap_pgt)
17426 + .fill L3_VMEMMAP_START,8,0
17427 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17428
17429 NEXT_PAGE(level3_kernel_pgt)
17430 .fill L3_START_KERNEL,8,0
17431 @@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
17432 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17433 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17434
17435 +NEXT_PAGE(level2_vmemmap_pgt)
17436 + .fill 512,8,0
17437 +
17438 NEXT_PAGE(level2_fixmap_pgt)
17439 - .fill 506,8,0
17440 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17441 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17442 - .fill 5,8,0
17443 + .fill 507,8,0
17444 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17445 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17446 + .fill 4,8,0
17447
17448 -NEXT_PAGE(level1_fixmap_pgt)
17449 +NEXT_PAGE(level1_vsyscall_pgt)
17450 .fill 512,8,0
17451
17452 -NEXT_PAGE(level2_ident_pgt)
17453 - /* Since I easily can, map the first 1G.
17454 + /* Since I easily can, map the first 2G.
17455 * Don't set NX because code runs from these pages.
17456 */
17457 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17458 +NEXT_PAGE(level2_ident_pgt)
17459 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17460
17461 NEXT_PAGE(level2_kernel_pgt)
17462 /*
17463 @@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
17464 * If you want to increase this then increase MODULES_VADDR
17465 * too.)
17466 */
17467 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17468 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17469 -
17470 -NEXT_PAGE(level2_spare_pgt)
17471 - .fill 512, 8, 0
17472 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17473
17474 #undef PMDS
17475 #undef NEXT_PAGE
17476
17477 - .data
17478 + .align PAGE_SIZE
17479 +ENTRY(cpu_gdt_table)
17480 + .rept NR_CPUS
17481 + .quad 0x0000000000000000 /* NULL descriptor */
17482 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17483 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17484 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17485 + .quad 0x00cffb000000ffff /* __USER32_CS */
17486 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17487 + .quad 0x00affb000000ffff /* __USER_CS */
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17491 +#else
17492 + .quad 0x0 /* unused */
17493 +#endif
17494 +
17495 + .quad 0,0 /* TSS */
17496 + .quad 0,0 /* LDT */
17497 + .quad 0,0,0 /* three TLS descriptors */
17498 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17499 + /* asm/segment.h:GDT_ENTRIES must match this */
17500 +
17501 + /* zero the remaining page */
17502 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17503 + .endr
17504 +
17505 .align 16
17506 .globl early_gdt_descr
17507 early_gdt_descr:
17508 .word GDT_ENTRIES*8-1
17509 early_gdt_descr_base:
17510 - .quad INIT_PER_CPU_VAR(gdt_page)
17511 + .quad cpu_gdt_table
17512
17513 ENTRY(phys_base)
17514 /* This must match the first entry in level2_kernel_pgt */
17515 .quad 0x0000000000000000
17516
17517 #include "../../x86/xen/xen-head.S"
17518 -
17519 - .section .bss, "aw", @nobits
17520 +
17521 + .section .rodata,"a",@progbits
17522 .align L1_CACHE_BYTES
17523 ENTRY(idt_table)
17524 - .skip IDT_ENTRIES * 16
17525 + .fill 512,8,0
17526
17527 .align L1_CACHE_BYTES
17528 ENTRY(nmi_idt_table)
17529 - .skip IDT_ENTRIES * 16
17530 + .fill 512,8,0
17531
17532 __PAGE_ALIGNED_BSS
17533 .align PAGE_SIZE
17534 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17535 index 9c3bd4a..e1d9b35 100644
17536 --- a/arch/x86/kernel/i386_ksyms_32.c
17537 +++ b/arch/x86/kernel/i386_ksyms_32.c
17538 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17539 EXPORT_SYMBOL(cmpxchg8b_emu);
17540 #endif
17541
17542 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17543 +
17544 /* Networking helper routines. */
17545 EXPORT_SYMBOL(csum_partial_copy_generic);
17546 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17547 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17548
17549 EXPORT_SYMBOL(__get_user_1);
17550 EXPORT_SYMBOL(__get_user_2);
17551 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17552
17553 EXPORT_SYMBOL(csum_partial);
17554 EXPORT_SYMBOL(empty_zero_page);
17555 +
17556 +#ifdef CONFIG_PAX_KERNEXEC
17557 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17558 +#endif
17559 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17560 index f250431..54097e7 100644
17561 --- a/arch/x86/kernel/i387.c
17562 +++ b/arch/x86/kernel/i387.c
17563 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17564 static inline bool interrupted_user_mode(void)
17565 {
17566 struct pt_regs *regs = get_irq_regs();
17567 - return regs && user_mode_vm(regs);
17568 + return regs && user_mode(regs);
17569 }
17570
17571 /*
17572 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17573 index 36d1853..bf25736 100644
17574 --- a/arch/x86/kernel/i8259.c
17575 +++ b/arch/x86/kernel/i8259.c
17576 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17577 "spurious 8259A interrupt: IRQ%d.\n", irq);
17578 spurious_irq_mask |= irqmask;
17579 }
17580 - atomic_inc(&irq_err_count);
17581 + atomic_inc_unchecked(&irq_err_count);
17582 /*
17583 * Theoretically we do not have to handle this IRQ,
17584 * but in Linux this does not cause problems and is
17585 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17586 index 8c96897..be66bfa 100644
17587 --- a/arch/x86/kernel/ioport.c
17588 +++ b/arch/x86/kernel/ioport.c
17589 @@ -6,6 +6,7 @@
17590 #include <linux/sched.h>
17591 #include <linux/kernel.h>
17592 #include <linux/capability.h>
17593 +#include <linux/security.h>
17594 #include <linux/errno.h>
17595 #include <linux/types.h>
17596 #include <linux/ioport.h>
17597 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17598
17599 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17600 return -EINVAL;
17601 +#ifdef CONFIG_GRKERNSEC_IO
17602 + if (turn_on && grsec_disable_privio) {
17603 + gr_handle_ioperm();
17604 + return -EPERM;
17605 + }
17606 +#endif
17607 if (turn_on && !capable(CAP_SYS_RAWIO))
17608 return -EPERM;
17609
17610 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17611 * because the ->io_bitmap_max value must match the bitmap
17612 * contents:
17613 */
17614 - tss = &per_cpu(init_tss, get_cpu());
17615 + tss = init_tss + get_cpu();
17616
17617 if (turn_on)
17618 bitmap_clear(t->io_bitmap_ptr, from, num);
17619 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17620 return -EINVAL;
17621 /* Trying to gain more privileges? */
17622 if (level > old) {
17623 +#ifdef CONFIG_GRKERNSEC_IO
17624 + if (grsec_disable_privio) {
17625 + gr_handle_iopl();
17626 + return -EPERM;
17627 + }
17628 +#endif
17629 if (!capable(CAP_SYS_RAWIO))
17630 return -EPERM;
17631 }
17632 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17633 index 3dafc60..aa8e9c4 100644
17634 --- a/arch/x86/kernel/irq.c
17635 +++ b/arch/x86/kernel/irq.c
17636 @@ -18,7 +18,7 @@
17637 #include <asm/mce.h>
17638 #include <asm/hw_irq.h>
17639
17640 -atomic_t irq_err_count;
17641 +atomic_unchecked_t irq_err_count;
17642
17643 /* Function pointer for generic interrupt vector handling */
17644 void (*x86_platform_ipi_callback)(void) = NULL;
17645 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17646 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17647 seq_printf(p, " Machine check polls\n");
17648 #endif
17649 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17650 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17651 #if defined(CONFIG_X86_IO_APIC)
17652 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17653 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17654 #endif
17655 return 0;
17656 }
17657 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17658
17659 u64 arch_irq_stat(void)
17660 {
17661 - u64 sum = atomic_read(&irq_err_count);
17662 + u64 sum = atomic_read_unchecked(&irq_err_count);
17663
17664 #ifdef CONFIG_X86_IO_APIC
17665 - sum += atomic_read(&irq_mis_count);
17666 + sum += atomic_read_unchecked(&irq_mis_count);
17667 #endif
17668 return sum;
17669 }
17670 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17671 index 344faf8..355f60d 100644
17672 --- a/arch/x86/kernel/irq_32.c
17673 +++ b/arch/x86/kernel/irq_32.c
17674 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17675 __asm__ __volatile__("andl %%esp,%0" :
17676 "=r" (sp) : "0" (THREAD_SIZE - 1));
17677
17678 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17679 + return sp < STACK_WARN;
17680 }
17681
17682 static void print_stack_overflow(void)
17683 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17684 * per-CPU IRQ handling contexts (thread information and stack)
17685 */
17686 union irq_ctx {
17687 - struct thread_info tinfo;
17688 - u32 stack[THREAD_SIZE/sizeof(u32)];
17689 + unsigned long previous_esp;
17690 + u32 stack[THREAD_SIZE/sizeof(u32)];
17691 } __attribute__((aligned(THREAD_SIZE)));
17692
17693 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17694 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17695 static inline int
17696 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17697 {
17698 - union irq_ctx *curctx, *irqctx;
17699 + union irq_ctx *irqctx;
17700 u32 *isp, arg1, arg2;
17701
17702 - curctx = (union irq_ctx *) current_thread_info();
17703 irqctx = __this_cpu_read(hardirq_ctx);
17704
17705 /*
17706 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17707 * handler) we can't do that and just have to keep using the
17708 * current stack (which is the irq stack already after all)
17709 */
17710 - if (unlikely(curctx == irqctx))
17711 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17712 return 0;
17713
17714 /* build the stack frame on the IRQ stack */
17715 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17716 - irqctx->tinfo.task = curctx->tinfo.task;
17717 - irqctx->tinfo.previous_esp = current_stack_pointer;
17718 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17719 + irqctx->previous_esp = current_stack_pointer;
17720
17721 - /* Copy the preempt_count so that the [soft]irq checks work. */
17722 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17724 + __set_fs(MAKE_MM_SEG(0));
17725 +#endif
17726
17727 if (unlikely(overflow))
17728 call_on_stack(print_stack_overflow, isp);
17729 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17730 : "0" (irq), "1" (desc), "2" (isp),
17731 "D" (desc->handle_irq)
17732 : "memory", "cc", "ecx");
17733 +
17734 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17735 + __set_fs(current_thread_info()->addr_limit);
17736 +#endif
17737 +
17738 return 1;
17739 }
17740
17741 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17742 */
17743 void __cpuinit irq_ctx_init(int cpu)
17744 {
17745 - union irq_ctx *irqctx;
17746 -
17747 if (per_cpu(hardirq_ctx, cpu))
17748 return;
17749
17750 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17751 - THREADINFO_GFP,
17752 - THREAD_SIZE_ORDER));
17753 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17754 - irqctx->tinfo.cpu = cpu;
17755 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17756 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17757 -
17758 - per_cpu(hardirq_ctx, cpu) = irqctx;
17759 -
17760 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17761 - THREADINFO_GFP,
17762 - THREAD_SIZE_ORDER));
17763 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17764 - irqctx->tinfo.cpu = cpu;
17765 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17766 -
17767 - per_cpu(softirq_ctx, cpu) = irqctx;
17768 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17769 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17770 +
17771 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17772 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17773
17774 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17775 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17776 @@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
17777 asmlinkage void do_softirq(void)
17778 {
17779 unsigned long flags;
17780 - struct thread_info *curctx;
17781 union irq_ctx *irqctx;
17782 u32 *isp;
17783
17784 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
17785 local_irq_save(flags);
17786
17787 if (local_softirq_pending()) {
17788 - curctx = current_thread_info();
17789 irqctx = __this_cpu_read(softirq_ctx);
17790 - irqctx->tinfo.task = curctx->task;
17791 - irqctx->tinfo.previous_esp = current_stack_pointer;
17792 + irqctx->previous_esp = current_stack_pointer;
17793
17794 /* build the stack frame on the softirq stack */
17795 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17796 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17797 +
17798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17799 + __set_fs(MAKE_MM_SEG(0));
17800 +#endif
17801
17802 call_on_stack(__do_softirq, isp);
17803 +
17804 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17805 + __set_fs(current_thread_info()->addr_limit);
17806 +#endif
17807 +
17808 /*
17809 * Shouldn't happen, we returned above if in_interrupt():
17810 */
17811 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17812 if (unlikely(!desc))
17813 return false;
17814
17815 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17816 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17817 if (unlikely(overflow))
17818 print_stack_overflow();
17819 desc->handle_irq(irq, desc);
17820 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17821 index d04d3ec..ea4b374 100644
17822 --- a/arch/x86/kernel/irq_64.c
17823 +++ b/arch/x86/kernel/irq_64.c
17824 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17825 u64 estack_top, estack_bottom;
17826 u64 curbase = (u64)task_stack_page(current);
17827
17828 - if (user_mode_vm(regs))
17829 + if (user_mode(regs))
17830 return;
17831
17832 if (regs->sp >= curbase + sizeof(struct thread_info) +
17833 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17834 index 1d5d31e..72731d4 100644
17835 --- a/arch/x86/kernel/kdebugfs.c
17836 +++ b/arch/x86/kernel/kdebugfs.c
17837 @@ -27,7 +27,7 @@ struct setup_data_node {
17838 u32 len;
17839 };
17840
17841 -static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17842 +static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
17843 size_t count, loff_t *ppos)
17844 {
17845 struct setup_data_node *node = file->private_data;
17846 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17847 index 3f61904..873cea9 100644
17848 --- a/arch/x86/kernel/kgdb.c
17849 +++ b/arch/x86/kernel/kgdb.c
17850 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17851 #ifdef CONFIG_X86_32
17852 switch (regno) {
17853 case GDB_SS:
17854 - if (!user_mode_vm(regs))
17855 + if (!user_mode(regs))
17856 *(unsigned long *)mem = __KERNEL_DS;
17857 break;
17858 case GDB_SP:
17859 - if (!user_mode_vm(regs))
17860 + if (!user_mode(regs))
17861 *(unsigned long *)mem = kernel_stack_pointer(regs);
17862 break;
17863 case GDB_GS:
17864 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17865 case 'k':
17866 /* clear the trace bit */
17867 linux_regs->flags &= ~X86_EFLAGS_TF;
17868 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17869 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17870
17871 /* set the trace bit if we're stepping */
17872 if (remcomInBuffer[0] == 's') {
17873 linux_regs->flags |= X86_EFLAGS_TF;
17874 - atomic_set(&kgdb_cpu_doing_single_step,
17875 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17876 raw_smp_processor_id());
17877 }
17878
17879 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17880
17881 switch (cmd) {
17882 case DIE_DEBUG:
17883 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17884 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17885 if (user_mode(regs))
17886 return single_step_cont(regs, args);
17887 break;
17888 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17889 index c5e410e..da6aaf9 100644
17890 --- a/arch/x86/kernel/kprobes-opt.c
17891 +++ b/arch/x86/kernel/kprobes-opt.c
17892 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17893 * Verify if the address gap is in 2GB range, because this uses
17894 * a relative jump.
17895 */
17896 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17897 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17898 if (abs(rel) > 0x7fffffff)
17899 return -ERANGE;
17900
17901 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17902 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17903
17904 /* Set probe function call */
17905 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17906 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17907
17908 /* Set returning jmp instruction at the tail of out-of-line buffer */
17909 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17910 - (u8 *)op->kp.addr + op->optinsn.size);
17911 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17912
17913 flush_icache_range((unsigned long) buf,
17914 (unsigned long) buf + TMPL_END_IDX +
17915 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17916 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17917
17918 /* Backup instructions which will be replaced by jump address */
17919 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17920 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17921 RELATIVE_ADDR_SIZE);
17922
17923 insn_buf[0] = RELATIVEJUMP_OPCODE;
17924 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17925 index e2f751e..dffa2a0 100644
17926 --- a/arch/x86/kernel/kprobes.c
17927 +++ b/arch/x86/kernel/kprobes.c
17928 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17929 } __attribute__((packed)) *insn;
17930
17931 insn = (struct __arch_relative_insn *)from;
17932 +
17933 + pax_open_kernel();
17934 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17935 insn->op = op;
17936 + pax_close_kernel();
17937 }
17938
17939 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17940 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17941 kprobe_opcode_t opcode;
17942 kprobe_opcode_t *orig_opcodes = opcodes;
17943
17944 - if (search_exception_tables((unsigned long)opcodes))
17945 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17946 return 0; /* Page fault may occur on this address. */
17947
17948 retry:
17949 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17950 /* Another subsystem puts a breakpoint, failed to recover */
17951 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17952 return 0;
17953 + pax_open_kernel();
17954 memcpy(dest, insn.kaddr, insn.length);
17955 + pax_close_kernel();
17956
17957 #ifdef CONFIG_X86_64
17958 if (insn_rip_relative(&insn)) {
17959 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17960 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17961 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17962 disp = (u8 *) dest + insn_offset_displacement(&insn);
17963 + pax_open_kernel();
17964 *(s32 *) disp = (s32) newdisp;
17965 + pax_close_kernel();
17966 }
17967 #endif
17968 return insn.length;
17969 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17970 * nor set current_kprobe, because it doesn't use single
17971 * stepping.
17972 */
17973 - regs->ip = (unsigned long)p->ainsn.insn;
17974 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17975 preempt_enable_no_resched();
17976 return;
17977 }
17978 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17979 if (p->opcode == BREAKPOINT_INSTRUCTION)
17980 regs->ip = (unsigned long)p->addr;
17981 else
17982 - regs->ip = (unsigned long)p->ainsn.insn;
17983 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17984 }
17985
17986 /*
17987 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17988 setup_singlestep(p, regs, kcb, 0);
17989 return 1;
17990 }
17991 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17992 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17993 /*
17994 * The breakpoint instruction was removed right
17995 * after we hit it. Another cpu has removed
17996 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17997 " movq %rax, 152(%rsp)\n"
17998 RESTORE_REGS_STRING
17999 " popfq\n"
18000 +#ifdef KERNEXEC_PLUGIN
18001 + " btsq $63,(%rsp)\n"
18002 +#endif
18003 #else
18004 " pushf\n"
18005 SAVE_REGS_STRING
18006 @@ -765,7 +775,7 @@ static void __kprobes
18007 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18008 {
18009 unsigned long *tos = stack_addr(regs);
18010 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18011 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18012 unsigned long orig_ip = (unsigned long)p->addr;
18013 kprobe_opcode_t *insn = p->ainsn.insn;
18014
18015 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18016 struct die_args *args = data;
18017 int ret = NOTIFY_DONE;
18018
18019 - if (args->regs && user_mode_vm(args->regs))
18020 + if (args->regs && user_mode(args->regs))
18021 return ret;
18022
18023 switch (val) {
18024 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18025 index ebc9873..1b9724b 100644
18026 --- a/arch/x86/kernel/ldt.c
18027 +++ b/arch/x86/kernel/ldt.c
18028 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18029 if (reload) {
18030 #ifdef CONFIG_SMP
18031 preempt_disable();
18032 - load_LDT(pc);
18033 + load_LDT_nolock(pc);
18034 if (!cpumask_equal(mm_cpumask(current->mm),
18035 cpumask_of(smp_processor_id())))
18036 smp_call_function(flush_ldt, current->mm, 1);
18037 preempt_enable();
18038 #else
18039 - load_LDT(pc);
18040 + load_LDT_nolock(pc);
18041 #endif
18042 }
18043 if (oldsize) {
18044 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18045 return err;
18046
18047 for (i = 0; i < old->size; i++)
18048 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18049 + write_ldt_entry(new->ldt, i, old->ldt + i);
18050 return 0;
18051 }
18052
18053 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18054 retval = copy_ldt(&mm->context, &old_mm->context);
18055 mutex_unlock(&old_mm->context.lock);
18056 }
18057 +
18058 + if (tsk == current) {
18059 + mm->context.vdso = 0;
18060 +
18061 +#ifdef CONFIG_X86_32
18062 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18063 + mm->context.user_cs_base = 0UL;
18064 + mm->context.user_cs_limit = ~0UL;
18065 +
18066 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18067 + cpus_clear(mm->context.cpu_user_cs_mask);
18068 +#endif
18069 +
18070 +#endif
18071 +#endif
18072 +
18073 + }
18074 +
18075 return retval;
18076 }
18077
18078 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18079 }
18080 }
18081
18082 +#ifdef CONFIG_PAX_SEGMEXEC
18083 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18084 + error = -EINVAL;
18085 + goto out_unlock;
18086 + }
18087 +#endif
18088 +
18089 fill_ldt(&ldt, &ldt_info);
18090 if (oldmode)
18091 ldt.avl = 0;
18092 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18093 index 5b19e4d..6476a76 100644
18094 --- a/arch/x86/kernel/machine_kexec_32.c
18095 +++ b/arch/x86/kernel/machine_kexec_32.c
18096 @@ -26,7 +26,7 @@
18097 #include <asm/cacheflush.h>
18098 #include <asm/debugreg.h>
18099
18100 -static void set_idt(void *newidt, __u16 limit)
18101 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18102 {
18103 struct desc_ptr curidt;
18104
18105 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18106 }
18107
18108
18109 -static void set_gdt(void *newgdt, __u16 limit)
18110 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18111 {
18112 struct desc_ptr curgdt;
18113
18114 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18115 }
18116
18117 control_page = page_address(image->control_code_page);
18118 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18119 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18120
18121 relocate_kernel_ptr = control_page;
18122 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18123 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18124 index 0327e2b..e43737b 100644
18125 --- a/arch/x86/kernel/microcode_intel.c
18126 +++ b/arch/x86/kernel/microcode_intel.c
18127 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18128
18129 static int get_ucode_user(void *to, const void *from, size_t n)
18130 {
18131 - return copy_from_user(to, from, n);
18132 + return copy_from_user(to, (const void __force_user *)from, n);
18133 }
18134
18135 static enum ucode_state
18136 request_microcode_user(int cpu, const void __user *buf, size_t size)
18137 {
18138 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18139 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18140 }
18141
18142 static void microcode_fini_cpu(int cpu)
18143 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18144 index f21fd94..61565cd 100644
18145 --- a/arch/x86/kernel/module.c
18146 +++ b/arch/x86/kernel/module.c
18147 @@ -35,15 +35,60 @@
18148 #define DEBUGP(fmt...)
18149 #endif
18150
18151 -void *module_alloc(unsigned long size)
18152 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18153 {
18154 - if (PAGE_ALIGN(size) > MODULES_LEN)
18155 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18156 return NULL;
18157 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18158 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18159 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18160 -1, __builtin_return_address(0));
18161 }
18162
18163 +void *module_alloc(unsigned long size)
18164 +{
18165 +
18166 +#ifdef CONFIG_PAX_KERNEXEC
18167 + return __module_alloc(size, PAGE_KERNEL);
18168 +#else
18169 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18170 +#endif
18171 +
18172 +}
18173 +
18174 +#ifdef CONFIG_PAX_KERNEXEC
18175 +#ifdef CONFIG_X86_32
18176 +void *module_alloc_exec(unsigned long size)
18177 +{
18178 + struct vm_struct *area;
18179 +
18180 + if (size == 0)
18181 + return NULL;
18182 +
18183 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18184 + return area ? area->addr : NULL;
18185 +}
18186 +EXPORT_SYMBOL(module_alloc_exec);
18187 +
18188 +void module_free_exec(struct module *mod, void *module_region)
18189 +{
18190 + vunmap(module_region);
18191 +}
18192 +EXPORT_SYMBOL(module_free_exec);
18193 +#else
18194 +void module_free_exec(struct module *mod, void *module_region)
18195 +{
18196 + module_free(mod, module_region);
18197 +}
18198 +EXPORT_SYMBOL(module_free_exec);
18199 +
18200 +void *module_alloc_exec(unsigned long size)
18201 +{
18202 + return __module_alloc(size, PAGE_KERNEL_RX);
18203 +}
18204 +EXPORT_SYMBOL(module_alloc_exec);
18205 +#endif
18206 +#endif
18207 +
18208 #ifdef CONFIG_X86_32
18209 int apply_relocate(Elf32_Shdr *sechdrs,
18210 const char *strtab,
18211 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18212 unsigned int i;
18213 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18214 Elf32_Sym *sym;
18215 - uint32_t *location;
18216 + uint32_t *plocation, location;
18217
18218 DEBUGP("Applying relocate section %u to %u\n", relsec,
18219 sechdrs[relsec].sh_info);
18220 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18221 /* This is where to make the change */
18222 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18223 - + rel[i].r_offset;
18224 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18225 + location = (uint32_t)plocation;
18226 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18227 + plocation = ktla_ktva((void *)plocation);
18228 /* This is the symbol it is referring to. Note that all
18229 undefined symbols have been resolved. */
18230 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18231 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18232 switch (ELF32_R_TYPE(rel[i].r_info)) {
18233 case R_386_32:
18234 /* We add the value into the location given */
18235 - *location += sym->st_value;
18236 + pax_open_kernel();
18237 + *plocation += sym->st_value;
18238 + pax_close_kernel();
18239 break;
18240 case R_386_PC32:
18241 /* Add the value, subtract its postition */
18242 - *location += sym->st_value - (uint32_t)location;
18243 + pax_open_kernel();
18244 + *plocation += sym->st_value - location;
18245 + pax_close_kernel();
18246 break;
18247 default:
18248 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18249 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18250 case R_X86_64_NONE:
18251 break;
18252 case R_X86_64_64:
18253 + pax_open_kernel();
18254 *(u64 *)loc = val;
18255 + pax_close_kernel();
18256 break;
18257 case R_X86_64_32:
18258 + pax_open_kernel();
18259 *(u32 *)loc = val;
18260 + pax_close_kernel();
18261 if (val != *(u32 *)loc)
18262 goto overflow;
18263 break;
18264 case R_X86_64_32S:
18265 + pax_open_kernel();
18266 *(s32 *)loc = val;
18267 + pax_close_kernel();
18268 if ((s64)val != *(s32 *)loc)
18269 goto overflow;
18270 break;
18271 case R_X86_64_PC32:
18272 val -= (u64)loc;
18273 + pax_open_kernel();
18274 *(u32 *)loc = val;
18275 + pax_close_kernel();
18276 +
18277 #if 0
18278 if ((s64)val != *(s32 *)loc)
18279 goto overflow;
18280 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18281 index a0b2f84..875ab81 100644
18282 --- a/arch/x86/kernel/nmi.c
18283 +++ b/arch/x86/kernel/nmi.c
18284 @@ -460,6 +460,17 @@ static inline void nmi_nesting_postprocess(void)
18285 dotraplinkage notrace __kprobes void
18286 do_nmi(struct pt_regs *regs, long error_code)
18287 {
18288 +
18289 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18290 + if (!user_mode(regs)) {
18291 + unsigned long cs = regs->cs & 0xFFFF;
18292 + unsigned long ip = ktva_ktla(regs->ip);
18293 +
18294 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18295 + regs->ip = ip;
18296 + }
18297 +#endif
18298 +
18299 nmi_nesting_preprocess(regs);
18300
18301 nmi_enter();
18302 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18303 index 676b8c7..870ba04 100644
18304 --- a/arch/x86/kernel/paravirt-spinlocks.c
18305 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18306 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18307 arch_spin_lock(lock);
18308 }
18309
18310 -struct pv_lock_ops pv_lock_ops = {
18311 +struct pv_lock_ops pv_lock_ops __read_only = {
18312 #ifdef CONFIG_SMP
18313 .spin_is_locked = __ticket_spin_is_locked,
18314 .spin_is_contended = __ticket_spin_is_contended,
18315 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18316 index 9ce8859..b49bf51 100644
18317 --- a/arch/x86/kernel/paravirt.c
18318 +++ b/arch/x86/kernel/paravirt.c
18319 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18320 {
18321 return x;
18322 }
18323 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18324 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18325 +#endif
18326
18327 void __init default_banner(void)
18328 {
18329 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18330 if (opfunc == NULL)
18331 /* If there's no function, patch it with a ud2a (BUG) */
18332 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18333 - else if (opfunc == _paravirt_nop)
18334 + else if (opfunc == (void *)_paravirt_nop)
18335 /* If the operation is a nop, then nop the callsite */
18336 ret = paravirt_patch_nop();
18337
18338 /* identity functions just return their single argument */
18339 - else if (opfunc == _paravirt_ident_32)
18340 + else if (opfunc == (void *)_paravirt_ident_32)
18341 ret = paravirt_patch_ident_32(insnbuf, len);
18342 - else if (opfunc == _paravirt_ident_64)
18343 + else if (opfunc == (void *)_paravirt_ident_64)
18344 ret = paravirt_patch_ident_64(insnbuf, len);
18345 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18346 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18347 + ret = paravirt_patch_ident_64(insnbuf, len);
18348 +#endif
18349
18350 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18351 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18352 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18353 if (insn_len > len || start == NULL)
18354 insn_len = len;
18355 else
18356 - memcpy(insnbuf, start, insn_len);
18357 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18358
18359 return insn_len;
18360 }
18361 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18362 preempt_enable();
18363 }
18364
18365 -struct pv_info pv_info = {
18366 +struct pv_info pv_info __read_only = {
18367 .name = "bare hardware",
18368 .paravirt_enabled = 0,
18369 .kernel_rpl = 0,
18370 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18371 #endif
18372 };
18373
18374 -struct pv_init_ops pv_init_ops = {
18375 +struct pv_init_ops pv_init_ops __read_only = {
18376 .patch = native_patch,
18377 };
18378
18379 -struct pv_time_ops pv_time_ops = {
18380 +struct pv_time_ops pv_time_ops __read_only = {
18381 .sched_clock = native_sched_clock,
18382 .steal_clock = native_steal_clock,
18383 };
18384
18385 -struct pv_irq_ops pv_irq_ops = {
18386 +struct pv_irq_ops pv_irq_ops __read_only = {
18387 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18388 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18389 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18390 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18391 #endif
18392 };
18393
18394 -struct pv_cpu_ops pv_cpu_ops = {
18395 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18396 .cpuid = native_cpuid,
18397 .get_debugreg = native_get_debugreg,
18398 .set_debugreg = native_set_debugreg,
18399 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18400 .end_context_switch = paravirt_nop,
18401 };
18402
18403 -struct pv_apic_ops pv_apic_ops = {
18404 +struct pv_apic_ops pv_apic_ops __read_only = {
18405 #ifdef CONFIG_X86_LOCAL_APIC
18406 .startup_ipi_hook = paravirt_nop,
18407 #endif
18408 };
18409
18410 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18411 +#ifdef CONFIG_X86_32
18412 +#ifdef CONFIG_X86_PAE
18413 +/* 64-bit pagetable entries */
18414 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18415 +#else
18416 /* 32-bit pagetable entries */
18417 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18418 +#endif
18419 #else
18420 /* 64-bit pagetable entries */
18421 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18422 #endif
18423
18424 -struct pv_mmu_ops pv_mmu_ops = {
18425 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18426
18427 .read_cr2 = native_read_cr2,
18428 .write_cr2 = native_write_cr2,
18429 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18430 .make_pud = PTE_IDENT,
18431
18432 .set_pgd = native_set_pgd,
18433 + .set_pgd_batched = native_set_pgd_batched,
18434 #endif
18435 #endif /* PAGETABLE_LEVELS >= 3 */
18436
18437 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18438 },
18439
18440 .set_fixmap = native_set_fixmap,
18441 +
18442 +#ifdef CONFIG_PAX_KERNEXEC
18443 + .pax_open_kernel = native_pax_open_kernel,
18444 + .pax_close_kernel = native_pax_close_kernel,
18445 +#endif
18446 +
18447 };
18448
18449 EXPORT_SYMBOL_GPL(pv_time_ops);
18450 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18451 index 35ccf75..7a15747 100644
18452 --- a/arch/x86/kernel/pci-iommu_table.c
18453 +++ b/arch/x86/kernel/pci-iommu_table.c
18454 @@ -2,7 +2,7 @@
18455 #include <asm/iommu_table.h>
18456 #include <linux/string.h>
18457 #include <linux/kallsyms.h>
18458 -
18459 +#include <linux/sched.h>
18460
18461 #define DEBUG 1
18462
18463 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18464 index 735279e..5008677 100644
18465 --- a/arch/x86/kernel/process.c
18466 +++ b/arch/x86/kernel/process.c
18467 @@ -34,7 +34,8 @@
18468 * section. Since TSS's are completely CPU-local, we want them
18469 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18470 */
18471 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18472 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18473 +EXPORT_SYMBOL(init_tss);
18474
18475 #ifdef CONFIG_X86_64
18476 static DEFINE_PER_CPU(unsigned char, is_idle);
18477 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
18478 task_xstate_cachep =
18479 kmem_cache_create("task_xstate", xstate_size,
18480 __alignof__(union thread_xstate),
18481 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18482 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18483 }
18484
18485 static inline void drop_fpu(struct task_struct *tsk)
18486 @@ -115,7 +116,7 @@ void exit_thread(void)
18487 unsigned long *bp = t->io_bitmap_ptr;
18488
18489 if (bp) {
18490 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18491 + struct tss_struct *tss = init_tss + get_cpu();
18492
18493 t->io_bitmap_ptr = NULL;
18494 clear_thread_flag(TIF_IO_BITMAP);
18495 @@ -147,7 +148,7 @@ void show_regs_common(void)
18496
18497 printk(KERN_CONT "\n");
18498 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18499 - current->pid, current->comm, print_tainted(),
18500 + task_pid_nr(current), current->comm, print_tainted(),
18501 init_utsname()->release,
18502 (int)strcspn(init_utsname()->version, " "),
18503 init_utsname()->version);
18504 @@ -161,6 +162,9 @@ void flush_thread(void)
18505 {
18506 struct task_struct *tsk = current;
18507
18508 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18509 + loadsegment(gs, 0);
18510 +#endif
18511 flush_ptrace_hw_breakpoint(tsk);
18512 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18513 drop_fpu(tsk);
18514 @@ -318,10 +322,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18515 regs.di = (unsigned long) arg;
18516
18517 #ifdef CONFIG_X86_32
18518 - regs.ds = __USER_DS;
18519 - regs.es = __USER_DS;
18520 + regs.ds = __KERNEL_DS;
18521 + regs.es = __KERNEL_DS;
18522 regs.fs = __KERNEL_PERCPU;
18523 - regs.gs = __KERNEL_STACK_CANARY;
18524 + savesegment(gs, regs.gs);
18525 #else
18526 regs.ss = __KERNEL_DS;
18527 #endif
18528 @@ -407,7 +411,7 @@ static void __exit_idle(void)
18529 void exit_idle(void)
18530 {
18531 /* idle loop has pid 0 */
18532 - if (current->pid)
18533 + if (task_pid_nr(current))
18534 return;
18535 __exit_idle();
18536 }
18537 @@ -516,7 +520,7 @@ bool set_pm_idle_to_default(void)
18538
18539 return ret;
18540 }
18541 -void stop_this_cpu(void *dummy)
18542 +__noreturn void stop_this_cpu(void *dummy)
18543 {
18544 local_irq_disable();
18545 /*
18546 @@ -746,16 +750,37 @@ static int __init idle_setup(char *str)
18547 }
18548 early_param("idle", idle_setup);
18549
18550 -unsigned long arch_align_stack(unsigned long sp)
18551 +#ifdef CONFIG_PAX_RANDKSTACK
18552 +void pax_randomize_kstack(struct pt_regs *regs)
18553 {
18554 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18555 - sp -= get_random_int() % 8192;
18556 - return sp & ~0xf;
18557 -}
18558 + struct thread_struct *thread = &current->thread;
18559 + unsigned long time;
18560
18561 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18562 -{
18563 - unsigned long range_end = mm->brk + 0x02000000;
18564 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18565 -}
18566 + if (!randomize_va_space)
18567 + return;
18568 +
18569 + if (v8086_mode(regs))
18570 + return;
18571
18572 + rdtscl(time);
18573 +
18574 + /* P4 seems to return a 0 LSB, ignore it */
18575 +#ifdef CONFIG_MPENTIUM4
18576 + time &= 0x3EUL;
18577 + time <<= 2;
18578 +#elif defined(CONFIG_X86_64)
18579 + time &= 0xFUL;
18580 + time <<= 4;
18581 +#else
18582 + time &= 0x1FUL;
18583 + time <<= 3;
18584 +#endif
18585 +
18586 + thread->sp0 ^= time;
18587 + load_sp0(init_tss + smp_processor_id(), thread);
18588 +
18589 +#ifdef CONFIG_X86_64
18590 + this_cpu_write(kernel_stack, thread->sp0);
18591 +#endif
18592 +}
18593 +#endif
18594 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18595 index 516fa18..80bd9e6 100644
18596 --- a/arch/x86/kernel/process_32.c
18597 +++ b/arch/x86/kernel/process_32.c
18598 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18599 unsigned long thread_saved_pc(struct task_struct *tsk)
18600 {
18601 return ((unsigned long *)tsk->thread.sp)[3];
18602 +//XXX return tsk->thread.eip;
18603 }
18604
18605 void __show_regs(struct pt_regs *regs, int all)
18606 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18607 unsigned long sp;
18608 unsigned short ss, gs;
18609
18610 - if (user_mode_vm(regs)) {
18611 + if (user_mode(regs)) {
18612 sp = regs->sp;
18613 ss = regs->ss & 0xffff;
18614 - gs = get_user_gs(regs);
18615 } else {
18616 sp = kernel_stack_pointer(regs);
18617 savesegment(ss, ss);
18618 - savesegment(gs, gs);
18619 }
18620 + gs = get_user_gs(regs);
18621
18622 show_regs_common();
18623
18624 @@ -134,13 +134,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18625 struct task_struct *tsk;
18626 int err;
18627
18628 - childregs = task_pt_regs(p);
18629 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18630 *childregs = *regs;
18631 childregs->ax = 0;
18632 childregs->sp = sp;
18633
18634 p->thread.sp = (unsigned long) childregs;
18635 p->thread.sp0 = (unsigned long) (childregs+1);
18636 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18637
18638 p->thread.ip = (unsigned long) ret_from_fork;
18639
18640 @@ -231,7 +232,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18641 struct thread_struct *prev = &prev_p->thread,
18642 *next = &next_p->thread;
18643 int cpu = smp_processor_id();
18644 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18645 + struct tss_struct *tss = init_tss + cpu;
18646 fpu_switch_t fpu;
18647
18648 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18649 @@ -255,6 +256,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18650 */
18651 lazy_save_gs(prev->gs);
18652
18653 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18654 + __set_fs(task_thread_info(next_p)->addr_limit);
18655 +#endif
18656 +
18657 /*
18658 * Load the per-thread Thread-Local Storage descriptor.
18659 */
18660 @@ -285,6 +290,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18661 */
18662 arch_end_context_switch(next_p);
18663
18664 + this_cpu_write(current_task, next_p);
18665 + this_cpu_write(current_tinfo, &next_p->tinfo);
18666 +
18667 /*
18668 * Restore %gs if needed (which is common)
18669 */
18670 @@ -293,8 +301,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18671
18672 switch_fpu_finish(next_p, fpu);
18673
18674 - this_cpu_write(current_task, next_p);
18675 -
18676 return prev_p;
18677 }
18678
18679 @@ -324,4 +330,3 @@ unsigned long get_wchan(struct task_struct *p)
18680 } while (count++ < 16);
18681 return 0;
18682 }
18683 -
18684 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18685 index 61cdf7f..797f06a 100644
18686 --- a/arch/x86/kernel/process_64.c
18687 +++ b/arch/x86/kernel/process_64.c
18688 @@ -153,8 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18689 struct pt_regs *childregs;
18690 struct task_struct *me = current;
18691
18692 - childregs = ((struct pt_regs *)
18693 - (THREAD_SIZE + task_stack_page(p))) - 1;
18694 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18695 *childregs = *regs;
18696
18697 childregs->ax = 0;
18698 @@ -166,6 +165,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18699 p->thread.sp = (unsigned long) childregs;
18700 p->thread.sp0 = (unsigned long) (childregs+1);
18701 p->thread.usersp = me->thread.usersp;
18702 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18703
18704 set_tsk_thread_flag(p, TIF_FORK);
18705
18706 @@ -271,7 +271,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18707 struct thread_struct *prev = &prev_p->thread;
18708 struct thread_struct *next = &next_p->thread;
18709 int cpu = smp_processor_id();
18710 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18711 + struct tss_struct *tss = init_tss + cpu;
18712 unsigned fsindex, gsindex;
18713 fpu_switch_t fpu;
18714
18715 @@ -353,10 +353,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18716 prev->usersp = this_cpu_read(old_rsp);
18717 this_cpu_write(old_rsp, next->usersp);
18718 this_cpu_write(current_task, next_p);
18719 + this_cpu_write(current_tinfo, &next_p->tinfo);
18720
18721 - this_cpu_write(kernel_stack,
18722 - (unsigned long)task_stack_page(next_p) +
18723 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18724 + this_cpu_write(kernel_stack, next->sp0);
18725
18726 /*
18727 * Now maybe reload the debug registers and handle I/O bitmaps
18728 @@ -425,12 +424,11 @@ unsigned long get_wchan(struct task_struct *p)
18729 if (!p || p == current || p->state == TASK_RUNNING)
18730 return 0;
18731 stack = (unsigned long)task_stack_page(p);
18732 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18733 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18734 return 0;
18735 fp = *(u64 *)(p->thread.sp);
18736 do {
18737 - if (fp < (unsigned long)stack ||
18738 - fp >= (unsigned long)stack+THREAD_SIZE)
18739 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18740 return 0;
18741 ip = *(u64 *)(fp+8);
18742 if (!in_sched_functions(ip))
18743 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18744 index c4c6a5c..905f440 100644
18745 --- a/arch/x86/kernel/ptrace.c
18746 +++ b/arch/x86/kernel/ptrace.c
18747 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18748 unsigned long addr, unsigned long data)
18749 {
18750 int ret;
18751 - unsigned long __user *datap = (unsigned long __user *)data;
18752 + unsigned long __user *datap = (__force unsigned long __user *)data;
18753
18754 switch (request) {
18755 /* read the word at location addr in the USER area. */
18756 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18757 if ((int) addr < 0)
18758 return -EIO;
18759 ret = do_get_thread_area(child, addr,
18760 - (struct user_desc __user *)data);
18761 + (__force struct user_desc __user *) data);
18762 break;
18763
18764 case PTRACE_SET_THREAD_AREA:
18765 if ((int) addr < 0)
18766 return -EIO;
18767 ret = do_set_thread_area(child, addr,
18768 - (struct user_desc __user *)data, 0);
18769 + (__force struct user_desc __user *) data, 0);
18770 break;
18771 #endif
18772
18773 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18774 memset(info, 0, sizeof(*info));
18775 info->si_signo = SIGTRAP;
18776 info->si_code = si_code;
18777 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18778 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18779 }
18780
18781 void user_single_step_siginfo(struct task_struct *tsk,
18782 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18783 # define IS_IA32 0
18784 #endif
18785
18786 +#ifdef CONFIG_GRKERNSEC_SETXID
18787 +extern void gr_delayed_cred_worker(void);
18788 +#endif
18789 +
18790 /*
18791 * We must return the syscall number to actually look up in the table.
18792 * This can be -1L to skip running any syscall at all.
18793 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18794 {
18795 long ret = 0;
18796
18797 +#ifdef CONFIG_GRKERNSEC_SETXID
18798 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18799 + gr_delayed_cred_worker();
18800 +#endif
18801 +
18802 /*
18803 * If we stepped into a sysenter/syscall insn, it trapped in
18804 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18805 @@ -1511,6 +1520,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18806 {
18807 bool step;
18808
18809 +#ifdef CONFIG_GRKERNSEC_SETXID
18810 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18811 + gr_delayed_cred_worker();
18812 +#endif
18813 +
18814 audit_syscall_exit(regs);
18815
18816 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18817 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18818 index 42eb330..139955c 100644
18819 --- a/arch/x86/kernel/pvclock.c
18820 +++ b/arch/x86/kernel/pvclock.c
18821 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18822 return pv_tsc_khz;
18823 }
18824
18825 -static atomic64_t last_value = ATOMIC64_INIT(0);
18826 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18827
18828 void pvclock_resume(void)
18829 {
18830 - atomic64_set(&last_value, 0);
18831 + atomic64_set_unchecked(&last_value, 0);
18832 }
18833
18834 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18835 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18836 * updating at the same time, and one of them could be slightly behind,
18837 * making the assumption that last_value always go forward fail to hold.
18838 */
18839 - last = atomic64_read(&last_value);
18840 + last = atomic64_read_unchecked(&last_value);
18841 do {
18842 if (ret < last)
18843 return last;
18844 - last = atomic64_cmpxchg(&last_value, last, ret);
18845 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18846 } while (unlikely(last != ret));
18847
18848 return ret;
18849 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18850 index 5de92f1..776788d 100644
18851 --- a/arch/x86/kernel/reboot.c
18852 +++ b/arch/x86/kernel/reboot.c
18853 @@ -36,7 +36,7 @@ void (*pm_power_off)(void);
18854 EXPORT_SYMBOL(pm_power_off);
18855
18856 static const struct desc_ptr no_idt = {};
18857 -static int reboot_mode;
18858 +static unsigned short reboot_mode;
18859 enum reboot_type reboot_type = BOOT_ACPI;
18860 int reboot_force;
18861
18862 @@ -157,11 +157,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
18863 return 0;
18864 }
18865
18866 -void machine_real_restart(unsigned int type)
18867 +__noreturn void machine_real_restart(unsigned int type)
18868 {
18869 void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
18870 real_mode_header->machine_real_restart_asm;
18871
18872 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18873 + struct desc_struct *gdt;
18874 +#endif
18875 +
18876 local_irq_disable();
18877
18878 /*
18879 @@ -189,10 +193,38 @@ void machine_real_restart(unsigned int type)
18880 * boot)". This seems like a fairly standard thing that gets set by
18881 * REBOOT.COM programs, and the previous reset routine did this
18882 * too. */
18883 - *((unsigned short *)0x472) = reboot_mode;
18884 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18885
18886 /* Jump to the identity-mapped low memory code */
18887 +
18888 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18889 + gdt = get_cpu_gdt_table(smp_processor_id());
18890 + pax_open_kernel();
18891 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18892 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18893 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18894 + loadsegment(ds, __KERNEL_DS);
18895 + loadsegment(es, __KERNEL_DS);
18896 + loadsegment(ss, __KERNEL_DS);
18897 +#endif
18898 +#ifdef CONFIG_PAX_KERNEXEC
18899 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18900 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18901 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18902 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18903 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18904 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18905 +#endif
18906 + pax_close_kernel();
18907 +#endif
18908 +
18909 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18910 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18911 + unreachable();
18912 +#else
18913 restart_lowmem(type);
18914 +#endif
18915 +
18916 }
18917 #ifdef CONFIG_APM_MODULE
18918 EXPORT_SYMBOL(machine_real_restart);
18919 @@ -543,7 +575,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18920 * try to force a triple fault and then cycle between hitting the keyboard
18921 * controller and doing that
18922 */
18923 -static void native_machine_emergency_restart(void)
18924 +static void __noreturn native_machine_emergency_restart(void)
18925 {
18926 int i;
18927 int attempt = 0;
18928 @@ -670,13 +702,13 @@ void native_machine_shutdown(void)
18929 #endif
18930 }
18931
18932 -static void __machine_emergency_restart(int emergency)
18933 +static __noreturn void __machine_emergency_restart(int emergency)
18934 {
18935 reboot_emergency = emergency;
18936 machine_ops.emergency_restart();
18937 }
18938
18939 -static void native_machine_restart(char *__unused)
18940 +static void __noreturn native_machine_restart(char *__unused)
18941 {
18942 printk("machine restart\n");
18943
18944 @@ -685,7 +717,7 @@ static void native_machine_restart(char *__unused)
18945 __machine_emergency_restart(0);
18946 }
18947
18948 -static void native_machine_halt(void)
18949 +static void __noreturn native_machine_halt(void)
18950 {
18951 /* Stop other cpus and apics */
18952 machine_shutdown();
18953 @@ -695,7 +727,7 @@ static void native_machine_halt(void)
18954 stop_this_cpu(NULL);
18955 }
18956
18957 -static void native_machine_power_off(void)
18958 +static void __noreturn native_machine_power_off(void)
18959 {
18960 if (pm_power_off) {
18961 if (!reboot_force)
18962 @@ -704,6 +736,7 @@ static void native_machine_power_off(void)
18963 }
18964 /* A fallback in case there is no PM info available */
18965 tboot_shutdown(TB_SHUTDOWN_HALT);
18966 + unreachable();
18967 }
18968
18969 struct machine_ops machine_ops = {
18970 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18971 index 7a6f3b3..bed145d7 100644
18972 --- a/arch/x86/kernel/relocate_kernel_64.S
18973 +++ b/arch/x86/kernel/relocate_kernel_64.S
18974 @@ -11,6 +11,7 @@
18975 #include <asm/kexec.h>
18976 #include <asm/processor-flags.h>
18977 #include <asm/pgtable_types.h>
18978 +#include <asm/alternative-asm.h>
18979
18980 /*
18981 * Must be relocatable PIC code callable as a C function
18982 @@ -160,13 +161,14 @@ identity_mapped:
18983 xorq %rbp, %rbp
18984 xorq %r8, %r8
18985 xorq %r9, %r9
18986 - xorq %r10, %r9
18987 + xorq %r10, %r10
18988 xorq %r11, %r11
18989 xorq %r12, %r12
18990 xorq %r13, %r13
18991 xorq %r14, %r14
18992 xorq %r15, %r15
18993
18994 + pax_force_retaddr 0, 1
18995 ret
18996
18997 1:
18998 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18999 index 16be6dc..4686132 100644
19000 --- a/arch/x86/kernel/setup.c
19001 +++ b/arch/x86/kernel/setup.c
19002 @@ -440,7 +440,7 @@ static void __init parse_setup_data(void)
19003
19004 switch (data->type) {
19005 case SETUP_E820_EXT:
19006 - parse_e820_ext(data);
19007 + parse_e820_ext((struct setup_data __force_kernel *)data);
19008 break;
19009 case SETUP_DTB:
19010 add_dtb(pa_data);
19011 @@ -632,7 +632,7 @@ static void __init trim_bios_range(void)
19012 * area (640->1Mb) as ram even though it is not.
19013 * take them out.
19014 */
19015 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19016 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19017 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19018 }
19019
19020 @@ -755,14 +755,14 @@ void __init setup_arch(char **cmdline_p)
19021
19022 if (!boot_params.hdr.root_flags)
19023 root_mountflags &= ~MS_RDONLY;
19024 - init_mm.start_code = (unsigned long) _text;
19025 - init_mm.end_code = (unsigned long) _etext;
19026 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19027 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19028 init_mm.end_data = (unsigned long) _edata;
19029 init_mm.brk = _brk_end;
19030
19031 - code_resource.start = virt_to_phys(_text);
19032 - code_resource.end = virt_to_phys(_etext)-1;
19033 - data_resource.start = virt_to_phys(_etext);
19034 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19035 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19036 + data_resource.start = virt_to_phys(_sdata);
19037 data_resource.end = virt_to_phys(_edata)-1;
19038 bss_resource.start = virt_to_phys(&__bss_start);
19039 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19040 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19041 index 5a98aa2..2f9288d 100644
19042 --- a/arch/x86/kernel/setup_percpu.c
19043 +++ b/arch/x86/kernel/setup_percpu.c
19044 @@ -21,19 +21,17 @@
19045 #include <asm/cpu.h>
19046 #include <asm/stackprotector.h>
19047
19048 -DEFINE_PER_CPU(int, cpu_number);
19049 +#ifdef CONFIG_SMP
19050 +DEFINE_PER_CPU(unsigned int, cpu_number);
19051 EXPORT_PER_CPU_SYMBOL(cpu_number);
19052 +#endif
19053
19054 -#ifdef CONFIG_X86_64
19055 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19056 -#else
19057 -#define BOOT_PERCPU_OFFSET 0
19058 -#endif
19059
19060 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19061 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19062
19063 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19064 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19065 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19066 };
19067 EXPORT_SYMBOL(__per_cpu_offset);
19068 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19069 {
19070 #ifdef CONFIG_X86_32
19071 struct desc_struct gdt;
19072 + unsigned long base = per_cpu_offset(cpu);
19073
19074 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19075 - 0x2 | DESCTYPE_S, 0x8);
19076 - gdt.s = 1;
19077 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19078 + 0x83 | DESCTYPE_S, 0xC);
19079 write_gdt_entry(get_cpu_gdt_table(cpu),
19080 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19081 #endif
19082 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19083 /* alrighty, percpu areas up and running */
19084 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19085 for_each_possible_cpu(cpu) {
19086 +#ifdef CONFIG_CC_STACKPROTECTOR
19087 +#ifdef CONFIG_X86_32
19088 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19089 +#endif
19090 +#endif
19091 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19092 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19093 per_cpu(cpu_number, cpu) = cpu;
19094 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19095 */
19096 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19097 #endif
19098 +#ifdef CONFIG_CC_STACKPROTECTOR
19099 +#ifdef CONFIG_X86_32
19100 + if (!cpu)
19101 + per_cpu(stack_canary.canary, cpu) = canary;
19102 +#endif
19103 +#endif
19104 /*
19105 * Up to this point, the boot CPU has been using .init.data
19106 * area. Reload any changed state for the boot CPU.
19107 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19108 index 21af737..fb45e22 100644
19109 --- a/arch/x86/kernel/signal.c
19110 +++ b/arch/x86/kernel/signal.c
19111 @@ -191,7 +191,7 @@ static unsigned long align_sigframe(unsigned long sp)
19112 * Align the stack pointer according to the i386 ABI,
19113 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19114 */
19115 - sp = ((sp + 4) & -16ul) - 4;
19116 + sp = ((sp - 12) & -16ul) - 4;
19117 #else /* !CONFIG_X86_32 */
19118 sp = round_down(sp, 16) - 8;
19119 #endif
19120 @@ -242,11 +242,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19121 * Return an always-bogus address instead so we will die with SIGSEGV.
19122 */
19123 if (onsigstack && !likely(on_sig_stack(sp)))
19124 - return (void __user *)-1L;
19125 + return (__force void __user *)-1L;
19126
19127 /* save i387 state */
19128 if (used_math() && save_i387_xstate(*fpstate) < 0)
19129 - return (void __user *)-1L;
19130 + return (__force void __user *)-1L;
19131
19132 return (void __user *)sp;
19133 }
19134 @@ -301,9 +301,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19135 }
19136
19137 if (current->mm->context.vdso)
19138 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19139 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19140 else
19141 - restorer = &frame->retcode;
19142 + restorer = (void __user *)&frame->retcode;
19143 if (ka->sa.sa_flags & SA_RESTORER)
19144 restorer = ka->sa.sa_restorer;
19145
19146 @@ -317,7 +317,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19147 * reasons and because gdb uses it as a signature to notice
19148 * signal handler stack frames.
19149 */
19150 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19151 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19152
19153 if (err)
19154 return -EFAULT;
19155 @@ -371,7 +371,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19156 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19157
19158 /* Set up to return from userspace. */
19159 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19160 + if (current->mm->context.vdso)
19161 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19162 + else
19163 + restorer = (void __user *)&frame->retcode;
19164 if (ka->sa.sa_flags & SA_RESTORER)
19165 restorer = ka->sa.sa_restorer;
19166 put_user_ex(restorer, &frame->pretcode);
19167 @@ -383,7 +386,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19168 * reasons and because gdb uses it as a signature to notice
19169 * signal handler stack frames.
19170 */
19171 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19172 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19173 } put_user_catch(err);
19174
19175 if (err)
19176 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19177 index 7bd8a08..2659b5b 100644
19178 --- a/arch/x86/kernel/smpboot.c
19179 +++ b/arch/x86/kernel/smpboot.c
19180 @@ -679,6 +679,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19181 idle->thread.sp = (unsigned long) (((struct pt_regs *)
19182 (THREAD_SIZE + task_stack_page(idle))) - 1);
19183 per_cpu(current_task, cpu) = idle;
19184 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
19185
19186 #ifdef CONFIG_X86_32
19187 /* Stack for startup_32 can be just as for start_secondary onwards */
19188 @@ -686,11 +687,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19189 #else
19190 clear_tsk_thread_flag(idle, TIF_FORK);
19191 initial_gs = per_cpu_offset(cpu);
19192 - per_cpu(kernel_stack, cpu) =
19193 - (unsigned long)task_stack_page(idle) -
19194 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19195 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
19196 #endif
19197 +
19198 + pax_open_kernel();
19199 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19200 + pax_close_kernel();
19201 +
19202 initial_code = (unsigned long)start_secondary;
19203 stack_start = idle->thread.sp;
19204
19205 @@ -826,6 +829,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
19206
19207 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19208
19209 +#ifdef CONFIG_PAX_PER_CPU_PGD
19210 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19211 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19212 + KERNEL_PGD_PTRS);
19213 +#endif
19214 +
19215 err = do_boot_cpu(apicid, cpu, tidle);
19216 if (err) {
19217 pr_debug("do_boot_cpu failed %d\n", err);
19218 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19219 index c346d11..d43b163 100644
19220 --- a/arch/x86/kernel/step.c
19221 +++ b/arch/x86/kernel/step.c
19222 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19223 struct desc_struct *desc;
19224 unsigned long base;
19225
19226 - seg &= ~7UL;
19227 + seg >>= 3;
19228
19229 mutex_lock(&child->mm->context.lock);
19230 - if (unlikely((seg >> 3) >= child->mm->context.size))
19231 + if (unlikely(seg >= child->mm->context.size))
19232 addr = -1L; /* bogus selector, access would fault */
19233 else {
19234 desc = child->mm->context.ldt + seg;
19235 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19236 addr += base;
19237 }
19238 mutex_unlock(&child->mm->context.lock);
19239 - }
19240 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19241 + addr = ktla_ktva(addr);
19242
19243 return addr;
19244 }
19245 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19246 unsigned char opcode[15];
19247 unsigned long addr = convert_ip_to_linear(child, regs);
19248
19249 + if (addr == -EINVAL)
19250 + return 0;
19251 +
19252 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19253 for (i = 0; i < copied; i++) {
19254 switch (opcode[i]) {
19255 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19256 index 0b0cb5f..db6b9ed 100644
19257 --- a/arch/x86/kernel/sys_i386_32.c
19258 +++ b/arch/x86/kernel/sys_i386_32.c
19259 @@ -24,17 +24,224 @@
19260
19261 #include <asm/syscalls.h>
19262
19263 -/*
19264 - * Do a system call from kernel instead of calling sys_execve so we
19265 - * end up with proper pt_regs.
19266 - */
19267 -int kernel_execve(const char *filename,
19268 - const char *const argv[],
19269 - const char *const envp[])
19270 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19271 {
19272 - long __res;
19273 - asm volatile ("int $0x80"
19274 - : "=a" (__res)
19275 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19276 - return __res;
19277 + unsigned long pax_task_size = TASK_SIZE;
19278 +
19279 +#ifdef CONFIG_PAX_SEGMEXEC
19280 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19281 + pax_task_size = SEGMEXEC_TASK_SIZE;
19282 +#endif
19283 +
19284 + if (len > pax_task_size || addr > pax_task_size - len)
19285 + return -EINVAL;
19286 +
19287 + return 0;
19288 +}
19289 +
19290 +unsigned long
19291 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19292 + unsigned long len, unsigned long pgoff, unsigned long flags)
19293 +{
19294 + struct mm_struct *mm = current->mm;
19295 + struct vm_area_struct *vma;
19296 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19297 +
19298 +#ifdef CONFIG_PAX_SEGMEXEC
19299 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19300 + pax_task_size = SEGMEXEC_TASK_SIZE;
19301 +#endif
19302 +
19303 + pax_task_size -= PAGE_SIZE;
19304 +
19305 + if (len > pax_task_size)
19306 + return -ENOMEM;
19307 +
19308 + if (flags & MAP_FIXED)
19309 + return addr;
19310 +
19311 +#ifdef CONFIG_PAX_RANDMMAP
19312 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19313 +#endif
19314 +
19315 + if (addr) {
19316 + addr = PAGE_ALIGN(addr);
19317 + if (pax_task_size - len >= addr) {
19318 + vma = find_vma(mm, addr);
19319 + if (check_heap_stack_gap(vma, addr, len))
19320 + return addr;
19321 + }
19322 + }
19323 + if (len > mm->cached_hole_size) {
19324 + start_addr = addr = mm->free_area_cache;
19325 + } else {
19326 + start_addr = addr = mm->mmap_base;
19327 + mm->cached_hole_size = 0;
19328 + }
19329 +
19330 +#ifdef CONFIG_PAX_PAGEEXEC
19331 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19332 + start_addr = 0x00110000UL;
19333 +
19334 +#ifdef CONFIG_PAX_RANDMMAP
19335 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19336 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19337 +#endif
19338 +
19339 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19340 + start_addr = addr = mm->mmap_base;
19341 + else
19342 + addr = start_addr;
19343 + }
19344 +#endif
19345 +
19346 +full_search:
19347 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19348 + /* At this point: (!vma || addr < vma->vm_end). */
19349 + if (pax_task_size - len < addr) {
19350 + /*
19351 + * Start a new search - just in case we missed
19352 + * some holes.
19353 + */
19354 + if (start_addr != mm->mmap_base) {
19355 + start_addr = addr = mm->mmap_base;
19356 + mm->cached_hole_size = 0;
19357 + goto full_search;
19358 + }
19359 + return -ENOMEM;
19360 + }
19361 + if (check_heap_stack_gap(vma, addr, len))
19362 + break;
19363 + if (addr + mm->cached_hole_size < vma->vm_start)
19364 + mm->cached_hole_size = vma->vm_start - addr;
19365 + addr = vma->vm_end;
19366 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19367 + start_addr = addr = mm->mmap_base;
19368 + mm->cached_hole_size = 0;
19369 + goto full_search;
19370 + }
19371 + }
19372 +
19373 + /*
19374 + * Remember the place where we stopped the search:
19375 + */
19376 + mm->free_area_cache = addr + len;
19377 + return addr;
19378 +}
19379 +
19380 +unsigned long
19381 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19382 + const unsigned long len, const unsigned long pgoff,
19383 + const unsigned long flags)
19384 +{
19385 + struct vm_area_struct *vma;
19386 + struct mm_struct *mm = current->mm;
19387 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19388 +
19389 +#ifdef CONFIG_PAX_SEGMEXEC
19390 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19391 + pax_task_size = SEGMEXEC_TASK_SIZE;
19392 +#endif
19393 +
19394 + pax_task_size -= PAGE_SIZE;
19395 +
19396 + /* requested length too big for entire address space */
19397 + if (len > pax_task_size)
19398 + return -ENOMEM;
19399 +
19400 + if (flags & MAP_FIXED)
19401 + return addr;
19402 +
19403 +#ifdef CONFIG_PAX_PAGEEXEC
19404 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19405 + goto bottomup;
19406 +#endif
19407 +
19408 +#ifdef CONFIG_PAX_RANDMMAP
19409 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19410 +#endif
19411 +
19412 + /* requesting a specific address */
19413 + if (addr) {
19414 + addr = PAGE_ALIGN(addr);
19415 + if (pax_task_size - len >= addr) {
19416 + vma = find_vma(mm, addr);
19417 + if (check_heap_stack_gap(vma, addr, len))
19418 + return addr;
19419 + }
19420 + }
19421 +
19422 + /* check if free_area_cache is useful for us */
19423 + if (len <= mm->cached_hole_size) {
19424 + mm->cached_hole_size = 0;
19425 + mm->free_area_cache = mm->mmap_base;
19426 + }
19427 +
19428 + /* either no address requested or can't fit in requested address hole */
19429 + addr = mm->free_area_cache;
19430 +
19431 + /* make sure it can fit in the remaining address space */
19432 + if (addr > len) {
19433 + vma = find_vma(mm, addr-len);
19434 + if (check_heap_stack_gap(vma, addr - len, len))
19435 + /* remember the address as a hint for next time */
19436 + return (mm->free_area_cache = addr-len);
19437 + }
19438 +
19439 + if (mm->mmap_base < len)
19440 + goto bottomup;
19441 +
19442 + addr = mm->mmap_base-len;
19443 +
19444 + do {
19445 + /*
19446 + * Lookup failure means no vma is above this address,
19447 + * else if new region fits below vma->vm_start,
19448 + * return with success:
19449 + */
19450 + vma = find_vma(mm, addr);
19451 + if (check_heap_stack_gap(vma, addr, len))
19452 + /* remember the address as a hint for next time */
19453 + return (mm->free_area_cache = addr);
19454 +
19455 + /* remember the largest hole we saw so far */
19456 + if (addr + mm->cached_hole_size < vma->vm_start)
19457 + mm->cached_hole_size = vma->vm_start - addr;
19458 +
19459 + /* try just below the current vma->vm_start */
19460 + addr = skip_heap_stack_gap(vma, len);
19461 + } while (!IS_ERR_VALUE(addr));
19462 +
19463 +bottomup:
19464 + /*
19465 + * A failed mmap() very likely causes application failure,
19466 + * so fall back to the bottom-up function here. This scenario
19467 + * can happen with large stack limits and large mmap()
19468 + * allocations.
19469 + */
19470 +
19471 +#ifdef CONFIG_PAX_SEGMEXEC
19472 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19473 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19474 + else
19475 +#endif
19476 +
19477 + mm->mmap_base = TASK_UNMAPPED_BASE;
19478 +
19479 +#ifdef CONFIG_PAX_RANDMMAP
19480 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19481 + mm->mmap_base += mm->delta_mmap;
19482 +#endif
19483 +
19484 + mm->free_area_cache = mm->mmap_base;
19485 + mm->cached_hole_size = ~0UL;
19486 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19487 + /*
19488 + * Restore the topdown base:
19489 + */
19490 + mm->mmap_base = base;
19491 + mm->free_area_cache = base;
19492 + mm->cached_hole_size = ~0UL;
19493 +
19494 + return addr;
19495 }
19496 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19497 index b4d3c39..82bb73b 100644
19498 --- a/arch/x86/kernel/sys_x86_64.c
19499 +++ b/arch/x86/kernel/sys_x86_64.c
19500 @@ -95,8 +95,8 @@ out:
19501 return error;
19502 }
19503
19504 -static void find_start_end(unsigned long flags, unsigned long *begin,
19505 - unsigned long *end)
19506 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19507 + unsigned long *begin, unsigned long *end)
19508 {
19509 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19510 unsigned long new_begin;
19511 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19512 *begin = new_begin;
19513 }
19514 } else {
19515 - *begin = TASK_UNMAPPED_BASE;
19516 + *begin = mm->mmap_base;
19517 *end = TASK_SIZE;
19518 }
19519 }
19520 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19521 if (flags & MAP_FIXED)
19522 return addr;
19523
19524 - find_start_end(flags, &begin, &end);
19525 + find_start_end(mm, flags, &begin, &end);
19526
19527 if (len > end)
19528 return -ENOMEM;
19529
19530 +#ifdef CONFIG_PAX_RANDMMAP
19531 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19532 +#endif
19533 +
19534 if (addr) {
19535 addr = PAGE_ALIGN(addr);
19536 vma = find_vma(mm, addr);
19537 - if (end - len >= addr &&
19538 - (!vma || addr + len <= vma->vm_start))
19539 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19540 return addr;
19541 }
19542 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19543 @@ -172,7 +175,7 @@ full_search:
19544 }
19545 return -ENOMEM;
19546 }
19547 - if (!vma || addr + len <= vma->vm_start) {
19548 + if (check_heap_stack_gap(vma, addr, len)) {
19549 /*
19550 * Remember the place where we stopped the search:
19551 */
19552 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19553 {
19554 struct vm_area_struct *vma;
19555 struct mm_struct *mm = current->mm;
19556 - unsigned long addr = addr0, start_addr;
19557 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19558
19559 /* requested length too big for entire address space */
19560 if (len > TASK_SIZE)
19561 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19562 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19563 goto bottomup;
19564
19565 +#ifdef CONFIG_PAX_RANDMMAP
19566 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19567 +#endif
19568 +
19569 /* requesting a specific address */
19570 if (addr) {
19571 addr = PAGE_ALIGN(addr);
19572 - vma = find_vma(mm, addr);
19573 - if (TASK_SIZE - len >= addr &&
19574 - (!vma || addr + len <= vma->vm_start))
19575 - return addr;
19576 + if (TASK_SIZE - len >= addr) {
19577 + vma = find_vma(mm, addr);
19578 + if (check_heap_stack_gap(vma, addr, len))
19579 + return addr;
19580 + }
19581 }
19582
19583 /* check if free_area_cache is useful for us */
19584 @@ -240,7 +248,7 @@ try_again:
19585 * return with success:
19586 */
19587 vma = find_vma(mm, addr);
19588 - if (!vma || addr+len <= vma->vm_start)
19589 + if (check_heap_stack_gap(vma, addr, len))
19590 /* remember the address as a hint for next time */
19591 return mm->free_area_cache = addr;
19592
19593 @@ -249,8 +257,8 @@ try_again:
19594 mm->cached_hole_size = vma->vm_start - addr;
19595
19596 /* try just below the current vma->vm_start */
19597 - addr = vma->vm_start-len;
19598 - } while (len < vma->vm_start);
19599 + addr = skip_heap_stack_gap(vma, len);
19600 + } while (!IS_ERR_VALUE(addr));
19601
19602 fail:
19603 /*
19604 @@ -270,13 +278,21 @@ bottomup:
19605 * can happen with large stack limits and large mmap()
19606 * allocations.
19607 */
19608 + mm->mmap_base = TASK_UNMAPPED_BASE;
19609 +
19610 +#ifdef CONFIG_PAX_RANDMMAP
19611 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19612 + mm->mmap_base += mm->delta_mmap;
19613 +#endif
19614 +
19615 + mm->free_area_cache = mm->mmap_base;
19616 mm->cached_hole_size = ~0UL;
19617 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19618 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19619 /*
19620 * Restore the topdown base:
19621 */
19622 - mm->free_area_cache = mm->mmap_base;
19623 + mm->mmap_base = base;
19624 + mm->free_area_cache = base;
19625 mm->cached_hole_size = ~0UL;
19626
19627 return addr;
19628 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19629 index f84fe00..93fe08f 100644
19630 --- a/arch/x86/kernel/tboot.c
19631 +++ b/arch/x86/kernel/tboot.c
19632 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
19633
19634 void tboot_shutdown(u32 shutdown_type)
19635 {
19636 - void (*shutdown)(void);
19637 + void (* __noreturn shutdown)(void);
19638
19639 if (!tboot_enabled())
19640 return;
19641 @@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
19642
19643 switch_to_tboot_pt();
19644
19645 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19646 + shutdown = (void *)tboot->shutdown_entry;
19647 shutdown();
19648
19649 /* should not reach here */
19650 @@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19651 return 0;
19652 }
19653
19654 -static atomic_t ap_wfs_count;
19655 +static atomic_unchecked_t ap_wfs_count;
19656
19657 static int tboot_wait_for_aps(int num_aps)
19658 {
19659 @@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19660 {
19661 switch (action) {
19662 case CPU_DYING:
19663 - atomic_inc(&ap_wfs_count);
19664 + atomic_inc_unchecked(&ap_wfs_count);
19665 if (num_online_cpus() == 1)
19666 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19667 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19668 return NOTIFY_BAD;
19669 break;
19670 }
19671 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
19672
19673 tboot_create_trampoline();
19674
19675 - atomic_set(&ap_wfs_count, 0);
19676 + atomic_set_unchecked(&ap_wfs_count, 0);
19677 register_hotcpu_notifier(&tboot_cpu_notifier);
19678
19679 acpi_os_set_prepare_sleep(&tboot_sleep);
19680 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19681 index 24d3c91..d06b473 100644
19682 --- a/arch/x86/kernel/time.c
19683 +++ b/arch/x86/kernel/time.c
19684 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19685 {
19686 unsigned long pc = instruction_pointer(regs);
19687
19688 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19689 + if (!user_mode(regs) && in_lock_functions(pc)) {
19690 #ifdef CONFIG_FRAME_POINTER
19691 - return *(unsigned long *)(regs->bp + sizeof(long));
19692 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19693 #else
19694 unsigned long *sp =
19695 (unsigned long *)kernel_stack_pointer(regs);
19696 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19697 * or above a saved flags. Eflags has bits 22-31 zero,
19698 * kernel addresses don't.
19699 */
19700 +
19701 +#ifdef CONFIG_PAX_KERNEXEC
19702 + return ktla_ktva(sp[0]);
19703 +#else
19704 if (sp[0] >> 22)
19705 return sp[0];
19706 if (sp[1] >> 22)
19707 return sp[1];
19708 #endif
19709 +
19710 +#endif
19711 }
19712 return pc;
19713 }
19714 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19715 index 9d9d2f9..cad418a 100644
19716 --- a/arch/x86/kernel/tls.c
19717 +++ b/arch/x86/kernel/tls.c
19718 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19719 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19720 return -EINVAL;
19721
19722 +#ifdef CONFIG_PAX_SEGMEXEC
19723 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19724 + return -EINVAL;
19725 +#endif
19726 +
19727 set_tls_desc(p, idx, &info, 1);
19728
19729 return 0;
19730 @@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
19731
19732 if (kbuf)
19733 info = kbuf;
19734 - else if (__copy_from_user(infobuf, ubuf, count))
19735 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
19736 return -EFAULT;
19737 else
19738 info = infobuf;
19739 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19740 index 05b31d9..501d3ba 100644
19741 --- a/arch/x86/kernel/traps.c
19742 +++ b/arch/x86/kernel/traps.c
19743 @@ -67,12 +67,6 @@ asmlinkage int system_call(void);
19744
19745 /* Do we ignore FPU interrupts ? */
19746 char ignore_fpu_irq;
19747 -
19748 -/*
19749 - * The IDT has to be page-aligned to simplify the Pentium
19750 - * F0 0F bug workaround.
19751 - */
19752 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19753 #endif
19754
19755 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19756 @@ -105,13 +99,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19757 }
19758
19759 static void __kprobes
19760 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19761 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19762 long error_code, siginfo_t *info)
19763 {
19764 struct task_struct *tsk = current;
19765
19766 #ifdef CONFIG_X86_32
19767 - if (regs->flags & X86_VM_MASK) {
19768 + if (v8086_mode(regs)) {
19769 /*
19770 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19771 * On nmi (interrupt 2), do_trap should not be called.
19772 @@ -122,7 +116,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19773 }
19774 #endif
19775
19776 - if (!user_mode(regs))
19777 + if (!user_mode_novm(regs))
19778 goto kernel_trap;
19779
19780 #ifdef CONFIG_X86_32
19781 @@ -145,7 +139,7 @@ trap_signal:
19782 printk_ratelimit()) {
19783 printk(KERN_INFO
19784 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19785 - tsk->comm, tsk->pid, str,
19786 + tsk->comm, task_pid_nr(tsk), str,
19787 regs->ip, regs->sp, error_code);
19788 print_vma_addr(" in ", regs->ip);
19789 printk("\n");
19790 @@ -162,8 +156,20 @@ kernel_trap:
19791 if (!fixup_exception(regs)) {
19792 tsk->thread.error_code = error_code;
19793 tsk->thread.trap_nr = trapnr;
19794 +
19795 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19796 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19797 + str = "PAX: suspicious stack segment fault";
19798 +#endif
19799 +
19800 die(str, regs, error_code);
19801 }
19802 +
19803 +#ifdef CONFIG_PAX_REFCOUNT
19804 + if (trapnr == 4)
19805 + pax_report_refcount_overflow(regs);
19806 +#endif
19807 +
19808 return;
19809
19810 #ifdef CONFIG_X86_32
19811 @@ -256,14 +262,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19812 conditional_sti(regs);
19813
19814 #ifdef CONFIG_X86_32
19815 - if (regs->flags & X86_VM_MASK)
19816 + if (v8086_mode(regs))
19817 goto gp_in_vm86;
19818 #endif
19819
19820 tsk = current;
19821 - if (!user_mode(regs))
19822 + if (!user_mode_novm(regs))
19823 goto gp_in_kernel;
19824
19825 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19826 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19827 + struct mm_struct *mm = tsk->mm;
19828 + unsigned long limit;
19829 +
19830 + down_write(&mm->mmap_sem);
19831 + limit = mm->context.user_cs_limit;
19832 + if (limit < TASK_SIZE) {
19833 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19834 + up_write(&mm->mmap_sem);
19835 + return;
19836 + }
19837 + up_write(&mm->mmap_sem);
19838 + }
19839 +#endif
19840 +
19841 tsk->thread.error_code = error_code;
19842 tsk->thread.trap_nr = X86_TRAP_GP;
19843
19844 @@ -296,6 +318,13 @@ gp_in_kernel:
19845 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19846 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19847 return;
19848 +
19849 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19850 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19851 + die("PAX: suspicious general protection fault", regs, error_code);
19852 + else
19853 +#endif
19854 +
19855 die("general protection fault", regs, error_code);
19856 }
19857
19858 @@ -431,7 +460,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19859 /* It's safe to allow irq's after DR6 has been saved */
19860 preempt_conditional_sti(regs);
19861
19862 - if (regs->flags & X86_VM_MASK) {
19863 + if (v8086_mode(regs)) {
19864 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19865 X86_TRAP_DB);
19866 preempt_conditional_cli(regs);
19867 @@ -446,7 +475,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19868 * We already checked v86 mode above, so we can check for kernel mode
19869 * by just checking the CPL of CS.
19870 */
19871 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19872 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19873 tsk->thread.debugreg6 &= ~DR_STEP;
19874 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19875 regs->flags &= ~X86_EFLAGS_TF;
19876 @@ -477,7 +506,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19877 return;
19878 conditional_sti(regs);
19879
19880 - if (!user_mode_vm(regs))
19881 + if (!user_mode(regs))
19882 {
19883 if (!fixup_exception(regs)) {
19884 task->thread.error_code = error_code;
19885 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
19886 index dc4e910..c9dedab 100644
19887 --- a/arch/x86/kernel/uprobes.c
19888 +++ b/arch/x86/kernel/uprobes.c
19889 @@ -606,7 +606,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
19890 int ret = NOTIFY_DONE;
19891
19892 /* We are only interested in userspace traps */
19893 - if (regs && !user_mode_vm(regs))
19894 + if (regs && !user_mode(regs))
19895 return NOTIFY_DONE;
19896
19897 switch (val) {
19898 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19899 index b9242ba..50c5edd 100644
19900 --- a/arch/x86/kernel/verify_cpu.S
19901 +++ b/arch/x86/kernel/verify_cpu.S
19902 @@ -20,6 +20,7 @@
19903 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19904 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19905 * arch/x86/kernel/head_32.S: processor startup
19906 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19907 *
19908 * verify_cpu, returns the status of longmode and SSE in register %eax.
19909 * 0: Success 1: Failure
19910 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19911 index 255f58a..5e91150 100644
19912 --- a/arch/x86/kernel/vm86_32.c
19913 +++ b/arch/x86/kernel/vm86_32.c
19914 @@ -41,6 +41,7 @@
19915 #include <linux/ptrace.h>
19916 #include <linux/audit.h>
19917 #include <linux/stddef.h>
19918 +#include <linux/grsecurity.h>
19919
19920 #include <asm/uaccess.h>
19921 #include <asm/io.h>
19922 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19923 do_exit(SIGSEGV);
19924 }
19925
19926 - tss = &per_cpu(init_tss, get_cpu());
19927 + tss = init_tss + get_cpu();
19928 current->thread.sp0 = current->thread.saved_sp0;
19929 current->thread.sysenter_cs = __KERNEL_CS;
19930 load_sp0(tss, &current->thread);
19931 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19932 struct task_struct *tsk;
19933 int tmp, ret = -EPERM;
19934
19935 +#ifdef CONFIG_GRKERNSEC_VM86
19936 + if (!capable(CAP_SYS_RAWIO)) {
19937 + gr_handle_vm86();
19938 + goto out;
19939 + }
19940 +#endif
19941 +
19942 tsk = current;
19943 if (tsk->thread.saved_sp0)
19944 goto out;
19945 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19946 int tmp, ret;
19947 struct vm86plus_struct __user *v86;
19948
19949 +#ifdef CONFIG_GRKERNSEC_VM86
19950 + if (!capable(CAP_SYS_RAWIO)) {
19951 + gr_handle_vm86();
19952 + ret = -EPERM;
19953 + goto out;
19954 + }
19955 +#endif
19956 +
19957 tsk = current;
19958 switch (cmd) {
19959 case VM86_REQUEST_IRQ:
19960 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19961 tsk->thread.saved_fs = info->regs32->fs;
19962 tsk->thread.saved_gs = get_user_gs(info->regs32);
19963
19964 - tss = &per_cpu(init_tss, get_cpu());
19965 + tss = init_tss + get_cpu();
19966 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19967 if (cpu_has_sep)
19968 tsk->thread.sysenter_cs = 0;
19969 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19970 goto cannot_handle;
19971 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19972 goto cannot_handle;
19973 - intr_ptr = (unsigned long __user *) (i << 2);
19974 + intr_ptr = (__force unsigned long __user *) (i << 2);
19975 if (get_user(segoffs, intr_ptr))
19976 goto cannot_handle;
19977 if ((segoffs >> 16) == BIOSSEG)
19978 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19979 index 22a1530..8fbaaad 100644
19980 --- a/arch/x86/kernel/vmlinux.lds.S
19981 +++ b/arch/x86/kernel/vmlinux.lds.S
19982 @@ -26,6 +26,13 @@
19983 #include <asm/page_types.h>
19984 #include <asm/cache.h>
19985 #include <asm/boot.h>
19986 +#include <asm/segment.h>
19987 +
19988 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19989 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19990 +#else
19991 +#define __KERNEL_TEXT_OFFSET 0
19992 +#endif
19993
19994 #undef i386 /* in case the preprocessor is a 32bit one */
19995
19996 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19997
19998 PHDRS {
19999 text PT_LOAD FLAGS(5); /* R_E */
20000 +#ifdef CONFIG_X86_32
20001 + module PT_LOAD FLAGS(5); /* R_E */
20002 +#endif
20003 +#ifdef CONFIG_XEN
20004 + rodata PT_LOAD FLAGS(5); /* R_E */
20005 +#else
20006 + rodata PT_LOAD FLAGS(4); /* R__ */
20007 +#endif
20008 data PT_LOAD FLAGS(6); /* RW_ */
20009 -#ifdef CONFIG_X86_64
20010 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20011 #ifdef CONFIG_SMP
20012 percpu PT_LOAD FLAGS(6); /* RW_ */
20013 #endif
20014 + text.init PT_LOAD FLAGS(5); /* R_E */
20015 + text.exit PT_LOAD FLAGS(5); /* R_E */
20016 init PT_LOAD FLAGS(7); /* RWE */
20017 -#endif
20018 note PT_NOTE FLAGS(0); /* ___ */
20019 }
20020
20021 SECTIONS
20022 {
20023 #ifdef CONFIG_X86_32
20024 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20025 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20026 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20027 #else
20028 - . = __START_KERNEL;
20029 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20030 + . = __START_KERNEL;
20031 #endif
20032
20033 /* Text and read-only data */
20034 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20035 - _text = .;
20036 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20037 /* bootstrapping code */
20038 +#ifdef CONFIG_X86_32
20039 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20040 +#else
20041 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20042 +#endif
20043 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20044 + _text = .;
20045 HEAD_TEXT
20046 #ifdef CONFIG_X86_32
20047 . = ALIGN(PAGE_SIZE);
20048 @@ -108,13 +128,48 @@ SECTIONS
20049 IRQENTRY_TEXT
20050 *(.fixup)
20051 *(.gnu.warning)
20052 - /* End of text section */
20053 - _etext = .;
20054 } :text = 0x9090
20055
20056 - NOTES :text :note
20057 + . += __KERNEL_TEXT_OFFSET;
20058
20059 - EXCEPTION_TABLE(16) :text = 0x9090
20060 +#ifdef CONFIG_X86_32
20061 + . = ALIGN(PAGE_SIZE);
20062 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20063 +
20064 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20065 + MODULES_EXEC_VADDR = .;
20066 + BYTE(0)
20067 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20068 + . = ALIGN(HPAGE_SIZE) - 1;
20069 + MODULES_EXEC_END = .;
20070 +#endif
20071 +
20072 + } :module
20073 +#endif
20074 +
20075 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20076 + /* End of text section */
20077 + BYTE(0)
20078 + _etext = . - __KERNEL_TEXT_OFFSET;
20079 + }
20080 +
20081 +#ifdef CONFIG_X86_32
20082 + . = ALIGN(PAGE_SIZE);
20083 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20084 + *(.idt)
20085 + . = ALIGN(PAGE_SIZE);
20086 + *(.empty_zero_page)
20087 + *(.initial_pg_fixmap)
20088 + *(.initial_pg_pmd)
20089 + *(.initial_page_table)
20090 + *(.swapper_pg_dir)
20091 + } :rodata
20092 +#endif
20093 +
20094 + . = ALIGN(PAGE_SIZE);
20095 + NOTES :rodata :note
20096 +
20097 + EXCEPTION_TABLE(16) :rodata
20098
20099 #if defined(CONFIG_DEBUG_RODATA)
20100 /* .text should occupy whole number of pages */
20101 @@ -126,16 +181,20 @@ SECTIONS
20102
20103 /* Data */
20104 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20105 +
20106 +#ifdef CONFIG_PAX_KERNEXEC
20107 + . = ALIGN(HPAGE_SIZE);
20108 +#else
20109 + . = ALIGN(PAGE_SIZE);
20110 +#endif
20111 +
20112 /* Start of data section */
20113 _sdata = .;
20114
20115 /* init_task */
20116 INIT_TASK_DATA(THREAD_SIZE)
20117
20118 -#ifdef CONFIG_X86_32
20119 - /* 32 bit has nosave before _edata */
20120 NOSAVE_DATA
20121 -#endif
20122
20123 PAGE_ALIGNED_DATA(PAGE_SIZE)
20124
20125 @@ -176,12 +235,19 @@ SECTIONS
20126 #endif /* CONFIG_X86_64 */
20127
20128 /* Init code and data - will be freed after init */
20129 - . = ALIGN(PAGE_SIZE);
20130 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20131 + BYTE(0)
20132 +
20133 +#ifdef CONFIG_PAX_KERNEXEC
20134 + . = ALIGN(HPAGE_SIZE);
20135 +#else
20136 + . = ALIGN(PAGE_SIZE);
20137 +#endif
20138 +
20139 __init_begin = .; /* paired with __init_end */
20140 - }
20141 + } :init.begin
20142
20143 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20144 +#ifdef CONFIG_SMP
20145 /*
20146 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20147 * output PHDR, so the next output section - .init.text - should
20148 @@ -190,12 +256,27 @@ SECTIONS
20149 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20150 #endif
20151
20152 - INIT_TEXT_SECTION(PAGE_SIZE)
20153 -#ifdef CONFIG_X86_64
20154 - :init
20155 -#endif
20156 + . = ALIGN(PAGE_SIZE);
20157 + init_begin = .;
20158 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20159 + VMLINUX_SYMBOL(_sinittext) = .;
20160 + INIT_TEXT
20161 + VMLINUX_SYMBOL(_einittext) = .;
20162 + . = ALIGN(PAGE_SIZE);
20163 + } :text.init
20164
20165 - INIT_DATA_SECTION(16)
20166 + /*
20167 + * .exit.text is discard at runtime, not link time, to deal with
20168 + * references from .altinstructions and .eh_frame
20169 + */
20170 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20171 + EXIT_TEXT
20172 + . = ALIGN(16);
20173 + } :text.exit
20174 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20175 +
20176 + . = ALIGN(PAGE_SIZE);
20177 + INIT_DATA_SECTION(16) :init
20178
20179 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20180 __x86_cpu_dev_start = .;
20181 @@ -257,19 +338,12 @@ SECTIONS
20182 }
20183
20184 . = ALIGN(8);
20185 - /*
20186 - * .exit.text is discard at runtime, not link time, to deal with
20187 - * references from .altinstructions and .eh_frame
20188 - */
20189 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20190 - EXIT_TEXT
20191 - }
20192
20193 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20194 EXIT_DATA
20195 }
20196
20197 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20198 +#ifndef CONFIG_SMP
20199 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20200 #endif
20201
20202 @@ -288,16 +362,10 @@ SECTIONS
20203 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20204 __smp_locks = .;
20205 *(.smp_locks)
20206 - . = ALIGN(PAGE_SIZE);
20207 __smp_locks_end = .;
20208 + . = ALIGN(PAGE_SIZE);
20209 }
20210
20211 -#ifdef CONFIG_X86_64
20212 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20213 - NOSAVE_DATA
20214 - }
20215 -#endif
20216 -
20217 /* BSS */
20218 . = ALIGN(PAGE_SIZE);
20219 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20220 @@ -313,6 +381,7 @@ SECTIONS
20221 __brk_base = .;
20222 . += 64 * 1024; /* 64k alignment slop space */
20223 *(.brk_reservation) /* areas brk users have reserved */
20224 + . = ALIGN(HPAGE_SIZE);
20225 __brk_limit = .;
20226 }
20227
20228 @@ -339,13 +408,12 @@ SECTIONS
20229 * for the boot processor.
20230 */
20231 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20232 -INIT_PER_CPU(gdt_page);
20233 INIT_PER_CPU(irq_stack_union);
20234
20235 /*
20236 * Build-time check on the image size:
20237 */
20238 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20239 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20240 "kernel image bigger than KERNEL_IMAGE_SIZE");
20241
20242 #ifdef CONFIG_SMP
20243 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20244 index 5db36ca..2938af9 100644
20245 --- a/arch/x86/kernel/vsyscall_64.c
20246 +++ b/arch/x86/kernel/vsyscall_64.c
20247 @@ -54,15 +54,13 @@
20248 DEFINE_VVAR(int, vgetcpu_mode);
20249 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20250
20251 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20252 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20253
20254 static int __init vsyscall_setup(char *str)
20255 {
20256 if (str) {
20257 if (!strcmp("emulate", str))
20258 vsyscall_mode = EMULATE;
20259 - else if (!strcmp("native", str))
20260 - vsyscall_mode = NATIVE;
20261 else if (!strcmp("none", str))
20262 vsyscall_mode = NONE;
20263 else
20264 @@ -309,8 +307,7 @@ done:
20265 return true;
20266
20267 sigsegv:
20268 - force_sig(SIGSEGV, current);
20269 - return true;
20270 + do_group_exit(SIGKILL);
20271 }
20272
20273 /*
20274 @@ -363,10 +360,7 @@ void __init map_vsyscall(void)
20275 extern char __vvar_page;
20276 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20277
20278 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20279 - vsyscall_mode == NATIVE
20280 - ? PAGE_KERNEL_VSYSCALL
20281 - : PAGE_KERNEL_VVAR);
20282 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20283 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20284 (unsigned long)VSYSCALL_START);
20285
20286 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20287 index 9796c2f..f686fbf 100644
20288 --- a/arch/x86/kernel/x8664_ksyms_64.c
20289 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20290 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20291 EXPORT_SYMBOL(copy_user_generic_string);
20292 EXPORT_SYMBOL(copy_user_generic_unrolled);
20293 EXPORT_SYMBOL(__copy_user_nocache);
20294 -EXPORT_SYMBOL(_copy_from_user);
20295 -EXPORT_SYMBOL(_copy_to_user);
20296
20297 EXPORT_SYMBOL(copy_page);
20298 EXPORT_SYMBOL(clear_page);
20299 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20300 index bd18149..2ea0183 100644
20301 --- a/arch/x86/kernel/xsave.c
20302 +++ b/arch/x86/kernel/xsave.c
20303 @@ -129,7 +129,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20304 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20305 return -EINVAL;
20306
20307 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20308 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20309 fx_sw_user->extended_size -
20310 FP_XSTATE_MAGIC2_SIZE));
20311 if (err)
20312 @@ -265,7 +265,7 @@ fx_only:
20313 * the other extended state.
20314 */
20315 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20316 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20317 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20318 }
20319
20320 /*
20321 @@ -294,7 +294,7 @@ int restore_i387_xstate(void __user *buf)
20322 if (use_xsave())
20323 err = restore_user_xstate(buf);
20324 else
20325 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20326 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20327 buf);
20328 if (unlikely(err)) {
20329 /*
20330 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20331 index 7df1c6d..9ea7c79 100644
20332 --- a/arch/x86/kvm/cpuid.c
20333 +++ b/arch/x86/kvm/cpuid.c
20334 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20335 struct kvm_cpuid2 *cpuid,
20336 struct kvm_cpuid_entry2 __user *entries)
20337 {
20338 - int r;
20339 + int r, i;
20340
20341 r = -E2BIG;
20342 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20343 goto out;
20344 r = -EFAULT;
20345 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20346 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20347 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20348 goto out;
20349 + for (i = 0; i < cpuid->nent; ++i) {
20350 + struct kvm_cpuid_entry2 cpuid_entry;
20351 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20352 + goto out;
20353 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20354 + }
20355 vcpu->arch.cpuid_nent = cpuid->nent;
20356 kvm_apic_set_version(vcpu);
20357 kvm_x86_ops->cpuid_update(vcpu);
20358 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20359 struct kvm_cpuid2 *cpuid,
20360 struct kvm_cpuid_entry2 __user *entries)
20361 {
20362 - int r;
20363 + int r, i;
20364
20365 r = -E2BIG;
20366 if (cpuid->nent < vcpu->arch.cpuid_nent)
20367 goto out;
20368 r = -EFAULT;
20369 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20370 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20371 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20372 goto out;
20373 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20374 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20375 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20376 + goto out;
20377 + }
20378 return 0;
20379
20380 out:
20381 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20382 index 4837375..2cc9722 100644
20383 --- a/arch/x86/kvm/emulate.c
20384 +++ b/arch/x86/kvm/emulate.c
20385 @@ -256,6 +256,7 @@ struct gprefix {
20386
20387 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20388 do { \
20389 + unsigned long _tmp; \
20390 __asm__ __volatile__ ( \
20391 _PRE_EFLAGS("0", "4", "2") \
20392 _op _suffix " %"_x"3,%1; " \
20393 @@ -270,8 +271,6 @@ struct gprefix {
20394 /* Raw emulation: instruction has two explicit operands. */
20395 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20396 do { \
20397 - unsigned long _tmp; \
20398 - \
20399 switch ((ctxt)->dst.bytes) { \
20400 case 2: \
20401 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20402 @@ -287,7 +286,6 @@ struct gprefix {
20403
20404 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20405 do { \
20406 - unsigned long _tmp; \
20407 switch ((ctxt)->dst.bytes) { \
20408 case 1: \
20409 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20410 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20411 index 93c1574..d6097dc 100644
20412 --- a/arch/x86/kvm/lapic.c
20413 +++ b/arch/x86/kvm/lapic.c
20414 @@ -54,7 +54,7 @@
20415 #define APIC_BUS_CYCLE_NS 1
20416
20417 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20418 -#define apic_debug(fmt, arg...)
20419 +#define apic_debug(fmt, arg...) do {} while (0)
20420
20421 #define APIC_LVT_NUM 6
20422 /* 14 is the version for Xeon and Pentium 8.4.8*/
20423 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20424 index 34f9709..8eca2d5 100644
20425 --- a/arch/x86/kvm/paging_tmpl.h
20426 +++ b/arch/x86/kvm/paging_tmpl.h
20427 @@ -197,7 +197,7 @@ retry_walk:
20428 if (unlikely(kvm_is_error_hva(host_addr)))
20429 goto error;
20430
20431 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20432 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20433 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20434 goto error;
20435
20436 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20437 index f75af40..285b18f 100644
20438 --- a/arch/x86/kvm/svm.c
20439 +++ b/arch/x86/kvm/svm.c
20440 @@ -3516,7 +3516,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20441 int cpu = raw_smp_processor_id();
20442
20443 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20444 +
20445 + pax_open_kernel();
20446 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20447 + pax_close_kernel();
20448 +
20449 load_TR_desc();
20450 }
20451
20452 @@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20453 #endif
20454 #endif
20455
20456 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20457 + __set_fs(current_thread_info()->addr_limit);
20458 +#endif
20459 +
20460 reload_tss(vcpu);
20461
20462 local_irq_disable();
20463 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20464 index 86c8704..d9277bb 100644
20465 --- a/arch/x86/kvm/vmx.c
20466 +++ b/arch/x86/kvm/vmx.c
20467 @@ -1317,7 +1317,11 @@ static void reload_tss(void)
20468 struct desc_struct *descs;
20469
20470 descs = (void *)gdt->address;
20471 +
20472 + pax_open_kernel();
20473 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20474 + pax_close_kernel();
20475 +
20476 load_TR_desc();
20477 }
20478
20479 @@ -1527,6 +1531,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
20480 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
20481 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
20482
20483 +#ifdef CONFIG_PAX_PER_CPU_PGD
20484 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20485 +#endif
20486 +
20487 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
20488 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
20489 vmx->loaded_vmcs->cpu = cpu;
20490 @@ -2650,8 +2658,11 @@ static __init int hardware_setup(void)
20491 if (!cpu_has_vmx_flexpriority())
20492 flexpriority_enabled = 0;
20493
20494 - if (!cpu_has_vmx_tpr_shadow())
20495 - kvm_x86_ops->update_cr8_intercept = NULL;
20496 + if (!cpu_has_vmx_tpr_shadow()) {
20497 + pax_open_kernel();
20498 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20499 + pax_close_kernel();
20500 + }
20501
20502 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20503 kvm_disable_largepages();
20504 @@ -3697,7 +3708,10 @@ static void vmx_set_constant_host_state(void)
20505
20506 vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
20507 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
20508 +
20509 +#ifndef CONFIG_PAX_PER_CPU_PGD
20510 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20511 +#endif
20512
20513 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
20514 #ifdef CONFIG_X86_64
20515 @@ -3719,7 +3733,7 @@ static void vmx_set_constant_host_state(void)
20516 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20517
20518 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20519 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20520 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20521
20522 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20523 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20524 @@ -6257,6 +6271,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20525 "jmp .Lkvm_vmx_return \n\t"
20526 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20527 ".Lkvm_vmx_return: "
20528 +
20529 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20530 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20531 + ".Lkvm_vmx_return2: "
20532 +#endif
20533 +
20534 /* Save guest registers, load host registers, keep flags */
20535 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20536 "pop %0 \n\t"
20537 @@ -6305,6 +6325,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20538 #endif
20539 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20540 [wordsize]"i"(sizeof(ulong))
20541 +
20542 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20543 + ,[cs]"i"(__KERNEL_CS)
20544 +#endif
20545 +
20546 : "cc", "memory"
20547 , R"ax", R"bx", R"di", R"si"
20548 #ifdef CONFIG_X86_64
20549 @@ -6312,7 +6337,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20550 #endif
20551 );
20552
20553 -#ifndef CONFIG_X86_64
20554 +#ifdef CONFIG_X86_32
20555 /*
20556 * The sysexit path does not restore ds/es, so we must set them to
20557 * a reasonable value ourselves.
20558 @@ -6321,8 +6346,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20559 * may be executed in interrupt context, which saves and restore segments
20560 * around it, nullifying its effect.
20561 */
20562 - loadsegment(ds, __USER_DS);
20563 - loadsegment(es, __USER_DS);
20564 + loadsegment(ds, __KERNEL_DS);
20565 + loadsegment(es, __KERNEL_DS);
20566 + loadsegment(ss, __KERNEL_DS);
20567 +
20568 +#ifdef CONFIG_PAX_KERNEXEC
20569 + loadsegment(fs, __KERNEL_PERCPU);
20570 +#endif
20571 +
20572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20573 + __set_fs(current_thread_info()->addr_limit);
20574 +#endif
20575 +
20576 #endif
20577
20578 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
20579 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20580 index 14c290d..0dae6e5 100644
20581 --- a/arch/x86/kvm/x86.c
20582 +++ b/arch/x86/kvm/x86.c
20583 @@ -1361,8 +1361,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20584 {
20585 struct kvm *kvm = vcpu->kvm;
20586 int lm = is_long_mode(vcpu);
20587 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20588 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20589 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20590 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20591 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20592 : kvm->arch.xen_hvm_config.blob_size_32;
20593 u32 page_num = data & ~PAGE_MASK;
20594 @@ -2218,6 +2218,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20595 if (n < msr_list.nmsrs)
20596 goto out;
20597 r = -EFAULT;
20598 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20599 + goto out;
20600 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20601 num_msrs_to_save * sizeof(u32)))
20602 goto out;
20603 @@ -2343,7 +2345,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20604 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20605 struct kvm_interrupt *irq)
20606 {
20607 - if (irq->irq < 0 || irq->irq >= 256)
20608 + if (irq->irq >= 256)
20609 return -EINVAL;
20610 if (irqchip_in_kernel(vcpu->kvm))
20611 return -ENXIO;
20612 @@ -4880,7 +4882,7 @@ static void kvm_set_mmio_spte_mask(void)
20613 kvm_mmu_set_mmio_spte_mask(mask);
20614 }
20615
20616 -int kvm_arch_init(void *opaque)
20617 +int kvm_arch_init(const void *opaque)
20618 {
20619 int r;
20620 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20621 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20622 index 642d880..44e0f3f 100644
20623 --- a/arch/x86/lguest/boot.c
20624 +++ b/arch/x86/lguest/boot.c
20625 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20626 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20627 * Launcher to reboot us.
20628 */
20629 -static void lguest_restart(char *reason)
20630 +static __noreturn void lguest_restart(char *reason)
20631 {
20632 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20633 + BUG();
20634 }
20635
20636 /*G:050
20637 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20638 index 00933d5..3a64af9 100644
20639 --- a/arch/x86/lib/atomic64_386_32.S
20640 +++ b/arch/x86/lib/atomic64_386_32.S
20641 @@ -48,6 +48,10 @@ BEGIN(read)
20642 movl (v), %eax
20643 movl 4(v), %edx
20644 RET_ENDP
20645 +BEGIN(read_unchecked)
20646 + movl (v), %eax
20647 + movl 4(v), %edx
20648 +RET_ENDP
20649 #undef v
20650
20651 #define v %esi
20652 @@ -55,6 +59,10 @@ BEGIN(set)
20653 movl %ebx, (v)
20654 movl %ecx, 4(v)
20655 RET_ENDP
20656 +BEGIN(set_unchecked)
20657 + movl %ebx, (v)
20658 + movl %ecx, 4(v)
20659 +RET_ENDP
20660 #undef v
20661
20662 #define v %esi
20663 @@ -70,6 +78,20 @@ RET_ENDP
20664 BEGIN(add)
20665 addl %eax, (v)
20666 adcl %edx, 4(v)
20667 +
20668 +#ifdef CONFIG_PAX_REFCOUNT
20669 + jno 0f
20670 + subl %eax, (v)
20671 + sbbl %edx, 4(v)
20672 + int $4
20673 +0:
20674 + _ASM_EXTABLE(0b, 0b)
20675 +#endif
20676 +
20677 +RET_ENDP
20678 +BEGIN(add_unchecked)
20679 + addl %eax, (v)
20680 + adcl %edx, 4(v)
20681 RET_ENDP
20682 #undef v
20683
20684 @@ -77,6 +99,24 @@ RET_ENDP
20685 BEGIN(add_return)
20686 addl (v), %eax
20687 adcl 4(v), %edx
20688 +
20689 +#ifdef CONFIG_PAX_REFCOUNT
20690 + into
20691 +1234:
20692 + _ASM_EXTABLE(1234b, 2f)
20693 +#endif
20694 +
20695 + movl %eax, (v)
20696 + movl %edx, 4(v)
20697 +
20698 +#ifdef CONFIG_PAX_REFCOUNT
20699 +2:
20700 +#endif
20701 +
20702 +RET_ENDP
20703 +BEGIN(add_return_unchecked)
20704 + addl (v), %eax
20705 + adcl 4(v), %edx
20706 movl %eax, (v)
20707 movl %edx, 4(v)
20708 RET_ENDP
20709 @@ -86,6 +126,20 @@ RET_ENDP
20710 BEGIN(sub)
20711 subl %eax, (v)
20712 sbbl %edx, 4(v)
20713 +
20714 +#ifdef CONFIG_PAX_REFCOUNT
20715 + jno 0f
20716 + addl %eax, (v)
20717 + adcl %edx, 4(v)
20718 + int $4
20719 +0:
20720 + _ASM_EXTABLE(0b, 0b)
20721 +#endif
20722 +
20723 +RET_ENDP
20724 +BEGIN(sub_unchecked)
20725 + subl %eax, (v)
20726 + sbbl %edx, 4(v)
20727 RET_ENDP
20728 #undef v
20729
20730 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20731 sbbl $0, %edx
20732 addl (v), %eax
20733 adcl 4(v), %edx
20734 +
20735 +#ifdef CONFIG_PAX_REFCOUNT
20736 + into
20737 +1234:
20738 + _ASM_EXTABLE(1234b, 2f)
20739 +#endif
20740 +
20741 + movl %eax, (v)
20742 + movl %edx, 4(v)
20743 +
20744 +#ifdef CONFIG_PAX_REFCOUNT
20745 +2:
20746 +#endif
20747 +
20748 +RET_ENDP
20749 +BEGIN(sub_return_unchecked)
20750 + negl %edx
20751 + negl %eax
20752 + sbbl $0, %edx
20753 + addl (v), %eax
20754 + adcl 4(v), %edx
20755 movl %eax, (v)
20756 movl %edx, 4(v)
20757 RET_ENDP
20758 @@ -105,6 +180,20 @@ RET_ENDP
20759 BEGIN(inc)
20760 addl $1, (v)
20761 adcl $0, 4(v)
20762 +
20763 +#ifdef CONFIG_PAX_REFCOUNT
20764 + jno 0f
20765 + subl $1, (v)
20766 + sbbl $0, 4(v)
20767 + int $4
20768 +0:
20769 + _ASM_EXTABLE(0b, 0b)
20770 +#endif
20771 +
20772 +RET_ENDP
20773 +BEGIN(inc_unchecked)
20774 + addl $1, (v)
20775 + adcl $0, 4(v)
20776 RET_ENDP
20777 #undef v
20778
20779 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20780 movl 4(v), %edx
20781 addl $1, %eax
20782 adcl $0, %edx
20783 +
20784 +#ifdef CONFIG_PAX_REFCOUNT
20785 + into
20786 +1234:
20787 + _ASM_EXTABLE(1234b, 2f)
20788 +#endif
20789 +
20790 + movl %eax, (v)
20791 + movl %edx, 4(v)
20792 +
20793 +#ifdef CONFIG_PAX_REFCOUNT
20794 +2:
20795 +#endif
20796 +
20797 +RET_ENDP
20798 +BEGIN(inc_return_unchecked)
20799 + movl (v), %eax
20800 + movl 4(v), %edx
20801 + addl $1, %eax
20802 + adcl $0, %edx
20803 movl %eax, (v)
20804 movl %edx, 4(v)
20805 RET_ENDP
20806 @@ -123,6 +232,20 @@ RET_ENDP
20807 BEGIN(dec)
20808 subl $1, (v)
20809 sbbl $0, 4(v)
20810 +
20811 +#ifdef CONFIG_PAX_REFCOUNT
20812 + jno 0f
20813 + addl $1, (v)
20814 + adcl $0, 4(v)
20815 + int $4
20816 +0:
20817 + _ASM_EXTABLE(0b, 0b)
20818 +#endif
20819 +
20820 +RET_ENDP
20821 +BEGIN(dec_unchecked)
20822 + subl $1, (v)
20823 + sbbl $0, 4(v)
20824 RET_ENDP
20825 #undef v
20826
20827 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20828 movl 4(v), %edx
20829 subl $1, %eax
20830 sbbl $0, %edx
20831 +
20832 +#ifdef CONFIG_PAX_REFCOUNT
20833 + into
20834 +1234:
20835 + _ASM_EXTABLE(1234b, 2f)
20836 +#endif
20837 +
20838 + movl %eax, (v)
20839 + movl %edx, 4(v)
20840 +
20841 +#ifdef CONFIG_PAX_REFCOUNT
20842 +2:
20843 +#endif
20844 +
20845 +RET_ENDP
20846 +BEGIN(dec_return_unchecked)
20847 + movl (v), %eax
20848 + movl 4(v), %edx
20849 + subl $1, %eax
20850 + sbbl $0, %edx
20851 movl %eax, (v)
20852 movl %edx, 4(v)
20853 RET_ENDP
20854 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20855 adcl %edx, %edi
20856 addl (v), %eax
20857 adcl 4(v), %edx
20858 +
20859 +#ifdef CONFIG_PAX_REFCOUNT
20860 + into
20861 +1234:
20862 + _ASM_EXTABLE(1234b, 2f)
20863 +#endif
20864 +
20865 cmpl %eax, %ecx
20866 je 3f
20867 1:
20868 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20869 1:
20870 addl $1, %eax
20871 adcl $0, %edx
20872 +
20873 +#ifdef CONFIG_PAX_REFCOUNT
20874 + into
20875 +1234:
20876 + _ASM_EXTABLE(1234b, 2f)
20877 +#endif
20878 +
20879 movl %eax, (v)
20880 movl %edx, 4(v)
20881 movl $1, %eax
20882 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20883 movl 4(v), %edx
20884 subl $1, %eax
20885 sbbl $0, %edx
20886 +
20887 +#ifdef CONFIG_PAX_REFCOUNT
20888 + into
20889 +1234:
20890 + _ASM_EXTABLE(1234b, 1f)
20891 +#endif
20892 +
20893 js 1f
20894 movl %eax, (v)
20895 movl %edx, 4(v)
20896 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20897 index f5cc9eb..51fa319 100644
20898 --- a/arch/x86/lib/atomic64_cx8_32.S
20899 +++ b/arch/x86/lib/atomic64_cx8_32.S
20900 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20901 CFI_STARTPROC
20902
20903 read64 %ecx
20904 + pax_force_retaddr
20905 ret
20906 CFI_ENDPROC
20907 ENDPROC(atomic64_read_cx8)
20908
20909 +ENTRY(atomic64_read_unchecked_cx8)
20910 + CFI_STARTPROC
20911 +
20912 + read64 %ecx
20913 + pax_force_retaddr
20914 + ret
20915 + CFI_ENDPROC
20916 +ENDPROC(atomic64_read_unchecked_cx8)
20917 +
20918 ENTRY(atomic64_set_cx8)
20919 CFI_STARTPROC
20920
20921 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20922 cmpxchg8b (%esi)
20923 jne 1b
20924
20925 + pax_force_retaddr
20926 ret
20927 CFI_ENDPROC
20928 ENDPROC(atomic64_set_cx8)
20929
20930 +ENTRY(atomic64_set_unchecked_cx8)
20931 + CFI_STARTPROC
20932 +
20933 +1:
20934 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20935 + * are atomic on 586 and newer */
20936 + cmpxchg8b (%esi)
20937 + jne 1b
20938 +
20939 + pax_force_retaddr
20940 + ret
20941 + CFI_ENDPROC
20942 +ENDPROC(atomic64_set_unchecked_cx8)
20943 +
20944 ENTRY(atomic64_xchg_cx8)
20945 CFI_STARTPROC
20946
20947 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20948 cmpxchg8b (%esi)
20949 jne 1b
20950
20951 + pax_force_retaddr
20952 ret
20953 CFI_ENDPROC
20954 ENDPROC(atomic64_xchg_cx8)
20955
20956 -.macro addsub_return func ins insc
20957 -ENTRY(atomic64_\func\()_return_cx8)
20958 +.macro addsub_return func ins insc unchecked=""
20959 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20960 CFI_STARTPROC
20961 SAVE ebp
20962 SAVE ebx
20963 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20964 movl %edx, %ecx
20965 \ins\()l %esi, %ebx
20966 \insc\()l %edi, %ecx
20967 +
20968 +.ifb \unchecked
20969 +#ifdef CONFIG_PAX_REFCOUNT
20970 + into
20971 +2:
20972 + _ASM_EXTABLE(2b, 3f)
20973 +#endif
20974 +.endif
20975 +
20976 LOCK_PREFIX
20977 cmpxchg8b (%ebp)
20978 jne 1b
20979 -
20980 -10:
20981 movl %ebx, %eax
20982 movl %ecx, %edx
20983 +
20984 +.ifb \unchecked
20985 +#ifdef CONFIG_PAX_REFCOUNT
20986 +3:
20987 +#endif
20988 +.endif
20989 +
20990 RESTORE edi
20991 RESTORE esi
20992 RESTORE ebx
20993 RESTORE ebp
20994 + pax_force_retaddr
20995 ret
20996 CFI_ENDPROC
20997 -ENDPROC(atomic64_\func\()_return_cx8)
20998 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20999 .endm
21000
21001 addsub_return add add adc
21002 addsub_return sub sub sbb
21003 +addsub_return add add adc _unchecked
21004 +addsub_return sub sub sbb _unchecked
21005
21006 -.macro incdec_return func ins insc
21007 -ENTRY(atomic64_\func\()_return_cx8)
21008 +.macro incdec_return func ins insc unchecked=""
21009 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21010 CFI_STARTPROC
21011 SAVE ebx
21012
21013 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21014 movl %edx, %ecx
21015 \ins\()l $1, %ebx
21016 \insc\()l $0, %ecx
21017 +
21018 +.ifb \unchecked
21019 +#ifdef CONFIG_PAX_REFCOUNT
21020 + into
21021 +2:
21022 + _ASM_EXTABLE(2b, 3f)
21023 +#endif
21024 +.endif
21025 +
21026 LOCK_PREFIX
21027 cmpxchg8b (%esi)
21028 jne 1b
21029
21030 -10:
21031 movl %ebx, %eax
21032 movl %ecx, %edx
21033 +
21034 +.ifb \unchecked
21035 +#ifdef CONFIG_PAX_REFCOUNT
21036 +3:
21037 +#endif
21038 +.endif
21039 +
21040 RESTORE ebx
21041 + pax_force_retaddr
21042 ret
21043 CFI_ENDPROC
21044 -ENDPROC(atomic64_\func\()_return_cx8)
21045 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21046 .endm
21047
21048 incdec_return inc add adc
21049 incdec_return dec sub sbb
21050 +incdec_return inc add adc _unchecked
21051 +incdec_return dec sub sbb _unchecked
21052
21053 ENTRY(atomic64_dec_if_positive_cx8)
21054 CFI_STARTPROC
21055 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21056 movl %edx, %ecx
21057 subl $1, %ebx
21058 sbb $0, %ecx
21059 +
21060 +#ifdef CONFIG_PAX_REFCOUNT
21061 + into
21062 +1234:
21063 + _ASM_EXTABLE(1234b, 2f)
21064 +#endif
21065 +
21066 js 2f
21067 LOCK_PREFIX
21068 cmpxchg8b (%esi)
21069 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21070 movl %ebx, %eax
21071 movl %ecx, %edx
21072 RESTORE ebx
21073 + pax_force_retaddr
21074 ret
21075 CFI_ENDPROC
21076 ENDPROC(atomic64_dec_if_positive_cx8)
21077 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21078 movl %edx, %ecx
21079 addl %ebp, %ebx
21080 adcl %edi, %ecx
21081 +
21082 +#ifdef CONFIG_PAX_REFCOUNT
21083 + into
21084 +1234:
21085 + _ASM_EXTABLE(1234b, 3f)
21086 +#endif
21087 +
21088 LOCK_PREFIX
21089 cmpxchg8b (%esi)
21090 jne 1b
21091 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21092 CFI_ADJUST_CFA_OFFSET -8
21093 RESTORE ebx
21094 RESTORE ebp
21095 + pax_force_retaddr
21096 ret
21097 4:
21098 cmpl %edx, 4(%esp)
21099 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21100 xorl %ecx, %ecx
21101 addl $1, %ebx
21102 adcl %edx, %ecx
21103 +
21104 +#ifdef CONFIG_PAX_REFCOUNT
21105 + into
21106 +1234:
21107 + _ASM_EXTABLE(1234b, 3f)
21108 +#endif
21109 +
21110 LOCK_PREFIX
21111 cmpxchg8b (%esi)
21112 jne 1b
21113 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21114 movl $1, %eax
21115 3:
21116 RESTORE ebx
21117 + pax_force_retaddr
21118 ret
21119 CFI_ENDPROC
21120 ENDPROC(atomic64_inc_not_zero_cx8)
21121 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21122 index 2af5df3..62b1a5a 100644
21123 --- a/arch/x86/lib/checksum_32.S
21124 +++ b/arch/x86/lib/checksum_32.S
21125 @@ -29,7 +29,8 @@
21126 #include <asm/dwarf2.h>
21127 #include <asm/errno.h>
21128 #include <asm/asm.h>
21129 -
21130 +#include <asm/segment.h>
21131 +
21132 /*
21133 * computes a partial checksum, e.g. for TCP/UDP fragments
21134 */
21135 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21136
21137 #define ARGBASE 16
21138 #define FP 12
21139 -
21140 -ENTRY(csum_partial_copy_generic)
21141 +
21142 +ENTRY(csum_partial_copy_generic_to_user)
21143 CFI_STARTPROC
21144 +
21145 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21146 + pushl_cfi %gs
21147 + popl_cfi %es
21148 + jmp csum_partial_copy_generic
21149 +#endif
21150 +
21151 +ENTRY(csum_partial_copy_generic_from_user)
21152 +
21153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21154 + pushl_cfi %gs
21155 + popl_cfi %ds
21156 +#endif
21157 +
21158 +ENTRY(csum_partial_copy_generic)
21159 subl $4,%esp
21160 CFI_ADJUST_CFA_OFFSET 4
21161 pushl_cfi %edi
21162 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
21163 jmp 4f
21164 SRC(1: movw (%esi), %bx )
21165 addl $2, %esi
21166 -DST( movw %bx, (%edi) )
21167 +DST( movw %bx, %es:(%edi) )
21168 addl $2, %edi
21169 addw %bx, %ax
21170 adcl $0, %eax
21171 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
21172 SRC(1: movl (%esi), %ebx )
21173 SRC( movl 4(%esi), %edx )
21174 adcl %ebx, %eax
21175 -DST( movl %ebx, (%edi) )
21176 +DST( movl %ebx, %es:(%edi) )
21177 adcl %edx, %eax
21178 -DST( movl %edx, 4(%edi) )
21179 +DST( movl %edx, %es:4(%edi) )
21180
21181 SRC( movl 8(%esi), %ebx )
21182 SRC( movl 12(%esi), %edx )
21183 adcl %ebx, %eax
21184 -DST( movl %ebx, 8(%edi) )
21185 +DST( movl %ebx, %es:8(%edi) )
21186 adcl %edx, %eax
21187 -DST( movl %edx, 12(%edi) )
21188 +DST( movl %edx, %es:12(%edi) )
21189
21190 SRC( movl 16(%esi), %ebx )
21191 SRC( movl 20(%esi), %edx )
21192 adcl %ebx, %eax
21193 -DST( movl %ebx, 16(%edi) )
21194 +DST( movl %ebx, %es:16(%edi) )
21195 adcl %edx, %eax
21196 -DST( movl %edx, 20(%edi) )
21197 +DST( movl %edx, %es:20(%edi) )
21198
21199 SRC( movl 24(%esi), %ebx )
21200 SRC( movl 28(%esi), %edx )
21201 adcl %ebx, %eax
21202 -DST( movl %ebx, 24(%edi) )
21203 +DST( movl %ebx, %es:24(%edi) )
21204 adcl %edx, %eax
21205 -DST( movl %edx, 28(%edi) )
21206 +DST( movl %edx, %es:28(%edi) )
21207
21208 lea 32(%esi), %esi
21209 lea 32(%edi), %edi
21210 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
21211 shrl $2, %edx # This clears CF
21212 SRC(3: movl (%esi), %ebx )
21213 adcl %ebx, %eax
21214 -DST( movl %ebx, (%edi) )
21215 +DST( movl %ebx, %es:(%edi) )
21216 lea 4(%esi), %esi
21217 lea 4(%edi), %edi
21218 dec %edx
21219 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
21220 jb 5f
21221 SRC( movw (%esi), %cx )
21222 leal 2(%esi), %esi
21223 -DST( movw %cx, (%edi) )
21224 +DST( movw %cx, %es:(%edi) )
21225 leal 2(%edi), %edi
21226 je 6f
21227 shll $16,%ecx
21228 SRC(5: movb (%esi), %cl )
21229 -DST( movb %cl, (%edi) )
21230 +DST( movb %cl, %es:(%edi) )
21231 6: addl %ecx, %eax
21232 adcl $0, %eax
21233 7:
21234 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
21235
21236 6001:
21237 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21238 - movl $-EFAULT, (%ebx)
21239 + movl $-EFAULT, %ss:(%ebx)
21240
21241 # zero the complete destination - computing the rest
21242 # is too much work
21243 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
21244
21245 6002:
21246 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21247 - movl $-EFAULT,(%ebx)
21248 + movl $-EFAULT,%ss:(%ebx)
21249 jmp 5000b
21250
21251 .previous
21252
21253 + pushl_cfi %ss
21254 + popl_cfi %ds
21255 + pushl_cfi %ss
21256 + popl_cfi %es
21257 popl_cfi %ebx
21258 CFI_RESTORE ebx
21259 popl_cfi %esi
21260 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
21261 popl_cfi %ecx # equivalent to addl $4,%esp
21262 ret
21263 CFI_ENDPROC
21264 -ENDPROC(csum_partial_copy_generic)
21265 +ENDPROC(csum_partial_copy_generic_to_user)
21266
21267 #else
21268
21269 /* Version for PentiumII/PPro */
21270
21271 #define ROUND1(x) \
21272 + nop; nop; nop; \
21273 SRC(movl x(%esi), %ebx ) ; \
21274 addl %ebx, %eax ; \
21275 - DST(movl %ebx, x(%edi) ) ;
21276 + DST(movl %ebx, %es:x(%edi)) ;
21277
21278 #define ROUND(x) \
21279 + nop; nop; nop; \
21280 SRC(movl x(%esi), %ebx ) ; \
21281 adcl %ebx, %eax ; \
21282 - DST(movl %ebx, x(%edi) ) ;
21283 + DST(movl %ebx, %es:x(%edi)) ;
21284
21285 #define ARGBASE 12
21286 -
21287 -ENTRY(csum_partial_copy_generic)
21288 +
21289 +ENTRY(csum_partial_copy_generic_to_user)
21290 CFI_STARTPROC
21291 +
21292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21293 + pushl_cfi %gs
21294 + popl_cfi %es
21295 + jmp csum_partial_copy_generic
21296 +#endif
21297 +
21298 +ENTRY(csum_partial_copy_generic_from_user)
21299 +
21300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21301 + pushl_cfi %gs
21302 + popl_cfi %ds
21303 +#endif
21304 +
21305 +ENTRY(csum_partial_copy_generic)
21306 pushl_cfi %ebx
21307 CFI_REL_OFFSET ebx, 0
21308 pushl_cfi %edi
21309 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
21310 subl %ebx, %edi
21311 lea -1(%esi),%edx
21312 andl $-32,%edx
21313 - lea 3f(%ebx,%ebx), %ebx
21314 + lea 3f(%ebx,%ebx,2), %ebx
21315 testl %esi, %esi
21316 jmp *%ebx
21317 1: addl $64,%esi
21318 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
21319 jb 5f
21320 SRC( movw (%esi), %dx )
21321 leal 2(%esi), %esi
21322 -DST( movw %dx, (%edi) )
21323 +DST( movw %dx, %es:(%edi) )
21324 leal 2(%edi), %edi
21325 je 6f
21326 shll $16,%edx
21327 5:
21328 SRC( movb (%esi), %dl )
21329 -DST( movb %dl, (%edi) )
21330 +DST( movb %dl, %es:(%edi) )
21331 6: addl %edx, %eax
21332 adcl $0, %eax
21333 7:
21334 .section .fixup, "ax"
21335 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21336 - movl $-EFAULT, (%ebx)
21337 + movl $-EFAULT, %ss:(%ebx)
21338 # zero the complete destination (computing the rest is too much work)
21339 movl ARGBASE+8(%esp),%edi # dst
21340 movl ARGBASE+12(%esp),%ecx # len
21341 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
21342 rep; stosb
21343 jmp 7b
21344 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21345 - movl $-EFAULT, (%ebx)
21346 + movl $-EFAULT, %ss:(%ebx)
21347 jmp 7b
21348 .previous
21349
21350 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21351 + pushl_cfi %ss
21352 + popl_cfi %ds
21353 + pushl_cfi %ss
21354 + popl_cfi %es
21355 +#endif
21356 +
21357 popl_cfi %esi
21358 CFI_RESTORE esi
21359 popl_cfi %edi
21360 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
21361 CFI_RESTORE ebx
21362 ret
21363 CFI_ENDPROC
21364 -ENDPROC(csum_partial_copy_generic)
21365 +ENDPROC(csum_partial_copy_generic_to_user)
21366
21367 #undef ROUND
21368 #undef ROUND1
21369 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21370 index f2145cf..cea889d 100644
21371 --- a/arch/x86/lib/clear_page_64.S
21372 +++ b/arch/x86/lib/clear_page_64.S
21373 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21374 movl $4096/8,%ecx
21375 xorl %eax,%eax
21376 rep stosq
21377 + pax_force_retaddr
21378 ret
21379 CFI_ENDPROC
21380 ENDPROC(clear_page_c)
21381 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21382 movl $4096,%ecx
21383 xorl %eax,%eax
21384 rep stosb
21385 + pax_force_retaddr
21386 ret
21387 CFI_ENDPROC
21388 ENDPROC(clear_page_c_e)
21389 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21390 leaq 64(%rdi),%rdi
21391 jnz .Lloop
21392 nop
21393 + pax_force_retaddr
21394 ret
21395 CFI_ENDPROC
21396 .Lclear_page_end:
21397 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21398
21399 #include <asm/cpufeature.h>
21400
21401 - .section .altinstr_replacement,"ax"
21402 + .section .altinstr_replacement,"a"
21403 1: .byte 0xeb /* jmp <disp8> */
21404 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21405 2: .byte 0xeb /* jmp <disp8> */
21406 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21407 index 1e572c5..2a162cd 100644
21408 --- a/arch/x86/lib/cmpxchg16b_emu.S
21409 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21410 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21411
21412 popf
21413 mov $1, %al
21414 + pax_force_retaddr
21415 ret
21416
21417 not_same:
21418 popf
21419 xor %al,%al
21420 + pax_force_retaddr
21421 ret
21422
21423 CFI_ENDPROC
21424 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21425 index 6b34d04..dccb07f 100644
21426 --- a/arch/x86/lib/copy_page_64.S
21427 +++ b/arch/x86/lib/copy_page_64.S
21428 @@ -9,6 +9,7 @@ copy_page_c:
21429 CFI_STARTPROC
21430 movl $4096/8,%ecx
21431 rep movsq
21432 + pax_force_retaddr
21433 ret
21434 CFI_ENDPROC
21435 ENDPROC(copy_page_c)
21436 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21437
21438 ENTRY(copy_page)
21439 CFI_STARTPROC
21440 - subq $2*8,%rsp
21441 - CFI_ADJUST_CFA_OFFSET 2*8
21442 + subq $3*8,%rsp
21443 + CFI_ADJUST_CFA_OFFSET 3*8
21444 movq %rbx,(%rsp)
21445 CFI_REL_OFFSET rbx, 0
21446 movq %r12,1*8(%rsp)
21447 CFI_REL_OFFSET r12, 1*8
21448 + movq %r13,2*8(%rsp)
21449 + CFI_REL_OFFSET r13, 2*8
21450
21451 movl $(4096/64)-5,%ecx
21452 .p2align 4
21453 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21454 movq 16 (%rsi), %rdx
21455 movq 24 (%rsi), %r8
21456 movq 32 (%rsi), %r9
21457 - movq 40 (%rsi), %r10
21458 + movq 40 (%rsi), %r13
21459 movq 48 (%rsi), %r11
21460 movq 56 (%rsi), %r12
21461
21462 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21463 movq %rdx, 16 (%rdi)
21464 movq %r8, 24 (%rdi)
21465 movq %r9, 32 (%rdi)
21466 - movq %r10, 40 (%rdi)
21467 + movq %r13, 40 (%rdi)
21468 movq %r11, 48 (%rdi)
21469 movq %r12, 56 (%rdi)
21470
21471 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21472 movq 16 (%rsi), %rdx
21473 movq 24 (%rsi), %r8
21474 movq 32 (%rsi), %r9
21475 - movq 40 (%rsi), %r10
21476 + movq 40 (%rsi), %r13
21477 movq 48 (%rsi), %r11
21478 movq 56 (%rsi), %r12
21479
21480 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21481 movq %rdx, 16 (%rdi)
21482 movq %r8, 24 (%rdi)
21483 movq %r9, 32 (%rdi)
21484 - movq %r10, 40 (%rdi)
21485 + movq %r13, 40 (%rdi)
21486 movq %r11, 48 (%rdi)
21487 movq %r12, 56 (%rdi)
21488
21489 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21490 CFI_RESTORE rbx
21491 movq 1*8(%rsp),%r12
21492 CFI_RESTORE r12
21493 - addq $2*8,%rsp
21494 - CFI_ADJUST_CFA_OFFSET -2*8
21495 + movq 2*8(%rsp),%r13
21496 + CFI_RESTORE r13
21497 + addq $3*8,%rsp
21498 + CFI_ADJUST_CFA_OFFSET -3*8
21499 + pax_force_retaddr
21500 ret
21501 .Lcopy_page_end:
21502 CFI_ENDPROC
21503 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21504
21505 #include <asm/cpufeature.h>
21506
21507 - .section .altinstr_replacement,"ax"
21508 + .section .altinstr_replacement,"a"
21509 1: .byte 0xeb /* jmp <disp8> */
21510 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21511 2:
21512 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21513 index 5b2995f..78e7644 100644
21514 --- a/arch/x86/lib/copy_user_64.S
21515 +++ b/arch/x86/lib/copy_user_64.S
21516 @@ -17,6 +17,7 @@
21517 #include <asm/cpufeature.h>
21518 #include <asm/alternative-asm.h>
21519 #include <asm/asm.h>
21520 +#include <asm/pgtable.h>
21521
21522 /*
21523 * By placing feature2 after feature1 in altinstructions section, we logically
21524 @@ -30,7 +31,7 @@
21525 .byte 0xe9 /* 32bit jump */
21526 .long \orig-1f /* by default jump to orig */
21527 1:
21528 - .section .altinstr_replacement,"ax"
21529 + .section .altinstr_replacement,"a"
21530 2: .byte 0xe9 /* near jump with 32bit immediate */
21531 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21532 3: .byte 0xe9 /* near jump with 32bit immediate */
21533 @@ -69,47 +70,20 @@
21534 #endif
21535 .endm
21536
21537 -/* Standard copy_to_user with segment limit checking */
21538 -ENTRY(_copy_to_user)
21539 - CFI_STARTPROC
21540 - GET_THREAD_INFO(%rax)
21541 - movq %rdi,%rcx
21542 - addq %rdx,%rcx
21543 - jc bad_to_user
21544 - cmpq TI_addr_limit(%rax),%rcx
21545 - ja bad_to_user
21546 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21547 - copy_user_generic_unrolled,copy_user_generic_string, \
21548 - copy_user_enhanced_fast_string
21549 - CFI_ENDPROC
21550 -ENDPROC(_copy_to_user)
21551 -
21552 -/* Standard copy_from_user with segment limit checking */
21553 -ENTRY(_copy_from_user)
21554 - CFI_STARTPROC
21555 - GET_THREAD_INFO(%rax)
21556 - movq %rsi,%rcx
21557 - addq %rdx,%rcx
21558 - jc bad_from_user
21559 - cmpq TI_addr_limit(%rax),%rcx
21560 - ja bad_from_user
21561 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21562 - copy_user_generic_unrolled,copy_user_generic_string, \
21563 - copy_user_enhanced_fast_string
21564 - CFI_ENDPROC
21565 -ENDPROC(_copy_from_user)
21566 -
21567 .section .fixup,"ax"
21568 /* must zero dest */
21569 ENTRY(bad_from_user)
21570 bad_from_user:
21571 CFI_STARTPROC
21572 + testl %edx,%edx
21573 + js bad_to_user
21574 movl %edx,%ecx
21575 xorl %eax,%eax
21576 rep
21577 stosb
21578 bad_to_user:
21579 movl %edx,%eax
21580 + pax_force_retaddr
21581 ret
21582 CFI_ENDPROC
21583 ENDPROC(bad_from_user)
21584 @@ -139,19 +113,19 @@ ENTRY(copy_user_generic_unrolled)
21585 jz 17f
21586 1: movq (%rsi),%r8
21587 2: movq 1*8(%rsi),%r9
21588 -3: movq 2*8(%rsi),%r10
21589 +3: movq 2*8(%rsi),%rax
21590 4: movq 3*8(%rsi),%r11
21591 5: movq %r8,(%rdi)
21592 6: movq %r9,1*8(%rdi)
21593 -7: movq %r10,2*8(%rdi)
21594 +7: movq %rax,2*8(%rdi)
21595 8: movq %r11,3*8(%rdi)
21596 9: movq 4*8(%rsi),%r8
21597 10: movq 5*8(%rsi),%r9
21598 -11: movq 6*8(%rsi),%r10
21599 +11: movq 6*8(%rsi),%rax
21600 12: movq 7*8(%rsi),%r11
21601 13: movq %r8,4*8(%rdi)
21602 14: movq %r9,5*8(%rdi)
21603 -15: movq %r10,6*8(%rdi)
21604 +15: movq %rax,6*8(%rdi)
21605 16: movq %r11,7*8(%rdi)
21606 leaq 64(%rsi),%rsi
21607 leaq 64(%rdi),%rdi
21608 @@ -177,6 +151,7 @@ ENTRY(copy_user_generic_unrolled)
21609 decl %ecx
21610 jnz 21b
21611 23: xor %eax,%eax
21612 + pax_force_retaddr
21613 ret
21614
21615 .section .fixup,"ax"
21616 @@ -246,6 +221,7 @@ ENTRY(copy_user_generic_string)
21617 3: rep
21618 movsb
21619 4: xorl %eax,%eax
21620 + pax_force_retaddr
21621 ret
21622
21623 .section .fixup,"ax"
21624 @@ -279,6 +255,7 @@ ENTRY(copy_user_enhanced_fast_string)
21625 1: rep
21626 movsb
21627 2: xorl %eax,%eax
21628 + pax_force_retaddr
21629 ret
21630
21631 .section .fixup,"ax"
21632 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21633 index cacddc7..09d49e4 100644
21634 --- a/arch/x86/lib/copy_user_nocache_64.S
21635 +++ b/arch/x86/lib/copy_user_nocache_64.S
21636 @@ -8,6 +8,7 @@
21637
21638 #include <linux/linkage.h>
21639 #include <asm/dwarf2.h>
21640 +#include <asm/alternative-asm.h>
21641
21642 #define FIX_ALIGNMENT 1
21643
21644 @@ -15,6 +16,7 @@
21645 #include <asm/asm-offsets.h>
21646 #include <asm/thread_info.h>
21647 #include <asm/asm.h>
21648 +#include <asm/pgtable.h>
21649
21650 .macro ALIGN_DESTINATION
21651 #ifdef FIX_ALIGNMENT
21652 @@ -48,6 +50,15 @@
21653 */
21654 ENTRY(__copy_user_nocache)
21655 CFI_STARTPROC
21656 +
21657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21658 + mov $PAX_USER_SHADOW_BASE,%rcx
21659 + cmp %rcx,%rsi
21660 + jae 1f
21661 + add %rcx,%rsi
21662 +1:
21663 +#endif
21664 +
21665 cmpl $8,%edx
21666 jb 20f /* less then 8 bytes, go to byte copy loop */
21667 ALIGN_DESTINATION
21668 @@ -57,19 +68,19 @@ ENTRY(__copy_user_nocache)
21669 jz 17f
21670 1: movq (%rsi),%r8
21671 2: movq 1*8(%rsi),%r9
21672 -3: movq 2*8(%rsi),%r10
21673 +3: movq 2*8(%rsi),%rax
21674 4: movq 3*8(%rsi),%r11
21675 5: movnti %r8,(%rdi)
21676 6: movnti %r9,1*8(%rdi)
21677 -7: movnti %r10,2*8(%rdi)
21678 +7: movnti %rax,2*8(%rdi)
21679 8: movnti %r11,3*8(%rdi)
21680 9: movq 4*8(%rsi),%r8
21681 10: movq 5*8(%rsi),%r9
21682 -11: movq 6*8(%rsi),%r10
21683 +11: movq 6*8(%rsi),%rax
21684 12: movq 7*8(%rsi),%r11
21685 13: movnti %r8,4*8(%rdi)
21686 14: movnti %r9,5*8(%rdi)
21687 -15: movnti %r10,6*8(%rdi)
21688 +15: movnti %rax,6*8(%rdi)
21689 16: movnti %r11,7*8(%rdi)
21690 leaq 64(%rsi),%rsi
21691 leaq 64(%rdi),%rdi
21692 @@ -96,6 +107,7 @@ ENTRY(__copy_user_nocache)
21693 jnz 21b
21694 23: xorl %eax,%eax
21695 sfence
21696 + pax_force_retaddr
21697 ret
21698
21699 .section .fixup,"ax"
21700 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21701 index 2419d5f..953ee51 100644
21702 --- a/arch/x86/lib/csum-copy_64.S
21703 +++ b/arch/x86/lib/csum-copy_64.S
21704 @@ -9,6 +9,7 @@
21705 #include <asm/dwarf2.h>
21706 #include <asm/errno.h>
21707 #include <asm/asm.h>
21708 +#include <asm/alternative-asm.h>
21709
21710 /*
21711 * Checksum copy with exception handling.
21712 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
21713 CFI_RESTORE rbp
21714 addq $7*8, %rsp
21715 CFI_ADJUST_CFA_OFFSET -7*8
21716 + pax_force_retaddr 0, 1
21717 ret
21718 CFI_RESTORE_STATE
21719
21720 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21721 index 25b7ae8..3b52ccd 100644
21722 --- a/arch/x86/lib/csum-wrappers_64.c
21723 +++ b/arch/x86/lib/csum-wrappers_64.c
21724 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21725 len -= 2;
21726 }
21727 }
21728 - isum = csum_partial_copy_generic((__force const void *)src,
21729 +
21730 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21731 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21732 + src += PAX_USER_SHADOW_BASE;
21733 +#endif
21734 +
21735 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21736 dst, len, isum, errp, NULL);
21737 if (unlikely(*errp))
21738 goto out_err;
21739 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21740 }
21741
21742 *errp = 0;
21743 - return csum_partial_copy_generic(src, (void __force *)dst,
21744 +
21745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21746 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21747 + dst += PAX_USER_SHADOW_BASE;
21748 +#endif
21749 +
21750 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21751 len, isum, NULL, errp);
21752 }
21753 EXPORT_SYMBOL(csum_partial_copy_to_user);
21754 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21755 index b33b1fb..219f389 100644
21756 --- a/arch/x86/lib/getuser.S
21757 +++ b/arch/x86/lib/getuser.S
21758 @@ -33,15 +33,38 @@
21759 #include <asm/asm-offsets.h>
21760 #include <asm/thread_info.h>
21761 #include <asm/asm.h>
21762 +#include <asm/segment.h>
21763 +#include <asm/pgtable.h>
21764 +#include <asm/alternative-asm.h>
21765 +
21766 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21767 +#define __copyuser_seg gs;
21768 +#else
21769 +#define __copyuser_seg
21770 +#endif
21771
21772 .text
21773 ENTRY(__get_user_1)
21774 CFI_STARTPROC
21775 +
21776 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21777 GET_THREAD_INFO(%_ASM_DX)
21778 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21779 jae bad_get_user
21780 -1: movzb (%_ASM_AX),%edx
21781 +
21782 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21783 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21784 + cmp %_ASM_DX,%_ASM_AX
21785 + jae 1234f
21786 + add %_ASM_DX,%_ASM_AX
21787 +1234:
21788 +#endif
21789 +
21790 +#endif
21791 +
21792 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21793 xor %eax,%eax
21794 + pax_force_retaddr
21795 ret
21796 CFI_ENDPROC
21797 ENDPROC(__get_user_1)
21798 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21799 ENTRY(__get_user_2)
21800 CFI_STARTPROC
21801 add $1,%_ASM_AX
21802 +
21803 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21804 jc bad_get_user
21805 GET_THREAD_INFO(%_ASM_DX)
21806 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21807 jae bad_get_user
21808 -2: movzwl -1(%_ASM_AX),%edx
21809 +
21810 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21811 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21812 + cmp %_ASM_DX,%_ASM_AX
21813 + jae 1234f
21814 + add %_ASM_DX,%_ASM_AX
21815 +1234:
21816 +#endif
21817 +
21818 +#endif
21819 +
21820 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21821 xor %eax,%eax
21822 + pax_force_retaddr
21823 ret
21824 CFI_ENDPROC
21825 ENDPROC(__get_user_2)
21826 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21827 ENTRY(__get_user_4)
21828 CFI_STARTPROC
21829 add $3,%_ASM_AX
21830 +
21831 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21832 jc bad_get_user
21833 GET_THREAD_INFO(%_ASM_DX)
21834 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21835 jae bad_get_user
21836 -3: mov -3(%_ASM_AX),%edx
21837 +
21838 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21839 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21840 + cmp %_ASM_DX,%_ASM_AX
21841 + jae 1234f
21842 + add %_ASM_DX,%_ASM_AX
21843 +1234:
21844 +#endif
21845 +
21846 +#endif
21847 +
21848 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21849 xor %eax,%eax
21850 + pax_force_retaddr
21851 ret
21852 CFI_ENDPROC
21853 ENDPROC(__get_user_4)
21854 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21855 GET_THREAD_INFO(%_ASM_DX)
21856 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21857 jae bad_get_user
21858 +
21859 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21860 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21861 + cmp %_ASM_DX,%_ASM_AX
21862 + jae 1234f
21863 + add %_ASM_DX,%_ASM_AX
21864 +1234:
21865 +#endif
21866 +
21867 4: movq -7(%_ASM_AX),%_ASM_DX
21868 xor %eax,%eax
21869 + pax_force_retaddr
21870 ret
21871 CFI_ENDPROC
21872 ENDPROC(__get_user_8)
21873 @@ -91,6 +152,7 @@ bad_get_user:
21874 CFI_STARTPROC
21875 xor %edx,%edx
21876 mov $(-EFAULT),%_ASM_AX
21877 + pax_force_retaddr
21878 ret
21879 CFI_ENDPROC
21880 END(bad_get_user)
21881 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21882 index b1e6c4b..21ae8fc 100644
21883 --- a/arch/x86/lib/insn.c
21884 +++ b/arch/x86/lib/insn.c
21885 @@ -21,6 +21,11 @@
21886 #include <linux/string.h>
21887 #include <asm/inat.h>
21888 #include <asm/insn.h>
21889 +#ifdef __KERNEL__
21890 +#include <asm/pgtable_types.h>
21891 +#else
21892 +#define ktla_ktva(addr) addr
21893 +#endif
21894
21895 /* Verify next sizeof(t) bytes can be on the same instruction */
21896 #define validate_next(t, insn, n) \
21897 @@ -49,8 +54,8 @@
21898 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21899 {
21900 memset(insn, 0, sizeof(*insn));
21901 - insn->kaddr = kaddr;
21902 - insn->next_byte = kaddr;
21903 + insn->kaddr = ktla_ktva(kaddr);
21904 + insn->next_byte = ktla_ktva(kaddr);
21905 insn->x86_64 = x86_64 ? 1 : 0;
21906 insn->opnd_bytes = 4;
21907 if (x86_64)
21908 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21909 index 05a95e7..326f2fa 100644
21910 --- a/arch/x86/lib/iomap_copy_64.S
21911 +++ b/arch/x86/lib/iomap_copy_64.S
21912 @@ -17,6 +17,7 @@
21913
21914 #include <linux/linkage.h>
21915 #include <asm/dwarf2.h>
21916 +#include <asm/alternative-asm.h>
21917
21918 /*
21919 * override generic version in lib/iomap_copy.c
21920 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21921 CFI_STARTPROC
21922 movl %edx,%ecx
21923 rep movsd
21924 + pax_force_retaddr
21925 ret
21926 CFI_ENDPROC
21927 ENDPROC(__iowrite32_copy)
21928 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21929 index 1c273be..da9cc0e 100644
21930 --- a/arch/x86/lib/memcpy_64.S
21931 +++ b/arch/x86/lib/memcpy_64.S
21932 @@ -33,6 +33,7 @@
21933 rep movsq
21934 movl %edx, %ecx
21935 rep movsb
21936 + pax_force_retaddr
21937 ret
21938 .Lmemcpy_e:
21939 .previous
21940 @@ -49,6 +50,7 @@
21941 movq %rdi, %rax
21942 movq %rdx, %rcx
21943 rep movsb
21944 + pax_force_retaddr
21945 ret
21946 .Lmemcpy_e_e:
21947 .previous
21948 @@ -76,13 +78,13 @@ ENTRY(memcpy)
21949 */
21950 movq 0*8(%rsi), %r8
21951 movq 1*8(%rsi), %r9
21952 - movq 2*8(%rsi), %r10
21953 + movq 2*8(%rsi), %rcx
21954 movq 3*8(%rsi), %r11
21955 leaq 4*8(%rsi), %rsi
21956
21957 movq %r8, 0*8(%rdi)
21958 movq %r9, 1*8(%rdi)
21959 - movq %r10, 2*8(%rdi)
21960 + movq %rcx, 2*8(%rdi)
21961 movq %r11, 3*8(%rdi)
21962 leaq 4*8(%rdi), %rdi
21963 jae .Lcopy_forward_loop
21964 @@ -105,12 +107,12 @@ ENTRY(memcpy)
21965 subq $0x20, %rdx
21966 movq -1*8(%rsi), %r8
21967 movq -2*8(%rsi), %r9
21968 - movq -3*8(%rsi), %r10
21969 + movq -3*8(%rsi), %rcx
21970 movq -4*8(%rsi), %r11
21971 leaq -4*8(%rsi), %rsi
21972 movq %r8, -1*8(%rdi)
21973 movq %r9, -2*8(%rdi)
21974 - movq %r10, -3*8(%rdi)
21975 + movq %rcx, -3*8(%rdi)
21976 movq %r11, -4*8(%rdi)
21977 leaq -4*8(%rdi), %rdi
21978 jae .Lcopy_backward_loop
21979 @@ -130,12 +132,13 @@ ENTRY(memcpy)
21980 */
21981 movq 0*8(%rsi), %r8
21982 movq 1*8(%rsi), %r9
21983 - movq -2*8(%rsi, %rdx), %r10
21984 + movq -2*8(%rsi, %rdx), %rcx
21985 movq -1*8(%rsi, %rdx), %r11
21986 movq %r8, 0*8(%rdi)
21987 movq %r9, 1*8(%rdi)
21988 - movq %r10, -2*8(%rdi, %rdx)
21989 + movq %rcx, -2*8(%rdi, %rdx)
21990 movq %r11, -1*8(%rdi, %rdx)
21991 + pax_force_retaddr
21992 retq
21993 .p2align 4
21994 .Lless_16bytes:
21995 @@ -148,6 +151,7 @@ ENTRY(memcpy)
21996 movq -1*8(%rsi, %rdx), %r9
21997 movq %r8, 0*8(%rdi)
21998 movq %r9, -1*8(%rdi, %rdx)
21999 + pax_force_retaddr
22000 retq
22001 .p2align 4
22002 .Lless_8bytes:
22003 @@ -161,6 +165,7 @@ ENTRY(memcpy)
22004 movl -4(%rsi, %rdx), %r8d
22005 movl %ecx, (%rdi)
22006 movl %r8d, -4(%rdi, %rdx)
22007 + pax_force_retaddr
22008 retq
22009 .p2align 4
22010 .Lless_3bytes:
22011 @@ -179,6 +184,7 @@ ENTRY(memcpy)
22012 movb %cl, (%rdi)
22013
22014 .Lend:
22015 + pax_force_retaddr
22016 retq
22017 CFI_ENDPROC
22018 ENDPROC(memcpy)
22019 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22020 index ee16461..c39c199 100644
22021 --- a/arch/x86/lib/memmove_64.S
22022 +++ b/arch/x86/lib/memmove_64.S
22023 @@ -61,13 +61,13 @@ ENTRY(memmove)
22024 5:
22025 sub $0x20, %rdx
22026 movq 0*8(%rsi), %r11
22027 - movq 1*8(%rsi), %r10
22028 + movq 1*8(%rsi), %rcx
22029 movq 2*8(%rsi), %r9
22030 movq 3*8(%rsi), %r8
22031 leaq 4*8(%rsi), %rsi
22032
22033 movq %r11, 0*8(%rdi)
22034 - movq %r10, 1*8(%rdi)
22035 + movq %rcx, 1*8(%rdi)
22036 movq %r9, 2*8(%rdi)
22037 movq %r8, 3*8(%rdi)
22038 leaq 4*8(%rdi), %rdi
22039 @@ -81,10 +81,10 @@ ENTRY(memmove)
22040 4:
22041 movq %rdx, %rcx
22042 movq -8(%rsi, %rdx), %r11
22043 - lea -8(%rdi, %rdx), %r10
22044 + lea -8(%rdi, %rdx), %r9
22045 shrq $3, %rcx
22046 rep movsq
22047 - movq %r11, (%r10)
22048 + movq %r11, (%r9)
22049 jmp 13f
22050 .Lmemmove_end_forward:
22051
22052 @@ -95,14 +95,14 @@ ENTRY(memmove)
22053 7:
22054 movq %rdx, %rcx
22055 movq (%rsi), %r11
22056 - movq %rdi, %r10
22057 + movq %rdi, %r9
22058 leaq -8(%rsi, %rdx), %rsi
22059 leaq -8(%rdi, %rdx), %rdi
22060 shrq $3, %rcx
22061 std
22062 rep movsq
22063 cld
22064 - movq %r11, (%r10)
22065 + movq %r11, (%r9)
22066 jmp 13f
22067
22068 /*
22069 @@ -127,13 +127,13 @@ ENTRY(memmove)
22070 8:
22071 subq $0x20, %rdx
22072 movq -1*8(%rsi), %r11
22073 - movq -2*8(%rsi), %r10
22074 + movq -2*8(%rsi), %rcx
22075 movq -3*8(%rsi), %r9
22076 movq -4*8(%rsi), %r8
22077 leaq -4*8(%rsi), %rsi
22078
22079 movq %r11, -1*8(%rdi)
22080 - movq %r10, -2*8(%rdi)
22081 + movq %rcx, -2*8(%rdi)
22082 movq %r9, -3*8(%rdi)
22083 movq %r8, -4*8(%rdi)
22084 leaq -4*8(%rdi), %rdi
22085 @@ -151,11 +151,11 @@ ENTRY(memmove)
22086 * Move data from 16 bytes to 31 bytes.
22087 */
22088 movq 0*8(%rsi), %r11
22089 - movq 1*8(%rsi), %r10
22090 + movq 1*8(%rsi), %rcx
22091 movq -2*8(%rsi, %rdx), %r9
22092 movq -1*8(%rsi, %rdx), %r8
22093 movq %r11, 0*8(%rdi)
22094 - movq %r10, 1*8(%rdi)
22095 + movq %rcx, 1*8(%rdi)
22096 movq %r9, -2*8(%rdi, %rdx)
22097 movq %r8, -1*8(%rdi, %rdx)
22098 jmp 13f
22099 @@ -167,9 +167,9 @@ ENTRY(memmove)
22100 * Move data from 8 bytes to 15 bytes.
22101 */
22102 movq 0*8(%rsi), %r11
22103 - movq -1*8(%rsi, %rdx), %r10
22104 + movq -1*8(%rsi, %rdx), %r9
22105 movq %r11, 0*8(%rdi)
22106 - movq %r10, -1*8(%rdi, %rdx)
22107 + movq %r9, -1*8(%rdi, %rdx)
22108 jmp 13f
22109 10:
22110 cmpq $4, %rdx
22111 @@ -178,9 +178,9 @@ ENTRY(memmove)
22112 * Move data from 4 bytes to 7 bytes.
22113 */
22114 movl (%rsi), %r11d
22115 - movl -4(%rsi, %rdx), %r10d
22116 + movl -4(%rsi, %rdx), %r9d
22117 movl %r11d, (%rdi)
22118 - movl %r10d, -4(%rdi, %rdx)
22119 + movl %r9d, -4(%rdi, %rdx)
22120 jmp 13f
22121 11:
22122 cmp $2, %rdx
22123 @@ -189,9 +189,9 @@ ENTRY(memmove)
22124 * Move data from 2 bytes to 3 bytes.
22125 */
22126 movw (%rsi), %r11w
22127 - movw -2(%rsi, %rdx), %r10w
22128 + movw -2(%rsi, %rdx), %r9w
22129 movw %r11w, (%rdi)
22130 - movw %r10w, -2(%rdi, %rdx)
22131 + movw %r9w, -2(%rdi, %rdx)
22132 jmp 13f
22133 12:
22134 cmp $1, %rdx
22135 @@ -202,6 +202,7 @@ ENTRY(memmove)
22136 movb (%rsi), %r11b
22137 movb %r11b, (%rdi)
22138 13:
22139 + pax_force_retaddr
22140 retq
22141 CFI_ENDPROC
22142
22143 @@ -210,6 +211,7 @@ ENTRY(memmove)
22144 /* Forward moving data. */
22145 movq %rdx, %rcx
22146 rep movsb
22147 + pax_force_retaddr
22148 retq
22149 .Lmemmove_end_forward_efs:
22150 .previous
22151 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22152 index 2dcb380..963660a 100644
22153 --- a/arch/x86/lib/memset_64.S
22154 +++ b/arch/x86/lib/memset_64.S
22155 @@ -30,6 +30,7 @@
22156 movl %edx,%ecx
22157 rep stosb
22158 movq %r9,%rax
22159 + pax_force_retaddr
22160 ret
22161 .Lmemset_e:
22162 .previous
22163 @@ -52,6 +53,7 @@
22164 movq %rdx,%rcx
22165 rep stosb
22166 movq %r9,%rax
22167 + pax_force_retaddr
22168 ret
22169 .Lmemset_e_e:
22170 .previous
22171 @@ -59,7 +61,7 @@
22172 ENTRY(memset)
22173 ENTRY(__memset)
22174 CFI_STARTPROC
22175 - movq %rdi,%r10
22176 + movq %rdi,%r11
22177
22178 /* expand byte value */
22179 movzbl %sil,%ecx
22180 @@ -117,7 +119,8 @@ ENTRY(__memset)
22181 jnz .Lloop_1
22182
22183 .Lende:
22184 - movq %r10,%rax
22185 + movq %r11,%rax
22186 + pax_force_retaddr
22187 ret
22188
22189 CFI_RESTORE_STATE
22190 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22191 index c9f2d9b..e7fd2c0 100644
22192 --- a/arch/x86/lib/mmx_32.c
22193 +++ b/arch/x86/lib/mmx_32.c
22194 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22195 {
22196 void *p;
22197 int i;
22198 + unsigned long cr0;
22199
22200 if (unlikely(in_interrupt()))
22201 return __memcpy(to, from, len);
22202 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22203 kernel_fpu_begin();
22204
22205 __asm__ __volatile__ (
22206 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22207 - " prefetch 64(%0)\n"
22208 - " prefetch 128(%0)\n"
22209 - " prefetch 192(%0)\n"
22210 - " prefetch 256(%0)\n"
22211 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22212 + " prefetch 64(%1)\n"
22213 + " prefetch 128(%1)\n"
22214 + " prefetch 192(%1)\n"
22215 + " prefetch 256(%1)\n"
22216 "2: \n"
22217 ".section .fixup, \"ax\"\n"
22218 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22219 + "3: \n"
22220 +
22221 +#ifdef CONFIG_PAX_KERNEXEC
22222 + " movl %%cr0, %0\n"
22223 + " movl %0, %%eax\n"
22224 + " andl $0xFFFEFFFF, %%eax\n"
22225 + " movl %%eax, %%cr0\n"
22226 +#endif
22227 +
22228 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22229 +
22230 +#ifdef CONFIG_PAX_KERNEXEC
22231 + " movl %0, %%cr0\n"
22232 +#endif
22233 +
22234 " jmp 2b\n"
22235 ".previous\n"
22236 _ASM_EXTABLE(1b, 3b)
22237 - : : "r" (from));
22238 + : "=&r" (cr0) : "r" (from) : "ax");
22239
22240 for ( ; i > 5; i--) {
22241 __asm__ __volatile__ (
22242 - "1: prefetch 320(%0)\n"
22243 - "2: movq (%0), %%mm0\n"
22244 - " movq 8(%0), %%mm1\n"
22245 - " movq 16(%0), %%mm2\n"
22246 - " movq 24(%0), %%mm3\n"
22247 - " movq %%mm0, (%1)\n"
22248 - " movq %%mm1, 8(%1)\n"
22249 - " movq %%mm2, 16(%1)\n"
22250 - " movq %%mm3, 24(%1)\n"
22251 - " movq 32(%0), %%mm0\n"
22252 - " movq 40(%0), %%mm1\n"
22253 - " movq 48(%0), %%mm2\n"
22254 - " movq 56(%0), %%mm3\n"
22255 - " movq %%mm0, 32(%1)\n"
22256 - " movq %%mm1, 40(%1)\n"
22257 - " movq %%mm2, 48(%1)\n"
22258 - " movq %%mm3, 56(%1)\n"
22259 + "1: prefetch 320(%1)\n"
22260 + "2: movq (%1), %%mm0\n"
22261 + " movq 8(%1), %%mm1\n"
22262 + " movq 16(%1), %%mm2\n"
22263 + " movq 24(%1), %%mm3\n"
22264 + " movq %%mm0, (%2)\n"
22265 + " movq %%mm1, 8(%2)\n"
22266 + " movq %%mm2, 16(%2)\n"
22267 + " movq %%mm3, 24(%2)\n"
22268 + " movq 32(%1), %%mm0\n"
22269 + " movq 40(%1), %%mm1\n"
22270 + " movq 48(%1), %%mm2\n"
22271 + " movq 56(%1), %%mm3\n"
22272 + " movq %%mm0, 32(%2)\n"
22273 + " movq %%mm1, 40(%2)\n"
22274 + " movq %%mm2, 48(%2)\n"
22275 + " movq %%mm3, 56(%2)\n"
22276 ".section .fixup, \"ax\"\n"
22277 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22278 + "3:\n"
22279 +
22280 +#ifdef CONFIG_PAX_KERNEXEC
22281 + " movl %%cr0, %0\n"
22282 + " movl %0, %%eax\n"
22283 + " andl $0xFFFEFFFF, %%eax\n"
22284 + " movl %%eax, %%cr0\n"
22285 +#endif
22286 +
22287 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22288 +
22289 +#ifdef CONFIG_PAX_KERNEXEC
22290 + " movl %0, %%cr0\n"
22291 +#endif
22292 +
22293 " jmp 2b\n"
22294 ".previous\n"
22295 _ASM_EXTABLE(1b, 3b)
22296 - : : "r" (from), "r" (to) : "memory");
22297 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22298
22299 from += 64;
22300 to += 64;
22301 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22302 static void fast_copy_page(void *to, void *from)
22303 {
22304 int i;
22305 + unsigned long cr0;
22306
22307 kernel_fpu_begin();
22308
22309 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22310 * but that is for later. -AV
22311 */
22312 __asm__ __volatile__(
22313 - "1: prefetch (%0)\n"
22314 - " prefetch 64(%0)\n"
22315 - " prefetch 128(%0)\n"
22316 - " prefetch 192(%0)\n"
22317 - " prefetch 256(%0)\n"
22318 + "1: prefetch (%1)\n"
22319 + " prefetch 64(%1)\n"
22320 + " prefetch 128(%1)\n"
22321 + " prefetch 192(%1)\n"
22322 + " prefetch 256(%1)\n"
22323 "2: \n"
22324 ".section .fixup, \"ax\"\n"
22325 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22326 + "3: \n"
22327 +
22328 +#ifdef CONFIG_PAX_KERNEXEC
22329 + " movl %%cr0, %0\n"
22330 + " movl %0, %%eax\n"
22331 + " andl $0xFFFEFFFF, %%eax\n"
22332 + " movl %%eax, %%cr0\n"
22333 +#endif
22334 +
22335 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22336 +
22337 +#ifdef CONFIG_PAX_KERNEXEC
22338 + " movl %0, %%cr0\n"
22339 +#endif
22340 +
22341 " jmp 2b\n"
22342 ".previous\n"
22343 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22344 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22345
22346 for (i = 0; i < (4096-320)/64; i++) {
22347 __asm__ __volatile__ (
22348 - "1: prefetch 320(%0)\n"
22349 - "2: movq (%0), %%mm0\n"
22350 - " movntq %%mm0, (%1)\n"
22351 - " movq 8(%0), %%mm1\n"
22352 - " movntq %%mm1, 8(%1)\n"
22353 - " movq 16(%0), %%mm2\n"
22354 - " movntq %%mm2, 16(%1)\n"
22355 - " movq 24(%0), %%mm3\n"
22356 - " movntq %%mm3, 24(%1)\n"
22357 - " movq 32(%0), %%mm4\n"
22358 - " movntq %%mm4, 32(%1)\n"
22359 - " movq 40(%0), %%mm5\n"
22360 - " movntq %%mm5, 40(%1)\n"
22361 - " movq 48(%0), %%mm6\n"
22362 - " movntq %%mm6, 48(%1)\n"
22363 - " movq 56(%0), %%mm7\n"
22364 - " movntq %%mm7, 56(%1)\n"
22365 + "1: prefetch 320(%1)\n"
22366 + "2: movq (%1), %%mm0\n"
22367 + " movntq %%mm0, (%2)\n"
22368 + " movq 8(%1), %%mm1\n"
22369 + " movntq %%mm1, 8(%2)\n"
22370 + " movq 16(%1), %%mm2\n"
22371 + " movntq %%mm2, 16(%2)\n"
22372 + " movq 24(%1), %%mm3\n"
22373 + " movntq %%mm3, 24(%2)\n"
22374 + " movq 32(%1), %%mm4\n"
22375 + " movntq %%mm4, 32(%2)\n"
22376 + " movq 40(%1), %%mm5\n"
22377 + " movntq %%mm5, 40(%2)\n"
22378 + " movq 48(%1), %%mm6\n"
22379 + " movntq %%mm6, 48(%2)\n"
22380 + " movq 56(%1), %%mm7\n"
22381 + " movntq %%mm7, 56(%2)\n"
22382 ".section .fixup, \"ax\"\n"
22383 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22384 + "3:\n"
22385 +
22386 +#ifdef CONFIG_PAX_KERNEXEC
22387 + " movl %%cr0, %0\n"
22388 + " movl %0, %%eax\n"
22389 + " andl $0xFFFEFFFF, %%eax\n"
22390 + " movl %%eax, %%cr0\n"
22391 +#endif
22392 +
22393 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22394 +
22395 +#ifdef CONFIG_PAX_KERNEXEC
22396 + " movl %0, %%cr0\n"
22397 +#endif
22398 +
22399 " jmp 2b\n"
22400 ".previous\n"
22401 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22402 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22403
22404 from += 64;
22405 to += 64;
22406 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22407 static void fast_copy_page(void *to, void *from)
22408 {
22409 int i;
22410 + unsigned long cr0;
22411
22412 kernel_fpu_begin();
22413
22414 __asm__ __volatile__ (
22415 - "1: prefetch (%0)\n"
22416 - " prefetch 64(%0)\n"
22417 - " prefetch 128(%0)\n"
22418 - " prefetch 192(%0)\n"
22419 - " prefetch 256(%0)\n"
22420 + "1: prefetch (%1)\n"
22421 + " prefetch 64(%1)\n"
22422 + " prefetch 128(%1)\n"
22423 + " prefetch 192(%1)\n"
22424 + " prefetch 256(%1)\n"
22425 "2: \n"
22426 ".section .fixup, \"ax\"\n"
22427 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22428 + "3: \n"
22429 +
22430 +#ifdef CONFIG_PAX_KERNEXEC
22431 + " movl %%cr0, %0\n"
22432 + " movl %0, %%eax\n"
22433 + " andl $0xFFFEFFFF, %%eax\n"
22434 + " movl %%eax, %%cr0\n"
22435 +#endif
22436 +
22437 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22438 +
22439 +#ifdef CONFIG_PAX_KERNEXEC
22440 + " movl %0, %%cr0\n"
22441 +#endif
22442 +
22443 " jmp 2b\n"
22444 ".previous\n"
22445 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22446 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22447
22448 for (i = 0; i < 4096/64; i++) {
22449 __asm__ __volatile__ (
22450 - "1: prefetch 320(%0)\n"
22451 - "2: movq (%0), %%mm0\n"
22452 - " movq 8(%0), %%mm1\n"
22453 - " movq 16(%0), %%mm2\n"
22454 - " movq 24(%0), %%mm3\n"
22455 - " movq %%mm0, (%1)\n"
22456 - " movq %%mm1, 8(%1)\n"
22457 - " movq %%mm2, 16(%1)\n"
22458 - " movq %%mm3, 24(%1)\n"
22459 - " movq 32(%0), %%mm0\n"
22460 - " movq 40(%0), %%mm1\n"
22461 - " movq 48(%0), %%mm2\n"
22462 - " movq 56(%0), %%mm3\n"
22463 - " movq %%mm0, 32(%1)\n"
22464 - " movq %%mm1, 40(%1)\n"
22465 - " movq %%mm2, 48(%1)\n"
22466 - " movq %%mm3, 56(%1)\n"
22467 + "1: prefetch 320(%1)\n"
22468 + "2: movq (%1), %%mm0\n"
22469 + " movq 8(%1), %%mm1\n"
22470 + " movq 16(%1), %%mm2\n"
22471 + " movq 24(%1), %%mm3\n"
22472 + " movq %%mm0, (%2)\n"
22473 + " movq %%mm1, 8(%2)\n"
22474 + " movq %%mm2, 16(%2)\n"
22475 + " movq %%mm3, 24(%2)\n"
22476 + " movq 32(%1), %%mm0\n"
22477 + " movq 40(%1), %%mm1\n"
22478 + " movq 48(%1), %%mm2\n"
22479 + " movq 56(%1), %%mm3\n"
22480 + " movq %%mm0, 32(%2)\n"
22481 + " movq %%mm1, 40(%2)\n"
22482 + " movq %%mm2, 48(%2)\n"
22483 + " movq %%mm3, 56(%2)\n"
22484 ".section .fixup, \"ax\"\n"
22485 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22486 + "3:\n"
22487 +
22488 +#ifdef CONFIG_PAX_KERNEXEC
22489 + " movl %%cr0, %0\n"
22490 + " movl %0, %%eax\n"
22491 + " andl $0xFFFEFFFF, %%eax\n"
22492 + " movl %%eax, %%cr0\n"
22493 +#endif
22494 +
22495 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22496 +
22497 +#ifdef CONFIG_PAX_KERNEXEC
22498 + " movl %0, %%cr0\n"
22499 +#endif
22500 +
22501 " jmp 2b\n"
22502 ".previous\n"
22503 _ASM_EXTABLE(1b, 3b)
22504 - : : "r" (from), "r" (to) : "memory");
22505 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22506
22507 from += 64;
22508 to += 64;
22509 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22510 index 69fa106..adda88b 100644
22511 --- a/arch/x86/lib/msr-reg.S
22512 +++ b/arch/x86/lib/msr-reg.S
22513 @@ -3,6 +3,7 @@
22514 #include <asm/dwarf2.h>
22515 #include <asm/asm.h>
22516 #include <asm/msr.h>
22517 +#include <asm/alternative-asm.h>
22518
22519 #ifdef CONFIG_X86_64
22520 /*
22521 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22522 CFI_STARTPROC
22523 pushq_cfi %rbx
22524 pushq_cfi %rbp
22525 - movq %rdi, %r10 /* Save pointer */
22526 + movq %rdi, %r9 /* Save pointer */
22527 xorl %r11d, %r11d /* Return value */
22528 movl (%rdi), %eax
22529 movl 4(%rdi), %ecx
22530 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22531 movl 28(%rdi), %edi
22532 CFI_REMEMBER_STATE
22533 1: \op
22534 -2: movl %eax, (%r10)
22535 +2: movl %eax, (%r9)
22536 movl %r11d, %eax /* Return value */
22537 - movl %ecx, 4(%r10)
22538 - movl %edx, 8(%r10)
22539 - movl %ebx, 12(%r10)
22540 - movl %ebp, 20(%r10)
22541 - movl %esi, 24(%r10)
22542 - movl %edi, 28(%r10)
22543 + movl %ecx, 4(%r9)
22544 + movl %edx, 8(%r9)
22545 + movl %ebx, 12(%r9)
22546 + movl %ebp, 20(%r9)
22547 + movl %esi, 24(%r9)
22548 + movl %edi, 28(%r9)
22549 popq_cfi %rbp
22550 popq_cfi %rbx
22551 + pax_force_retaddr
22552 ret
22553 3:
22554 CFI_RESTORE_STATE
22555 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22556 index 7f951c8..ebd573a 100644
22557 --- a/arch/x86/lib/putuser.S
22558 +++ b/arch/x86/lib/putuser.S
22559 @@ -15,7 +15,9 @@
22560 #include <asm/thread_info.h>
22561 #include <asm/errno.h>
22562 #include <asm/asm.h>
22563 -
22564 +#include <asm/segment.h>
22565 +#include <asm/pgtable.h>
22566 +#include <asm/alternative-asm.h>
22567
22568 /*
22569 * __put_user_X
22570 @@ -29,52 +31,119 @@
22571 * as they get called from within inline assembly.
22572 */
22573
22574 -#define ENTER CFI_STARTPROC ; \
22575 - GET_THREAD_INFO(%_ASM_BX)
22576 -#define EXIT ret ; \
22577 +#define ENTER CFI_STARTPROC
22578 +#define EXIT pax_force_retaddr; ret ; \
22579 CFI_ENDPROC
22580
22581 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22582 +#define _DEST %_ASM_CX,%_ASM_BX
22583 +#else
22584 +#define _DEST %_ASM_CX
22585 +#endif
22586 +
22587 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22588 +#define __copyuser_seg gs;
22589 +#else
22590 +#define __copyuser_seg
22591 +#endif
22592 +
22593 .text
22594 ENTRY(__put_user_1)
22595 ENTER
22596 +
22597 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22598 + GET_THREAD_INFO(%_ASM_BX)
22599 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22600 jae bad_put_user
22601 -1: movb %al,(%_ASM_CX)
22602 +
22603 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22604 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22605 + cmp %_ASM_BX,%_ASM_CX
22606 + jb 1234f
22607 + xor %ebx,%ebx
22608 +1234:
22609 +#endif
22610 +
22611 +#endif
22612 +
22613 +1: __copyuser_seg movb %al,(_DEST)
22614 xor %eax,%eax
22615 EXIT
22616 ENDPROC(__put_user_1)
22617
22618 ENTRY(__put_user_2)
22619 ENTER
22620 +
22621 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22622 + GET_THREAD_INFO(%_ASM_BX)
22623 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22624 sub $1,%_ASM_BX
22625 cmp %_ASM_BX,%_ASM_CX
22626 jae bad_put_user
22627 -2: movw %ax,(%_ASM_CX)
22628 +
22629 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22630 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22631 + cmp %_ASM_BX,%_ASM_CX
22632 + jb 1234f
22633 + xor %ebx,%ebx
22634 +1234:
22635 +#endif
22636 +
22637 +#endif
22638 +
22639 +2: __copyuser_seg movw %ax,(_DEST)
22640 xor %eax,%eax
22641 EXIT
22642 ENDPROC(__put_user_2)
22643
22644 ENTRY(__put_user_4)
22645 ENTER
22646 +
22647 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22648 + GET_THREAD_INFO(%_ASM_BX)
22649 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22650 sub $3,%_ASM_BX
22651 cmp %_ASM_BX,%_ASM_CX
22652 jae bad_put_user
22653 -3: movl %eax,(%_ASM_CX)
22654 +
22655 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22656 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22657 + cmp %_ASM_BX,%_ASM_CX
22658 + jb 1234f
22659 + xor %ebx,%ebx
22660 +1234:
22661 +#endif
22662 +
22663 +#endif
22664 +
22665 +3: __copyuser_seg movl %eax,(_DEST)
22666 xor %eax,%eax
22667 EXIT
22668 ENDPROC(__put_user_4)
22669
22670 ENTRY(__put_user_8)
22671 ENTER
22672 +
22673 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22674 + GET_THREAD_INFO(%_ASM_BX)
22675 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22676 sub $7,%_ASM_BX
22677 cmp %_ASM_BX,%_ASM_CX
22678 jae bad_put_user
22679 -4: mov %_ASM_AX,(%_ASM_CX)
22680 +
22681 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22682 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22683 + cmp %_ASM_BX,%_ASM_CX
22684 + jb 1234f
22685 + xor %ebx,%ebx
22686 +1234:
22687 +#endif
22688 +
22689 +#endif
22690 +
22691 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22692 #ifdef CONFIG_X86_32
22693 -5: movl %edx,4(%_ASM_CX)
22694 +5: __copyuser_seg movl %edx,4(_DEST)
22695 #endif
22696 xor %eax,%eax
22697 EXIT
22698 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22699 index 1cad221..de671ee 100644
22700 --- a/arch/x86/lib/rwlock.S
22701 +++ b/arch/x86/lib/rwlock.S
22702 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22703 FRAME
22704 0: LOCK_PREFIX
22705 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22706 +
22707 +#ifdef CONFIG_PAX_REFCOUNT
22708 + jno 1234f
22709 + LOCK_PREFIX
22710 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22711 + int $4
22712 +1234:
22713 + _ASM_EXTABLE(1234b, 1234b)
22714 +#endif
22715 +
22716 1: rep; nop
22717 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22718 jne 1b
22719 LOCK_PREFIX
22720 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22721 +
22722 +#ifdef CONFIG_PAX_REFCOUNT
22723 + jno 1234f
22724 + LOCK_PREFIX
22725 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22726 + int $4
22727 +1234:
22728 + _ASM_EXTABLE(1234b, 1234b)
22729 +#endif
22730 +
22731 jnz 0b
22732 ENDFRAME
22733 + pax_force_retaddr
22734 ret
22735 CFI_ENDPROC
22736 END(__write_lock_failed)
22737 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22738 FRAME
22739 0: LOCK_PREFIX
22740 READ_LOCK_SIZE(inc) (%__lock_ptr)
22741 +
22742 +#ifdef CONFIG_PAX_REFCOUNT
22743 + jno 1234f
22744 + LOCK_PREFIX
22745 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22746 + int $4
22747 +1234:
22748 + _ASM_EXTABLE(1234b, 1234b)
22749 +#endif
22750 +
22751 1: rep; nop
22752 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22753 js 1b
22754 LOCK_PREFIX
22755 READ_LOCK_SIZE(dec) (%__lock_ptr)
22756 +
22757 +#ifdef CONFIG_PAX_REFCOUNT
22758 + jno 1234f
22759 + LOCK_PREFIX
22760 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22761 + int $4
22762 +1234:
22763 + _ASM_EXTABLE(1234b, 1234b)
22764 +#endif
22765 +
22766 js 0b
22767 ENDFRAME
22768 + pax_force_retaddr
22769 ret
22770 CFI_ENDPROC
22771 END(__read_lock_failed)
22772 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22773 index 5dff5f0..cadebf4 100644
22774 --- a/arch/x86/lib/rwsem.S
22775 +++ b/arch/x86/lib/rwsem.S
22776 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22777 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22778 CFI_RESTORE __ASM_REG(dx)
22779 restore_common_regs
22780 + pax_force_retaddr
22781 ret
22782 CFI_ENDPROC
22783 ENDPROC(call_rwsem_down_read_failed)
22784 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22785 movq %rax,%rdi
22786 call rwsem_down_write_failed
22787 restore_common_regs
22788 + pax_force_retaddr
22789 ret
22790 CFI_ENDPROC
22791 ENDPROC(call_rwsem_down_write_failed)
22792 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22793 movq %rax,%rdi
22794 call rwsem_wake
22795 restore_common_regs
22796 -1: ret
22797 +1: pax_force_retaddr
22798 + ret
22799 CFI_ENDPROC
22800 ENDPROC(call_rwsem_wake)
22801
22802 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22803 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22804 CFI_RESTORE __ASM_REG(dx)
22805 restore_common_regs
22806 + pax_force_retaddr
22807 ret
22808 CFI_ENDPROC
22809 ENDPROC(call_rwsem_downgrade_wake)
22810 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22811 index a63efd6..ccecad8 100644
22812 --- a/arch/x86/lib/thunk_64.S
22813 +++ b/arch/x86/lib/thunk_64.S
22814 @@ -8,6 +8,7 @@
22815 #include <linux/linkage.h>
22816 #include <asm/dwarf2.h>
22817 #include <asm/calling.h>
22818 +#include <asm/alternative-asm.h>
22819
22820 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22821 .macro THUNK name, func, put_ret_addr_in_rdi=0
22822 @@ -41,5 +42,6 @@
22823 SAVE_ARGS
22824 restore:
22825 RESTORE_ARGS
22826 + pax_force_retaddr
22827 ret
22828 CFI_ENDPROC
22829 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22830 index 1781b2f..90368dd 100644
22831 --- a/arch/x86/lib/usercopy_32.c
22832 +++ b/arch/x86/lib/usercopy_32.c
22833 @@ -42,10 +42,12 @@ do { \
22834 int __d0; \
22835 might_fault(); \
22836 __asm__ __volatile__( \
22837 + __COPYUSER_SET_ES \
22838 "0: rep; stosl\n" \
22839 " movl %2,%0\n" \
22840 "1: rep; stosb\n" \
22841 "2:\n" \
22842 + __COPYUSER_RESTORE_ES \
22843 ".section .fixup,\"ax\"\n" \
22844 "3: lea 0(%2,%0,4),%0\n" \
22845 " jmp 2b\n" \
22846 @@ -97,7 +99,7 @@ EXPORT_SYMBOL(__clear_user);
22847
22848 #ifdef CONFIG_X86_INTEL_USERCOPY
22849 static unsigned long
22850 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22851 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22852 {
22853 int d0, d1;
22854 __asm__ __volatile__(
22855 @@ -109,36 +111,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22856 " .align 2,0x90\n"
22857 "3: movl 0(%4), %%eax\n"
22858 "4: movl 4(%4), %%edx\n"
22859 - "5: movl %%eax, 0(%3)\n"
22860 - "6: movl %%edx, 4(%3)\n"
22861 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22862 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22863 "7: movl 8(%4), %%eax\n"
22864 "8: movl 12(%4),%%edx\n"
22865 - "9: movl %%eax, 8(%3)\n"
22866 - "10: movl %%edx, 12(%3)\n"
22867 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22868 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22869 "11: movl 16(%4), %%eax\n"
22870 "12: movl 20(%4), %%edx\n"
22871 - "13: movl %%eax, 16(%3)\n"
22872 - "14: movl %%edx, 20(%3)\n"
22873 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22874 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22875 "15: movl 24(%4), %%eax\n"
22876 "16: movl 28(%4), %%edx\n"
22877 - "17: movl %%eax, 24(%3)\n"
22878 - "18: movl %%edx, 28(%3)\n"
22879 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22880 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22881 "19: movl 32(%4), %%eax\n"
22882 "20: movl 36(%4), %%edx\n"
22883 - "21: movl %%eax, 32(%3)\n"
22884 - "22: movl %%edx, 36(%3)\n"
22885 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22886 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22887 "23: movl 40(%4), %%eax\n"
22888 "24: movl 44(%4), %%edx\n"
22889 - "25: movl %%eax, 40(%3)\n"
22890 - "26: movl %%edx, 44(%3)\n"
22891 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22892 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22893 "27: movl 48(%4), %%eax\n"
22894 "28: movl 52(%4), %%edx\n"
22895 - "29: movl %%eax, 48(%3)\n"
22896 - "30: movl %%edx, 52(%3)\n"
22897 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22898 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22899 "31: movl 56(%4), %%eax\n"
22900 "32: movl 60(%4), %%edx\n"
22901 - "33: movl %%eax, 56(%3)\n"
22902 - "34: movl %%edx, 60(%3)\n"
22903 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22904 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22905 " addl $-64, %0\n"
22906 " addl $64, %4\n"
22907 " addl $64, %3\n"
22908 @@ -148,10 +150,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22909 " shrl $2, %0\n"
22910 " andl $3, %%eax\n"
22911 " cld\n"
22912 + __COPYUSER_SET_ES
22913 "99: rep; movsl\n"
22914 "36: movl %%eax, %0\n"
22915 "37: rep; movsb\n"
22916 "100:\n"
22917 + __COPYUSER_RESTORE_ES
22918 ".section .fixup,\"ax\"\n"
22919 "101: lea 0(%%eax,%0,4),%0\n"
22920 " jmp 100b\n"
22921 @@ -201,46 +205,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22922 }
22923
22924 static unsigned long
22925 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22926 +{
22927 + int d0, d1;
22928 + __asm__ __volatile__(
22929 + " .align 2,0x90\n"
22930 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22931 + " cmpl $67, %0\n"
22932 + " jbe 3f\n"
22933 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22934 + " .align 2,0x90\n"
22935 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22936 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22937 + "5: movl %%eax, 0(%3)\n"
22938 + "6: movl %%edx, 4(%3)\n"
22939 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22940 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22941 + "9: movl %%eax, 8(%3)\n"
22942 + "10: movl %%edx, 12(%3)\n"
22943 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22944 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22945 + "13: movl %%eax, 16(%3)\n"
22946 + "14: movl %%edx, 20(%3)\n"
22947 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22948 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22949 + "17: movl %%eax, 24(%3)\n"
22950 + "18: movl %%edx, 28(%3)\n"
22951 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22952 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22953 + "21: movl %%eax, 32(%3)\n"
22954 + "22: movl %%edx, 36(%3)\n"
22955 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22956 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22957 + "25: movl %%eax, 40(%3)\n"
22958 + "26: movl %%edx, 44(%3)\n"
22959 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22960 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22961 + "29: movl %%eax, 48(%3)\n"
22962 + "30: movl %%edx, 52(%3)\n"
22963 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22964 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22965 + "33: movl %%eax, 56(%3)\n"
22966 + "34: movl %%edx, 60(%3)\n"
22967 + " addl $-64, %0\n"
22968 + " addl $64, %4\n"
22969 + " addl $64, %3\n"
22970 + " cmpl $63, %0\n"
22971 + " ja 1b\n"
22972 + "35: movl %0, %%eax\n"
22973 + " shrl $2, %0\n"
22974 + " andl $3, %%eax\n"
22975 + " cld\n"
22976 + "99: rep; "__copyuser_seg" movsl\n"
22977 + "36: movl %%eax, %0\n"
22978 + "37: rep; "__copyuser_seg" movsb\n"
22979 + "100:\n"
22980 + ".section .fixup,\"ax\"\n"
22981 + "101: lea 0(%%eax,%0,4),%0\n"
22982 + " jmp 100b\n"
22983 + ".previous\n"
22984 + _ASM_EXTABLE(1b,100b)
22985 + _ASM_EXTABLE(2b,100b)
22986 + _ASM_EXTABLE(3b,100b)
22987 + _ASM_EXTABLE(4b,100b)
22988 + _ASM_EXTABLE(5b,100b)
22989 + _ASM_EXTABLE(6b,100b)
22990 + _ASM_EXTABLE(7b,100b)
22991 + _ASM_EXTABLE(8b,100b)
22992 + _ASM_EXTABLE(9b,100b)
22993 + _ASM_EXTABLE(10b,100b)
22994 + _ASM_EXTABLE(11b,100b)
22995 + _ASM_EXTABLE(12b,100b)
22996 + _ASM_EXTABLE(13b,100b)
22997 + _ASM_EXTABLE(14b,100b)
22998 + _ASM_EXTABLE(15b,100b)
22999 + _ASM_EXTABLE(16b,100b)
23000 + _ASM_EXTABLE(17b,100b)
23001 + _ASM_EXTABLE(18b,100b)
23002 + _ASM_EXTABLE(19b,100b)
23003 + _ASM_EXTABLE(20b,100b)
23004 + _ASM_EXTABLE(21b,100b)
23005 + _ASM_EXTABLE(22b,100b)
23006 + _ASM_EXTABLE(23b,100b)
23007 + _ASM_EXTABLE(24b,100b)
23008 + _ASM_EXTABLE(25b,100b)
23009 + _ASM_EXTABLE(26b,100b)
23010 + _ASM_EXTABLE(27b,100b)
23011 + _ASM_EXTABLE(28b,100b)
23012 + _ASM_EXTABLE(29b,100b)
23013 + _ASM_EXTABLE(30b,100b)
23014 + _ASM_EXTABLE(31b,100b)
23015 + _ASM_EXTABLE(32b,100b)
23016 + _ASM_EXTABLE(33b,100b)
23017 + _ASM_EXTABLE(34b,100b)
23018 + _ASM_EXTABLE(35b,100b)
23019 + _ASM_EXTABLE(36b,100b)
23020 + _ASM_EXTABLE(37b,100b)
23021 + _ASM_EXTABLE(99b,101b)
23022 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23023 + : "1"(to), "2"(from), "0"(size)
23024 + : "eax", "edx", "memory");
23025 + return size;
23026 +}
23027 +
23028 +static unsigned long __size_overflow(3)
23029 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23030 {
23031 int d0, d1;
23032 __asm__ __volatile__(
23033 " .align 2,0x90\n"
23034 - "0: movl 32(%4), %%eax\n"
23035 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23036 " cmpl $67, %0\n"
23037 " jbe 2f\n"
23038 - "1: movl 64(%4), %%eax\n"
23039 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23040 " .align 2,0x90\n"
23041 - "2: movl 0(%4), %%eax\n"
23042 - "21: movl 4(%4), %%edx\n"
23043 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23044 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23045 " movl %%eax, 0(%3)\n"
23046 " movl %%edx, 4(%3)\n"
23047 - "3: movl 8(%4), %%eax\n"
23048 - "31: movl 12(%4),%%edx\n"
23049 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23050 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23051 " movl %%eax, 8(%3)\n"
23052 " movl %%edx, 12(%3)\n"
23053 - "4: movl 16(%4), %%eax\n"
23054 - "41: movl 20(%4), %%edx\n"
23055 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23056 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23057 " movl %%eax, 16(%3)\n"
23058 " movl %%edx, 20(%3)\n"
23059 - "10: movl 24(%4), %%eax\n"
23060 - "51: movl 28(%4), %%edx\n"
23061 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23062 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23063 " movl %%eax, 24(%3)\n"
23064 " movl %%edx, 28(%3)\n"
23065 - "11: movl 32(%4), %%eax\n"
23066 - "61: movl 36(%4), %%edx\n"
23067 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23068 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23069 " movl %%eax, 32(%3)\n"
23070 " movl %%edx, 36(%3)\n"
23071 - "12: movl 40(%4), %%eax\n"
23072 - "71: movl 44(%4), %%edx\n"
23073 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23074 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23075 " movl %%eax, 40(%3)\n"
23076 " movl %%edx, 44(%3)\n"
23077 - "13: movl 48(%4), %%eax\n"
23078 - "81: movl 52(%4), %%edx\n"
23079 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23080 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23081 " movl %%eax, 48(%3)\n"
23082 " movl %%edx, 52(%3)\n"
23083 - "14: movl 56(%4), %%eax\n"
23084 - "91: movl 60(%4), %%edx\n"
23085 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23086 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23087 " movl %%eax, 56(%3)\n"
23088 " movl %%edx, 60(%3)\n"
23089 " addl $-64, %0\n"
23090 @@ -252,9 +360,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23091 " shrl $2, %0\n"
23092 " andl $3, %%eax\n"
23093 " cld\n"
23094 - "6: rep; movsl\n"
23095 + "6: rep; "__copyuser_seg" movsl\n"
23096 " movl %%eax,%0\n"
23097 - "7: rep; movsb\n"
23098 + "7: rep; "__copyuser_seg" movsb\n"
23099 "8:\n"
23100 ".section .fixup,\"ax\"\n"
23101 "9: lea 0(%%eax,%0,4),%0\n"
23102 @@ -297,48 +405,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23103 * hyoshiok@miraclelinux.com
23104 */
23105
23106 -static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23107 +static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
23108 const void __user *from, unsigned long size)
23109 {
23110 int d0, d1;
23111
23112 __asm__ __volatile__(
23113 " .align 2,0x90\n"
23114 - "0: movl 32(%4), %%eax\n"
23115 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23116 " cmpl $67, %0\n"
23117 " jbe 2f\n"
23118 - "1: movl 64(%4), %%eax\n"
23119 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23120 " .align 2,0x90\n"
23121 - "2: movl 0(%4), %%eax\n"
23122 - "21: movl 4(%4), %%edx\n"
23123 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23124 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23125 " movnti %%eax, 0(%3)\n"
23126 " movnti %%edx, 4(%3)\n"
23127 - "3: movl 8(%4), %%eax\n"
23128 - "31: movl 12(%4),%%edx\n"
23129 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23130 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23131 " movnti %%eax, 8(%3)\n"
23132 " movnti %%edx, 12(%3)\n"
23133 - "4: movl 16(%4), %%eax\n"
23134 - "41: movl 20(%4), %%edx\n"
23135 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23136 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23137 " movnti %%eax, 16(%3)\n"
23138 " movnti %%edx, 20(%3)\n"
23139 - "10: movl 24(%4), %%eax\n"
23140 - "51: movl 28(%4), %%edx\n"
23141 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23142 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23143 " movnti %%eax, 24(%3)\n"
23144 " movnti %%edx, 28(%3)\n"
23145 - "11: movl 32(%4), %%eax\n"
23146 - "61: movl 36(%4), %%edx\n"
23147 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23148 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23149 " movnti %%eax, 32(%3)\n"
23150 " movnti %%edx, 36(%3)\n"
23151 - "12: movl 40(%4), %%eax\n"
23152 - "71: movl 44(%4), %%edx\n"
23153 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23154 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23155 " movnti %%eax, 40(%3)\n"
23156 " movnti %%edx, 44(%3)\n"
23157 - "13: movl 48(%4), %%eax\n"
23158 - "81: movl 52(%4), %%edx\n"
23159 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23160 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23161 " movnti %%eax, 48(%3)\n"
23162 " movnti %%edx, 52(%3)\n"
23163 - "14: movl 56(%4), %%eax\n"
23164 - "91: movl 60(%4), %%edx\n"
23165 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23166 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23167 " movnti %%eax, 56(%3)\n"
23168 " movnti %%edx, 60(%3)\n"
23169 " addl $-64, %0\n"
23170 @@ -351,9 +459,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23171 " shrl $2, %0\n"
23172 " andl $3, %%eax\n"
23173 " cld\n"
23174 - "6: rep; movsl\n"
23175 + "6: rep; "__copyuser_seg" movsl\n"
23176 " movl %%eax,%0\n"
23177 - "7: rep; movsb\n"
23178 + "7: rep; "__copyuser_seg" movsb\n"
23179 "8:\n"
23180 ".section .fixup,\"ax\"\n"
23181 "9: lea 0(%%eax,%0,4),%0\n"
23182 @@ -391,48 +499,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23183 return size;
23184 }
23185
23186 -static unsigned long __copy_user_intel_nocache(void *to,
23187 +static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
23188 const void __user *from, unsigned long size)
23189 {
23190 int d0, d1;
23191
23192 __asm__ __volatile__(
23193 " .align 2,0x90\n"
23194 - "0: movl 32(%4), %%eax\n"
23195 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23196 " cmpl $67, %0\n"
23197 " jbe 2f\n"
23198 - "1: movl 64(%4), %%eax\n"
23199 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23200 " .align 2,0x90\n"
23201 - "2: movl 0(%4), %%eax\n"
23202 - "21: movl 4(%4), %%edx\n"
23203 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23204 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23205 " movnti %%eax, 0(%3)\n"
23206 " movnti %%edx, 4(%3)\n"
23207 - "3: movl 8(%4), %%eax\n"
23208 - "31: movl 12(%4),%%edx\n"
23209 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23210 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23211 " movnti %%eax, 8(%3)\n"
23212 " movnti %%edx, 12(%3)\n"
23213 - "4: movl 16(%4), %%eax\n"
23214 - "41: movl 20(%4), %%edx\n"
23215 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23216 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23217 " movnti %%eax, 16(%3)\n"
23218 " movnti %%edx, 20(%3)\n"
23219 - "10: movl 24(%4), %%eax\n"
23220 - "51: movl 28(%4), %%edx\n"
23221 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23222 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23223 " movnti %%eax, 24(%3)\n"
23224 " movnti %%edx, 28(%3)\n"
23225 - "11: movl 32(%4), %%eax\n"
23226 - "61: movl 36(%4), %%edx\n"
23227 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23228 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23229 " movnti %%eax, 32(%3)\n"
23230 " movnti %%edx, 36(%3)\n"
23231 - "12: movl 40(%4), %%eax\n"
23232 - "71: movl 44(%4), %%edx\n"
23233 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23234 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23235 " movnti %%eax, 40(%3)\n"
23236 " movnti %%edx, 44(%3)\n"
23237 - "13: movl 48(%4), %%eax\n"
23238 - "81: movl 52(%4), %%edx\n"
23239 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23240 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23241 " movnti %%eax, 48(%3)\n"
23242 " movnti %%edx, 52(%3)\n"
23243 - "14: movl 56(%4), %%eax\n"
23244 - "91: movl 60(%4), %%edx\n"
23245 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23246 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23247 " movnti %%eax, 56(%3)\n"
23248 " movnti %%edx, 60(%3)\n"
23249 " addl $-64, %0\n"
23250 @@ -445,9 +553,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23251 " shrl $2, %0\n"
23252 " andl $3, %%eax\n"
23253 " cld\n"
23254 - "6: rep; movsl\n"
23255 + "6: rep; "__copyuser_seg" movsl\n"
23256 " movl %%eax,%0\n"
23257 - "7: rep; movsb\n"
23258 + "7: rep; "__copyuser_seg" movsb\n"
23259 "8:\n"
23260 ".section .fixup,\"ax\"\n"
23261 "9: lea 0(%%eax,%0,4),%0\n"
23262 @@ -487,32 +595,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23263 */
23264 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23265 unsigned long size);
23266 -unsigned long __copy_user_intel(void __user *to, const void *from,
23267 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23268 + unsigned long size);
23269 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23270 unsigned long size);
23271 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23272 const void __user *from, unsigned long size);
23273 #endif /* CONFIG_X86_INTEL_USERCOPY */
23274
23275 /* Generic arbitrary sized copy. */
23276 -#define __copy_user(to, from, size) \
23277 +#define __copy_user(to, from, size, prefix, set, restore) \
23278 do { \
23279 int __d0, __d1, __d2; \
23280 __asm__ __volatile__( \
23281 + set \
23282 " cmp $7,%0\n" \
23283 " jbe 1f\n" \
23284 " movl %1,%0\n" \
23285 " negl %0\n" \
23286 " andl $7,%0\n" \
23287 " subl %0,%3\n" \
23288 - "4: rep; movsb\n" \
23289 + "4: rep; "prefix"movsb\n" \
23290 " movl %3,%0\n" \
23291 " shrl $2,%0\n" \
23292 " andl $3,%3\n" \
23293 " .align 2,0x90\n" \
23294 - "0: rep; movsl\n" \
23295 + "0: rep; "prefix"movsl\n" \
23296 " movl %3,%0\n" \
23297 - "1: rep; movsb\n" \
23298 + "1: rep; "prefix"movsb\n" \
23299 "2:\n" \
23300 + restore \
23301 ".section .fixup,\"ax\"\n" \
23302 "5: addl %3,%0\n" \
23303 " jmp 2b\n" \
23304 @@ -537,14 +649,14 @@ do { \
23305 " negl %0\n" \
23306 " andl $7,%0\n" \
23307 " subl %0,%3\n" \
23308 - "4: rep; movsb\n" \
23309 + "4: rep; "__copyuser_seg"movsb\n" \
23310 " movl %3,%0\n" \
23311 " shrl $2,%0\n" \
23312 " andl $3,%3\n" \
23313 " .align 2,0x90\n" \
23314 - "0: rep; movsl\n" \
23315 + "0: rep; "__copyuser_seg"movsl\n" \
23316 " movl %3,%0\n" \
23317 - "1: rep; movsb\n" \
23318 + "1: rep; "__copyuser_seg"movsb\n" \
23319 "2:\n" \
23320 ".section .fixup,\"ax\"\n" \
23321 "5: addl %3,%0\n" \
23322 @@ -627,9 +739,9 @@ survive:
23323 }
23324 #endif
23325 if (movsl_is_ok(to, from, n))
23326 - __copy_user(to, from, n);
23327 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23328 else
23329 - n = __copy_user_intel(to, from, n);
23330 + n = __generic_copy_to_user_intel(to, from, n);
23331 return n;
23332 }
23333 EXPORT_SYMBOL(__copy_to_user_ll);
23334 @@ -649,10 +761,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23335 unsigned long n)
23336 {
23337 if (movsl_is_ok(to, from, n))
23338 - __copy_user(to, from, n);
23339 + __copy_user(to, from, n, __copyuser_seg, "", "");
23340 else
23341 - n = __copy_user_intel((void __user *)to,
23342 - (const void *)from, n);
23343 + n = __generic_copy_from_user_intel(to, from, n);
23344 return n;
23345 }
23346 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23347 @@ -679,65 +790,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23348 if (n > 64 && cpu_has_xmm2)
23349 n = __copy_user_intel_nocache(to, from, n);
23350 else
23351 - __copy_user(to, from, n);
23352 + __copy_user(to, from, n, __copyuser_seg, "", "");
23353 #else
23354 - __copy_user(to, from, n);
23355 + __copy_user(to, from, n, __copyuser_seg, "", "");
23356 #endif
23357 return n;
23358 }
23359 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23360
23361 -/**
23362 - * copy_to_user: - Copy a block of data into user space.
23363 - * @to: Destination address, in user space.
23364 - * @from: Source address, in kernel space.
23365 - * @n: Number of bytes to copy.
23366 - *
23367 - * Context: User context only. This function may sleep.
23368 - *
23369 - * Copy data from kernel space to user space.
23370 - *
23371 - * Returns number of bytes that could not be copied.
23372 - * On success, this will be zero.
23373 - */
23374 -unsigned long
23375 -copy_to_user(void __user *to, const void *from, unsigned long n)
23376 -{
23377 - if (access_ok(VERIFY_WRITE, to, n))
23378 - n = __copy_to_user(to, from, n);
23379 - return n;
23380 -}
23381 -EXPORT_SYMBOL(copy_to_user);
23382 -
23383 -/**
23384 - * copy_from_user: - Copy a block of data from user space.
23385 - * @to: Destination address, in kernel space.
23386 - * @from: Source address, in user space.
23387 - * @n: Number of bytes to copy.
23388 - *
23389 - * Context: User context only. This function may sleep.
23390 - *
23391 - * Copy data from user space to kernel space.
23392 - *
23393 - * Returns number of bytes that could not be copied.
23394 - * On success, this will be zero.
23395 - *
23396 - * If some data could not be copied, this function will pad the copied
23397 - * data to the requested size using zero bytes.
23398 - */
23399 -unsigned long
23400 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23401 -{
23402 - if (access_ok(VERIFY_READ, from, n))
23403 - n = __copy_from_user(to, from, n);
23404 - else
23405 - memset(to, 0, n);
23406 - return n;
23407 -}
23408 -EXPORT_SYMBOL(_copy_from_user);
23409 -
23410 void copy_from_user_overflow(void)
23411 {
23412 WARN(1, "Buffer overflow detected!\n");
23413 }
23414 EXPORT_SYMBOL(copy_from_user_overflow);
23415 +
23416 +void copy_to_user_overflow(void)
23417 +{
23418 + WARN(1, "Buffer overflow detected!\n");
23419 +}
23420 +EXPORT_SYMBOL(copy_to_user_overflow);
23421 +
23422 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23423 +void __set_fs(mm_segment_t x)
23424 +{
23425 + switch (x.seg) {
23426 + case 0:
23427 + loadsegment(gs, 0);
23428 + break;
23429 + case TASK_SIZE_MAX:
23430 + loadsegment(gs, __USER_DS);
23431 + break;
23432 + case -1UL:
23433 + loadsegment(gs, __KERNEL_DS);
23434 + break;
23435 + default:
23436 + BUG();
23437 + }
23438 + return;
23439 +}
23440 +EXPORT_SYMBOL(__set_fs);
23441 +
23442 +void set_fs(mm_segment_t x)
23443 +{
23444 + current_thread_info()->addr_limit = x;
23445 + __set_fs(x);
23446 +}
23447 +EXPORT_SYMBOL(set_fs);
23448 +#endif
23449 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23450 index e5b130b..6690d31 100644
23451 --- a/arch/x86/lib/usercopy_64.c
23452 +++ b/arch/x86/lib/usercopy_64.c
23453 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23454 {
23455 long __d0;
23456 might_fault();
23457 +
23458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23459 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23460 + addr += PAX_USER_SHADOW_BASE;
23461 +#endif
23462 +
23463 /* no memory constraint because it doesn't change any memory gcc knows
23464 about */
23465 asm volatile(
23466 @@ -52,12 +58,20 @@ unsigned long clear_user(void __user *to, unsigned long n)
23467 }
23468 EXPORT_SYMBOL(clear_user);
23469
23470 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23471 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23472 {
23473 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23474 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23475 - }
23476 - return len;
23477 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23478 +
23479 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23480 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23481 + to += PAX_USER_SHADOW_BASE;
23482 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23483 + from += PAX_USER_SHADOW_BASE;
23484 +#endif
23485 +
23486 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23487 + }
23488 + return len;
23489 }
23490 EXPORT_SYMBOL(copy_in_user);
23491
23492 @@ -67,7 +81,7 @@ EXPORT_SYMBOL(copy_in_user);
23493 * it is not necessary to optimize tail handling.
23494 */
23495 unsigned long
23496 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23497 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23498 {
23499 char c;
23500 unsigned zero_len;
23501 @@ -84,3 +98,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23502 break;
23503 return len;
23504 }
23505 +
23506 +void copy_from_user_overflow(void)
23507 +{
23508 + WARN(1, "Buffer overflow detected!\n");
23509 +}
23510 +EXPORT_SYMBOL(copy_from_user_overflow);
23511 +
23512 +void copy_to_user_overflow(void)
23513 +{
23514 + WARN(1, "Buffer overflow detected!\n");
23515 +}
23516 +EXPORT_SYMBOL(copy_to_user_overflow);
23517 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23518 index 903ec1e..c4166b2 100644
23519 --- a/arch/x86/mm/extable.c
23520 +++ b/arch/x86/mm/extable.c
23521 @@ -6,12 +6,24 @@
23522 static inline unsigned long
23523 ex_insn_addr(const struct exception_table_entry *x)
23524 {
23525 - return (unsigned long)&x->insn + x->insn;
23526 + unsigned long reloc = 0;
23527 +
23528 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23529 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23530 +#endif
23531 +
23532 + return (unsigned long)&x->insn + x->insn + reloc;
23533 }
23534 static inline unsigned long
23535 ex_fixup_addr(const struct exception_table_entry *x)
23536 {
23537 - return (unsigned long)&x->fixup + x->fixup;
23538 + unsigned long reloc = 0;
23539 +
23540 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23541 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23542 +#endif
23543 +
23544 + return (unsigned long)&x->fixup + x->fixup + reloc;
23545 }
23546
23547 int fixup_exception(struct pt_regs *regs)
23548 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
23549 unsigned long new_ip;
23550
23551 #ifdef CONFIG_PNPBIOS
23552 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23553 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23554 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23555 extern u32 pnp_bios_is_utter_crap;
23556 pnp_bios_is_utter_crap = 1;
23557 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
23558 i += 4;
23559 p->fixup -= i;
23560 i += 4;
23561 +
23562 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23563 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
23564 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23565 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23566 +#endif
23567 +
23568 }
23569 }
23570
23571 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23572 index 76dcd9d..e9dffde 100644
23573 --- a/arch/x86/mm/fault.c
23574 +++ b/arch/x86/mm/fault.c
23575 @@ -13,11 +13,18 @@
23576 #include <linux/perf_event.h> /* perf_sw_event */
23577 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23578 #include <linux/prefetch.h> /* prefetchw */
23579 +#include <linux/unistd.h>
23580 +#include <linux/compiler.h>
23581
23582 #include <asm/traps.h> /* dotraplinkage, ... */
23583 #include <asm/pgalloc.h> /* pgd_*(), ... */
23584 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23585 #include <asm/fixmap.h> /* VSYSCALL_START */
23586 +#include <asm/tlbflush.h>
23587 +
23588 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23589 +#include <asm/stacktrace.h>
23590 +#endif
23591
23592 /*
23593 * Page fault error code bits:
23594 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23595 int ret = 0;
23596
23597 /* kprobe_running() needs smp_processor_id() */
23598 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23599 + if (kprobes_built_in() && !user_mode(regs)) {
23600 preempt_disable();
23601 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23602 ret = 1;
23603 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23604 return !instr_lo || (instr_lo>>1) == 1;
23605 case 0x00:
23606 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23607 - if (probe_kernel_address(instr, opcode))
23608 + if (user_mode(regs)) {
23609 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23610 + return 0;
23611 + } else if (probe_kernel_address(instr, opcode))
23612 return 0;
23613
23614 *prefetch = (instr_lo == 0xF) &&
23615 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23616 while (instr < max_instr) {
23617 unsigned char opcode;
23618
23619 - if (probe_kernel_address(instr, opcode))
23620 + if (user_mode(regs)) {
23621 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23622 + break;
23623 + } else if (probe_kernel_address(instr, opcode))
23624 break;
23625
23626 instr++;
23627 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23628 force_sig_info(si_signo, &info, tsk);
23629 }
23630
23631 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23632 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23633 +#endif
23634 +
23635 +#ifdef CONFIG_PAX_EMUTRAMP
23636 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23637 +#endif
23638 +
23639 +#ifdef CONFIG_PAX_PAGEEXEC
23640 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23641 +{
23642 + pgd_t *pgd;
23643 + pud_t *pud;
23644 + pmd_t *pmd;
23645 +
23646 + pgd = pgd_offset(mm, address);
23647 + if (!pgd_present(*pgd))
23648 + return NULL;
23649 + pud = pud_offset(pgd, address);
23650 + if (!pud_present(*pud))
23651 + return NULL;
23652 + pmd = pmd_offset(pud, address);
23653 + if (!pmd_present(*pmd))
23654 + return NULL;
23655 + return pmd;
23656 +}
23657 +#endif
23658 +
23659 DEFINE_SPINLOCK(pgd_lock);
23660 LIST_HEAD(pgd_list);
23661
23662 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23663 for (address = VMALLOC_START & PMD_MASK;
23664 address >= TASK_SIZE && address < FIXADDR_TOP;
23665 address += PMD_SIZE) {
23666 +
23667 +#ifdef CONFIG_PAX_PER_CPU_PGD
23668 + unsigned long cpu;
23669 +#else
23670 struct page *page;
23671 +#endif
23672
23673 spin_lock(&pgd_lock);
23674 +
23675 +#ifdef CONFIG_PAX_PER_CPU_PGD
23676 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23677 + pgd_t *pgd = get_cpu_pgd(cpu);
23678 + pmd_t *ret;
23679 +#else
23680 list_for_each_entry(page, &pgd_list, lru) {
23681 + pgd_t *pgd = page_address(page);
23682 spinlock_t *pgt_lock;
23683 pmd_t *ret;
23684
23685 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23686 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23687
23688 spin_lock(pgt_lock);
23689 - ret = vmalloc_sync_one(page_address(page), address);
23690 +#endif
23691 +
23692 + ret = vmalloc_sync_one(pgd, address);
23693 +
23694 +#ifndef CONFIG_PAX_PER_CPU_PGD
23695 spin_unlock(pgt_lock);
23696 +#endif
23697
23698 if (!ret)
23699 break;
23700 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23701 * an interrupt in the middle of a task switch..
23702 */
23703 pgd_paddr = read_cr3();
23704 +
23705 +#ifdef CONFIG_PAX_PER_CPU_PGD
23706 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23707 +#endif
23708 +
23709 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23710 if (!pmd_k)
23711 return -1;
23712 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23713 * happen within a race in page table update. In the later
23714 * case just flush:
23715 */
23716 +
23717 +#ifdef CONFIG_PAX_PER_CPU_PGD
23718 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23719 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23720 +#else
23721 pgd = pgd_offset(current->active_mm, address);
23722 +#endif
23723 +
23724 pgd_ref = pgd_offset_k(address);
23725 if (pgd_none(*pgd_ref))
23726 return -1;
23727 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23728 static int is_errata100(struct pt_regs *regs, unsigned long address)
23729 {
23730 #ifdef CONFIG_X86_64
23731 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23732 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23733 return 1;
23734 #endif
23735 return 0;
23736 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23737 }
23738
23739 static const char nx_warning[] = KERN_CRIT
23740 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23741 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23742
23743 static void
23744 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23745 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23746 if (!oops_may_print())
23747 return;
23748
23749 - if (error_code & PF_INSTR) {
23750 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23751 unsigned int level;
23752
23753 pte_t *pte = lookup_address(address, &level);
23754
23755 if (pte && pte_present(*pte) && !pte_exec(*pte))
23756 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
23757 + printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
23758 }
23759
23760 +#ifdef CONFIG_PAX_KERNEXEC
23761 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23762 + if (current->signal->curr_ip)
23763 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23764 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23765 + else
23766 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23767 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23768 + }
23769 +#endif
23770 +
23771 printk(KERN_ALERT "BUG: unable to handle kernel ");
23772 if (address < PAGE_SIZE)
23773 printk(KERN_CONT "NULL pointer dereference");
23774 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23775 }
23776 #endif
23777
23778 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23779 + if (pax_is_fetch_fault(regs, error_code, address)) {
23780 +
23781 +#ifdef CONFIG_PAX_EMUTRAMP
23782 + switch (pax_handle_fetch_fault(regs)) {
23783 + case 2:
23784 + return;
23785 + }
23786 +#endif
23787 +
23788 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23789 + do_group_exit(SIGKILL);
23790 + }
23791 +#endif
23792 +
23793 if (unlikely(show_unhandled_signals))
23794 show_signal_msg(regs, error_code, address, tsk);
23795
23796 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23797 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23798 printk(KERN_ERR
23799 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23800 - tsk->comm, tsk->pid, address);
23801 + tsk->comm, task_pid_nr(tsk), address);
23802 code = BUS_MCEERR_AR;
23803 }
23804 #endif
23805 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23806 return 1;
23807 }
23808
23809 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23810 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23811 +{
23812 + pte_t *pte;
23813 + pmd_t *pmd;
23814 + spinlock_t *ptl;
23815 + unsigned char pte_mask;
23816 +
23817 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23818 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23819 + return 0;
23820 +
23821 + /* PaX: it's our fault, let's handle it if we can */
23822 +
23823 + /* PaX: take a look at read faults before acquiring any locks */
23824 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23825 + /* instruction fetch attempt from a protected page in user mode */
23826 + up_read(&mm->mmap_sem);
23827 +
23828 +#ifdef CONFIG_PAX_EMUTRAMP
23829 + switch (pax_handle_fetch_fault(regs)) {
23830 + case 2:
23831 + return 1;
23832 + }
23833 +#endif
23834 +
23835 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23836 + do_group_exit(SIGKILL);
23837 + }
23838 +
23839 + pmd = pax_get_pmd(mm, address);
23840 + if (unlikely(!pmd))
23841 + return 0;
23842 +
23843 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23844 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23845 + pte_unmap_unlock(pte, ptl);
23846 + return 0;
23847 + }
23848 +
23849 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23850 + /* write attempt to a protected page in user mode */
23851 + pte_unmap_unlock(pte, ptl);
23852 + return 0;
23853 + }
23854 +
23855 +#ifdef CONFIG_SMP
23856 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23857 +#else
23858 + if (likely(address > get_limit(regs->cs)))
23859 +#endif
23860 + {
23861 + set_pte(pte, pte_mkread(*pte));
23862 + __flush_tlb_one(address);
23863 + pte_unmap_unlock(pte, ptl);
23864 + up_read(&mm->mmap_sem);
23865 + return 1;
23866 + }
23867 +
23868 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23869 +
23870 + /*
23871 + * PaX: fill DTLB with user rights and retry
23872 + */
23873 + __asm__ __volatile__ (
23874 + "orb %2,(%1)\n"
23875 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23876 +/*
23877 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23878 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23879 + * page fault when examined during a TLB load attempt. this is true not only
23880 + * for PTEs holding a non-present entry but also present entries that will
23881 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23882 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23883 + * for our target pages since their PTEs are simply not in the TLBs at all.
23884 +
23885 + * the best thing in omitting it is that we gain around 15-20% speed in the
23886 + * fast path of the page fault handler and can get rid of tracing since we
23887 + * can no longer flush unintended entries.
23888 + */
23889 + "invlpg (%0)\n"
23890 +#endif
23891 + __copyuser_seg"testb $0,(%0)\n"
23892 + "xorb %3,(%1)\n"
23893 + :
23894 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23895 + : "memory", "cc");
23896 + pte_unmap_unlock(pte, ptl);
23897 + up_read(&mm->mmap_sem);
23898 + return 1;
23899 +}
23900 +#endif
23901 +
23902 /*
23903 * Handle a spurious fault caused by a stale TLB entry.
23904 *
23905 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23906 static inline int
23907 access_error(unsigned long error_code, struct vm_area_struct *vma)
23908 {
23909 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23910 + return 1;
23911 +
23912 if (error_code & PF_WRITE) {
23913 /* write, present and write, not present: */
23914 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23915 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23916 {
23917 struct vm_area_struct *vma;
23918 struct task_struct *tsk;
23919 - unsigned long address;
23920 struct mm_struct *mm;
23921 int fault;
23922 int write = error_code & PF_WRITE;
23923 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23924 (write ? FAULT_FLAG_WRITE : 0);
23925
23926 - tsk = current;
23927 - mm = tsk->mm;
23928 -
23929 /* Get the faulting address: */
23930 - address = read_cr2();
23931 + unsigned long address = read_cr2();
23932 +
23933 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23934 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23935 + if (!search_exception_tables(regs->ip)) {
23936 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23937 + bad_area_nosemaphore(regs, error_code, address);
23938 + return;
23939 + }
23940 + if (address < PAX_USER_SHADOW_BASE) {
23941 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23942 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23943 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23944 + } else
23945 + address -= PAX_USER_SHADOW_BASE;
23946 + }
23947 +#endif
23948 +
23949 + tsk = current;
23950 + mm = tsk->mm;
23951
23952 /*
23953 * Detect and handle instructions that would cause a page fault for
23954 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23955 * User-mode registers count as a user access even for any
23956 * potential system fault or CPU buglet:
23957 */
23958 - if (user_mode_vm(regs)) {
23959 + if (user_mode(regs)) {
23960 local_irq_enable();
23961 error_code |= PF_USER;
23962 } else {
23963 @@ -1132,6 +1339,11 @@ retry:
23964 might_sleep();
23965 }
23966
23967 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23968 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23969 + return;
23970 +#endif
23971 +
23972 vma = find_vma(mm, address);
23973 if (unlikely(!vma)) {
23974 bad_area(regs, error_code, address);
23975 @@ -1143,18 +1355,24 @@ retry:
23976 bad_area(regs, error_code, address);
23977 return;
23978 }
23979 - if (error_code & PF_USER) {
23980 - /*
23981 - * Accessing the stack below %sp is always a bug.
23982 - * The large cushion allows instructions like enter
23983 - * and pusha to work. ("enter $65535, $31" pushes
23984 - * 32 pointers and then decrements %sp by 65535.)
23985 - */
23986 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23987 - bad_area(regs, error_code, address);
23988 - return;
23989 - }
23990 + /*
23991 + * Accessing the stack below %sp is always a bug.
23992 + * The large cushion allows instructions like enter
23993 + * and pusha to work. ("enter $65535, $31" pushes
23994 + * 32 pointers and then decrements %sp by 65535.)
23995 + */
23996 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23997 + bad_area(regs, error_code, address);
23998 + return;
23999 }
24000 +
24001 +#ifdef CONFIG_PAX_SEGMEXEC
24002 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24003 + bad_area(regs, error_code, address);
24004 + return;
24005 + }
24006 +#endif
24007 +
24008 if (unlikely(expand_stack(vma, address))) {
24009 bad_area(regs, error_code, address);
24010 return;
24011 @@ -1209,3 +1427,292 @@ good_area:
24012
24013 up_read(&mm->mmap_sem);
24014 }
24015 +
24016 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24017 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24018 +{
24019 + struct mm_struct *mm = current->mm;
24020 + unsigned long ip = regs->ip;
24021 +
24022 + if (v8086_mode(regs))
24023 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24024 +
24025 +#ifdef CONFIG_PAX_PAGEEXEC
24026 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24027 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24028 + return true;
24029 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24030 + return true;
24031 + return false;
24032 + }
24033 +#endif
24034 +
24035 +#ifdef CONFIG_PAX_SEGMEXEC
24036 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24037 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24038 + return true;
24039 + return false;
24040 + }
24041 +#endif
24042 +
24043 + return false;
24044 +}
24045 +#endif
24046 +
24047 +#ifdef CONFIG_PAX_EMUTRAMP
24048 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24049 +{
24050 + int err;
24051 +
24052 + do { /* PaX: libffi trampoline emulation */
24053 + unsigned char mov, jmp;
24054 + unsigned int addr1, addr2;
24055 +
24056 +#ifdef CONFIG_X86_64
24057 + if ((regs->ip + 9) >> 32)
24058 + break;
24059 +#endif
24060 +
24061 + err = get_user(mov, (unsigned char __user *)regs->ip);
24062 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24063 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24064 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24065 +
24066 + if (err)
24067 + break;
24068 +
24069 + if (mov == 0xB8 && jmp == 0xE9) {
24070 + regs->ax = addr1;
24071 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24072 + return 2;
24073 + }
24074 + } while (0);
24075 +
24076 + do { /* PaX: gcc trampoline emulation #1 */
24077 + unsigned char mov1, mov2;
24078 + unsigned short jmp;
24079 + unsigned int addr1, addr2;
24080 +
24081 +#ifdef CONFIG_X86_64
24082 + if ((regs->ip + 11) >> 32)
24083 + break;
24084 +#endif
24085 +
24086 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24087 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24088 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24089 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24090 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24091 +
24092 + if (err)
24093 + break;
24094 +
24095 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24096 + regs->cx = addr1;
24097 + regs->ax = addr2;
24098 + regs->ip = addr2;
24099 + return 2;
24100 + }
24101 + } while (0);
24102 +
24103 + do { /* PaX: gcc trampoline emulation #2 */
24104 + unsigned char mov, jmp;
24105 + unsigned int addr1, addr2;
24106 +
24107 +#ifdef CONFIG_X86_64
24108 + if ((regs->ip + 9) >> 32)
24109 + break;
24110 +#endif
24111 +
24112 + err = get_user(mov, (unsigned char __user *)regs->ip);
24113 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24114 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24115 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24116 +
24117 + if (err)
24118 + break;
24119 +
24120 + if (mov == 0xB9 && jmp == 0xE9) {
24121 + regs->cx = addr1;
24122 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24123 + return 2;
24124 + }
24125 + } while (0);
24126 +
24127 + return 1; /* PaX in action */
24128 +}
24129 +
24130 +#ifdef CONFIG_X86_64
24131 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24132 +{
24133 + int err;
24134 +
24135 + do { /* PaX: libffi trampoline emulation */
24136 + unsigned short mov1, mov2, jmp1;
24137 + unsigned char stcclc, jmp2;
24138 + unsigned long addr1, addr2;
24139 +
24140 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24141 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24142 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24143 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24144 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24145 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24146 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24147 +
24148 + if (err)
24149 + break;
24150 +
24151 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24152 + regs->r11 = addr1;
24153 + regs->r10 = addr2;
24154 + if (stcclc == 0xF8)
24155 + regs->flags &= ~X86_EFLAGS_CF;
24156 + else
24157 + regs->flags |= X86_EFLAGS_CF;
24158 + regs->ip = addr1;
24159 + return 2;
24160 + }
24161 + } while (0);
24162 +
24163 + do { /* PaX: gcc trampoline emulation #1 */
24164 + unsigned short mov1, mov2, jmp1;
24165 + unsigned char jmp2;
24166 + unsigned int addr1;
24167 + unsigned long addr2;
24168 +
24169 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24170 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24171 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24172 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24173 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24174 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24175 +
24176 + if (err)
24177 + break;
24178 +
24179 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24180 + regs->r11 = addr1;
24181 + regs->r10 = addr2;
24182 + regs->ip = addr1;
24183 + return 2;
24184 + }
24185 + } while (0);
24186 +
24187 + do { /* PaX: gcc trampoline emulation #2 */
24188 + unsigned short mov1, mov2, jmp1;
24189 + unsigned char jmp2;
24190 + unsigned long addr1, addr2;
24191 +
24192 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24193 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24194 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24195 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24196 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24197 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24198 +
24199 + if (err)
24200 + break;
24201 +
24202 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24203 + regs->r11 = addr1;
24204 + regs->r10 = addr2;
24205 + regs->ip = addr1;
24206 + return 2;
24207 + }
24208 + } while (0);
24209 +
24210 + return 1; /* PaX in action */
24211 +}
24212 +#endif
24213 +
24214 +/*
24215 + * PaX: decide what to do with offenders (regs->ip = fault address)
24216 + *
24217 + * returns 1 when task should be killed
24218 + * 2 when gcc trampoline was detected
24219 + */
24220 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24221 +{
24222 + if (v8086_mode(regs))
24223 + return 1;
24224 +
24225 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24226 + return 1;
24227 +
24228 +#ifdef CONFIG_X86_32
24229 + return pax_handle_fetch_fault_32(regs);
24230 +#else
24231 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24232 + return pax_handle_fetch_fault_32(regs);
24233 + else
24234 + return pax_handle_fetch_fault_64(regs);
24235 +#endif
24236 +}
24237 +#endif
24238 +
24239 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24240 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24241 +{
24242 + long i;
24243 +
24244 + printk(KERN_ERR "PAX: bytes at PC: ");
24245 + for (i = 0; i < 20; i++) {
24246 + unsigned char c;
24247 + if (get_user(c, (unsigned char __force_user *)pc+i))
24248 + printk(KERN_CONT "?? ");
24249 + else
24250 + printk(KERN_CONT "%02x ", c);
24251 + }
24252 + printk("\n");
24253 +
24254 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24255 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24256 + unsigned long c;
24257 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24258 +#ifdef CONFIG_X86_32
24259 + printk(KERN_CONT "???????? ");
24260 +#else
24261 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24262 + printk(KERN_CONT "???????? ???????? ");
24263 + else
24264 + printk(KERN_CONT "???????????????? ");
24265 +#endif
24266 + } else {
24267 +#ifdef CONFIG_X86_64
24268 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24269 + printk(KERN_CONT "%08x ", (unsigned int)c);
24270 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24271 + } else
24272 +#endif
24273 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24274 + }
24275 + }
24276 + printk("\n");
24277 +}
24278 +#endif
24279 +
24280 +/**
24281 + * probe_kernel_write(): safely attempt to write to a location
24282 + * @dst: address to write to
24283 + * @src: pointer to the data that shall be written
24284 + * @size: size of the data chunk
24285 + *
24286 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24287 + * happens, handle that and return -EFAULT.
24288 + */
24289 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24290 +{
24291 + long ret;
24292 + mm_segment_t old_fs = get_fs();
24293 +
24294 + set_fs(KERNEL_DS);
24295 + pagefault_disable();
24296 + pax_open_kernel();
24297 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24298 + pax_close_kernel();
24299 + pagefault_enable();
24300 + set_fs(old_fs);
24301 +
24302 + return ret ? -EFAULT : 0;
24303 +}
24304 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24305 index dd74e46..7d26398 100644
24306 --- a/arch/x86/mm/gup.c
24307 +++ b/arch/x86/mm/gup.c
24308 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24309 addr = start;
24310 len = (unsigned long) nr_pages << PAGE_SHIFT;
24311 end = start + len;
24312 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24313 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24314 (void __user *)start, len)))
24315 return 0;
24316
24317 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24318 index 6f31ee5..8ee4164 100644
24319 --- a/arch/x86/mm/highmem_32.c
24320 +++ b/arch/x86/mm/highmem_32.c
24321 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24322 idx = type + KM_TYPE_NR*smp_processor_id();
24323 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24324 BUG_ON(!pte_none(*(kmap_pte-idx)));
24325 +
24326 + pax_open_kernel();
24327 set_pte(kmap_pte-idx, mk_pte(page, prot));
24328 + pax_close_kernel();
24329 +
24330 arch_flush_lazy_mmu_mode();
24331
24332 return (void *)vaddr;
24333 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24334 index b91e485..d00e7c9 100644
24335 --- a/arch/x86/mm/hugetlbpage.c
24336 +++ b/arch/x86/mm/hugetlbpage.c
24337 @@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24338 struct hstate *h = hstate_file(file);
24339 struct mm_struct *mm = current->mm;
24340 struct vm_area_struct *vma;
24341 - unsigned long start_addr;
24342 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24343 +
24344 +#ifdef CONFIG_PAX_SEGMEXEC
24345 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24346 + pax_task_size = SEGMEXEC_TASK_SIZE;
24347 +#endif
24348 +
24349 + pax_task_size -= PAGE_SIZE;
24350
24351 if (len > mm->cached_hole_size) {
24352 - start_addr = mm->free_area_cache;
24353 + start_addr = mm->free_area_cache;
24354 } else {
24355 - start_addr = TASK_UNMAPPED_BASE;
24356 - mm->cached_hole_size = 0;
24357 + start_addr = mm->mmap_base;
24358 + mm->cached_hole_size = 0;
24359 }
24360
24361 full_search:
24362 @@ -291,26 +298,27 @@ full_search:
24363
24364 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24365 /* At this point: (!vma || addr < vma->vm_end). */
24366 - if (TASK_SIZE - len < addr) {
24367 + if (pax_task_size - len < addr) {
24368 /*
24369 * Start a new search - just in case we missed
24370 * some holes.
24371 */
24372 - if (start_addr != TASK_UNMAPPED_BASE) {
24373 - start_addr = TASK_UNMAPPED_BASE;
24374 + if (start_addr != mm->mmap_base) {
24375 + start_addr = mm->mmap_base;
24376 mm->cached_hole_size = 0;
24377 goto full_search;
24378 }
24379 return -ENOMEM;
24380 }
24381 - if (!vma || addr + len <= vma->vm_start) {
24382 - mm->free_area_cache = addr + len;
24383 - return addr;
24384 - }
24385 + if (check_heap_stack_gap(vma, addr, len))
24386 + break;
24387 if (addr + mm->cached_hole_size < vma->vm_start)
24388 mm->cached_hole_size = vma->vm_start - addr;
24389 addr = ALIGN(vma->vm_end, huge_page_size(h));
24390 }
24391 +
24392 + mm->free_area_cache = addr + len;
24393 + return addr;
24394 }
24395
24396 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24397 @@ -321,9 +329,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24398 struct mm_struct *mm = current->mm;
24399 struct vm_area_struct *vma;
24400 unsigned long base = mm->mmap_base;
24401 - unsigned long addr = addr0;
24402 + unsigned long addr;
24403 unsigned long largest_hole = mm->cached_hole_size;
24404 - unsigned long start_addr;
24405
24406 /* don't allow allocations above current base */
24407 if (mm->free_area_cache > base)
24408 @@ -333,16 +340,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24409 largest_hole = 0;
24410 mm->free_area_cache = base;
24411 }
24412 -try_again:
24413 - start_addr = mm->free_area_cache;
24414
24415 /* make sure it can fit in the remaining address space */
24416 if (mm->free_area_cache < len)
24417 goto fail;
24418
24419 /* either no address requested or can't fit in requested address hole */
24420 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24421 + addr = mm->free_area_cache - len;
24422 do {
24423 + addr &= huge_page_mask(h);
24424 /*
24425 * Lookup failure means no vma is above this address,
24426 * i.e. return with success:
24427 @@ -351,10 +357,10 @@ try_again:
24428 if (!vma)
24429 return addr;
24430
24431 - if (addr + len <= vma->vm_start) {
24432 + if (check_heap_stack_gap(vma, addr, len)) {
24433 /* remember the address as a hint for next time */
24434 - mm->cached_hole_size = largest_hole;
24435 - return (mm->free_area_cache = addr);
24436 + mm->cached_hole_size = largest_hole;
24437 + return (mm->free_area_cache = addr);
24438 } else if (mm->free_area_cache == vma->vm_end) {
24439 /* pull free_area_cache down to the first hole */
24440 mm->free_area_cache = vma->vm_start;
24441 @@ -363,29 +369,34 @@ try_again:
24442
24443 /* remember the largest hole we saw so far */
24444 if (addr + largest_hole < vma->vm_start)
24445 - largest_hole = vma->vm_start - addr;
24446 + largest_hole = vma->vm_start - addr;
24447
24448 /* try just below the current vma->vm_start */
24449 - addr = (vma->vm_start - len) & huge_page_mask(h);
24450 - } while (len <= vma->vm_start);
24451 + addr = skip_heap_stack_gap(vma, len);
24452 + } while (!IS_ERR_VALUE(addr));
24453
24454 fail:
24455 /*
24456 - * if hint left us with no space for the requested
24457 - * mapping then try again:
24458 - */
24459 - if (start_addr != base) {
24460 - mm->free_area_cache = base;
24461 - largest_hole = 0;
24462 - goto try_again;
24463 - }
24464 - /*
24465 * A failed mmap() very likely causes application failure,
24466 * so fall back to the bottom-up function here. This scenario
24467 * can happen with large stack limits and large mmap()
24468 * allocations.
24469 */
24470 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24471 +
24472 +#ifdef CONFIG_PAX_SEGMEXEC
24473 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24474 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24475 + else
24476 +#endif
24477 +
24478 + mm->mmap_base = TASK_UNMAPPED_BASE;
24479 +
24480 +#ifdef CONFIG_PAX_RANDMMAP
24481 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24482 + mm->mmap_base += mm->delta_mmap;
24483 +#endif
24484 +
24485 + mm->free_area_cache = mm->mmap_base;
24486 mm->cached_hole_size = ~0UL;
24487 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24488 len, pgoff, flags);
24489 @@ -393,6 +404,7 @@ fail:
24490 /*
24491 * Restore the topdown base:
24492 */
24493 + mm->mmap_base = base;
24494 mm->free_area_cache = base;
24495 mm->cached_hole_size = ~0UL;
24496
24497 @@ -406,10 +418,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24498 struct hstate *h = hstate_file(file);
24499 struct mm_struct *mm = current->mm;
24500 struct vm_area_struct *vma;
24501 + unsigned long pax_task_size = TASK_SIZE;
24502
24503 if (len & ~huge_page_mask(h))
24504 return -EINVAL;
24505 - if (len > TASK_SIZE)
24506 +
24507 +#ifdef CONFIG_PAX_SEGMEXEC
24508 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24509 + pax_task_size = SEGMEXEC_TASK_SIZE;
24510 +#endif
24511 +
24512 + pax_task_size -= PAGE_SIZE;
24513 +
24514 + if (len > pax_task_size)
24515 return -ENOMEM;
24516
24517 if (flags & MAP_FIXED) {
24518 @@ -421,8 +442,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24519 if (addr) {
24520 addr = ALIGN(addr, huge_page_size(h));
24521 vma = find_vma(mm, addr);
24522 - if (TASK_SIZE - len >= addr &&
24523 - (!vma || addr + len <= vma->vm_start))
24524 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24525 return addr;
24526 }
24527 if (mm->get_unmapped_area == arch_get_unmapped_area)
24528 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24529 index bc4e9d8..ca4c14b 100644
24530 --- a/arch/x86/mm/init.c
24531 +++ b/arch/x86/mm/init.c
24532 @@ -16,6 +16,8 @@
24533 #include <asm/tlb.h>
24534 #include <asm/proto.h>
24535 #include <asm/dma.h> /* for MAX_DMA_PFN */
24536 +#include <asm/desc.h>
24537 +#include <asm/bios_ebda.h>
24538
24539 unsigned long __initdata pgt_buf_start;
24540 unsigned long __meminitdata pgt_buf_end;
24541 @@ -38,7 +40,7 @@ struct map_range {
24542 static void __init find_early_table_space(struct map_range *mr, unsigned long end,
24543 int use_pse, int use_gbpages)
24544 {
24545 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24546 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24547 phys_addr_t base;
24548
24549 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24550 @@ -317,10 +319,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24551 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24552 * mmio resources as well as potential bios/acpi data regions.
24553 */
24554 +
24555 +#ifdef CONFIG_GRKERNSEC_KMEM
24556 +static unsigned int ebda_start __read_only;
24557 +static unsigned int ebda_end __read_only;
24558 +#endif
24559 +
24560 int devmem_is_allowed(unsigned long pagenr)
24561 {
24562 +#ifdef CONFIG_GRKERNSEC_KMEM
24563 + /* allow BDA */
24564 + if (!pagenr)
24565 + return 1;
24566 + /* allow EBDA */
24567 + if (pagenr >= ebda_start && pagenr < ebda_end)
24568 + return 1;
24569 +#else
24570 + if (!pagenr)
24571 + return 1;
24572 +#ifdef CONFIG_VM86
24573 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24574 + return 1;
24575 +#endif
24576 +#endif
24577 +
24578 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24579 + return 1;
24580 +#ifdef CONFIG_GRKERNSEC_KMEM
24581 + /* throw out everything else below 1MB */
24582 if (pagenr <= 256)
24583 - return 1;
24584 + return 0;
24585 +#endif
24586 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24587 return 0;
24588 if (!page_is_ram(pagenr))
24589 @@ -377,8 +406,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24590 #endif
24591 }
24592
24593 +#ifdef CONFIG_GRKERNSEC_KMEM
24594 +static inline void gr_init_ebda(void)
24595 +{
24596 + unsigned int ebda_addr;
24597 + unsigned int ebda_size = 0;
24598 +
24599 + ebda_addr = get_bios_ebda();
24600 + if (ebda_addr) {
24601 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24602 + ebda_size <<= 10;
24603 + }
24604 + if (ebda_addr && ebda_size) {
24605 + ebda_start = ebda_addr >> PAGE_SHIFT;
24606 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24607 + } else {
24608 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24609 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24610 + }
24611 +}
24612 +#else
24613 +static inline void gr_init_ebda(void) { }
24614 +#endif
24615 +
24616 void free_initmem(void)
24617 {
24618 +#ifdef CONFIG_PAX_KERNEXEC
24619 +#ifdef CONFIG_X86_32
24620 + /* PaX: limit KERNEL_CS to actual size */
24621 + unsigned long addr, limit;
24622 + struct desc_struct d;
24623 + int cpu;
24624 +#else
24625 + pgd_t *pgd;
24626 + pud_t *pud;
24627 + pmd_t *pmd;
24628 + unsigned long addr, end;
24629 +#endif
24630 +#endif
24631 +
24632 + gr_init_ebda();
24633 +
24634 +#ifdef CONFIG_PAX_KERNEXEC
24635 +#ifdef CONFIG_X86_32
24636 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24637 + limit = (limit - 1UL) >> PAGE_SHIFT;
24638 +
24639 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24640 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24641 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24642 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24643 + }
24644 +
24645 + /* PaX: make KERNEL_CS read-only */
24646 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24647 + if (!paravirt_enabled())
24648 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24649 +/*
24650 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24651 + pgd = pgd_offset_k(addr);
24652 + pud = pud_offset(pgd, addr);
24653 + pmd = pmd_offset(pud, addr);
24654 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24655 + }
24656 +*/
24657 +#ifdef CONFIG_X86_PAE
24658 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24659 +/*
24660 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24661 + pgd = pgd_offset_k(addr);
24662 + pud = pud_offset(pgd, addr);
24663 + pmd = pmd_offset(pud, addr);
24664 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24665 + }
24666 +*/
24667 +#endif
24668 +
24669 +#ifdef CONFIG_MODULES
24670 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24671 +#endif
24672 +
24673 +#else
24674 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24675 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24676 + pgd = pgd_offset_k(addr);
24677 + pud = pud_offset(pgd, addr);
24678 + pmd = pmd_offset(pud, addr);
24679 + if (!pmd_present(*pmd))
24680 + continue;
24681 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24682 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24683 + else
24684 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24685 + }
24686 +
24687 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24688 + end = addr + KERNEL_IMAGE_SIZE;
24689 + for (; addr < end; addr += PMD_SIZE) {
24690 + pgd = pgd_offset_k(addr);
24691 + pud = pud_offset(pgd, addr);
24692 + pmd = pmd_offset(pud, addr);
24693 + if (!pmd_present(*pmd))
24694 + continue;
24695 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24696 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24697 + }
24698 +#endif
24699 +
24700 + flush_tlb_all();
24701 +#endif
24702 +
24703 free_init_pages("unused kernel memory",
24704 (unsigned long)(&__init_begin),
24705 (unsigned long)(&__init_end));
24706 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24707 index 575d86f..4987469 100644
24708 --- a/arch/x86/mm/init_32.c
24709 +++ b/arch/x86/mm/init_32.c
24710 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24711 }
24712
24713 /*
24714 - * Creates a middle page table and puts a pointer to it in the
24715 - * given global directory entry. This only returns the gd entry
24716 - * in non-PAE compilation mode, since the middle layer is folded.
24717 - */
24718 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24719 -{
24720 - pud_t *pud;
24721 - pmd_t *pmd_table;
24722 -
24723 -#ifdef CONFIG_X86_PAE
24724 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24725 - if (after_bootmem)
24726 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24727 - else
24728 - pmd_table = (pmd_t *)alloc_low_page();
24729 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24730 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24731 - pud = pud_offset(pgd, 0);
24732 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24733 -
24734 - return pmd_table;
24735 - }
24736 -#endif
24737 - pud = pud_offset(pgd, 0);
24738 - pmd_table = pmd_offset(pud, 0);
24739 -
24740 - return pmd_table;
24741 -}
24742 -
24743 -/*
24744 * Create a page table and place a pointer to it in a middle page
24745 * directory entry:
24746 */
24747 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24748 page_table = (pte_t *)alloc_low_page();
24749
24750 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24751 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24752 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24753 +#else
24754 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24755 +#endif
24756 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24757 }
24758
24759 return pte_offset_kernel(pmd, 0);
24760 }
24761
24762 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24763 +{
24764 + pud_t *pud;
24765 + pmd_t *pmd_table;
24766 +
24767 + pud = pud_offset(pgd, 0);
24768 + pmd_table = pmd_offset(pud, 0);
24769 +
24770 + return pmd_table;
24771 +}
24772 +
24773 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24774 {
24775 int pgd_idx = pgd_index(vaddr);
24776 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24777 int pgd_idx, pmd_idx;
24778 unsigned long vaddr;
24779 pgd_t *pgd;
24780 + pud_t *pud;
24781 pmd_t *pmd;
24782 pte_t *pte = NULL;
24783
24784 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24785 pgd = pgd_base + pgd_idx;
24786
24787 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24788 - pmd = one_md_table_init(pgd);
24789 - pmd = pmd + pmd_index(vaddr);
24790 + pud = pud_offset(pgd, vaddr);
24791 + pmd = pmd_offset(pud, vaddr);
24792 +
24793 +#ifdef CONFIG_X86_PAE
24794 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24795 +#endif
24796 +
24797 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24798 pmd++, pmd_idx++) {
24799 pte = page_table_kmap_check(one_page_table_init(pmd),
24800 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24801 }
24802 }
24803
24804 -static inline int is_kernel_text(unsigned long addr)
24805 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24806 {
24807 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24808 - return 1;
24809 - return 0;
24810 + if ((start > ktla_ktva((unsigned long)_etext) ||
24811 + end <= ktla_ktva((unsigned long)_stext)) &&
24812 + (start > ktla_ktva((unsigned long)_einittext) ||
24813 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24814 +
24815 +#ifdef CONFIG_ACPI_SLEEP
24816 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24817 +#endif
24818 +
24819 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24820 + return 0;
24821 + return 1;
24822 }
24823
24824 /*
24825 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24826 unsigned long last_map_addr = end;
24827 unsigned long start_pfn, end_pfn;
24828 pgd_t *pgd_base = swapper_pg_dir;
24829 - int pgd_idx, pmd_idx, pte_ofs;
24830 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24831 unsigned long pfn;
24832 pgd_t *pgd;
24833 + pud_t *pud;
24834 pmd_t *pmd;
24835 pte_t *pte;
24836 unsigned pages_2m, pages_4k;
24837 @@ -280,8 +281,13 @@ repeat:
24838 pfn = start_pfn;
24839 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24840 pgd = pgd_base + pgd_idx;
24841 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24842 - pmd = one_md_table_init(pgd);
24843 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24844 + pud = pud_offset(pgd, 0);
24845 + pmd = pmd_offset(pud, 0);
24846 +
24847 +#ifdef CONFIG_X86_PAE
24848 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24849 +#endif
24850
24851 if (pfn >= end_pfn)
24852 continue;
24853 @@ -293,14 +299,13 @@ repeat:
24854 #endif
24855 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24856 pmd++, pmd_idx++) {
24857 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24858 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24859
24860 /*
24861 * Map with big pages if possible, otherwise
24862 * create normal page tables:
24863 */
24864 if (use_pse) {
24865 - unsigned int addr2;
24866 pgprot_t prot = PAGE_KERNEL_LARGE;
24867 /*
24868 * first pass will use the same initial
24869 @@ -310,11 +315,7 @@ repeat:
24870 __pgprot(PTE_IDENT_ATTR |
24871 _PAGE_PSE);
24872
24873 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24874 - PAGE_OFFSET + PAGE_SIZE-1;
24875 -
24876 - if (is_kernel_text(addr) ||
24877 - is_kernel_text(addr2))
24878 + if (is_kernel_text(address, address + PMD_SIZE))
24879 prot = PAGE_KERNEL_LARGE_EXEC;
24880
24881 pages_2m++;
24882 @@ -331,7 +332,7 @@ repeat:
24883 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24884 pte += pte_ofs;
24885 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24886 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24887 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24888 pgprot_t prot = PAGE_KERNEL;
24889 /*
24890 * first pass will use the same initial
24891 @@ -339,7 +340,7 @@ repeat:
24892 */
24893 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24894
24895 - if (is_kernel_text(addr))
24896 + if (is_kernel_text(address, address + PAGE_SIZE))
24897 prot = PAGE_KERNEL_EXEC;
24898
24899 pages_4k++;
24900 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24901
24902 pud = pud_offset(pgd, va);
24903 pmd = pmd_offset(pud, va);
24904 - if (!pmd_present(*pmd))
24905 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24906 break;
24907
24908 pte = pte_offset_kernel(pmd, va);
24909 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24910
24911 static void __init pagetable_init(void)
24912 {
24913 - pgd_t *pgd_base = swapper_pg_dir;
24914 -
24915 - permanent_kmaps_init(pgd_base);
24916 + permanent_kmaps_init(swapper_pg_dir);
24917 }
24918
24919 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24920 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24921 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24922
24923 /* user-defined highmem size */
24924 @@ -734,6 +733,12 @@ void __init mem_init(void)
24925
24926 pci_iommu_alloc();
24927
24928 +#ifdef CONFIG_PAX_PER_CPU_PGD
24929 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24930 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24931 + KERNEL_PGD_PTRS);
24932 +#endif
24933 +
24934 #ifdef CONFIG_FLATMEM
24935 BUG_ON(!mem_map);
24936 #endif
24937 @@ -760,7 +765,7 @@ void __init mem_init(void)
24938 reservedpages++;
24939
24940 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24941 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24942 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24943 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24944
24945 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24946 @@ -801,10 +806,10 @@ void __init mem_init(void)
24947 ((unsigned long)&__init_end -
24948 (unsigned long)&__init_begin) >> 10,
24949
24950 - (unsigned long)&_etext, (unsigned long)&_edata,
24951 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24952 + (unsigned long)&_sdata, (unsigned long)&_edata,
24953 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24954
24955 - (unsigned long)&_text, (unsigned long)&_etext,
24956 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24957 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24958
24959 /*
24960 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24961 if (!kernel_set_to_readonly)
24962 return;
24963
24964 + start = ktla_ktva(start);
24965 pr_debug("Set kernel text: %lx - %lx for read write\n",
24966 start, start+size);
24967
24968 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24969 if (!kernel_set_to_readonly)
24970 return;
24971
24972 + start = ktla_ktva(start);
24973 pr_debug("Set kernel text: %lx - %lx for read only\n",
24974 start, start+size);
24975
24976 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24977 unsigned long start = PFN_ALIGN(_text);
24978 unsigned long size = PFN_ALIGN(_etext) - start;
24979
24980 + start = ktla_ktva(start);
24981 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24982 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24983 size >> 10);
24984 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24985 index 2b6b4a3..c17210d 100644
24986 --- a/arch/x86/mm/init_64.c
24987 +++ b/arch/x86/mm/init_64.c
24988 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24989 * around without checking the pgd every time.
24990 */
24991
24992 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24993 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24994 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24995
24996 int force_personality32;
24997 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24998
24999 for (address = start; address <= end; address += PGDIR_SIZE) {
25000 const pgd_t *pgd_ref = pgd_offset_k(address);
25001 +
25002 +#ifdef CONFIG_PAX_PER_CPU_PGD
25003 + unsigned long cpu;
25004 +#else
25005 struct page *page;
25006 +#endif
25007
25008 if (pgd_none(*pgd_ref))
25009 continue;
25010
25011 spin_lock(&pgd_lock);
25012 +
25013 +#ifdef CONFIG_PAX_PER_CPU_PGD
25014 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25015 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25016 +#else
25017 list_for_each_entry(page, &pgd_list, lru) {
25018 pgd_t *pgd;
25019 spinlock_t *pgt_lock;
25020 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25021 /* the pgt_lock only for Xen */
25022 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25023 spin_lock(pgt_lock);
25024 +#endif
25025
25026 if (pgd_none(*pgd))
25027 set_pgd(pgd, *pgd_ref);
25028 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25029 BUG_ON(pgd_page_vaddr(*pgd)
25030 != pgd_page_vaddr(*pgd_ref));
25031
25032 +#ifndef CONFIG_PAX_PER_CPU_PGD
25033 spin_unlock(pgt_lock);
25034 +#endif
25035 +
25036 }
25037 spin_unlock(&pgd_lock);
25038 }
25039 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25040 {
25041 if (pgd_none(*pgd)) {
25042 pud_t *pud = (pud_t *)spp_getpage();
25043 - pgd_populate(&init_mm, pgd, pud);
25044 + pgd_populate_kernel(&init_mm, pgd, pud);
25045 if (pud != pud_offset(pgd, 0))
25046 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25047 pud, pud_offset(pgd, 0));
25048 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25049 {
25050 if (pud_none(*pud)) {
25051 pmd_t *pmd = (pmd_t *) spp_getpage();
25052 - pud_populate(&init_mm, pud, pmd);
25053 + pud_populate_kernel(&init_mm, pud, pmd);
25054 if (pmd != pmd_offset(pud, 0))
25055 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25056 pmd, pmd_offset(pud, 0));
25057 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25058 pmd = fill_pmd(pud, vaddr);
25059 pte = fill_pte(pmd, vaddr);
25060
25061 + pax_open_kernel();
25062 set_pte(pte, new_pte);
25063 + pax_close_kernel();
25064
25065 /*
25066 * It's enough to flush this one mapping.
25067 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25068 pgd = pgd_offset_k((unsigned long)__va(phys));
25069 if (pgd_none(*pgd)) {
25070 pud = (pud_t *) spp_getpage();
25071 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25072 - _PAGE_USER));
25073 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25074 }
25075 pud = pud_offset(pgd, (unsigned long)__va(phys));
25076 if (pud_none(*pud)) {
25077 pmd = (pmd_t *) spp_getpage();
25078 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25079 - _PAGE_USER));
25080 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25081 }
25082 pmd = pmd_offset(pud, phys);
25083 BUG_ON(!pmd_none(*pmd));
25084 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25085 if (pfn >= pgt_buf_top)
25086 panic("alloc_low_page: ran out of memory");
25087
25088 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25089 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25090 clear_page(adr);
25091 *phys = pfn * PAGE_SIZE;
25092 return adr;
25093 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25094
25095 phys = __pa(virt);
25096 left = phys & (PAGE_SIZE - 1);
25097 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25098 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25099 adr = (void *)(((unsigned long)adr) | left);
25100
25101 return adr;
25102 @@ -548,7 +562,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25103 unmap_low_page(pmd);
25104
25105 spin_lock(&init_mm.page_table_lock);
25106 - pud_populate(&init_mm, pud, __va(pmd_phys));
25107 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25108 spin_unlock(&init_mm.page_table_lock);
25109 }
25110 __flush_tlb_all();
25111 @@ -594,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
25112 unmap_low_page(pud);
25113
25114 spin_lock(&init_mm.page_table_lock);
25115 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25116 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25117 spin_unlock(&init_mm.page_table_lock);
25118 pgd_changed = true;
25119 }
25120 @@ -686,6 +700,12 @@ void __init mem_init(void)
25121
25122 pci_iommu_alloc();
25123
25124 +#ifdef CONFIG_PAX_PER_CPU_PGD
25125 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25126 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25127 + KERNEL_PGD_PTRS);
25128 +#endif
25129 +
25130 /* clear_bss() already clear the empty_zero_page */
25131
25132 reservedpages = 0;
25133 @@ -846,8 +866,8 @@ int kern_addr_valid(unsigned long addr)
25134 static struct vm_area_struct gate_vma = {
25135 .vm_start = VSYSCALL_START,
25136 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25137 - .vm_page_prot = PAGE_READONLY_EXEC,
25138 - .vm_flags = VM_READ | VM_EXEC
25139 + .vm_page_prot = PAGE_READONLY,
25140 + .vm_flags = VM_READ
25141 };
25142
25143 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25144 @@ -881,7 +901,7 @@ int in_gate_area_no_mm(unsigned long addr)
25145
25146 const char *arch_vma_name(struct vm_area_struct *vma)
25147 {
25148 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25149 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25150 return "[vdso]";
25151 if (vma == &gate_vma)
25152 return "[vsyscall]";
25153 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25154 index 7b179b4..6bd1777 100644
25155 --- a/arch/x86/mm/iomap_32.c
25156 +++ b/arch/x86/mm/iomap_32.c
25157 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25158 type = kmap_atomic_idx_push();
25159 idx = type + KM_TYPE_NR * smp_processor_id();
25160 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25161 +
25162 + pax_open_kernel();
25163 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25164 + pax_close_kernel();
25165 +
25166 arch_flush_lazy_mmu_mode();
25167
25168 return (void *)vaddr;
25169 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25170 index 78fe3f1..8293b6f 100644
25171 --- a/arch/x86/mm/ioremap.c
25172 +++ b/arch/x86/mm/ioremap.c
25173 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25174 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25175 int is_ram = page_is_ram(pfn);
25176
25177 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25178 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25179 return NULL;
25180 WARN_ON_ONCE(is_ram);
25181 }
25182 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25183
25184 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25185 if (page_is_ram(start >> PAGE_SHIFT))
25186 +#ifdef CONFIG_HIGHMEM
25187 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25188 +#endif
25189 return __va(phys);
25190
25191 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25192 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25193 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25194
25195 static __initdata int after_paging_init;
25196 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25197 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25198
25199 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25200 {
25201 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25202 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25203
25204 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25205 - memset(bm_pte, 0, sizeof(bm_pte));
25206 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25207 + pmd_populate_user(&init_mm, pmd, bm_pte);
25208
25209 /*
25210 * The boot-ioremap range spans multiple pmds, for which
25211 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25212 index d87dd6d..bf3fa66 100644
25213 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25214 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25215 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25216 * memory (e.g. tracked pages)? For now, we need this to avoid
25217 * invoking kmemcheck for PnP BIOS calls.
25218 */
25219 - if (regs->flags & X86_VM_MASK)
25220 + if (v8086_mode(regs))
25221 return false;
25222 - if (regs->cs != __KERNEL_CS)
25223 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25224 return false;
25225
25226 pte = kmemcheck_pte_lookup(address);
25227 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25228 index 845df68..1d8d29f 100644
25229 --- a/arch/x86/mm/mmap.c
25230 +++ b/arch/x86/mm/mmap.c
25231 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25232 * Leave an at least ~128 MB hole with possible stack randomization.
25233 */
25234 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25235 -#define MAX_GAP (TASK_SIZE/6*5)
25236 +#define MAX_GAP (pax_task_size/6*5)
25237
25238 static int mmap_is_legacy(void)
25239 {
25240 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25241 return rnd << PAGE_SHIFT;
25242 }
25243
25244 -static unsigned long mmap_base(void)
25245 +static unsigned long mmap_base(struct mm_struct *mm)
25246 {
25247 unsigned long gap = rlimit(RLIMIT_STACK);
25248 + unsigned long pax_task_size = TASK_SIZE;
25249 +
25250 +#ifdef CONFIG_PAX_SEGMEXEC
25251 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25252 + pax_task_size = SEGMEXEC_TASK_SIZE;
25253 +#endif
25254
25255 if (gap < MIN_GAP)
25256 gap = MIN_GAP;
25257 else if (gap > MAX_GAP)
25258 gap = MAX_GAP;
25259
25260 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25261 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25262 }
25263
25264 /*
25265 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25266 * does, but not when emulating X86_32
25267 */
25268 -static unsigned long mmap_legacy_base(void)
25269 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25270 {
25271 - if (mmap_is_ia32())
25272 + if (mmap_is_ia32()) {
25273 +
25274 +#ifdef CONFIG_PAX_SEGMEXEC
25275 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25276 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25277 + else
25278 +#endif
25279 +
25280 return TASK_UNMAPPED_BASE;
25281 - else
25282 + } else
25283 return TASK_UNMAPPED_BASE + mmap_rnd();
25284 }
25285
25286 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25287 void arch_pick_mmap_layout(struct mm_struct *mm)
25288 {
25289 if (mmap_is_legacy()) {
25290 - mm->mmap_base = mmap_legacy_base();
25291 + mm->mmap_base = mmap_legacy_base(mm);
25292 +
25293 +#ifdef CONFIG_PAX_RANDMMAP
25294 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25295 + mm->mmap_base += mm->delta_mmap;
25296 +#endif
25297 +
25298 mm->get_unmapped_area = arch_get_unmapped_area;
25299 mm->unmap_area = arch_unmap_area;
25300 } else {
25301 - mm->mmap_base = mmap_base();
25302 + mm->mmap_base = mmap_base(mm);
25303 +
25304 +#ifdef CONFIG_PAX_RANDMMAP
25305 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25306 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25307 +#endif
25308 +
25309 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25310 mm->unmap_area = arch_unmap_area_topdown;
25311 }
25312 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25313 index dc0b727..dc9d71a 100644
25314 --- a/arch/x86/mm/mmio-mod.c
25315 +++ b/arch/x86/mm/mmio-mod.c
25316 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25317 break;
25318 default:
25319 {
25320 - unsigned char *ip = (unsigned char *)instptr;
25321 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25322 my_trace->opcode = MMIO_UNKNOWN_OP;
25323 my_trace->width = 0;
25324 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25325 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25326 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25327 void __iomem *addr)
25328 {
25329 - static atomic_t next_id;
25330 + static atomic_unchecked_t next_id;
25331 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25332 /* These are page-unaligned. */
25333 struct mmiotrace_map map = {
25334 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25335 .private = trace
25336 },
25337 .phys = offset,
25338 - .id = atomic_inc_return(&next_id)
25339 + .id = atomic_inc_return_unchecked(&next_id)
25340 };
25341 map.map_id = trace->id;
25342
25343 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25344 index b008656..773eac2 100644
25345 --- a/arch/x86/mm/pageattr-test.c
25346 +++ b/arch/x86/mm/pageattr-test.c
25347 @@ -36,7 +36,7 @@ enum {
25348
25349 static int pte_testbit(pte_t pte)
25350 {
25351 - return pte_flags(pte) & _PAGE_UNUSED1;
25352 + return pte_flags(pte) & _PAGE_CPA_TEST;
25353 }
25354
25355 struct split_state {
25356 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25357 index a718e0d..45efc32 100644
25358 --- a/arch/x86/mm/pageattr.c
25359 +++ b/arch/x86/mm/pageattr.c
25360 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25361 */
25362 #ifdef CONFIG_PCI_BIOS
25363 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25364 - pgprot_val(forbidden) |= _PAGE_NX;
25365 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25366 #endif
25367
25368 /*
25369 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25370 * Does not cover __inittext since that is gone later on. On
25371 * 64bit we do not enforce !NX on the low mapping
25372 */
25373 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25374 - pgprot_val(forbidden) |= _PAGE_NX;
25375 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25376 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25377
25378 +#ifdef CONFIG_DEBUG_RODATA
25379 /*
25380 * The .rodata section needs to be read-only. Using the pfn
25381 * catches all aliases.
25382 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25383 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25384 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25385 pgprot_val(forbidden) |= _PAGE_RW;
25386 +#endif
25387
25388 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25389 /*
25390 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25391 }
25392 #endif
25393
25394 +#ifdef CONFIG_PAX_KERNEXEC
25395 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25396 + pgprot_val(forbidden) |= _PAGE_RW;
25397 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25398 + }
25399 +#endif
25400 +
25401 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25402
25403 return prot;
25404 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25405 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25406 {
25407 /* change init_mm */
25408 + pax_open_kernel();
25409 set_pte_atomic(kpte, pte);
25410 +
25411 #ifdef CONFIG_X86_32
25412 if (!SHARED_KERNEL_PMD) {
25413 +
25414 +#ifdef CONFIG_PAX_PER_CPU_PGD
25415 + unsigned long cpu;
25416 +#else
25417 struct page *page;
25418 +#endif
25419
25420 +#ifdef CONFIG_PAX_PER_CPU_PGD
25421 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25422 + pgd_t *pgd = get_cpu_pgd(cpu);
25423 +#else
25424 list_for_each_entry(page, &pgd_list, lru) {
25425 - pgd_t *pgd;
25426 + pgd_t *pgd = (pgd_t *)page_address(page);
25427 +#endif
25428 +
25429 pud_t *pud;
25430 pmd_t *pmd;
25431
25432 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25433 + pgd += pgd_index(address);
25434 pud = pud_offset(pgd, address);
25435 pmd = pmd_offset(pud, address);
25436 set_pte_atomic((pte_t *)pmd, pte);
25437 }
25438 }
25439 #endif
25440 + pax_close_kernel();
25441 }
25442
25443 static int
25444 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25445 index 3d68ef6..7f69136 100644
25446 --- a/arch/x86/mm/pat.c
25447 +++ b/arch/x86/mm/pat.c
25448 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
25449
25450 if (!entry) {
25451 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
25452 - current->comm, current->pid, start, end - 1);
25453 + current->comm, task_pid_nr(current), start, end - 1);
25454 return -EINVAL;
25455 }
25456
25457 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25458
25459 while (cursor < to) {
25460 if (!devmem_is_allowed(pfn)) {
25461 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
25462 - current->comm, from, to - 1);
25463 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
25464 + current->comm, from, to - 1, cursor);
25465 return 0;
25466 }
25467 cursor += PAGE_SIZE;
25468 @@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25469 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
25470 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
25471 "for [mem %#010Lx-%#010Lx]\n",
25472 - current->comm, current->pid,
25473 + current->comm, task_pid_nr(current),
25474 cattr_name(flags),
25475 base, (unsigned long long)(base + size-1));
25476 return -EINVAL;
25477 @@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25478 flags = lookup_memtype(paddr);
25479 if (want_flags != flags) {
25480 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
25481 - current->comm, current->pid,
25482 + current->comm, task_pid_nr(current),
25483 cattr_name(want_flags),
25484 (unsigned long long)paddr,
25485 (unsigned long long)(paddr + size - 1),
25486 @@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25487 free_memtype(paddr, paddr + size);
25488 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25489 " for [mem %#010Lx-%#010Lx], got %s\n",
25490 - current->comm, current->pid,
25491 + current->comm, task_pid_nr(current),
25492 cattr_name(want_flags),
25493 (unsigned long long)paddr,
25494 (unsigned long long)(paddr + size - 1),
25495 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25496 index 9f0614d..92ae64a 100644
25497 --- a/arch/x86/mm/pf_in.c
25498 +++ b/arch/x86/mm/pf_in.c
25499 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25500 int i;
25501 enum reason_type rv = OTHERS;
25502
25503 - p = (unsigned char *)ins_addr;
25504 + p = (unsigned char *)ktla_ktva(ins_addr);
25505 p += skip_prefix(p, &prf);
25506 p += get_opcode(p, &opcode);
25507
25508 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25509 struct prefix_bits prf;
25510 int i;
25511
25512 - p = (unsigned char *)ins_addr;
25513 + p = (unsigned char *)ktla_ktva(ins_addr);
25514 p += skip_prefix(p, &prf);
25515 p += get_opcode(p, &opcode);
25516
25517 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25518 struct prefix_bits prf;
25519 int i;
25520
25521 - p = (unsigned char *)ins_addr;
25522 + p = (unsigned char *)ktla_ktva(ins_addr);
25523 p += skip_prefix(p, &prf);
25524 p += get_opcode(p, &opcode);
25525
25526 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25527 struct prefix_bits prf;
25528 int i;
25529
25530 - p = (unsigned char *)ins_addr;
25531 + p = (unsigned char *)ktla_ktva(ins_addr);
25532 p += skip_prefix(p, &prf);
25533 p += get_opcode(p, &opcode);
25534 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25535 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25536 struct prefix_bits prf;
25537 int i;
25538
25539 - p = (unsigned char *)ins_addr;
25540 + p = (unsigned char *)ktla_ktva(ins_addr);
25541 p += skip_prefix(p, &prf);
25542 p += get_opcode(p, &opcode);
25543 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25544 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25545 index 8573b83..4f3ed7e 100644
25546 --- a/arch/x86/mm/pgtable.c
25547 +++ b/arch/x86/mm/pgtable.c
25548 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25549 list_del(&page->lru);
25550 }
25551
25552 -#define UNSHARED_PTRS_PER_PGD \
25553 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25554 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25555 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25556
25557 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25558 +{
25559 + unsigned int count = USER_PGD_PTRS;
25560
25561 + while (count--)
25562 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25563 +}
25564 +#endif
25565 +
25566 +#ifdef CONFIG_PAX_PER_CPU_PGD
25567 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25568 +{
25569 + unsigned int count = USER_PGD_PTRS;
25570 +
25571 + while (count--) {
25572 + pgd_t pgd;
25573 +
25574 +#ifdef CONFIG_X86_64
25575 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25576 +#else
25577 + pgd = *src++;
25578 +#endif
25579 +
25580 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25581 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25582 +#endif
25583 +
25584 + *dst++ = pgd;
25585 + }
25586 +
25587 +}
25588 +#endif
25589 +
25590 +#ifdef CONFIG_X86_64
25591 +#define pxd_t pud_t
25592 +#define pyd_t pgd_t
25593 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25594 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25595 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25596 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25597 +#define PYD_SIZE PGDIR_SIZE
25598 +#else
25599 +#define pxd_t pmd_t
25600 +#define pyd_t pud_t
25601 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25602 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25603 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25604 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25605 +#define PYD_SIZE PUD_SIZE
25606 +#endif
25607 +
25608 +#ifdef CONFIG_PAX_PER_CPU_PGD
25609 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25610 +static inline void pgd_dtor(pgd_t *pgd) {}
25611 +#else
25612 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25613 {
25614 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25615 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25616 pgd_list_del(pgd);
25617 spin_unlock(&pgd_lock);
25618 }
25619 +#endif
25620
25621 /*
25622 * List of all pgd's needed for non-PAE so it can invalidate entries
25623 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25624 * -- wli
25625 */
25626
25627 -#ifdef CONFIG_X86_PAE
25628 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25629 /*
25630 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25631 * updating the top-level pagetable entries to guarantee the
25632 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25633 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25634 * and initialize the kernel pmds here.
25635 */
25636 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25637 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25638
25639 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25640 {
25641 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25642 */
25643 flush_tlb_mm(mm);
25644 }
25645 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25646 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25647 #else /* !CONFIG_X86_PAE */
25648
25649 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25650 -#define PREALLOCATED_PMDS 0
25651 +#define PREALLOCATED_PXDS 0
25652
25653 #endif /* CONFIG_X86_PAE */
25654
25655 -static void free_pmds(pmd_t *pmds[])
25656 +static void free_pxds(pxd_t *pxds[])
25657 {
25658 int i;
25659
25660 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25661 - if (pmds[i])
25662 - free_page((unsigned long)pmds[i]);
25663 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25664 + if (pxds[i])
25665 + free_page((unsigned long)pxds[i]);
25666 }
25667
25668 -static int preallocate_pmds(pmd_t *pmds[])
25669 +static int preallocate_pxds(pxd_t *pxds[])
25670 {
25671 int i;
25672 bool failed = false;
25673
25674 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25675 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25676 - if (pmd == NULL)
25677 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25678 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25679 + if (pxd == NULL)
25680 failed = true;
25681 - pmds[i] = pmd;
25682 + pxds[i] = pxd;
25683 }
25684
25685 if (failed) {
25686 - free_pmds(pmds);
25687 + free_pxds(pxds);
25688 return -ENOMEM;
25689 }
25690
25691 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25692 * preallocate which never got a corresponding vma will need to be
25693 * freed manually.
25694 */
25695 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25696 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25697 {
25698 int i;
25699
25700 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25701 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25702 pgd_t pgd = pgdp[i];
25703
25704 if (pgd_val(pgd) != 0) {
25705 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25706 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25707
25708 - pgdp[i] = native_make_pgd(0);
25709 + set_pgd(pgdp + i, native_make_pgd(0));
25710
25711 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25712 - pmd_free(mm, pmd);
25713 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25714 + pxd_free(mm, pxd);
25715 }
25716 }
25717 }
25718
25719 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25720 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25721 {
25722 - pud_t *pud;
25723 + pyd_t *pyd;
25724 unsigned long addr;
25725 int i;
25726
25727 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25728 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25729 return;
25730
25731 - pud = pud_offset(pgd, 0);
25732 +#ifdef CONFIG_X86_64
25733 + pyd = pyd_offset(mm, 0L);
25734 +#else
25735 + pyd = pyd_offset(pgd, 0L);
25736 +#endif
25737
25738 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25739 - i++, pud++, addr += PUD_SIZE) {
25740 - pmd_t *pmd = pmds[i];
25741 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25742 + i++, pyd++, addr += PYD_SIZE) {
25743 + pxd_t *pxd = pxds[i];
25744
25745 if (i >= KERNEL_PGD_BOUNDARY)
25746 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25747 - sizeof(pmd_t) * PTRS_PER_PMD);
25748 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25749 + sizeof(pxd_t) * PTRS_PER_PMD);
25750
25751 - pud_populate(mm, pud, pmd);
25752 + pyd_populate(mm, pyd, pxd);
25753 }
25754 }
25755
25756 pgd_t *pgd_alloc(struct mm_struct *mm)
25757 {
25758 pgd_t *pgd;
25759 - pmd_t *pmds[PREALLOCATED_PMDS];
25760 + pxd_t *pxds[PREALLOCATED_PXDS];
25761
25762 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25763
25764 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25765
25766 mm->pgd = pgd;
25767
25768 - if (preallocate_pmds(pmds) != 0)
25769 + if (preallocate_pxds(pxds) != 0)
25770 goto out_free_pgd;
25771
25772 if (paravirt_pgd_alloc(mm) != 0)
25773 - goto out_free_pmds;
25774 + goto out_free_pxds;
25775
25776 /*
25777 * Make sure that pre-populating the pmds is atomic with
25778 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25779 spin_lock(&pgd_lock);
25780
25781 pgd_ctor(mm, pgd);
25782 - pgd_prepopulate_pmd(mm, pgd, pmds);
25783 + pgd_prepopulate_pxd(mm, pgd, pxds);
25784
25785 spin_unlock(&pgd_lock);
25786
25787 return pgd;
25788
25789 -out_free_pmds:
25790 - free_pmds(pmds);
25791 +out_free_pxds:
25792 + free_pxds(pxds);
25793 out_free_pgd:
25794 free_page((unsigned long)pgd);
25795 out:
25796 @@ -295,7 +356,7 @@ out:
25797
25798 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25799 {
25800 - pgd_mop_up_pmds(mm, pgd);
25801 + pgd_mop_up_pxds(mm, pgd);
25802 pgd_dtor(pgd);
25803 paravirt_pgd_free(mm, pgd);
25804 free_page((unsigned long)pgd);
25805 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25806 index a69bcb8..19068ab 100644
25807 --- a/arch/x86/mm/pgtable_32.c
25808 +++ b/arch/x86/mm/pgtable_32.c
25809 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25810 return;
25811 }
25812 pte = pte_offset_kernel(pmd, vaddr);
25813 +
25814 + pax_open_kernel();
25815 if (pte_val(pteval))
25816 set_pte_at(&init_mm, vaddr, pte, pteval);
25817 else
25818 pte_clear(&init_mm, vaddr, pte);
25819 + pax_close_kernel();
25820
25821 /*
25822 * It's enough to flush this one mapping.
25823 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25824 index 410531d..0f16030 100644
25825 --- a/arch/x86/mm/setup_nx.c
25826 +++ b/arch/x86/mm/setup_nx.c
25827 @@ -5,8 +5,10 @@
25828 #include <asm/pgtable.h>
25829 #include <asm/proto.h>
25830
25831 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25832 static int disable_nx __cpuinitdata;
25833
25834 +#ifndef CONFIG_PAX_PAGEEXEC
25835 /*
25836 * noexec = on|off
25837 *
25838 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25839 return 0;
25840 }
25841 early_param("noexec", noexec_setup);
25842 +#endif
25843 +
25844 +#endif
25845
25846 void __cpuinit x86_configure_nx(void)
25847 {
25848 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25849 if (cpu_has_nx && !disable_nx)
25850 __supported_pte_mask |= _PAGE_NX;
25851 else
25852 +#endif
25853 __supported_pte_mask &= ~_PAGE_NX;
25854 }
25855
25856 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25857 index 5e57e11..64874249 100644
25858 --- a/arch/x86/mm/tlb.c
25859 +++ b/arch/x86/mm/tlb.c
25860 @@ -66,7 +66,11 @@ void leave_mm(int cpu)
25861 BUG();
25862 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
25863 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
25864 +
25865 +#ifndef CONFIG_PAX_PER_CPU_PGD
25866 load_cr3(swapper_pg_dir);
25867 +#endif
25868 +
25869 }
25870 }
25871 EXPORT_SYMBOL_GPL(leave_mm);
25872 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25873 index 877b9a1..a8ecf42 100644
25874 --- a/arch/x86/net/bpf_jit.S
25875 +++ b/arch/x86/net/bpf_jit.S
25876 @@ -9,6 +9,7 @@
25877 */
25878 #include <linux/linkage.h>
25879 #include <asm/dwarf2.h>
25880 +#include <asm/alternative-asm.h>
25881
25882 /*
25883 * Calling convention :
25884 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25885 jle bpf_slow_path_word
25886 mov (SKBDATA,%rsi),%eax
25887 bswap %eax /* ntohl() */
25888 + pax_force_retaddr
25889 ret
25890
25891 sk_load_half:
25892 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25893 jle bpf_slow_path_half
25894 movzwl (SKBDATA,%rsi),%eax
25895 rol $8,%ax # ntohs()
25896 + pax_force_retaddr
25897 ret
25898
25899 sk_load_byte:
25900 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25901 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25902 jle bpf_slow_path_byte
25903 movzbl (SKBDATA,%rsi),%eax
25904 + pax_force_retaddr
25905 ret
25906
25907 /**
25908 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25909 movzbl (SKBDATA,%rsi),%ebx
25910 and $15,%bl
25911 shl $2,%bl
25912 + pax_force_retaddr
25913 ret
25914
25915 /* rsi contains offset and can be scratched */
25916 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25917 js bpf_error
25918 mov -12(%rbp),%eax
25919 bswap %eax
25920 + pax_force_retaddr
25921 ret
25922
25923 bpf_slow_path_half:
25924 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25925 mov -12(%rbp),%ax
25926 rol $8,%ax
25927 movzwl %ax,%eax
25928 + pax_force_retaddr
25929 ret
25930
25931 bpf_slow_path_byte:
25932 bpf_slow_path_common(1)
25933 js bpf_error
25934 movzbl -12(%rbp),%eax
25935 + pax_force_retaddr
25936 ret
25937
25938 bpf_slow_path_byte_msh:
25939 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25940 and $15,%al
25941 shl $2,%al
25942 xchg %eax,%ebx
25943 + pax_force_retaddr
25944 ret
25945
25946 #define sk_negative_common(SIZE) \
25947 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25948 sk_negative_common(4)
25949 mov (%rax), %eax
25950 bswap %eax
25951 + pax_force_retaddr
25952 ret
25953
25954 bpf_slow_path_half_neg:
25955 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25956 mov (%rax),%ax
25957 rol $8,%ax
25958 movzwl %ax,%eax
25959 + pax_force_retaddr
25960 ret
25961
25962 bpf_slow_path_byte_neg:
25963 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25964 .globl sk_load_byte_negative_offset
25965 sk_negative_common(1)
25966 movzbl (%rax), %eax
25967 + pax_force_retaddr
25968 ret
25969
25970 bpf_slow_path_byte_msh_neg:
25971 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25972 and $15,%al
25973 shl $2,%al
25974 xchg %eax,%ebx
25975 + pax_force_retaddr
25976 ret
25977
25978 bpf_error:
25979 @@ -197,4 +210,5 @@ bpf_error:
25980 xor %eax,%eax
25981 mov -8(%rbp),%rbx
25982 leaveq
25983 + pax_force_retaddr
25984 ret
25985 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25986 index 0597f95..a12c36e 100644
25987 --- a/arch/x86/net/bpf_jit_comp.c
25988 +++ b/arch/x86/net/bpf_jit_comp.c
25989 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25990 set_fs(old_fs);
25991 }
25992
25993 +struct bpf_jit_work {
25994 + struct work_struct work;
25995 + void *image;
25996 +};
25997 +
25998 #define CHOOSE_LOAD_FUNC(K, func) \
25999 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26000
26001 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26002 if (addrs == NULL)
26003 return;
26004
26005 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26006 + if (!fp->work)
26007 + goto out;
26008 +
26009 /* Before first pass, make a rough estimation of addrs[]
26010 * each bpf instruction is translated to less than 64 bytes
26011 */
26012 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26013 break;
26014 default:
26015 /* hmm, too complex filter, give up with jit compiler */
26016 - goto out;
26017 + goto error;
26018 }
26019 ilen = prog - temp;
26020 if (image) {
26021 if (unlikely(proglen + ilen > oldproglen)) {
26022 pr_err("bpb_jit_compile fatal error\n");
26023 - kfree(addrs);
26024 - module_free(NULL, image);
26025 - return;
26026 + module_free_exec(NULL, image);
26027 + goto error;
26028 }
26029 + pax_open_kernel();
26030 memcpy(image + proglen, temp, ilen);
26031 + pax_close_kernel();
26032 }
26033 proglen += ilen;
26034 addrs[i] = proglen;
26035 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26036 break;
26037 }
26038 if (proglen == oldproglen) {
26039 - image = module_alloc(max_t(unsigned int,
26040 - proglen,
26041 - sizeof(struct work_struct)));
26042 + image = module_alloc_exec(proglen);
26043 if (!image)
26044 - goto out;
26045 + goto error;
26046 }
26047 oldproglen = proglen;
26048 }
26049 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26050 bpf_flush_icache(image, image + proglen);
26051
26052 fp->bpf_func = (void *)image;
26053 - }
26054 + } else
26055 +error:
26056 + kfree(fp->work);
26057 +
26058 out:
26059 kfree(addrs);
26060 return;
26061 @@ -648,18 +659,20 @@ out:
26062
26063 static void jit_free_defer(struct work_struct *arg)
26064 {
26065 - module_free(NULL, arg);
26066 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26067 + kfree(arg);
26068 }
26069
26070 /* run from softirq, we must use a work_struct to call
26071 - * module_free() from process context
26072 + * module_free_exec() from process context
26073 */
26074 void bpf_jit_free(struct sk_filter *fp)
26075 {
26076 if (fp->bpf_func != sk_run_filter) {
26077 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26078 + struct work_struct *work = &fp->work->work;
26079
26080 INIT_WORK(work, jit_free_defer);
26081 + fp->work->image = fp->bpf_func;
26082 schedule_work(work);
26083 }
26084 }
26085 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26086 index d6aa6e8..266395a 100644
26087 --- a/arch/x86/oprofile/backtrace.c
26088 +++ b/arch/x86/oprofile/backtrace.c
26089 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26090 struct stack_frame_ia32 *fp;
26091 unsigned long bytes;
26092
26093 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26094 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26095 if (bytes != sizeof(bufhead))
26096 return NULL;
26097
26098 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26099 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26100
26101 oprofile_add_trace(bufhead[0].return_address);
26102
26103 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26104 struct stack_frame bufhead[2];
26105 unsigned long bytes;
26106
26107 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26108 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26109 if (bytes != sizeof(bufhead))
26110 return NULL;
26111
26112 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26113 {
26114 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26115
26116 - if (!user_mode_vm(regs)) {
26117 + if (!user_mode(regs)) {
26118 unsigned long stack = kernel_stack_pointer(regs);
26119 if (depth)
26120 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26121 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26122 index 140942f..8a5cc55 100644
26123 --- a/arch/x86/pci/mrst.c
26124 +++ b/arch/x86/pci/mrst.c
26125 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26126 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26127 pci_mmcfg_late_init();
26128 pcibios_enable_irq = mrst_pci_irq_enable;
26129 - pci_root_ops = pci_mrst_ops;
26130 + pax_open_kernel();
26131 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26132 + pax_close_kernel();
26133 pci_soc_mode = 1;
26134 /* Continue with standard init */
26135 return 1;
26136 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26137 index da8fe05..7ee6704 100644
26138 --- a/arch/x86/pci/pcbios.c
26139 +++ b/arch/x86/pci/pcbios.c
26140 @@ -79,50 +79,93 @@ union bios32 {
26141 static struct {
26142 unsigned long address;
26143 unsigned short segment;
26144 -} bios32_indirect = { 0, __KERNEL_CS };
26145 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26146
26147 /*
26148 * Returns the entry point for the given service, NULL on error
26149 */
26150
26151 -static unsigned long bios32_service(unsigned long service)
26152 +static unsigned long __devinit bios32_service(unsigned long service)
26153 {
26154 unsigned char return_code; /* %al */
26155 unsigned long address; /* %ebx */
26156 unsigned long length; /* %ecx */
26157 unsigned long entry; /* %edx */
26158 unsigned long flags;
26159 + struct desc_struct d, *gdt;
26160
26161 local_irq_save(flags);
26162 - __asm__("lcall *(%%edi); cld"
26163 +
26164 + gdt = get_cpu_gdt_table(smp_processor_id());
26165 +
26166 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26167 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26168 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26169 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26170 +
26171 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26172 : "=a" (return_code),
26173 "=b" (address),
26174 "=c" (length),
26175 "=d" (entry)
26176 : "0" (service),
26177 "1" (0),
26178 - "D" (&bios32_indirect));
26179 + "D" (&bios32_indirect),
26180 + "r"(__PCIBIOS_DS)
26181 + : "memory");
26182 +
26183 + pax_open_kernel();
26184 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26185 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26186 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26187 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26188 + pax_close_kernel();
26189 +
26190 local_irq_restore(flags);
26191
26192 switch (return_code) {
26193 - case 0:
26194 - return address + entry;
26195 - case 0x80: /* Not present */
26196 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26197 - return 0;
26198 - default: /* Shouldn't happen */
26199 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26200 - service, return_code);
26201 + case 0: {
26202 + int cpu;
26203 + unsigned char flags;
26204 +
26205 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26206 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26207 + printk(KERN_WARNING "bios32_service: not valid\n");
26208 return 0;
26209 + }
26210 + address = address + PAGE_OFFSET;
26211 + length += 16UL; /* some BIOSs underreport this... */
26212 + flags = 4;
26213 + if (length >= 64*1024*1024) {
26214 + length >>= PAGE_SHIFT;
26215 + flags |= 8;
26216 + }
26217 +
26218 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26219 + gdt = get_cpu_gdt_table(cpu);
26220 + pack_descriptor(&d, address, length, 0x9b, flags);
26221 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26222 + pack_descriptor(&d, address, length, 0x93, flags);
26223 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26224 + }
26225 + return entry;
26226 + }
26227 + case 0x80: /* Not present */
26228 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26229 + return 0;
26230 + default: /* Shouldn't happen */
26231 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26232 + service, return_code);
26233 + return 0;
26234 }
26235 }
26236
26237 static struct {
26238 unsigned long address;
26239 unsigned short segment;
26240 -} pci_indirect = { 0, __KERNEL_CS };
26241 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26242
26243 -static int pci_bios_present;
26244 +static int pci_bios_present __read_only;
26245
26246 static int __devinit check_pcibios(void)
26247 {
26248 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26249 unsigned long flags, pcibios_entry;
26250
26251 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26252 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26253 + pci_indirect.address = pcibios_entry;
26254
26255 local_irq_save(flags);
26256 - __asm__(
26257 - "lcall *(%%edi); cld\n\t"
26258 + __asm__("movw %w6, %%ds\n\t"
26259 + "lcall *%%ss:(%%edi); cld\n\t"
26260 + "push %%ss\n\t"
26261 + "pop %%ds\n\t"
26262 "jc 1f\n\t"
26263 "xor %%ah, %%ah\n"
26264 "1:"
26265 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26266 "=b" (ebx),
26267 "=c" (ecx)
26268 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26269 - "D" (&pci_indirect)
26270 + "D" (&pci_indirect),
26271 + "r" (__PCIBIOS_DS)
26272 : "memory");
26273 local_irq_restore(flags);
26274
26275 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26276
26277 switch (len) {
26278 case 1:
26279 - __asm__("lcall *(%%esi); cld\n\t"
26280 + __asm__("movw %w6, %%ds\n\t"
26281 + "lcall *%%ss:(%%esi); cld\n\t"
26282 + "push %%ss\n\t"
26283 + "pop %%ds\n\t"
26284 "jc 1f\n\t"
26285 "xor %%ah, %%ah\n"
26286 "1:"
26287 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26288 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26289 "b" (bx),
26290 "D" ((long)reg),
26291 - "S" (&pci_indirect));
26292 + "S" (&pci_indirect),
26293 + "r" (__PCIBIOS_DS));
26294 /*
26295 * Zero-extend the result beyond 8 bits, do not trust the
26296 * BIOS having done it:
26297 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26298 *value &= 0xff;
26299 break;
26300 case 2:
26301 - __asm__("lcall *(%%esi); cld\n\t"
26302 + __asm__("movw %w6, %%ds\n\t"
26303 + "lcall *%%ss:(%%esi); cld\n\t"
26304 + "push %%ss\n\t"
26305 + "pop %%ds\n\t"
26306 "jc 1f\n\t"
26307 "xor %%ah, %%ah\n"
26308 "1:"
26309 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26310 : "1" (PCIBIOS_READ_CONFIG_WORD),
26311 "b" (bx),
26312 "D" ((long)reg),
26313 - "S" (&pci_indirect));
26314 + "S" (&pci_indirect),
26315 + "r" (__PCIBIOS_DS));
26316 /*
26317 * Zero-extend the result beyond 16 bits, do not trust the
26318 * BIOS having done it:
26319 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26320 *value &= 0xffff;
26321 break;
26322 case 4:
26323 - __asm__("lcall *(%%esi); cld\n\t"
26324 + __asm__("movw %w6, %%ds\n\t"
26325 + "lcall *%%ss:(%%esi); cld\n\t"
26326 + "push %%ss\n\t"
26327 + "pop %%ds\n\t"
26328 "jc 1f\n\t"
26329 "xor %%ah, %%ah\n"
26330 "1:"
26331 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26332 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26333 "b" (bx),
26334 "D" ((long)reg),
26335 - "S" (&pci_indirect));
26336 + "S" (&pci_indirect),
26337 + "r" (__PCIBIOS_DS));
26338 break;
26339 }
26340
26341 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26342
26343 switch (len) {
26344 case 1:
26345 - __asm__("lcall *(%%esi); cld\n\t"
26346 + __asm__("movw %w6, %%ds\n\t"
26347 + "lcall *%%ss:(%%esi); cld\n\t"
26348 + "push %%ss\n\t"
26349 + "pop %%ds\n\t"
26350 "jc 1f\n\t"
26351 "xor %%ah, %%ah\n"
26352 "1:"
26353 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26354 "c" (value),
26355 "b" (bx),
26356 "D" ((long)reg),
26357 - "S" (&pci_indirect));
26358 + "S" (&pci_indirect),
26359 + "r" (__PCIBIOS_DS));
26360 break;
26361 case 2:
26362 - __asm__("lcall *(%%esi); cld\n\t"
26363 + __asm__("movw %w6, %%ds\n\t"
26364 + "lcall *%%ss:(%%esi); cld\n\t"
26365 + "push %%ss\n\t"
26366 + "pop %%ds\n\t"
26367 "jc 1f\n\t"
26368 "xor %%ah, %%ah\n"
26369 "1:"
26370 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26371 "c" (value),
26372 "b" (bx),
26373 "D" ((long)reg),
26374 - "S" (&pci_indirect));
26375 + "S" (&pci_indirect),
26376 + "r" (__PCIBIOS_DS));
26377 break;
26378 case 4:
26379 - __asm__("lcall *(%%esi); cld\n\t"
26380 + __asm__("movw %w6, %%ds\n\t"
26381 + "lcall *%%ss:(%%esi); cld\n\t"
26382 + "push %%ss\n\t"
26383 + "pop %%ds\n\t"
26384 "jc 1f\n\t"
26385 "xor %%ah, %%ah\n"
26386 "1:"
26387 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26388 "c" (value),
26389 "b" (bx),
26390 "D" ((long)reg),
26391 - "S" (&pci_indirect));
26392 + "S" (&pci_indirect),
26393 + "r" (__PCIBIOS_DS));
26394 break;
26395 }
26396
26397 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26398
26399 DBG("PCI: Fetching IRQ routing table... ");
26400 __asm__("push %%es\n\t"
26401 + "movw %w8, %%ds\n\t"
26402 "push %%ds\n\t"
26403 "pop %%es\n\t"
26404 - "lcall *(%%esi); cld\n\t"
26405 + "lcall *%%ss:(%%esi); cld\n\t"
26406 "pop %%es\n\t"
26407 + "push %%ss\n\t"
26408 + "pop %%ds\n"
26409 "jc 1f\n\t"
26410 "xor %%ah, %%ah\n"
26411 "1:"
26412 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26413 "1" (0),
26414 "D" ((long) &opt),
26415 "S" (&pci_indirect),
26416 - "m" (opt)
26417 + "m" (opt),
26418 + "r" (__PCIBIOS_DS)
26419 : "memory");
26420 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26421 if (ret & 0xff00)
26422 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26423 {
26424 int ret;
26425
26426 - __asm__("lcall *(%%esi); cld\n\t"
26427 + __asm__("movw %w5, %%ds\n\t"
26428 + "lcall *%%ss:(%%esi); cld\n\t"
26429 + "push %%ss\n\t"
26430 + "pop %%ds\n"
26431 "jc 1f\n\t"
26432 "xor %%ah, %%ah\n"
26433 "1:"
26434 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26435 : "0" (PCIBIOS_SET_PCI_HW_INT),
26436 "b" ((dev->bus->number << 8) | dev->devfn),
26437 "c" ((irq << 8) | (pin + 10)),
26438 - "S" (&pci_indirect));
26439 + "S" (&pci_indirect),
26440 + "r" (__PCIBIOS_DS));
26441 return !(ret & 0xff00);
26442 }
26443 EXPORT_SYMBOL(pcibios_set_irq_routing);
26444 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26445 index 40e4469..1ab536e 100644
26446 --- a/arch/x86/platform/efi/efi_32.c
26447 +++ b/arch/x86/platform/efi/efi_32.c
26448 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26449 {
26450 struct desc_ptr gdt_descr;
26451
26452 +#ifdef CONFIG_PAX_KERNEXEC
26453 + struct desc_struct d;
26454 +#endif
26455 +
26456 local_irq_save(efi_rt_eflags);
26457
26458 load_cr3(initial_page_table);
26459 __flush_tlb_all();
26460
26461 +#ifdef CONFIG_PAX_KERNEXEC
26462 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26463 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26464 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26465 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26466 +#endif
26467 +
26468 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26469 gdt_descr.size = GDT_SIZE - 1;
26470 load_gdt(&gdt_descr);
26471 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26472 {
26473 struct desc_ptr gdt_descr;
26474
26475 +#ifdef CONFIG_PAX_KERNEXEC
26476 + struct desc_struct d;
26477 +
26478 + memset(&d, 0, sizeof d);
26479 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26480 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26481 +#endif
26482 +
26483 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26484 gdt_descr.size = GDT_SIZE - 1;
26485 load_gdt(&gdt_descr);
26486 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26487 index fbe66e6..eae5e38 100644
26488 --- a/arch/x86/platform/efi/efi_stub_32.S
26489 +++ b/arch/x86/platform/efi/efi_stub_32.S
26490 @@ -6,7 +6,9 @@
26491 */
26492
26493 #include <linux/linkage.h>
26494 +#include <linux/init.h>
26495 #include <asm/page_types.h>
26496 +#include <asm/segment.h>
26497
26498 /*
26499 * efi_call_phys(void *, ...) is a function with variable parameters.
26500 @@ -20,7 +22,7 @@
26501 * service functions will comply with gcc calling convention, too.
26502 */
26503
26504 -.text
26505 +__INIT
26506 ENTRY(efi_call_phys)
26507 /*
26508 * 0. The function can only be called in Linux kernel. So CS has been
26509 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
26510 * The mapping of lower virtual memory has been created in prelog and
26511 * epilog.
26512 */
26513 - movl $1f, %edx
26514 - subl $__PAGE_OFFSET, %edx
26515 - jmp *%edx
26516 +#ifdef CONFIG_PAX_KERNEXEC
26517 + movl $(__KERNEXEC_EFI_DS), %edx
26518 + mov %edx, %ds
26519 + mov %edx, %es
26520 + mov %edx, %ss
26521 + addl $2f,(1f)
26522 + ljmp *(1f)
26523 +
26524 +__INITDATA
26525 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
26526 +.previous
26527 +
26528 +2:
26529 + subl $2b,(1b)
26530 +#else
26531 + jmp 1f-__PAGE_OFFSET
26532 1:
26533 +#endif
26534
26535 /*
26536 * 2. Now on the top of stack is the return
26537 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
26538 * parameter 2, ..., param n. To make things easy, we save the return
26539 * address of efi_call_phys in a global variable.
26540 */
26541 - popl %edx
26542 - movl %edx, saved_return_addr
26543 - /* get the function pointer into ECX*/
26544 - popl %ecx
26545 - movl %ecx, efi_rt_function_ptr
26546 - movl $2f, %edx
26547 - subl $__PAGE_OFFSET, %edx
26548 - pushl %edx
26549 + popl (saved_return_addr)
26550 + popl (efi_rt_function_ptr)
26551
26552 /*
26553 * 3. Clear PG bit in %CR0.
26554 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
26555 /*
26556 * 5. Call the physical function.
26557 */
26558 - jmp *%ecx
26559 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26560
26561 -2:
26562 /*
26563 * 6. After EFI runtime service returns, control will return to
26564 * following instruction. We'd better readjust stack pointer first.
26565 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
26566 movl %cr0, %edx
26567 orl $0x80000000, %edx
26568 movl %edx, %cr0
26569 - jmp 1f
26570 -1:
26571 +
26572 /*
26573 * 8. Now restore the virtual mode from flat mode by
26574 * adding EIP with PAGE_OFFSET.
26575 */
26576 - movl $1f, %edx
26577 - jmp *%edx
26578 +#ifdef CONFIG_PAX_KERNEXEC
26579 + movl $(__KERNEL_DS), %edx
26580 + mov %edx, %ds
26581 + mov %edx, %es
26582 + mov %edx, %ss
26583 + ljmp $(__KERNEL_CS),$1f
26584 +#else
26585 + jmp 1f+__PAGE_OFFSET
26586 +#endif
26587 1:
26588
26589 /*
26590 * 9. Balance the stack. And because EAX contain the return value,
26591 * we'd better not clobber it.
26592 */
26593 - leal efi_rt_function_ptr, %edx
26594 - movl (%edx), %ecx
26595 - pushl %ecx
26596 + pushl (efi_rt_function_ptr)
26597
26598 /*
26599 - * 10. Push the saved return address onto the stack and return.
26600 + * 10. Return to the saved return address.
26601 */
26602 - leal saved_return_addr, %edx
26603 - movl (%edx), %ecx
26604 - pushl %ecx
26605 - ret
26606 + jmpl *(saved_return_addr)
26607 ENDPROC(efi_call_phys)
26608 .previous
26609
26610 -.data
26611 +__INITDATA
26612 saved_return_addr:
26613 .long 0
26614 efi_rt_function_ptr:
26615 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26616 index 4c07cca..2c8427d 100644
26617 --- a/arch/x86/platform/efi/efi_stub_64.S
26618 +++ b/arch/x86/platform/efi/efi_stub_64.S
26619 @@ -7,6 +7,7 @@
26620 */
26621
26622 #include <linux/linkage.h>
26623 +#include <asm/alternative-asm.h>
26624
26625 #define SAVE_XMM \
26626 mov %rsp, %rax; \
26627 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26628 call *%rdi
26629 addq $32, %rsp
26630 RESTORE_XMM
26631 + pax_force_retaddr 0, 1
26632 ret
26633 ENDPROC(efi_call0)
26634
26635 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26636 call *%rdi
26637 addq $32, %rsp
26638 RESTORE_XMM
26639 + pax_force_retaddr 0, 1
26640 ret
26641 ENDPROC(efi_call1)
26642
26643 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26644 call *%rdi
26645 addq $32, %rsp
26646 RESTORE_XMM
26647 + pax_force_retaddr 0, 1
26648 ret
26649 ENDPROC(efi_call2)
26650
26651 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26652 call *%rdi
26653 addq $32, %rsp
26654 RESTORE_XMM
26655 + pax_force_retaddr 0, 1
26656 ret
26657 ENDPROC(efi_call3)
26658
26659 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26660 call *%rdi
26661 addq $32, %rsp
26662 RESTORE_XMM
26663 + pax_force_retaddr 0, 1
26664 ret
26665 ENDPROC(efi_call4)
26666
26667 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26668 call *%rdi
26669 addq $48, %rsp
26670 RESTORE_XMM
26671 + pax_force_retaddr 0, 1
26672 ret
26673 ENDPROC(efi_call5)
26674
26675 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26676 call *%rdi
26677 addq $48, %rsp
26678 RESTORE_XMM
26679 + pax_force_retaddr 0, 1
26680 ret
26681 ENDPROC(efi_call6)
26682 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26683 index fd41a92..9c33628 100644
26684 --- a/arch/x86/platform/mrst/mrst.c
26685 +++ b/arch/x86/platform/mrst/mrst.c
26686 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26687 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26688 int sfi_mrtc_num;
26689
26690 -static void mrst_power_off(void)
26691 +static __noreturn void mrst_power_off(void)
26692 {
26693 + BUG();
26694 }
26695
26696 -static void mrst_reboot(void)
26697 +static __noreturn void mrst_reboot(void)
26698 {
26699 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26700 + BUG();
26701 }
26702
26703 /* parse all the mtimer info to a static mtimer array */
26704 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26705 index 218cdb1..fd55c08 100644
26706 --- a/arch/x86/power/cpu.c
26707 +++ b/arch/x86/power/cpu.c
26708 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26709 static void fix_processor_context(void)
26710 {
26711 int cpu = smp_processor_id();
26712 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26713 + struct tss_struct *t = init_tss + cpu;
26714
26715 set_tss_desc(cpu, t); /*
26716 * This just modifies memory; should not be
26717 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26718 */
26719
26720 #ifdef CONFIG_X86_64
26721 + pax_open_kernel();
26722 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26723 + pax_close_kernel();
26724
26725 syscall_init(); /* This sets MSR_*STAR and related */
26726 #endif
26727 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
26728 index cbca565..bae7133 100644
26729 --- a/arch/x86/realmode/init.c
26730 +++ b/arch/x86/realmode/init.c
26731 @@ -62,7 +62,13 @@ void __init setup_real_mode(void)
26732 __va(real_mode_header->trampoline_header);
26733
26734 #ifdef CONFIG_X86_32
26735 - trampoline_header->start = __pa(startup_32_smp);
26736 + trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
26737 +
26738 +#ifdef CONFIG_PAX_KERNEXEC
26739 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
26740 +#endif
26741 +
26742 + trampoline_header->boot_cs = __BOOT_CS;
26743 trampoline_header->gdt_limit = __BOOT_DS + 7;
26744 trampoline_header->gdt_base = __pa(boot_gdt);
26745 #else
26746 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
26747 index 5b84a2d..a004393 100644
26748 --- a/arch/x86/realmode/rm/Makefile
26749 +++ b/arch/x86/realmode/rm/Makefile
26750 @@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
26751 $(call cc-option, -fno-unit-at-a-time)) \
26752 $(call cc-option, -fno-stack-protector) \
26753 $(call cc-option, -mpreferred-stack-boundary=2)
26754 +ifdef CONSTIFY_PLUGIN
26755 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
26756 +endif
26757 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
26758 GCOV_PROFILE := n
26759 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
26760 index c1b2791..f9e31c7 100644
26761 --- a/arch/x86/realmode/rm/trampoline_32.S
26762 +++ b/arch/x86/realmode/rm/trampoline_32.S
26763 @@ -25,6 +25,12 @@
26764 #include <asm/page_types.h>
26765 #include "realmode.h"
26766
26767 +#ifdef CONFIG_PAX_KERNEXEC
26768 +#define ta(X) (X)
26769 +#else
26770 +#define ta(X) (pa_ ## X)
26771 +#endif
26772 +
26773 .text
26774 .code16
26775
26776 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
26777
26778 cli # We should be safe anyway
26779
26780 - movl tr_start, %eax # where we need to go
26781 -
26782 movl $0xA5A5A5A5, trampoline_status
26783 # write marker for master knows we're running
26784
26785 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
26786 movw $1, %dx # protected mode (PE) bit
26787 lmsw %dx # into protected mode
26788
26789 - ljmpl $__BOOT_CS, $pa_startup_32
26790 + ljmpl *(trampoline_header)
26791
26792 .section ".text32","ax"
26793 .code32
26794 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
26795 .balign 8
26796 GLOBAL(trampoline_header)
26797 tr_start: .space 4
26798 - tr_gdt_pad: .space 2
26799 + tr_boot_cs: .space 2
26800 tr_gdt: .space 6
26801 END(trampoline_header)
26802
26803 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
26804 index bb360dc..3e5945f 100644
26805 --- a/arch/x86/realmode/rm/trampoline_64.S
26806 +++ b/arch/x86/realmode/rm/trampoline_64.S
26807 @@ -107,7 +107,7 @@ ENTRY(startup_32)
26808 wrmsr
26809
26810 # Enable paging and in turn activate Long Mode
26811 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
26812 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
26813 movl %eax, %cr0
26814
26815 /*
26816 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26817 index 5a1847d..deccb30 100644
26818 --- a/arch/x86/tools/relocs.c
26819 +++ b/arch/x86/tools/relocs.c
26820 @@ -12,10 +12,13 @@
26821 #include <regex.h>
26822 #include <tools/le_byteshift.h>
26823
26824 +#include "../../../include/generated/autoconf.h"
26825 +
26826 static void die(char *fmt, ...);
26827
26828 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26829 static Elf32_Ehdr ehdr;
26830 +static Elf32_Phdr *phdr;
26831 static unsigned long reloc_count, reloc_idx;
26832 static unsigned long *relocs;
26833 static unsigned long reloc16_count, reloc16_idx;
26834 @@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
26835 }
26836 }
26837
26838 +static void read_phdrs(FILE *fp)
26839 +{
26840 + unsigned int i;
26841 +
26842 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26843 + if (!phdr) {
26844 + die("Unable to allocate %d program headers\n",
26845 + ehdr.e_phnum);
26846 + }
26847 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26848 + die("Seek to %d failed: %s\n",
26849 + ehdr.e_phoff, strerror(errno));
26850 + }
26851 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26852 + die("Cannot read ELF program headers: %s\n",
26853 + strerror(errno));
26854 + }
26855 + for(i = 0; i < ehdr.e_phnum; i++) {
26856 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26857 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26858 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26859 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26860 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26861 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26862 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26863 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26864 + }
26865 +
26866 +}
26867 +
26868 static void read_shdrs(FILE *fp)
26869 {
26870 - int i;
26871 + unsigned int i;
26872 Elf32_Shdr shdr;
26873
26874 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26875 @@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
26876
26877 static void read_strtabs(FILE *fp)
26878 {
26879 - int i;
26880 + unsigned int i;
26881 for (i = 0; i < ehdr.e_shnum; i++) {
26882 struct section *sec = &secs[i];
26883 if (sec->shdr.sh_type != SHT_STRTAB) {
26884 @@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
26885
26886 static void read_symtabs(FILE *fp)
26887 {
26888 - int i,j;
26889 + unsigned int i,j;
26890 for (i = 0; i < ehdr.e_shnum; i++) {
26891 struct section *sec = &secs[i];
26892 if (sec->shdr.sh_type != SHT_SYMTAB) {
26893 @@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
26894 }
26895
26896
26897 -static void read_relocs(FILE *fp)
26898 +static void read_relocs(FILE *fp, int use_real_mode)
26899 {
26900 - int i,j;
26901 + unsigned int i,j;
26902 + uint32_t base;
26903 +
26904 for (i = 0; i < ehdr.e_shnum; i++) {
26905 struct section *sec = &secs[i];
26906 if (sec->shdr.sh_type != SHT_REL) {
26907 @@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
26908 die("Cannot read symbol table: %s\n",
26909 strerror(errno));
26910 }
26911 + base = 0;
26912 +
26913 +#ifdef CONFIG_X86_32
26914 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
26915 + if (phdr[j].p_type != PT_LOAD )
26916 + continue;
26917 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26918 + continue;
26919 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26920 + break;
26921 + }
26922 +#endif
26923 +
26924 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26925 Elf32_Rel *rel = &sec->reltab[j];
26926 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26927 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26928 rel->r_info = elf32_to_cpu(rel->r_info);
26929 }
26930 }
26931 @@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
26932
26933 static void print_absolute_symbols(void)
26934 {
26935 - int i;
26936 + unsigned int i;
26937 printf("Absolute symbols\n");
26938 printf(" Num: Value Size Type Bind Visibility Name\n");
26939 for (i = 0; i < ehdr.e_shnum; i++) {
26940 struct section *sec = &secs[i];
26941 char *sym_strtab;
26942 - int j;
26943 + unsigned int j;
26944
26945 if (sec->shdr.sh_type != SHT_SYMTAB) {
26946 continue;
26947 @@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
26948
26949 static void print_absolute_relocs(void)
26950 {
26951 - int i, printed = 0;
26952 + unsigned int i, printed = 0;
26953
26954 for (i = 0; i < ehdr.e_shnum; i++) {
26955 struct section *sec = &secs[i];
26956 struct section *sec_applies, *sec_symtab;
26957 char *sym_strtab;
26958 Elf32_Sym *sh_symtab;
26959 - int j;
26960 + unsigned int j;
26961 if (sec->shdr.sh_type != SHT_REL) {
26962 continue;
26963 }
26964 @@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
26965 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26966 int use_real_mode)
26967 {
26968 - int i;
26969 + unsigned int i;
26970 /* Walk through the relocations */
26971 for (i = 0; i < ehdr.e_shnum; i++) {
26972 char *sym_strtab;
26973 Elf32_Sym *sh_symtab;
26974 struct section *sec_applies, *sec_symtab;
26975 - int j;
26976 + unsigned int j;
26977 struct section *sec = &secs[i];
26978
26979 if (sec->shdr.sh_type != SHT_REL) {
26980 @@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26981 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26982 r_type = ELF32_R_TYPE(rel->r_info);
26983
26984 + if (!use_real_mode) {
26985 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26986 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26987 + continue;
26988 +
26989 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26990 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26991 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26992 + continue;
26993 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26994 + continue;
26995 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26996 + continue;
26997 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26998 + continue;
26999 +#endif
27000 + }
27001 +
27002 shn_abs = sym->st_shndx == SHN_ABS;
27003
27004 switch (r_type) {
27005 @@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
27006
27007 static void emit_relocs(int as_text, int use_real_mode)
27008 {
27009 - int i;
27010 + unsigned int i;
27011 /* Count how many relocations I have and allocate space for them. */
27012 reloc_count = 0;
27013 walk_relocs(count_reloc, use_real_mode);
27014 @@ -808,10 +874,11 @@ int main(int argc, char **argv)
27015 fname, strerror(errno));
27016 }
27017 read_ehdr(fp);
27018 + read_phdrs(fp);
27019 read_shdrs(fp);
27020 read_strtabs(fp);
27021 read_symtabs(fp);
27022 - read_relocs(fp);
27023 + read_relocs(fp, use_real_mode);
27024 if (show_absolute_syms) {
27025 print_absolute_symbols();
27026 return 0;
27027 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27028 index fd14be1..e3c79c0 100644
27029 --- a/arch/x86/vdso/Makefile
27030 +++ b/arch/x86/vdso/Makefile
27031 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
27032 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
27033 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
27034
27035 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27036 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27037 GCOV_PROFILE := n
27038
27039 #
27040 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27041 index 66e6d93..587f435 100644
27042 --- a/arch/x86/vdso/vdso32-setup.c
27043 +++ b/arch/x86/vdso/vdso32-setup.c
27044 @@ -25,6 +25,7 @@
27045 #include <asm/tlbflush.h>
27046 #include <asm/vdso.h>
27047 #include <asm/proto.h>
27048 +#include <asm/mman.h>
27049
27050 enum {
27051 VDSO_DISABLED = 0,
27052 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27053 void enable_sep_cpu(void)
27054 {
27055 int cpu = get_cpu();
27056 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
27057 + struct tss_struct *tss = init_tss + cpu;
27058
27059 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27060 put_cpu();
27061 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27062 gate_vma.vm_start = FIXADDR_USER_START;
27063 gate_vma.vm_end = FIXADDR_USER_END;
27064 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27065 - gate_vma.vm_page_prot = __P101;
27066 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27067
27068 return 0;
27069 }
27070 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27071 if (compat)
27072 addr = VDSO_HIGH_BASE;
27073 else {
27074 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27075 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27076 if (IS_ERR_VALUE(addr)) {
27077 ret = addr;
27078 goto up_fail;
27079 }
27080 }
27081
27082 - current->mm->context.vdso = (void *)addr;
27083 + current->mm->context.vdso = addr;
27084
27085 if (compat_uses_vma || !compat) {
27086 /*
27087 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27088 }
27089
27090 current_thread_info()->sysenter_return =
27091 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27092 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27093
27094 up_fail:
27095 if (ret)
27096 - current->mm->context.vdso = NULL;
27097 + current->mm->context.vdso = 0;
27098
27099 up_write(&mm->mmap_sem);
27100
27101 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27102
27103 const char *arch_vma_name(struct vm_area_struct *vma)
27104 {
27105 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27106 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27107 return "[vdso]";
27108 +
27109 +#ifdef CONFIG_PAX_SEGMEXEC
27110 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27111 + return "[vdso]";
27112 +#endif
27113 +
27114 return NULL;
27115 }
27116
27117 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27118 * Check to see if the corresponding task was created in compat vdso
27119 * mode.
27120 */
27121 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27122 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27123 return &gate_vma;
27124 return NULL;
27125 }
27126 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27127 index 00aaf04..4a26505 100644
27128 --- a/arch/x86/vdso/vma.c
27129 +++ b/arch/x86/vdso/vma.c
27130 @@ -16,8 +16,6 @@
27131 #include <asm/vdso.h>
27132 #include <asm/page.h>
27133
27134 -unsigned int __read_mostly vdso_enabled = 1;
27135 -
27136 extern char vdso_start[], vdso_end[];
27137 extern unsigned short vdso_sync_cpuid;
27138
27139 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27140 * unaligned here as a result of stack start randomization.
27141 */
27142 addr = PAGE_ALIGN(addr);
27143 - addr = align_addr(addr, NULL, ALIGN_VDSO);
27144
27145 return addr;
27146 }
27147 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27148 unsigned size)
27149 {
27150 struct mm_struct *mm = current->mm;
27151 - unsigned long addr;
27152 + unsigned long addr = 0;
27153 int ret;
27154
27155 - if (!vdso_enabled)
27156 - return 0;
27157 -
27158 down_write(&mm->mmap_sem);
27159 +
27160 +#ifdef CONFIG_PAX_RANDMMAP
27161 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27162 +#endif
27163 +
27164 addr = vdso_addr(mm->start_stack, size);
27165 + addr = align_addr(addr, NULL, ALIGN_VDSO);
27166 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27167 if (IS_ERR_VALUE(addr)) {
27168 ret = addr;
27169 goto up_fail;
27170 }
27171
27172 - current->mm->context.vdso = (void *)addr;
27173 + mm->context.vdso = addr;
27174
27175 ret = install_special_mapping(mm, addr, size,
27176 VM_READ|VM_EXEC|
27177 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27178 pages);
27179 - if (ret) {
27180 - current->mm->context.vdso = NULL;
27181 - goto up_fail;
27182 - }
27183 + if (ret)
27184 + mm->context.vdso = 0;
27185
27186 up_fail:
27187 up_write(&mm->mmap_sem);
27188 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27189 vdsox32_size);
27190 }
27191 #endif
27192 -
27193 -static __init int vdso_setup(char *s)
27194 -{
27195 - vdso_enabled = simple_strtoul(s, NULL, 0);
27196 - return 0;
27197 -}
27198 -__setup("vdso=", vdso_setup);
27199 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27200 index ff962d4..d34a191 100644
27201 --- a/arch/x86/xen/enlighten.c
27202 +++ b/arch/x86/xen/enlighten.c
27203 @@ -97,8 +97,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27204
27205 struct shared_info xen_dummy_shared_info;
27206
27207 -void *xen_initial_gdt;
27208 -
27209 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27210 __read_mostly int xen_have_vector_callback;
27211 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27212 @@ -885,21 +883,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
27213
27214 static void set_xen_basic_apic_ops(void)
27215 {
27216 - apic->read = xen_apic_read;
27217 - apic->write = xen_apic_write;
27218 - apic->icr_read = xen_apic_icr_read;
27219 - apic->icr_write = xen_apic_icr_write;
27220 - apic->wait_icr_idle = xen_apic_wait_icr_idle;
27221 - apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
27222 - apic->set_apic_id = xen_set_apic_id;
27223 - apic->get_apic_id = xen_get_apic_id;
27224 + *(void **)&apic->read = xen_apic_read;
27225 + *(void **)&apic->write = xen_apic_write;
27226 + *(void **)&apic->icr_read = xen_apic_icr_read;
27227 + *(void **)&apic->icr_write = xen_apic_icr_write;
27228 + *(void **)&apic->wait_icr_idle = xen_apic_wait_icr_idle;
27229 + *(void **)&apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
27230 + *(void **)&apic->set_apic_id = xen_set_apic_id;
27231 + *(void **)&apic->get_apic_id = xen_get_apic_id;
27232
27233 #ifdef CONFIG_SMP
27234 - apic->send_IPI_allbutself = xen_send_IPI_allbutself;
27235 - apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
27236 - apic->send_IPI_mask = xen_send_IPI_mask;
27237 - apic->send_IPI_all = xen_send_IPI_all;
27238 - apic->send_IPI_self = xen_send_IPI_self;
27239 + *(void **)&apic->send_IPI_allbutself = xen_send_IPI_allbutself;
27240 + *(void **)&apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
27241 + *(void **)&apic->send_IPI_mask = xen_send_IPI_mask;
27242 + *(void **)&apic->send_IPI_all = xen_send_IPI_all;
27243 + *(void **)&apic->send_IPI_self = xen_send_IPI_self;
27244 #endif
27245 }
27246
27247 @@ -1175,30 +1173,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27248 #endif
27249 };
27250
27251 -static void xen_reboot(int reason)
27252 +static __noreturn void xen_reboot(int reason)
27253 {
27254 struct sched_shutdown r = { .reason = reason };
27255
27256 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27257 - BUG();
27258 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27259 + BUG();
27260 }
27261
27262 -static void xen_restart(char *msg)
27263 +static __noreturn void xen_restart(char *msg)
27264 {
27265 xen_reboot(SHUTDOWN_reboot);
27266 }
27267
27268 -static void xen_emergency_restart(void)
27269 +static __noreturn void xen_emergency_restart(void)
27270 {
27271 xen_reboot(SHUTDOWN_reboot);
27272 }
27273
27274 -static void xen_machine_halt(void)
27275 +static __noreturn void xen_machine_halt(void)
27276 {
27277 xen_reboot(SHUTDOWN_poweroff);
27278 }
27279
27280 -static void xen_machine_power_off(void)
27281 +static __noreturn void xen_machine_power_off(void)
27282 {
27283 if (pm_power_off)
27284 pm_power_off();
27285 @@ -1301,7 +1299,17 @@ asmlinkage void __init xen_start_kernel(void)
27286 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27287
27288 /* Work out if we support NX */
27289 - x86_configure_nx();
27290 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27291 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27292 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27293 + unsigned l, h;
27294 +
27295 + __supported_pte_mask |= _PAGE_NX;
27296 + rdmsr(MSR_EFER, l, h);
27297 + l |= EFER_NX;
27298 + wrmsr(MSR_EFER, l, h);
27299 + }
27300 +#endif
27301
27302 xen_setup_features();
27303
27304 @@ -1332,13 +1340,6 @@ asmlinkage void __init xen_start_kernel(void)
27305
27306 machine_ops = xen_machine_ops;
27307
27308 - /*
27309 - * The only reliable way to retain the initial address of the
27310 - * percpu gdt_page is to remember it here, so we can go and
27311 - * mark it RW later, when the initial percpu area is freed.
27312 - */
27313 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27314 -
27315 xen_smp_init();
27316
27317 #ifdef CONFIG_ACPI_NUMA
27318 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27319 index 3a73785..0d30df2 100644
27320 --- a/arch/x86/xen/mmu.c
27321 +++ b/arch/x86/xen/mmu.c
27322 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27323 convert_pfn_mfn(init_level4_pgt);
27324 convert_pfn_mfn(level3_ident_pgt);
27325 convert_pfn_mfn(level3_kernel_pgt);
27326 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27327 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27328 + convert_pfn_mfn(level3_vmemmap_pgt);
27329
27330 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27331 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27332 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27333 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27334 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27335 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27336 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27337 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27338 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27339 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27340 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27341 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27342 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27343
27344 @@ -1940,6 +1947,7 @@ static void __init xen_post_allocator_init(void)
27345 pv_mmu_ops.set_pud = xen_set_pud;
27346 #if PAGETABLE_LEVELS == 4
27347 pv_mmu_ops.set_pgd = xen_set_pgd;
27348 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27349 #endif
27350
27351 /* This will work as long as patching hasn't happened yet
27352 @@ -2021,6 +2029,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27353 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27354 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27355 .set_pgd = xen_set_pgd_hyper,
27356 + .set_pgd_batched = xen_set_pgd_hyper,
27357
27358 .alloc_pud = xen_alloc_pmd_init,
27359 .release_pud = xen_release_pmd_init,
27360 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27361 index afb250d..627075f 100644
27362 --- a/arch/x86/xen/smp.c
27363 +++ b/arch/x86/xen/smp.c
27364 @@ -231,11 +231,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27365 {
27366 BUG_ON(smp_processor_id() != 0);
27367 native_smp_prepare_boot_cpu();
27368 -
27369 - /* We've switched to the "real" per-cpu gdt, so make sure the
27370 - old memory can be recycled */
27371 - make_lowmem_page_readwrite(xen_initial_gdt);
27372 -
27373 xen_filter_cpu_maps();
27374 xen_setup_vcpu_info_placement();
27375 }
27376 @@ -302,12 +297,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27377 gdt = get_cpu_gdt_table(cpu);
27378
27379 ctxt->flags = VGCF_IN_KERNEL;
27380 - ctxt->user_regs.ds = __USER_DS;
27381 - ctxt->user_regs.es = __USER_DS;
27382 + ctxt->user_regs.ds = __KERNEL_DS;
27383 + ctxt->user_regs.es = __KERNEL_DS;
27384 ctxt->user_regs.ss = __KERNEL_DS;
27385 #ifdef CONFIG_X86_32
27386 ctxt->user_regs.fs = __KERNEL_PERCPU;
27387 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27388 + savesegment(gs, ctxt->user_regs.gs);
27389 #else
27390 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27391 #endif
27392 @@ -357,13 +352,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
27393 int rc;
27394
27395 per_cpu(current_task, cpu) = idle;
27396 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27397 #ifdef CONFIG_X86_32
27398 irq_ctx_init(cpu);
27399 #else
27400 clear_tsk_thread_flag(idle, TIF_FORK);
27401 - per_cpu(kernel_stack, cpu) =
27402 - (unsigned long)task_stack_page(idle) -
27403 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27404 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27405 #endif
27406 xen_setup_runstate_info(cpu);
27407 xen_setup_timer(cpu);
27408 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27409 index f9643fc..602e8af 100644
27410 --- a/arch/x86/xen/xen-asm_32.S
27411 +++ b/arch/x86/xen/xen-asm_32.S
27412 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
27413 ESP_OFFSET=4 # bytes pushed onto stack
27414
27415 /*
27416 - * Store vcpu_info pointer for easy access. Do it this way to
27417 - * avoid having to reload %fs
27418 + * Store vcpu_info pointer for easy access.
27419 */
27420 #ifdef CONFIG_SMP
27421 - GET_THREAD_INFO(%eax)
27422 - movl TI_cpu(%eax), %eax
27423 - movl __per_cpu_offset(,%eax,4), %eax
27424 - mov xen_vcpu(%eax), %eax
27425 + push %fs
27426 + mov $(__KERNEL_PERCPU), %eax
27427 + mov %eax, %fs
27428 + mov PER_CPU_VAR(xen_vcpu), %eax
27429 + pop %fs
27430 #else
27431 movl xen_vcpu, %eax
27432 #endif
27433 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27434 index aaa7291..3f77960 100644
27435 --- a/arch/x86/xen/xen-head.S
27436 +++ b/arch/x86/xen/xen-head.S
27437 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27438 #ifdef CONFIG_X86_32
27439 mov %esi,xen_start_info
27440 mov $init_thread_union+THREAD_SIZE,%esp
27441 +#ifdef CONFIG_SMP
27442 + movl $cpu_gdt_table,%edi
27443 + movl $__per_cpu_load,%eax
27444 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27445 + rorl $16,%eax
27446 + movb %al,__KERNEL_PERCPU + 4(%edi)
27447 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27448 + movl $__per_cpu_end - 1,%eax
27449 + subl $__per_cpu_start,%eax
27450 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27451 +#endif
27452 #else
27453 mov %rsi,xen_start_info
27454 mov $init_thread_union+THREAD_SIZE,%rsp
27455 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27456 index 202d4c1..99b072a 100644
27457 --- a/arch/x86/xen/xen-ops.h
27458 +++ b/arch/x86/xen/xen-ops.h
27459 @@ -10,8 +10,6 @@
27460 extern const char xen_hypervisor_callback[];
27461 extern const char xen_failsafe_callback[];
27462
27463 -extern void *xen_initial_gdt;
27464 -
27465 struct trap_info;
27466 void xen_copy_trap_info(struct trap_info *traps);
27467
27468 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27469 index 525bd3d..ef888b1 100644
27470 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27471 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27472 @@ -119,9 +119,9 @@
27473 ----------------------------------------------------------------------*/
27474
27475 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27476 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27477 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27478 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27479 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27480
27481 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27482 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27483 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27484 index 2f33760..835e50a 100644
27485 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27486 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27487 @@ -11,6 +11,7 @@
27488 #ifndef _XTENSA_CORE_H
27489 #define _XTENSA_CORE_H
27490
27491 +#include <linux/const.h>
27492
27493 /****************************************************************************
27494 Parameters Useful for Any Code, USER or PRIVILEGED
27495 @@ -112,9 +113,9 @@
27496 ----------------------------------------------------------------------*/
27497
27498 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27499 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27500 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27501 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27502 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27503
27504 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27505 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27506 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27507 index af00795..2bb8105 100644
27508 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27509 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27510 @@ -11,6 +11,7 @@
27511 #ifndef _XTENSA_CORE_CONFIGURATION_H
27512 #define _XTENSA_CORE_CONFIGURATION_H
27513
27514 +#include <linux/const.h>
27515
27516 /****************************************************************************
27517 Parameters Useful for Any Code, USER or PRIVILEGED
27518 @@ -118,9 +119,9 @@
27519 ----------------------------------------------------------------------*/
27520
27521 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27522 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27523 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27524 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27525 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27526
27527 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27528 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27529 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27530 index 58916af..9cb880b 100644
27531 --- a/block/blk-iopoll.c
27532 +++ b/block/blk-iopoll.c
27533 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27534 }
27535 EXPORT_SYMBOL(blk_iopoll_complete);
27536
27537 -static void blk_iopoll_softirq(struct softirq_action *h)
27538 +static void blk_iopoll_softirq(void)
27539 {
27540 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27541 int rearm = 0, budget = blk_iopoll_budget;
27542 diff --git a/block/blk-map.c b/block/blk-map.c
27543 index 623e1cd..ca1e109 100644
27544 --- a/block/blk-map.c
27545 +++ b/block/blk-map.c
27546 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27547 if (!len || !kbuf)
27548 return -EINVAL;
27549
27550 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27551 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27552 if (do_copy)
27553 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27554 else
27555 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27556 index 467c8de..4bddc6d 100644
27557 --- a/block/blk-softirq.c
27558 +++ b/block/blk-softirq.c
27559 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27560 * Softirq action handler - move entries to local list and loop over them
27561 * while passing them to the queue registered handler.
27562 */
27563 -static void blk_done_softirq(struct softirq_action *h)
27564 +static void blk_done_softirq(void)
27565 {
27566 struct list_head *cpu_list, local_list;
27567
27568 diff --git a/block/bsg.c b/block/bsg.c
27569 index ff64ae3..593560c 100644
27570 --- a/block/bsg.c
27571 +++ b/block/bsg.c
27572 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27573 struct sg_io_v4 *hdr, struct bsg_device *bd,
27574 fmode_t has_write_perm)
27575 {
27576 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27577 + unsigned char *cmdptr;
27578 +
27579 if (hdr->request_len > BLK_MAX_CDB) {
27580 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27581 if (!rq->cmd)
27582 return -ENOMEM;
27583 - }
27584 + cmdptr = rq->cmd;
27585 + } else
27586 + cmdptr = tmpcmd;
27587
27588 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27589 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27590 hdr->request_len))
27591 return -EFAULT;
27592
27593 + if (cmdptr != rq->cmd)
27594 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27595 +
27596 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27597 if (blk_verify_command(rq->cmd, has_write_perm))
27598 return -EPERM;
27599 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27600 index 7c668c8..db3521c 100644
27601 --- a/block/compat_ioctl.c
27602 +++ b/block/compat_ioctl.c
27603 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27604 err |= __get_user(f->spec1, &uf->spec1);
27605 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27606 err |= __get_user(name, &uf->name);
27607 - f->name = compat_ptr(name);
27608 + f->name = (void __force_kernel *)compat_ptr(name);
27609 if (err) {
27610 err = -EFAULT;
27611 goto out;
27612 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27613 index 6296b40..417c00f 100644
27614 --- a/block/partitions/efi.c
27615 +++ b/block/partitions/efi.c
27616 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27617 if (!gpt)
27618 return NULL;
27619
27620 + if (!le32_to_cpu(gpt->num_partition_entries))
27621 + return NULL;
27622 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27623 + if (!pte)
27624 + return NULL;
27625 +
27626 count = le32_to_cpu(gpt->num_partition_entries) *
27627 le32_to_cpu(gpt->sizeof_partition_entry);
27628 - if (!count)
27629 - return NULL;
27630 - pte = kzalloc(count, GFP_KERNEL);
27631 - if (!pte)
27632 - return NULL;
27633 -
27634 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27635 (u8 *) pte,
27636 count) < count) {
27637 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27638 index 9a87daa..fb17486 100644
27639 --- a/block/scsi_ioctl.c
27640 +++ b/block/scsi_ioctl.c
27641 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27642 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27643 struct sg_io_hdr *hdr, fmode_t mode)
27644 {
27645 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27646 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27647 + unsigned char *cmdptr;
27648 +
27649 + if (rq->cmd != rq->__cmd)
27650 + cmdptr = rq->cmd;
27651 + else
27652 + cmdptr = tmpcmd;
27653 +
27654 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27655 return -EFAULT;
27656 +
27657 + if (cmdptr != rq->cmd)
27658 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27659 +
27660 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27661 return -EPERM;
27662
27663 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27664 int err;
27665 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27666 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27667 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27668 + unsigned char *cmdptr;
27669
27670 if (!sic)
27671 return -EINVAL;
27672 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27673 */
27674 err = -EFAULT;
27675 rq->cmd_len = cmdlen;
27676 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27677 +
27678 + if (rq->cmd != rq->__cmd)
27679 + cmdptr = rq->cmd;
27680 + else
27681 + cmdptr = tmpcmd;
27682 +
27683 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27684 goto error;
27685
27686 + if (rq->cmd != cmdptr)
27687 + memcpy(rq->cmd, cmdptr, cmdlen);
27688 +
27689 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27690 goto error;
27691
27692 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27693 index 671d4d6..5f24030 100644
27694 --- a/crypto/cryptd.c
27695 +++ b/crypto/cryptd.c
27696 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27697
27698 struct cryptd_blkcipher_request_ctx {
27699 crypto_completion_t complete;
27700 -};
27701 +} __no_const;
27702
27703 struct cryptd_hash_ctx {
27704 struct crypto_shash *child;
27705 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27706
27707 struct cryptd_aead_request_ctx {
27708 crypto_completion_t complete;
27709 -};
27710 +} __no_const;
27711
27712 static void cryptd_queue_worker(struct work_struct *work);
27713
27714 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27715 index e6defd8..c26a225 100644
27716 --- a/drivers/acpi/apei/cper.c
27717 +++ b/drivers/acpi/apei/cper.c
27718 @@ -38,12 +38,12 @@
27719 */
27720 u64 cper_next_record_id(void)
27721 {
27722 - static atomic64_t seq;
27723 + static atomic64_unchecked_t seq;
27724
27725 - if (!atomic64_read(&seq))
27726 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27727 + if (!atomic64_read_unchecked(&seq))
27728 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27729
27730 - return atomic64_inc_return(&seq);
27731 + return atomic64_inc_return_unchecked(&seq);
27732 }
27733 EXPORT_SYMBOL_GPL(cper_next_record_id);
27734
27735 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27736 index 7586544..636a2f0 100644
27737 --- a/drivers/acpi/ec_sys.c
27738 +++ b/drivers/acpi/ec_sys.c
27739 @@ -12,6 +12,7 @@
27740 #include <linux/acpi.h>
27741 #include <linux/debugfs.h>
27742 #include <linux/module.h>
27743 +#include <linux/uaccess.h>
27744 #include "internal.h"
27745
27746 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27747 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27748 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27749 */
27750 unsigned int size = EC_SPACE_SIZE;
27751 - u8 *data = (u8 *) buf;
27752 + u8 data;
27753 loff_t init_off = *off;
27754 int err = 0;
27755
27756 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27757 size = count;
27758
27759 while (size) {
27760 - err = ec_read(*off, &data[*off - init_off]);
27761 + err = ec_read(*off, &data);
27762 if (err)
27763 return err;
27764 + if (put_user(data, &buf[*off - init_off]))
27765 + return -EFAULT;
27766 *off += 1;
27767 size--;
27768 }
27769 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27770
27771 unsigned int size = count;
27772 loff_t init_off = *off;
27773 - u8 *data = (u8 *) buf;
27774 int err = 0;
27775
27776 if (*off >= EC_SPACE_SIZE)
27777 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27778 }
27779
27780 while (size) {
27781 - u8 byte_write = data[*off - init_off];
27782 + u8 byte_write;
27783 + if (get_user(byte_write, &buf[*off - init_off]))
27784 + return -EFAULT;
27785 err = ec_write(*off, byte_write);
27786 if (err)
27787 return err;
27788 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27789 index 251c7b62..000462d 100644
27790 --- a/drivers/acpi/proc.c
27791 +++ b/drivers/acpi/proc.c
27792 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27793 size_t count, loff_t * ppos)
27794 {
27795 struct list_head *node, *next;
27796 - char strbuf[5];
27797 - char str[5] = "";
27798 - unsigned int len = count;
27799 + char strbuf[5] = {0};
27800
27801 - if (len > 4)
27802 - len = 4;
27803 - if (len < 0)
27804 + if (count > 4)
27805 + count = 4;
27806 + if (copy_from_user(strbuf, buffer, count))
27807 return -EFAULT;
27808 -
27809 - if (copy_from_user(strbuf, buffer, len))
27810 - return -EFAULT;
27811 - strbuf[len] = '\0';
27812 - sscanf(strbuf, "%s", str);
27813 + strbuf[count] = '\0';
27814
27815 mutex_lock(&acpi_device_lock);
27816 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27817 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27818 if (!dev->wakeup.flags.valid)
27819 continue;
27820
27821 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27822 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27823 if (device_can_wakeup(&dev->dev)) {
27824 bool enable = !device_may_wakeup(&dev->dev);
27825 device_set_wakeup_enable(&dev->dev, enable);
27826 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27827 index bbac51e..4c094f9 100644
27828 --- a/drivers/acpi/processor_driver.c
27829 +++ b/drivers/acpi/processor_driver.c
27830 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27831 return 0;
27832 #endif
27833
27834 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27835 + BUG_ON(pr->id >= nr_cpu_ids);
27836
27837 /*
27838 * Buggy BIOS check
27839 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27840 index cece3a4..0845256 100644
27841 --- a/drivers/ata/libata-core.c
27842 +++ b/drivers/ata/libata-core.c
27843 @@ -4743,7 +4743,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27844 struct ata_port *ap;
27845 unsigned int tag;
27846
27847 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27848 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27849 ap = qc->ap;
27850
27851 qc->flags = 0;
27852 @@ -4759,7 +4759,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27853 struct ata_port *ap;
27854 struct ata_link *link;
27855
27856 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27857 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27858 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27859 ap = qc->ap;
27860 link = qc->dev->link;
27861 @@ -5823,6 +5823,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27862 return;
27863
27864 spin_lock(&lock);
27865 + pax_open_kernel();
27866
27867 for (cur = ops->inherits; cur; cur = cur->inherits) {
27868 void **inherit = (void **)cur;
27869 @@ -5836,8 +5837,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27870 if (IS_ERR(*pp))
27871 *pp = NULL;
27872
27873 - ops->inherits = NULL;
27874 + *(struct ata_port_operations **)&ops->inherits = NULL;
27875
27876 + pax_close_kernel();
27877 spin_unlock(&lock);
27878 }
27879
27880 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27881 index ac6a5be..c7176b1 100644
27882 --- a/drivers/ata/pata_arasan_cf.c
27883 +++ b/drivers/ata/pata_arasan_cf.c
27884 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27885 /* Handle platform specific quirks */
27886 if (pdata->quirk) {
27887 if (pdata->quirk & CF_BROKEN_PIO) {
27888 - ap->ops->set_piomode = NULL;
27889 + pax_open_kernel();
27890 + *(void **)&ap->ops->set_piomode = NULL;
27891 + pax_close_kernel();
27892 ap->pio_mask = 0;
27893 }
27894 if (pdata->quirk & CF_BROKEN_MWDMA)
27895 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27896 index f9b983a..887b9d8 100644
27897 --- a/drivers/atm/adummy.c
27898 +++ b/drivers/atm/adummy.c
27899 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27900 vcc->pop(vcc, skb);
27901 else
27902 dev_kfree_skb_any(skb);
27903 - atomic_inc(&vcc->stats->tx);
27904 + atomic_inc_unchecked(&vcc->stats->tx);
27905
27906 return 0;
27907 }
27908 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27909 index 89b30f3..7964211 100644
27910 --- a/drivers/atm/ambassador.c
27911 +++ b/drivers/atm/ambassador.c
27912 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27913 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27914
27915 // VC layer stats
27916 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27917 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27918
27919 // free the descriptor
27920 kfree (tx_descr);
27921 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27922 dump_skb ("<<<", vc, skb);
27923
27924 // VC layer stats
27925 - atomic_inc(&atm_vcc->stats->rx);
27926 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27927 __net_timestamp(skb);
27928 // end of our responsibility
27929 atm_vcc->push (atm_vcc, skb);
27930 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27931 } else {
27932 PRINTK (KERN_INFO, "dropped over-size frame");
27933 // should we count this?
27934 - atomic_inc(&atm_vcc->stats->rx_drop);
27935 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27936 }
27937
27938 } else {
27939 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27940 }
27941
27942 if (check_area (skb->data, skb->len)) {
27943 - atomic_inc(&atm_vcc->stats->tx_err);
27944 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27945 return -ENOMEM; // ?
27946 }
27947
27948 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27949 index b22d71c..d6e1049 100644
27950 --- a/drivers/atm/atmtcp.c
27951 +++ b/drivers/atm/atmtcp.c
27952 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27953 if (vcc->pop) vcc->pop(vcc,skb);
27954 else dev_kfree_skb(skb);
27955 if (dev_data) return 0;
27956 - atomic_inc(&vcc->stats->tx_err);
27957 + atomic_inc_unchecked(&vcc->stats->tx_err);
27958 return -ENOLINK;
27959 }
27960 size = skb->len+sizeof(struct atmtcp_hdr);
27961 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27962 if (!new_skb) {
27963 if (vcc->pop) vcc->pop(vcc,skb);
27964 else dev_kfree_skb(skb);
27965 - atomic_inc(&vcc->stats->tx_err);
27966 + atomic_inc_unchecked(&vcc->stats->tx_err);
27967 return -ENOBUFS;
27968 }
27969 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27970 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27971 if (vcc->pop) vcc->pop(vcc,skb);
27972 else dev_kfree_skb(skb);
27973 out_vcc->push(out_vcc,new_skb);
27974 - atomic_inc(&vcc->stats->tx);
27975 - atomic_inc(&out_vcc->stats->rx);
27976 + atomic_inc_unchecked(&vcc->stats->tx);
27977 + atomic_inc_unchecked(&out_vcc->stats->rx);
27978 return 0;
27979 }
27980
27981 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27982 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27983 read_unlock(&vcc_sklist_lock);
27984 if (!out_vcc) {
27985 - atomic_inc(&vcc->stats->tx_err);
27986 + atomic_inc_unchecked(&vcc->stats->tx_err);
27987 goto done;
27988 }
27989 skb_pull(skb,sizeof(struct atmtcp_hdr));
27990 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27991 __net_timestamp(new_skb);
27992 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27993 out_vcc->push(out_vcc,new_skb);
27994 - atomic_inc(&vcc->stats->tx);
27995 - atomic_inc(&out_vcc->stats->rx);
27996 + atomic_inc_unchecked(&vcc->stats->tx);
27997 + atomic_inc_unchecked(&out_vcc->stats->rx);
27998 done:
27999 if (vcc->pop) vcc->pop(vcc,skb);
28000 else dev_kfree_skb(skb);
28001 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
28002 index 2059ee4..faf51c7 100644
28003 --- a/drivers/atm/eni.c
28004 +++ b/drivers/atm/eni.c
28005 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
28006 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
28007 vcc->dev->number);
28008 length = 0;
28009 - atomic_inc(&vcc->stats->rx_err);
28010 + atomic_inc_unchecked(&vcc->stats->rx_err);
28011 }
28012 else {
28013 length = ATM_CELL_SIZE-1; /* no HEC */
28014 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28015 size);
28016 }
28017 eff = length = 0;
28018 - atomic_inc(&vcc->stats->rx_err);
28019 + atomic_inc_unchecked(&vcc->stats->rx_err);
28020 }
28021 else {
28022 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
28023 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28024 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
28025 vcc->dev->number,vcc->vci,length,size << 2,descr);
28026 length = eff = 0;
28027 - atomic_inc(&vcc->stats->rx_err);
28028 + atomic_inc_unchecked(&vcc->stats->rx_err);
28029 }
28030 }
28031 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
28032 @@ -767,7 +767,7 @@ rx_dequeued++;
28033 vcc->push(vcc,skb);
28034 pushed++;
28035 }
28036 - atomic_inc(&vcc->stats->rx);
28037 + atomic_inc_unchecked(&vcc->stats->rx);
28038 }
28039 wake_up(&eni_dev->rx_wait);
28040 }
28041 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
28042 PCI_DMA_TODEVICE);
28043 if (vcc->pop) vcc->pop(vcc,skb);
28044 else dev_kfree_skb_irq(skb);
28045 - atomic_inc(&vcc->stats->tx);
28046 + atomic_inc_unchecked(&vcc->stats->tx);
28047 wake_up(&eni_dev->tx_wait);
28048 dma_complete++;
28049 }
28050 @@ -1567,7 +1567,7 @@ tx_complete++;
28051 /*--------------------------------- entries ---------------------------------*/
28052
28053
28054 -static const char *media_name[] __devinitdata = {
28055 +static const char *media_name[] __devinitconst = {
28056 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
28057 "UTP", "05?", "06?", "07?", /* 4- 7 */
28058 "TAXI","09?", "10?", "11?", /* 8-11 */
28059 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
28060 index 86fed1b..6dc4721 100644
28061 --- a/drivers/atm/firestream.c
28062 +++ b/drivers/atm/firestream.c
28063 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
28064 }
28065 }
28066
28067 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28068 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28069
28070 fs_dprintk (FS_DEBUG_TXMEM, "i");
28071 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
28072 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28073 #endif
28074 skb_put (skb, qe->p1 & 0xffff);
28075 ATM_SKB(skb)->vcc = atm_vcc;
28076 - atomic_inc(&atm_vcc->stats->rx);
28077 + atomic_inc_unchecked(&atm_vcc->stats->rx);
28078 __net_timestamp(skb);
28079 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
28080 atm_vcc->push (atm_vcc, skb);
28081 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28082 kfree (pe);
28083 }
28084 if (atm_vcc)
28085 - atomic_inc(&atm_vcc->stats->rx_drop);
28086 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28087 break;
28088 case 0x1f: /* Reassembly abort: no buffers. */
28089 /* Silently increment error counter. */
28090 if (atm_vcc)
28091 - atomic_inc(&atm_vcc->stats->rx_drop);
28092 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28093 break;
28094 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28095 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
28096 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28097 index 361f5ae..7fc552d 100644
28098 --- a/drivers/atm/fore200e.c
28099 +++ b/drivers/atm/fore200e.c
28100 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
28101 #endif
28102 /* check error condition */
28103 if (*entry->status & STATUS_ERROR)
28104 - atomic_inc(&vcc->stats->tx_err);
28105 + atomic_inc_unchecked(&vcc->stats->tx_err);
28106 else
28107 - atomic_inc(&vcc->stats->tx);
28108 + atomic_inc_unchecked(&vcc->stats->tx);
28109 }
28110 }
28111
28112 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28113 if (skb == NULL) {
28114 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28115
28116 - atomic_inc(&vcc->stats->rx_drop);
28117 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28118 return -ENOMEM;
28119 }
28120
28121 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28122
28123 dev_kfree_skb_any(skb);
28124
28125 - atomic_inc(&vcc->stats->rx_drop);
28126 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28127 return -ENOMEM;
28128 }
28129
28130 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28131
28132 vcc->push(vcc, skb);
28133 - atomic_inc(&vcc->stats->rx);
28134 + atomic_inc_unchecked(&vcc->stats->rx);
28135
28136 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28137
28138 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28139 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28140 fore200e->atm_dev->number,
28141 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28142 - atomic_inc(&vcc->stats->rx_err);
28143 + atomic_inc_unchecked(&vcc->stats->rx_err);
28144 }
28145 }
28146
28147 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28148 goto retry_here;
28149 }
28150
28151 - atomic_inc(&vcc->stats->tx_err);
28152 + atomic_inc_unchecked(&vcc->stats->tx_err);
28153
28154 fore200e->tx_sat++;
28155 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28156 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28157 index b182c2f..1c6fa8a 100644
28158 --- a/drivers/atm/he.c
28159 +++ b/drivers/atm/he.c
28160 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28161
28162 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28163 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28164 - atomic_inc(&vcc->stats->rx_drop);
28165 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28166 goto return_host_buffers;
28167 }
28168
28169 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28170 RBRQ_LEN_ERR(he_dev->rbrq_head)
28171 ? "LEN_ERR" : "",
28172 vcc->vpi, vcc->vci);
28173 - atomic_inc(&vcc->stats->rx_err);
28174 + atomic_inc_unchecked(&vcc->stats->rx_err);
28175 goto return_host_buffers;
28176 }
28177
28178 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28179 vcc->push(vcc, skb);
28180 spin_lock(&he_dev->global_lock);
28181
28182 - atomic_inc(&vcc->stats->rx);
28183 + atomic_inc_unchecked(&vcc->stats->rx);
28184
28185 return_host_buffers:
28186 ++pdus_assembled;
28187 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28188 tpd->vcc->pop(tpd->vcc, tpd->skb);
28189 else
28190 dev_kfree_skb_any(tpd->skb);
28191 - atomic_inc(&tpd->vcc->stats->tx_err);
28192 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28193 }
28194 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28195 return;
28196 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28197 vcc->pop(vcc, skb);
28198 else
28199 dev_kfree_skb_any(skb);
28200 - atomic_inc(&vcc->stats->tx_err);
28201 + atomic_inc_unchecked(&vcc->stats->tx_err);
28202 return -EINVAL;
28203 }
28204
28205 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28206 vcc->pop(vcc, skb);
28207 else
28208 dev_kfree_skb_any(skb);
28209 - atomic_inc(&vcc->stats->tx_err);
28210 + atomic_inc_unchecked(&vcc->stats->tx_err);
28211 return -EINVAL;
28212 }
28213 #endif
28214 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28215 vcc->pop(vcc, skb);
28216 else
28217 dev_kfree_skb_any(skb);
28218 - atomic_inc(&vcc->stats->tx_err);
28219 + atomic_inc_unchecked(&vcc->stats->tx_err);
28220 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28221 return -ENOMEM;
28222 }
28223 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28224 vcc->pop(vcc, skb);
28225 else
28226 dev_kfree_skb_any(skb);
28227 - atomic_inc(&vcc->stats->tx_err);
28228 + atomic_inc_unchecked(&vcc->stats->tx_err);
28229 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28230 return -ENOMEM;
28231 }
28232 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28233 __enqueue_tpd(he_dev, tpd, cid);
28234 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28235
28236 - atomic_inc(&vcc->stats->tx);
28237 + atomic_inc_unchecked(&vcc->stats->tx);
28238
28239 return 0;
28240 }
28241 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28242 index 7d01c2a..4e3ac01 100644
28243 --- a/drivers/atm/horizon.c
28244 +++ b/drivers/atm/horizon.c
28245 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28246 {
28247 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28248 // VC layer stats
28249 - atomic_inc(&vcc->stats->rx);
28250 + atomic_inc_unchecked(&vcc->stats->rx);
28251 __net_timestamp(skb);
28252 // end of our responsibility
28253 vcc->push (vcc, skb);
28254 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28255 dev->tx_iovec = NULL;
28256
28257 // VC layer stats
28258 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28259 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28260
28261 // free the skb
28262 hrz_kfree_skb (skb);
28263 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28264 index 8974bd2..b856f85 100644
28265 --- a/drivers/atm/idt77252.c
28266 +++ b/drivers/atm/idt77252.c
28267 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28268 else
28269 dev_kfree_skb(skb);
28270
28271 - atomic_inc(&vcc->stats->tx);
28272 + atomic_inc_unchecked(&vcc->stats->tx);
28273 }
28274
28275 atomic_dec(&scq->used);
28276 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28277 if ((sb = dev_alloc_skb(64)) == NULL) {
28278 printk("%s: Can't allocate buffers for aal0.\n",
28279 card->name);
28280 - atomic_add(i, &vcc->stats->rx_drop);
28281 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28282 break;
28283 }
28284 if (!atm_charge(vcc, sb->truesize)) {
28285 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28286 card->name);
28287 - atomic_add(i - 1, &vcc->stats->rx_drop);
28288 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28289 dev_kfree_skb(sb);
28290 break;
28291 }
28292 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28293 ATM_SKB(sb)->vcc = vcc;
28294 __net_timestamp(sb);
28295 vcc->push(vcc, sb);
28296 - atomic_inc(&vcc->stats->rx);
28297 + atomic_inc_unchecked(&vcc->stats->rx);
28298
28299 cell += ATM_CELL_PAYLOAD;
28300 }
28301 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28302 "(CDC: %08x)\n",
28303 card->name, len, rpp->len, readl(SAR_REG_CDC));
28304 recycle_rx_pool_skb(card, rpp);
28305 - atomic_inc(&vcc->stats->rx_err);
28306 + atomic_inc_unchecked(&vcc->stats->rx_err);
28307 return;
28308 }
28309 if (stat & SAR_RSQE_CRC) {
28310 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28311 recycle_rx_pool_skb(card, rpp);
28312 - atomic_inc(&vcc->stats->rx_err);
28313 + atomic_inc_unchecked(&vcc->stats->rx_err);
28314 return;
28315 }
28316 if (skb_queue_len(&rpp->queue) > 1) {
28317 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28318 RXPRINTK("%s: Can't alloc RX skb.\n",
28319 card->name);
28320 recycle_rx_pool_skb(card, rpp);
28321 - atomic_inc(&vcc->stats->rx_err);
28322 + atomic_inc_unchecked(&vcc->stats->rx_err);
28323 return;
28324 }
28325 if (!atm_charge(vcc, skb->truesize)) {
28326 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28327 __net_timestamp(skb);
28328
28329 vcc->push(vcc, skb);
28330 - atomic_inc(&vcc->stats->rx);
28331 + atomic_inc_unchecked(&vcc->stats->rx);
28332
28333 return;
28334 }
28335 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28336 __net_timestamp(skb);
28337
28338 vcc->push(vcc, skb);
28339 - atomic_inc(&vcc->stats->rx);
28340 + atomic_inc_unchecked(&vcc->stats->rx);
28341
28342 if (skb->truesize > SAR_FB_SIZE_3)
28343 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28344 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28345 if (vcc->qos.aal != ATM_AAL0) {
28346 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28347 card->name, vpi, vci);
28348 - atomic_inc(&vcc->stats->rx_drop);
28349 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28350 goto drop;
28351 }
28352
28353 if ((sb = dev_alloc_skb(64)) == NULL) {
28354 printk("%s: Can't allocate buffers for AAL0.\n",
28355 card->name);
28356 - atomic_inc(&vcc->stats->rx_err);
28357 + atomic_inc_unchecked(&vcc->stats->rx_err);
28358 goto drop;
28359 }
28360
28361 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28362 ATM_SKB(sb)->vcc = vcc;
28363 __net_timestamp(sb);
28364 vcc->push(vcc, sb);
28365 - atomic_inc(&vcc->stats->rx);
28366 + atomic_inc_unchecked(&vcc->stats->rx);
28367
28368 drop:
28369 skb_pull(queue, 64);
28370 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28371
28372 if (vc == NULL) {
28373 printk("%s: NULL connection in send().\n", card->name);
28374 - atomic_inc(&vcc->stats->tx_err);
28375 + atomic_inc_unchecked(&vcc->stats->tx_err);
28376 dev_kfree_skb(skb);
28377 return -EINVAL;
28378 }
28379 if (!test_bit(VCF_TX, &vc->flags)) {
28380 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28381 - atomic_inc(&vcc->stats->tx_err);
28382 + atomic_inc_unchecked(&vcc->stats->tx_err);
28383 dev_kfree_skb(skb);
28384 return -EINVAL;
28385 }
28386 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28387 break;
28388 default:
28389 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28390 - atomic_inc(&vcc->stats->tx_err);
28391 + atomic_inc_unchecked(&vcc->stats->tx_err);
28392 dev_kfree_skb(skb);
28393 return -EINVAL;
28394 }
28395
28396 if (skb_shinfo(skb)->nr_frags != 0) {
28397 printk("%s: No scatter-gather yet.\n", card->name);
28398 - atomic_inc(&vcc->stats->tx_err);
28399 + atomic_inc_unchecked(&vcc->stats->tx_err);
28400 dev_kfree_skb(skb);
28401 return -EINVAL;
28402 }
28403 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28404
28405 err = queue_skb(card, vc, skb, oam);
28406 if (err) {
28407 - atomic_inc(&vcc->stats->tx_err);
28408 + atomic_inc_unchecked(&vcc->stats->tx_err);
28409 dev_kfree_skb(skb);
28410 return err;
28411 }
28412 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28413 skb = dev_alloc_skb(64);
28414 if (!skb) {
28415 printk("%s: Out of memory in send_oam().\n", card->name);
28416 - atomic_inc(&vcc->stats->tx_err);
28417 + atomic_inc_unchecked(&vcc->stats->tx_err);
28418 return -ENOMEM;
28419 }
28420 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28421 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28422 index d438601..8b98495 100644
28423 --- a/drivers/atm/iphase.c
28424 +++ b/drivers/atm/iphase.c
28425 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28426 status = (u_short) (buf_desc_ptr->desc_mode);
28427 if (status & (RX_CER | RX_PTE | RX_OFL))
28428 {
28429 - atomic_inc(&vcc->stats->rx_err);
28430 + atomic_inc_unchecked(&vcc->stats->rx_err);
28431 IF_ERR(printk("IA: bad packet, dropping it");)
28432 if (status & RX_CER) {
28433 IF_ERR(printk(" cause: packet CRC error\n");)
28434 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28435 len = dma_addr - buf_addr;
28436 if (len > iadev->rx_buf_sz) {
28437 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28438 - atomic_inc(&vcc->stats->rx_err);
28439 + atomic_inc_unchecked(&vcc->stats->rx_err);
28440 goto out_free_desc;
28441 }
28442
28443 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28444 ia_vcc = INPH_IA_VCC(vcc);
28445 if (ia_vcc == NULL)
28446 {
28447 - atomic_inc(&vcc->stats->rx_err);
28448 + atomic_inc_unchecked(&vcc->stats->rx_err);
28449 atm_return(vcc, skb->truesize);
28450 dev_kfree_skb_any(skb);
28451 goto INCR_DLE;
28452 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28453 if ((length > iadev->rx_buf_sz) || (length >
28454 (skb->len - sizeof(struct cpcs_trailer))))
28455 {
28456 - atomic_inc(&vcc->stats->rx_err);
28457 + atomic_inc_unchecked(&vcc->stats->rx_err);
28458 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28459 length, skb->len);)
28460 atm_return(vcc, skb->truesize);
28461 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28462
28463 IF_RX(printk("rx_dle_intr: skb push");)
28464 vcc->push(vcc,skb);
28465 - atomic_inc(&vcc->stats->rx);
28466 + atomic_inc_unchecked(&vcc->stats->rx);
28467 iadev->rx_pkt_cnt++;
28468 }
28469 INCR_DLE:
28470 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28471 {
28472 struct k_sonet_stats *stats;
28473 stats = &PRIV(_ia_dev[board])->sonet_stats;
28474 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28475 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28476 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28477 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28478 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28479 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28480 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28481 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28482 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28483 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28484 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28485 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28486 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28487 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28488 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28489 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28490 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28491 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28492 }
28493 ia_cmds.status = 0;
28494 break;
28495 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28496 if ((desc == 0) || (desc > iadev->num_tx_desc))
28497 {
28498 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28499 - atomic_inc(&vcc->stats->tx);
28500 + atomic_inc_unchecked(&vcc->stats->tx);
28501 if (vcc->pop)
28502 vcc->pop(vcc, skb);
28503 else
28504 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28505 ATM_DESC(skb) = vcc->vci;
28506 skb_queue_tail(&iadev->tx_dma_q, skb);
28507
28508 - atomic_inc(&vcc->stats->tx);
28509 + atomic_inc_unchecked(&vcc->stats->tx);
28510 iadev->tx_pkt_cnt++;
28511 /* Increment transaction counter */
28512 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28513
28514 #if 0
28515 /* add flow control logic */
28516 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28517 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28518 if (iavcc->vc_desc_cnt > 10) {
28519 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28520 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28521 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28522 index 68c7588..7036683 100644
28523 --- a/drivers/atm/lanai.c
28524 +++ b/drivers/atm/lanai.c
28525 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28526 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28527 lanai_endtx(lanai, lvcc);
28528 lanai_free_skb(lvcc->tx.atmvcc, skb);
28529 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28530 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28531 }
28532
28533 /* Try to fill the buffer - don't call unless there is backlog */
28534 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28535 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28536 __net_timestamp(skb);
28537 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28538 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28539 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28540 out:
28541 lvcc->rx.buf.ptr = end;
28542 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28543 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28544 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28545 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28546 lanai->stats.service_rxnotaal5++;
28547 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28548 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28549 return 0;
28550 }
28551 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28552 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28553 int bytes;
28554 read_unlock(&vcc_sklist_lock);
28555 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28556 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28557 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28558 lvcc->stats.x.aal5.service_trash++;
28559 bytes = (SERVICE_GET_END(s) * 16) -
28560 (((unsigned long) lvcc->rx.buf.ptr) -
28561 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28562 }
28563 if (s & SERVICE_STREAM) {
28564 read_unlock(&vcc_sklist_lock);
28565 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28566 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28567 lvcc->stats.x.aal5.service_stream++;
28568 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28569 "PDU on VCI %d!\n", lanai->number, vci);
28570 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28571 return 0;
28572 }
28573 DPRINTK("got rx crc error on vci %d\n", vci);
28574 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28575 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28576 lvcc->stats.x.aal5.service_rxcrc++;
28577 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28578 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28579 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28580 index 1c70c45..300718d 100644
28581 --- a/drivers/atm/nicstar.c
28582 +++ b/drivers/atm/nicstar.c
28583 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28584 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28585 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28586 card->index);
28587 - atomic_inc(&vcc->stats->tx_err);
28588 + atomic_inc_unchecked(&vcc->stats->tx_err);
28589 dev_kfree_skb_any(skb);
28590 return -EINVAL;
28591 }
28592 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28593 if (!vc->tx) {
28594 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28595 card->index);
28596 - atomic_inc(&vcc->stats->tx_err);
28597 + atomic_inc_unchecked(&vcc->stats->tx_err);
28598 dev_kfree_skb_any(skb);
28599 return -EINVAL;
28600 }
28601 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28602 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28603 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28604 card->index);
28605 - atomic_inc(&vcc->stats->tx_err);
28606 + atomic_inc_unchecked(&vcc->stats->tx_err);
28607 dev_kfree_skb_any(skb);
28608 return -EINVAL;
28609 }
28610
28611 if (skb_shinfo(skb)->nr_frags != 0) {
28612 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28613 - atomic_inc(&vcc->stats->tx_err);
28614 + atomic_inc_unchecked(&vcc->stats->tx_err);
28615 dev_kfree_skb_any(skb);
28616 return -EINVAL;
28617 }
28618 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28619 }
28620
28621 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28622 - atomic_inc(&vcc->stats->tx_err);
28623 + atomic_inc_unchecked(&vcc->stats->tx_err);
28624 dev_kfree_skb_any(skb);
28625 return -EIO;
28626 }
28627 - atomic_inc(&vcc->stats->tx);
28628 + atomic_inc_unchecked(&vcc->stats->tx);
28629
28630 return 0;
28631 }
28632 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28633 printk
28634 ("nicstar%d: Can't allocate buffers for aal0.\n",
28635 card->index);
28636 - atomic_add(i, &vcc->stats->rx_drop);
28637 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28638 break;
28639 }
28640 if (!atm_charge(vcc, sb->truesize)) {
28641 RXPRINTK
28642 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28643 card->index);
28644 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28645 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28646 dev_kfree_skb_any(sb);
28647 break;
28648 }
28649 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28650 ATM_SKB(sb)->vcc = vcc;
28651 __net_timestamp(sb);
28652 vcc->push(vcc, sb);
28653 - atomic_inc(&vcc->stats->rx);
28654 + atomic_inc_unchecked(&vcc->stats->rx);
28655 cell += ATM_CELL_PAYLOAD;
28656 }
28657
28658 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28659 if (iovb == NULL) {
28660 printk("nicstar%d: Out of iovec buffers.\n",
28661 card->index);
28662 - atomic_inc(&vcc->stats->rx_drop);
28663 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28664 recycle_rx_buf(card, skb);
28665 return;
28666 }
28667 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28668 small or large buffer itself. */
28669 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28670 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28671 - atomic_inc(&vcc->stats->rx_err);
28672 + atomic_inc_unchecked(&vcc->stats->rx_err);
28673 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28674 NS_MAX_IOVECS);
28675 NS_PRV_IOVCNT(iovb) = 0;
28676 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28677 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28678 card->index);
28679 which_list(card, skb);
28680 - atomic_inc(&vcc->stats->rx_err);
28681 + atomic_inc_unchecked(&vcc->stats->rx_err);
28682 recycle_rx_buf(card, skb);
28683 vc->rx_iov = NULL;
28684 recycle_iov_buf(card, iovb);
28685 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28686 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28687 card->index);
28688 which_list(card, skb);
28689 - atomic_inc(&vcc->stats->rx_err);
28690 + atomic_inc_unchecked(&vcc->stats->rx_err);
28691 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28692 NS_PRV_IOVCNT(iovb));
28693 vc->rx_iov = NULL;
28694 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28695 printk(" - PDU size mismatch.\n");
28696 else
28697 printk(".\n");
28698 - atomic_inc(&vcc->stats->rx_err);
28699 + atomic_inc_unchecked(&vcc->stats->rx_err);
28700 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28701 NS_PRV_IOVCNT(iovb));
28702 vc->rx_iov = NULL;
28703 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28704 /* skb points to a small buffer */
28705 if (!atm_charge(vcc, skb->truesize)) {
28706 push_rxbufs(card, skb);
28707 - atomic_inc(&vcc->stats->rx_drop);
28708 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28709 } else {
28710 skb_put(skb, len);
28711 dequeue_sm_buf(card, skb);
28712 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28713 ATM_SKB(skb)->vcc = vcc;
28714 __net_timestamp(skb);
28715 vcc->push(vcc, skb);
28716 - atomic_inc(&vcc->stats->rx);
28717 + atomic_inc_unchecked(&vcc->stats->rx);
28718 }
28719 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28720 struct sk_buff *sb;
28721 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28722 if (len <= NS_SMBUFSIZE) {
28723 if (!atm_charge(vcc, sb->truesize)) {
28724 push_rxbufs(card, sb);
28725 - atomic_inc(&vcc->stats->rx_drop);
28726 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28727 } else {
28728 skb_put(sb, len);
28729 dequeue_sm_buf(card, sb);
28730 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28731 ATM_SKB(sb)->vcc = vcc;
28732 __net_timestamp(sb);
28733 vcc->push(vcc, sb);
28734 - atomic_inc(&vcc->stats->rx);
28735 + atomic_inc_unchecked(&vcc->stats->rx);
28736 }
28737
28738 push_rxbufs(card, skb);
28739 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28740
28741 if (!atm_charge(vcc, skb->truesize)) {
28742 push_rxbufs(card, skb);
28743 - atomic_inc(&vcc->stats->rx_drop);
28744 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28745 } else {
28746 dequeue_lg_buf(card, skb);
28747 #ifdef NS_USE_DESTRUCTORS
28748 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28749 ATM_SKB(skb)->vcc = vcc;
28750 __net_timestamp(skb);
28751 vcc->push(vcc, skb);
28752 - atomic_inc(&vcc->stats->rx);
28753 + atomic_inc_unchecked(&vcc->stats->rx);
28754 }
28755
28756 push_rxbufs(card, sb);
28757 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28758 printk
28759 ("nicstar%d: Out of huge buffers.\n",
28760 card->index);
28761 - atomic_inc(&vcc->stats->rx_drop);
28762 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28763 recycle_iovec_rx_bufs(card,
28764 (struct iovec *)
28765 iovb->data,
28766 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28767 card->hbpool.count++;
28768 } else
28769 dev_kfree_skb_any(hb);
28770 - atomic_inc(&vcc->stats->rx_drop);
28771 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28772 } else {
28773 /* Copy the small buffer to the huge buffer */
28774 sb = (struct sk_buff *)iov->iov_base;
28775 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28776 #endif /* NS_USE_DESTRUCTORS */
28777 __net_timestamp(hb);
28778 vcc->push(vcc, hb);
28779 - atomic_inc(&vcc->stats->rx);
28780 + atomic_inc_unchecked(&vcc->stats->rx);
28781 }
28782 }
28783
28784 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28785 index 9851093..adb2b1e 100644
28786 --- a/drivers/atm/solos-pci.c
28787 +++ b/drivers/atm/solos-pci.c
28788 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28789 }
28790 atm_charge(vcc, skb->truesize);
28791 vcc->push(vcc, skb);
28792 - atomic_inc(&vcc->stats->rx);
28793 + atomic_inc_unchecked(&vcc->stats->rx);
28794 break;
28795
28796 case PKT_STATUS:
28797 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28798 vcc = SKB_CB(oldskb)->vcc;
28799
28800 if (vcc) {
28801 - atomic_inc(&vcc->stats->tx);
28802 + atomic_inc_unchecked(&vcc->stats->tx);
28803 solos_pop(vcc, oldskb);
28804 } else
28805 dev_kfree_skb_irq(oldskb);
28806 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28807 index 0215934..ce9f5b1 100644
28808 --- a/drivers/atm/suni.c
28809 +++ b/drivers/atm/suni.c
28810 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28811
28812
28813 #define ADD_LIMITED(s,v) \
28814 - atomic_add((v),&stats->s); \
28815 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28816 + atomic_add_unchecked((v),&stats->s); \
28817 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28818
28819
28820 static void suni_hz(unsigned long from_timer)
28821 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28822 index 5120a96..e2572bd 100644
28823 --- a/drivers/atm/uPD98402.c
28824 +++ b/drivers/atm/uPD98402.c
28825 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28826 struct sonet_stats tmp;
28827 int error = 0;
28828
28829 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28830 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28831 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28832 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28833 if (zero && !error) {
28834 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28835
28836
28837 #define ADD_LIMITED(s,v) \
28838 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28839 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28840 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28841 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28842 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28843 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28844
28845
28846 static void stat_event(struct atm_dev *dev)
28847 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28848 if (reason & uPD98402_INT_PFM) stat_event(dev);
28849 if (reason & uPD98402_INT_PCO) {
28850 (void) GET(PCOCR); /* clear interrupt cause */
28851 - atomic_add(GET(HECCT),
28852 + atomic_add_unchecked(GET(HECCT),
28853 &PRIV(dev)->sonet_stats.uncorr_hcs);
28854 }
28855 if ((reason & uPD98402_INT_RFO) &&
28856 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28857 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28858 uPD98402_INT_LOS),PIMR); /* enable them */
28859 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28860 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28861 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28862 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28863 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28864 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28865 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28866 return 0;
28867 }
28868
28869 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28870 index abe4e20..83c4727 100644
28871 --- a/drivers/atm/zatm.c
28872 +++ b/drivers/atm/zatm.c
28873 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28874 }
28875 if (!size) {
28876 dev_kfree_skb_irq(skb);
28877 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28878 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28879 continue;
28880 }
28881 if (!atm_charge(vcc,skb->truesize)) {
28882 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28883 skb->len = size;
28884 ATM_SKB(skb)->vcc = vcc;
28885 vcc->push(vcc,skb);
28886 - atomic_inc(&vcc->stats->rx);
28887 + atomic_inc_unchecked(&vcc->stats->rx);
28888 }
28889 zout(pos & 0xffff,MTA(mbx));
28890 #if 0 /* probably a stupid idea */
28891 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28892 skb_queue_head(&zatm_vcc->backlog,skb);
28893 break;
28894 }
28895 - atomic_inc(&vcc->stats->tx);
28896 + atomic_inc_unchecked(&vcc->stats->tx);
28897 wake_up(&zatm_vcc->tx_wait);
28898 }
28899
28900 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28901 index 765c3a2..771ace6 100644
28902 --- a/drivers/base/devtmpfs.c
28903 +++ b/drivers/base/devtmpfs.c
28904 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28905 if (!thread)
28906 return 0;
28907
28908 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28909 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28910 if (err)
28911 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28912 else
28913 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28914 index cbb463b..babe2cf 100644
28915 --- a/drivers/base/power/wakeup.c
28916 +++ b/drivers/base/power/wakeup.c
28917 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
28918 * They need to be modified together atomically, so it's better to use one
28919 * atomic variable to hold them both.
28920 */
28921 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28922 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28923
28924 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28925 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28926
28927 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28928 {
28929 - unsigned int comb = atomic_read(&combined_event_count);
28930 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28931
28932 *cnt = (comb >> IN_PROGRESS_BITS);
28933 *inpr = comb & MAX_IN_PROGRESS;
28934 @@ -385,7 +385,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28935 ws->start_prevent_time = ws->last_time;
28936
28937 /* Increment the counter of events in progress. */
28938 - cec = atomic_inc_return(&combined_event_count);
28939 + cec = atomic_inc_return_unchecked(&combined_event_count);
28940
28941 trace_wakeup_source_activate(ws->name, cec);
28942 }
28943 @@ -511,7 +511,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28944 * Increment the counter of registered wakeup events and decrement the
28945 * couter of wakeup events in progress simultaneously.
28946 */
28947 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
28948 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28949 trace_wakeup_source_deactivate(ws->name, cec);
28950
28951 split_counters(&cnt, &inpr);
28952 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28953 index b0f553b..77b928b 100644
28954 --- a/drivers/block/cciss.c
28955 +++ b/drivers/block/cciss.c
28956 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28957 int err;
28958 u32 cp;
28959
28960 + memset(&arg64, 0, sizeof(arg64));
28961 +
28962 err = 0;
28963 err |=
28964 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28965 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28966 while (!list_empty(&h->reqQ)) {
28967 c = list_entry(h->reqQ.next, CommandList_struct, list);
28968 /* can't do anything if fifo is full */
28969 - if ((h->access.fifo_full(h))) {
28970 + if ((h->access->fifo_full(h))) {
28971 dev_warn(&h->pdev->dev, "fifo full\n");
28972 break;
28973 }
28974 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28975 h->Qdepth--;
28976
28977 /* Tell the controller execute command */
28978 - h->access.submit_command(h, c);
28979 + h->access->submit_command(h, c);
28980
28981 /* Put job onto the completed Q */
28982 addQ(&h->cmpQ, c);
28983 @@ -3443,17 +3445,17 @@ startio:
28984
28985 static inline unsigned long get_next_completion(ctlr_info_t *h)
28986 {
28987 - return h->access.command_completed(h);
28988 + return h->access->command_completed(h);
28989 }
28990
28991 static inline int interrupt_pending(ctlr_info_t *h)
28992 {
28993 - return h->access.intr_pending(h);
28994 + return h->access->intr_pending(h);
28995 }
28996
28997 static inline long interrupt_not_for_us(ctlr_info_t *h)
28998 {
28999 - return ((h->access.intr_pending(h) == 0) ||
29000 + return ((h->access->intr_pending(h) == 0) ||
29001 (h->interrupts_enabled == 0));
29002 }
29003
29004 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
29005 u32 a;
29006
29007 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
29008 - return h->access.command_completed(h);
29009 + return h->access->command_completed(h);
29010
29011 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
29012 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
29013 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
29014 trans_support & CFGTBL_Trans_use_short_tags);
29015
29016 /* Change the access methods to the performant access methods */
29017 - h->access = SA5_performant_access;
29018 + h->access = &SA5_performant_access;
29019 h->transMethod = CFGTBL_Trans_Performant;
29020
29021 return;
29022 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
29023 if (prod_index < 0)
29024 return -ENODEV;
29025 h->product_name = products[prod_index].product_name;
29026 - h->access = *(products[prod_index].access);
29027 + h->access = products[prod_index].access;
29028
29029 if (cciss_board_disabled(h)) {
29030 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
29031 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
29032 }
29033
29034 /* make sure the board interrupts are off */
29035 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29036 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29037 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
29038 if (rc)
29039 goto clean2;
29040 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
29041 * fake ones to scoop up any residual completions.
29042 */
29043 spin_lock_irqsave(&h->lock, flags);
29044 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29045 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29046 spin_unlock_irqrestore(&h->lock, flags);
29047 free_irq(h->intr[h->intr_mode], h);
29048 rc = cciss_request_irq(h, cciss_msix_discard_completions,
29049 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
29050 dev_info(&h->pdev->dev, "Board READY.\n");
29051 dev_info(&h->pdev->dev,
29052 "Waiting for stale completions to drain.\n");
29053 - h->access.set_intr_mask(h, CCISS_INTR_ON);
29054 + h->access->set_intr_mask(h, CCISS_INTR_ON);
29055 msleep(10000);
29056 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29057 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29058
29059 rc = controller_reset_failed(h->cfgtable);
29060 if (rc)
29061 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
29062 cciss_scsi_setup(h);
29063
29064 /* Turn the interrupts on so we can service requests */
29065 - h->access.set_intr_mask(h, CCISS_INTR_ON);
29066 + h->access->set_intr_mask(h, CCISS_INTR_ON);
29067
29068 /* Get the firmware version */
29069 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
29070 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
29071 kfree(flush_buf);
29072 if (return_code != IO_OK)
29073 dev_warn(&h->pdev->dev, "Error flushing cache\n");
29074 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29075 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29076 free_irq(h->intr[h->intr_mode], h);
29077 }
29078
29079 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
29080 index 7fda30e..eb5dfe0 100644
29081 --- a/drivers/block/cciss.h
29082 +++ b/drivers/block/cciss.h
29083 @@ -101,7 +101,7 @@ struct ctlr_info
29084 /* information about each logical volume */
29085 drive_info_struct *drv[CISS_MAX_LUN];
29086
29087 - struct access_method access;
29088 + struct access_method *access;
29089
29090 /* queue and queue Info */
29091 struct list_head reqQ;
29092 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
29093 index 9125bbe..eede5c8 100644
29094 --- a/drivers/block/cpqarray.c
29095 +++ b/drivers/block/cpqarray.c
29096 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29097 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29098 goto Enomem4;
29099 }
29100 - hba[i]->access.set_intr_mask(hba[i], 0);
29101 + hba[i]->access->set_intr_mask(hba[i], 0);
29102 if (request_irq(hba[i]->intr, do_ida_intr,
29103 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29104 {
29105 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29106 add_timer(&hba[i]->timer);
29107
29108 /* Enable IRQ now that spinlock and rate limit timer are set up */
29109 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29110 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29111
29112 for(j=0; j<NWD; j++) {
29113 struct gendisk *disk = ida_gendisk[i][j];
29114 @@ -694,7 +694,7 @@ DBGINFO(
29115 for(i=0; i<NR_PRODUCTS; i++) {
29116 if (board_id == products[i].board_id) {
29117 c->product_name = products[i].product_name;
29118 - c->access = *(products[i].access);
29119 + c->access = products[i].access;
29120 break;
29121 }
29122 }
29123 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29124 hba[ctlr]->intr = intr;
29125 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29126 hba[ctlr]->product_name = products[j].product_name;
29127 - hba[ctlr]->access = *(products[j].access);
29128 + hba[ctlr]->access = products[j].access;
29129 hba[ctlr]->ctlr = ctlr;
29130 hba[ctlr]->board_id = board_id;
29131 hba[ctlr]->pci_dev = NULL; /* not PCI */
29132 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29133
29134 while((c = h->reqQ) != NULL) {
29135 /* Can't do anything if we're busy */
29136 - if (h->access.fifo_full(h) == 0)
29137 + if (h->access->fifo_full(h) == 0)
29138 return;
29139
29140 /* Get the first entry from the request Q */
29141 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29142 h->Qdepth--;
29143
29144 /* Tell the controller to do our bidding */
29145 - h->access.submit_command(h, c);
29146 + h->access->submit_command(h, c);
29147
29148 /* Get onto the completion Q */
29149 addQ(&h->cmpQ, c);
29150 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29151 unsigned long flags;
29152 __u32 a,a1;
29153
29154 - istat = h->access.intr_pending(h);
29155 + istat = h->access->intr_pending(h);
29156 /* Is this interrupt for us? */
29157 if (istat == 0)
29158 return IRQ_NONE;
29159 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29160 */
29161 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29162 if (istat & FIFO_NOT_EMPTY) {
29163 - while((a = h->access.command_completed(h))) {
29164 + while((a = h->access->command_completed(h))) {
29165 a1 = a; a &= ~3;
29166 if ((c = h->cmpQ) == NULL)
29167 {
29168 @@ -1449,11 +1449,11 @@ static int sendcmd(
29169 /*
29170 * Disable interrupt
29171 */
29172 - info_p->access.set_intr_mask(info_p, 0);
29173 + info_p->access->set_intr_mask(info_p, 0);
29174 /* Make sure there is room in the command FIFO */
29175 /* Actually it should be completely empty at this time. */
29176 for (i = 200000; i > 0; i--) {
29177 - temp = info_p->access.fifo_full(info_p);
29178 + temp = info_p->access->fifo_full(info_p);
29179 if (temp != 0) {
29180 break;
29181 }
29182 @@ -1466,7 +1466,7 @@ DBG(
29183 /*
29184 * Send the cmd
29185 */
29186 - info_p->access.submit_command(info_p, c);
29187 + info_p->access->submit_command(info_p, c);
29188 complete = pollcomplete(ctlr);
29189
29190 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29191 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29192 * we check the new geometry. Then turn interrupts back on when
29193 * we're done.
29194 */
29195 - host->access.set_intr_mask(host, 0);
29196 + host->access->set_intr_mask(host, 0);
29197 getgeometry(ctlr);
29198 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29199 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29200
29201 for(i=0; i<NWD; i++) {
29202 struct gendisk *disk = ida_gendisk[ctlr][i];
29203 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29204 /* Wait (up to 2 seconds) for a command to complete */
29205
29206 for (i = 200000; i > 0; i--) {
29207 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29208 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29209 if (done == 0) {
29210 udelay(10); /* a short fixed delay */
29211 } else
29212 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29213 index be73e9d..7fbf140 100644
29214 --- a/drivers/block/cpqarray.h
29215 +++ b/drivers/block/cpqarray.h
29216 @@ -99,7 +99,7 @@ struct ctlr_info {
29217 drv_info_t drv[NWD];
29218 struct proc_dir_entry *proc;
29219
29220 - struct access_method access;
29221 + struct access_method *access;
29222
29223 cmdlist_t *reqQ;
29224 cmdlist_t *cmpQ;
29225 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29226 index 02f013a..afeba24 100644
29227 --- a/drivers/block/drbd/drbd_int.h
29228 +++ b/drivers/block/drbd/drbd_int.h
29229 @@ -735,7 +735,7 @@ struct drbd_request;
29230 struct drbd_epoch {
29231 struct list_head list;
29232 unsigned int barrier_nr;
29233 - atomic_t epoch_size; /* increased on every request added. */
29234 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29235 atomic_t active; /* increased on every req. added, and dec on every finished. */
29236 unsigned long flags;
29237 };
29238 @@ -1110,7 +1110,7 @@ struct drbd_conf {
29239 void *int_dig_in;
29240 void *int_dig_vv;
29241 wait_queue_head_t seq_wait;
29242 - atomic_t packet_seq;
29243 + atomic_unchecked_t packet_seq;
29244 unsigned int peer_seq;
29245 spinlock_t peer_seq_lock;
29246 unsigned int minor;
29247 @@ -1651,30 +1651,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29248
29249 static inline void drbd_tcp_cork(struct socket *sock)
29250 {
29251 - int __user val = 1;
29252 + int val = 1;
29253 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29254 - (char __user *)&val, sizeof(val));
29255 + (char __force_user *)&val, sizeof(val));
29256 }
29257
29258 static inline void drbd_tcp_uncork(struct socket *sock)
29259 {
29260 - int __user val = 0;
29261 + int val = 0;
29262 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29263 - (char __user *)&val, sizeof(val));
29264 + (char __force_user *)&val, sizeof(val));
29265 }
29266
29267 static inline void drbd_tcp_nodelay(struct socket *sock)
29268 {
29269 - int __user val = 1;
29270 + int val = 1;
29271 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29272 - (char __user *)&val, sizeof(val));
29273 + (char __force_user *)&val, sizeof(val));
29274 }
29275
29276 static inline void drbd_tcp_quickack(struct socket *sock)
29277 {
29278 - int __user val = 2;
29279 + int val = 2;
29280 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29281 - (char __user *)&val, sizeof(val));
29282 + (char __force_user *)&val, sizeof(val));
29283 }
29284
29285 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29286 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29287 index 920ede2..cb827ba 100644
29288 --- a/drivers/block/drbd/drbd_main.c
29289 +++ b/drivers/block/drbd/drbd_main.c
29290 @@ -2555,7 +2555,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29291 p.sector = sector;
29292 p.block_id = block_id;
29293 p.blksize = blksize;
29294 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29295 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29296
29297 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29298 return false;
29299 @@ -2853,7 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29300
29301 p.sector = cpu_to_be64(req->sector);
29302 p.block_id = (unsigned long)req;
29303 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29304 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29305
29306 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29307
29308 @@ -3138,7 +3138,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29309 atomic_set(&mdev->unacked_cnt, 0);
29310 atomic_set(&mdev->local_cnt, 0);
29311 atomic_set(&mdev->net_cnt, 0);
29312 - atomic_set(&mdev->packet_seq, 0);
29313 + atomic_set_unchecked(&mdev->packet_seq, 0);
29314 atomic_set(&mdev->pp_in_use, 0);
29315 atomic_set(&mdev->pp_in_use_by_net, 0);
29316 atomic_set(&mdev->rs_sect_in, 0);
29317 @@ -3220,8 +3220,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29318 mdev->receiver.t_state);
29319
29320 /* no need to lock it, I'm the only thread alive */
29321 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29322 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29323 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29324 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29325 mdev->al_writ_cnt =
29326 mdev->bm_writ_cnt =
29327 mdev->read_cnt =
29328 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29329 index 6d4de6a..7b7ad4b 100644
29330 --- a/drivers/block/drbd/drbd_nl.c
29331 +++ b/drivers/block/drbd/drbd_nl.c
29332 @@ -2387,7 +2387,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29333 module_put(THIS_MODULE);
29334 }
29335
29336 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29337 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29338
29339 static unsigned short *
29340 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29341 @@ -2458,7 +2458,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29342 cn_reply->id.idx = CN_IDX_DRBD;
29343 cn_reply->id.val = CN_VAL_DRBD;
29344
29345 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29346 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29347 cn_reply->ack = 0; /* not used here. */
29348 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29349 (int)((char *)tl - (char *)reply->tag_list);
29350 @@ -2490,7 +2490,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29351 cn_reply->id.idx = CN_IDX_DRBD;
29352 cn_reply->id.val = CN_VAL_DRBD;
29353
29354 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29355 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29356 cn_reply->ack = 0; /* not used here. */
29357 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29358 (int)((char *)tl - (char *)reply->tag_list);
29359 @@ -2568,7 +2568,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29360 cn_reply->id.idx = CN_IDX_DRBD;
29361 cn_reply->id.val = CN_VAL_DRBD;
29362
29363 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29364 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29365 cn_reply->ack = 0; // not used here.
29366 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29367 (int)((char*)tl - (char*)reply->tag_list);
29368 @@ -2607,7 +2607,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29369 cn_reply->id.idx = CN_IDX_DRBD;
29370 cn_reply->id.val = CN_VAL_DRBD;
29371
29372 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29373 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29374 cn_reply->ack = 0; /* not used here. */
29375 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29376 (int)((char *)tl - (char *)reply->tag_list);
29377 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29378 index ea4836e..272d72a 100644
29379 --- a/drivers/block/drbd/drbd_receiver.c
29380 +++ b/drivers/block/drbd/drbd_receiver.c
29381 @@ -893,7 +893,7 @@ retry:
29382 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29383 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29384
29385 - atomic_set(&mdev->packet_seq, 0);
29386 + atomic_set_unchecked(&mdev->packet_seq, 0);
29387 mdev->peer_seq = 0;
29388
29389 if (drbd_send_protocol(mdev) == -1)
29390 @@ -994,7 +994,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29391 do {
29392 next_epoch = NULL;
29393
29394 - epoch_size = atomic_read(&epoch->epoch_size);
29395 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29396
29397 switch (ev & ~EV_CLEANUP) {
29398 case EV_PUT:
29399 @@ -1030,7 +1030,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29400 rv = FE_DESTROYED;
29401 } else {
29402 epoch->flags = 0;
29403 - atomic_set(&epoch->epoch_size, 0);
29404 + atomic_set_unchecked(&epoch->epoch_size, 0);
29405 /* atomic_set(&epoch->active, 0); is already zero */
29406 if (rv == FE_STILL_LIVE)
29407 rv = FE_RECYCLED;
29408 @@ -1205,14 +1205,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29409 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29410 drbd_flush(mdev);
29411
29412 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29413 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29414 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29415 if (epoch)
29416 break;
29417 }
29418
29419 epoch = mdev->current_epoch;
29420 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29421 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29422
29423 D_ASSERT(atomic_read(&epoch->active) == 0);
29424 D_ASSERT(epoch->flags == 0);
29425 @@ -1224,11 +1224,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29426 }
29427
29428 epoch->flags = 0;
29429 - atomic_set(&epoch->epoch_size, 0);
29430 + atomic_set_unchecked(&epoch->epoch_size, 0);
29431 atomic_set(&epoch->active, 0);
29432
29433 spin_lock(&mdev->epoch_lock);
29434 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29435 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29436 list_add(&epoch->list, &mdev->current_epoch->list);
29437 mdev->current_epoch = epoch;
29438 mdev->epochs++;
29439 @@ -1695,7 +1695,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29440 spin_unlock(&mdev->peer_seq_lock);
29441
29442 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29443 - atomic_inc(&mdev->current_epoch->epoch_size);
29444 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29445 return drbd_drain_block(mdev, data_size);
29446 }
29447
29448 @@ -1721,7 +1721,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29449
29450 spin_lock(&mdev->epoch_lock);
29451 e->epoch = mdev->current_epoch;
29452 - atomic_inc(&e->epoch->epoch_size);
29453 + atomic_inc_unchecked(&e->epoch->epoch_size);
29454 atomic_inc(&e->epoch->active);
29455 spin_unlock(&mdev->epoch_lock);
29456
29457 @@ -3936,7 +3936,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29458 D_ASSERT(list_empty(&mdev->done_ee));
29459
29460 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29461 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29462 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29463 D_ASSERT(list_empty(&mdev->current_epoch->list));
29464 }
29465
29466 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29467 index 3bba655..6151b66 100644
29468 --- a/drivers/block/loop.c
29469 +++ b/drivers/block/loop.c
29470 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29471 mm_segment_t old_fs = get_fs();
29472
29473 set_fs(get_ds());
29474 - bw = file->f_op->write(file, buf, len, &pos);
29475 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29476 set_fs(old_fs);
29477 if (likely(bw == len))
29478 return 0;
29479 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29480 index ea6f632..eafb192 100644
29481 --- a/drivers/char/Kconfig
29482 +++ b/drivers/char/Kconfig
29483 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29484
29485 config DEVKMEM
29486 bool "/dev/kmem virtual device support"
29487 - default y
29488 + default n
29489 + depends on !GRKERNSEC_KMEM
29490 help
29491 Say Y here if you want to support the /dev/kmem device. The
29492 /dev/kmem device is rarely used, but can be used for certain
29493 @@ -581,6 +582,7 @@ config DEVPORT
29494 bool
29495 depends on !M68K
29496 depends on ISA || PCI
29497 + depends on !GRKERNSEC_KMEM
29498 default y
29499
29500 source "drivers/s390/char/Kconfig"
29501 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29502 index 2e04433..22afc64 100644
29503 --- a/drivers/char/agp/frontend.c
29504 +++ b/drivers/char/agp/frontend.c
29505 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29506 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29507 return -EFAULT;
29508
29509 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29510 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29511 return -EFAULT;
29512
29513 client = agp_find_client_by_pid(reserve.pid);
29514 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29515 index 21cb980..f15107c 100644
29516 --- a/drivers/char/genrtc.c
29517 +++ b/drivers/char/genrtc.c
29518 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29519 switch (cmd) {
29520
29521 case RTC_PLL_GET:
29522 + memset(&pll, 0, sizeof(pll));
29523 if (get_rtc_pll(&pll))
29524 return -EINVAL;
29525 else
29526 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29527 index dfd7876..c0b0885 100644
29528 --- a/drivers/char/hpet.c
29529 +++ b/drivers/char/hpet.c
29530 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29531 }
29532
29533 static int
29534 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29535 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29536 struct hpet_info *info)
29537 {
29538 struct hpet_timer __iomem *timer;
29539 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29540 index 2c29942..604c5ba 100644
29541 --- a/drivers/char/ipmi/ipmi_msghandler.c
29542 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29543 @@ -420,7 +420,7 @@ struct ipmi_smi {
29544 struct proc_dir_entry *proc_dir;
29545 char proc_dir_name[10];
29546
29547 - atomic_t stats[IPMI_NUM_STATS];
29548 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29549
29550 /*
29551 * run_to_completion duplicate of smb_info, smi_info
29552 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29553
29554
29555 #define ipmi_inc_stat(intf, stat) \
29556 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29557 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29558 #define ipmi_get_stat(intf, stat) \
29559 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29560 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29561
29562 static int is_lan_addr(struct ipmi_addr *addr)
29563 {
29564 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29565 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29566 init_waitqueue_head(&intf->waitq);
29567 for (i = 0; i < IPMI_NUM_STATS; i++)
29568 - atomic_set(&intf->stats[i], 0);
29569 + atomic_set_unchecked(&intf->stats[i], 0);
29570
29571 intf->proc_dir = NULL;
29572
29573 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29574 index 1e638ff..a869ef5 100644
29575 --- a/drivers/char/ipmi/ipmi_si_intf.c
29576 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29577 @@ -275,7 +275,7 @@ struct smi_info {
29578 unsigned char slave_addr;
29579
29580 /* Counters and things for the proc filesystem. */
29581 - atomic_t stats[SI_NUM_STATS];
29582 + atomic_unchecked_t stats[SI_NUM_STATS];
29583
29584 struct task_struct *thread;
29585
29586 @@ -284,9 +284,9 @@ struct smi_info {
29587 };
29588
29589 #define smi_inc_stat(smi, stat) \
29590 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29591 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29592 #define smi_get_stat(smi, stat) \
29593 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29594 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29595
29596 #define SI_MAX_PARMS 4
29597
29598 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29599 atomic_set(&new_smi->req_events, 0);
29600 new_smi->run_to_completion = 0;
29601 for (i = 0; i < SI_NUM_STATS; i++)
29602 - atomic_set(&new_smi->stats[i], 0);
29603 + atomic_set_unchecked(&new_smi->stats[i], 0);
29604
29605 new_smi->interrupt_disabled = 1;
29606 atomic_set(&new_smi->stop_operation, 0);
29607 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29608 index 47ff7e4..0c7d340 100644
29609 --- a/drivers/char/mbcs.c
29610 +++ b/drivers/char/mbcs.c
29611 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29612 return 0;
29613 }
29614
29615 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29616 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29617 {
29618 .part_num = MBCS_PART_NUM,
29619 .mfg_num = MBCS_MFG_NUM,
29620 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29621 index 67c3371..ba8429d 100644
29622 --- a/drivers/char/mem.c
29623 +++ b/drivers/char/mem.c
29624 @@ -18,6 +18,7 @@
29625 #include <linux/raw.h>
29626 #include <linux/tty.h>
29627 #include <linux/capability.h>
29628 +#include <linux/security.h>
29629 #include <linux/ptrace.h>
29630 #include <linux/device.h>
29631 #include <linux/highmem.h>
29632 @@ -35,6 +36,10 @@
29633 # include <linux/efi.h>
29634 #endif
29635
29636 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29637 +extern const struct file_operations grsec_fops;
29638 +#endif
29639 +
29640 static inline unsigned long size_inside_page(unsigned long start,
29641 unsigned long size)
29642 {
29643 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29644
29645 while (cursor < to) {
29646 if (!devmem_is_allowed(pfn)) {
29647 +#ifdef CONFIG_GRKERNSEC_KMEM
29648 + gr_handle_mem_readwrite(from, to);
29649 +#else
29650 printk(KERN_INFO
29651 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29652 current->comm, from, to);
29653 +#endif
29654 return 0;
29655 }
29656 cursor += PAGE_SIZE;
29657 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29658 }
29659 return 1;
29660 }
29661 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29662 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29663 +{
29664 + return 0;
29665 +}
29666 #else
29667 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29668 {
29669 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29670
29671 while (count > 0) {
29672 unsigned long remaining;
29673 + char *temp;
29674
29675 sz = size_inside_page(p, count);
29676
29677 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29678 if (!ptr)
29679 return -EFAULT;
29680
29681 - remaining = copy_to_user(buf, ptr, sz);
29682 +#ifdef CONFIG_PAX_USERCOPY
29683 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29684 + if (!temp) {
29685 + unxlate_dev_mem_ptr(p, ptr);
29686 + return -ENOMEM;
29687 + }
29688 + memcpy(temp, ptr, sz);
29689 +#else
29690 + temp = ptr;
29691 +#endif
29692 +
29693 + remaining = copy_to_user(buf, temp, sz);
29694 +
29695 +#ifdef CONFIG_PAX_USERCOPY
29696 + kfree(temp);
29697 +#endif
29698 +
29699 unxlate_dev_mem_ptr(p, ptr);
29700 if (remaining)
29701 return -EFAULT;
29702 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29703 size_t count, loff_t *ppos)
29704 {
29705 unsigned long p = *ppos;
29706 - ssize_t low_count, read, sz;
29707 + ssize_t low_count, read, sz, err = 0;
29708 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29709 - int err = 0;
29710
29711 read = 0;
29712 if (p < (unsigned long) high_memory) {
29713 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29714 }
29715 #endif
29716 while (low_count > 0) {
29717 + char *temp;
29718 +
29719 sz = size_inside_page(p, low_count);
29720
29721 /*
29722 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29723 */
29724 kbuf = xlate_dev_kmem_ptr((char *)p);
29725
29726 - if (copy_to_user(buf, kbuf, sz))
29727 +#ifdef CONFIG_PAX_USERCOPY
29728 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29729 + if (!temp)
29730 + return -ENOMEM;
29731 + memcpy(temp, kbuf, sz);
29732 +#else
29733 + temp = kbuf;
29734 +#endif
29735 +
29736 + err = copy_to_user(buf, temp, sz);
29737 +
29738 +#ifdef CONFIG_PAX_USERCOPY
29739 + kfree(temp);
29740 +#endif
29741 +
29742 + if (err)
29743 return -EFAULT;
29744 buf += sz;
29745 p += sz;
29746 @@ -831,6 +878,9 @@ static const struct memdev {
29747 #ifdef CONFIG_CRASH_DUMP
29748 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29749 #endif
29750 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29751 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29752 +#endif
29753 };
29754
29755 static int memory_open(struct inode *inode, struct file *filp)
29756 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29757 index 9df78e2..01ba9ae 100644
29758 --- a/drivers/char/nvram.c
29759 +++ b/drivers/char/nvram.c
29760 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29761
29762 spin_unlock_irq(&rtc_lock);
29763
29764 - if (copy_to_user(buf, contents, tmp - contents))
29765 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29766 return -EFAULT;
29767
29768 *ppos = i;
29769 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
29770 index 0a484b4..f48ccd1 100644
29771 --- a/drivers/char/pcmcia/synclink_cs.c
29772 +++ b/drivers/char/pcmcia/synclink_cs.c
29773 @@ -2340,9 +2340,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29774
29775 if (debug_level >= DEBUG_LEVEL_INFO)
29776 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
29777 - __FILE__,__LINE__, info->device_name, port->count);
29778 + __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
29779
29780 - WARN_ON(!port->count);
29781 + WARN_ON(!atomic_read(&port->count));
29782
29783 if (tty_port_close_start(port, tty, filp) == 0)
29784 goto cleanup;
29785 @@ -2360,7 +2360,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29786 cleanup:
29787 if (debug_level >= DEBUG_LEVEL_INFO)
29788 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
29789 - tty->driver->name, port->count);
29790 + tty->driver->name, atomic_read(&port->count));
29791 }
29792
29793 /* Wait until the transmitter is empty.
29794 @@ -2502,7 +2502,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29795
29796 if (debug_level >= DEBUG_LEVEL_INFO)
29797 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
29798 - __FILE__,__LINE__,tty->driver->name, port->count);
29799 + __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
29800
29801 /* If port is closing, signal caller to try again */
29802 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
29803 @@ -2522,11 +2522,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29804 goto cleanup;
29805 }
29806 spin_lock(&port->lock);
29807 - port->count++;
29808 + atomic_inc(&port->count);
29809 spin_unlock(&port->lock);
29810 spin_unlock_irqrestore(&info->netlock, flags);
29811
29812 - if (port->count == 1) {
29813 + if (atomic_read(&port->count) == 1) {
29814 /* 1st open on this device, init hardware */
29815 retval = startup(info, tty);
29816 if (retval < 0)
29817 @@ -3891,7 +3891,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
29818 unsigned short new_crctype;
29819
29820 /* return error if TTY interface open */
29821 - if (info->port.count)
29822 + if (atomic_read(&info->port.count))
29823 return -EBUSY;
29824
29825 switch (encoding)
29826 @@ -3994,7 +3994,7 @@ static int hdlcdev_open(struct net_device *dev)
29827
29828 /* arbitrate between network and tty opens */
29829 spin_lock_irqsave(&info->netlock, flags);
29830 - if (info->port.count != 0 || info->netcount != 0) {
29831 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
29832 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
29833 spin_unlock_irqrestore(&info->netlock, flags);
29834 return -EBUSY;
29835 @@ -4083,7 +4083,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
29836 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
29837
29838 /* return error if TTY interface open */
29839 - if (info->port.count)
29840 + if (atomic_read(&info->port.count))
29841 return -EBUSY;
29842
29843 if (cmd != SIOCWANDEV)
29844 diff --git a/drivers/char/random.c b/drivers/char/random.c
29845 index d98b2a6..f0ceb97 100644
29846 --- a/drivers/char/random.c
29847 +++ b/drivers/char/random.c
29848 @@ -272,8 +272,13 @@
29849 /*
29850 * Configuration information
29851 */
29852 +#ifdef CONFIG_GRKERNSEC_RANDNET
29853 +#define INPUT_POOL_WORDS 512
29854 +#define OUTPUT_POOL_WORDS 128
29855 +#else
29856 #define INPUT_POOL_WORDS 128
29857 #define OUTPUT_POOL_WORDS 32
29858 +#endif
29859 #define SEC_XFER_SIZE 512
29860 #define EXTRACT_SIZE 10
29861
29862 @@ -313,10 +318,17 @@ static struct poolinfo {
29863 int poolwords;
29864 int tap1, tap2, tap3, tap4, tap5;
29865 } poolinfo_table[] = {
29866 +#ifdef CONFIG_GRKERNSEC_RANDNET
29867 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29868 + { 512, 411, 308, 208, 104, 1 },
29869 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29870 + { 128, 103, 76, 51, 25, 1 },
29871 +#else
29872 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29873 { 128, 103, 76, 51, 25, 1 },
29874 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29875 { 32, 26, 20, 14, 7, 1 },
29876 +#endif
29877 #if 0
29878 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29879 { 2048, 1638, 1231, 819, 411, 1 },
29880 @@ -527,8 +539,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
29881 input_rotate += i ? 7 : 14;
29882 }
29883
29884 - ACCESS_ONCE(r->input_rotate) = input_rotate;
29885 - ACCESS_ONCE(r->add_ptr) = i;
29886 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
29887 + ACCESS_ONCE_RW(r->add_ptr) = i;
29888 smp_wmb();
29889
29890 if (out)
29891 @@ -799,6 +811,17 @@ void add_disk_randomness(struct gendisk *disk)
29892 }
29893 #endif
29894
29895 +#ifdef CONFIG_PAX_LATENT_ENTROPY
29896 +u64 latent_entropy;
29897 +
29898 +__init void transfer_latent_entropy(void)
29899 +{
29900 + mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29901 + mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29902 +// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29903 +}
29904 +#endif
29905 +
29906 /*********************************************************************
29907 *
29908 * Entropy extraction routines
29909 @@ -1008,7 +1031,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29910
29911 extract_buf(r, tmp);
29912 i = min_t(int, nbytes, EXTRACT_SIZE);
29913 - if (copy_to_user(buf, tmp, i)) {
29914 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29915 ret = -EFAULT;
29916 break;
29917 }
29918 @@ -1342,7 +1365,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29919 #include <linux/sysctl.h>
29920
29921 static int min_read_thresh = 8, min_write_thresh;
29922 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29923 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29924 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29925 static char sysctl_bootid[16];
29926
29927 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29928 index 45713f0..8286d21 100644
29929 --- a/drivers/char/sonypi.c
29930 +++ b/drivers/char/sonypi.c
29931 @@ -54,6 +54,7 @@
29932
29933 #include <asm/uaccess.h>
29934 #include <asm/io.h>
29935 +#include <asm/local.h>
29936
29937 #include <linux/sonypi.h>
29938
29939 @@ -490,7 +491,7 @@ static struct sonypi_device {
29940 spinlock_t fifo_lock;
29941 wait_queue_head_t fifo_proc_list;
29942 struct fasync_struct *fifo_async;
29943 - int open_count;
29944 + local_t open_count;
29945 int model;
29946 struct input_dev *input_jog_dev;
29947 struct input_dev *input_key_dev;
29948 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29949 static int sonypi_misc_release(struct inode *inode, struct file *file)
29950 {
29951 mutex_lock(&sonypi_device.lock);
29952 - sonypi_device.open_count--;
29953 + local_dec(&sonypi_device.open_count);
29954 mutex_unlock(&sonypi_device.lock);
29955 return 0;
29956 }
29957 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29958 {
29959 mutex_lock(&sonypi_device.lock);
29960 /* Flush input queue on first open */
29961 - if (!sonypi_device.open_count)
29962 + if (!local_read(&sonypi_device.open_count))
29963 kfifo_reset(&sonypi_device.fifo);
29964 - sonypi_device.open_count++;
29965 + local_inc(&sonypi_device.open_count);
29966 mutex_unlock(&sonypi_device.lock);
29967
29968 return 0;
29969 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29970 index 08427ab..1ab10b7 100644
29971 --- a/drivers/char/tpm/tpm.c
29972 +++ b/drivers/char/tpm/tpm.c
29973 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29974 chip->vendor.req_complete_val)
29975 goto out_recv;
29976
29977 - if ((status == chip->vendor.req_canceled)) {
29978 + if (status == chip->vendor.req_canceled) {
29979 dev_err(chip->dev, "Operation Canceled\n");
29980 rc = -ECANCELED;
29981 goto out;
29982 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29983 index 0636520..169c1d0 100644
29984 --- a/drivers/char/tpm/tpm_bios.c
29985 +++ b/drivers/char/tpm/tpm_bios.c
29986 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29987 event = addr;
29988
29989 if ((event->event_type == 0 && event->event_size == 0) ||
29990 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29991 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29992 return NULL;
29993
29994 return addr;
29995 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29996 return NULL;
29997
29998 if ((event->event_type == 0 && event->event_size == 0) ||
29999 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
30000 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
30001 return NULL;
30002
30003 (*pos)++;
30004 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
30005 int i;
30006
30007 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
30008 - seq_putc(m, data[i]);
30009 + if (!seq_putc(m, data[i]))
30010 + return -EFAULT;
30011
30012 return 0;
30013 }
30014 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
30015 log->bios_event_log_end = log->bios_event_log + len;
30016
30017 virt = acpi_os_map_memory(start, len);
30018 + if (!virt) {
30019 + kfree(log->bios_event_log);
30020 + log->bios_event_log = NULL;
30021 + return -EFAULT;
30022 + }
30023
30024 - memcpy(log->bios_event_log, virt, len);
30025 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
30026
30027 acpi_os_unmap_memory(virt, len);
30028 return 0;
30029 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
30030 index cdf2f54..e55c197 100644
30031 --- a/drivers/char/virtio_console.c
30032 +++ b/drivers/char/virtio_console.c
30033 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
30034 if (to_user) {
30035 ssize_t ret;
30036
30037 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
30038 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
30039 if (ret)
30040 return -EFAULT;
30041 } else {
30042 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
30043 if (!port_has_data(port) && !port->host_connected)
30044 return 0;
30045
30046 - return fill_readbuf(port, ubuf, count, true);
30047 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
30048 }
30049
30050 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
30051 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
30052 index 97f5064..202b6e6 100644
30053 --- a/drivers/edac/edac_pci_sysfs.c
30054 +++ b/drivers/edac/edac_pci_sysfs.c
30055 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
30056 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
30057 static int edac_pci_poll_msec = 1000; /* one second workq period */
30058
30059 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
30060 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30061 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
30062 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
30063
30064 static struct kobject *edac_pci_top_main_kobj;
30065 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
30066 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30067 edac_printk(KERN_CRIT, EDAC_PCI,
30068 "Signaled System Error on %s\n",
30069 pci_name(dev));
30070 - atomic_inc(&pci_nonparity_count);
30071 + atomic_inc_unchecked(&pci_nonparity_count);
30072 }
30073
30074 if (status & (PCI_STATUS_PARITY)) {
30075 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30076 "Master Data Parity Error on %s\n",
30077 pci_name(dev));
30078
30079 - atomic_inc(&pci_parity_count);
30080 + atomic_inc_unchecked(&pci_parity_count);
30081 }
30082
30083 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30084 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30085 "Detected Parity Error on %s\n",
30086 pci_name(dev));
30087
30088 - atomic_inc(&pci_parity_count);
30089 + atomic_inc_unchecked(&pci_parity_count);
30090 }
30091 }
30092
30093 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30094 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
30095 "Signaled System Error on %s\n",
30096 pci_name(dev));
30097 - atomic_inc(&pci_nonparity_count);
30098 + atomic_inc_unchecked(&pci_nonparity_count);
30099 }
30100
30101 if (status & (PCI_STATUS_PARITY)) {
30102 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30103 "Master Data Parity Error on "
30104 "%s\n", pci_name(dev));
30105
30106 - atomic_inc(&pci_parity_count);
30107 + atomic_inc_unchecked(&pci_parity_count);
30108 }
30109
30110 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30111 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30112 "Detected Parity Error on %s\n",
30113 pci_name(dev));
30114
30115 - atomic_inc(&pci_parity_count);
30116 + atomic_inc_unchecked(&pci_parity_count);
30117 }
30118 }
30119 }
30120 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
30121 if (!check_pci_errors)
30122 return;
30123
30124 - before_count = atomic_read(&pci_parity_count);
30125 + before_count = atomic_read_unchecked(&pci_parity_count);
30126
30127 /* scan all PCI devices looking for a Parity Error on devices and
30128 * bridges.
30129 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
30130 /* Only if operator has selected panic on PCI Error */
30131 if (edac_pci_get_panic_on_pe()) {
30132 /* If the count is different 'after' from 'before' */
30133 - if (before_count != atomic_read(&pci_parity_count))
30134 + if (before_count != atomic_read_unchecked(&pci_parity_count))
30135 panic("EDAC: PCI Parity Error");
30136 }
30137 }
30138 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30139 index 8c87a5e..a19cbd7 100644
30140 --- a/drivers/edac/mce_amd.h
30141 +++ b/drivers/edac/mce_amd.h
30142 @@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
30143 struct amd_decoder_ops {
30144 bool (*dc_mce)(u16, u8);
30145 bool (*ic_mce)(u16, u8);
30146 -};
30147 +} __no_const;
30148
30149 void amd_report_gart_errors(bool);
30150 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30151 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30152 index 57ea7f4..789e3c3 100644
30153 --- a/drivers/firewire/core-card.c
30154 +++ b/drivers/firewire/core-card.c
30155 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
30156
30157 void fw_core_remove_card(struct fw_card *card)
30158 {
30159 - struct fw_card_driver dummy_driver = dummy_driver_template;
30160 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
30161
30162 card->driver->update_phy_reg(card, 4,
30163 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30164 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30165 index 2783f69..9f4b0cc 100644
30166 --- a/drivers/firewire/core-cdev.c
30167 +++ b/drivers/firewire/core-cdev.c
30168 @@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
30169 int ret;
30170
30171 if ((request->channels == 0 && request->bandwidth == 0) ||
30172 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30173 - request->bandwidth < 0)
30174 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30175 return -EINVAL;
30176
30177 r = kmalloc(sizeof(*r), GFP_KERNEL);
30178 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30179 index 780708d..ad60a66 100644
30180 --- a/drivers/firewire/core-transaction.c
30181 +++ b/drivers/firewire/core-transaction.c
30182 @@ -37,6 +37,7 @@
30183 #include <linux/timer.h>
30184 #include <linux/types.h>
30185 #include <linux/workqueue.h>
30186 +#include <linux/sched.h>
30187
30188 #include <asm/byteorder.h>
30189
30190 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30191 index 515a42c..5ecf3ba 100644
30192 --- a/drivers/firewire/core.h
30193 +++ b/drivers/firewire/core.h
30194 @@ -111,6 +111,7 @@ struct fw_card_driver {
30195
30196 int (*stop_iso)(struct fw_iso_context *ctx);
30197 };
30198 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30199
30200 void fw_card_initialize(struct fw_card *card,
30201 const struct fw_card_driver *driver, struct device *device);
30202 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30203 index b298158..7ed8432 100644
30204 --- a/drivers/firmware/dmi_scan.c
30205 +++ b/drivers/firmware/dmi_scan.c
30206 @@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
30207 }
30208 }
30209 else {
30210 - /*
30211 - * no iounmap() for that ioremap(); it would be a no-op, but
30212 - * it's so early in setup that sucker gets confused into doing
30213 - * what it shouldn't if we actually call it.
30214 - */
30215 p = dmi_ioremap(0xF0000, 0x10000);
30216 if (p == NULL)
30217 goto error;
30218 @@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30219 if (buf == NULL)
30220 return -1;
30221
30222 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30223 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30224
30225 iounmap(buf);
30226 return 0;
30227 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30228 index 82d5c20..44a7177 100644
30229 --- a/drivers/gpio/gpio-vr41xx.c
30230 +++ b/drivers/gpio/gpio-vr41xx.c
30231 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30232 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30233 maskl, pendl, maskh, pendh);
30234
30235 - atomic_inc(&irq_err_count);
30236 + atomic_inc_unchecked(&irq_err_count);
30237
30238 return -EINVAL;
30239 }
30240 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30241 index 3252e70..b5314ace 100644
30242 --- a/drivers/gpu/drm/drm_crtc_helper.c
30243 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30244 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30245 struct drm_crtc *tmp;
30246 int crtc_mask = 1;
30247
30248 - WARN(!crtc, "checking null crtc?\n");
30249 + BUG_ON(!crtc);
30250
30251 dev = crtc->dev;
30252
30253 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30254 index 8a9d079..606cdd5 100644
30255 --- a/drivers/gpu/drm/drm_drv.c
30256 +++ b/drivers/gpu/drm/drm_drv.c
30257 @@ -318,7 +318,7 @@ module_exit(drm_core_exit);
30258 /**
30259 * Copy and IOCTL return string to user space
30260 */
30261 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30262 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30263 {
30264 int len;
30265
30266 @@ -401,7 +401,7 @@ long drm_ioctl(struct file *filp,
30267 return -ENODEV;
30268
30269 atomic_inc(&dev->ioctl_count);
30270 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30271 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30272 ++file_priv->ioctl_count;
30273
30274 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30275 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30276 index 123de28..43a0897 100644
30277 --- a/drivers/gpu/drm/drm_fops.c
30278 +++ b/drivers/gpu/drm/drm_fops.c
30279 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30280 }
30281
30282 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30283 - atomic_set(&dev->counts[i], 0);
30284 + atomic_set_unchecked(&dev->counts[i], 0);
30285
30286 dev->sigdata.lock = NULL;
30287
30288 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30289
30290 retcode = drm_open_helper(inode, filp, dev);
30291 if (!retcode) {
30292 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30293 - if (!dev->open_count++)
30294 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30295 + if (local_inc_return(&dev->open_count) == 1)
30296 retcode = drm_setup(dev);
30297 }
30298 if (!retcode) {
30299 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30300
30301 mutex_lock(&drm_global_mutex);
30302
30303 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30304 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30305
30306 if (dev->driver->preclose)
30307 dev->driver->preclose(dev, file_priv);
30308 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30309 * Begin inline drm_release
30310 */
30311
30312 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30313 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30314 task_pid_nr(current),
30315 (long)old_encode_dev(file_priv->minor->device),
30316 - dev->open_count);
30317 + local_read(&dev->open_count));
30318
30319 /* Release any auth tokens that might point to this file_priv,
30320 (do that under the drm_global_mutex) */
30321 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30322 * End inline drm_release
30323 */
30324
30325 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30326 - if (!--dev->open_count) {
30327 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30328 + if (local_dec_and_test(&dev->open_count)) {
30329 if (atomic_read(&dev->ioctl_count)) {
30330 DRM_ERROR("Device busy: %d\n",
30331 atomic_read(&dev->ioctl_count));
30332 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30333 index c87dc96..326055d 100644
30334 --- a/drivers/gpu/drm/drm_global.c
30335 +++ b/drivers/gpu/drm/drm_global.c
30336 @@ -36,7 +36,7 @@
30337 struct drm_global_item {
30338 struct mutex mutex;
30339 void *object;
30340 - int refcount;
30341 + atomic_t refcount;
30342 };
30343
30344 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30345 @@ -49,7 +49,7 @@ void drm_global_init(void)
30346 struct drm_global_item *item = &glob[i];
30347 mutex_init(&item->mutex);
30348 item->object = NULL;
30349 - item->refcount = 0;
30350 + atomic_set(&item->refcount, 0);
30351 }
30352 }
30353
30354 @@ -59,7 +59,7 @@ void drm_global_release(void)
30355 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30356 struct drm_global_item *item = &glob[i];
30357 BUG_ON(item->object != NULL);
30358 - BUG_ON(item->refcount != 0);
30359 + BUG_ON(atomic_read(&item->refcount) != 0);
30360 }
30361 }
30362
30363 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30364 void *object;
30365
30366 mutex_lock(&item->mutex);
30367 - if (item->refcount == 0) {
30368 + if (atomic_read(&item->refcount) == 0) {
30369 item->object = kzalloc(ref->size, GFP_KERNEL);
30370 if (unlikely(item->object == NULL)) {
30371 ret = -ENOMEM;
30372 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30373 goto out_err;
30374
30375 }
30376 - ++item->refcount;
30377 + atomic_inc(&item->refcount);
30378 ref->object = item->object;
30379 object = item->object;
30380 mutex_unlock(&item->mutex);
30381 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30382 struct drm_global_item *item = &glob[ref->global_type];
30383
30384 mutex_lock(&item->mutex);
30385 - BUG_ON(item->refcount == 0);
30386 + BUG_ON(atomic_read(&item->refcount) == 0);
30387 BUG_ON(ref->object != item->object);
30388 - if (--item->refcount == 0) {
30389 + if (atomic_dec_and_test(&item->refcount)) {
30390 ref->release(ref);
30391 item->object = NULL;
30392 }
30393 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30394 index ab1162d..42587b2 100644
30395 --- a/drivers/gpu/drm/drm_info.c
30396 +++ b/drivers/gpu/drm/drm_info.c
30397 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30398 struct drm_local_map *map;
30399 struct drm_map_list *r_list;
30400
30401 - /* Hardcoded from _DRM_FRAME_BUFFER,
30402 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30403 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30404 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30405 + static const char * const types[] = {
30406 + [_DRM_FRAME_BUFFER] = "FB",
30407 + [_DRM_REGISTERS] = "REG",
30408 + [_DRM_SHM] = "SHM",
30409 + [_DRM_AGP] = "AGP",
30410 + [_DRM_SCATTER_GATHER] = "SG",
30411 + [_DRM_CONSISTENT] = "PCI",
30412 + [_DRM_GEM] = "GEM" };
30413 const char *type;
30414 int i;
30415
30416 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30417 map = r_list->map;
30418 if (!map)
30419 continue;
30420 - if (map->type < 0 || map->type > 5)
30421 + if (map->type >= ARRAY_SIZE(types))
30422 type = "??";
30423 else
30424 type = types[map->type];
30425 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30426 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30427 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30428 vma->vm_flags & VM_IO ? 'i' : '-',
30429 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30430 + 0);
30431 +#else
30432 vma->vm_pgoff);
30433 +#endif
30434
30435 #if defined(__i386__)
30436 pgprot = pgprot_val(vma->vm_page_prot);
30437 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30438 index 637fcc3..e890b33 100644
30439 --- a/drivers/gpu/drm/drm_ioc32.c
30440 +++ b/drivers/gpu/drm/drm_ioc32.c
30441 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30442 request = compat_alloc_user_space(nbytes);
30443 if (!access_ok(VERIFY_WRITE, request, nbytes))
30444 return -EFAULT;
30445 - list = (struct drm_buf_desc *) (request + 1);
30446 + list = (struct drm_buf_desc __user *) (request + 1);
30447
30448 if (__put_user(count, &request->count)
30449 || __put_user(list, &request->list))
30450 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30451 request = compat_alloc_user_space(nbytes);
30452 if (!access_ok(VERIFY_WRITE, request, nbytes))
30453 return -EFAULT;
30454 - list = (struct drm_buf_pub *) (request + 1);
30455 + list = (struct drm_buf_pub __user *) (request + 1);
30456
30457 if (__put_user(count, &request->count)
30458 || __put_user(list, &request->list))
30459 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30460 index 64a62c6..ceab35e 100644
30461 --- a/drivers/gpu/drm/drm_ioctl.c
30462 +++ b/drivers/gpu/drm/drm_ioctl.c
30463 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30464 stats->data[i].value =
30465 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30466 else
30467 - stats->data[i].value = atomic_read(&dev->counts[i]);
30468 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30469 stats->data[i].type = dev->types[i];
30470 }
30471
30472 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30473 index 5211520..c744d85 100644
30474 --- a/drivers/gpu/drm/drm_lock.c
30475 +++ b/drivers/gpu/drm/drm_lock.c
30476 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30477 if (drm_lock_take(&master->lock, lock->context)) {
30478 master->lock.file_priv = file_priv;
30479 master->lock.lock_time = jiffies;
30480 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30481 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30482 break; /* Got lock */
30483 }
30484
30485 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30486 return -EINVAL;
30487 }
30488
30489 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30490 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30491
30492 if (drm_lock_free(&master->lock, lock->context)) {
30493 /* FIXME: Should really bail out here. */
30494 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30495 index 21bcd4a..8e074e0 100644
30496 --- a/drivers/gpu/drm/drm_stub.c
30497 +++ b/drivers/gpu/drm/drm_stub.c
30498 @@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
30499
30500 drm_device_set_unplugged(dev);
30501
30502 - if (dev->open_count == 0) {
30503 + if (local_read(&dev->open_count) == 0) {
30504 drm_put_dev(dev);
30505 }
30506 mutex_unlock(&drm_global_mutex);
30507 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30508 index fa94391..ed26ec8 100644
30509 --- a/drivers/gpu/drm/i810/i810_dma.c
30510 +++ b/drivers/gpu/drm/i810/i810_dma.c
30511 @@ -943,8 +943,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30512 dma->buflist[vertex->idx],
30513 vertex->discard, vertex->used);
30514
30515 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30516 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30517 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30518 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30519 sarea_priv->last_enqueue = dev_priv->counter - 1;
30520 sarea_priv->last_dispatch = (int)hw_status[5];
30521
30522 @@ -1104,8 +1104,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30523 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30524 mc->last_render);
30525
30526 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30527 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30528 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30529 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30530 sarea_priv->last_enqueue = dev_priv->counter - 1;
30531 sarea_priv->last_dispatch = (int)hw_status[5];
30532
30533 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30534 index c9339f4..f5e1b9d 100644
30535 --- a/drivers/gpu/drm/i810/i810_drv.h
30536 +++ b/drivers/gpu/drm/i810/i810_drv.h
30537 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30538 int page_flipping;
30539
30540 wait_queue_head_t irq_queue;
30541 - atomic_t irq_received;
30542 - atomic_t irq_emitted;
30543 + atomic_unchecked_t irq_received;
30544 + atomic_unchecked_t irq_emitted;
30545
30546 int front_offset;
30547 } drm_i810_private_t;
30548 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30549 index 5363e9c..59360d1 100644
30550 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30551 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30552 @@ -518,7 +518,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30553 I915_READ(GTIMR));
30554 }
30555 seq_printf(m, "Interrupts received: %d\n",
30556 - atomic_read(&dev_priv->irq_received));
30557 + atomic_read_unchecked(&dev_priv->irq_received));
30558 for (i = 0; i < I915_NUM_RINGS; i++) {
30559 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30560 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30561 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30562 index 36822b9..b725e1b 100644
30563 --- a/drivers/gpu/drm/i915/i915_dma.c
30564 +++ b/drivers/gpu/drm/i915/i915_dma.c
30565 @@ -1266,7 +1266,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30566 bool can_switch;
30567
30568 spin_lock(&dev->count_lock);
30569 - can_switch = (dev->open_count == 0);
30570 + can_switch = (local_read(&dev->open_count) == 0);
30571 spin_unlock(&dev->count_lock);
30572 return can_switch;
30573 }
30574 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30575 index b0b676a..d107105 100644
30576 --- a/drivers/gpu/drm/i915/i915_drv.h
30577 +++ b/drivers/gpu/drm/i915/i915_drv.h
30578 @@ -268,7 +268,7 @@ struct drm_i915_display_funcs {
30579 /* render clock increase/decrease */
30580 /* display clock increase/decrease */
30581 /* pll clock increase/decrease */
30582 -};
30583 +} __no_const;
30584
30585 struct intel_device_info {
30586 u8 gen;
30587 @@ -386,7 +386,7 @@ typedef struct drm_i915_private {
30588 int current_page;
30589 int page_flipping;
30590
30591 - atomic_t irq_received;
30592 + atomic_unchecked_t irq_received;
30593
30594 /* protects the irq masks */
30595 spinlock_t irq_lock;
30596 @@ -985,7 +985,7 @@ struct drm_i915_gem_object {
30597 * will be page flipped away on the next vblank. When it
30598 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30599 */
30600 - atomic_t pending_flip;
30601 + atomic_unchecked_t pending_flip;
30602 };
30603
30604 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30605 @@ -1434,7 +1434,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
30606 struct drm_i915_private *dev_priv, unsigned port);
30607 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30608 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30609 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30610 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30611 {
30612 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30613 }
30614 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30615 index 974a9f1..b3ebd45 100644
30616 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30617 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30618 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30619 i915_gem_clflush_object(obj);
30620
30621 if (obj->base.pending_write_domain)
30622 - cd->flips |= atomic_read(&obj->pending_flip);
30623 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30624
30625 /* The actual obj->write_domain will be updated with
30626 * pending_write_domain after we emit the accumulated flush for all
30627 @@ -916,9 +916,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30628
30629 static int
30630 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30631 - int count)
30632 + unsigned int count)
30633 {
30634 - int i;
30635 + unsigned int i;
30636
30637 for (i = 0; i < count; i++) {
30638 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30639 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30640 index ed3224c..6618589 100644
30641 --- a/drivers/gpu/drm/i915/i915_irq.c
30642 +++ b/drivers/gpu/drm/i915/i915_irq.c
30643 @@ -433,7 +433,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
30644 int vblank = 0;
30645 bool blc_event;
30646
30647 - atomic_inc(&dev_priv->irq_received);
30648 + atomic_inc_unchecked(&dev_priv->irq_received);
30649
30650 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
30651 PIPE_VBLANK_INTERRUPT_STATUS;
30652 @@ -586,7 +586,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30653 irqreturn_t ret = IRQ_NONE;
30654 int i;
30655
30656 - atomic_inc(&dev_priv->irq_received);
30657 + atomic_inc_unchecked(&dev_priv->irq_received);
30658
30659 /* disable master interrupt before clearing iir */
30660 de_ier = I915_READ(DEIER);
30661 @@ -661,7 +661,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30662 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30663 u32 hotplug_mask;
30664
30665 - atomic_inc(&dev_priv->irq_received);
30666 + atomic_inc_unchecked(&dev_priv->irq_received);
30667
30668 /* disable master interrupt before clearing iir */
30669 de_ier = I915_READ(DEIER);
30670 @@ -1646,7 +1646,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30671 {
30672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30673
30674 - atomic_set(&dev_priv->irq_received, 0);
30675 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30676
30677
30678 I915_WRITE(HWSTAM, 0xeffe);
30679 @@ -1673,7 +1673,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
30680 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30681 int pipe;
30682
30683 - atomic_set(&dev_priv->irq_received, 0);
30684 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30685
30686 /* VLV magic */
30687 I915_WRITE(VLV_IMR, 0);
30688 @@ -1969,7 +1969,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
30689 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30690 int pipe;
30691
30692 - atomic_set(&dev_priv->irq_received, 0);
30693 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30694
30695 for_each_pipe(pipe)
30696 I915_WRITE(PIPESTAT(pipe), 0);
30697 @@ -2020,7 +2020,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
30698 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
30699 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
30700
30701 - atomic_inc(&dev_priv->irq_received);
30702 + atomic_inc_unchecked(&dev_priv->irq_received);
30703
30704 iir = I915_READ16(IIR);
30705 if (iir == 0)
30706 @@ -2105,7 +2105,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
30707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30708 int pipe;
30709
30710 - atomic_set(&dev_priv->irq_received, 0);
30711 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30712
30713 if (I915_HAS_HOTPLUG(dev)) {
30714 I915_WRITE(PORT_HOTPLUG_EN, 0);
30715 @@ -2200,7 +2200,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
30716 };
30717 int pipe, ret = IRQ_NONE;
30718
30719 - atomic_inc(&dev_priv->irq_received);
30720 + atomic_inc_unchecked(&dev_priv->irq_received);
30721
30722 iir = I915_READ(IIR);
30723 do {
30724 @@ -2326,7 +2326,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
30725 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30726 int pipe;
30727
30728 - atomic_set(&dev_priv->irq_received, 0);
30729 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30730
30731 if (I915_HAS_HOTPLUG(dev)) {
30732 I915_WRITE(PORT_HOTPLUG_EN, 0);
30733 @@ -2436,7 +2436,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
30734 int irq_received;
30735 int ret = IRQ_NONE, pipe;
30736
30737 - atomic_inc(&dev_priv->irq_received);
30738 + atomic_inc_unchecked(&dev_priv->irq_received);
30739
30740 iir = I915_READ(IIR);
30741
30742 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30743 index 8a11131..46eeeaa 100644
30744 --- a/drivers/gpu/drm/i915/intel_display.c
30745 +++ b/drivers/gpu/drm/i915/intel_display.c
30746 @@ -2000,7 +2000,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30747
30748 wait_event(dev_priv->pending_flip_queue,
30749 atomic_read(&dev_priv->mm.wedged) ||
30750 - atomic_read(&obj->pending_flip) == 0);
30751 + atomic_read_unchecked(&obj->pending_flip) == 0);
30752
30753 /* Big Hammer, we also need to ensure that any pending
30754 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30755 @@ -5914,9 +5914,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30756
30757 obj = work->old_fb_obj;
30758
30759 - atomic_clear_mask(1 << intel_crtc->plane,
30760 - &obj->pending_flip.counter);
30761 - if (atomic_read(&obj->pending_flip) == 0)
30762 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30763 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30764 wake_up(&dev_priv->pending_flip_queue);
30765
30766 schedule_work(&work->work);
30767 @@ -6253,7 +6252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30768 /* Block clients from rendering to the new back buffer until
30769 * the flip occurs and the object is no longer visible.
30770 */
30771 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30772 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30773
30774 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30775 if (ret)
30776 @@ -6268,7 +6267,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30777 return 0;
30778
30779 cleanup_pending:
30780 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30781 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30782 drm_gem_object_unreference(&work->old_fb_obj->base);
30783 drm_gem_object_unreference(&obj->base);
30784 mutex_unlock(&dev->struct_mutex);
30785 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30786 index 54558a0..2d97005 100644
30787 --- a/drivers/gpu/drm/mga/mga_drv.h
30788 +++ b/drivers/gpu/drm/mga/mga_drv.h
30789 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30790 u32 clear_cmd;
30791 u32 maccess;
30792
30793 - atomic_t vbl_received; /**< Number of vblanks received. */
30794 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30795 wait_queue_head_t fence_queue;
30796 - atomic_t last_fence_retired;
30797 + atomic_unchecked_t last_fence_retired;
30798 u32 next_fence_to_post;
30799
30800 unsigned int fb_cpp;
30801 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30802 index 2581202..f230a8d9 100644
30803 --- a/drivers/gpu/drm/mga/mga_irq.c
30804 +++ b/drivers/gpu/drm/mga/mga_irq.c
30805 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30806 if (crtc != 0)
30807 return 0;
30808
30809 - return atomic_read(&dev_priv->vbl_received);
30810 + return atomic_read_unchecked(&dev_priv->vbl_received);
30811 }
30812
30813
30814 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30815 /* VBLANK interrupt */
30816 if (status & MGA_VLINEPEN) {
30817 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30818 - atomic_inc(&dev_priv->vbl_received);
30819 + atomic_inc_unchecked(&dev_priv->vbl_received);
30820 drm_handle_vblank(dev, 0);
30821 handled = 1;
30822 }
30823 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30824 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30825 MGA_WRITE(MGA_PRIMEND, prim_end);
30826
30827 - atomic_inc(&dev_priv->last_fence_retired);
30828 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30829 DRM_WAKEUP(&dev_priv->fence_queue);
30830 handled = 1;
30831 }
30832 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30833 * using fences.
30834 */
30835 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30836 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30837 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30838 - *sequence) <= (1 << 23)));
30839
30840 *sequence = cur_fence;
30841 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30842 index 2f11e16..191267e 100644
30843 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30844 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30845 @@ -5340,7 +5340,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30846 struct bit_table {
30847 const char id;
30848 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30849 -};
30850 +} __no_const;
30851
30852 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30853
30854 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30855 index b863a3a..c55e0dc 100644
30856 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30857 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30858 @@ -302,7 +302,7 @@ struct nouveau_exec_engine {
30859 u32 handle, u16 class);
30860 void (*set_tile_region)(struct drm_device *dev, int i);
30861 void (*tlb_flush)(struct drm_device *, int engine);
30862 -};
30863 +} __no_const;
30864
30865 struct nouveau_instmem_engine {
30866 void *priv;
30867 @@ -324,13 +324,13 @@ struct nouveau_instmem_engine {
30868 struct nouveau_mc_engine {
30869 int (*init)(struct drm_device *dev);
30870 void (*takedown)(struct drm_device *dev);
30871 -};
30872 +} __no_const;
30873
30874 struct nouveau_timer_engine {
30875 int (*init)(struct drm_device *dev);
30876 void (*takedown)(struct drm_device *dev);
30877 uint64_t (*read)(struct drm_device *dev);
30878 -};
30879 +} __no_const;
30880
30881 struct nouveau_fb_engine {
30882 int num_tiles;
30883 @@ -547,7 +547,7 @@ struct nouveau_vram_engine {
30884 void (*put)(struct drm_device *, struct nouveau_mem **);
30885
30886 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30887 -};
30888 +} __no_const;
30889
30890 struct nouveau_engine {
30891 struct nouveau_instmem_engine instmem;
30892 @@ -693,7 +693,7 @@ struct drm_nouveau_private {
30893 struct drm_global_reference mem_global_ref;
30894 struct ttm_bo_global_ref bo_global_ref;
30895 struct ttm_bo_device bdev;
30896 - atomic_t validate_sequence;
30897 + atomic_unchecked_t validate_sequence;
30898 int (*move)(struct nouveau_channel *,
30899 struct ttm_buffer_object *,
30900 struct ttm_mem_reg *, struct ttm_mem_reg *);
30901 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30902 index 30f5423..abca136 100644
30903 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30904 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30905 @@ -319,7 +319,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30906 int trycnt = 0;
30907 int ret, i;
30908
30909 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30910 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30911 retry:
30912 if (++trycnt > 100000) {
30913 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30914 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30915 index 19706f0..f257368 100644
30916 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30917 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30918 @@ -490,7 +490,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30919 bool can_switch;
30920
30921 spin_lock(&dev->count_lock);
30922 - can_switch = (dev->open_count == 0);
30923 + can_switch = (local_read(&dev->open_count) == 0);
30924 spin_unlock(&dev->count_lock);
30925 return can_switch;
30926 }
30927 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30928 index a9514ea..369d511 100644
30929 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30930 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30931 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30932 }
30933
30934 if (nv_encoder->dcb->type == OUTPUT_DP) {
30935 - struct dp_train_func func = {
30936 + static struct dp_train_func func = {
30937 .link_set = nv50_sor_dp_link_set,
30938 .train_set = nv50_sor_dp_train_set,
30939 .train_adj = nv50_sor_dp_train_adj
30940 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30941 index c50b075..6b07dfc 100644
30942 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30943 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30944 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30945 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30946
30947 if (nv_encoder->dcb->type == OUTPUT_DP) {
30948 - struct dp_train_func func = {
30949 + static struct dp_train_func func = {
30950 .link_set = nvd0_sor_dp_link_set,
30951 .train_set = nvd0_sor_dp_train_set,
30952 .train_adj = nvd0_sor_dp_train_adj
30953 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30954 index bcac90b..53bfc76 100644
30955 --- a/drivers/gpu/drm/r128/r128_cce.c
30956 +++ b/drivers/gpu/drm/r128/r128_cce.c
30957 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30958
30959 /* GH: Simple idle check.
30960 */
30961 - atomic_set(&dev_priv->idle_count, 0);
30962 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30963
30964 /* We don't support anything other than bus-mastering ring mode,
30965 * but the ring can be in either AGP or PCI space for the ring
30966 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30967 index 930c71b..499aded 100644
30968 --- a/drivers/gpu/drm/r128/r128_drv.h
30969 +++ b/drivers/gpu/drm/r128/r128_drv.h
30970 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30971 int is_pci;
30972 unsigned long cce_buffers_offset;
30973
30974 - atomic_t idle_count;
30975 + atomic_unchecked_t idle_count;
30976
30977 int page_flipping;
30978 int current_page;
30979 u32 crtc_offset;
30980 u32 crtc_offset_cntl;
30981
30982 - atomic_t vbl_received;
30983 + atomic_unchecked_t vbl_received;
30984
30985 u32 color_fmt;
30986 unsigned int front_offset;
30987 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30988 index 429d5a0..7e899ed 100644
30989 --- a/drivers/gpu/drm/r128/r128_irq.c
30990 +++ b/drivers/gpu/drm/r128/r128_irq.c
30991 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30992 if (crtc != 0)
30993 return 0;
30994
30995 - return atomic_read(&dev_priv->vbl_received);
30996 + return atomic_read_unchecked(&dev_priv->vbl_received);
30997 }
30998
30999 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31000 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31001 /* VBLANK interrupt */
31002 if (status & R128_CRTC_VBLANK_INT) {
31003 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
31004 - atomic_inc(&dev_priv->vbl_received);
31005 + atomic_inc_unchecked(&dev_priv->vbl_received);
31006 drm_handle_vblank(dev, 0);
31007 return IRQ_HANDLED;
31008 }
31009 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
31010 index a9e33ce..09edd4b 100644
31011 --- a/drivers/gpu/drm/r128/r128_state.c
31012 +++ b/drivers/gpu/drm/r128/r128_state.c
31013 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
31014
31015 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
31016 {
31017 - if (atomic_read(&dev_priv->idle_count) == 0)
31018 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
31019 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
31020 else
31021 - atomic_set(&dev_priv->idle_count, 0);
31022 + atomic_set_unchecked(&dev_priv->idle_count, 0);
31023 }
31024
31025 #endif
31026 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31027 index 5a82b6b..9e69c73 100644
31028 --- a/drivers/gpu/drm/radeon/mkregtable.c
31029 +++ b/drivers/gpu/drm/radeon/mkregtable.c
31030 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
31031 regex_t mask_rex;
31032 regmatch_t match[4];
31033 char buf[1024];
31034 - size_t end;
31035 + long end;
31036 int len;
31037 int done = 0;
31038 int r;
31039 unsigned o;
31040 struct offset *offset;
31041 char last_reg_s[10];
31042 - int last_reg;
31043 + unsigned long last_reg;
31044
31045 if (regcomp
31046 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31047 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31048 index fefcca5..20a5b90 100644
31049 --- a/drivers/gpu/drm/radeon/radeon.h
31050 +++ b/drivers/gpu/drm/radeon/radeon.h
31051 @@ -743,7 +743,7 @@ struct r600_blit_cp_primitives {
31052 int x2, int y2);
31053 void (*draw_auto)(struct radeon_device *rdev);
31054 void (*set_default_state)(struct radeon_device *rdev);
31055 -};
31056 +} __no_const;
31057
31058 struct r600_blit {
31059 struct radeon_bo *shader_obj;
31060 @@ -1244,7 +1244,7 @@ struct radeon_asic {
31061 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31062 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31063 } pflip;
31064 -};
31065 +} __no_const;
31066
31067 /*
31068 * Asic structures
31069 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31070 index 066c98b..96ab858 100644
31071 --- a/drivers/gpu/drm/radeon/radeon_device.c
31072 +++ b/drivers/gpu/drm/radeon/radeon_device.c
31073 @@ -692,7 +692,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31074 bool can_switch;
31075
31076 spin_lock(&dev->count_lock);
31077 - can_switch = (dev->open_count == 0);
31078 + can_switch = (local_read(&dev->open_count) == 0);
31079 spin_unlock(&dev->count_lock);
31080 return can_switch;
31081 }
31082 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31083 index a1b59ca..86f2d44 100644
31084 --- a/drivers/gpu/drm/radeon/radeon_drv.h
31085 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
31086 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31087
31088 /* SW interrupt */
31089 wait_queue_head_t swi_queue;
31090 - atomic_t swi_emitted;
31091 + atomic_unchecked_t swi_emitted;
31092 int vblank_crtc;
31093 uint32_t irq_enable_reg;
31094 uint32_t r500_disp_irq_reg;
31095 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31096 index 48b7cea..342236f 100644
31097 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31098 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31099 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31100 request = compat_alloc_user_space(sizeof(*request));
31101 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31102 || __put_user(req32.param, &request->param)
31103 - || __put_user((void __user *)(unsigned long)req32.value,
31104 + || __put_user((unsigned long)req32.value,
31105 &request->value))
31106 return -EFAULT;
31107
31108 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31109 index 00da384..32f972d 100644
31110 --- a/drivers/gpu/drm/radeon/radeon_irq.c
31111 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
31112 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31113 unsigned int ret;
31114 RING_LOCALS;
31115
31116 - atomic_inc(&dev_priv->swi_emitted);
31117 - ret = atomic_read(&dev_priv->swi_emitted);
31118 + atomic_inc_unchecked(&dev_priv->swi_emitted);
31119 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31120
31121 BEGIN_RING(4);
31122 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31123 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31124 drm_radeon_private_t *dev_priv =
31125 (drm_radeon_private_t *) dev->dev_private;
31126
31127 - atomic_set(&dev_priv->swi_emitted, 0);
31128 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31129 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31130
31131 dev->max_vblank_count = 0x001fffff;
31132 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31133 index e8422ae..d22d4a8 100644
31134 --- a/drivers/gpu/drm/radeon/radeon_state.c
31135 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31136 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31137 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31138 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31139
31140 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31141 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31142 sarea_priv->nbox * sizeof(depth_boxes[0])))
31143 return -EFAULT;
31144
31145 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31146 {
31147 drm_radeon_private_t *dev_priv = dev->dev_private;
31148 drm_radeon_getparam_t *param = data;
31149 - int value;
31150 + int value = 0;
31151
31152 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31153
31154 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31155 index c94a225..5795d34 100644
31156 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31157 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31158 @@ -852,8 +852,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31159 }
31160 if (unlikely(ttm_vm_ops == NULL)) {
31161 ttm_vm_ops = vma->vm_ops;
31162 - radeon_ttm_vm_ops = *ttm_vm_ops;
31163 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31164 + pax_open_kernel();
31165 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31166 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31167 + pax_close_kernel();
31168 }
31169 vma->vm_ops = &radeon_ttm_vm_ops;
31170 return 0;
31171 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31172 index 159b6a4..fa82487 100644
31173 --- a/drivers/gpu/drm/radeon/rs690.c
31174 +++ b/drivers/gpu/drm/radeon/rs690.c
31175 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31176 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31177 rdev->pm.sideport_bandwidth.full)
31178 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31179 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31180 + read_delay_latency.full = dfixed_const(800 * 1000);
31181 read_delay_latency.full = dfixed_div(read_delay_latency,
31182 rdev->pm.igp_sideport_mclk);
31183 + a.full = dfixed_const(370);
31184 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31185 } else {
31186 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31187 rdev->pm.k8_bandwidth.full)
31188 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31189 index ebc6fac..a8313ed 100644
31190 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31191 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31192 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31193 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31194 struct shrink_control *sc)
31195 {
31196 - static atomic_t start_pool = ATOMIC_INIT(0);
31197 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31198 unsigned i;
31199 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31200 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31201 struct ttm_page_pool *pool;
31202 int shrink_pages = sc->nr_to_scan;
31203
31204 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31205 index 88edacc..1e5412b 100644
31206 --- a/drivers/gpu/drm/via/via_drv.h
31207 +++ b/drivers/gpu/drm/via/via_drv.h
31208 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31209 typedef uint32_t maskarray_t[5];
31210
31211 typedef struct drm_via_irq {
31212 - atomic_t irq_received;
31213 + atomic_unchecked_t irq_received;
31214 uint32_t pending_mask;
31215 uint32_t enable_mask;
31216 wait_queue_head_t irq_queue;
31217 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31218 struct timeval last_vblank;
31219 int last_vblank_valid;
31220 unsigned usec_per_vblank;
31221 - atomic_t vbl_received;
31222 + atomic_unchecked_t vbl_received;
31223 drm_via_state_t hc_state;
31224 char pci_buf[VIA_PCI_BUF_SIZE];
31225 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31226 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31227 index d391f48..10c8ca3 100644
31228 --- a/drivers/gpu/drm/via/via_irq.c
31229 +++ b/drivers/gpu/drm/via/via_irq.c
31230 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31231 if (crtc != 0)
31232 return 0;
31233
31234 - return atomic_read(&dev_priv->vbl_received);
31235 + return atomic_read_unchecked(&dev_priv->vbl_received);
31236 }
31237
31238 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31239 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31240
31241 status = VIA_READ(VIA_REG_INTERRUPT);
31242 if (status & VIA_IRQ_VBLANK_PENDING) {
31243 - atomic_inc(&dev_priv->vbl_received);
31244 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31245 + atomic_inc_unchecked(&dev_priv->vbl_received);
31246 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31247 do_gettimeofday(&cur_vblank);
31248 if (dev_priv->last_vblank_valid) {
31249 dev_priv->usec_per_vblank =
31250 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31251 dev_priv->last_vblank = cur_vblank;
31252 dev_priv->last_vblank_valid = 1;
31253 }
31254 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31255 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31256 DRM_DEBUG("US per vblank is: %u\n",
31257 dev_priv->usec_per_vblank);
31258 }
31259 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31260
31261 for (i = 0; i < dev_priv->num_irqs; ++i) {
31262 if (status & cur_irq->pending_mask) {
31263 - atomic_inc(&cur_irq->irq_received);
31264 + atomic_inc_unchecked(&cur_irq->irq_received);
31265 DRM_WAKEUP(&cur_irq->irq_queue);
31266 handled = 1;
31267 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31268 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31269 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31270 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31271 masks[irq][4]));
31272 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31273 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31274 } else {
31275 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31276 (((cur_irq_sequence =
31277 - atomic_read(&cur_irq->irq_received)) -
31278 + atomic_read_unchecked(&cur_irq->irq_received)) -
31279 *sequence) <= (1 << 23)));
31280 }
31281 *sequence = cur_irq_sequence;
31282 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31283 }
31284
31285 for (i = 0; i < dev_priv->num_irqs; ++i) {
31286 - atomic_set(&cur_irq->irq_received, 0);
31287 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31288 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31289 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31290 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31291 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31292 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31293 case VIA_IRQ_RELATIVE:
31294 irqwait->request.sequence +=
31295 - atomic_read(&cur_irq->irq_received);
31296 + atomic_read_unchecked(&cur_irq->irq_received);
31297 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31298 case VIA_IRQ_ABSOLUTE:
31299 break;
31300 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31301 index d0f2c07..9ebd9c3 100644
31302 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31303 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31304 @@ -263,7 +263,7 @@ struct vmw_private {
31305 * Fencing and IRQs.
31306 */
31307
31308 - atomic_t marker_seq;
31309 + atomic_unchecked_t marker_seq;
31310 wait_queue_head_t fence_queue;
31311 wait_queue_head_t fifo_queue;
31312 int fence_queue_waiters; /* Protected by hw_mutex */
31313 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31314 index a0c2f12..68ae6cb 100644
31315 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31316 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31317 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31318 (unsigned int) min,
31319 (unsigned int) fifo->capabilities);
31320
31321 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31322 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31323 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31324 vmw_marker_queue_init(&fifo->marker_queue);
31325 return vmw_fifo_send_fence(dev_priv, &dummy);
31326 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31327 if (reserveable)
31328 iowrite32(bytes, fifo_mem +
31329 SVGA_FIFO_RESERVED);
31330 - return fifo_mem + (next_cmd >> 2);
31331 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31332 } else {
31333 need_bounce = true;
31334 }
31335 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31336
31337 fm = vmw_fifo_reserve(dev_priv, bytes);
31338 if (unlikely(fm == NULL)) {
31339 - *seqno = atomic_read(&dev_priv->marker_seq);
31340 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31341 ret = -ENOMEM;
31342 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31343 false, 3*HZ);
31344 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31345 }
31346
31347 do {
31348 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31349 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31350 } while (*seqno == 0);
31351
31352 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31353 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31354 index cabc95f..14b3d77 100644
31355 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31356 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31357 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31358 * emitted. Then the fence is stale and signaled.
31359 */
31360
31361 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31362 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31363 > VMW_FENCE_WRAP);
31364
31365 return ret;
31366 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31367
31368 if (fifo_idle)
31369 down_read(&fifo_state->rwsem);
31370 - signal_seq = atomic_read(&dev_priv->marker_seq);
31371 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31372 ret = 0;
31373
31374 for (;;) {
31375 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31376 index 8a8725c..afed796 100644
31377 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31378 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31379 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31380 while (!vmw_lag_lt(queue, us)) {
31381 spin_lock(&queue->lock);
31382 if (list_empty(&queue->head))
31383 - seqno = atomic_read(&dev_priv->marker_seq);
31384 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31385 else {
31386 marker = list_first_entry(&queue->head,
31387 struct vmw_marker, head);
31388 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31389 index 1f6957c..b579481 100644
31390 --- a/drivers/hid/hid-core.c
31391 +++ b/drivers/hid/hid-core.c
31392 @@ -2153,7 +2153,7 @@ static bool hid_ignore(struct hid_device *hdev)
31393
31394 int hid_add_device(struct hid_device *hdev)
31395 {
31396 - static atomic_t id = ATOMIC_INIT(0);
31397 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31398 int ret;
31399
31400 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31401 @@ -2188,7 +2188,7 @@ int hid_add_device(struct hid_device *hdev)
31402 /* XXX hack, any other cleaner solution after the driver core
31403 * is converted to allow more than 20 bytes as the device name? */
31404 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31405 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31406 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31407
31408 hid_debug_register(hdev, dev_name(&hdev->dev));
31409 ret = device_add(&hdev->dev);
31410 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31411 index eec3291..8ed706b 100644
31412 --- a/drivers/hid/hid-wiimote-debug.c
31413 +++ b/drivers/hid/hid-wiimote-debug.c
31414 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31415 else if (size == 0)
31416 return -EIO;
31417
31418 - if (copy_to_user(u, buf, size))
31419 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31420 return -EFAULT;
31421
31422 *off += size;
31423 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31424 index 14599e2..711c965 100644
31425 --- a/drivers/hid/usbhid/hiddev.c
31426 +++ b/drivers/hid/usbhid/hiddev.c
31427 @@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31428 break;
31429
31430 case HIDIOCAPPLICATION:
31431 - if (arg < 0 || arg >= hid->maxapplication)
31432 + if (arg >= hid->maxapplication)
31433 break;
31434
31435 for (i = 0; i < hid->maxcollection; i++)
31436 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31437 index 4065374..10ed7dc 100644
31438 --- a/drivers/hv/channel.c
31439 +++ b/drivers/hv/channel.c
31440 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31441 int ret = 0;
31442 int t;
31443
31444 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31445 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31446 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31447 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31448
31449 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31450 if (ret)
31451 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31452 index 86f8885..ab9cb2b 100644
31453 --- a/drivers/hv/hv.c
31454 +++ b/drivers/hv/hv.c
31455 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31456 u64 output_address = (output) ? virt_to_phys(output) : 0;
31457 u32 output_address_hi = output_address >> 32;
31458 u32 output_address_lo = output_address & 0xFFFFFFFF;
31459 - void *hypercall_page = hv_context.hypercall_page;
31460 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31461
31462 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31463 "=a"(hv_status_lo) : "d" (control_hi),
31464 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31465 index b9426a6..677ce34 100644
31466 --- a/drivers/hv/hyperv_vmbus.h
31467 +++ b/drivers/hv/hyperv_vmbus.h
31468 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31469 struct vmbus_connection {
31470 enum vmbus_connect_state conn_state;
31471
31472 - atomic_t next_gpadl_handle;
31473 + atomic_unchecked_t next_gpadl_handle;
31474
31475 /*
31476 * Represents channel interrupts. Each bit position represents a
31477 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31478 index a220e57..428f54d 100644
31479 --- a/drivers/hv/vmbus_drv.c
31480 +++ b/drivers/hv/vmbus_drv.c
31481 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31482 {
31483 int ret = 0;
31484
31485 - static atomic_t device_num = ATOMIC_INIT(0);
31486 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31487
31488 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31489 - atomic_inc_return(&device_num));
31490 + atomic_inc_return_unchecked(&device_num));
31491
31492 child_device_obj->device.bus = &hv_bus;
31493 child_device_obj->device.parent = &hv_acpi_dev->dev;
31494 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31495 index 34ad5a2..e2b0ae8 100644
31496 --- a/drivers/hwmon/acpi_power_meter.c
31497 +++ b/drivers/hwmon/acpi_power_meter.c
31498 @@ -308,8 +308,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31499 return res;
31500
31501 temp /= 1000;
31502 - if (temp < 0)
31503 - return -EINVAL;
31504
31505 mutex_lock(&resource->lock);
31506 resource->trip[attr->index - 7] = temp;
31507 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31508 index 8b011d0..3de24a1 100644
31509 --- a/drivers/hwmon/sht15.c
31510 +++ b/drivers/hwmon/sht15.c
31511 @@ -166,7 +166,7 @@ struct sht15_data {
31512 int supply_uV;
31513 bool supply_uV_valid;
31514 struct work_struct update_supply_work;
31515 - atomic_t interrupt_handled;
31516 + atomic_unchecked_t interrupt_handled;
31517 };
31518
31519 /**
31520 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31521 return ret;
31522
31523 gpio_direction_input(data->pdata->gpio_data);
31524 - atomic_set(&data->interrupt_handled, 0);
31525 + atomic_set_unchecked(&data->interrupt_handled, 0);
31526
31527 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31528 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31529 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31530 /* Only relevant if the interrupt hasn't occurred. */
31531 - if (!atomic_read(&data->interrupt_handled))
31532 + if (!atomic_read_unchecked(&data->interrupt_handled))
31533 schedule_work(&data->read_work);
31534 }
31535 ret = wait_event_timeout(data->wait_queue,
31536 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31537
31538 /* First disable the interrupt */
31539 disable_irq_nosync(irq);
31540 - atomic_inc(&data->interrupt_handled);
31541 + atomic_inc_unchecked(&data->interrupt_handled);
31542 /* Then schedule a reading work struct */
31543 if (data->state != SHT15_READING_NOTHING)
31544 schedule_work(&data->read_work);
31545 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31546 * If not, then start the interrupt again - care here as could
31547 * have gone low in meantime so verify it hasn't!
31548 */
31549 - atomic_set(&data->interrupt_handled, 0);
31550 + atomic_set_unchecked(&data->interrupt_handled, 0);
31551 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31552 /* If still not occurred or another handler was scheduled */
31553 if (gpio_get_value(data->pdata->gpio_data)
31554 - || atomic_read(&data->interrupt_handled))
31555 + || atomic_read_unchecked(&data->interrupt_handled))
31556 return;
31557 }
31558
31559 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31560 index 378fcb5..5e91fa8 100644
31561 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31562 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31563 @@ -43,7 +43,7 @@
31564 extern struct i2c_adapter amd756_smbus;
31565
31566 static struct i2c_adapter *s4882_adapter;
31567 -static struct i2c_algorithm *s4882_algo;
31568 +static i2c_algorithm_no_const *s4882_algo;
31569
31570 /* Wrapper access functions for multiplexed SMBus */
31571 static DEFINE_MUTEX(amd756_lock);
31572 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31573 index 29015eb..af2d8e9 100644
31574 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31575 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31576 @@ -41,7 +41,7 @@
31577 extern struct i2c_adapter *nforce2_smbus;
31578
31579 static struct i2c_adapter *s4985_adapter;
31580 -static struct i2c_algorithm *s4985_algo;
31581 +static i2c_algorithm_no_const *s4985_algo;
31582
31583 /* Wrapper access functions for multiplexed SMBus */
31584 static DEFINE_MUTEX(nforce2_lock);
31585 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31586 index 1038c38..eb92f51 100644
31587 --- a/drivers/i2c/i2c-mux.c
31588 +++ b/drivers/i2c/i2c-mux.c
31589 @@ -30,7 +30,7 @@
31590 /* multiplexer per channel data */
31591 struct i2c_mux_priv {
31592 struct i2c_adapter adap;
31593 - struct i2c_algorithm algo;
31594 + i2c_algorithm_no_const algo;
31595
31596 struct i2c_adapter *parent;
31597 void *mux_priv; /* the mux chip/device */
31598 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31599 index 57d00ca..0145194 100644
31600 --- a/drivers/ide/aec62xx.c
31601 +++ b/drivers/ide/aec62xx.c
31602 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31603 .cable_detect = atp86x_cable_detect,
31604 };
31605
31606 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31607 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31608 { /* 0: AEC6210 */
31609 .name = DRV_NAME,
31610 .init_chipset = init_chipset_aec62xx,
31611 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31612 index 2c8016a..911a27c 100644
31613 --- a/drivers/ide/alim15x3.c
31614 +++ b/drivers/ide/alim15x3.c
31615 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31616 .dma_sff_read_status = ide_dma_sff_read_status,
31617 };
31618
31619 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31620 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31621 .name = DRV_NAME,
31622 .init_chipset = init_chipset_ali15x3,
31623 .init_hwif = init_hwif_ali15x3,
31624 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31625 index 3747b25..56fc995 100644
31626 --- a/drivers/ide/amd74xx.c
31627 +++ b/drivers/ide/amd74xx.c
31628 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31629 .udma_mask = udma, \
31630 }
31631
31632 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31633 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31634 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31635 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31636 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31637 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31638 index 15f0ead..cb43480 100644
31639 --- a/drivers/ide/atiixp.c
31640 +++ b/drivers/ide/atiixp.c
31641 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31642 .cable_detect = atiixp_cable_detect,
31643 };
31644
31645 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31646 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31647 { /* 0: IXP200/300/400/700 */
31648 .name = DRV_NAME,
31649 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31650 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31651 index 5f80312..d1fc438 100644
31652 --- a/drivers/ide/cmd64x.c
31653 +++ b/drivers/ide/cmd64x.c
31654 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31655 .dma_sff_read_status = ide_dma_sff_read_status,
31656 };
31657
31658 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31659 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31660 { /* 0: CMD643 */
31661 .name = DRV_NAME,
31662 .init_chipset = init_chipset_cmd64x,
31663 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31664 index 2c1e5f7..1444762 100644
31665 --- a/drivers/ide/cs5520.c
31666 +++ b/drivers/ide/cs5520.c
31667 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31668 .set_dma_mode = cs5520_set_dma_mode,
31669 };
31670
31671 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31672 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31673 .name = DRV_NAME,
31674 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31675 .port_ops = &cs5520_port_ops,
31676 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31677 index 4dc4eb9..49b40ad 100644
31678 --- a/drivers/ide/cs5530.c
31679 +++ b/drivers/ide/cs5530.c
31680 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31681 .udma_filter = cs5530_udma_filter,
31682 };
31683
31684 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31685 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31686 .name = DRV_NAME,
31687 .init_chipset = init_chipset_cs5530,
31688 .init_hwif = init_hwif_cs5530,
31689 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31690 index 5059faf..18d4c85 100644
31691 --- a/drivers/ide/cs5535.c
31692 +++ b/drivers/ide/cs5535.c
31693 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31694 .cable_detect = cs5535_cable_detect,
31695 };
31696
31697 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31698 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31699 .name = DRV_NAME,
31700 .port_ops = &cs5535_port_ops,
31701 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31702 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31703 index 847553f..3ffb49d 100644
31704 --- a/drivers/ide/cy82c693.c
31705 +++ b/drivers/ide/cy82c693.c
31706 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31707 .set_dma_mode = cy82c693_set_dma_mode,
31708 };
31709
31710 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31711 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31712 .name = DRV_NAME,
31713 .init_iops = init_iops_cy82c693,
31714 .port_ops = &cy82c693_port_ops,
31715 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31716 index 58c51cd..4aec3b8 100644
31717 --- a/drivers/ide/hpt366.c
31718 +++ b/drivers/ide/hpt366.c
31719 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31720 }
31721 };
31722
31723 -static const struct hpt_info hpt36x __devinitdata = {
31724 +static const struct hpt_info hpt36x __devinitconst = {
31725 .chip_name = "HPT36x",
31726 .chip_type = HPT36x,
31727 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31728 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31729 .timings = &hpt36x_timings
31730 };
31731
31732 -static const struct hpt_info hpt370 __devinitdata = {
31733 +static const struct hpt_info hpt370 __devinitconst = {
31734 .chip_name = "HPT370",
31735 .chip_type = HPT370,
31736 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31737 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31738 .timings = &hpt37x_timings
31739 };
31740
31741 -static const struct hpt_info hpt370a __devinitdata = {
31742 +static const struct hpt_info hpt370a __devinitconst = {
31743 .chip_name = "HPT370A",
31744 .chip_type = HPT370A,
31745 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31746 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31747 .timings = &hpt37x_timings
31748 };
31749
31750 -static const struct hpt_info hpt374 __devinitdata = {
31751 +static const struct hpt_info hpt374 __devinitconst = {
31752 .chip_name = "HPT374",
31753 .chip_type = HPT374,
31754 .udma_mask = ATA_UDMA5,
31755 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31756 .timings = &hpt37x_timings
31757 };
31758
31759 -static const struct hpt_info hpt372 __devinitdata = {
31760 +static const struct hpt_info hpt372 __devinitconst = {
31761 .chip_name = "HPT372",
31762 .chip_type = HPT372,
31763 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31764 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31765 .timings = &hpt37x_timings
31766 };
31767
31768 -static const struct hpt_info hpt372a __devinitdata = {
31769 +static const struct hpt_info hpt372a __devinitconst = {
31770 .chip_name = "HPT372A",
31771 .chip_type = HPT372A,
31772 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31773 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31774 .timings = &hpt37x_timings
31775 };
31776
31777 -static const struct hpt_info hpt302 __devinitdata = {
31778 +static const struct hpt_info hpt302 __devinitconst = {
31779 .chip_name = "HPT302",
31780 .chip_type = HPT302,
31781 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31782 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31783 .timings = &hpt37x_timings
31784 };
31785
31786 -static const struct hpt_info hpt371 __devinitdata = {
31787 +static const struct hpt_info hpt371 __devinitconst = {
31788 .chip_name = "HPT371",
31789 .chip_type = HPT371,
31790 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31791 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31792 .timings = &hpt37x_timings
31793 };
31794
31795 -static const struct hpt_info hpt372n __devinitdata = {
31796 +static const struct hpt_info hpt372n __devinitconst = {
31797 .chip_name = "HPT372N",
31798 .chip_type = HPT372N,
31799 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31800 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31801 .timings = &hpt37x_timings
31802 };
31803
31804 -static const struct hpt_info hpt302n __devinitdata = {
31805 +static const struct hpt_info hpt302n __devinitconst = {
31806 .chip_name = "HPT302N",
31807 .chip_type = HPT302N,
31808 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31809 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31810 .timings = &hpt37x_timings
31811 };
31812
31813 -static const struct hpt_info hpt371n __devinitdata = {
31814 +static const struct hpt_info hpt371n __devinitconst = {
31815 .chip_name = "HPT371N",
31816 .chip_type = HPT371N,
31817 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31818 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31819 .dma_sff_read_status = ide_dma_sff_read_status,
31820 };
31821
31822 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31823 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31824 { /* 0: HPT36x */
31825 .name = DRV_NAME,
31826 .init_chipset = init_chipset_hpt366,
31827 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31828 index 8126824..55a2798 100644
31829 --- a/drivers/ide/ide-cd.c
31830 +++ b/drivers/ide/ide-cd.c
31831 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31832 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31833 if ((unsigned long)buf & alignment
31834 || blk_rq_bytes(rq) & q->dma_pad_mask
31835 - || object_is_on_stack(buf))
31836 + || object_starts_on_stack(buf))
31837 drive->dma = 0;
31838 }
31839 }
31840 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31841 index 7f56b73..dab5b67 100644
31842 --- a/drivers/ide/ide-pci-generic.c
31843 +++ b/drivers/ide/ide-pci-generic.c
31844 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31845 .udma_mask = ATA_UDMA6, \
31846 }
31847
31848 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31849 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31850 /* 0: Unknown */
31851 DECLARE_GENERIC_PCI_DEV(0),
31852
31853 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31854 index 560e66d..d5dd180 100644
31855 --- a/drivers/ide/it8172.c
31856 +++ b/drivers/ide/it8172.c
31857 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31858 .set_dma_mode = it8172_set_dma_mode,
31859 };
31860
31861 -static const struct ide_port_info it8172_port_info __devinitdata = {
31862 +static const struct ide_port_info it8172_port_info __devinitconst = {
31863 .name = DRV_NAME,
31864 .port_ops = &it8172_port_ops,
31865 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31866 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31867 index 46816ba..1847aeb 100644
31868 --- a/drivers/ide/it8213.c
31869 +++ b/drivers/ide/it8213.c
31870 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31871 .cable_detect = it8213_cable_detect,
31872 };
31873
31874 -static const struct ide_port_info it8213_chipset __devinitdata = {
31875 +static const struct ide_port_info it8213_chipset __devinitconst = {
31876 .name = DRV_NAME,
31877 .enablebits = { {0x41, 0x80, 0x80} },
31878 .port_ops = &it8213_port_ops,
31879 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31880 index 2e3169f..c5611db 100644
31881 --- a/drivers/ide/it821x.c
31882 +++ b/drivers/ide/it821x.c
31883 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31884 .cable_detect = it821x_cable_detect,
31885 };
31886
31887 -static const struct ide_port_info it821x_chipset __devinitdata = {
31888 +static const struct ide_port_info it821x_chipset __devinitconst = {
31889 .name = DRV_NAME,
31890 .init_chipset = init_chipset_it821x,
31891 .init_hwif = init_hwif_it821x,
31892 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31893 index 74c2c4a..efddd7d 100644
31894 --- a/drivers/ide/jmicron.c
31895 +++ b/drivers/ide/jmicron.c
31896 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31897 .cable_detect = jmicron_cable_detect,
31898 };
31899
31900 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31901 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31902 .name = DRV_NAME,
31903 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31904 .port_ops = &jmicron_port_ops,
31905 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31906 index 95327a2..73f78d8 100644
31907 --- a/drivers/ide/ns87415.c
31908 +++ b/drivers/ide/ns87415.c
31909 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31910 .dma_sff_read_status = superio_dma_sff_read_status,
31911 };
31912
31913 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31914 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31915 .name = DRV_NAME,
31916 .init_hwif = init_hwif_ns87415,
31917 .tp_ops = &ns87415_tp_ops,
31918 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31919 index 1a53a4c..39edc66 100644
31920 --- a/drivers/ide/opti621.c
31921 +++ b/drivers/ide/opti621.c
31922 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31923 .set_pio_mode = opti621_set_pio_mode,
31924 };
31925
31926 -static const struct ide_port_info opti621_chipset __devinitdata = {
31927 +static const struct ide_port_info opti621_chipset __devinitconst = {
31928 .name = DRV_NAME,
31929 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31930 .port_ops = &opti621_port_ops,
31931 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31932 index 9546fe2..2e5ceb6 100644
31933 --- a/drivers/ide/pdc202xx_new.c
31934 +++ b/drivers/ide/pdc202xx_new.c
31935 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31936 .udma_mask = udma, \
31937 }
31938
31939 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31940 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31941 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31942 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31943 };
31944 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31945 index 3a35ec6..5634510 100644
31946 --- a/drivers/ide/pdc202xx_old.c
31947 +++ b/drivers/ide/pdc202xx_old.c
31948 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31949 .max_sectors = sectors, \
31950 }
31951
31952 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31953 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31954 { /* 0: PDC20246 */
31955 .name = DRV_NAME,
31956 .init_chipset = init_chipset_pdc202xx,
31957 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31958 index 1892e81..fe0fd60 100644
31959 --- a/drivers/ide/piix.c
31960 +++ b/drivers/ide/piix.c
31961 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31962 .udma_mask = udma, \
31963 }
31964
31965 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31966 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31967 /* 0: MPIIX */
31968 { /*
31969 * MPIIX actually has only a single IDE channel mapped to
31970 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31971 index a6414a8..c04173e 100644
31972 --- a/drivers/ide/rz1000.c
31973 +++ b/drivers/ide/rz1000.c
31974 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31975 }
31976 }
31977
31978 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31979 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31980 .name = DRV_NAME,
31981 .host_flags = IDE_HFLAG_NO_DMA,
31982 };
31983 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31984 index 356b9b5..d4758eb 100644
31985 --- a/drivers/ide/sc1200.c
31986 +++ b/drivers/ide/sc1200.c
31987 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31988 .dma_sff_read_status = ide_dma_sff_read_status,
31989 };
31990
31991 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31992 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31993 .name = DRV_NAME,
31994 .port_ops = &sc1200_port_ops,
31995 .dma_ops = &sc1200_dma_ops,
31996 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31997 index b7f5b0c..9701038 100644
31998 --- a/drivers/ide/scc_pata.c
31999 +++ b/drivers/ide/scc_pata.c
32000 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
32001 .dma_sff_read_status = scc_dma_sff_read_status,
32002 };
32003
32004 -static const struct ide_port_info scc_chipset __devinitdata = {
32005 +static const struct ide_port_info scc_chipset __devinitconst = {
32006 .name = "sccIDE",
32007 .init_iops = init_iops_scc,
32008 .init_dma = scc_init_dma,
32009 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32010 index 35fb8da..24d72ef 100644
32011 --- a/drivers/ide/serverworks.c
32012 +++ b/drivers/ide/serverworks.c
32013 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
32014 .cable_detect = svwks_cable_detect,
32015 };
32016
32017 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32018 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32019 { /* 0: OSB4 */
32020 .name = DRV_NAME,
32021 .init_chipset = init_chipset_svwks,
32022 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32023 index ddeda44..46f7e30 100644
32024 --- a/drivers/ide/siimage.c
32025 +++ b/drivers/ide/siimage.c
32026 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
32027 .udma_mask = ATA_UDMA6, \
32028 }
32029
32030 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32031 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32032 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32033 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32034 };
32035 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32036 index 4a00225..09e61b4 100644
32037 --- a/drivers/ide/sis5513.c
32038 +++ b/drivers/ide/sis5513.c
32039 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
32040 .cable_detect = sis_cable_detect,
32041 };
32042
32043 -static const struct ide_port_info sis5513_chipset __devinitdata = {
32044 +static const struct ide_port_info sis5513_chipset __devinitconst = {
32045 .name = DRV_NAME,
32046 .init_chipset = init_chipset_sis5513,
32047 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32048 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32049 index f21dc2a..d051cd2 100644
32050 --- a/drivers/ide/sl82c105.c
32051 +++ b/drivers/ide/sl82c105.c
32052 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32053 .dma_sff_read_status = ide_dma_sff_read_status,
32054 };
32055
32056 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
32057 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
32058 .name = DRV_NAME,
32059 .init_chipset = init_chipset_sl82c105,
32060 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32061 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32062 index 864ffe0..863a5e9 100644
32063 --- a/drivers/ide/slc90e66.c
32064 +++ b/drivers/ide/slc90e66.c
32065 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32066 .cable_detect = slc90e66_cable_detect,
32067 };
32068
32069 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
32070 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
32071 .name = DRV_NAME,
32072 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32073 .port_ops = &slc90e66_port_ops,
32074 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32075 index 4799d5c..1794678 100644
32076 --- a/drivers/ide/tc86c001.c
32077 +++ b/drivers/ide/tc86c001.c
32078 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32079 .dma_sff_read_status = ide_dma_sff_read_status,
32080 };
32081
32082 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
32083 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
32084 .name = DRV_NAME,
32085 .init_hwif = init_hwif_tc86c001,
32086 .port_ops = &tc86c001_port_ops,
32087 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32088 index 281c914..55ce1b8 100644
32089 --- a/drivers/ide/triflex.c
32090 +++ b/drivers/ide/triflex.c
32091 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32092 .set_dma_mode = triflex_set_mode,
32093 };
32094
32095 -static const struct ide_port_info triflex_device __devinitdata = {
32096 +static const struct ide_port_info triflex_device __devinitconst = {
32097 .name = DRV_NAME,
32098 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32099 .port_ops = &triflex_port_ops,
32100 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32101 index 4b42ca0..e494a98 100644
32102 --- a/drivers/ide/trm290.c
32103 +++ b/drivers/ide/trm290.c
32104 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32105 .dma_check = trm290_dma_check,
32106 };
32107
32108 -static const struct ide_port_info trm290_chipset __devinitdata = {
32109 +static const struct ide_port_info trm290_chipset __devinitconst = {
32110 .name = DRV_NAME,
32111 .init_hwif = init_hwif_trm290,
32112 .tp_ops = &trm290_tp_ops,
32113 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32114 index f46f49c..eb77678 100644
32115 --- a/drivers/ide/via82cxxx.c
32116 +++ b/drivers/ide/via82cxxx.c
32117 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32118 .cable_detect = via82cxxx_cable_detect,
32119 };
32120
32121 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32122 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32123 .name = DRV_NAME,
32124 .init_chipset = init_chipset_via82cxxx,
32125 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32126 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32127 index 73d4531..c90cd2d 100644
32128 --- a/drivers/ieee802154/fakehard.c
32129 +++ b/drivers/ieee802154/fakehard.c
32130 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32131 phy->transmit_power = 0xbf;
32132
32133 dev->netdev_ops = &fake_ops;
32134 - dev->ml_priv = &fake_mlme;
32135 + dev->ml_priv = (void *)&fake_mlme;
32136
32137 priv = netdev_priv(dev);
32138 priv->phy = phy;
32139 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32140 index c889aae..6cf5aa7 100644
32141 --- a/drivers/infiniband/core/cm.c
32142 +++ b/drivers/infiniband/core/cm.c
32143 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32144
32145 struct cm_counter_group {
32146 struct kobject obj;
32147 - atomic_long_t counter[CM_ATTR_COUNT];
32148 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32149 };
32150
32151 struct cm_counter_attribute {
32152 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32153 struct ib_mad_send_buf *msg = NULL;
32154 int ret;
32155
32156 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32157 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32158 counter[CM_REQ_COUNTER]);
32159
32160 /* Quick state check to discard duplicate REQs. */
32161 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32162 if (!cm_id_priv)
32163 return;
32164
32165 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32166 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32167 counter[CM_REP_COUNTER]);
32168 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32169 if (ret)
32170 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32171 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32172 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32173 spin_unlock_irq(&cm_id_priv->lock);
32174 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32175 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32176 counter[CM_RTU_COUNTER]);
32177 goto out;
32178 }
32179 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32180 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32181 dreq_msg->local_comm_id);
32182 if (!cm_id_priv) {
32183 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32184 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32185 counter[CM_DREQ_COUNTER]);
32186 cm_issue_drep(work->port, work->mad_recv_wc);
32187 return -EINVAL;
32188 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32189 case IB_CM_MRA_REP_RCVD:
32190 break;
32191 case IB_CM_TIMEWAIT:
32192 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32193 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32194 counter[CM_DREQ_COUNTER]);
32195 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32196 goto unlock;
32197 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32198 cm_free_msg(msg);
32199 goto deref;
32200 case IB_CM_DREQ_RCVD:
32201 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32202 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32203 counter[CM_DREQ_COUNTER]);
32204 goto unlock;
32205 default:
32206 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32207 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32208 cm_id_priv->msg, timeout)) {
32209 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32210 - atomic_long_inc(&work->port->
32211 + atomic_long_inc_unchecked(&work->port->
32212 counter_group[CM_RECV_DUPLICATES].
32213 counter[CM_MRA_COUNTER]);
32214 goto out;
32215 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32216 break;
32217 case IB_CM_MRA_REQ_RCVD:
32218 case IB_CM_MRA_REP_RCVD:
32219 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32220 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32221 counter[CM_MRA_COUNTER]);
32222 /* fall through */
32223 default:
32224 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32225 case IB_CM_LAP_IDLE:
32226 break;
32227 case IB_CM_MRA_LAP_SENT:
32228 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32229 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32230 counter[CM_LAP_COUNTER]);
32231 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32232 goto unlock;
32233 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32234 cm_free_msg(msg);
32235 goto deref;
32236 case IB_CM_LAP_RCVD:
32237 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32238 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32239 counter[CM_LAP_COUNTER]);
32240 goto unlock;
32241 default:
32242 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32243 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32244 if (cur_cm_id_priv) {
32245 spin_unlock_irq(&cm.lock);
32246 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32247 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32248 counter[CM_SIDR_REQ_COUNTER]);
32249 goto out; /* Duplicate message. */
32250 }
32251 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32252 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32253 msg->retries = 1;
32254
32255 - atomic_long_add(1 + msg->retries,
32256 + atomic_long_add_unchecked(1 + msg->retries,
32257 &port->counter_group[CM_XMIT].counter[attr_index]);
32258 if (msg->retries)
32259 - atomic_long_add(msg->retries,
32260 + atomic_long_add_unchecked(msg->retries,
32261 &port->counter_group[CM_XMIT_RETRIES].
32262 counter[attr_index]);
32263
32264 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32265 }
32266
32267 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32268 - atomic_long_inc(&port->counter_group[CM_RECV].
32269 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32270 counter[attr_id - CM_ATTR_ID_OFFSET]);
32271
32272 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32273 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32274 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32275
32276 return sprintf(buf, "%ld\n",
32277 - atomic_long_read(&group->counter[cm_attr->index]));
32278 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32279 }
32280
32281 static const struct sysfs_ops cm_counter_ops = {
32282 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32283 index 176c8f9..2627b62 100644
32284 --- a/drivers/infiniband/core/fmr_pool.c
32285 +++ b/drivers/infiniband/core/fmr_pool.c
32286 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32287
32288 struct task_struct *thread;
32289
32290 - atomic_t req_ser;
32291 - atomic_t flush_ser;
32292 + atomic_unchecked_t req_ser;
32293 + atomic_unchecked_t flush_ser;
32294
32295 wait_queue_head_t force_wait;
32296 };
32297 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32298 struct ib_fmr_pool *pool = pool_ptr;
32299
32300 do {
32301 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32302 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32303 ib_fmr_batch_release(pool);
32304
32305 - atomic_inc(&pool->flush_ser);
32306 + atomic_inc_unchecked(&pool->flush_ser);
32307 wake_up_interruptible(&pool->force_wait);
32308
32309 if (pool->flush_function)
32310 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32311 }
32312
32313 set_current_state(TASK_INTERRUPTIBLE);
32314 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32315 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32316 !kthread_should_stop())
32317 schedule();
32318 __set_current_state(TASK_RUNNING);
32319 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32320 pool->dirty_watermark = params->dirty_watermark;
32321 pool->dirty_len = 0;
32322 spin_lock_init(&pool->pool_lock);
32323 - atomic_set(&pool->req_ser, 0);
32324 - atomic_set(&pool->flush_ser, 0);
32325 + atomic_set_unchecked(&pool->req_ser, 0);
32326 + atomic_set_unchecked(&pool->flush_ser, 0);
32327 init_waitqueue_head(&pool->force_wait);
32328
32329 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32330 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32331 }
32332 spin_unlock_irq(&pool->pool_lock);
32333
32334 - serial = atomic_inc_return(&pool->req_ser);
32335 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32336 wake_up_process(pool->thread);
32337
32338 if (wait_event_interruptible(pool->force_wait,
32339 - atomic_read(&pool->flush_ser) - serial >= 0))
32340 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32341 return -EINTR;
32342
32343 return 0;
32344 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32345 } else {
32346 list_add_tail(&fmr->list, &pool->dirty_list);
32347 if (++pool->dirty_len >= pool->dirty_watermark) {
32348 - atomic_inc(&pool->req_ser);
32349 + atomic_inc_unchecked(&pool->req_ser);
32350 wake_up_process(pool->thread);
32351 }
32352 }
32353 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32354 index 57e07c6..56d09d4 100644
32355 --- a/drivers/infiniband/hw/cxgb4/mem.c
32356 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32357 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32358 int err;
32359 struct fw_ri_tpte tpt;
32360 u32 stag_idx;
32361 - static atomic_t key;
32362 + static atomic_unchecked_t key;
32363
32364 if (c4iw_fatal_error(rdev))
32365 return -EIO;
32366 @@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32367 if (rdev->stats.stag.cur > rdev->stats.stag.max)
32368 rdev->stats.stag.max = rdev->stats.stag.cur;
32369 mutex_unlock(&rdev->stats.lock);
32370 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32371 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32372 }
32373 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32374 __func__, stag_state, type, pdid, stag_idx);
32375 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32376 index 79b3dbc..96e5fcc 100644
32377 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32378 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32379 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32380 struct ib_atomic_eth *ateth;
32381 struct ipath_ack_entry *e;
32382 u64 vaddr;
32383 - atomic64_t *maddr;
32384 + atomic64_unchecked_t *maddr;
32385 u64 sdata;
32386 u32 rkey;
32387 u8 next;
32388 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32389 IB_ACCESS_REMOTE_ATOMIC)))
32390 goto nack_acc_unlck;
32391 /* Perform atomic OP and save result. */
32392 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32393 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32394 sdata = be64_to_cpu(ateth->swap_data);
32395 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32396 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32397 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32398 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32399 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32400 be64_to_cpu(ateth->compare_data),
32401 sdata);
32402 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32403 index 1f95bba..9530f87 100644
32404 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32405 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32406 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32407 unsigned long flags;
32408 struct ib_wc wc;
32409 u64 sdata;
32410 - atomic64_t *maddr;
32411 + atomic64_unchecked_t *maddr;
32412 enum ib_wc_status send_status;
32413
32414 /*
32415 @@ -382,11 +382,11 @@ again:
32416 IB_ACCESS_REMOTE_ATOMIC)))
32417 goto acc_err;
32418 /* Perform atomic OP and save result. */
32419 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32420 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32421 sdata = wqe->wr.wr.atomic.compare_add;
32422 *(u64 *) sqp->s_sge.sge.vaddr =
32423 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32424 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32425 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32426 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32427 sdata, wqe->wr.wr.atomic.swap);
32428 goto send_comp;
32429 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32430 index 7140199..da60063 100644
32431 --- a/drivers/infiniband/hw/nes/nes.c
32432 +++ b/drivers/infiniband/hw/nes/nes.c
32433 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32434 LIST_HEAD(nes_adapter_list);
32435 static LIST_HEAD(nes_dev_list);
32436
32437 -atomic_t qps_destroyed;
32438 +atomic_unchecked_t qps_destroyed;
32439
32440 static unsigned int ee_flsh_adapter;
32441 static unsigned int sysfs_nonidx_addr;
32442 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32443 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32444 struct nes_adapter *nesadapter = nesdev->nesadapter;
32445
32446 - atomic_inc(&qps_destroyed);
32447 + atomic_inc_unchecked(&qps_destroyed);
32448
32449 /* Free the control structures */
32450
32451 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32452 index c438e46..ca30356 100644
32453 --- a/drivers/infiniband/hw/nes/nes.h
32454 +++ b/drivers/infiniband/hw/nes/nes.h
32455 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32456 extern unsigned int wqm_quanta;
32457 extern struct list_head nes_adapter_list;
32458
32459 -extern atomic_t cm_connects;
32460 -extern atomic_t cm_accepts;
32461 -extern atomic_t cm_disconnects;
32462 -extern atomic_t cm_closes;
32463 -extern atomic_t cm_connecteds;
32464 -extern atomic_t cm_connect_reqs;
32465 -extern atomic_t cm_rejects;
32466 -extern atomic_t mod_qp_timouts;
32467 -extern atomic_t qps_created;
32468 -extern atomic_t qps_destroyed;
32469 -extern atomic_t sw_qps_destroyed;
32470 +extern atomic_unchecked_t cm_connects;
32471 +extern atomic_unchecked_t cm_accepts;
32472 +extern atomic_unchecked_t cm_disconnects;
32473 +extern atomic_unchecked_t cm_closes;
32474 +extern atomic_unchecked_t cm_connecteds;
32475 +extern atomic_unchecked_t cm_connect_reqs;
32476 +extern atomic_unchecked_t cm_rejects;
32477 +extern atomic_unchecked_t mod_qp_timouts;
32478 +extern atomic_unchecked_t qps_created;
32479 +extern atomic_unchecked_t qps_destroyed;
32480 +extern atomic_unchecked_t sw_qps_destroyed;
32481 extern u32 mh_detected;
32482 extern u32 mh_pauses_sent;
32483 extern u32 cm_packets_sent;
32484 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32485 extern u32 cm_packets_received;
32486 extern u32 cm_packets_dropped;
32487 extern u32 cm_packets_retrans;
32488 -extern atomic_t cm_listens_created;
32489 -extern atomic_t cm_listens_destroyed;
32490 +extern atomic_unchecked_t cm_listens_created;
32491 +extern atomic_unchecked_t cm_listens_destroyed;
32492 extern u32 cm_backlog_drops;
32493 -extern atomic_t cm_loopbacks;
32494 -extern atomic_t cm_nodes_created;
32495 -extern atomic_t cm_nodes_destroyed;
32496 -extern atomic_t cm_accel_dropped_pkts;
32497 -extern atomic_t cm_resets_recvd;
32498 -extern atomic_t pau_qps_created;
32499 -extern atomic_t pau_qps_destroyed;
32500 +extern atomic_unchecked_t cm_loopbacks;
32501 +extern atomic_unchecked_t cm_nodes_created;
32502 +extern atomic_unchecked_t cm_nodes_destroyed;
32503 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32504 +extern atomic_unchecked_t cm_resets_recvd;
32505 +extern atomic_unchecked_t pau_qps_created;
32506 +extern atomic_unchecked_t pau_qps_destroyed;
32507
32508 extern u32 int_mod_timer_init;
32509 extern u32 int_mod_cq_depth_256;
32510 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32511 index 020e95c..fbb3450 100644
32512 --- a/drivers/infiniband/hw/nes/nes_cm.c
32513 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32514 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32515 u32 cm_packets_retrans;
32516 u32 cm_packets_created;
32517 u32 cm_packets_received;
32518 -atomic_t cm_listens_created;
32519 -atomic_t cm_listens_destroyed;
32520 +atomic_unchecked_t cm_listens_created;
32521 +atomic_unchecked_t cm_listens_destroyed;
32522 u32 cm_backlog_drops;
32523 -atomic_t cm_loopbacks;
32524 -atomic_t cm_nodes_created;
32525 -atomic_t cm_nodes_destroyed;
32526 -atomic_t cm_accel_dropped_pkts;
32527 -atomic_t cm_resets_recvd;
32528 +atomic_unchecked_t cm_loopbacks;
32529 +atomic_unchecked_t cm_nodes_created;
32530 +atomic_unchecked_t cm_nodes_destroyed;
32531 +atomic_unchecked_t cm_accel_dropped_pkts;
32532 +atomic_unchecked_t cm_resets_recvd;
32533
32534 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32535 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32536 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32537
32538 static struct nes_cm_core *g_cm_core;
32539
32540 -atomic_t cm_connects;
32541 -atomic_t cm_accepts;
32542 -atomic_t cm_disconnects;
32543 -atomic_t cm_closes;
32544 -atomic_t cm_connecteds;
32545 -atomic_t cm_connect_reqs;
32546 -atomic_t cm_rejects;
32547 +atomic_unchecked_t cm_connects;
32548 +atomic_unchecked_t cm_accepts;
32549 +atomic_unchecked_t cm_disconnects;
32550 +atomic_unchecked_t cm_closes;
32551 +atomic_unchecked_t cm_connecteds;
32552 +atomic_unchecked_t cm_connect_reqs;
32553 +atomic_unchecked_t cm_rejects;
32554
32555 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32556 {
32557 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32558 kfree(listener);
32559 listener = NULL;
32560 ret = 0;
32561 - atomic_inc(&cm_listens_destroyed);
32562 + atomic_inc_unchecked(&cm_listens_destroyed);
32563 } else {
32564 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32565 }
32566 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32567 cm_node->rem_mac);
32568
32569 add_hte_node(cm_core, cm_node);
32570 - atomic_inc(&cm_nodes_created);
32571 + atomic_inc_unchecked(&cm_nodes_created);
32572
32573 return cm_node;
32574 }
32575 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32576 }
32577
32578 atomic_dec(&cm_core->node_cnt);
32579 - atomic_inc(&cm_nodes_destroyed);
32580 + atomic_inc_unchecked(&cm_nodes_destroyed);
32581 nesqp = cm_node->nesqp;
32582 if (nesqp) {
32583 nesqp->cm_node = NULL;
32584 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32585
32586 static void drop_packet(struct sk_buff *skb)
32587 {
32588 - atomic_inc(&cm_accel_dropped_pkts);
32589 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32590 dev_kfree_skb_any(skb);
32591 }
32592
32593 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32594 {
32595
32596 int reset = 0; /* whether to send reset in case of err.. */
32597 - atomic_inc(&cm_resets_recvd);
32598 + atomic_inc_unchecked(&cm_resets_recvd);
32599 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32600 " refcnt=%d\n", cm_node, cm_node->state,
32601 atomic_read(&cm_node->ref_count));
32602 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32603 rem_ref_cm_node(cm_node->cm_core, cm_node);
32604 return NULL;
32605 }
32606 - atomic_inc(&cm_loopbacks);
32607 + atomic_inc_unchecked(&cm_loopbacks);
32608 loopbackremotenode->loopbackpartner = cm_node;
32609 loopbackremotenode->tcp_cntxt.rcv_wscale =
32610 NES_CM_DEFAULT_RCV_WND_SCALE;
32611 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32612 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32613 else {
32614 rem_ref_cm_node(cm_core, cm_node);
32615 - atomic_inc(&cm_accel_dropped_pkts);
32616 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32617 dev_kfree_skb_any(skb);
32618 }
32619 break;
32620 @@ -2891,7 +2891,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32621
32622 if ((cm_id) && (cm_id->event_handler)) {
32623 if (issue_disconn) {
32624 - atomic_inc(&cm_disconnects);
32625 + atomic_inc_unchecked(&cm_disconnects);
32626 cm_event.event = IW_CM_EVENT_DISCONNECT;
32627 cm_event.status = disconn_status;
32628 cm_event.local_addr = cm_id->local_addr;
32629 @@ -2913,7 +2913,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32630 }
32631
32632 if (issue_close) {
32633 - atomic_inc(&cm_closes);
32634 + atomic_inc_unchecked(&cm_closes);
32635 nes_disconnect(nesqp, 1);
32636
32637 cm_id->provider_data = nesqp;
32638 @@ -3049,7 +3049,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32639
32640 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32641 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32642 - atomic_inc(&cm_accepts);
32643 + atomic_inc_unchecked(&cm_accepts);
32644
32645 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32646 netdev_refcnt_read(nesvnic->netdev));
32647 @@ -3251,7 +3251,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32648 struct nes_cm_core *cm_core;
32649 u8 *start_buff;
32650
32651 - atomic_inc(&cm_rejects);
32652 + atomic_inc_unchecked(&cm_rejects);
32653 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32654 loopback = cm_node->loopbackpartner;
32655 cm_core = cm_node->cm_core;
32656 @@ -3311,7 +3311,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32657 ntohl(cm_id->local_addr.sin_addr.s_addr),
32658 ntohs(cm_id->local_addr.sin_port));
32659
32660 - atomic_inc(&cm_connects);
32661 + atomic_inc_unchecked(&cm_connects);
32662 nesqp->active_conn = 1;
32663
32664 /* cache the cm_id in the qp */
32665 @@ -3421,7 +3421,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32666 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32667 return err;
32668 }
32669 - atomic_inc(&cm_listens_created);
32670 + atomic_inc_unchecked(&cm_listens_created);
32671 }
32672
32673 cm_id->add_ref(cm_id);
32674 @@ -3522,7 +3522,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32675
32676 if (nesqp->destroyed)
32677 return;
32678 - atomic_inc(&cm_connecteds);
32679 + atomic_inc_unchecked(&cm_connecteds);
32680 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32681 " local port 0x%04X. jiffies = %lu.\n",
32682 nesqp->hwqp.qp_id,
32683 @@ -3709,7 +3709,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32684
32685 cm_id->add_ref(cm_id);
32686 ret = cm_id->event_handler(cm_id, &cm_event);
32687 - atomic_inc(&cm_closes);
32688 + atomic_inc_unchecked(&cm_closes);
32689 cm_event.event = IW_CM_EVENT_CLOSE;
32690 cm_event.status = 0;
32691 cm_event.provider_data = cm_id->provider_data;
32692 @@ -3745,7 +3745,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32693 return;
32694 cm_id = cm_node->cm_id;
32695
32696 - atomic_inc(&cm_connect_reqs);
32697 + atomic_inc_unchecked(&cm_connect_reqs);
32698 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32699 cm_node, cm_id, jiffies);
32700
32701 @@ -3785,7 +3785,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32702 return;
32703 cm_id = cm_node->cm_id;
32704
32705 - atomic_inc(&cm_connect_reqs);
32706 + atomic_inc_unchecked(&cm_connect_reqs);
32707 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32708 cm_node, cm_id, jiffies);
32709
32710 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32711 index 3ba7be3..c81f6ff 100644
32712 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32713 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32714 @@ -40,8 +40,8 @@
32715 #include "nes.h"
32716 #include "nes_mgt.h"
32717
32718 -atomic_t pau_qps_created;
32719 -atomic_t pau_qps_destroyed;
32720 +atomic_unchecked_t pau_qps_created;
32721 +atomic_unchecked_t pau_qps_destroyed;
32722
32723 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32724 {
32725 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32726 {
32727 struct sk_buff *skb;
32728 unsigned long flags;
32729 - atomic_inc(&pau_qps_destroyed);
32730 + atomic_inc_unchecked(&pau_qps_destroyed);
32731
32732 /* Free packets that have not yet been forwarded */
32733 /* Lock is acquired by skb_dequeue when removing the skb */
32734 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32735 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32736 skb_queue_head_init(&nesqp->pau_list);
32737 spin_lock_init(&nesqp->pau_lock);
32738 - atomic_inc(&pau_qps_created);
32739 + atomic_inc_unchecked(&pau_qps_created);
32740 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32741 }
32742
32743 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32744 index f3a3ecf..57d311d 100644
32745 --- a/drivers/infiniband/hw/nes/nes_nic.c
32746 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32747 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32748 target_stat_values[++index] = mh_detected;
32749 target_stat_values[++index] = mh_pauses_sent;
32750 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32751 - target_stat_values[++index] = atomic_read(&cm_connects);
32752 - target_stat_values[++index] = atomic_read(&cm_accepts);
32753 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32754 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32755 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32756 - target_stat_values[++index] = atomic_read(&cm_rejects);
32757 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32758 - target_stat_values[++index] = atomic_read(&qps_created);
32759 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32760 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32761 - target_stat_values[++index] = atomic_read(&cm_closes);
32762 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32763 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32764 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32765 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32766 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32767 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32768 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32769 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32770 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32771 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32772 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32773 target_stat_values[++index] = cm_packets_sent;
32774 target_stat_values[++index] = cm_packets_bounced;
32775 target_stat_values[++index] = cm_packets_created;
32776 target_stat_values[++index] = cm_packets_received;
32777 target_stat_values[++index] = cm_packets_dropped;
32778 target_stat_values[++index] = cm_packets_retrans;
32779 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32780 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32781 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32782 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32783 target_stat_values[++index] = cm_backlog_drops;
32784 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32785 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32786 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32787 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32788 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32789 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32790 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32791 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32792 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32793 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32794 target_stat_values[++index] = nesadapter->free_4kpbl;
32795 target_stat_values[++index] = nesadapter->free_256pbl;
32796 target_stat_values[++index] = int_mod_timer_init;
32797 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32798 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32799 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32800 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32801 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32802 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32803 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32804 }
32805
32806 /**
32807 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32808 index 8b8812d..a5e1133 100644
32809 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32810 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32811 @@ -46,9 +46,9 @@
32812
32813 #include <rdma/ib_umem.h>
32814
32815 -atomic_t mod_qp_timouts;
32816 -atomic_t qps_created;
32817 -atomic_t sw_qps_destroyed;
32818 +atomic_unchecked_t mod_qp_timouts;
32819 +atomic_unchecked_t qps_created;
32820 +atomic_unchecked_t sw_qps_destroyed;
32821
32822 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32823
32824 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32825 if (init_attr->create_flags)
32826 return ERR_PTR(-EINVAL);
32827
32828 - atomic_inc(&qps_created);
32829 + atomic_inc_unchecked(&qps_created);
32830 switch (init_attr->qp_type) {
32831 case IB_QPT_RC:
32832 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32833 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32834 struct iw_cm_event cm_event;
32835 int ret = 0;
32836
32837 - atomic_inc(&sw_qps_destroyed);
32838 + atomic_inc_unchecked(&sw_qps_destroyed);
32839 nesqp->destroyed = 1;
32840
32841 /* Blow away the connection if it exists. */
32842 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32843 index 7e62f41..4c2b8e2 100644
32844 --- a/drivers/infiniband/hw/qib/qib.h
32845 +++ b/drivers/infiniband/hw/qib/qib.h
32846 @@ -51,6 +51,7 @@
32847 #include <linux/completion.h>
32848 #include <linux/kref.h>
32849 #include <linux/sched.h>
32850 +#include <linux/slab.h>
32851
32852 #include "qib_common.h"
32853 #include "qib_verbs.h"
32854 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32855 index da739d9..da1c7f4 100644
32856 --- a/drivers/input/gameport/gameport.c
32857 +++ b/drivers/input/gameport/gameport.c
32858 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32859 */
32860 static void gameport_init_port(struct gameport *gameport)
32861 {
32862 - static atomic_t gameport_no = ATOMIC_INIT(0);
32863 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32864
32865 __module_get(THIS_MODULE);
32866
32867 mutex_init(&gameport->drv_mutex);
32868 device_initialize(&gameport->dev);
32869 dev_set_name(&gameport->dev, "gameport%lu",
32870 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32871 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32872 gameport->dev.bus = &gameport_bus;
32873 gameport->dev.release = gameport_release_port;
32874 if (gameport->parent)
32875 diff --git a/drivers/input/input.c b/drivers/input/input.c
32876 index 8921c61..f5cd63d 100644
32877 --- a/drivers/input/input.c
32878 +++ b/drivers/input/input.c
32879 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32880 */
32881 int input_register_device(struct input_dev *dev)
32882 {
32883 - static atomic_t input_no = ATOMIC_INIT(0);
32884 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32885 struct input_handler *handler;
32886 const char *path;
32887 int error;
32888 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32889 dev->setkeycode = input_default_setkeycode;
32890
32891 dev_set_name(&dev->dev, "input%ld",
32892 - (unsigned long) atomic_inc_return(&input_no) - 1);
32893 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32894
32895 error = device_add(&dev->dev);
32896 if (error)
32897 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32898 index 04c69af..5f92d00 100644
32899 --- a/drivers/input/joystick/sidewinder.c
32900 +++ b/drivers/input/joystick/sidewinder.c
32901 @@ -30,6 +30,7 @@
32902 #include <linux/kernel.h>
32903 #include <linux/module.h>
32904 #include <linux/slab.h>
32905 +#include <linux/sched.h>
32906 #include <linux/init.h>
32907 #include <linux/input.h>
32908 #include <linux/gameport.h>
32909 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32910 index 83811e4..0822b90 100644
32911 --- a/drivers/input/joystick/xpad.c
32912 +++ b/drivers/input/joystick/xpad.c
32913 @@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32914
32915 static int xpad_led_probe(struct usb_xpad *xpad)
32916 {
32917 - static atomic_t led_seq = ATOMIC_INIT(0);
32918 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32919 long led_no;
32920 struct xpad_led *led;
32921 struct led_classdev *led_cdev;
32922 @@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32923 if (!led)
32924 return -ENOMEM;
32925
32926 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32927 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32928
32929 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32930 led->xpad = xpad;
32931 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32932 index 0110b5a..d3ad144 100644
32933 --- a/drivers/input/mousedev.c
32934 +++ b/drivers/input/mousedev.c
32935 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32936
32937 spin_unlock_irq(&client->packet_lock);
32938
32939 - if (copy_to_user(buffer, data, count))
32940 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32941 return -EFAULT;
32942
32943 return count;
32944 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32945 index d0f7533..fb8215b 100644
32946 --- a/drivers/input/serio/serio.c
32947 +++ b/drivers/input/serio/serio.c
32948 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32949 */
32950 static void serio_init_port(struct serio *serio)
32951 {
32952 - static atomic_t serio_no = ATOMIC_INIT(0);
32953 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32954
32955 __module_get(THIS_MODULE);
32956
32957 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32958 mutex_init(&serio->drv_mutex);
32959 device_initialize(&serio->dev);
32960 dev_set_name(&serio->dev, "serio%ld",
32961 - (long)atomic_inc_return(&serio_no) - 1);
32962 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32963 serio->dev.bus = &serio_bus;
32964 serio->dev.release = serio_release_port;
32965 serio->dev.groups = serio_device_attr_groups;
32966 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32967 index 38c4bd8..58965d9 100644
32968 --- a/drivers/isdn/capi/capi.c
32969 +++ b/drivers/isdn/capi/capi.c
32970 @@ -83,8 +83,8 @@ struct capiminor {
32971
32972 struct capi20_appl *ap;
32973 u32 ncci;
32974 - atomic_t datahandle;
32975 - atomic_t msgid;
32976 + atomic_unchecked_t datahandle;
32977 + atomic_unchecked_t msgid;
32978
32979 struct tty_port port;
32980 int ttyinstop;
32981 @@ -392,7 +392,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32982 capimsg_setu16(s, 2, mp->ap->applid);
32983 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32984 capimsg_setu8 (s, 5, CAPI_RESP);
32985 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32986 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32987 capimsg_setu32(s, 8, mp->ncci);
32988 capimsg_setu16(s, 12, datahandle);
32989 }
32990 @@ -513,14 +513,14 @@ static void handle_minor_send(struct capiminor *mp)
32991 mp->outbytes -= len;
32992 spin_unlock_bh(&mp->outlock);
32993
32994 - datahandle = atomic_inc_return(&mp->datahandle);
32995 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32996 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32997 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32998 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32999 capimsg_setu16(skb->data, 2, mp->ap->applid);
33000 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33001 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33002 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33003 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33004 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33005 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33006 capimsg_setu16(skb->data, 16, len); /* Data length */
33007 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
33008 index a6d9fd2..afdb8a3 100644
33009 --- a/drivers/isdn/gigaset/interface.c
33010 +++ b/drivers/isdn/gigaset/interface.c
33011 @@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
33012 }
33013 tty->driver_data = cs;
33014
33015 - ++cs->port.count;
33016 + atomic_inc(&cs->port.count);
33017
33018 - if (cs->port.count == 1) {
33019 + if (atomic_read(&cs->port.count) == 1) {
33020 tty_port_tty_set(&cs->port, tty);
33021 tty->low_latency = 1;
33022 }
33023 @@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33024
33025 if (!cs->connected)
33026 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33027 - else if (!cs->port.count)
33028 + else if (!atomic_read(&cs->port.count))
33029 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33030 - else if (!--cs->port.count)
33031 + else if (!atomic_dec_return(&cs->port.count))
33032 tty_port_tty_set(&cs->port, NULL);
33033
33034 mutex_unlock(&cs->mutex);
33035 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33036 index 821f7ac..28d4030 100644
33037 --- a/drivers/isdn/hardware/avm/b1.c
33038 +++ b/drivers/isdn/hardware/avm/b1.c
33039 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
33040 }
33041 if (left) {
33042 if (t4file->user) {
33043 - if (copy_from_user(buf, dp, left))
33044 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33045 return -EFAULT;
33046 } else {
33047 memcpy(buf, dp, left);
33048 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
33049 }
33050 if (left) {
33051 if (config->user) {
33052 - if (copy_from_user(buf, dp, left))
33053 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33054 return -EFAULT;
33055 } else {
33056 memcpy(buf, dp, left);
33057 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33058 index dd6b53a..19d9ee6 100644
33059 --- a/drivers/isdn/hardware/eicon/divasync.h
33060 +++ b/drivers/isdn/hardware/eicon/divasync.h
33061 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33062 } diva_didd_add_adapter_t;
33063 typedef struct _diva_didd_remove_adapter {
33064 IDI_CALL p_request;
33065 -} diva_didd_remove_adapter_t;
33066 +} __no_const diva_didd_remove_adapter_t;
33067 typedef struct _diva_didd_read_adapter_array {
33068 void *buffer;
33069 dword length;
33070 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33071 index d303e65..28bcb7b 100644
33072 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33073 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33074 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33075 typedef struct _diva_os_idi_adapter_interface {
33076 diva_init_card_proc_t cleanup_adapter_proc;
33077 diva_cmd_card_proc_t cmd_proc;
33078 -} diva_os_idi_adapter_interface_t;
33079 +} __no_const diva_os_idi_adapter_interface_t;
33080
33081 typedef struct _diva_os_xdi_adapter {
33082 struct list_head link;
33083 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
33084 index 7bc5067..fd36232 100644
33085 --- a/drivers/isdn/i4l/isdn_tty.c
33086 +++ b/drivers/isdn/i4l/isdn_tty.c
33087 @@ -1505,9 +1505,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
33088 port = &info->port;
33089 #ifdef ISDN_DEBUG_MODEM_OPEN
33090 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
33091 - port->count);
33092 + atomic_read(&port->count))
33093 #endif
33094 - port->count++;
33095 + atomic_inc(&port->count);
33096 tty->driver_data = info;
33097 port->tty = tty;
33098 tty->port = port;
33099 @@ -1553,7 +1553,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33100 #endif
33101 return;
33102 }
33103 - if ((tty->count == 1) && (port->count != 1)) {
33104 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
33105 /*
33106 * Uh, oh. tty->count is 1, which means that the tty
33107 * structure will be freed. Info->count should always
33108 @@ -1562,15 +1562,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33109 * serial port won't be shutdown.
33110 */
33111 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
33112 - "info->count is %d\n", port->count);
33113 - port->count = 1;
33114 + "info->count is %d\n", atomic_read(&port->count));
33115 + atomic_set(&port->count, 1);
33116 }
33117 - if (--port->count < 0) {
33118 + if (atomic_dec_return(&port->count) < 0) {
33119 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
33120 - info->line, port->count);
33121 - port->count = 0;
33122 + info->line, atomic_read(&port->count));
33123 + atomic_set(&port->count, 0);
33124 }
33125 - if (port->count) {
33126 + if (atomic_read(&port->count)) {
33127 #ifdef ISDN_DEBUG_MODEM_OPEN
33128 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
33129 #endif
33130 @@ -1624,7 +1624,7 @@ isdn_tty_hangup(struct tty_struct *tty)
33131 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
33132 return;
33133 isdn_tty_shutdown(info);
33134 - port->count = 0;
33135 + atomic_set(&port->count, 0);
33136 port->flags &= ~ASYNC_NORMAL_ACTIVE;
33137 port->tty = NULL;
33138 wake_up_interruptible(&port->open_wait);
33139 @@ -1964,7 +1964,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
33140 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
33141 modem_info *info = &dev->mdm.info[i];
33142
33143 - if (info->port.count == 0)
33144 + if (atomic_read(&info->port.count) == 0)
33145 continue;
33146 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
33147 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
33148 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33149 index e74df7c..03a03ba 100644
33150 --- a/drivers/isdn/icn/icn.c
33151 +++ b/drivers/isdn/icn/icn.c
33152 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
33153 if (count > len)
33154 count = len;
33155 if (user) {
33156 - if (copy_from_user(msg, buf, count))
33157 + if (count > sizeof msg || copy_from_user(msg, buf, count))
33158 return -EFAULT;
33159 } else
33160 memcpy(msg, buf, count);
33161 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33162 index b5fdcb7..5b6c59f 100644
33163 --- a/drivers/lguest/core.c
33164 +++ b/drivers/lguest/core.c
33165 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
33166 * it's worked so far. The end address needs +1 because __get_vm_area
33167 * allocates an extra guard page, so we need space for that.
33168 */
33169 +
33170 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33171 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33172 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33173 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33174 +#else
33175 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33176 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33177 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33178 +#endif
33179 +
33180 if (!switcher_vma) {
33181 err = -ENOMEM;
33182 printk("lguest: could not map switcher pages high\n");
33183 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
33184 * Now the Switcher is mapped at the right address, we can't fail!
33185 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33186 */
33187 - memcpy(switcher_vma->addr, start_switcher_text,
33188 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33189 end_switcher_text - start_switcher_text);
33190
33191 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33192 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33193 index 39809035..ce25c5e 100644
33194 --- a/drivers/lguest/x86/core.c
33195 +++ b/drivers/lguest/x86/core.c
33196 @@ -59,7 +59,7 @@ static struct {
33197 /* Offset from where switcher.S was compiled to where we've copied it */
33198 static unsigned long switcher_offset(void)
33199 {
33200 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33201 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33202 }
33203
33204 /* This cpu's struct lguest_pages. */
33205 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33206 * These copies are pretty cheap, so we do them unconditionally: */
33207 /* Save the current Host top-level page directory.
33208 */
33209 +
33210 +#ifdef CONFIG_PAX_PER_CPU_PGD
33211 + pages->state.host_cr3 = read_cr3();
33212 +#else
33213 pages->state.host_cr3 = __pa(current->mm->pgd);
33214 +#endif
33215 +
33216 /*
33217 * Set up the Guest's page tables to see this CPU's pages (and no
33218 * other CPU's pages).
33219 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33220 * compiled-in switcher code and the high-mapped copy we just made.
33221 */
33222 for (i = 0; i < IDT_ENTRIES; i++)
33223 - default_idt_entries[i] += switcher_offset();
33224 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33225
33226 /*
33227 * Set up the Switcher's per-cpu areas.
33228 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33229 * it will be undisturbed when we switch. To change %cs and jump we
33230 * need this structure to feed to Intel's "lcall" instruction.
33231 */
33232 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33233 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33234 lguest_entry.segment = LGUEST_CS;
33235
33236 /*
33237 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33238 index 40634b0..4f5855e 100644
33239 --- a/drivers/lguest/x86/switcher_32.S
33240 +++ b/drivers/lguest/x86/switcher_32.S
33241 @@ -87,6 +87,7 @@
33242 #include <asm/page.h>
33243 #include <asm/segment.h>
33244 #include <asm/lguest.h>
33245 +#include <asm/processor-flags.h>
33246
33247 // We mark the start of the code to copy
33248 // It's placed in .text tho it's never run here
33249 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33250 // Changes type when we load it: damn Intel!
33251 // For after we switch over our page tables
33252 // That entry will be read-only: we'd crash.
33253 +
33254 +#ifdef CONFIG_PAX_KERNEXEC
33255 + mov %cr0, %edx
33256 + xor $X86_CR0_WP, %edx
33257 + mov %edx, %cr0
33258 +#endif
33259 +
33260 movl $(GDT_ENTRY_TSS*8), %edx
33261 ltr %dx
33262
33263 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33264 // Let's clear it again for our return.
33265 // The GDT descriptor of the Host
33266 // Points to the table after two "size" bytes
33267 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33268 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33269 // Clear "used" from type field (byte 5, bit 2)
33270 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33271 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33272 +
33273 +#ifdef CONFIG_PAX_KERNEXEC
33274 + mov %cr0, %eax
33275 + xor $X86_CR0_WP, %eax
33276 + mov %eax, %cr0
33277 +#endif
33278
33279 // Once our page table's switched, the Guest is live!
33280 // The Host fades as we run this final step.
33281 @@ -295,13 +309,12 @@ deliver_to_host:
33282 // I consulted gcc, and it gave
33283 // These instructions, which I gladly credit:
33284 leal (%edx,%ebx,8), %eax
33285 - movzwl (%eax),%edx
33286 - movl 4(%eax), %eax
33287 - xorw %ax, %ax
33288 - orl %eax, %edx
33289 + movl 4(%eax), %edx
33290 + movw (%eax), %dx
33291 // Now the address of the handler's in %edx
33292 // We call it now: its "iret" drops us home.
33293 - jmp *%edx
33294 + ljmp $__KERNEL_CS, $1f
33295 +1: jmp *%edx
33296
33297 // Every interrupt can come to us here
33298 // But we must truly tell each apart.
33299 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33300 index 20e5c2c..9e849a9 100644
33301 --- a/drivers/macintosh/macio_asic.c
33302 +++ b/drivers/macintosh/macio_asic.c
33303 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33304 * MacIO is matched against any Apple ID, it's probe() function
33305 * will then decide wether it applies or not
33306 */
33307 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33308 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33309 .vendor = PCI_VENDOR_ID_APPLE,
33310 .device = PCI_ANY_ID,
33311 .subvendor = PCI_ANY_ID,
33312 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33313 index 15dbe03..743fc65 100644
33314 --- a/drivers/md/bitmap.c
33315 +++ b/drivers/md/bitmap.c
33316 @@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33317 chunk_kb ? "KB" : "B");
33318 if (bitmap->storage.file) {
33319 seq_printf(seq, ", file: ");
33320 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
33321 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
33322 }
33323
33324 seq_printf(seq, "\n");
33325 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33326 index a1a3e6d..1918bfc 100644
33327 --- a/drivers/md/dm-ioctl.c
33328 +++ b/drivers/md/dm-ioctl.c
33329 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33330 cmd == DM_LIST_VERSIONS_CMD)
33331 return 0;
33332
33333 - if ((cmd == DM_DEV_CREATE_CMD)) {
33334 + if (cmd == DM_DEV_CREATE_CMD) {
33335 if (!*param->name) {
33336 DMWARN("name not supplied when creating device");
33337 return -EINVAL;
33338 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33339 index b58b7a3..8018b19 100644
33340 --- a/drivers/md/dm-raid1.c
33341 +++ b/drivers/md/dm-raid1.c
33342 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33343
33344 struct mirror {
33345 struct mirror_set *ms;
33346 - atomic_t error_count;
33347 + atomic_unchecked_t error_count;
33348 unsigned long error_type;
33349 struct dm_dev *dev;
33350 sector_t offset;
33351 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33352 struct mirror *m;
33353
33354 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33355 - if (!atomic_read(&m->error_count))
33356 + if (!atomic_read_unchecked(&m->error_count))
33357 return m;
33358
33359 return NULL;
33360 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33361 * simple way to tell if a device has encountered
33362 * errors.
33363 */
33364 - atomic_inc(&m->error_count);
33365 + atomic_inc_unchecked(&m->error_count);
33366
33367 if (test_and_set_bit(error_type, &m->error_type))
33368 return;
33369 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33370 struct mirror *m = get_default_mirror(ms);
33371
33372 do {
33373 - if (likely(!atomic_read(&m->error_count)))
33374 + if (likely(!atomic_read_unchecked(&m->error_count)))
33375 return m;
33376
33377 if (m-- == ms->mirror)
33378 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33379 {
33380 struct mirror *default_mirror = get_default_mirror(m->ms);
33381
33382 - return !atomic_read(&default_mirror->error_count);
33383 + return !atomic_read_unchecked(&default_mirror->error_count);
33384 }
33385
33386 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33387 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33388 */
33389 if (likely(region_in_sync(ms, region, 1)))
33390 m = choose_mirror(ms, bio->bi_sector);
33391 - else if (m && atomic_read(&m->error_count))
33392 + else if (m && atomic_read_unchecked(&m->error_count))
33393 m = NULL;
33394
33395 if (likely(m))
33396 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33397 }
33398
33399 ms->mirror[mirror].ms = ms;
33400 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33401 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33402 ms->mirror[mirror].error_type = 0;
33403 ms->mirror[mirror].offset = offset;
33404
33405 @@ -1352,7 +1352,7 @@ static void mirror_resume(struct dm_target *ti)
33406 */
33407 static char device_status_char(struct mirror *m)
33408 {
33409 - if (!atomic_read(&(m->error_count)))
33410 + if (!atomic_read_unchecked(&(m->error_count)))
33411 return 'A';
33412
33413 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33414 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33415 index 35c94ff..20d4c17 100644
33416 --- a/drivers/md/dm-stripe.c
33417 +++ b/drivers/md/dm-stripe.c
33418 @@ -20,7 +20,7 @@ struct stripe {
33419 struct dm_dev *dev;
33420 sector_t physical_start;
33421
33422 - atomic_t error_count;
33423 + atomic_unchecked_t error_count;
33424 };
33425
33426 struct stripe_c {
33427 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33428 kfree(sc);
33429 return r;
33430 }
33431 - atomic_set(&(sc->stripe[i].error_count), 0);
33432 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33433 }
33434
33435 ti->private = sc;
33436 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33437 DMEMIT("%d ", sc->stripes);
33438 for (i = 0; i < sc->stripes; i++) {
33439 DMEMIT("%s ", sc->stripe[i].dev->name);
33440 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33441 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33442 'D' : 'A';
33443 }
33444 buffer[i] = '\0';
33445 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33446 */
33447 for (i = 0; i < sc->stripes; i++)
33448 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33449 - atomic_inc(&(sc->stripe[i].error_count));
33450 - if (atomic_read(&(sc->stripe[i].error_count)) <
33451 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33452 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33453 DM_IO_ERROR_THRESHOLD)
33454 schedule_work(&sc->trigger_event);
33455 }
33456 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33457 index 2e227fb..44ead1f 100644
33458 --- a/drivers/md/dm-table.c
33459 +++ b/drivers/md/dm-table.c
33460 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33461 if (!dev_size)
33462 return 0;
33463
33464 - if ((start >= dev_size) || (start + len > dev_size)) {
33465 + if ((start >= dev_size) || (len > dev_size - start)) {
33466 DMWARN("%s: %s too small for target: "
33467 "start=%llu, len=%llu, dev_size=%llu",
33468 dm_device_name(ti->table->md), bdevname(bdev, b),
33469 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33470 index 3e2907f..c28851a 100644
33471 --- a/drivers/md/dm-thin-metadata.c
33472 +++ b/drivers/md/dm-thin-metadata.c
33473 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33474
33475 pmd->info.tm = tm;
33476 pmd->info.levels = 2;
33477 - pmd->info.value_type.context = pmd->data_sm;
33478 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33479 pmd->info.value_type.size = sizeof(__le64);
33480 pmd->info.value_type.inc = data_block_inc;
33481 pmd->info.value_type.dec = data_block_dec;
33482 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33483
33484 pmd->bl_info.tm = tm;
33485 pmd->bl_info.levels = 1;
33486 - pmd->bl_info.value_type.context = pmd->data_sm;
33487 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33488 pmd->bl_info.value_type.size = sizeof(__le64);
33489 pmd->bl_info.value_type.inc = data_block_inc;
33490 pmd->bl_info.value_type.dec = data_block_dec;
33491 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33492 index e24143c..ce2f21a1 100644
33493 --- a/drivers/md/dm.c
33494 +++ b/drivers/md/dm.c
33495 @@ -176,9 +176,9 @@ struct mapped_device {
33496 /*
33497 * Event handling.
33498 */
33499 - atomic_t event_nr;
33500 + atomic_unchecked_t event_nr;
33501 wait_queue_head_t eventq;
33502 - atomic_t uevent_seq;
33503 + atomic_unchecked_t uevent_seq;
33504 struct list_head uevent_list;
33505 spinlock_t uevent_lock; /* Protect access to uevent_list */
33506
33507 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33508 rwlock_init(&md->map_lock);
33509 atomic_set(&md->holders, 1);
33510 atomic_set(&md->open_count, 0);
33511 - atomic_set(&md->event_nr, 0);
33512 - atomic_set(&md->uevent_seq, 0);
33513 + atomic_set_unchecked(&md->event_nr, 0);
33514 + atomic_set_unchecked(&md->uevent_seq, 0);
33515 INIT_LIST_HEAD(&md->uevent_list);
33516 spin_lock_init(&md->uevent_lock);
33517
33518 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33519
33520 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33521
33522 - atomic_inc(&md->event_nr);
33523 + atomic_inc_unchecked(&md->event_nr);
33524 wake_up(&md->eventq);
33525 }
33526
33527 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33528
33529 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33530 {
33531 - return atomic_add_return(1, &md->uevent_seq);
33532 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33533 }
33534
33535 uint32_t dm_get_event_nr(struct mapped_device *md)
33536 {
33537 - return atomic_read(&md->event_nr);
33538 + return atomic_read_unchecked(&md->event_nr);
33539 }
33540
33541 int dm_wait_event(struct mapped_device *md, int event_nr)
33542 {
33543 return wait_event_interruptible(md->eventq,
33544 - (event_nr != atomic_read(&md->event_nr)));
33545 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33546 }
33547
33548 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33549 diff --git a/drivers/md/md.c b/drivers/md/md.c
33550 index d5ab449..7e9ed7b 100644
33551 --- a/drivers/md/md.c
33552 +++ b/drivers/md/md.c
33553 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33554 * start build, activate spare
33555 */
33556 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33557 -static atomic_t md_event_count;
33558 +static atomic_unchecked_t md_event_count;
33559 void md_new_event(struct mddev *mddev)
33560 {
33561 - atomic_inc(&md_event_count);
33562 + atomic_inc_unchecked(&md_event_count);
33563 wake_up(&md_event_waiters);
33564 }
33565 EXPORT_SYMBOL_GPL(md_new_event);
33566 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33567 */
33568 static void md_new_event_inintr(struct mddev *mddev)
33569 {
33570 - atomic_inc(&md_event_count);
33571 + atomic_inc_unchecked(&md_event_count);
33572 wake_up(&md_event_waiters);
33573 }
33574
33575 @@ -1565,7 +1565,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33576 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
33577 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
33578 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
33579 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33580 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33581
33582 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33583 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33584 @@ -1809,7 +1809,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33585 else
33586 sb->resync_offset = cpu_to_le64(0);
33587
33588 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33589 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33590
33591 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33592 sb->size = cpu_to_le64(mddev->dev_sectors);
33593 @@ -2803,7 +2803,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33594 static ssize_t
33595 errors_show(struct md_rdev *rdev, char *page)
33596 {
33597 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33598 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33599 }
33600
33601 static ssize_t
33602 @@ -2812,7 +2812,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33603 char *e;
33604 unsigned long n = simple_strtoul(buf, &e, 10);
33605 if (*buf && (*e == 0 || *e == '\n')) {
33606 - atomic_set(&rdev->corrected_errors, n);
33607 + atomic_set_unchecked(&rdev->corrected_errors, n);
33608 return len;
33609 }
33610 return -EINVAL;
33611 @@ -3259,8 +3259,8 @@ int md_rdev_init(struct md_rdev *rdev)
33612 rdev->sb_loaded = 0;
33613 rdev->bb_page = NULL;
33614 atomic_set(&rdev->nr_pending, 0);
33615 - atomic_set(&rdev->read_errors, 0);
33616 - atomic_set(&rdev->corrected_errors, 0);
33617 + atomic_set_unchecked(&rdev->read_errors, 0);
33618 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33619
33620 INIT_LIST_HEAD(&rdev->same_set);
33621 init_waitqueue_head(&rdev->blocked_wait);
33622 @@ -6997,7 +6997,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33623
33624 spin_unlock(&pers_lock);
33625 seq_printf(seq, "\n");
33626 - seq->poll_event = atomic_read(&md_event_count);
33627 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33628 return 0;
33629 }
33630 if (v == (void*)2) {
33631 @@ -7100,7 +7100,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33632 return error;
33633
33634 seq = file->private_data;
33635 - seq->poll_event = atomic_read(&md_event_count);
33636 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33637 return error;
33638 }
33639
33640 @@ -7114,7 +7114,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33641 /* always allow read */
33642 mask = POLLIN | POLLRDNORM;
33643
33644 - if (seq->poll_event != atomic_read(&md_event_count))
33645 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33646 mask |= POLLERR | POLLPRI;
33647 return mask;
33648 }
33649 @@ -7158,7 +7158,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33650 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33651 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33652 (int)part_stat_read(&disk->part0, sectors[1]) -
33653 - atomic_read(&disk->sync_io);
33654 + atomic_read_unchecked(&disk->sync_io);
33655 /* sync IO will cause sync_io to increase before the disk_stats
33656 * as sync_io is counted when a request starts, and
33657 * disk_stats is counted when it completes.
33658 diff --git a/drivers/md/md.h b/drivers/md/md.h
33659 index 7b4a3c3..994ad4f 100644
33660 --- a/drivers/md/md.h
33661 +++ b/drivers/md/md.h
33662 @@ -94,13 +94,13 @@ struct md_rdev {
33663 * only maintained for arrays that
33664 * support hot removal
33665 */
33666 - atomic_t read_errors; /* number of consecutive read errors that
33667 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33668 * we have tried to ignore.
33669 */
33670 struct timespec last_read_error; /* monotonic time since our
33671 * last read error
33672 */
33673 - atomic_t corrected_errors; /* number of corrected read errors,
33674 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33675 * for reporting to userspace and storing
33676 * in superblock.
33677 */
33678 @@ -435,7 +435,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33679
33680 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33681 {
33682 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33683 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33684 }
33685
33686 struct md_personality
33687 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33688 index fc90c11..c8cd9a9 100644
33689 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33690 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33691 @@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33692 /*----------------------------------------------------------------*/
33693
33694 struct sm_checker {
33695 - struct dm_space_map sm;
33696 + dm_space_map_no_const sm;
33697
33698 struct count_array old_counts;
33699 struct count_array counts;
33700 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33701 index 3d0ed53..35dc592 100644
33702 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33703 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33704 @@ -23,7 +23,7 @@
33705 * Space map interface.
33706 */
33707 struct sm_disk {
33708 - struct dm_space_map sm;
33709 + dm_space_map_no_const sm;
33710
33711 struct ll_disk ll;
33712 struct ll_disk old_ll;
33713 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33714 index e89ae5e..062e4c2 100644
33715 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33716 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33717 @@ -43,7 +43,7 @@ struct block_op {
33718 };
33719
33720 struct sm_metadata {
33721 - struct dm_space_map sm;
33722 + dm_space_map_no_const sm;
33723
33724 struct ll_disk ll;
33725 struct ll_disk old_ll;
33726 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33727 index 1cbfc6b..56e1dbb 100644
33728 --- a/drivers/md/persistent-data/dm-space-map.h
33729 +++ b/drivers/md/persistent-data/dm-space-map.h
33730 @@ -60,6 +60,7 @@ struct dm_space_map {
33731 int (*root_size)(struct dm_space_map *sm, size_t *result);
33732 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33733 };
33734 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33735
33736 /*----------------------------------------------------------------*/
33737
33738 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33739 index 53aec45..250851c 100644
33740 --- a/drivers/md/raid1.c
33741 +++ b/drivers/md/raid1.c
33742 @@ -1685,7 +1685,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33743 if (r1_sync_page_io(rdev, sect, s,
33744 bio->bi_io_vec[idx].bv_page,
33745 READ) != 0)
33746 - atomic_add(s, &rdev->corrected_errors);
33747 + atomic_add_unchecked(s, &rdev->corrected_errors);
33748 }
33749 sectors -= s;
33750 sect += s;
33751 @@ -1907,7 +1907,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33752 test_bit(In_sync, &rdev->flags)) {
33753 if (r1_sync_page_io(rdev, sect, s,
33754 conf->tmppage, READ)) {
33755 - atomic_add(s, &rdev->corrected_errors);
33756 + atomic_add_unchecked(s, &rdev->corrected_errors);
33757 printk(KERN_INFO
33758 "md/raid1:%s: read error corrected "
33759 "(%d sectors at %llu on %s)\n",
33760 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33761 index 8da6282..8ec7103 100644
33762 --- a/drivers/md/raid10.c
33763 +++ b/drivers/md/raid10.c
33764 @@ -1784,7 +1784,7 @@ static void end_sync_read(struct bio *bio, int error)
33765 /* The write handler will notice the lack of
33766 * R10BIO_Uptodate and record any errors etc
33767 */
33768 - atomic_add(r10_bio->sectors,
33769 + atomic_add_unchecked(r10_bio->sectors,
33770 &conf->mirrors[d].rdev->corrected_errors);
33771
33772 /* for reconstruct, we always reschedule after a read.
33773 @@ -2133,7 +2133,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33774 {
33775 struct timespec cur_time_mon;
33776 unsigned long hours_since_last;
33777 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33778 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33779
33780 ktime_get_ts(&cur_time_mon);
33781
33782 @@ -2155,9 +2155,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33783 * overflowing the shift of read_errors by hours_since_last.
33784 */
33785 if (hours_since_last >= 8 * sizeof(read_errors))
33786 - atomic_set(&rdev->read_errors, 0);
33787 + atomic_set_unchecked(&rdev->read_errors, 0);
33788 else
33789 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33790 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33791 }
33792
33793 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33794 @@ -2211,8 +2211,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33795 return;
33796
33797 check_decay_read_errors(mddev, rdev);
33798 - atomic_inc(&rdev->read_errors);
33799 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33800 + atomic_inc_unchecked(&rdev->read_errors);
33801 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33802 char b[BDEVNAME_SIZE];
33803 bdevname(rdev->bdev, b);
33804
33805 @@ -2220,7 +2220,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33806 "md/raid10:%s: %s: Raid device exceeded "
33807 "read_error threshold [cur %d:max %d]\n",
33808 mdname(mddev), b,
33809 - atomic_read(&rdev->read_errors), max_read_errors);
33810 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33811 printk(KERN_NOTICE
33812 "md/raid10:%s: %s: Failing raid device\n",
33813 mdname(mddev), b);
33814 @@ -2375,7 +2375,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33815 sect +
33816 choose_data_offset(r10_bio, rdev)),
33817 bdevname(rdev->bdev, b));
33818 - atomic_add(s, &rdev->corrected_errors);
33819 + atomic_add_unchecked(s, &rdev->corrected_errors);
33820 }
33821
33822 rdev_dec_pending(rdev, mddev);
33823 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33824 index 04348d7..62a4b9b 100644
33825 --- a/drivers/md/raid5.c
33826 +++ b/drivers/md/raid5.c
33827 @@ -1736,19 +1736,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
33828 mdname(conf->mddev), STRIPE_SECTORS,
33829 (unsigned long long)s,
33830 bdevname(rdev->bdev, b));
33831 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33832 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33833 clear_bit(R5_ReadError, &sh->dev[i].flags);
33834 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33835 }
33836 - if (atomic_read(&rdev->read_errors))
33837 - atomic_set(&rdev->read_errors, 0);
33838 + if (atomic_read_unchecked(&rdev->read_errors))
33839 + atomic_set_unchecked(&rdev->read_errors, 0);
33840 } else {
33841 const char *bdn = bdevname(rdev->bdev, b);
33842 int retry = 0;
33843 int set_bad = 0;
33844
33845 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33846 - atomic_inc(&rdev->read_errors);
33847 + atomic_inc_unchecked(&rdev->read_errors);
33848 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33849 printk_ratelimited(
33850 KERN_WARNING
33851 @@ -1776,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33852 mdname(conf->mddev),
33853 (unsigned long long)s,
33854 bdn);
33855 - } else if (atomic_read(&rdev->read_errors)
33856 + } else if (atomic_read_unchecked(&rdev->read_errors)
33857 > conf->max_nr_stripes)
33858 printk(KERN_WARNING
33859 "md/raid:%s: Too many read errors, failing device %s.\n",
33860 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33861 index 131b938..8572ed1 100644
33862 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33863 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33864 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33865 .subvendor = _subvend, .subdevice = _subdev, \
33866 .driver_data = (unsigned long)&_driverdata }
33867
33868 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33869 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33870 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33871 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33872 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33873 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33874 index fa7188a..04a045e 100644
33875 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33876 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33877 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33878 union {
33879 dmx_ts_cb ts;
33880 dmx_section_cb sec;
33881 - } cb;
33882 + } __no_const cb;
33883
33884 struct dvb_demux *demux;
33885 void *priv;
33886 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33887 index 39eab73..60033e7 100644
33888 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33889 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33890 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33891 const struct dvb_device *template, void *priv, int type)
33892 {
33893 struct dvb_device *dvbdev;
33894 - struct file_operations *dvbdevfops;
33895 + file_operations_no_const *dvbdevfops;
33896 struct device *clsdev;
33897 int minor;
33898 int id;
33899 diff --git a/drivers/media/dvb/dvb-usb/az6007.c b/drivers/media/dvb/dvb-usb/az6007.c
33900 index 4008b9c..ce714f5 100644
33901 --- a/drivers/media/dvb/dvb-usb/az6007.c
33902 +++ b/drivers/media/dvb/dvb-usb/az6007.c
33903 @@ -590,7 +590,7 @@ static int az6007_read_mac_addr(struct dvb_usb_device *d, u8 mac[6])
33904 int ret;
33905
33906 ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6);
33907 - memcpy(mac, st->data, sizeof(mac));
33908 + memcpy(mac, st->data, 6);
33909
33910 if (ret > 0)
33911 deb_info("%s: mac is %02x:%02x:%02x:%02x:%02x:%02x\n",
33912 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33913 index 3940bb0..fb3952a 100644
33914 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33915 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33916 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33917
33918 struct dib0700_adapter_state {
33919 int (*set_param_save) (struct dvb_frontend *);
33920 -};
33921 +} __no_const;
33922
33923 static int dib7070_set_param_override(struct dvb_frontend *fe)
33924 {
33925 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33926 index 9382895..ac8093c 100644
33927 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33928 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33929 @@ -95,7 +95,7 @@ struct su3000_state {
33930
33931 struct s6x0_state {
33932 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33933 -};
33934 +} __no_const;
33935
33936 /* debug */
33937 static int dvb_usb_dw2102_debug;
33938 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33939 index 404f63a..4796533 100644
33940 --- a/drivers/media/dvb/frontends/dib3000.h
33941 +++ b/drivers/media/dvb/frontends/dib3000.h
33942 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33943 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33944 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33945 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33946 -};
33947 +} __no_const;
33948
33949 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33950 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33951 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33952 index 7539a5d..06531a6 100644
33953 --- a/drivers/media/dvb/ngene/ngene-cards.c
33954 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33955 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33956
33957 /****************************************************************************/
33958
33959 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33960 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33961 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33962 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33963 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33964 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33965 index 16a089f..1661b11 100644
33966 --- a/drivers/media/radio/radio-cadet.c
33967 +++ b/drivers/media/radio/radio-cadet.c
33968 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33969 unsigned char readbuf[RDS_BUFFER];
33970 int i = 0;
33971
33972 + if (count > RDS_BUFFER)
33973 + return -EFAULT;
33974 mutex_lock(&dev->lock);
33975 if (dev->rdsstat == 0) {
33976 dev->rdsstat = 1;
33977 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33978 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33979 mutex_unlock(&dev->lock);
33980
33981 - if (copy_to_user(data, readbuf, i))
33982 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33983 return -EFAULT;
33984 return i;
33985 }
33986 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33987 index 9cde353..8c6a1c3 100644
33988 --- a/drivers/media/video/au0828/au0828.h
33989 +++ b/drivers/media/video/au0828/au0828.h
33990 @@ -191,7 +191,7 @@ struct au0828_dev {
33991
33992 /* I2C */
33993 struct i2c_adapter i2c_adap;
33994 - struct i2c_algorithm i2c_algo;
33995 + i2c_algorithm_no_const i2c_algo;
33996 struct i2c_client i2c_client;
33997 u32 i2c_rc;
33998
33999 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
34000 index 04bf662..e0ac026 100644
34001 --- a/drivers/media/video/cx88/cx88-alsa.c
34002 +++ b/drivers/media/video/cx88/cx88-alsa.c
34003 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34004 * Only boards with eeprom and byte 1 at eeprom=1 have it
34005 */
34006
34007 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34008 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34009 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34010 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34011 {0, }
34012 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
34013 index 88cf9d9..bbc4b2c 100644
34014 --- a/drivers/media/video/omap/omap_vout.c
34015 +++ b/drivers/media/video/omap/omap_vout.c
34016 @@ -64,7 +64,6 @@ enum omap_vout_channels {
34017 OMAP_VIDEO2,
34018 };
34019
34020 -static struct videobuf_queue_ops video_vbq_ops;
34021 /* Variables configurable through module params*/
34022 static u32 video1_numbuffers = 3;
34023 static u32 video2_numbuffers = 3;
34024 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
34025 {
34026 struct videobuf_queue *q;
34027 struct omap_vout_device *vout = NULL;
34028 + static struct videobuf_queue_ops video_vbq_ops = {
34029 + .buf_setup = omap_vout_buffer_setup,
34030 + .buf_prepare = omap_vout_buffer_prepare,
34031 + .buf_release = omap_vout_buffer_release,
34032 + .buf_queue = omap_vout_buffer_queue,
34033 + };
34034
34035 vout = video_drvdata(file);
34036 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34037 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
34038 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34039
34040 q = &vout->vbq;
34041 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34042 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34043 - video_vbq_ops.buf_release = omap_vout_buffer_release;
34044 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34045 spin_lock_init(&vout->vbq_lock);
34046
34047 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
34048 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34049 index 036952f..80d356d 100644
34050 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34051 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34052 @@ -196,7 +196,7 @@ struct pvr2_hdw {
34053
34054 /* I2C stuff */
34055 struct i2c_adapter i2c_adap;
34056 - struct i2c_algorithm i2c_algo;
34057 + i2c_algorithm_no_const i2c_algo;
34058 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34059 int i2c_cx25840_hack_state;
34060 int i2c_linked;
34061 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34062 index 02194c0..091733b 100644
34063 --- a/drivers/media/video/timblogiw.c
34064 +++ b/drivers/media/video/timblogiw.c
34065 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34066
34067 /* Platform device functions */
34068
34069 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34070 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34071 .vidioc_querycap = timblogiw_querycap,
34072 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34073 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34074 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34075 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34076 };
34077
34078 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34079 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34080 .owner = THIS_MODULE,
34081 .open = timblogiw_open,
34082 .release = timblogiw_close,
34083 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34084 index d99db56..a16b959 100644
34085 --- a/drivers/message/fusion/mptbase.c
34086 +++ b/drivers/message/fusion/mptbase.c
34087 @@ -6751,8 +6751,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34088 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34089 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34090
34091 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34092 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34093 +#else
34094 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34095 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34096 +#endif
34097 +
34098 /*
34099 * Rounding UP to nearest 4-kB boundary here...
34100 */
34101 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34102 index 551262e..7551198 100644
34103 --- a/drivers/message/fusion/mptsas.c
34104 +++ b/drivers/message/fusion/mptsas.c
34105 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34106 return 0;
34107 }
34108
34109 +static inline void
34110 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34111 +{
34112 + if (phy_info->port_details) {
34113 + phy_info->port_details->rphy = rphy;
34114 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34115 + ioc->name, rphy));
34116 + }
34117 +
34118 + if (rphy) {
34119 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34120 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34121 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34122 + ioc->name, rphy, rphy->dev.release));
34123 + }
34124 +}
34125 +
34126 /* no mutex */
34127 static void
34128 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34129 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34130 return NULL;
34131 }
34132
34133 -static inline void
34134 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34135 -{
34136 - if (phy_info->port_details) {
34137 - phy_info->port_details->rphy = rphy;
34138 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34139 - ioc->name, rphy));
34140 - }
34141 -
34142 - if (rphy) {
34143 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34144 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34145 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34146 - ioc->name, rphy, rphy->dev.release));
34147 - }
34148 -}
34149 -
34150 static inline struct sas_port *
34151 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34152 {
34153 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34154 index 0c3ced7..1fe34ec 100644
34155 --- a/drivers/message/fusion/mptscsih.c
34156 +++ b/drivers/message/fusion/mptscsih.c
34157 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34158
34159 h = shost_priv(SChost);
34160
34161 - if (h) {
34162 - if (h->info_kbuf == NULL)
34163 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34164 - return h->info_kbuf;
34165 - h->info_kbuf[0] = '\0';
34166 + if (!h)
34167 + return NULL;
34168
34169 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34170 - h->info_kbuf[size-1] = '\0';
34171 - }
34172 + if (h->info_kbuf == NULL)
34173 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34174 + return h->info_kbuf;
34175 + h->info_kbuf[0] = '\0';
34176 +
34177 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34178 + h->info_kbuf[size-1] = '\0';
34179
34180 return h->info_kbuf;
34181 }
34182 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34183 index 506c36f..b137580 100644
34184 --- a/drivers/message/i2o/i2o_proc.c
34185 +++ b/drivers/message/i2o/i2o_proc.c
34186 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34187 "Array Controller Device"
34188 };
34189
34190 -static char *chtostr(u8 * chars, int n)
34191 -{
34192 - char tmp[256];
34193 - tmp[0] = 0;
34194 - return strncat(tmp, (char *)chars, n);
34195 -}
34196 -
34197 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34198 char *group)
34199 {
34200 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34201
34202 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34203 seq_printf(seq, "%-#8x", ddm_table.module_id);
34204 - seq_printf(seq, "%-29s",
34205 - chtostr(ddm_table.module_name_version, 28));
34206 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34207 seq_printf(seq, "%9d ", ddm_table.data_size);
34208 seq_printf(seq, "%8d", ddm_table.code_size);
34209
34210 @@ -927,8 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34211
34212 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34213 seq_printf(seq, "%-#8x", dst->module_id);
34214 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34215 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34216 + seq_printf(seq, "%-.28s", dst->module_name_version);
34217 + seq_printf(seq, "%-.8s", dst->date);
34218 seq_printf(seq, "%8d ", dst->module_size);
34219 seq_printf(seq, "%8d ", dst->mpb_size);
34220 seq_printf(seq, "0x%04x", dst->module_flags);
34221 @@ -1259,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34222 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34223 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34224 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34225 - seq_printf(seq, "Vendor info : %s\n",
34226 - chtostr((u8 *) (work32 + 2), 16));
34227 - seq_printf(seq, "Product info : %s\n",
34228 - chtostr((u8 *) (work32 + 6), 16));
34229 - seq_printf(seq, "Description : %s\n",
34230 - chtostr((u8 *) (work32 + 10), 16));
34231 - seq_printf(seq, "Product rev. : %s\n",
34232 - chtostr((u8 *) (work32 + 14), 8));
34233 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34234 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34235 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34236 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34237
34238 seq_printf(seq, "Serial number : ");
34239 print_serial_number(seq, (u8 *) (work32 + 16),
34240 @@ -1311,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34241 }
34242
34243 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34244 - seq_printf(seq, "Module name : %s\n",
34245 - chtostr(result.module_name, 24));
34246 - seq_printf(seq, "Module revision : %s\n",
34247 - chtostr(result.module_rev, 8));
34248 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34249 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34250
34251 seq_printf(seq, "Serial number : ");
34252 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34253 @@ -1345,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34254 return 0;
34255 }
34256
34257 - seq_printf(seq, "Device name : %s\n",
34258 - chtostr(result.device_name, 64));
34259 - seq_printf(seq, "Service name : %s\n",
34260 - chtostr(result.service_name, 64));
34261 - seq_printf(seq, "Physical name : %s\n",
34262 - chtostr(result.physical_location, 64));
34263 - seq_printf(seq, "Instance number : %s\n",
34264 - chtostr(result.instance_number, 4));
34265 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34266 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34267 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34268 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34269
34270 return 0;
34271 }
34272 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34273 index a8c08f3..155fe3d 100644
34274 --- a/drivers/message/i2o/iop.c
34275 +++ b/drivers/message/i2o/iop.c
34276 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34277
34278 spin_lock_irqsave(&c->context_list_lock, flags);
34279
34280 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34281 - atomic_inc(&c->context_list_counter);
34282 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34283 + atomic_inc_unchecked(&c->context_list_counter);
34284
34285 - entry->context = atomic_read(&c->context_list_counter);
34286 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34287
34288 list_add(&entry->list, &c->context_list);
34289
34290 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34291
34292 #if BITS_PER_LONG == 64
34293 spin_lock_init(&c->context_list_lock);
34294 - atomic_set(&c->context_list_counter, 0);
34295 + atomic_set_unchecked(&c->context_list_counter, 0);
34296 INIT_LIST_HEAD(&c->context_list);
34297 #endif
34298
34299 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34300 index 7ce65f4..e66e9bc 100644
34301 --- a/drivers/mfd/abx500-core.c
34302 +++ b/drivers/mfd/abx500-core.c
34303 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34304
34305 struct abx500_device_entry {
34306 struct list_head list;
34307 - struct abx500_ops ops;
34308 + abx500_ops_no_const ops;
34309 struct device *dev;
34310 };
34311
34312 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34313 index 2ea9998..51dabee 100644
34314 --- a/drivers/mfd/janz-cmodio.c
34315 +++ b/drivers/mfd/janz-cmodio.c
34316 @@ -13,6 +13,7 @@
34317
34318 #include <linux/kernel.h>
34319 #include <linux/module.h>
34320 +#include <linux/slab.h>
34321 #include <linux/init.h>
34322 #include <linux/pci.h>
34323 #include <linux/interrupt.h>
34324 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34325 index a981e2a..5ca0c8b 100644
34326 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34327 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34328 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34329 * the lid is closed. This leads to interrupts as soon as a little move
34330 * is done.
34331 */
34332 - atomic_inc(&lis3->count);
34333 + atomic_inc_unchecked(&lis3->count);
34334
34335 wake_up_interruptible(&lis3->misc_wait);
34336 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34337 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34338 if (lis3->pm_dev)
34339 pm_runtime_get_sync(lis3->pm_dev);
34340
34341 - atomic_set(&lis3->count, 0);
34342 + atomic_set_unchecked(&lis3->count, 0);
34343 return 0;
34344 }
34345
34346 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34347 add_wait_queue(&lis3->misc_wait, &wait);
34348 while (true) {
34349 set_current_state(TASK_INTERRUPTIBLE);
34350 - data = atomic_xchg(&lis3->count, 0);
34351 + data = atomic_xchg_unchecked(&lis3->count, 0);
34352 if (data)
34353 break;
34354
34355 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34356 struct lis3lv02d, miscdev);
34357
34358 poll_wait(file, &lis3->misc_wait, wait);
34359 - if (atomic_read(&lis3->count))
34360 + if (atomic_read_unchecked(&lis3->count))
34361 return POLLIN | POLLRDNORM;
34362 return 0;
34363 }
34364 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34365 index 2b1482a..5d33616 100644
34366 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34367 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34368 @@ -266,7 +266,7 @@ struct lis3lv02d {
34369 struct input_polled_dev *idev; /* input device */
34370 struct platform_device *pdev; /* platform device */
34371 struct regulator_bulk_data regulators[2];
34372 - atomic_t count; /* interrupt count after last read */
34373 + atomic_unchecked_t count; /* interrupt count after last read */
34374 union axis_conversion ac; /* hw -> logical axis */
34375 int mapped_btns[3];
34376
34377 diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
34378 index 28adefe..08aad69 100644
34379 --- a/drivers/misc/lkdtm.c
34380 +++ b/drivers/misc/lkdtm.c
34381 @@ -477,6 +477,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
34382 int i, n, out;
34383
34384 buf = (char *)__get_free_page(GFP_KERNEL);
34385 + if (buf == NULL)
34386 + return -ENOMEM;
34387
34388 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
34389 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
34390 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34391 index 2f30bad..c4c13d0 100644
34392 --- a/drivers/misc/sgi-gru/gruhandles.c
34393 +++ b/drivers/misc/sgi-gru/gruhandles.c
34394 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34395 unsigned long nsec;
34396
34397 nsec = CLKS2NSEC(clks);
34398 - atomic_long_inc(&mcs_op_statistics[op].count);
34399 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34400 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34401 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34402 if (mcs_op_statistics[op].max < nsec)
34403 mcs_op_statistics[op].max = nsec;
34404 }
34405 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34406 index 950dbe9..eeef0f8 100644
34407 --- a/drivers/misc/sgi-gru/gruprocfs.c
34408 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34409 @@ -32,9 +32,9 @@
34410
34411 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34412
34413 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34414 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34415 {
34416 - unsigned long val = atomic_long_read(v);
34417 + unsigned long val = atomic_long_read_unchecked(v);
34418
34419 seq_printf(s, "%16lu %s\n", val, id);
34420 }
34421 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34422
34423 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34424 for (op = 0; op < mcsop_last; op++) {
34425 - count = atomic_long_read(&mcs_op_statistics[op].count);
34426 - total = atomic_long_read(&mcs_op_statistics[op].total);
34427 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34428 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34429 max = mcs_op_statistics[op].max;
34430 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34431 count ? total / count : 0, max);
34432 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34433 index 5c3ce24..4915ccb 100644
34434 --- a/drivers/misc/sgi-gru/grutables.h
34435 +++ b/drivers/misc/sgi-gru/grutables.h
34436 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34437 * GRU statistics.
34438 */
34439 struct gru_stats_s {
34440 - atomic_long_t vdata_alloc;
34441 - atomic_long_t vdata_free;
34442 - atomic_long_t gts_alloc;
34443 - atomic_long_t gts_free;
34444 - atomic_long_t gms_alloc;
34445 - atomic_long_t gms_free;
34446 - atomic_long_t gts_double_allocate;
34447 - atomic_long_t assign_context;
34448 - atomic_long_t assign_context_failed;
34449 - atomic_long_t free_context;
34450 - atomic_long_t load_user_context;
34451 - atomic_long_t load_kernel_context;
34452 - atomic_long_t lock_kernel_context;
34453 - atomic_long_t unlock_kernel_context;
34454 - atomic_long_t steal_user_context;
34455 - atomic_long_t steal_kernel_context;
34456 - atomic_long_t steal_context_failed;
34457 - atomic_long_t nopfn;
34458 - atomic_long_t asid_new;
34459 - atomic_long_t asid_next;
34460 - atomic_long_t asid_wrap;
34461 - atomic_long_t asid_reuse;
34462 - atomic_long_t intr;
34463 - atomic_long_t intr_cbr;
34464 - atomic_long_t intr_tfh;
34465 - atomic_long_t intr_spurious;
34466 - atomic_long_t intr_mm_lock_failed;
34467 - atomic_long_t call_os;
34468 - atomic_long_t call_os_wait_queue;
34469 - atomic_long_t user_flush_tlb;
34470 - atomic_long_t user_unload_context;
34471 - atomic_long_t user_exception;
34472 - atomic_long_t set_context_option;
34473 - atomic_long_t check_context_retarget_intr;
34474 - atomic_long_t check_context_unload;
34475 - atomic_long_t tlb_dropin;
34476 - atomic_long_t tlb_preload_page;
34477 - atomic_long_t tlb_dropin_fail_no_asid;
34478 - atomic_long_t tlb_dropin_fail_upm;
34479 - atomic_long_t tlb_dropin_fail_invalid;
34480 - atomic_long_t tlb_dropin_fail_range_active;
34481 - atomic_long_t tlb_dropin_fail_idle;
34482 - atomic_long_t tlb_dropin_fail_fmm;
34483 - atomic_long_t tlb_dropin_fail_no_exception;
34484 - atomic_long_t tfh_stale_on_fault;
34485 - atomic_long_t mmu_invalidate_range;
34486 - atomic_long_t mmu_invalidate_page;
34487 - atomic_long_t flush_tlb;
34488 - atomic_long_t flush_tlb_gru;
34489 - atomic_long_t flush_tlb_gru_tgh;
34490 - atomic_long_t flush_tlb_gru_zero_asid;
34491 + atomic_long_unchecked_t vdata_alloc;
34492 + atomic_long_unchecked_t vdata_free;
34493 + atomic_long_unchecked_t gts_alloc;
34494 + atomic_long_unchecked_t gts_free;
34495 + atomic_long_unchecked_t gms_alloc;
34496 + atomic_long_unchecked_t gms_free;
34497 + atomic_long_unchecked_t gts_double_allocate;
34498 + atomic_long_unchecked_t assign_context;
34499 + atomic_long_unchecked_t assign_context_failed;
34500 + atomic_long_unchecked_t free_context;
34501 + atomic_long_unchecked_t load_user_context;
34502 + atomic_long_unchecked_t load_kernel_context;
34503 + atomic_long_unchecked_t lock_kernel_context;
34504 + atomic_long_unchecked_t unlock_kernel_context;
34505 + atomic_long_unchecked_t steal_user_context;
34506 + atomic_long_unchecked_t steal_kernel_context;
34507 + atomic_long_unchecked_t steal_context_failed;
34508 + atomic_long_unchecked_t nopfn;
34509 + atomic_long_unchecked_t asid_new;
34510 + atomic_long_unchecked_t asid_next;
34511 + atomic_long_unchecked_t asid_wrap;
34512 + atomic_long_unchecked_t asid_reuse;
34513 + atomic_long_unchecked_t intr;
34514 + atomic_long_unchecked_t intr_cbr;
34515 + atomic_long_unchecked_t intr_tfh;
34516 + atomic_long_unchecked_t intr_spurious;
34517 + atomic_long_unchecked_t intr_mm_lock_failed;
34518 + atomic_long_unchecked_t call_os;
34519 + atomic_long_unchecked_t call_os_wait_queue;
34520 + atomic_long_unchecked_t user_flush_tlb;
34521 + atomic_long_unchecked_t user_unload_context;
34522 + atomic_long_unchecked_t user_exception;
34523 + atomic_long_unchecked_t set_context_option;
34524 + atomic_long_unchecked_t check_context_retarget_intr;
34525 + atomic_long_unchecked_t check_context_unload;
34526 + atomic_long_unchecked_t tlb_dropin;
34527 + atomic_long_unchecked_t tlb_preload_page;
34528 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34529 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34530 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34531 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34532 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34533 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34534 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34535 + atomic_long_unchecked_t tfh_stale_on_fault;
34536 + atomic_long_unchecked_t mmu_invalidate_range;
34537 + atomic_long_unchecked_t mmu_invalidate_page;
34538 + atomic_long_unchecked_t flush_tlb;
34539 + atomic_long_unchecked_t flush_tlb_gru;
34540 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34541 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34542
34543 - atomic_long_t copy_gpa;
34544 - atomic_long_t read_gpa;
34545 + atomic_long_unchecked_t copy_gpa;
34546 + atomic_long_unchecked_t read_gpa;
34547
34548 - atomic_long_t mesq_receive;
34549 - atomic_long_t mesq_receive_none;
34550 - atomic_long_t mesq_send;
34551 - atomic_long_t mesq_send_failed;
34552 - atomic_long_t mesq_noop;
34553 - atomic_long_t mesq_send_unexpected_error;
34554 - atomic_long_t mesq_send_lb_overflow;
34555 - atomic_long_t mesq_send_qlimit_reached;
34556 - atomic_long_t mesq_send_amo_nacked;
34557 - atomic_long_t mesq_send_put_nacked;
34558 - atomic_long_t mesq_page_overflow;
34559 - atomic_long_t mesq_qf_locked;
34560 - atomic_long_t mesq_qf_noop_not_full;
34561 - atomic_long_t mesq_qf_switch_head_failed;
34562 - atomic_long_t mesq_qf_unexpected_error;
34563 - atomic_long_t mesq_noop_unexpected_error;
34564 - atomic_long_t mesq_noop_lb_overflow;
34565 - atomic_long_t mesq_noop_qlimit_reached;
34566 - atomic_long_t mesq_noop_amo_nacked;
34567 - atomic_long_t mesq_noop_put_nacked;
34568 - atomic_long_t mesq_noop_page_overflow;
34569 + atomic_long_unchecked_t mesq_receive;
34570 + atomic_long_unchecked_t mesq_receive_none;
34571 + atomic_long_unchecked_t mesq_send;
34572 + atomic_long_unchecked_t mesq_send_failed;
34573 + atomic_long_unchecked_t mesq_noop;
34574 + atomic_long_unchecked_t mesq_send_unexpected_error;
34575 + atomic_long_unchecked_t mesq_send_lb_overflow;
34576 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34577 + atomic_long_unchecked_t mesq_send_amo_nacked;
34578 + atomic_long_unchecked_t mesq_send_put_nacked;
34579 + atomic_long_unchecked_t mesq_page_overflow;
34580 + atomic_long_unchecked_t mesq_qf_locked;
34581 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34582 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34583 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34584 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34585 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34586 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34587 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34588 + atomic_long_unchecked_t mesq_noop_put_nacked;
34589 + atomic_long_unchecked_t mesq_noop_page_overflow;
34590
34591 };
34592
34593 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34594 tghop_invalidate, mcsop_last};
34595
34596 struct mcs_op_statistic {
34597 - atomic_long_t count;
34598 - atomic_long_t total;
34599 + atomic_long_unchecked_t count;
34600 + atomic_long_unchecked_t total;
34601 unsigned long max;
34602 };
34603
34604 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34605
34606 #define STAT(id) do { \
34607 if (gru_options & OPT_STATS) \
34608 - atomic_long_inc(&gru_stats.id); \
34609 + atomic_long_inc_unchecked(&gru_stats.id); \
34610 } while (0)
34611
34612 #ifdef CONFIG_SGI_GRU_DEBUG
34613 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34614 index c862cd4..0d176fe 100644
34615 --- a/drivers/misc/sgi-xp/xp.h
34616 +++ b/drivers/misc/sgi-xp/xp.h
34617 @@ -288,7 +288,7 @@ struct xpc_interface {
34618 xpc_notify_func, void *);
34619 void (*received) (short, int, void *);
34620 enum xp_retval (*partid_to_nasids) (short, void *);
34621 -};
34622 +} __no_const;
34623
34624 extern struct xpc_interface xpc_interface;
34625
34626 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34627 index b94d5f7..7f494c5 100644
34628 --- a/drivers/misc/sgi-xp/xpc.h
34629 +++ b/drivers/misc/sgi-xp/xpc.h
34630 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34631 void (*received_payload) (struct xpc_channel *, void *);
34632 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34633 };
34634 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34635
34636 /* struct xpc_partition act_state values (for XPC HB) */
34637
34638 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34639 /* found in xpc_main.c */
34640 extern struct device *xpc_part;
34641 extern struct device *xpc_chan;
34642 -extern struct xpc_arch_operations xpc_arch_ops;
34643 +extern xpc_arch_operations_no_const xpc_arch_ops;
34644 extern int xpc_disengage_timelimit;
34645 extern int xpc_disengage_timedout;
34646 extern int xpc_activate_IRQ_rcvd;
34647 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34648 index 8d082b4..aa749ae 100644
34649 --- a/drivers/misc/sgi-xp/xpc_main.c
34650 +++ b/drivers/misc/sgi-xp/xpc_main.c
34651 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34652 .notifier_call = xpc_system_die,
34653 };
34654
34655 -struct xpc_arch_operations xpc_arch_ops;
34656 +xpc_arch_operations_no_const xpc_arch_ops;
34657
34658 /*
34659 * Timer function to enforce the timelimit on the partition disengage.
34660 diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
34661 index 2b62232..acfaeeb 100644
34662 --- a/drivers/misc/ti-st/st_core.c
34663 +++ b/drivers/misc/ti-st/st_core.c
34664 @@ -349,6 +349,11 @@ void st_int_recv(void *disc_data,
34665 st_gdata->rx_skb = alloc_skb(
34666 st_gdata->list[type]->max_frame_size,
34667 GFP_ATOMIC);
34668 + if (st_gdata->rx_skb == NULL) {
34669 + pr_err("out of memory: dropping\n");
34670 + goto done;
34671 + }
34672 +
34673 skb_reserve(st_gdata->rx_skb,
34674 st_gdata->list[type]->reserve);
34675 /* next 2 required for BT only */
34676 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34677 index 504da71..9722d43 100644
34678 --- a/drivers/mmc/host/sdhci-pci.c
34679 +++ b/drivers/mmc/host/sdhci-pci.c
34680 @@ -653,7 +653,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34681 .probe = via_probe,
34682 };
34683
34684 -static const struct pci_device_id pci_ids[] __devinitdata = {
34685 +static const struct pci_device_id pci_ids[] __devinitconst = {
34686 {
34687 .vendor = PCI_VENDOR_ID_RICOH,
34688 .device = PCI_DEVICE_ID_RICOH_R5C822,
34689 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34690 index a4eb8b5..8c0628f 100644
34691 --- a/drivers/mtd/devices/doc2000.c
34692 +++ b/drivers/mtd/devices/doc2000.c
34693 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34694
34695 /* The ECC will not be calculated correctly if less than 512 is written */
34696 /* DBB-
34697 - if (len != 0x200 && eccbuf)
34698 + if (len != 0x200)
34699 printk(KERN_WARNING
34700 "ECC needs a full sector write (adr: %lx size %lx)\n",
34701 (long) to, (long) len);
34702 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34703 index 0650aaf..7718762 100644
34704 --- a/drivers/mtd/nand/denali.c
34705 +++ b/drivers/mtd/nand/denali.c
34706 @@ -26,6 +26,7 @@
34707 #include <linux/pci.h>
34708 #include <linux/mtd/mtd.h>
34709 #include <linux/module.h>
34710 +#include <linux/slab.h>
34711
34712 #include "denali.h"
34713
34714 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34715 index 51b9d6a..52af9a7 100644
34716 --- a/drivers/mtd/nftlmount.c
34717 +++ b/drivers/mtd/nftlmount.c
34718 @@ -24,6 +24,7 @@
34719 #include <asm/errno.h>
34720 #include <linux/delay.h>
34721 #include <linux/slab.h>
34722 +#include <linux/sched.h>
34723 #include <linux/mtd/mtd.h>
34724 #include <linux/mtd/nand.h>
34725 #include <linux/mtd/nftl.h>
34726 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34727 index 6762dc4..9956862 100644
34728 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34729 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34730 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34731 */
34732
34733 #define ATL2_PARAM(X, desc) \
34734 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34735 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34736 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34737 MODULE_PARM_DESC(X, desc);
34738 #else
34739 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34740 index efd80bd..21fcff0 100644
34741 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34742 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34743 @@ -487,7 +487,7 @@ struct bnx2x_rx_mode_obj {
34744
34745 int (*wait_comp)(struct bnx2x *bp,
34746 struct bnx2x_rx_mode_ramrod_params *p);
34747 -};
34748 +} __no_const;
34749
34750 /********************** Set multicast group ***********************************/
34751
34752 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34753 index 93865f8..5448741 100644
34754 --- a/drivers/net/ethernet/broadcom/tg3.h
34755 +++ b/drivers/net/ethernet/broadcom/tg3.h
34756 @@ -140,6 +140,7 @@
34757 #define CHIPREV_ID_5750_A0 0x4000
34758 #define CHIPREV_ID_5750_A1 0x4001
34759 #define CHIPREV_ID_5750_A3 0x4003
34760 +#define CHIPREV_ID_5750_C1 0x4201
34761 #define CHIPREV_ID_5750_C2 0x4202
34762 #define CHIPREV_ID_5752_A0_HW 0x5000
34763 #define CHIPREV_ID_5752_A0 0x6000
34764 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34765 index c4e8643..0979484 100644
34766 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34767 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34768 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34769 */
34770 struct l2t_skb_cb {
34771 arp_failure_handler_func arp_failure_handler;
34772 -};
34773 +} __no_const;
34774
34775 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34776
34777 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34778 index d3cd489..0fd52dd 100644
34779 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34780 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34781 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34782 for (i=0; i<ETH_ALEN; i++) {
34783 tmp.addr[i] = dev->dev_addr[i];
34784 }
34785 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34786 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34787 break;
34788
34789 case DE4X5_SET_HWADDR: /* Set the hardware address */
34790 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34791 spin_lock_irqsave(&lp->lock, flags);
34792 memcpy(&statbuf, &lp->pktStats, ioc->len);
34793 spin_unlock_irqrestore(&lp->lock, flags);
34794 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34795 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34796 return -EFAULT;
34797 break;
34798 }
34799 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34800 index ed7d1dc..d426748 100644
34801 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34802 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34803 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34804 {NULL}};
34805
34806
34807 -static const char *block_name[] __devinitdata = {
34808 +static const char *block_name[] __devinitconst = {
34809 "21140 non-MII",
34810 "21140 MII PHY",
34811 "21142 Serial PHY",
34812 diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
34813 index 75d45f8..3d9c55b 100644
34814 --- a/drivers/net/ethernet/dec/tulip/uli526x.c
34815 +++ b/drivers/net/ethernet/dec/tulip/uli526x.c
34816 @@ -129,7 +129,7 @@ struct uli526x_board_info {
34817 struct uli_phy_ops {
34818 void (*write)(struct uli526x_board_info *, u8, u8, u16);
34819 u16 (*read)(struct uli526x_board_info *, u8, u8);
34820 - } phy;
34821 + } __no_const phy;
34822 struct net_device *next_dev; /* next device */
34823 struct pci_dev *pdev; /* PCI device */
34824 spinlock_t lock;
34825 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34826 index 4d1ffca..7c1ec4d 100644
34827 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34828 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34829 @@ -236,7 +236,7 @@ struct pci_id_info {
34830 int drv_flags; /* Driver use, intended as capability flags. */
34831 };
34832
34833 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34834 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34835 { /* Sometime a Level-One switch card. */
34836 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34837 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34838 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34839 index d7bb52a..3b83588 100644
34840 --- a/drivers/net/ethernet/dlink/sundance.c
34841 +++ b/drivers/net/ethernet/dlink/sundance.c
34842 @@ -218,7 +218,7 @@ enum {
34843 struct pci_id_info {
34844 const char *name;
34845 };
34846 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34847 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34848 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34849 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34850 {"D-Link DFE-580TX 4 port Server Adapter"},
34851 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34852 index bd5cf7e..c165651 100644
34853 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34854 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34855 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34856
34857 if (wrapped)
34858 newacc += 65536;
34859 - ACCESS_ONCE(*acc) = newacc;
34860 + ACCESS_ONCE_RW(*acc) = newacc;
34861 }
34862
34863 void be_parse_stats(struct be_adapter *adapter)
34864 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34865 index 16b0704..d2c07d7 100644
34866 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34867 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34868 @@ -31,6 +31,8 @@
34869 #include <linux/netdevice.h>
34870 #include <linux/phy.h>
34871 #include <linux/platform_device.h>
34872 +#include <linux/interrupt.h>
34873 +#include <linux/irqreturn.h>
34874 #include <net/ip.h>
34875
34876 #include "ftgmac100.h"
34877 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34878 index 829b109..4ae5f6a 100644
34879 --- a/drivers/net/ethernet/faraday/ftmac100.c
34880 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34881 @@ -31,6 +31,8 @@
34882 #include <linux/module.h>
34883 #include <linux/netdevice.h>
34884 #include <linux/platform_device.h>
34885 +#include <linux/interrupt.h>
34886 +#include <linux/irqreturn.h>
34887
34888 #include "ftmac100.h"
34889
34890 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34891 index 9d71c9c..0e4a0ac 100644
34892 --- a/drivers/net/ethernet/fealnx.c
34893 +++ b/drivers/net/ethernet/fealnx.c
34894 @@ -150,7 +150,7 @@ struct chip_info {
34895 int flags;
34896 };
34897
34898 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34899 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34900 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34901 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34902 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34903 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34904 index fa47b85..246edeb 100644
34905 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
34906 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34907 @@ -181,7 +181,7 @@ struct e1000_info;
34908 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34909 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34910 E1000_TXDCTL_COUNT_DESC | \
34911 - (5 << 16) | /* wthresh must be +1 more than desired */\
34912 + (1 << 16) | /* wthresh must be +1 more than desired */\
34913 (1 << 8) | /* hthresh */ \
34914 0x1f) /* pthresh */
34915
34916 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34917 index ed5b409..ec37828 100644
34918 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34919 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34920 @@ -797,6 +797,7 @@ struct e1000_mac_operations {
34921 void (*rar_set)(struct e1000_hw *, u8 *, u32);
34922 s32 (*read_mac_addr)(struct e1000_hw *);
34923 };
34924 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34925
34926 /*
34927 * When to use various PHY register access functions:
34928 @@ -837,6 +838,7 @@ struct e1000_phy_operations {
34929 void (*power_up)(struct e1000_hw *);
34930 void (*power_down)(struct e1000_hw *);
34931 };
34932 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34933
34934 /* Function pointers for the NVM. */
34935 struct e1000_nvm_operations {
34936 @@ -849,9 +851,10 @@ struct e1000_nvm_operations {
34937 s32 (*validate)(struct e1000_hw *);
34938 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34939 };
34940 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34941
34942 struct e1000_mac_info {
34943 - struct e1000_mac_operations ops;
34944 + e1000_mac_operations_no_const ops;
34945 u8 addr[ETH_ALEN];
34946 u8 perm_addr[ETH_ALEN];
34947
34948 @@ -892,7 +895,7 @@ struct e1000_mac_info {
34949 };
34950
34951 struct e1000_phy_info {
34952 - struct e1000_phy_operations ops;
34953 + e1000_phy_operations_no_const ops;
34954
34955 enum e1000_phy_type type;
34956
34957 @@ -926,7 +929,7 @@ struct e1000_phy_info {
34958 };
34959
34960 struct e1000_nvm_info {
34961 - struct e1000_nvm_operations ops;
34962 + e1000_nvm_operations_no_const ops;
34963
34964 enum e1000_nvm_type type;
34965 enum e1000_nvm_override override;
34966 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34967 index c2a51dc..c2bd262 100644
34968 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34969 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34970 @@ -327,6 +327,7 @@ struct e1000_mac_operations {
34971 void (*release_swfw_sync)(struct e1000_hw *, u16);
34972
34973 };
34974 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34975
34976 struct e1000_phy_operations {
34977 s32 (*acquire)(struct e1000_hw *);
34978 @@ -343,6 +344,7 @@ struct e1000_phy_operations {
34979 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34980 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34981 };
34982 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34983
34984 struct e1000_nvm_operations {
34985 s32 (*acquire)(struct e1000_hw *);
34986 @@ -353,6 +355,7 @@ struct e1000_nvm_operations {
34987 s32 (*validate)(struct e1000_hw *);
34988 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
34989 };
34990 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34991
34992 struct e1000_info {
34993 s32 (*get_invariants)(struct e1000_hw *);
34994 @@ -364,7 +367,7 @@ struct e1000_info {
34995 extern const struct e1000_info e1000_82575_info;
34996
34997 struct e1000_mac_info {
34998 - struct e1000_mac_operations ops;
34999 + e1000_mac_operations_no_const ops;
35000
35001 u8 addr[6];
35002 u8 perm_addr[6];
35003 @@ -402,7 +405,7 @@ struct e1000_mac_info {
35004 };
35005
35006 struct e1000_phy_info {
35007 - struct e1000_phy_operations ops;
35008 + e1000_phy_operations_no_const ops;
35009
35010 enum e1000_phy_type type;
35011
35012 @@ -437,7 +440,7 @@ struct e1000_phy_info {
35013 };
35014
35015 struct e1000_nvm_info {
35016 - struct e1000_nvm_operations ops;
35017 + e1000_nvm_operations_no_const ops;
35018 enum e1000_nvm_type type;
35019 enum e1000_nvm_override override;
35020
35021 @@ -482,6 +485,7 @@ struct e1000_mbx_operations {
35022 s32 (*check_for_ack)(struct e1000_hw *, u16);
35023 s32 (*check_for_rst)(struct e1000_hw *, u16);
35024 };
35025 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35026
35027 struct e1000_mbx_stats {
35028 u32 msgs_tx;
35029 @@ -493,7 +497,7 @@ struct e1000_mbx_stats {
35030 };
35031
35032 struct e1000_mbx_info {
35033 - struct e1000_mbx_operations ops;
35034 + e1000_mbx_operations_no_const ops;
35035 struct e1000_mbx_stats stats;
35036 u32 timeout;
35037 u32 usec_delay;
35038 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
35039 index 57db3c6..aa825fc 100644
35040 --- a/drivers/net/ethernet/intel/igbvf/vf.h
35041 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
35042 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
35043 s32 (*read_mac_addr)(struct e1000_hw *);
35044 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35045 };
35046 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35047
35048 struct e1000_mac_info {
35049 - struct e1000_mac_operations ops;
35050 + e1000_mac_operations_no_const ops;
35051 u8 addr[6];
35052 u8 perm_addr[6];
35053
35054 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35055 s32 (*check_for_ack)(struct e1000_hw *);
35056 s32 (*check_for_rst)(struct e1000_hw *);
35057 };
35058 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35059
35060 struct e1000_mbx_stats {
35061 u32 msgs_tx;
35062 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35063 };
35064
35065 struct e1000_mbx_info {
35066 - struct e1000_mbx_operations ops;
35067 + e1000_mbx_operations_no_const ops;
35068 struct e1000_mbx_stats stats;
35069 u32 timeout;
35070 u32 usec_delay;
35071 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35072 index dcebd12..c1fe8be 100644
35073 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35074 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35075 @@ -805,7 +805,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
35076 /* store the new cycle speed */
35077 adapter->cycle_speed = cycle_speed;
35078
35079 - ACCESS_ONCE(adapter->base_incval) = incval;
35080 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
35081 smp_mb();
35082
35083 /* grab the ptp lock */
35084 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35085 index 204848d..d8aeaec 100644
35086 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35087 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35088 @@ -2791,6 +2791,7 @@ struct ixgbe_eeprom_operations {
35089 s32 (*update_checksum)(struct ixgbe_hw *);
35090 u16 (*calc_checksum)(struct ixgbe_hw *);
35091 };
35092 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35093
35094 struct ixgbe_mac_operations {
35095 s32 (*init_hw)(struct ixgbe_hw *);
35096 @@ -2856,6 +2857,7 @@ struct ixgbe_mac_operations {
35097 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
35098 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
35099 };
35100 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35101
35102 struct ixgbe_phy_operations {
35103 s32 (*identify)(struct ixgbe_hw *);
35104 @@ -2875,9 +2877,10 @@ struct ixgbe_phy_operations {
35105 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35106 s32 (*check_overtemp)(struct ixgbe_hw *);
35107 };
35108 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35109
35110 struct ixgbe_eeprom_info {
35111 - struct ixgbe_eeprom_operations ops;
35112 + ixgbe_eeprom_operations_no_const ops;
35113 enum ixgbe_eeprom_type type;
35114 u32 semaphore_delay;
35115 u16 word_size;
35116 @@ -2887,7 +2890,7 @@ struct ixgbe_eeprom_info {
35117
35118 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35119 struct ixgbe_mac_info {
35120 - struct ixgbe_mac_operations ops;
35121 + ixgbe_mac_operations_no_const ops;
35122 enum ixgbe_mac_type type;
35123 u8 addr[ETH_ALEN];
35124 u8 perm_addr[ETH_ALEN];
35125 @@ -2916,7 +2919,7 @@ struct ixgbe_mac_info {
35126 };
35127
35128 struct ixgbe_phy_info {
35129 - struct ixgbe_phy_operations ops;
35130 + ixgbe_phy_operations_no_const ops;
35131 struct mdio_if_info mdio;
35132 enum ixgbe_phy_type type;
35133 u32 id;
35134 @@ -2944,6 +2947,7 @@ struct ixgbe_mbx_operations {
35135 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35136 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35137 };
35138 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35139
35140 struct ixgbe_mbx_stats {
35141 u32 msgs_tx;
35142 @@ -2955,7 +2959,7 @@ struct ixgbe_mbx_stats {
35143 };
35144
35145 struct ixgbe_mbx_info {
35146 - struct ixgbe_mbx_operations ops;
35147 + ixgbe_mbx_operations_no_const ops;
35148 struct ixgbe_mbx_stats stats;
35149 u32 timeout;
35150 u32 usec_delay;
35151 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35152 index 25c951d..cc7cf33 100644
35153 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35154 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35155 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35156 s32 (*clear_vfta)(struct ixgbe_hw *);
35157 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35158 };
35159 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35160
35161 enum ixgbe_mac_type {
35162 ixgbe_mac_unknown = 0,
35163 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35164 };
35165
35166 struct ixgbe_mac_info {
35167 - struct ixgbe_mac_operations ops;
35168 + ixgbe_mac_operations_no_const ops;
35169 u8 addr[6];
35170 u8 perm_addr[6];
35171
35172 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35173 s32 (*check_for_ack)(struct ixgbe_hw *);
35174 s32 (*check_for_rst)(struct ixgbe_hw *);
35175 };
35176 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35177
35178 struct ixgbe_mbx_stats {
35179 u32 msgs_tx;
35180 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35181 };
35182
35183 struct ixgbe_mbx_info {
35184 - struct ixgbe_mbx_operations ops;
35185 + ixgbe_mbx_operations_no_const ops;
35186 struct ixgbe_mbx_stats stats;
35187 u32 timeout;
35188 u32 udelay;
35189 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35190 index a0313de..e83a572 100644
35191 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35192 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35193 @@ -41,6 +41,7 @@
35194 #include <linux/slab.h>
35195 #include <linux/io-mapping.h>
35196 #include <linux/delay.h>
35197 +#include <linux/sched.h>
35198
35199 #include <linux/mlx4/device.h>
35200 #include <linux/mlx4/doorbell.h>
35201 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35202 index 5046a64..71ca936 100644
35203 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35204 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35205 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35206 void (*link_down)(struct __vxge_hw_device *devh);
35207 void (*crit_err)(struct __vxge_hw_device *devh,
35208 enum vxge_hw_event type, u64 ext_data);
35209 -};
35210 +} __no_const;
35211
35212 /*
35213 * struct __vxge_hw_blockpool_entry - Block private data structure
35214 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35215 index 4a518a3..936b334 100644
35216 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35217 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35218 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35219 struct vxge_hw_mempool_dma *dma_object,
35220 u32 index,
35221 u32 is_last);
35222 -};
35223 +} __no_const;
35224
35225 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35226 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35227 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35228 index eb81da4..1592b62 100644
35229 --- a/drivers/net/ethernet/realtek/r8169.c
35230 +++ b/drivers/net/ethernet/realtek/r8169.c
35231 @@ -723,22 +723,22 @@ struct rtl8169_private {
35232 struct mdio_ops {
35233 void (*write)(void __iomem *, int, int);
35234 int (*read)(void __iomem *, int);
35235 - } mdio_ops;
35236 + } __no_const mdio_ops;
35237
35238 struct pll_power_ops {
35239 void (*down)(struct rtl8169_private *);
35240 void (*up)(struct rtl8169_private *);
35241 - } pll_power_ops;
35242 + } __no_const pll_power_ops;
35243
35244 struct jumbo_ops {
35245 void (*enable)(struct rtl8169_private *);
35246 void (*disable)(struct rtl8169_private *);
35247 - } jumbo_ops;
35248 + } __no_const jumbo_ops;
35249
35250 struct csi_ops {
35251 void (*write)(void __iomem *, int, int);
35252 u32 (*read)(void __iomem *, int);
35253 - } csi_ops;
35254 + } __no_const csi_ops;
35255
35256 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35257 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35258 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35259 index 4613591..d816601 100644
35260 --- a/drivers/net/ethernet/sis/sis190.c
35261 +++ b/drivers/net/ethernet/sis/sis190.c
35262 @@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35263 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35264 struct net_device *dev)
35265 {
35266 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35267 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35268 struct sis190_private *tp = netdev_priv(dev);
35269 struct pci_dev *isa_bridge;
35270 u8 reg, tmp8;
35271 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35272 index c07cfe9..81cbf7e 100644
35273 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35274 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35275 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35276
35277 writel(value, ioaddr + MMC_CNTRL);
35278
35279 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35280 - MMC_CNTRL, value);
35281 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35282 +// MMC_CNTRL, value);
35283 }
35284
35285 /* To mask all all interrupts.*/
35286 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35287 index 2857ab0..9a1f9b0 100644
35288 --- a/drivers/net/hyperv/hyperv_net.h
35289 +++ b/drivers/net/hyperv/hyperv_net.h
35290 @@ -99,7 +99,7 @@ struct rndis_device {
35291
35292 enum rndis_device_state state;
35293 bool link_state;
35294 - atomic_t new_req_id;
35295 + atomic_unchecked_t new_req_id;
35296
35297 spinlock_t request_lock;
35298 struct list_head req_list;
35299 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35300 index 981ebb1..b34959b 100644
35301 --- a/drivers/net/hyperv/rndis_filter.c
35302 +++ b/drivers/net/hyperv/rndis_filter.c
35303 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35304 * template
35305 */
35306 set = &rndis_msg->msg.set_req;
35307 - set->req_id = atomic_inc_return(&dev->new_req_id);
35308 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35309
35310 /* Add to the request list */
35311 spin_lock_irqsave(&dev->request_lock, flags);
35312 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35313
35314 /* Setup the rndis set */
35315 halt = &request->request_msg.msg.halt_req;
35316 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35317 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35318
35319 /* Ignore return since this msg is optional. */
35320 rndis_filter_send_request(dev, request);
35321 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35322 index 5c05572..389610b 100644
35323 --- a/drivers/net/ppp/ppp_generic.c
35324 +++ b/drivers/net/ppp/ppp_generic.c
35325 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35326 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35327 struct ppp_stats stats;
35328 struct ppp_comp_stats cstats;
35329 - char *vers;
35330
35331 switch (cmd) {
35332 case SIOCGPPPSTATS:
35333 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35334 break;
35335
35336 case SIOCGPPPVER:
35337 - vers = PPP_VERSION;
35338 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35339 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35340 break;
35341 err = 0;
35342 break;
35343 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
35344 index 5fb59ae..989715a 100644
35345 --- a/drivers/net/tun.c
35346 +++ b/drivers/net/tun.c
35347 @@ -1243,7 +1243,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
35348 }
35349
35350 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
35351 - unsigned long arg, int ifreq_len)
35352 + unsigned long arg, size_t ifreq_len)
35353 {
35354 struct tun_file *tfile = file->private_data;
35355 struct tun_struct *tun;
35356 @@ -1254,6 +1254,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
35357 int vnet_hdr_sz;
35358 int ret;
35359
35360 + if (ifreq_len > sizeof ifr)
35361 + return -EFAULT;
35362 +
35363 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
35364 if (copy_from_user(&ifr, argp, ifreq_len))
35365 return -EFAULT;
35366 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35367 index 62f30b4..ff99dfd 100644
35368 --- a/drivers/net/usb/hso.c
35369 +++ b/drivers/net/usb/hso.c
35370 @@ -71,7 +71,7 @@
35371 #include <asm/byteorder.h>
35372 #include <linux/serial_core.h>
35373 #include <linux/serial.h>
35374 -
35375 +#include <asm/local.h>
35376
35377 #define MOD_AUTHOR "Option Wireless"
35378 #define MOD_DESCRIPTION "USB High Speed Option driver"
35379 @@ -1182,7 +1182,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35380 struct urb *urb;
35381
35382 urb = serial->rx_urb[0];
35383 - if (serial->port.count > 0) {
35384 + if (atomic_read(&serial->port.count) > 0) {
35385 count = put_rxbuf_data(urb, serial);
35386 if (count == -1)
35387 return;
35388 @@ -1218,7 +1218,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35389 DUMP1(urb->transfer_buffer, urb->actual_length);
35390
35391 /* Anyone listening? */
35392 - if (serial->port.count == 0)
35393 + if (atomic_read(&serial->port.count) == 0)
35394 return;
35395
35396 if (status == 0) {
35397 @@ -1300,8 +1300,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35398 tty_port_tty_set(&serial->port, tty);
35399
35400 /* check for port already opened, if not set the termios */
35401 - serial->port.count++;
35402 - if (serial->port.count == 1) {
35403 + if (atomic_inc_return(&serial->port.count) == 1) {
35404 serial->rx_state = RX_IDLE;
35405 /* Force default termio settings */
35406 _hso_serial_set_termios(tty, NULL);
35407 @@ -1313,7 +1312,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35408 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35409 if (result) {
35410 hso_stop_serial_device(serial->parent);
35411 - serial->port.count--;
35412 + atomic_dec(&serial->port.count);
35413 kref_put(&serial->parent->ref, hso_serial_ref_free);
35414 }
35415 } else {
35416 @@ -1350,10 +1349,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35417
35418 /* reset the rts and dtr */
35419 /* do the actual close */
35420 - serial->port.count--;
35421 + atomic_dec(&serial->port.count);
35422
35423 - if (serial->port.count <= 0) {
35424 - serial->port.count = 0;
35425 + if (atomic_read(&serial->port.count) <= 0) {
35426 + atomic_set(&serial->port.count, 0);
35427 tty_port_tty_set(&serial->port, NULL);
35428 if (!usb_gone)
35429 hso_stop_serial_device(serial->parent);
35430 @@ -1429,7 +1428,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35431
35432 /* the actual setup */
35433 spin_lock_irqsave(&serial->serial_lock, flags);
35434 - if (serial->port.count)
35435 + if (atomic_read(&serial->port.count))
35436 _hso_serial_set_termios(tty, old);
35437 else
35438 tty->termios = old;
35439 @@ -1888,7 +1887,7 @@ static void intr_callback(struct urb *urb)
35440 D1("Pending read interrupt on port %d\n", i);
35441 spin_lock(&serial->serial_lock);
35442 if (serial->rx_state == RX_IDLE &&
35443 - serial->port.count > 0) {
35444 + atomic_read(&serial->port.count) > 0) {
35445 /* Setup and send a ctrl req read on
35446 * port i */
35447 if (!serial->rx_urb_filled[0]) {
35448 @@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
35449 /* Start all serial ports */
35450 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35451 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35452 - if (dev2ser(serial_table[i])->port.count) {
35453 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
35454 result =
35455 hso_start_serial_device(serial_table[i], GFP_NOIO);
35456 hso_kick_transmit(dev2ser(serial_table[i]));
35457 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35458 index 420d69b..74f90a2 100644
35459 --- a/drivers/net/wireless/ath/ath.h
35460 +++ b/drivers/net/wireless/ath/ath.h
35461 @@ -119,6 +119,7 @@ struct ath_ops {
35462 void (*write_flush) (void *);
35463 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35464 };
35465 +typedef struct ath_ops __no_const ath_ops_no_const;
35466
35467 struct ath_common;
35468 struct ath_bus_ops;
35469 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35470 index 8d78253..bebbb68 100644
35471 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35472 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35473 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35474 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35475 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35476
35477 - ACCESS_ONCE(ads->ds_link) = i->link;
35478 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35479 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35480 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35481
35482 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35483 ctl6 = SM(i->keytype, AR_EncrType);
35484 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35485
35486 if ((i->is_first || i->is_last) &&
35487 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35488 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35489 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35490 | set11nTries(i->rates, 1)
35491 | set11nTries(i->rates, 2)
35492 | set11nTries(i->rates, 3)
35493 | (i->dur_update ? AR_DurUpdateEna : 0)
35494 | SM(0, AR_BurstDur);
35495
35496 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35497 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35498 | set11nRate(i->rates, 1)
35499 | set11nRate(i->rates, 2)
35500 | set11nRate(i->rates, 3);
35501 } else {
35502 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35503 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35504 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35505 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35506 }
35507
35508 if (!i->is_first) {
35509 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35510 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35511 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35512 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35513 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35514 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35515 return;
35516 }
35517
35518 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35519 break;
35520 }
35521
35522 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35523 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35524 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35525 | SM(i->txpower, AR_XmitPower)
35526 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35527 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35528 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35529 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35530
35531 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35532 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35533 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35534 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35535
35536 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35537 return;
35538
35539 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35540 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35541 | set11nPktDurRTSCTS(i->rates, 1);
35542
35543 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35544 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35545 | set11nPktDurRTSCTS(i->rates, 3);
35546
35547 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35548 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35549 | set11nRateFlags(i->rates, 1)
35550 | set11nRateFlags(i->rates, 2)
35551 | set11nRateFlags(i->rates, 3)
35552 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35553 index d9e0824..1a874e7 100644
35554 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35555 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35556 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35557 (i->qcu << AR_TxQcuNum_S) | desc_len;
35558
35559 checksum += val;
35560 - ACCESS_ONCE(ads->info) = val;
35561 + ACCESS_ONCE_RW(ads->info) = val;
35562
35563 checksum += i->link;
35564 - ACCESS_ONCE(ads->link) = i->link;
35565 + ACCESS_ONCE_RW(ads->link) = i->link;
35566
35567 checksum += i->buf_addr[0];
35568 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35569 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35570 checksum += i->buf_addr[1];
35571 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35572 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35573 checksum += i->buf_addr[2];
35574 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35575 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35576 checksum += i->buf_addr[3];
35577 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35578 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35579
35580 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35581 - ACCESS_ONCE(ads->ctl3) = val;
35582 + ACCESS_ONCE_RW(ads->ctl3) = val;
35583 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35584 - ACCESS_ONCE(ads->ctl5) = val;
35585 + ACCESS_ONCE_RW(ads->ctl5) = val;
35586 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35587 - ACCESS_ONCE(ads->ctl7) = val;
35588 + ACCESS_ONCE_RW(ads->ctl7) = val;
35589 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35590 - ACCESS_ONCE(ads->ctl9) = val;
35591 + ACCESS_ONCE_RW(ads->ctl9) = val;
35592
35593 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35594 - ACCESS_ONCE(ads->ctl10) = checksum;
35595 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35596
35597 if (i->is_first || i->is_last) {
35598 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35599 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35600 | set11nTries(i->rates, 1)
35601 | set11nTries(i->rates, 2)
35602 | set11nTries(i->rates, 3)
35603 | (i->dur_update ? AR_DurUpdateEna : 0)
35604 | SM(0, AR_BurstDur);
35605
35606 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35607 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35608 | set11nRate(i->rates, 1)
35609 | set11nRate(i->rates, 2)
35610 | set11nRate(i->rates, 3);
35611 } else {
35612 - ACCESS_ONCE(ads->ctl13) = 0;
35613 - ACCESS_ONCE(ads->ctl14) = 0;
35614 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35615 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35616 }
35617
35618 ads->ctl20 = 0;
35619 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35620
35621 ctl17 = SM(i->keytype, AR_EncrType);
35622 if (!i->is_first) {
35623 - ACCESS_ONCE(ads->ctl11) = 0;
35624 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35625 - ACCESS_ONCE(ads->ctl15) = 0;
35626 - ACCESS_ONCE(ads->ctl16) = 0;
35627 - ACCESS_ONCE(ads->ctl17) = ctl17;
35628 - ACCESS_ONCE(ads->ctl18) = 0;
35629 - ACCESS_ONCE(ads->ctl19) = 0;
35630 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35631 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35632 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35633 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35634 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35635 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35636 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35637 return;
35638 }
35639
35640 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35641 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35642 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35643 | SM(i->txpower, AR_XmitPower)
35644 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35645 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35646 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35647 ctl12 |= SM(val, AR_PAPRDChainMask);
35648
35649 - ACCESS_ONCE(ads->ctl12) = ctl12;
35650 - ACCESS_ONCE(ads->ctl17) = ctl17;
35651 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35652 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35653
35654 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35655 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35656 | set11nPktDurRTSCTS(i->rates, 1);
35657
35658 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35659 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35660 | set11nPktDurRTSCTS(i->rates, 3);
35661
35662 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35663 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35664 | set11nRateFlags(i->rates, 1)
35665 | set11nRateFlags(i->rates, 2)
35666 | set11nRateFlags(i->rates, 3)
35667 | SM(i->rtscts_rate, AR_RTSCTSRate);
35668
35669 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35670 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35671 }
35672
35673 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35674 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35675 index 02f5007..bd0bd8f 100644
35676 --- a/drivers/net/wireless/ath/ath9k/hw.h
35677 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35678 @@ -610,7 +610,7 @@ struct ath_hw_private_ops {
35679
35680 /* ANI */
35681 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35682 -};
35683 +} __no_const;
35684
35685 /**
35686 * struct ath_hw_ops - callbacks used by hardware code and driver code
35687 @@ -640,7 +640,7 @@ struct ath_hw_ops {
35688 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35689 struct ath_hw_antcomb_conf *antconf);
35690
35691 -};
35692 +} __no_const;
35693
35694 struct ath_nf_limits {
35695 s16 max;
35696 @@ -660,7 +660,7 @@ enum ath_cal_list {
35697 #define AH_FASTCC 0x4
35698
35699 struct ath_hw {
35700 - struct ath_ops reg_ops;
35701 + ath_ops_no_const reg_ops;
35702
35703 struct ieee80211_hw *hw;
35704 struct ath_common common;
35705 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35706 index af00e2c..ab04d34 100644
35707 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35708 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35709 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35710 void (*carrsuppr)(struct brcms_phy *);
35711 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35712 void (*detach)(struct brcms_phy *);
35713 -};
35714 +} __no_const;
35715
35716 struct brcms_phy {
35717 struct brcms_phy_pub pubpi_ro;
35718 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35719 index faec404..a5277f1 100644
35720 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35721 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35722 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35723 */
35724 if (il3945_mod_params.disable_hw_scan) {
35725 D_INFO("Disabling hw_scan\n");
35726 - il3945_mac_ops.hw_scan = NULL;
35727 + pax_open_kernel();
35728 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35729 + pax_close_kernel();
35730 }
35731
35732 D_INFO("*** LOAD DRIVER ***\n");
35733 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35734 index 7f97dec..a41d2cf 100644
35735 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35736 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35737 @@ -204,7 +204,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
35738 {
35739 struct iwl_priv *priv = file->private_data;
35740 char buf[64];
35741 - int buf_size;
35742 + size_t buf_size;
35743 u32 offset, len;
35744
35745 memset(buf, 0, sizeof(buf));
35746 @@ -481,7 +481,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
35747 struct iwl_priv *priv = file->private_data;
35748
35749 char buf[8];
35750 - int buf_size;
35751 + size_t buf_size;
35752 u32 reset_flag;
35753
35754 memset(buf, 0, sizeof(buf));
35755 @@ -562,7 +562,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
35756 {
35757 struct iwl_priv *priv = file->private_data;
35758 char buf[8];
35759 - int buf_size;
35760 + size_t buf_size;
35761 int ht40;
35762
35763 memset(buf, 0, sizeof(buf));
35764 @@ -614,7 +614,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
35765 {
35766 struct iwl_priv *priv = file->private_data;
35767 char buf[8];
35768 - int buf_size;
35769 + size_t buf_size;
35770 int value;
35771
35772 memset(buf, 0, sizeof(buf));
35773 @@ -1879,7 +1879,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
35774 {
35775 struct iwl_priv *priv = file->private_data;
35776 char buf[8];
35777 - int buf_size;
35778 + size_t buf_size;
35779 int clear;
35780
35781 memset(buf, 0, sizeof(buf));
35782 @@ -1924,7 +1924,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
35783 {
35784 struct iwl_priv *priv = file->private_data;
35785 char buf[8];
35786 - int buf_size;
35787 + size_t buf_size;
35788 int trace;
35789
35790 memset(buf, 0, sizeof(buf));
35791 @@ -1995,7 +1995,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
35792 {
35793 struct iwl_priv *priv = file->private_data;
35794 char buf[8];
35795 - int buf_size;
35796 + size_t buf_size;
35797 int missed;
35798
35799 memset(buf, 0, sizeof(buf));
35800 @@ -2036,7 +2036,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
35801
35802 struct iwl_priv *priv = file->private_data;
35803 char buf[8];
35804 - int buf_size;
35805 + size_t buf_size;
35806 int plcp;
35807
35808 memset(buf, 0, sizeof(buf));
35809 @@ -2096,7 +2096,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
35810
35811 struct iwl_priv *priv = file->private_data;
35812 char buf[8];
35813 - int buf_size;
35814 + size_t buf_size;
35815 int flush;
35816
35817 memset(buf, 0, sizeof(buf));
35818 @@ -2186,7 +2186,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
35819
35820 struct iwl_priv *priv = file->private_data;
35821 char buf[8];
35822 - int buf_size;
35823 + size_t buf_size;
35824 int rts;
35825
35826 if (!priv->cfg->ht_params)
35827 @@ -2228,7 +2228,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
35828 {
35829 struct iwl_priv *priv = file->private_data;
35830 char buf[8];
35831 - int buf_size;
35832 + size_t buf_size;
35833
35834 memset(buf, 0, sizeof(buf));
35835 buf_size = min(count, sizeof(buf) - 1);
35836 @@ -2264,7 +2264,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
35837 struct iwl_priv *priv = file->private_data;
35838 u32 event_log_flag;
35839 char buf[8];
35840 - int buf_size;
35841 + size_t buf_size;
35842
35843 memset(buf, 0, sizeof(buf));
35844 buf_size = min(count, sizeof(buf) - 1);
35845 @@ -2314,7 +2314,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
35846 struct iwl_priv *priv = file->private_data;
35847 char buf[8];
35848 u32 calib_disabled;
35849 - int buf_size;
35850 + size_t buf_size;
35851
35852 memset(buf, 0, sizeof(buf));
35853 buf_size = min(count, sizeof(buf) - 1);
35854 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
35855 index 79c6b91..c75e8a3 100644
35856 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
35857 +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
35858 @@ -1948,7 +1948,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
35859 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
35860
35861 char buf[8];
35862 - int buf_size;
35863 + size_t buf_size;
35864 u32 reset_flag;
35865
35866 memset(buf, 0, sizeof(buf));
35867 @@ -1969,7 +1969,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
35868 {
35869 struct iwl_trans *trans = file->private_data;
35870 char buf[8];
35871 - int buf_size;
35872 + size_t buf_size;
35873 int csr;
35874
35875 memset(buf, 0, sizeof(buf));
35876 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35877 index a0b7cfd..20b49f7 100644
35878 --- a/drivers/net/wireless/mac80211_hwsim.c
35879 +++ b/drivers/net/wireless/mac80211_hwsim.c
35880 @@ -1752,9 +1752,11 @@ static int __init init_mac80211_hwsim(void)
35881 return -EINVAL;
35882
35883 if (fake_hw_scan) {
35884 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35885 - mac80211_hwsim_ops.sw_scan_start = NULL;
35886 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35887 + pax_open_kernel();
35888 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35889 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35890 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35891 + pax_close_kernel();
35892 }
35893
35894 spin_lock_init(&hwsim_radio_lock);
35895 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35896 index bd3b0bf..f9db92a 100644
35897 --- a/drivers/net/wireless/mwifiex/main.h
35898 +++ b/drivers/net/wireless/mwifiex/main.h
35899 @@ -567,7 +567,7 @@ struct mwifiex_if_ops {
35900 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35901 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
35902 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
35903 -};
35904 +} __no_const;
35905
35906 struct mwifiex_adapter {
35907 u8 iface_type;
35908 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35909 index dfcd02a..a42a59d 100644
35910 --- a/drivers/net/wireless/rndis_wlan.c
35911 +++ b/drivers/net/wireless/rndis_wlan.c
35912 @@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35913
35914 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35915
35916 - if (rts_threshold < 0 || rts_threshold > 2347)
35917 + if (rts_threshold > 2347)
35918 rts_threshold = 2347;
35919
35920 tmp = cpu_to_le32(rts_threshold);
35921 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35922 index 8f75402..eed109d 100644
35923 --- a/drivers/net/wireless/rt2x00/rt2x00.h
35924 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
35925 @@ -396,7 +396,7 @@ struct rt2x00_intf {
35926 * for hardware which doesn't support hardware
35927 * sequence counting.
35928 */
35929 - atomic_t seqno;
35930 + atomic_unchecked_t seqno;
35931 };
35932
35933 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35934 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35935 index 2fd8301..9767e8c 100644
35936 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35937 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35938 @@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35939 * sequence counter given by mac80211.
35940 */
35941 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35942 - seqno = atomic_add_return(0x10, &intf->seqno);
35943 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35944 else
35945 - seqno = atomic_read(&intf->seqno);
35946 + seqno = atomic_read_unchecked(&intf->seqno);
35947
35948 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35949 hdr->seq_ctrl |= cpu_to_le16(seqno);
35950 diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
35951 index 9d8f581..0f6589e 100644
35952 --- a/drivers/net/wireless/ti/wl1251/wl1251.h
35953 +++ b/drivers/net/wireless/ti/wl1251/wl1251.h
35954 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35955 void (*reset)(struct wl1251 *wl);
35956 void (*enable_irq)(struct wl1251 *wl);
35957 void (*disable_irq)(struct wl1251 *wl);
35958 -};
35959 +} __no_const;
35960
35961 struct wl1251 {
35962 struct ieee80211_hw *hw;
35963 diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
35964 index 0b3f0b5..62f68bd 100644
35965 --- a/drivers/net/wireless/ti/wlcore/wlcore.h
35966 +++ b/drivers/net/wireless/ti/wlcore/wlcore.h
35967 @@ -61,7 +61,7 @@ struct wlcore_ops {
35968 struct wl12xx_vif *wlvif);
35969 s8 (*get_pg_ver)(struct wl1271 *wl);
35970 void (*get_mac)(struct wl1271 *wl);
35971 -};
35972 +} __no_const;
35973
35974 enum wlcore_partitions {
35975 PART_DOWN,
35976 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35977 index f34b5b2..b5abb9f 100644
35978 --- a/drivers/oprofile/buffer_sync.c
35979 +++ b/drivers/oprofile/buffer_sync.c
35980 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35981 if (cookie == NO_COOKIE)
35982 offset = pc;
35983 if (cookie == INVALID_COOKIE) {
35984 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35985 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35986 offset = pc;
35987 }
35988 if (cookie != last_cookie) {
35989 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35990 /* add userspace sample */
35991
35992 if (!mm) {
35993 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35994 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35995 return 0;
35996 }
35997
35998 cookie = lookup_dcookie(mm, s->eip, &offset);
35999
36000 if (cookie == INVALID_COOKIE) {
36001 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36002 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36003 return 0;
36004 }
36005
36006 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
36007 /* ignore backtraces if failed to add a sample */
36008 if (state == sb_bt_start) {
36009 state = sb_bt_ignore;
36010 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36011 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36012 }
36013 }
36014 release_mm(mm);
36015 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
36016 index c0cc4e7..44d4e54 100644
36017 --- a/drivers/oprofile/event_buffer.c
36018 +++ b/drivers/oprofile/event_buffer.c
36019 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
36020 }
36021
36022 if (buffer_pos == buffer_size) {
36023 - atomic_inc(&oprofile_stats.event_lost_overflow);
36024 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36025 return;
36026 }
36027
36028 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
36029 index ed2c3ec..deda85a 100644
36030 --- a/drivers/oprofile/oprof.c
36031 +++ b/drivers/oprofile/oprof.c
36032 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
36033 if (oprofile_ops.switch_events())
36034 return;
36035
36036 - atomic_inc(&oprofile_stats.multiplex_counter);
36037 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36038 start_switch_worker();
36039 }
36040
36041 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
36042 index 917d28e..d62d981 100644
36043 --- a/drivers/oprofile/oprofile_stats.c
36044 +++ b/drivers/oprofile/oprofile_stats.c
36045 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36046 cpu_buf->sample_invalid_eip = 0;
36047 }
36048
36049 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36050 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36051 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
36052 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36053 - atomic_set(&oprofile_stats.multiplex_counter, 0);
36054 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36055 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36056 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36057 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36058 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36059 }
36060
36061
36062 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
36063 index 38b6fc0..b5cbfce 100644
36064 --- a/drivers/oprofile/oprofile_stats.h
36065 +++ b/drivers/oprofile/oprofile_stats.h
36066 @@ -13,11 +13,11 @@
36067 #include <linux/atomic.h>
36068
36069 struct oprofile_stat_struct {
36070 - atomic_t sample_lost_no_mm;
36071 - atomic_t sample_lost_no_mapping;
36072 - atomic_t bt_lost_no_mapping;
36073 - atomic_t event_lost_overflow;
36074 - atomic_t multiplex_counter;
36075 + atomic_unchecked_t sample_lost_no_mm;
36076 + atomic_unchecked_t sample_lost_no_mapping;
36077 + atomic_unchecked_t bt_lost_no_mapping;
36078 + atomic_unchecked_t event_lost_overflow;
36079 + atomic_unchecked_t multiplex_counter;
36080 };
36081
36082 extern struct oprofile_stat_struct oprofile_stats;
36083 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
36084 index 849357c..b83c1e0 100644
36085 --- a/drivers/oprofile/oprofilefs.c
36086 +++ b/drivers/oprofile/oprofilefs.c
36087 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
36088
36089
36090 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36091 - char const *name, atomic_t *val)
36092 + char const *name, atomic_unchecked_t *val)
36093 {
36094 return __oprofilefs_create_file(sb, root, name,
36095 &atomic_ro_fops, 0444, val);
36096 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36097 index 3f56bc0..707d642 100644
36098 --- a/drivers/parport/procfs.c
36099 +++ b/drivers/parport/procfs.c
36100 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36101
36102 *ppos += len;
36103
36104 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36105 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36106 }
36107
36108 #ifdef CONFIG_PARPORT_1284
36109 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36110
36111 *ppos += len;
36112
36113 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36114 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36115 }
36116 #endif /* IEEE1284.3 support. */
36117
36118 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36119 index 9fff878..ad0ad53 100644
36120 --- a/drivers/pci/hotplug/cpci_hotplug.h
36121 +++ b/drivers/pci/hotplug/cpci_hotplug.h
36122 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36123 int (*hardware_test) (struct slot* slot, u32 value);
36124 u8 (*get_power) (struct slot* slot);
36125 int (*set_power) (struct slot* slot, int value);
36126 -};
36127 +} __no_const;
36128
36129 struct cpci_hp_controller {
36130 unsigned int irq;
36131 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36132 index 76ba8a1..20ca857 100644
36133 --- a/drivers/pci/hotplug/cpqphp_nvram.c
36134 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
36135 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36136
36137 void compaq_nvram_init (void __iomem *rom_start)
36138 {
36139 +
36140 +#ifndef CONFIG_PAX_KERNEXEC
36141 if (rom_start) {
36142 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36143 }
36144 +#endif
36145 +
36146 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36147
36148 /* initialize our int15 lock */
36149 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36150 index b500840..d7159d3 100644
36151 --- a/drivers/pci/pcie/aspm.c
36152 +++ b/drivers/pci/pcie/aspm.c
36153 @@ -27,9 +27,9 @@
36154 #define MODULE_PARAM_PREFIX "pcie_aspm."
36155
36156 /* Note: those are not register definitions */
36157 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36158 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36159 -#define ASPM_STATE_L1 (4) /* L1 state */
36160 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36161 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36162 +#define ASPM_STATE_L1 (4U) /* L1 state */
36163 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36164 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36165
36166 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36167 index 658ac97..05e1b90 100644
36168 --- a/drivers/pci/probe.c
36169 +++ b/drivers/pci/probe.c
36170 @@ -137,7 +137,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36171 u16 orig_cmd;
36172 struct pci_bus_region region;
36173
36174 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36175 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36176
36177 if (!dev->mmio_always_on) {
36178 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36179 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36180 index 27911b5..5b6db88 100644
36181 --- a/drivers/pci/proc.c
36182 +++ b/drivers/pci/proc.c
36183 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36184 static int __init pci_proc_init(void)
36185 {
36186 struct pci_dev *dev = NULL;
36187 +
36188 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36189 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36190 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36191 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36192 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36193 +#endif
36194 +#else
36195 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36196 +#endif
36197 proc_create("devices", 0, proc_bus_pci_dir,
36198 &proc_bus_pci_dev_operations);
36199 proc_initialized = 1;
36200 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36201 index 8b5610d..a4c22bb 100644
36202 --- a/drivers/platform/x86/thinkpad_acpi.c
36203 +++ b/drivers/platform/x86/thinkpad_acpi.c
36204 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36205 return 0;
36206 }
36207
36208 -void static hotkey_mask_warn_incomplete_mask(void)
36209 +static void hotkey_mask_warn_incomplete_mask(void)
36210 {
36211 /* log only what the user can fix... */
36212 const u32 wantedmask = hotkey_driver_mask &
36213 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36214 }
36215 }
36216
36217 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36218 - struct tp_nvram_state *newn,
36219 - const u32 event_mask)
36220 -{
36221 -
36222 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36223 do { \
36224 if ((event_mask & (1 << __scancode)) && \
36225 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36226 tpacpi_hotkey_send_key(__scancode); \
36227 } while (0)
36228
36229 - void issue_volchange(const unsigned int oldvol,
36230 - const unsigned int newvol)
36231 - {
36232 - unsigned int i = oldvol;
36233 +static void issue_volchange(const unsigned int oldvol,
36234 + const unsigned int newvol,
36235 + const u32 event_mask)
36236 +{
36237 + unsigned int i = oldvol;
36238
36239 - while (i > newvol) {
36240 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36241 - i--;
36242 - }
36243 - while (i < newvol) {
36244 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36245 - i++;
36246 - }
36247 + while (i > newvol) {
36248 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36249 + i--;
36250 }
36251 + while (i < newvol) {
36252 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36253 + i++;
36254 + }
36255 +}
36256
36257 - void issue_brightnesschange(const unsigned int oldbrt,
36258 - const unsigned int newbrt)
36259 - {
36260 - unsigned int i = oldbrt;
36261 +static void issue_brightnesschange(const unsigned int oldbrt,
36262 + const unsigned int newbrt,
36263 + const u32 event_mask)
36264 +{
36265 + unsigned int i = oldbrt;
36266
36267 - while (i > newbrt) {
36268 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36269 - i--;
36270 - }
36271 - while (i < newbrt) {
36272 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36273 - i++;
36274 - }
36275 + while (i > newbrt) {
36276 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36277 + i--;
36278 + }
36279 + while (i < newbrt) {
36280 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36281 + i++;
36282 }
36283 +}
36284
36285 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36286 + struct tp_nvram_state *newn,
36287 + const u32 event_mask)
36288 +{
36289 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36290 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36291 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36292 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36293 oldn->volume_level != newn->volume_level) {
36294 /* recently muted, or repeated mute keypress, or
36295 * multiple presses ending in mute */
36296 - issue_volchange(oldn->volume_level, newn->volume_level);
36297 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36298 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36299 }
36300 } else {
36301 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36302 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36303 }
36304 if (oldn->volume_level != newn->volume_level) {
36305 - issue_volchange(oldn->volume_level, newn->volume_level);
36306 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36307 } else if (oldn->volume_toggle != newn->volume_toggle) {
36308 /* repeated vol up/down keypress at end of scale ? */
36309 if (newn->volume_level == 0)
36310 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36311 /* handle brightness */
36312 if (oldn->brightness_level != newn->brightness_level) {
36313 issue_brightnesschange(oldn->brightness_level,
36314 - newn->brightness_level);
36315 + newn->brightness_level,
36316 + event_mask);
36317 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36318 /* repeated key presses that didn't change state */
36319 if (newn->brightness_level == 0)
36320 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36321 && !tp_features.bright_unkfw)
36322 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36323 }
36324 +}
36325
36326 #undef TPACPI_COMPARE_KEY
36327 #undef TPACPI_MAY_SEND_KEY
36328 -}
36329
36330 /*
36331 * Polling driver
36332 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36333 index 769d265..a3a05ca 100644
36334 --- a/drivers/pnp/pnpbios/bioscalls.c
36335 +++ b/drivers/pnp/pnpbios/bioscalls.c
36336 @@ -58,7 +58,7 @@ do { \
36337 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36338 } while(0)
36339
36340 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36341 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36342 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36343
36344 /*
36345 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36346
36347 cpu = get_cpu();
36348 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36349 +
36350 + pax_open_kernel();
36351 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36352 + pax_close_kernel();
36353
36354 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36355 spin_lock_irqsave(&pnp_bios_lock, flags);
36356 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36357 :"memory");
36358 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36359
36360 + pax_open_kernel();
36361 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36362 + pax_close_kernel();
36363 +
36364 put_cpu();
36365
36366 /* If we get here and this is set then the PnP BIOS faulted on us. */
36367 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36368 return status;
36369 }
36370
36371 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36372 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36373 {
36374 int i;
36375
36376 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36377 pnp_bios_callpoint.offset = header->fields.pm16offset;
36378 pnp_bios_callpoint.segment = PNP_CS16;
36379
36380 + pax_open_kernel();
36381 +
36382 for_each_possible_cpu(i) {
36383 struct desc_struct *gdt = get_cpu_gdt_table(i);
36384 if (!gdt)
36385 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36386 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36387 (unsigned long)__va(header->fields.pm16dseg));
36388 }
36389 +
36390 + pax_close_kernel();
36391 }
36392 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36393 index b0ecacb..7c9da2e 100644
36394 --- a/drivers/pnp/resource.c
36395 +++ b/drivers/pnp/resource.c
36396 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36397 return 1;
36398
36399 /* check if the resource is valid */
36400 - if (*irq < 0 || *irq > 15)
36401 + if (*irq > 15)
36402 return 0;
36403
36404 /* check if the resource is reserved */
36405 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36406 return 1;
36407
36408 /* check if the resource is valid */
36409 - if (*dma < 0 || *dma == 4 || *dma > 7)
36410 + if (*dma == 4 || *dma > 7)
36411 return 0;
36412
36413 /* check if the resource is reserved */
36414 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36415 index f5d6d37..739f6a9 100644
36416 --- a/drivers/power/bq27x00_battery.c
36417 +++ b/drivers/power/bq27x00_battery.c
36418 @@ -72,7 +72,7 @@
36419 struct bq27x00_device_info;
36420 struct bq27x00_access_methods {
36421 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36422 -};
36423 +} __no_const;
36424
36425 enum bq27x00_chip { BQ27000, BQ27500 };
36426
36427 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36428 index 8d53174..04c65de 100644
36429 --- a/drivers/regulator/max8660.c
36430 +++ b/drivers/regulator/max8660.c
36431 @@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36432 max8660->shadow_regs[MAX8660_OVER1] = 5;
36433 } else {
36434 /* Otherwise devices can be toggled via software */
36435 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36436 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36437 + pax_open_kernel();
36438 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36439 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36440 + pax_close_kernel();
36441 }
36442
36443 /*
36444 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36445 index 970a233..ee1f241 100644
36446 --- a/drivers/regulator/mc13892-regulator.c
36447 +++ b/drivers/regulator/mc13892-regulator.c
36448 @@ -566,10 +566,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36449 }
36450 mc13xxx_unlock(mc13892);
36451
36452 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36453 + pax_open_kernel();
36454 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36455 = mc13892_vcam_set_mode;
36456 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36457 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36458 = mc13892_vcam_get_mode;
36459 + pax_close_kernel();
36460
36461 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36462 ARRAY_SIZE(mc13892_regulators));
36463 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36464 index cace6d3..f623fda 100644
36465 --- a/drivers/rtc/rtc-dev.c
36466 +++ b/drivers/rtc/rtc-dev.c
36467 @@ -14,6 +14,7 @@
36468 #include <linux/module.h>
36469 #include <linux/rtc.h>
36470 #include <linux/sched.h>
36471 +#include <linux/grsecurity.h>
36472 #include "rtc-core.h"
36473
36474 static dev_t rtc_devt;
36475 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36476 if (copy_from_user(&tm, uarg, sizeof(tm)))
36477 return -EFAULT;
36478
36479 + gr_log_timechange();
36480 +
36481 return rtc_set_time(rtc, &tm);
36482
36483 case RTC_PIE_ON:
36484 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36485 index 3fcf627..f334910 100644
36486 --- a/drivers/scsi/aacraid/aacraid.h
36487 +++ b/drivers/scsi/aacraid/aacraid.h
36488 @@ -492,7 +492,7 @@ struct adapter_ops
36489 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36490 /* Administrative operations */
36491 int (*adapter_comm)(struct aac_dev * dev, int comm);
36492 -};
36493 +} __no_const;
36494
36495 /*
36496 * Define which interrupt handler needs to be installed
36497 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36498 index 0d279c44..3d25a97 100644
36499 --- a/drivers/scsi/aacraid/linit.c
36500 +++ b/drivers/scsi/aacraid/linit.c
36501 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36502 #elif defined(__devinitconst)
36503 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36504 #else
36505 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36506 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36507 #endif
36508 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36509 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36510 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36511 index ff80552..1c4120c 100644
36512 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36513 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36514 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36515 .lldd_ata_set_dmamode = asd_set_dmamode,
36516 };
36517
36518 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36519 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36520 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36521 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36522 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36523 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36524 index 4ad7e36..d004679 100644
36525 --- a/drivers/scsi/bfa/bfa.h
36526 +++ b/drivers/scsi/bfa/bfa.h
36527 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36528 u32 *end);
36529 int cpe_vec_q0;
36530 int rme_vec_q0;
36531 -};
36532 +} __no_const;
36533 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36534
36535 struct bfa_faa_cbfn_s {
36536 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36537 index f0f80e2..8ec946b 100644
36538 --- a/drivers/scsi/bfa/bfa_fcpim.c
36539 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36540 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36541
36542 bfa_iotag_attach(fcp);
36543
36544 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36545 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36546 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36547 (fcp->num_itns * sizeof(struct bfa_itn_s));
36548 memset(fcp->itn_arr, 0,
36549 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36550 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36551 {
36552 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36553 - struct bfa_itn_s *itn;
36554 + bfa_itn_s_no_const *itn;
36555
36556 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36557 itn->isr = isr;
36558 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36559 index 36f26da..38a34a8 100644
36560 --- a/drivers/scsi/bfa/bfa_fcpim.h
36561 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36562 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36563 struct bfa_itn_s {
36564 bfa_isr_func_t isr;
36565 };
36566 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36567
36568 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36569 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36570 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36571 struct list_head iotag_tio_free_q; /* free IO resources */
36572 struct list_head iotag_unused_q; /* unused IO resources*/
36573 struct bfa_iotag_s *iotag_arr;
36574 - struct bfa_itn_s *itn_arr;
36575 + bfa_itn_s_no_const *itn_arr;
36576 int num_ioim_reqs;
36577 int num_fwtio_reqs;
36578 int num_itns;
36579 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36580 index 1a99d4b..e85d64b 100644
36581 --- a/drivers/scsi/bfa/bfa_ioc.h
36582 +++ b/drivers/scsi/bfa/bfa_ioc.h
36583 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36584 bfa_ioc_disable_cbfn_t disable_cbfn;
36585 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36586 bfa_ioc_reset_cbfn_t reset_cbfn;
36587 -};
36588 +} __no_const;
36589
36590 /*
36591 * IOC event notification mechanism.
36592 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36593 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36594 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36595 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36596 -};
36597 +} __no_const;
36598
36599 /*
36600 * Queue element to wait for room in request queue. FIFO order is
36601 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36602 index b48c24f..dac0fbc 100644
36603 --- a/drivers/scsi/hosts.c
36604 +++ b/drivers/scsi/hosts.c
36605 @@ -42,7 +42,7 @@
36606 #include "scsi_logging.h"
36607
36608
36609 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36610 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36611
36612
36613 static void scsi_host_cls_release(struct device *dev)
36614 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36615 * subtract one because we increment first then return, but we need to
36616 * know what the next host number was before increment
36617 */
36618 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36619 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36620 shost->dma_channel = 0xff;
36621
36622 /* These three are default values which can be overridden */
36623 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36624 index 796482b..d08435c 100644
36625 --- a/drivers/scsi/hpsa.c
36626 +++ b/drivers/scsi/hpsa.c
36627 @@ -536,7 +536,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
36628 unsigned long flags;
36629
36630 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36631 - return h->access.command_completed(h, q);
36632 + return h->access->command_completed(h, q);
36633
36634 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
36635 a = rq->head[rq->current_entry];
36636 @@ -3354,7 +3354,7 @@ static void start_io(struct ctlr_info *h)
36637 while (!list_empty(&h->reqQ)) {
36638 c = list_entry(h->reqQ.next, struct CommandList, list);
36639 /* can't do anything if fifo is full */
36640 - if ((h->access.fifo_full(h))) {
36641 + if ((h->access->fifo_full(h))) {
36642 dev_warn(&h->pdev->dev, "fifo full\n");
36643 break;
36644 }
36645 @@ -3376,7 +3376,7 @@ static void start_io(struct ctlr_info *h)
36646
36647 /* Tell the controller execute command */
36648 spin_unlock_irqrestore(&h->lock, flags);
36649 - h->access.submit_command(h, c);
36650 + h->access->submit_command(h, c);
36651 spin_lock_irqsave(&h->lock, flags);
36652 }
36653 spin_unlock_irqrestore(&h->lock, flags);
36654 @@ -3384,17 +3384,17 @@ static void start_io(struct ctlr_info *h)
36655
36656 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
36657 {
36658 - return h->access.command_completed(h, q);
36659 + return h->access->command_completed(h, q);
36660 }
36661
36662 static inline bool interrupt_pending(struct ctlr_info *h)
36663 {
36664 - return h->access.intr_pending(h);
36665 + return h->access->intr_pending(h);
36666 }
36667
36668 static inline long interrupt_not_for_us(struct ctlr_info *h)
36669 {
36670 - return (h->access.intr_pending(h) == 0) ||
36671 + return (h->access->intr_pending(h) == 0) ||
36672 (h->interrupts_enabled == 0);
36673 }
36674
36675 @@ -4298,7 +4298,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36676 if (prod_index < 0)
36677 return -ENODEV;
36678 h->product_name = products[prod_index].product_name;
36679 - h->access = *(products[prod_index].access);
36680 + h->access = products[prod_index].access;
36681
36682 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
36683 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
36684 @@ -4580,7 +4580,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36685
36686 assert_spin_locked(&lockup_detector_lock);
36687 remove_ctlr_from_lockup_detector_list(h);
36688 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36689 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36690 spin_lock_irqsave(&h->lock, flags);
36691 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36692 spin_unlock_irqrestore(&h->lock, flags);
36693 @@ -4758,7 +4758,7 @@ reinit_after_soft_reset:
36694 }
36695
36696 /* make sure the board interrupts are off */
36697 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36698 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36699
36700 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36701 goto clean2;
36702 @@ -4792,7 +4792,7 @@ reinit_after_soft_reset:
36703 * fake ones to scoop up any residual completions.
36704 */
36705 spin_lock_irqsave(&h->lock, flags);
36706 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36707 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36708 spin_unlock_irqrestore(&h->lock, flags);
36709 free_irqs(h);
36710 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36711 @@ -4811,9 +4811,9 @@ reinit_after_soft_reset:
36712 dev_info(&h->pdev->dev, "Board READY.\n");
36713 dev_info(&h->pdev->dev,
36714 "Waiting for stale completions to drain.\n");
36715 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36716 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36717 msleep(10000);
36718 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36719 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36720
36721 rc = controller_reset_failed(h->cfgtable);
36722 if (rc)
36723 @@ -4834,7 +4834,7 @@ reinit_after_soft_reset:
36724 }
36725
36726 /* Turn the interrupts on so we can service requests */
36727 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36728 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36729
36730 hpsa_hba_inquiry(h);
36731 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36732 @@ -4886,7 +4886,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36733 * To write all data in the battery backed cache to disks
36734 */
36735 hpsa_flush_cache(h);
36736 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36737 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36738 hpsa_free_irqs_and_disable_msix(h);
36739 }
36740
36741 @@ -5055,7 +5055,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36742 return;
36743 }
36744 /* Change the access methods to the performant access methods */
36745 - h->access = SA5_performant_access;
36746 + h->access = &SA5_performant_access;
36747 h->transMethod = CFGTBL_Trans_Performant;
36748 }
36749
36750 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36751 index 9816479..c5d4e97 100644
36752 --- a/drivers/scsi/hpsa.h
36753 +++ b/drivers/scsi/hpsa.h
36754 @@ -79,7 +79,7 @@ struct ctlr_info {
36755 unsigned int msix_vector;
36756 unsigned int msi_vector;
36757 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36758 - struct access_method access;
36759 + struct access_method *access;
36760
36761 /* queue and queue Info */
36762 struct list_head reqQ;
36763 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36764 index f2df059..a3a9930 100644
36765 --- a/drivers/scsi/ips.h
36766 +++ b/drivers/scsi/ips.h
36767 @@ -1027,7 +1027,7 @@ typedef struct {
36768 int (*intr)(struct ips_ha *);
36769 void (*enableint)(struct ips_ha *);
36770 uint32_t (*statupd)(struct ips_ha *);
36771 -} ips_hw_func_t;
36772 +} __no_const ips_hw_func_t;
36773
36774 typedef struct ips_ha {
36775 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36776 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36777 index aceffad..c35c08d 100644
36778 --- a/drivers/scsi/libfc/fc_exch.c
36779 +++ b/drivers/scsi/libfc/fc_exch.c
36780 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36781 * all together if not used XXX
36782 */
36783 struct {
36784 - atomic_t no_free_exch;
36785 - atomic_t no_free_exch_xid;
36786 - atomic_t xid_not_found;
36787 - atomic_t xid_busy;
36788 - atomic_t seq_not_found;
36789 - atomic_t non_bls_resp;
36790 + atomic_unchecked_t no_free_exch;
36791 + atomic_unchecked_t no_free_exch_xid;
36792 + atomic_unchecked_t xid_not_found;
36793 + atomic_unchecked_t xid_busy;
36794 + atomic_unchecked_t seq_not_found;
36795 + atomic_unchecked_t non_bls_resp;
36796 } stats;
36797 };
36798
36799 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36800 /* allocate memory for exchange */
36801 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36802 if (!ep) {
36803 - atomic_inc(&mp->stats.no_free_exch);
36804 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36805 goto out;
36806 }
36807 memset(ep, 0, sizeof(*ep));
36808 @@ -780,7 +780,7 @@ out:
36809 return ep;
36810 err:
36811 spin_unlock_bh(&pool->lock);
36812 - atomic_inc(&mp->stats.no_free_exch_xid);
36813 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36814 mempool_free(ep, mp->ep_pool);
36815 return NULL;
36816 }
36817 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36818 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36819 ep = fc_exch_find(mp, xid);
36820 if (!ep) {
36821 - atomic_inc(&mp->stats.xid_not_found);
36822 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36823 reject = FC_RJT_OX_ID;
36824 goto out;
36825 }
36826 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36827 ep = fc_exch_find(mp, xid);
36828 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36829 if (ep) {
36830 - atomic_inc(&mp->stats.xid_busy);
36831 + atomic_inc_unchecked(&mp->stats.xid_busy);
36832 reject = FC_RJT_RX_ID;
36833 goto rel;
36834 }
36835 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36836 }
36837 xid = ep->xid; /* get our XID */
36838 } else if (!ep) {
36839 - atomic_inc(&mp->stats.xid_not_found);
36840 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36841 reject = FC_RJT_RX_ID; /* XID not found */
36842 goto out;
36843 }
36844 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36845 } else {
36846 sp = &ep->seq;
36847 if (sp->id != fh->fh_seq_id) {
36848 - atomic_inc(&mp->stats.seq_not_found);
36849 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36850 if (f_ctl & FC_FC_END_SEQ) {
36851 /*
36852 * Update sequence_id based on incoming last
36853 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36854
36855 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36856 if (!ep) {
36857 - atomic_inc(&mp->stats.xid_not_found);
36858 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36859 goto out;
36860 }
36861 if (ep->esb_stat & ESB_ST_COMPLETE) {
36862 - atomic_inc(&mp->stats.xid_not_found);
36863 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36864 goto rel;
36865 }
36866 if (ep->rxid == FC_XID_UNKNOWN)
36867 ep->rxid = ntohs(fh->fh_rx_id);
36868 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36869 - atomic_inc(&mp->stats.xid_not_found);
36870 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36871 goto rel;
36872 }
36873 if (ep->did != ntoh24(fh->fh_s_id) &&
36874 ep->did != FC_FID_FLOGI) {
36875 - atomic_inc(&mp->stats.xid_not_found);
36876 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36877 goto rel;
36878 }
36879 sof = fr_sof(fp);
36880 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36881 sp->ssb_stat |= SSB_ST_RESP;
36882 sp->id = fh->fh_seq_id;
36883 } else if (sp->id != fh->fh_seq_id) {
36884 - atomic_inc(&mp->stats.seq_not_found);
36885 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36886 goto rel;
36887 }
36888
36889 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36890 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36891
36892 if (!sp)
36893 - atomic_inc(&mp->stats.xid_not_found);
36894 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36895 else
36896 - atomic_inc(&mp->stats.non_bls_resp);
36897 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36898
36899 fc_frame_free(fp);
36900 }
36901 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36902 index d109cc3..09f4e7d 100644
36903 --- a/drivers/scsi/libsas/sas_ata.c
36904 +++ b/drivers/scsi/libsas/sas_ata.c
36905 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36906 .postreset = ata_std_postreset,
36907 .error_handler = ata_std_error_handler,
36908 .post_internal_cmd = sas_ata_post_internal,
36909 - .qc_defer = ata_std_qc_defer,
36910 + .qc_defer = ata_std_qc_defer,
36911 .qc_prep = ata_noop_qc_prep,
36912 .qc_issue = sas_ata_qc_issue,
36913 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36914 diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
36915 index fe5d396..e93d526 100644
36916 --- a/drivers/scsi/lpfc/Makefile
36917 +++ b/drivers/scsi/lpfc/Makefile
36918 @@ -22,7 +22,7 @@
36919 ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
36920 ccflags-$(GCOV) += -O0
36921
36922 -ccflags-y += -Werror
36923 +#ccflags-y += -Werror
36924
36925 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
36926
36927 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36928 index e5da6da..c888d48 100644
36929 --- a/drivers/scsi/lpfc/lpfc.h
36930 +++ b/drivers/scsi/lpfc/lpfc.h
36931 @@ -416,7 +416,7 @@ struct lpfc_vport {
36932 struct dentry *debug_nodelist;
36933 struct dentry *vport_debugfs_root;
36934 struct lpfc_debugfs_trc *disc_trc;
36935 - atomic_t disc_trc_cnt;
36936 + atomic_unchecked_t disc_trc_cnt;
36937 #endif
36938 uint8_t stat_data_enabled;
36939 uint8_t stat_data_blocked;
36940 @@ -830,8 +830,8 @@ struct lpfc_hba {
36941 struct timer_list fabric_block_timer;
36942 unsigned long bit_flags;
36943 #define FABRIC_COMANDS_BLOCKED 0
36944 - atomic_t num_rsrc_err;
36945 - atomic_t num_cmd_success;
36946 + atomic_unchecked_t num_rsrc_err;
36947 + atomic_unchecked_t num_cmd_success;
36948 unsigned long last_rsrc_error_time;
36949 unsigned long last_ramp_down_time;
36950 unsigned long last_ramp_up_time;
36951 @@ -867,7 +867,7 @@ struct lpfc_hba {
36952
36953 struct dentry *debug_slow_ring_trc;
36954 struct lpfc_debugfs_trc *slow_ring_trc;
36955 - atomic_t slow_ring_trc_cnt;
36956 + atomic_unchecked_t slow_ring_trc_cnt;
36957 /* iDiag debugfs sub-directory */
36958 struct dentry *idiag_root;
36959 struct dentry *idiag_pci_cfg;
36960 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36961 index 3217d63..c417981 100644
36962 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36963 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36964 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36965
36966 #include <linux/debugfs.h>
36967
36968 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36969 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36970 static unsigned long lpfc_debugfs_start_time = 0L;
36971
36972 /* iDiag */
36973 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36974 lpfc_debugfs_enable = 0;
36975
36976 len = 0;
36977 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36978 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36979 (lpfc_debugfs_max_disc_trc - 1);
36980 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36981 dtp = vport->disc_trc + i;
36982 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36983 lpfc_debugfs_enable = 0;
36984
36985 len = 0;
36986 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36987 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36988 (lpfc_debugfs_max_slow_ring_trc - 1);
36989 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36990 dtp = phba->slow_ring_trc + i;
36991 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36992 !vport || !vport->disc_trc)
36993 return;
36994
36995 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36996 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36997 (lpfc_debugfs_max_disc_trc - 1);
36998 dtp = vport->disc_trc + index;
36999 dtp->fmt = fmt;
37000 dtp->data1 = data1;
37001 dtp->data2 = data2;
37002 dtp->data3 = data3;
37003 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37004 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37005 dtp->jif = jiffies;
37006 #endif
37007 return;
37008 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
37009 !phba || !phba->slow_ring_trc)
37010 return;
37011
37012 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37013 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37014 (lpfc_debugfs_max_slow_ring_trc - 1);
37015 dtp = phba->slow_ring_trc + index;
37016 dtp->fmt = fmt;
37017 dtp->data1 = data1;
37018 dtp->data2 = data2;
37019 dtp->data3 = data3;
37020 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37021 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37022 dtp->jif = jiffies;
37023 #endif
37024 return;
37025 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37026 "slow_ring buffer\n");
37027 goto debug_failed;
37028 }
37029 - atomic_set(&phba->slow_ring_trc_cnt, 0);
37030 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37031 memset(phba->slow_ring_trc, 0,
37032 (sizeof(struct lpfc_debugfs_trc) *
37033 lpfc_debugfs_max_slow_ring_trc));
37034 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37035 "buffer\n");
37036 goto debug_failed;
37037 }
37038 - atomic_set(&vport->disc_trc_cnt, 0);
37039 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37040
37041 snprintf(name, sizeof(name), "discovery_trace");
37042 vport->debug_disc_trc =
37043 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
37044 index 411ed48..967f553 100644
37045 --- a/drivers/scsi/lpfc/lpfc_init.c
37046 +++ b/drivers/scsi/lpfc/lpfc_init.c
37047 @@ -10341,8 +10341,10 @@ lpfc_init(void)
37048 "misc_register returned with status %d", error);
37049
37050 if (lpfc_enable_npiv) {
37051 - lpfc_transport_functions.vport_create = lpfc_vport_create;
37052 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37053 + pax_open_kernel();
37054 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37055 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37056 + pax_close_kernel();
37057 }
37058 lpfc_transport_template =
37059 fc_attach_transport(&lpfc_transport_functions);
37060 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
37061 index 66e0906..1620281 100644
37062 --- a/drivers/scsi/lpfc/lpfc_scsi.c
37063 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
37064 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
37065 uint32_t evt_posted;
37066
37067 spin_lock_irqsave(&phba->hbalock, flags);
37068 - atomic_inc(&phba->num_rsrc_err);
37069 + atomic_inc_unchecked(&phba->num_rsrc_err);
37070 phba->last_rsrc_error_time = jiffies;
37071
37072 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37073 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
37074 unsigned long flags;
37075 struct lpfc_hba *phba = vport->phba;
37076 uint32_t evt_posted;
37077 - atomic_inc(&phba->num_cmd_success);
37078 + atomic_inc_unchecked(&phba->num_cmd_success);
37079
37080 if (vport->cfg_lun_queue_depth <= queue_depth)
37081 return;
37082 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37083 unsigned long num_rsrc_err, num_cmd_success;
37084 int i;
37085
37086 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37087 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37088 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37089 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37090
37091 /*
37092 * The error and success command counters are global per
37093 @@ -425,8 +425,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37094 }
37095 }
37096 lpfc_destroy_vport_work_array(phba, vports);
37097 - atomic_set(&phba->num_rsrc_err, 0);
37098 - atomic_set(&phba->num_cmd_success, 0);
37099 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37100 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37101 }
37102
37103 /**
37104 @@ -460,8 +460,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37105 }
37106 }
37107 lpfc_destroy_vport_work_array(phba, vports);
37108 - atomic_set(&phba->num_rsrc_err, 0);
37109 - atomic_set(&phba->num_cmd_success, 0);
37110 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37111 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37112 }
37113
37114 /**
37115 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37116 index ea8a0b4..812a124 100644
37117 --- a/drivers/scsi/pmcraid.c
37118 +++ b/drivers/scsi/pmcraid.c
37119 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37120 res->scsi_dev = scsi_dev;
37121 scsi_dev->hostdata = res;
37122 res->change_detected = 0;
37123 - atomic_set(&res->read_failures, 0);
37124 - atomic_set(&res->write_failures, 0);
37125 + atomic_set_unchecked(&res->read_failures, 0);
37126 + atomic_set_unchecked(&res->write_failures, 0);
37127 rc = 0;
37128 }
37129 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37130 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37131
37132 /* If this was a SCSI read/write command keep count of errors */
37133 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37134 - atomic_inc(&res->read_failures);
37135 + atomic_inc_unchecked(&res->read_failures);
37136 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37137 - atomic_inc(&res->write_failures);
37138 + atomic_inc_unchecked(&res->write_failures);
37139
37140 if (!RES_IS_GSCSI(res->cfg_entry) &&
37141 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37142 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37143 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37144 * hrrq_id assigned here in queuecommand
37145 */
37146 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37147 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37148 pinstance->num_hrrq;
37149 cmd->cmd_done = pmcraid_io_done;
37150
37151 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37152 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37153 * hrrq_id assigned here in queuecommand
37154 */
37155 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37156 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37157 pinstance->num_hrrq;
37158
37159 if (request_size) {
37160 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37161
37162 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37163 /* add resources only after host is added into system */
37164 - if (!atomic_read(&pinstance->expose_resources))
37165 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37166 return;
37167
37168 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37169 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37170 init_waitqueue_head(&pinstance->reset_wait_q);
37171
37172 atomic_set(&pinstance->outstanding_cmds, 0);
37173 - atomic_set(&pinstance->last_message_id, 0);
37174 - atomic_set(&pinstance->expose_resources, 0);
37175 + atomic_set_unchecked(&pinstance->last_message_id, 0);
37176 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37177
37178 INIT_LIST_HEAD(&pinstance->free_res_q);
37179 INIT_LIST_HEAD(&pinstance->used_res_q);
37180 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37181 /* Schedule worker thread to handle CCN and take care of adding and
37182 * removing devices to OS
37183 */
37184 - atomic_set(&pinstance->expose_resources, 1);
37185 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37186 schedule_work(&pinstance->worker_q);
37187 return rc;
37188
37189 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37190 index e1d150f..6c6df44 100644
37191 --- a/drivers/scsi/pmcraid.h
37192 +++ b/drivers/scsi/pmcraid.h
37193 @@ -748,7 +748,7 @@ struct pmcraid_instance {
37194 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37195
37196 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37197 - atomic_t last_message_id;
37198 + atomic_unchecked_t last_message_id;
37199
37200 /* configuration table */
37201 struct pmcraid_config_table *cfg_table;
37202 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37203 atomic_t outstanding_cmds;
37204
37205 /* should add/delete resources to mid-layer now ?*/
37206 - atomic_t expose_resources;
37207 + atomic_unchecked_t expose_resources;
37208
37209
37210
37211 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37212 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37213 };
37214 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37215 - atomic_t read_failures; /* count of failed READ commands */
37216 - atomic_t write_failures; /* count of failed WRITE commands */
37217 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37218 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37219
37220 /* To indicate add/delete/modify during CCN */
37221 u8 change_detected;
37222 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
37223 index 5ab9530..2dd80f7 100644
37224 --- a/drivers/scsi/qla2xxx/qla_attr.c
37225 +++ b/drivers/scsi/qla2xxx/qla_attr.c
37226 @@ -1855,7 +1855,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
37227 return 0;
37228 }
37229
37230 -struct fc_function_template qla2xxx_transport_functions = {
37231 +fc_function_template_no_const qla2xxx_transport_functions = {
37232
37233 .show_host_node_name = 1,
37234 .show_host_port_name = 1,
37235 @@ -1902,7 +1902,7 @@ struct fc_function_template qla2xxx_transport_functions = {
37236 .bsg_timeout = qla24xx_bsg_timeout,
37237 };
37238
37239 -struct fc_function_template qla2xxx_transport_vport_functions = {
37240 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
37241
37242 .show_host_node_name = 1,
37243 .show_host_port_name = 1,
37244 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37245 index 39007f5..7fafc64 100644
37246 --- a/drivers/scsi/qla2xxx/qla_def.h
37247 +++ b/drivers/scsi/qla2xxx/qla_def.h
37248 @@ -2284,7 +2284,7 @@ struct isp_operations {
37249 int (*start_scsi) (srb_t *);
37250 int (*abort_isp) (struct scsi_qla_host *);
37251 int (*iospace_config)(struct qla_hw_data*);
37252 -};
37253 +} __no_const;
37254
37255 /* MSI-X Support *************************************************************/
37256
37257 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
37258 index 9eacd2d..d79629c 100644
37259 --- a/drivers/scsi/qla2xxx/qla_gbl.h
37260 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
37261 @@ -484,8 +484,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
37262 struct device_attribute;
37263 extern struct device_attribute *qla2x00_host_attrs[];
37264 struct fc_function_template;
37265 -extern struct fc_function_template qla2xxx_transport_functions;
37266 -extern struct fc_function_template qla2xxx_transport_vport_functions;
37267 +extern fc_function_template_no_const qla2xxx_transport_functions;
37268 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
37269 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
37270 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
37271 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
37272 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37273 index 96a5616..eeb185a 100644
37274 --- a/drivers/scsi/qla4xxx/ql4_def.h
37275 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37276 @@ -268,7 +268,7 @@ struct ddb_entry {
37277 * (4000 only) */
37278 atomic_t relogin_timer; /* Max Time to wait for
37279 * relogin to complete */
37280 - atomic_t relogin_retry_count; /* Num of times relogin has been
37281 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37282 * retried */
37283 uint32_t default_time2wait; /* Default Min time between
37284 * relogins (+aens) */
37285 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37286 index cd15678..f7e6846 100644
37287 --- a/drivers/scsi/qla4xxx/ql4_os.c
37288 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37289 @@ -2615,12 +2615,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37290 */
37291 if (!iscsi_is_session_online(cls_sess)) {
37292 /* Reset retry relogin timer */
37293 - atomic_inc(&ddb_entry->relogin_retry_count);
37294 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37295 DEBUG2(ql4_printk(KERN_INFO, ha,
37296 "%s: index[%d] relogin timed out-retrying"
37297 " relogin (%d), retry (%d)\n", __func__,
37298 ddb_entry->fw_ddb_index,
37299 - atomic_read(&ddb_entry->relogin_retry_count),
37300 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37301 ddb_entry->default_time2wait + 4));
37302 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37303 atomic_set(&ddb_entry->retry_relogin_timer,
37304 @@ -4517,7 +4517,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37305
37306 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37307 atomic_set(&ddb_entry->relogin_timer, 0);
37308 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37309 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37310 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37311 ddb_entry->default_relogin_timeout =
37312 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37313 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37314 index bbbc9c9..ce22f77 100644
37315 --- a/drivers/scsi/scsi.c
37316 +++ b/drivers/scsi/scsi.c
37317 @@ -659,7 +659,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37318 unsigned long timeout;
37319 int rtn = 0;
37320
37321 - atomic_inc(&cmd->device->iorequest_cnt);
37322 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37323
37324 /* check if the device is still usable */
37325 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37326 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37327 index dae3873..bb4bee6 100644
37328 --- a/drivers/scsi/scsi_lib.c
37329 +++ b/drivers/scsi/scsi_lib.c
37330 @@ -1425,7 +1425,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37331 shost = sdev->host;
37332 scsi_init_cmd_errh(cmd);
37333 cmd->result = DID_NO_CONNECT << 16;
37334 - atomic_inc(&cmd->device->iorequest_cnt);
37335 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37336
37337 /*
37338 * SCSI request completion path will do scsi_device_unbusy(),
37339 @@ -1451,9 +1451,9 @@ static void scsi_softirq_done(struct request *rq)
37340
37341 INIT_LIST_HEAD(&cmd->eh_entry);
37342
37343 - atomic_inc(&cmd->device->iodone_cnt);
37344 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37345 if (cmd->result)
37346 - atomic_inc(&cmd->device->ioerr_cnt);
37347 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37348
37349 disposition = scsi_decide_disposition(cmd);
37350 if (disposition != SUCCESS &&
37351 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37352 index bb7c482..7551a95 100644
37353 --- a/drivers/scsi/scsi_sysfs.c
37354 +++ b/drivers/scsi/scsi_sysfs.c
37355 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37356 char *buf) \
37357 { \
37358 struct scsi_device *sdev = to_scsi_device(dev); \
37359 - unsigned long long count = atomic_read(&sdev->field); \
37360 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37361 return snprintf(buf, 20, "0x%llx\n", count); \
37362 } \
37363 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37364 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37365 index 84a1fdf..693b0d6 100644
37366 --- a/drivers/scsi/scsi_tgt_lib.c
37367 +++ b/drivers/scsi/scsi_tgt_lib.c
37368 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37369 int err;
37370
37371 dprintk("%lx %u\n", uaddr, len);
37372 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37373 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37374 if (err) {
37375 /*
37376 * TODO: need to fixup sg_tablesize, max_segment_size,
37377 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37378 index 5797604..289a5b5 100644
37379 --- a/drivers/scsi/scsi_transport_fc.c
37380 +++ b/drivers/scsi/scsi_transport_fc.c
37381 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37382 * Netlink Infrastructure
37383 */
37384
37385 -static atomic_t fc_event_seq;
37386 +static atomic_unchecked_t fc_event_seq;
37387
37388 /**
37389 * fc_get_event_number - Obtain the next sequential FC event number
37390 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37391 u32
37392 fc_get_event_number(void)
37393 {
37394 - return atomic_add_return(1, &fc_event_seq);
37395 + return atomic_add_return_unchecked(1, &fc_event_seq);
37396 }
37397 EXPORT_SYMBOL(fc_get_event_number);
37398
37399 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37400 {
37401 int error;
37402
37403 - atomic_set(&fc_event_seq, 0);
37404 + atomic_set_unchecked(&fc_event_seq, 0);
37405
37406 error = transport_class_register(&fc_host_class);
37407 if (error)
37408 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37409 char *cp;
37410
37411 *val = simple_strtoul(buf, &cp, 0);
37412 - if ((*cp && (*cp != '\n')) || (*val < 0))
37413 + if (*cp && (*cp != '\n'))
37414 return -EINVAL;
37415 /*
37416 * Check for overflow; dev_loss_tmo is u32
37417 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37418 index 1cf640e..78e9014 100644
37419 --- a/drivers/scsi/scsi_transport_iscsi.c
37420 +++ b/drivers/scsi/scsi_transport_iscsi.c
37421 @@ -79,7 +79,7 @@ struct iscsi_internal {
37422 struct transport_container session_cont;
37423 };
37424
37425 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37426 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37427 static struct workqueue_struct *iscsi_eh_timer_workq;
37428
37429 static DEFINE_IDA(iscsi_sess_ida);
37430 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37431 int err;
37432
37433 ihost = shost->shost_data;
37434 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37435 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37436
37437 if (target_id == ISCSI_MAX_TARGET) {
37438 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37439 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37440 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37441 ISCSI_TRANSPORT_VERSION);
37442
37443 - atomic_set(&iscsi_session_nr, 0);
37444 + atomic_set_unchecked(&iscsi_session_nr, 0);
37445
37446 err = class_register(&iscsi_transport_class);
37447 if (err)
37448 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37449 index 21a045e..ec89e03 100644
37450 --- a/drivers/scsi/scsi_transport_srp.c
37451 +++ b/drivers/scsi/scsi_transport_srp.c
37452 @@ -33,7 +33,7 @@
37453 #include "scsi_transport_srp_internal.h"
37454
37455 struct srp_host_attrs {
37456 - atomic_t next_port_id;
37457 + atomic_unchecked_t next_port_id;
37458 };
37459 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37460
37461 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37462 struct Scsi_Host *shost = dev_to_shost(dev);
37463 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37464
37465 - atomic_set(&srp_host->next_port_id, 0);
37466 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37467 return 0;
37468 }
37469
37470 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37471 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37472 rport->roles = ids->roles;
37473
37474 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37475 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37476 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37477
37478 transport_setup_device(&rport->dev);
37479 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37480 index 9c5c5f2..8414557 100644
37481 --- a/drivers/scsi/sg.c
37482 +++ b/drivers/scsi/sg.c
37483 @@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37484 sdp->disk->disk_name,
37485 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37486 NULL,
37487 - (char *)arg);
37488 + (char __user *)arg);
37489 case BLKTRACESTART:
37490 return blk_trace_startstop(sdp->device->request_queue, 1);
37491 case BLKTRACESTOP:
37492 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37493 index 1041cb8..4a946fa 100644
37494 --- a/drivers/spi/spi.c
37495 +++ b/drivers/spi/spi.c
37496 @@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
37497 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37498
37499 /* portable code must never pass more than 32 bytes */
37500 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37501 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37502
37503 static u8 *buf;
37504
37505 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37506 index 34afc16..ffe44dd 100644
37507 --- a/drivers/staging/octeon/ethernet-rx.c
37508 +++ b/drivers/staging/octeon/ethernet-rx.c
37509 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37510 /* Increment RX stats for virtual ports */
37511 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37512 #ifdef CONFIG_64BIT
37513 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37514 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37515 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37516 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37517 #else
37518 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37519 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37520 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37521 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37522 #endif
37523 }
37524 netif_receive_skb(skb);
37525 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37526 dev->name);
37527 */
37528 #ifdef CONFIG_64BIT
37529 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37530 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37531 #else
37532 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37533 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37534 #endif
37535 dev_kfree_skb_irq(skb);
37536 }
37537 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37538 index 18f7a79..cc3bc24 100644
37539 --- a/drivers/staging/octeon/ethernet.c
37540 +++ b/drivers/staging/octeon/ethernet.c
37541 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37542 * since the RX tasklet also increments it.
37543 */
37544 #ifdef CONFIG_64BIT
37545 - atomic64_add(rx_status.dropped_packets,
37546 - (atomic64_t *)&priv->stats.rx_dropped);
37547 + atomic64_add_unchecked(rx_status.dropped_packets,
37548 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37549 #else
37550 - atomic_add(rx_status.dropped_packets,
37551 - (atomic_t *)&priv->stats.rx_dropped);
37552 + atomic_add_unchecked(rx_status.dropped_packets,
37553 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37554 #endif
37555 }
37556
37557 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37558 index dc23395..cf7e9b1 100644
37559 --- a/drivers/staging/rtl8712/rtl871x_io.h
37560 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37561 @@ -108,7 +108,7 @@ struct _io_ops {
37562 u8 *pmem);
37563 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37564 u8 *pmem);
37565 -};
37566 +} __no_const;
37567
37568 struct io_req {
37569 struct list_head list;
37570 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37571 index c7b5e8b..783d6cb 100644
37572 --- a/drivers/staging/sbe-2t3e3/netdev.c
37573 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37574 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37575 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37576
37577 if (rlen)
37578 - if (copy_to_user(data, &resp, rlen))
37579 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37580 return -EFAULT;
37581
37582 return 0;
37583 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37584 index 42cdafe..2769103 100644
37585 --- a/drivers/staging/speakup/speakup_soft.c
37586 +++ b/drivers/staging/speakup/speakup_soft.c
37587 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37588 break;
37589 } else if (!initialized) {
37590 if (*init) {
37591 - ch = *init;
37592 init++;
37593 } else {
37594 initialized = 1;
37595 }
37596 + ch = *init;
37597 } else {
37598 ch = synth_buffer_getc();
37599 }
37600 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37601 index 5d89c0f..9261317 100644
37602 --- a/drivers/staging/usbip/usbip_common.h
37603 +++ b/drivers/staging/usbip/usbip_common.h
37604 @@ -289,7 +289,7 @@ struct usbip_device {
37605 void (*shutdown)(struct usbip_device *);
37606 void (*reset)(struct usbip_device *);
37607 void (*unusable)(struct usbip_device *);
37608 - } eh_ops;
37609 + } __no_const eh_ops;
37610 };
37611
37612 #define kthread_get_run(threadfn, data, namefmt, ...) \
37613 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37614 index 88b3298..3783eee 100644
37615 --- a/drivers/staging/usbip/vhci.h
37616 +++ b/drivers/staging/usbip/vhci.h
37617 @@ -88,7 +88,7 @@ struct vhci_hcd {
37618 unsigned resuming:1;
37619 unsigned long re_timeout;
37620
37621 - atomic_t seqnum;
37622 + atomic_unchecked_t seqnum;
37623
37624 /*
37625 * NOTE:
37626 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37627 index f708cba..2de6d72 100644
37628 --- a/drivers/staging/usbip/vhci_hcd.c
37629 +++ b/drivers/staging/usbip/vhci_hcd.c
37630 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37631 return;
37632 }
37633
37634 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37635 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37636 if (priv->seqnum == 0xffff)
37637 dev_info(&urb->dev->dev, "seqnum max\n");
37638
37639 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37640 return -ENOMEM;
37641 }
37642
37643 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37644 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37645 if (unlink->seqnum == 0xffff)
37646 pr_info("seqnum max\n");
37647
37648 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37649 vdev->rhport = rhport;
37650 }
37651
37652 - atomic_set(&vhci->seqnum, 0);
37653 + atomic_set_unchecked(&vhci->seqnum, 0);
37654 spin_lock_init(&vhci->lock);
37655
37656 hcd->power_budget = 0; /* no limit */
37657 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37658 index f0eaf04..5a82e06 100644
37659 --- a/drivers/staging/usbip/vhci_rx.c
37660 +++ b/drivers/staging/usbip/vhci_rx.c
37661 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37662 if (!urb) {
37663 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37664 pr_info("max seqnum %d\n",
37665 - atomic_read(&the_controller->seqnum));
37666 + atomic_read_unchecked(&the_controller->seqnum));
37667 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37668 return;
37669 }
37670 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37671 index 7735027..30eed13 100644
37672 --- a/drivers/staging/vt6655/hostap.c
37673 +++ b/drivers/staging/vt6655/hostap.c
37674 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37675 *
37676 */
37677
37678 +static net_device_ops_no_const apdev_netdev_ops;
37679 +
37680 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37681 {
37682 PSDevice apdev_priv;
37683 struct net_device *dev = pDevice->dev;
37684 int ret;
37685 - const struct net_device_ops apdev_netdev_ops = {
37686 - .ndo_start_xmit = pDevice->tx_80211,
37687 - };
37688
37689 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37690
37691 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37692 *apdev_priv = *pDevice;
37693 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37694
37695 + /* only half broken now */
37696 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37697 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37698
37699 pDevice->apdev->type = ARPHRD_IEEE80211;
37700 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37701 index 51b5adf..098e320 100644
37702 --- a/drivers/staging/vt6656/hostap.c
37703 +++ b/drivers/staging/vt6656/hostap.c
37704 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37705 *
37706 */
37707
37708 +static net_device_ops_no_const apdev_netdev_ops;
37709 +
37710 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37711 {
37712 PSDevice apdev_priv;
37713 struct net_device *dev = pDevice->dev;
37714 int ret;
37715 - const struct net_device_ops apdev_netdev_ops = {
37716 - .ndo_start_xmit = pDevice->tx_80211,
37717 - };
37718
37719 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37720
37721 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37722 *apdev_priv = *pDevice;
37723 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37724
37725 + /* only half broken now */
37726 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37727 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37728
37729 pDevice->apdev->type = ARPHRD_IEEE80211;
37730 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37731 index 7843dfd..3db105f 100644
37732 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37733 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37734 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37735
37736 struct usbctlx_completor {
37737 int (*complete) (struct usbctlx_completor *);
37738 -};
37739 +} __no_const;
37740
37741 static int
37742 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37743 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37744 index 1ca66ea..76f1343 100644
37745 --- a/drivers/staging/zcache/tmem.c
37746 +++ b/drivers/staging/zcache/tmem.c
37747 @@ -39,7 +39,7 @@
37748 * A tmem host implementation must use this function to register callbacks
37749 * for memory allocation.
37750 */
37751 -static struct tmem_hostops tmem_hostops;
37752 +static tmem_hostops_no_const tmem_hostops;
37753
37754 static void tmem_objnode_tree_init(void);
37755
37756 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37757 * A tmem host implementation must use this function to register
37758 * callbacks for a page-accessible memory (PAM) implementation
37759 */
37760 -static struct tmem_pamops tmem_pamops;
37761 +static tmem_pamops_no_const tmem_pamops;
37762
37763 void tmem_register_pamops(struct tmem_pamops *m)
37764 {
37765 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37766 index 0d4aa82..f7832d4 100644
37767 --- a/drivers/staging/zcache/tmem.h
37768 +++ b/drivers/staging/zcache/tmem.h
37769 @@ -180,6 +180,7 @@ struct tmem_pamops {
37770 void (*new_obj)(struct tmem_obj *);
37771 int (*replace_in_obj)(void *, struct tmem_obj *);
37772 };
37773 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37774 extern void tmem_register_pamops(struct tmem_pamops *m);
37775
37776 /* memory allocation methods provided by the host implementation */
37777 @@ -189,6 +190,7 @@ struct tmem_hostops {
37778 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37779 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37780 };
37781 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37782 extern void tmem_register_hostops(struct tmem_hostops *m);
37783
37784 /* core tmem accessor functions */
37785 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37786 index df9824c..f699b8a 100644
37787 --- a/drivers/target/target_core_transport.c
37788 +++ b/drivers/target/target_core_transport.c
37789 @@ -1233,7 +1233,7 @@ struct se_device *transport_add_device_to_core_hba(
37790 spin_lock_init(&dev->se_port_lock);
37791 spin_lock_init(&dev->se_tmr_lock);
37792 spin_lock_init(&dev->qf_cmd_lock);
37793 - atomic_set(&dev->dev_ordered_id, 0);
37794 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37795
37796 se_dev_set_default_attribs(dev, dev_limits);
37797
37798 @@ -1402,7 +1402,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37799 * Used to determine when ORDERED commands should go from
37800 * Dormant to Active status.
37801 */
37802 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37803 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37804 smp_mb__after_atomic_inc();
37805 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37806 cmd->se_ordered_id, cmd->sam_task_attr,
37807 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
37808 index e61cabd..7617d26 100644
37809 --- a/drivers/tty/cyclades.c
37810 +++ b/drivers/tty/cyclades.c
37811 @@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
37812 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
37813 info->port.count);
37814 #endif
37815 - info->port.count++;
37816 + atomic_inc(&info->port.count);
37817 #ifdef CY_DEBUG_COUNT
37818 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
37819 - current->pid, info->port.count);
37820 + current->pid, atomic_read(&info->port.count));
37821 #endif
37822
37823 /*
37824 @@ -3987,7 +3987,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
37825 for (j = 0; j < cy_card[i].nports; j++) {
37826 info = &cy_card[i].ports[j];
37827
37828 - if (info->port.count) {
37829 + if (atomic_read(&info->port.count)) {
37830 /* XXX is the ldisc num worth this? */
37831 struct tty_struct *tty;
37832 struct tty_ldisc *ld;
37833 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
37834 index 2d691eb..be02ebd 100644
37835 --- a/drivers/tty/hvc/hvc_console.c
37836 +++ b/drivers/tty/hvc/hvc_console.c
37837 @@ -315,7 +315,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
37838
37839 spin_lock_irqsave(&hp->port.lock, flags);
37840 /* Check and then increment for fast path open. */
37841 - if (hp->port.count++ > 0) {
37842 + if (atomic_inc_return(&hp->port.count) > 1) {
37843 spin_unlock_irqrestore(&hp->port.lock, flags);
37844 hvc_kick();
37845 return 0;
37846 @@ -366,7 +366,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
37847
37848 spin_lock_irqsave(&hp->port.lock, flags);
37849
37850 - if (--hp->port.count == 0) {
37851 + if (atomic_dec_return(&hp->port.count) == 0) {
37852 spin_unlock_irqrestore(&hp->port.lock, flags);
37853 /* We are done with the tty pointer now. */
37854 tty_port_tty_set(&hp->port, NULL);
37855 @@ -384,9 +384,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
37856 */
37857 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
37858 } else {
37859 - if (hp->port.count < 0)
37860 + if (atomic_read(&hp->port.count) < 0)
37861 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
37862 - hp->vtermno, hp->port.count);
37863 + hp->vtermno, atomic_read(&hp->port.count));
37864 spin_unlock_irqrestore(&hp->port.lock, flags);
37865 }
37866
37867 @@ -412,13 +412,13 @@ static void hvc_hangup(struct tty_struct *tty)
37868 * open->hangup case this can be called after the final close so prevent
37869 * that from happening for now.
37870 */
37871 - if (hp->port.count <= 0) {
37872 + if (atomic_read(&hp->port.count) <= 0) {
37873 spin_unlock_irqrestore(&hp->port.lock, flags);
37874 return;
37875 }
37876
37877 - temp_open_count = hp->port.count;
37878 - hp->port.count = 0;
37879 + temp_open_count = atomic_read(&hp->port.count);
37880 + atomic_set(&hp->port.count, 0);
37881 spin_unlock_irqrestore(&hp->port.lock, flags);
37882 tty_port_tty_set(&hp->port, NULL);
37883
37884 @@ -471,7 +471,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
37885 return -EPIPE;
37886
37887 /* FIXME what's this (unprotected) check for? */
37888 - if (hp->port.count <= 0)
37889 + if (atomic_read(&hp->port.count) <= 0)
37890 return -EIO;
37891
37892 spin_lock_irqsave(&hp->lock, flags);
37893 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37894 index d56788c..12d8f85 100644
37895 --- a/drivers/tty/hvc/hvcs.c
37896 +++ b/drivers/tty/hvc/hvcs.c
37897 @@ -83,6 +83,7 @@
37898 #include <asm/hvcserver.h>
37899 #include <asm/uaccess.h>
37900 #include <asm/vio.h>
37901 +#include <asm/local.h>
37902
37903 /*
37904 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37905 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37906
37907 spin_lock_irqsave(&hvcsd->lock, flags);
37908
37909 - if (hvcsd->port.count > 0) {
37910 + if (atomic_read(&hvcsd->port.count) > 0) {
37911 spin_unlock_irqrestore(&hvcsd->lock, flags);
37912 printk(KERN_INFO "HVCS: vterm state unchanged. "
37913 "The hvcs device node is still in use.\n");
37914 @@ -1134,7 +1135,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37915 if ((retval = hvcs_partner_connect(hvcsd)))
37916 goto error_release;
37917
37918 - hvcsd->port.count = 1;
37919 + atomic_set(&hvcsd->port.count, 1);
37920 hvcsd->port.tty = tty;
37921 tty->driver_data = hvcsd;
37922
37923 @@ -1168,7 +1169,7 @@ fast_open:
37924
37925 spin_lock_irqsave(&hvcsd->lock, flags);
37926 tty_port_get(&hvcsd->port);
37927 - hvcsd->port.count++;
37928 + atomic_inc(&hvcsd->port.count);
37929 hvcsd->todo_mask |= HVCS_SCHED_READ;
37930 spin_unlock_irqrestore(&hvcsd->lock, flags);
37931
37932 @@ -1212,7 +1213,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37933 hvcsd = tty->driver_data;
37934
37935 spin_lock_irqsave(&hvcsd->lock, flags);
37936 - if (--hvcsd->port.count == 0) {
37937 + if (atomic_dec_and_test(&hvcsd->port.count)) {
37938
37939 vio_disable_interrupts(hvcsd->vdev);
37940
37941 @@ -1238,10 +1239,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37942 free_irq(irq, hvcsd);
37943 tty_port_put(&hvcsd->port);
37944 return;
37945 - } else if (hvcsd->port.count < 0) {
37946 + } else if (atomic_read(&hvcsd->port.count) < 0) {
37947 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37948 " is missmanaged.\n",
37949 - hvcsd->vdev->unit_address, hvcsd->port.count);
37950 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
37951 }
37952
37953 spin_unlock_irqrestore(&hvcsd->lock, flags);
37954 @@ -1257,7 +1258,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37955
37956 spin_lock_irqsave(&hvcsd->lock, flags);
37957 /* Preserve this so that we know how many kref refs to put */
37958 - temp_open_count = hvcsd->port.count;
37959 + temp_open_count = atomic_read(&hvcsd->port.count);
37960
37961 /*
37962 * Don't kref put inside the spinlock because the destruction
37963 @@ -1272,7 +1273,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37964 tty->driver_data = NULL;
37965 hvcsd->port.tty = NULL;
37966
37967 - hvcsd->port.count = 0;
37968 + atomic_set(&hvcsd->port.count, 0);
37969
37970 /* This will drop any buffered data on the floor which is OK in a hangup
37971 * scenario. */
37972 @@ -1343,7 +1344,7 @@ static int hvcs_write(struct tty_struct *tty,
37973 * the middle of a write operation? This is a crummy place to do this
37974 * but we want to keep it all in the spinlock.
37975 */
37976 - if (hvcsd->port.count <= 0) {
37977 + if (atomic_read(&hvcsd->port.count) <= 0) {
37978 spin_unlock_irqrestore(&hvcsd->lock, flags);
37979 return -ENODEV;
37980 }
37981 @@ -1417,7 +1418,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37982 {
37983 struct hvcs_struct *hvcsd = tty->driver_data;
37984
37985 - if (!hvcsd || hvcsd->port.count <= 0)
37986 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
37987 return 0;
37988
37989 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37990 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37991 index f8b5fa0..4ba9f89 100644
37992 --- a/drivers/tty/ipwireless/tty.c
37993 +++ b/drivers/tty/ipwireless/tty.c
37994 @@ -29,6 +29,7 @@
37995 #include <linux/tty_driver.h>
37996 #include <linux/tty_flip.h>
37997 #include <linux/uaccess.h>
37998 +#include <asm/local.h>
37999
38000 #include "tty.h"
38001 #include "network.h"
38002 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38003 mutex_unlock(&tty->ipw_tty_mutex);
38004 return -ENODEV;
38005 }
38006 - if (tty->port.count == 0)
38007 + if (atomic_read(&tty->port.count) == 0)
38008 tty->tx_bytes_queued = 0;
38009
38010 - tty->port.count++;
38011 + atomic_inc(&tty->port.count);
38012
38013 tty->port.tty = linux_tty;
38014 linux_tty->driver_data = tty;
38015 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38016
38017 static void do_ipw_close(struct ipw_tty *tty)
38018 {
38019 - tty->port.count--;
38020 -
38021 - if (tty->port.count == 0) {
38022 + if (atomic_dec_return(&tty->port.count) == 0) {
38023 struct tty_struct *linux_tty = tty->port.tty;
38024
38025 if (linux_tty != NULL) {
38026 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
38027 return;
38028
38029 mutex_lock(&tty->ipw_tty_mutex);
38030 - if (tty->port.count == 0) {
38031 + if (atomic_read(&tty->port.count) == 0) {
38032 mutex_unlock(&tty->ipw_tty_mutex);
38033 return;
38034 }
38035 @@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38036 return;
38037 }
38038
38039 - if (!tty->port.count) {
38040 + if (!atomic_read(&tty->port.count)) {
38041 mutex_unlock(&tty->ipw_tty_mutex);
38042 return;
38043 }
38044 @@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38045 return -ENODEV;
38046
38047 mutex_lock(&tty->ipw_tty_mutex);
38048 - if (!tty->port.count) {
38049 + if (!atomic_read(&tty->port.count)) {
38050 mutex_unlock(&tty->ipw_tty_mutex);
38051 return -EINVAL;
38052 }
38053 @@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38054 if (!tty)
38055 return -ENODEV;
38056
38057 - if (!tty->port.count)
38058 + if (!atomic_read(&tty->port.count))
38059 return -EINVAL;
38060
38061 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38062 @@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38063 if (!tty)
38064 return 0;
38065
38066 - if (!tty->port.count)
38067 + if (!atomic_read(&tty->port.count))
38068 return 0;
38069
38070 return tty->tx_bytes_queued;
38071 @@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38072 if (!tty)
38073 return -ENODEV;
38074
38075 - if (!tty->port.count)
38076 + if (!atomic_read(&tty->port.count))
38077 return -EINVAL;
38078
38079 return get_control_lines(tty);
38080 @@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38081 if (!tty)
38082 return -ENODEV;
38083
38084 - if (!tty->port.count)
38085 + if (!atomic_read(&tty->port.count))
38086 return -EINVAL;
38087
38088 return set_control_lines(tty, set, clear);
38089 @@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38090 if (!tty)
38091 return -ENODEV;
38092
38093 - if (!tty->port.count)
38094 + if (!atomic_read(&tty->port.count))
38095 return -EINVAL;
38096
38097 /* FIXME: Exactly how is the tty object locked here .. */
38098 @@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38099 * are gone */
38100 mutex_lock(&ttyj->ipw_tty_mutex);
38101 }
38102 - while (ttyj->port.count)
38103 + while (atomic_read(&ttyj->port.count))
38104 do_ipw_close(ttyj);
38105 ipwireless_disassociate_network_ttys(network,
38106 ttyj->channel_idx);
38107 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
38108 index 324467d..504cc25 100644
38109 --- a/drivers/tty/moxa.c
38110 +++ b/drivers/tty/moxa.c
38111 @@ -1172,7 +1172,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
38112 }
38113
38114 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
38115 - ch->port.count++;
38116 + atomic_inc(&ch->port.count);
38117 tty->driver_data = ch;
38118 tty_port_tty_set(&ch->port, tty);
38119 mutex_lock(&ch->port.mutex);
38120 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38121 index c43b683..4dab83e 100644
38122 --- a/drivers/tty/n_gsm.c
38123 +++ b/drivers/tty/n_gsm.c
38124 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38125 kref_init(&dlci->ref);
38126 mutex_init(&dlci->mutex);
38127 dlci->fifo = &dlci->_fifo;
38128 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38129 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38130 kfree(dlci);
38131 return NULL;
38132 }
38133 @@ -2895,7 +2895,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
38134 if (dlci == NULL)
38135 return -ENOMEM;
38136 port = &dlci->port;
38137 - port->count++;
38138 + atomic_inc(&port->count);
38139 tty->driver_data = dlci;
38140 dlci_get(dlci);
38141 dlci_get(dlci->gsm->dlci[0]);
38142 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38143 index ee1c268..0e97caf 100644
38144 --- a/drivers/tty/n_tty.c
38145 +++ b/drivers/tty/n_tty.c
38146 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38147 {
38148 *ops = tty_ldisc_N_TTY;
38149 ops->owner = NULL;
38150 - ops->refcount = ops->flags = 0;
38151 + atomic_set(&ops->refcount, 0);
38152 + ops->flags = 0;
38153 }
38154 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38155 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38156 index 5505ffc..7affff9 100644
38157 --- a/drivers/tty/pty.c
38158 +++ b/drivers/tty/pty.c
38159 @@ -718,8 +718,10 @@ static void __init unix98_pty_init(void)
38160 panic("Couldn't register Unix98 pts driver");
38161
38162 /* Now create the /dev/ptmx special device */
38163 + pax_open_kernel();
38164 tty_default_fops(&ptmx_fops);
38165 - ptmx_fops.open = ptmx_open;
38166 + *(void **)&ptmx_fops.open = ptmx_open;
38167 + pax_close_kernel();
38168
38169 cdev_init(&ptmx_cdev, &ptmx_fops);
38170 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38171 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
38172 index 777d5f9..56d67ca 100644
38173 --- a/drivers/tty/rocket.c
38174 +++ b/drivers/tty/rocket.c
38175 @@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
38176 tty->driver_data = info;
38177 tty_port_tty_set(port, tty);
38178
38179 - if (port->count++ == 0) {
38180 + if (atomic_inc_return(&port->count) == 1) {
38181 atomic_inc(&rp_num_ports_open);
38182
38183 #ifdef ROCKET_DEBUG_OPEN
38184 @@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
38185 #endif
38186 }
38187 #ifdef ROCKET_DEBUG_OPEN
38188 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
38189 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
38190 #endif
38191
38192 /*
38193 @@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
38194 spin_unlock_irqrestore(&info->port.lock, flags);
38195 return;
38196 }
38197 - if (info->port.count)
38198 + if (atomic_read(&info->port.count))
38199 atomic_dec(&rp_num_ports_open);
38200 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
38201 spin_unlock_irqrestore(&info->port.lock, flags);
38202 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38203 index 2b42a01..32a2ed3 100644
38204 --- a/drivers/tty/serial/kgdboc.c
38205 +++ b/drivers/tty/serial/kgdboc.c
38206 @@ -24,8 +24,9 @@
38207 #define MAX_CONFIG_LEN 40
38208
38209 static struct kgdb_io kgdboc_io_ops;
38210 +static struct kgdb_io kgdboc_io_ops_console;
38211
38212 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38213 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38214 static int configured = -1;
38215
38216 static char config[MAX_CONFIG_LEN];
38217 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38218 kgdboc_unregister_kbd();
38219 if (configured == 1)
38220 kgdb_unregister_io_module(&kgdboc_io_ops);
38221 + else if (configured == 2)
38222 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
38223 }
38224
38225 static int configure_kgdboc(void)
38226 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38227 int err;
38228 char *cptr = config;
38229 struct console *cons;
38230 + int is_console = 0;
38231
38232 err = kgdboc_option_setup(config);
38233 if (err || !strlen(config) || isspace(config[0]))
38234 goto noconfig;
38235
38236 err = -ENODEV;
38237 - kgdboc_io_ops.is_console = 0;
38238 kgdb_tty_driver = NULL;
38239
38240 kgdboc_use_kms = 0;
38241 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38242 int idx;
38243 if (cons->device && cons->device(cons, &idx) == p &&
38244 idx == tty_line) {
38245 - kgdboc_io_ops.is_console = 1;
38246 + is_console = 1;
38247 break;
38248 }
38249 cons = cons->next;
38250 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38251 kgdb_tty_line = tty_line;
38252
38253 do_register:
38254 - err = kgdb_register_io_module(&kgdboc_io_ops);
38255 + if (is_console) {
38256 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38257 + configured = 2;
38258 + } else {
38259 + err = kgdb_register_io_module(&kgdboc_io_ops);
38260 + configured = 1;
38261 + }
38262 if (err)
38263 goto noconfig;
38264
38265 - configured = 1;
38266 -
38267 return 0;
38268
38269 noconfig:
38270 @@ -213,7 +220,7 @@ noconfig:
38271 static int __init init_kgdboc(void)
38272 {
38273 /* Already configured? */
38274 - if (configured == 1)
38275 + if (configured >= 1)
38276 return 0;
38277
38278 return configure_kgdboc();
38279 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38280 if (config[len - 1] == '\n')
38281 config[len - 1] = '\0';
38282
38283 - if (configured == 1)
38284 + if (configured >= 1)
38285 cleanup_kgdboc();
38286
38287 /* Go and configure with the new params. */
38288 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38289 .post_exception = kgdboc_post_exp_handler,
38290 };
38291
38292 +static struct kgdb_io kgdboc_io_ops_console = {
38293 + .name = "kgdboc",
38294 + .read_char = kgdboc_get_char,
38295 + .write_char = kgdboc_put_char,
38296 + .pre_exception = kgdboc_pre_exp_handler,
38297 + .post_exception = kgdboc_post_exp_handler,
38298 + .is_console = 1
38299 +};
38300 +
38301 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38302 /* This is only available if kgdboc is a built in for early debugging */
38303 static int __init kgdboc_early_init(char *opt)
38304 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
38305 index 246b823..9e0db76 100644
38306 --- a/drivers/tty/serial/serial_core.c
38307 +++ b/drivers/tty/serial/serial_core.c
38308 @@ -1392,7 +1392,7 @@ static void uart_hangup(struct tty_struct *tty)
38309 uart_flush_buffer(tty);
38310 uart_shutdown(tty, state);
38311 spin_lock_irqsave(&port->lock, flags);
38312 - port->count = 0;
38313 + atomic_set(&port->count, 0);
38314 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
38315 spin_unlock_irqrestore(&port->lock, flags);
38316 tty_port_tty_set(port, NULL);
38317 @@ -1488,7 +1488,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38318 goto end;
38319 }
38320
38321 - port->count++;
38322 + atomic_inc(&port->count);
38323 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
38324 retval = -ENXIO;
38325 goto err_dec_count;
38326 @@ -1515,7 +1515,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38327 /*
38328 * Make sure the device is in D0 state.
38329 */
38330 - if (port->count == 1)
38331 + if (atomic_read(&port->count) == 1)
38332 uart_change_pm(state, 0);
38333
38334 /*
38335 @@ -1533,7 +1533,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38336 end:
38337 return retval;
38338 err_dec_count:
38339 - port->count--;
38340 + atomic_dec(&port->count);
38341 mutex_unlock(&port->mutex);
38342 goto end;
38343 }
38344 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
38345 index 593d40a..bdc61f3 100644
38346 --- a/drivers/tty/synclink.c
38347 +++ b/drivers/tty/synclink.c
38348 @@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38349
38350 if (debug_level >= DEBUG_LEVEL_INFO)
38351 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
38352 - __FILE__,__LINE__, info->device_name, info->port.count);
38353 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38354
38355 if (tty_port_close_start(&info->port, tty, filp) == 0)
38356 goto cleanup;
38357 @@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38358 cleanup:
38359 if (debug_level >= DEBUG_LEVEL_INFO)
38360 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
38361 - tty->driver->name, info->port.count);
38362 + tty->driver->name, atomic_read(&info->port.count));
38363
38364 } /* end of mgsl_close() */
38365
38366 @@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
38367
38368 mgsl_flush_buffer(tty);
38369 shutdown(info);
38370 -
38371 - info->port.count = 0;
38372 +
38373 + atomic_set(&info->port.count, 0);
38374 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38375 info->port.tty = NULL;
38376
38377 @@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38378
38379 if (debug_level >= DEBUG_LEVEL_INFO)
38380 printk("%s(%d):block_til_ready before block on %s count=%d\n",
38381 - __FILE__,__LINE__, tty->driver->name, port->count );
38382 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38383
38384 spin_lock_irqsave(&info->irq_spinlock, flags);
38385 if (!tty_hung_up_p(filp)) {
38386 extra_count = true;
38387 - port->count--;
38388 + atomic_dec(&port->count);
38389 }
38390 spin_unlock_irqrestore(&info->irq_spinlock, flags);
38391 port->blocked_open++;
38392 @@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38393
38394 if (debug_level >= DEBUG_LEVEL_INFO)
38395 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
38396 - __FILE__,__LINE__, tty->driver->name, port->count );
38397 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38398
38399 tty_unlock();
38400 schedule();
38401 @@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38402
38403 /* FIXME: Racy on hangup during close wait */
38404 if (extra_count)
38405 - port->count++;
38406 + atomic_inc(&port->count);
38407 port->blocked_open--;
38408
38409 if (debug_level >= DEBUG_LEVEL_INFO)
38410 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
38411 - __FILE__,__LINE__, tty->driver->name, port->count );
38412 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38413
38414 if (!retval)
38415 port->flags |= ASYNC_NORMAL_ACTIVE;
38416 @@ -3398,7 +3398,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38417
38418 if (debug_level >= DEBUG_LEVEL_INFO)
38419 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
38420 - __FILE__,__LINE__,tty->driver->name, info->port.count);
38421 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38422
38423 /* If port is closing, signal caller to try again */
38424 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38425 @@ -3417,10 +3417,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38426 spin_unlock_irqrestore(&info->netlock, flags);
38427 goto cleanup;
38428 }
38429 - info->port.count++;
38430 + atomic_inc(&info->port.count);
38431 spin_unlock_irqrestore(&info->netlock, flags);
38432
38433 - if (info->port.count == 1) {
38434 + if (atomic_read(&info->port.count) == 1) {
38435 /* 1st open on this device, init hardware */
38436 retval = startup(info);
38437 if (retval < 0)
38438 @@ -3444,8 +3444,8 @@ cleanup:
38439 if (retval) {
38440 if (tty->count == 1)
38441 info->port.tty = NULL; /* tty layer will release tty struct */
38442 - if(info->port.count)
38443 - info->port.count--;
38444 + if (atomic_read(&info->port.count))
38445 + atomic_dec(&info->port.count);
38446 }
38447
38448 return retval;
38449 @@ -7653,7 +7653,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38450 unsigned short new_crctype;
38451
38452 /* return error if TTY interface open */
38453 - if (info->port.count)
38454 + if (atomic_read(&info->port.count))
38455 return -EBUSY;
38456
38457 switch (encoding)
38458 @@ -7748,7 +7748,7 @@ static int hdlcdev_open(struct net_device *dev)
38459
38460 /* arbitrate between network and tty opens */
38461 spin_lock_irqsave(&info->netlock, flags);
38462 - if (info->port.count != 0 || info->netcount != 0) {
38463 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38464 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38465 spin_unlock_irqrestore(&info->netlock, flags);
38466 return -EBUSY;
38467 @@ -7834,7 +7834,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38468 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38469
38470 /* return error if TTY interface open */
38471 - if (info->port.count)
38472 + if (atomic_read(&info->port.count))
38473 return -EBUSY;
38474
38475 if (cmd != SIOCWANDEV)
38476 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
38477 index aa1debf..9297a16 100644
38478 --- a/drivers/tty/synclink_gt.c
38479 +++ b/drivers/tty/synclink_gt.c
38480 @@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38481 tty->driver_data = info;
38482 info->port.tty = tty;
38483
38484 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
38485 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
38486
38487 /* If port is closing, signal caller to try again */
38488 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38489 @@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38490 mutex_unlock(&info->port.mutex);
38491 goto cleanup;
38492 }
38493 - info->port.count++;
38494 + atomic_inc(&info->port.count);
38495 spin_unlock_irqrestore(&info->netlock, flags);
38496
38497 - if (info->port.count == 1) {
38498 + if (atomic_read(&info->port.count) == 1) {
38499 /* 1st open on this device, init hardware */
38500 retval = startup(info);
38501 if (retval < 0) {
38502 @@ -716,8 +716,8 @@ cleanup:
38503 if (retval) {
38504 if (tty->count == 1)
38505 info->port.tty = NULL; /* tty layer will release tty struct */
38506 - if(info->port.count)
38507 - info->port.count--;
38508 + if(atomic_read(&info->port.count))
38509 + atomic_dec(&info->port.count);
38510 }
38511
38512 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
38513 @@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38514
38515 if (sanity_check(info, tty->name, "close"))
38516 return;
38517 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
38518 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
38519
38520 if (tty_port_close_start(&info->port, tty, filp) == 0)
38521 goto cleanup;
38522 @@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38523 tty_port_close_end(&info->port, tty);
38524 info->port.tty = NULL;
38525 cleanup:
38526 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
38527 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
38528 }
38529
38530 static void hangup(struct tty_struct *tty)
38531 @@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
38532 shutdown(info);
38533
38534 spin_lock_irqsave(&info->port.lock, flags);
38535 - info->port.count = 0;
38536 + atomic_set(&info->port.count, 0);
38537 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38538 info->port.tty = NULL;
38539 spin_unlock_irqrestore(&info->port.lock, flags);
38540 @@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38541 unsigned short new_crctype;
38542
38543 /* return error if TTY interface open */
38544 - if (info->port.count)
38545 + if (atomic_read(&info->port.count))
38546 return -EBUSY;
38547
38548 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
38549 @@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
38550
38551 /* arbitrate between network and tty opens */
38552 spin_lock_irqsave(&info->netlock, flags);
38553 - if (info->port.count != 0 || info->netcount != 0) {
38554 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38555 DBGINFO(("%s hdlc_open busy\n", dev->name));
38556 spin_unlock_irqrestore(&info->netlock, flags);
38557 return -EBUSY;
38558 @@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38559 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
38560
38561 /* return error if TTY interface open */
38562 - if (info->port.count)
38563 + if (atomic_read(&info->port.count))
38564 return -EBUSY;
38565
38566 if (cmd != SIOCWANDEV)
38567 @@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
38568 if (port == NULL)
38569 continue;
38570 spin_lock(&port->lock);
38571 - if ((port->port.count || port->netcount) &&
38572 + if ((atomic_read(&port->port.count) || port->netcount) &&
38573 port->pending_bh && !port->bh_running &&
38574 !port->bh_requested) {
38575 DBGISR(("%s bh queued\n", port->device_name));
38576 @@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38577 spin_lock_irqsave(&info->lock, flags);
38578 if (!tty_hung_up_p(filp)) {
38579 extra_count = true;
38580 - port->count--;
38581 + atomic_dec(&port->count);
38582 }
38583 spin_unlock_irqrestore(&info->lock, flags);
38584 port->blocked_open++;
38585 @@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38586 remove_wait_queue(&port->open_wait, &wait);
38587
38588 if (extra_count)
38589 - port->count++;
38590 + atomic_inc(&port->count);
38591 port->blocked_open--;
38592
38593 if (!retval)
38594 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
38595 index a3dddc1..8905ab2 100644
38596 --- a/drivers/tty/synclinkmp.c
38597 +++ b/drivers/tty/synclinkmp.c
38598 @@ -742,7 +742,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38599
38600 if (debug_level >= DEBUG_LEVEL_INFO)
38601 printk("%s(%d):%s open(), old ref count = %d\n",
38602 - __FILE__,__LINE__,tty->driver->name, info->port.count);
38603 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38604
38605 /* If port is closing, signal caller to try again */
38606 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38607 @@ -761,10 +761,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38608 spin_unlock_irqrestore(&info->netlock, flags);
38609 goto cleanup;
38610 }
38611 - info->port.count++;
38612 + atomic_inc(&info->port.count);
38613 spin_unlock_irqrestore(&info->netlock, flags);
38614
38615 - if (info->port.count == 1) {
38616 + if (atomic_read(&info->port.count) == 1) {
38617 /* 1st open on this device, init hardware */
38618 retval = startup(info);
38619 if (retval < 0)
38620 @@ -788,8 +788,8 @@ cleanup:
38621 if (retval) {
38622 if (tty->count == 1)
38623 info->port.tty = NULL; /* tty layer will release tty struct */
38624 - if(info->port.count)
38625 - info->port.count--;
38626 + if(atomic_read(&info->port.count))
38627 + atomic_dec(&info->port.count);
38628 }
38629
38630 return retval;
38631 @@ -807,7 +807,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38632
38633 if (debug_level >= DEBUG_LEVEL_INFO)
38634 printk("%s(%d):%s close() entry, count=%d\n",
38635 - __FILE__,__LINE__, info->device_name, info->port.count);
38636 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38637
38638 if (tty_port_close_start(&info->port, tty, filp) == 0)
38639 goto cleanup;
38640 @@ -826,7 +826,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38641 cleanup:
38642 if (debug_level >= DEBUG_LEVEL_INFO)
38643 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
38644 - tty->driver->name, info->port.count);
38645 + tty->driver->name, atomic_read(&info->port.count));
38646 }
38647
38648 /* Called by tty_hangup() when a hangup is signaled.
38649 @@ -849,7 +849,7 @@ static void hangup(struct tty_struct *tty)
38650 shutdown(info);
38651
38652 spin_lock_irqsave(&info->port.lock, flags);
38653 - info->port.count = 0;
38654 + atomic_set(&info->port.count, 0);
38655 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38656 info->port.tty = NULL;
38657 spin_unlock_irqrestore(&info->port.lock, flags);
38658 @@ -1557,7 +1557,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38659 unsigned short new_crctype;
38660
38661 /* return error if TTY interface open */
38662 - if (info->port.count)
38663 + if (atomic_read(&info->port.count))
38664 return -EBUSY;
38665
38666 switch (encoding)
38667 @@ -1652,7 +1652,7 @@ static int hdlcdev_open(struct net_device *dev)
38668
38669 /* arbitrate between network and tty opens */
38670 spin_lock_irqsave(&info->netlock, flags);
38671 - if (info->port.count != 0 || info->netcount != 0) {
38672 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38673 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38674 spin_unlock_irqrestore(&info->netlock, flags);
38675 return -EBUSY;
38676 @@ -1738,7 +1738,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38677 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38678
38679 /* return error if TTY interface open */
38680 - if (info->port.count)
38681 + if (atomic_read(&info->port.count))
38682 return -EBUSY;
38683
38684 if (cmd != SIOCWANDEV)
38685 @@ -2623,7 +2623,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
38686 * do not request bottom half processing if the
38687 * device is not open in a normal mode.
38688 */
38689 - if ( port && (port->port.count || port->netcount) &&
38690 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
38691 port->pending_bh && !port->bh_running &&
38692 !port->bh_requested ) {
38693 if ( debug_level >= DEBUG_LEVEL_ISR )
38694 @@ -3321,12 +3321,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38695
38696 if (debug_level >= DEBUG_LEVEL_INFO)
38697 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
38698 - __FILE__,__LINE__, tty->driver->name, port->count );
38699 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38700
38701 spin_lock_irqsave(&info->lock, flags);
38702 if (!tty_hung_up_p(filp)) {
38703 extra_count = true;
38704 - port->count--;
38705 + atomic_dec(&port->count);
38706 }
38707 spin_unlock_irqrestore(&info->lock, flags);
38708 port->blocked_open++;
38709 @@ -3355,7 +3355,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38710
38711 if (debug_level >= DEBUG_LEVEL_INFO)
38712 printk("%s(%d):%s block_til_ready() count=%d\n",
38713 - __FILE__,__LINE__, tty->driver->name, port->count );
38714 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38715
38716 tty_unlock();
38717 schedule();
38718 @@ -3366,12 +3366,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38719 remove_wait_queue(&port->open_wait, &wait);
38720
38721 if (extra_count)
38722 - port->count++;
38723 + atomic_inc(&port->count);
38724 port->blocked_open--;
38725
38726 if (debug_level >= DEBUG_LEVEL_INFO)
38727 printk("%s(%d):%s block_til_ready() after, count=%d\n",
38728 - __FILE__,__LINE__, tty->driver->name, port->count );
38729 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38730
38731 if (!retval)
38732 port->flags |= ASYNC_NORMAL_ACTIVE;
38733 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38734 index 05728894..b9d44c6 100644
38735 --- a/drivers/tty/sysrq.c
38736 +++ b/drivers/tty/sysrq.c
38737 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38738 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38739 size_t count, loff_t *ppos)
38740 {
38741 - if (count) {
38742 + if (count && capable(CAP_SYS_ADMIN)) {
38743 char c;
38744
38745 if (get_user(c, buf))
38746 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38747 index b425c79..08a3f06 100644
38748 --- a/drivers/tty/tty_io.c
38749 +++ b/drivers/tty/tty_io.c
38750 @@ -3283,7 +3283,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38751
38752 void tty_default_fops(struct file_operations *fops)
38753 {
38754 - *fops = tty_fops;
38755 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38756 }
38757
38758 /*
38759 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38760 index 9911eb6..5abe0e1 100644
38761 --- a/drivers/tty/tty_ldisc.c
38762 +++ b/drivers/tty/tty_ldisc.c
38763 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38764 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38765 struct tty_ldisc_ops *ldo = ld->ops;
38766
38767 - ldo->refcount--;
38768 + atomic_dec(&ldo->refcount);
38769 module_put(ldo->owner);
38770 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38771
38772 @@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38773 spin_lock_irqsave(&tty_ldisc_lock, flags);
38774 tty_ldiscs[disc] = new_ldisc;
38775 new_ldisc->num = disc;
38776 - new_ldisc->refcount = 0;
38777 + atomic_set(&new_ldisc->refcount, 0);
38778 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38779
38780 return ret;
38781 @@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
38782 return -EINVAL;
38783
38784 spin_lock_irqsave(&tty_ldisc_lock, flags);
38785 - if (tty_ldiscs[disc]->refcount)
38786 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38787 ret = -EBUSY;
38788 else
38789 tty_ldiscs[disc] = NULL;
38790 @@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38791 if (ldops) {
38792 ret = ERR_PTR(-EAGAIN);
38793 if (try_module_get(ldops->owner)) {
38794 - ldops->refcount++;
38795 + atomic_inc(&ldops->refcount);
38796 ret = ldops;
38797 }
38798 }
38799 @@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38800 unsigned long flags;
38801
38802 spin_lock_irqsave(&tty_ldisc_lock, flags);
38803 - ldops->refcount--;
38804 + atomic_dec(&ldops->refcount);
38805 module_put(ldops->owner);
38806 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38807 }
38808 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
38809 index bf6e238..d401c04 100644
38810 --- a/drivers/tty/tty_port.c
38811 +++ b/drivers/tty/tty_port.c
38812 @@ -138,7 +138,7 @@ void tty_port_hangup(struct tty_port *port)
38813 unsigned long flags;
38814
38815 spin_lock_irqsave(&port->lock, flags);
38816 - port->count = 0;
38817 + atomic_set(&port->count, 0);
38818 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38819 if (port->tty) {
38820 set_bit(TTY_IO_ERROR, &port->tty->flags);
38821 @@ -264,7 +264,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38822 /* The port lock protects the port counts */
38823 spin_lock_irqsave(&port->lock, flags);
38824 if (!tty_hung_up_p(filp))
38825 - port->count--;
38826 + atomic_dec(&port->count);
38827 port->blocked_open++;
38828 spin_unlock_irqrestore(&port->lock, flags);
38829
38830 @@ -306,7 +306,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38831 we must not mess that up further */
38832 spin_lock_irqsave(&port->lock, flags);
38833 if (!tty_hung_up_p(filp))
38834 - port->count++;
38835 + atomic_inc(&port->count);
38836 port->blocked_open--;
38837 if (retval == 0)
38838 port->flags |= ASYNC_NORMAL_ACTIVE;
38839 @@ -326,19 +326,19 @@ int tty_port_close_start(struct tty_port *port,
38840 return 0;
38841 }
38842
38843 - if (tty->count == 1 && port->count != 1) {
38844 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
38845 printk(KERN_WARNING
38846 "tty_port_close_start: tty->count = 1 port count = %d.\n",
38847 - port->count);
38848 - port->count = 1;
38849 + atomic_read(&port->count));
38850 + atomic_set(&port->count, 1);
38851 }
38852 - if (--port->count < 0) {
38853 + if (atomic_dec_return(&port->count) < 0) {
38854 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
38855 - port->count);
38856 - port->count = 0;
38857 + atomic_read(&port->count));
38858 + atomic_set(&port->count, 0);
38859 }
38860
38861 - if (port->count) {
38862 + if (atomic_read(&port->count)) {
38863 spin_unlock_irqrestore(&port->lock, flags);
38864 if (port->ops->drop)
38865 port->ops->drop(port);
38866 @@ -418,7 +418,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
38867 {
38868 spin_lock_irq(&port->lock);
38869 if (!tty_hung_up_p(filp))
38870 - ++port->count;
38871 + atomic_inc(&port->count);
38872 spin_unlock_irq(&port->lock);
38873 tty_port_tty_set(port, tty);
38874
38875 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38876 index 48cc6f2..85584dd 100644
38877 --- a/drivers/tty/vt/keyboard.c
38878 +++ b/drivers/tty/vt/keyboard.c
38879 @@ -659,6 +659,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38880 kbd->kbdmode == VC_OFF) &&
38881 value != KVAL(K_SAK))
38882 return; /* SAK is allowed even in raw mode */
38883 +
38884 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38885 + {
38886 + void *func = fn_handler[value];
38887 + if (func == fn_show_state || func == fn_show_ptregs ||
38888 + func == fn_show_mem)
38889 + return;
38890 + }
38891 +#endif
38892 +
38893 fn_handler[value](vc);
38894 }
38895
38896 @@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38897 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38898 return -EFAULT;
38899
38900 - if (!capable(CAP_SYS_TTY_CONFIG))
38901 - perm = 0;
38902 -
38903 switch (cmd) {
38904 case KDGKBENT:
38905 /* Ensure another thread doesn't free it under us */
38906 @@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38907 spin_unlock_irqrestore(&kbd_event_lock, flags);
38908 return put_user(val, &user_kbe->kb_value);
38909 case KDSKBENT:
38910 + if (!capable(CAP_SYS_TTY_CONFIG))
38911 + perm = 0;
38912 +
38913 if (!perm)
38914 return -EPERM;
38915 if (!i && v == K_NOSUCHMAP) {
38916 @@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38917 int i, j, k;
38918 int ret;
38919
38920 - if (!capable(CAP_SYS_TTY_CONFIG))
38921 - perm = 0;
38922 -
38923 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38924 if (!kbs) {
38925 ret = -ENOMEM;
38926 @@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38927 kfree(kbs);
38928 return ((p && *p) ? -EOVERFLOW : 0);
38929 case KDSKBSENT:
38930 + if (!capable(CAP_SYS_TTY_CONFIG))
38931 + perm = 0;
38932 +
38933 if (!perm) {
38934 ret = -EPERM;
38935 goto reterr;
38936 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38937 index a783d53..cb30d94 100644
38938 --- a/drivers/uio/uio.c
38939 +++ b/drivers/uio/uio.c
38940 @@ -25,6 +25,7 @@
38941 #include <linux/kobject.h>
38942 #include <linux/cdev.h>
38943 #include <linux/uio_driver.h>
38944 +#include <asm/local.h>
38945
38946 #define UIO_MAX_DEVICES (1U << MINORBITS)
38947
38948 @@ -32,10 +33,10 @@ struct uio_device {
38949 struct module *owner;
38950 struct device *dev;
38951 int minor;
38952 - atomic_t event;
38953 + atomic_unchecked_t event;
38954 struct fasync_struct *async_queue;
38955 wait_queue_head_t wait;
38956 - int vma_count;
38957 + local_t vma_count;
38958 struct uio_info *info;
38959 struct kobject *map_dir;
38960 struct kobject *portio_dir;
38961 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38962 struct device_attribute *attr, char *buf)
38963 {
38964 struct uio_device *idev = dev_get_drvdata(dev);
38965 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38966 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38967 }
38968
38969 static struct device_attribute uio_class_attributes[] = {
38970 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38971 {
38972 struct uio_device *idev = info->uio_dev;
38973
38974 - atomic_inc(&idev->event);
38975 + atomic_inc_unchecked(&idev->event);
38976 wake_up_interruptible(&idev->wait);
38977 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38978 }
38979 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38980 }
38981
38982 listener->dev = idev;
38983 - listener->event_count = atomic_read(&idev->event);
38984 + listener->event_count = atomic_read_unchecked(&idev->event);
38985 filep->private_data = listener;
38986
38987 if (idev->info->open) {
38988 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38989 return -EIO;
38990
38991 poll_wait(filep, &idev->wait, wait);
38992 - if (listener->event_count != atomic_read(&idev->event))
38993 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38994 return POLLIN | POLLRDNORM;
38995 return 0;
38996 }
38997 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38998 do {
38999 set_current_state(TASK_INTERRUPTIBLE);
39000
39001 - event_count = atomic_read(&idev->event);
39002 + event_count = atomic_read_unchecked(&idev->event);
39003 if (event_count != listener->event_count) {
39004 if (copy_to_user(buf, &event_count, count))
39005 retval = -EFAULT;
39006 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
39007 static void uio_vma_open(struct vm_area_struct *vma)
39008 {
39009 struct uio_device *idev = vma->vm_private_data;
39010 - idev->vma_count++;
39011 + local_inc(&idev->vma_count);
39012 }
39013
39014 static void uio_vma_close(struct vm_area_struct *vma)
39015 {
39016 struct uio_device *idev = vma->vm_private_data;
39017 - idev->vma_count--;
39018 + local_dec(&idev->vma_count);
39019 }
39020
39021 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39022 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
39023 idev->owner = owner;
39024 idev->info = info;
39025 init_waitqueue_head(&idev->wait);
39026 - atomic_set(&idev->event, 0);
39027 + atomic_set_unchecked(&idev->event, 0);
39028
39029 ret = uio_get_minor(idev);
39030 if (ret)
39031 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
39032 index b7eb86a..36d28af 100644
39033 --- a/drivers/usb/atm/cxacru.c
39034 +++ b/drivers/usb/atm/cxacru.c
39035 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
39036 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
39037 if (ret < 2)
39038 return -EINVAL;
39039 - if (index < 0 || index > 0x7f)
39040 + if (index > 0x7f)
39041 return -EINVAL;
39042 pos += tmp;
39043
39044 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
39045 index ee62b35..b663594 100644
39046 --- a/drivers/usb/atm/usbatm.c
39047 +++ b/drivers/usb/atm/usbatm.c
39048 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39049 if (printk_ratelimit())
39050 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
39051 __func__, vpi, vci);
39052 - atomic_inc(&vcc->stats->rx_err);
39053 + atomic_inc_unchecked(&vcc->stats->rx_err);
39054 return;
39055 }
39056
39057 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39058 if (length > ATM_MAX_AAL5_PDU) {
39059 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
39060 __func__, length, vcc);
39061 - atomic_inc(&vcc->stats->rx_err);
39062 + atomic_inc_unchecked(&vcc->stats->rx_err);
39063 goto out;
39064 }
39065
39066 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39067 if (sarb->len < pdu_length) {
39068 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
39069 __func__, pdu_length, sarb->len, vcc);
39070 - atomic_inc(&vcc->stats->rx_err);
39071 + atomic_inc_unchecked(&vcc->stats->rx_err);
39072 goto out;
39073 }
39074
39075 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
39076 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
39077 __func__, vcc);
39078 - atomic_inc(&vcc->stats->rx_err);
39079 + atomic_inc_unchecked(&vcc->stats->rx_err);
39080 goto out;
39081 }
39082
39083 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39084 if (printk_ratelimit())
39085 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
39086 __func__, length);
39087 - atomic_inc(&vcc->stats->rx_drop);
39088 + atomic_inc_unchecked(&vcc->stats->rx_drop);
39089 goto out;
39090 }
39091
39092 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39093
39094 vcc->push(vcc, skb);
39095
39096 - atomic_inc(&vcc->stats->rx);
39097 + atomic_inc_unchecked(&vcc->stats->rx);
39098 out:
39099 skb_trim(sarb, 0);
39100 }
39101 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
39102 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
39103
39104 usbatm_pop(vcc, skb);
39105 - atomic_inc(&vcc->stats->tx);
39106 + atomic_inc_unchecked(&vcc->stats->tx);
39107
39108 skb = skb_dequeue(&instance->sndqueue);
39109 }
39110 @@ -770,11 +770,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
39111 if (!left--)
39112 return sprintf(page,
39113 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
39114 - atomic_read(&atm_dev->stats.aal5.tx),
39115 - atomic_read(&atm_dev->stats.aal5.tx_err),
39116 - atomic_read(&atm_dev->stats.aal5.rx),
39117 - atomic_read(&atm_dev->stats.aal5.rx_err),
39118 - atomic_read(&atm_dev->stats.aal5.rx_drop));
39119 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
39120 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
39121 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
39122 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
39123 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
39124
39125 if (!left--) {
39126 if (instance->disconnected)
39127 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
39128 index d956965..4179a77 100644
39129 --- a/drivers/usb/core/devices.c
39130 +++ b/drivers/usb/core/devices.c
39131 @@ -126,7 +126,7 @@ static const char format_endpt[] =
39132 * time it gets called.
39133 */
39134 static struct device_connect_event {
39135 - atomic_t count;
39136 + atomic_unchecked_t count;
39137 wait_queue_head_t wait;
39138 } device_event = {
39139 .count = ATOMIC_INIT(1),
39140 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
39141
39142 void usbfs_conn_disc_event(void)
39143 {
39144 - atomic_add(2, &device_event.count);
39145 + atomic_add_unchecked(2, &device_event.count);
39146 wake_up(&device_event.wait);
39147 }
39148
39149 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
39150
39151 poll_wait(file, &device_event.wait, wait);
39152
39153 - event_count = atomic_read(&device_event.count);
39154 + event_count = atomic_read_unchecked(&device_event.count);
39155 if (file->f_version != event_count) {
39156 file->f_version = event_count;
39157 return POLLIN | POLLRDNORM;
39158 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
39159 index 347bb05..63e1b73 100644
39160 --- a/drivers/usb/early/ehci-dbgp.c
39161 +++ b/drivers/usb/early/ehci-dbgp.c
39162 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
39163
39164 #ifdef CONFIG_KGDB
39165 static struct kgdb_io kgdbdbgp_io_ops;
39166 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
39167 +static struct kgdb_io kgdbdbgp_io_ops_console;
39168 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
39169 #else
39170 #define dbgp_kgdb_mode (0)
39171 #endif
39172 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
39173 .write_char = kgdbdbgp_write_char,
39174 };
39175
39176 +static struct kgdb_io kgdbdbgp_io_ops_console = {
39177 + .name = "kgdbdbgp",
39178 + .read_char = kgdbdbgp_read_char,
39179 + .write_char = kgdbdbgp_write_char,
39180 + .is_console = 1
39181 +};
39182 +
39183 static int kgdbdbgp_wait_time;
39184
39185 static int __init kgdbdbgp_parse_config(char *str)
39186 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
39187 ptr++;
39188 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
39189 }
39190 - kgdb_register_io_module(&kgdbdbgp_io_ops);
39191 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
39192 + if (early_dbgp_console.index != -1)
39193 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
39194 + else
39195 + kgdb_register_io_module(&kgdbdbgp_io_ops);
39196
39197 return 0;
39198 }
39199 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
39200 index 5b3f5ff..6e00893 100644
39201 --- a/drivers/usb/gadget/u_serial.c
39202 +++ b/drivers/usb/gadget/u_serial.c
39203 @@ -731,9 +731,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
39204 spin_lock_irq(&port->port_lock);
39205
39206 /* already open? Great. */
39207 - if (port->port.count) {
39208 + if (atomic_read(&port->port.count)) {
39209 status = 0;
39210 - port->port.count++;
39211 + atomic_inc(&port->port.count);
39212
39213 /* currently opening/closing? wait ... */
39214 } else if (port->openclose) {
39215 @@ -792,7 +792,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
39216 tty->driver_data = port;
39217 port->port.tty = tty;
39218
39219 - port->port.count = 1;
39220 + atomic_set(&port->port.count, 1);
39221 port->openclose = false;
39222
39223 /* if connected, start the I/O stream */
39224 @@ -834,11 +834,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39225
39226 spin_lock_irq(&port->port_lock);
39227
39228 - if (port->port.count != 1) {
39229 - if (port->port.count == 0)
39230 + if (atomic_read(&port->port.count) != 1) {
39231 + if (atomic_read(&port->port.count) == 0)
39232 WARN_ON(1);
39233 else
39234 - --port->port.count;
39235 + atomic_dec(&port->port.count);
39236 goto exit;
39237 }
39238
39239 @@ -848,7 +848,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39240 * and sleep if necessary
39241 */
39242 port->openclose = true;
39243 - port->port.count = 0;
39244 + atomic_set(&port->port.count, 0);
39245
39246 gser = port->port_usb;
39247 if (gser && gser->disconnect)
39248 @@ -1152,7 +1152,7 @@ static int gs_closed(struct gs_port *port)
39249 int cond;
39250
39251 spin_lock_irq(&port->port_lock);
39252 - cond = (port->port.count == 0) && !port->openclose;
39253 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
39254 spin_unlock_irq(&port->port_lock);
39255 return cond;
39256 }
39257 @@ -1265,7 +1265,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
39258 /* if it's already open, start I/O ... and notify the serial
39259 * protocol about open/close status (connect/disconnect).
39260 */
39261 - if (port->port.count) {
39262 + if (atomic_read(&port->port.count)) {
39263 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
39264 gs_start_io(port);
39265 if (gser->connect)
39266 @@ -1312,7 +1312,7 @@ void gserial_disconnect(struct gserial *gser)
39267
39268 port->port_usb = NULL;
39269 gser->ioport = NULL;
39270 - if (port->port.count > 0 || port->openclose) {
39271 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
39272 wake_up_interruptible(&port->drain_wait);
39273 if (port->port.tty)
39274 tty_hangup(port->port.tty);
39275 @@ -1328,7 +1328,7 @@ void gserial_disconnect(struct gserial *gser)
39276
39277 /* finally, free any unused/unusable I/O buffers */
39278 spin_lock_irqsave(&port->port_lock, flags);
39279 - if (port->port.count == 0 && !port->openclose)
39280 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
39281 gs_buf_free(&port->port_write_buf);
39282 gs_free_requests(gser->out, &port->read_pool, NULL);
39283 gs_free_requests(gser->out, &port->read_queue, NULL);
39284 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
39285 index b9cca6d..75c75df 100644
39286 --- a/drivers/usb/serial/console.c
39287 +++ b/drivers/usb/serial/console.c
39288 @@ -127,7 +127,7 @@ static int usb_console_setup(struct console *co, char *options)
39289
39290 info->port = port;
39291
39292 - ++port->port.count;
39293 + atomic_inc(&port->port.count);
39294 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
39295 if (serial->type->set_termios) {
39296 /*
39297 @@ -177,7 +177,7 @@ static int usb_console_setup(struct console *co, char *options)
39298 }
39299 /* Now that any required fake tty operations are completed restore
39300 * the tty port count */
39301 - --port->port.count;
39302 + atomic_dec(&port->port.count);
39303 /* The console is special in terms of closing the device so
39304 * indicate this port is now acting as a system console. */
39305 port->port.console = 1;
39306 @@ -190,7 +190,7 @@ static int usb_console_setup(struct console *co, char *options)
39307 free_tty:
39308 kfree(tty);
39309 reset_open_count:
39310 - port->port.count = 0;
39311 + atomic_set(&port->port.count, 0);
39312 usb_autopm_put_interface(serial->interface);
39313 error_get_interface:
39314 usb_serial_put(serial);
39315 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39316 index d6bea3e..60b250e 100644
39317 --- a/drivers/usb/wusbcore/wa-hc.h
39318 +++ b/drivers/usb/wusbcore/wa-hc.h
39319 @@ -192,7 +192,7 @@ struct wahc {
39320 struct list_head xfer_delayed_list;
39321 spinlock_t xfer_list_lock;
39322 struct work_struct xfer_work;
39323 - atomic_t xfer_id_count;
39324 + atomic_unchecked_t xfer_id_count;
39325 };
39326
39327
39328 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
39329 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39330 spin_lock_init(&wa->xfer_list_lock);
39331 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39332 - atomic_set(&wa->xfer_id_count, 1);
39333 + atomic_set_unchecked(&wa->xfer_id_count, 1);
39334 }
39335
39336 /**
39337 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
39338 index 57c01ab..8a05959 100644
39339 --- a/drivers/usb/wusbcore/wa-xfer.c
39340 +++ b/drivers/usb/wusbcore/wa-xfer.c
39341 @@ -296,7 +296,7 @@ out:
39342 */
39343 static void wa_xfer_id_init(struct wa_xfer *xfer)
39344 {
39345 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39346 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39347 }
39348
39349 /*
39350 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39351 index 112156f..eb81154 100644
39352 --- a/drivers/vhost/vhost.c
39353 +++ b/drivers/vhost/vhost.c
39354 @@ -635,7 +635,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
39355 return 0;
39356 }
39357
39358 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39359 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39360 {
39361 struct file *eventfp, *filep = NULL,
39362 *pollstart = NULL, *pollstop = NULL;
39363 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39364 index b0b2ac3..89a4399 100644
39365 --- a/drivers/video/aty/aty128fb.c
39366 +++ b/drivers/video/aty/aty128fb.c
39367 @@ -148,7 +148,7 @@ enum {
39368 };
39369
39370 /* Must match above enum */
39371 -static const char *r128_family[] __devinitdata = {
39372 +static const char *r128_family[] __devinitconst = {
39373 "AGP",
39374 "PCI",
39375 "PRO AGP",
39376 diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
39377 index 2e471c2..f00eb3e 100644
39378 --- a/drivers/video/console/fbcon.c
39379 +++ b/drivers/video/console/fbcon.c
39380 @@ -442,7 +442,7 @@ static int __init fb_console_setup(char *this_opt)
39381
39382 while ((options = strsep(&this_opt, ",")) != NULL) {
39383 if (!strncmp(options, "font:", 5))
39384 - strcpy(fontname, options + 5);
39385 + strlcpy(fontname, options + 5, sizeof(fontname));
39386
39387 if (!strncmp(options, "scrollback:", 11)) {
39388 options += 11;
39389 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39390 index 5c3960d..15cf8fc 100644
39391 --- a/drivers/video/fbcmap.c
39392 +++ b/drivers/video/fbcmap.c
39393 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
39394 rc = -ENODEV;
39395 goto out;
39396 }
39397 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39398 - !info->fbops->fb_setcmap)) {
39399 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39400 rc = -EINVAL;
39401 goto out1;
39402 }
39403 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
39404 index 0dff12a..2ef47b3 100644
39405 --- a/drivers/video/fbmem.c
39406 +++ b/drivers/video/fbmem.c
39407 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39408 image->dx += image->width + 8;
39409 }
39410 } else if (rotate == FB_ROTATE_UD) {
39411 - for (x = 0; x < num && image->dx >= 0; x++) {
39412 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39413 info->fbops->fb_imageblit(info, image);
39414 image->dx -= image->width + 8;
39415 }
39416 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39417 image->dy += image->height + 8;
39418 }
39419 } else if (rotate == FB_ROTATE_CCW) {
39420 - for (x = 0; x < num && image->dy >= 0; x++) {
39421 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39422 info->fbops->fb_imageblit(info, image);
39423 image->dy -= image->height + 8;
39424 }
39425 @@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
39426 return -EFAULT;
39427 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39428 return -EINVAL;
39429 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39430 + if (con2fb.framebuffer >= FB_MAX)
39431 return -EINVAL;
39432 if (!registered_fb[con2fb.framebuffer])
39433 request_module("fb%d", con2fb.framebuffer);
39434 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39435 index 5a5d092..265c5ed 100644
39436 --- a/drivers/video/geode/gx1fb_core.c
39437 +++ b/drivers/video/geode/gx1fb_core.c
39438 @@ -29,7 +29,7 @@ static int crt_option = 1;
39439 static char panel_option[32] = "";
39440
39441 /* Modes relevant to the GX1 (taken from modedb.c) */
39442 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
39443 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
39444 /* 640x480-60 VESA */
39445 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39446 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
39447 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
39448 index 0fad23f..0e9afa4 100644
39449 --- a/drivers/video/gxt4500.c
39450 +++ b/drivers/video/gxt4500.c
39451 @@ -156,7 +156,7 @@ struct gxt4500_par {
39452 static char *mode_option;
39453
39454 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39455 -static const struct fb_videomode defaultmode __devinitdata = {
39456 +static const struct fb_videomode defaultmode __devinitconst = {
39457 .refresh = 60,
39458 .xres = 1280,
39459 .yres = 1024,
39460 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
39461 return 0;
39462 }
39463
39464 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39465 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39466 .id = "IBM GXT4500P",
39467 .type = FB_TYPE_PACKED_PIXELS,
39468 .visual = FB_VISUAL_PSEUDOCOLOR,
39469 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39470 index 7672d2e..b56437f 100644
39471 --- a/drivers/video/i810/i810_accel.c
39472 +++ b/drivers/video/i810/i810_accel.c
39473 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
39474 }
39475 }
39476 printk("ringbuffer lockup!!!\n");
39477 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39478 i810_report_error(mmio);
39479 par->dev_flags |= LOCKUP;
39480 info->pixmap.scan_align = 1;
39481 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
39482 index b83f361..2b05a91 100644
39483 --- a/drivers/video/i810/i810_main.c
39484 +++ b/drivers/video/i810/i810_main.c
39485 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
39486 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39487
39488 /* PCI */
39489 -static const char *i810_pci_list[] __devinitdata = {
39490 +static const char *i810_pci_list[] __devinitconst = {
39491 "Intel(R) 810 Framebuffer Device" ,
39492 "Intel(R) 810-DC100 Framebuffer Device" ,
39493 "Intel(R) 810E Framebuffer Device" ,
39494 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39495 index de36693..3c63fc2 100644
39496 --- a/drivers/video/jz4740_fb.c
39497 +++ b/drivers/video/jz4740_fb.c
39498 @@ -136,7 +136,7 @@ struct jzfb {
39499 uint32_t pseudo_palette[16];
39500 };
39501
39502 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39503 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39504 .id = "JZ4740 FB",
39505 .type = FB_TYPE_PACKED_PIXELS,
39506 .visual = FB_VISUAL_TRUECOLOR,
39507 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39508 index 3c14e43..eafa544 100644
39509 --- a/drivers/video/logo/logo_linux_clut224.ppm
39510 +++ b/drivers/video/logo/logo_linux_clut224.ppm
39511 @@ -1,1604 +1,1123 @@
39512 P3
39513 -# Standard 224-color Linux logo
39514 80 80
39515 255
39516 - 0 0 0 0 0 0 0 0 0 0 0 0
39517 - 0 0 0 0 0 0 0 0 0 0 0 0
39518 - 0 0 0 0 0 0 0 0 0 0 0 0
39519 - 0 0 0 0 0 0 0 0 0 0 0 0
39520 - 0 0 0 0 0 0 0 0 0 0 0 0
39521 - 0 0 0 0 0 0 0 0 0 0 0 0
39522 - 0 0 0 0 0 0 0 0 0 0 0 0
39523 - 0 0 0 0 0 0 0 0 0 0 0 0
39524 - 0 0 0 0 0 0 0 0 0 0 0 0
39525 - 6 6 6 6 6 6 10 10 10 10 10 10
39526 - 10 10 10 6 6 6 6 6 6 6 6 6
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 0 0 0 0 0 0 0 0 0
39532 - 0 0 0 0 0 0 0 0 0 0 0 0
39533 - 0 0 0 0 0 0 0 0 0 0 0 0
39534 - 0 0 0 0 0 0 0 0 0 0 0 0
39535 - 0 0 0 0 0 0 0 0 0 0 0 0
39536 - 0 0 0 0 0 0 0 0 0 0 0 0
39537 - 0 0 0 0 0 0 0 0 0 0 0 0
39538 - 0 0 0 0 0 0 0 0 0 0 0 0
39539 - 0 0 0 0 0 0 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 0 0 0 0 0 0
39541 - 0 0 0 0 0 0 0 0 0 0 0 0
39542 - 0 0 0 0 0 0 0 0 0 0 0 0
39543 - 0 0 0 0 0 0 0 0 0 0 0 0
39544 - 0 0 0 6 6 6 10 10 10 14 14 14
39545 - 22 22 22 26 26 26 30 30 30 34 34 34
39546 - 30 30 30 30 30 30 26 26 26 18 18 18
39547 - 14 14 14 10 10 10 6 6 6 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 0 0 0 0 0 0 0 0 0
39552 - 0 0 0 0 0 0 0 0 0 0 0 0
39553 - 0 0 0 0 0 0 0 0 0 0 0 0
39554 - 0 0 0 0 0 0 0 0 0 0 0 0
39555 - 0 0 0 0 0 0 0 0 0 0 0 0
39556 - 0 0 0 0 0 0 0 0 0 0 0 0
39557 - 0 0 0 0 0 1 0 0 1 0 0 0
39558 - 0 0 0 0 0 0 0 0 0 0 0 0
39559 - 0 0 0 0 0 0 0 0 0 0 0 0
39560 - 0 0 0 0 0 0 0 0 0 0 0 0
39561 - 0 0 0 0 0 0 0 0 0 0 0 0
39562 - 0 0 0 0 0 0 0 0 0 0 0 0
39563 - 0 0 0 0 0 0 0 0 0 0 0 0
39564 - 6 6 6 14 14 14 26 26 26 42 42 42
39565 - 54 54 54 66 66 66 78 78 78 78 78 78
39566 - 78 78 78 74 74 74 66 66 66 54 54 54
39567 - 42 42 42 26 26 26 18 18 18 10 10 10
39568 - 6 6 6 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 0 0 0 0 0 0 0 0 0 0 0 0
39572 - 0 0 0 0 0 0 0 0 0 0 0 0
39573 - 0 0 0 0 0 0 0 0 0 0 0 0
39574 - 0 0 0 0 0 0 0 0 0 0 0 0
39575 - 0 0 0 0 0 0 0 0 0 0 0 0
39576 - 0 0 0 0 0 0 0 0 0 0 0 0
39577 - 0 0 1 0 0 0 0 0 0 0 0 0
39578 - 0 0 0 0 0 0 0 0 0 0 0 0
39579 - 0 0 0 0 0 0 0 0 0 0 0 0
39580 - 0 0 0 0 0 0 0 0 0 0 0 0
39581 - 0 0 0 0 0 0 0 0 0 0 0 0
39582 - 0 0 0 0 0 0 0 0 0 0 0 0
39583 - 0 0 0 0 0 0 0 0 0 10 10 10
39584 - 22 22 22 42 42 42 66 66 66 86 86 86
39585 - 66 66 66 38 38 38 38 38 38 22 22 22
39586 - 26 26 26 34 34 34 54 54 54 66 66 66
39587 - 86 86 86 70 70 70 46 46 46 26 26 26
39588 - 14 14 14 6 6 6 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 0 0 0 0 0 0 0 0 0 0 0 0
39592 - 0 0 0 0 0 0 0 0 0 0 0 0
39593 - 0 0 0 0 0 0 0 0 0 0 0 0
39594 - 0 0 0 0 0 0 0 0 0 0 0 0
39595 - 0 0 0 0 0 0 0 0 0 0 0 0
39596 - 0 0 0 0 0 0 0 0 0 0 0 0
39597 - 0 0 1 0 0 1 0 0 1 0 0 0
39598 - 0 0 0 0 0 0 0 0 0 0 0 0
39599 - 0 0 0 0 0 0 0 0 0 0 0 0
39600 - 0 0 0 0 0 0 0 0 0 0 0 0
39601 - 0 0 0 0 0 0 0 0 0 0 0 0
39602 - 0 0 0 0 0 0 0 0 0 0 0 0
39603 - 0 0 0 0 0 0 10 10 10 26 26 26
39604 - 50 50 50 82 82 82 58 58 58 6 6 6
39605 - 2 2 6 2 2 6 2 2 6 2 2 6
39606 - 2 2 6 2 2 6 2 2 6 2 2 6
39607 - 6 6 6 54 54 54 86 86 86 66 66 66
39608 - 38 38 38 18 18 18 6 6 6 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 0 0 0
39611 - 0 0 0 0 0 0 0 0 0 0 0 0
39612 - 0 0 0 0 0 0 0 0 0 0 0 0
39613 - 0 0 0 0 0 0 0 0 0 0 0 0
39614 - 0 0 0 0 0 0 0 0 0 0 0 0
39615 - 0 0 0 0 0 0 0 0 0 0 0 0
39616 - 0 0 0 0 0 0 0 0 0 0 0 0
39617 - 0 0 0 0 0 0 0 0 0 0 0 0
39618 - 0 0 0 0 0 0 0 0 0 0 0 0
39619 - 0 0 0 0 0 0 0 0 0 0 0 0
39620 - 0 0 0 0 0 0 0 0 0 0 0 0
39621 - 0 0 0 0 0 0 0 0 0 0 0 0
39622 - 0 0 0 0 0 0 0 0 0 0 0 0
39623 - 0 0 0 6 6 6 22 22 22 50 50 50
39624 - 78 78 78 34 34 34 2 2 6 2 2 6
39625 - 2 2 6 2 2 6 2 2 6 2 2 6
39626 - 2 2 6 2 2 6 2 2 6 2 2 6
39627 - 2 2 6 2 2 6 6 6 6 70 70 70
39628 - 78 78 78 46 46 46 22 22 22 6 6 6
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 0 0 0
39631 - 0 0 0 0 0 0 0 0 0 0 0 0
39632 - 0 0 0 0 0 0 0 0 0 0 0 0
39633 - 0 0 0 0 0 0 0 0 0 0 0 0
39634 - 0 0 0 0 0 0 0 0 0 0 0 0
39635 - 0 0 0 0 0 0 0 0 0 0 0 0
39636 - 0 0 0 0 0 0 0 0 0 0 0 0
39637 - 0 0 1 0 0 1 0 0 1 0 0 0
39638 - 0 0 0 0 0 0 0 0 0 0 0 0
39639 - 0 0 0 0 0 0 0 0 0 0 0 0
39640 - 0 0 0 0 0 0 0 0 0 0 0 0
39641 - 0 0 0 0 0 0 0 0 0 0 0 0
39642 - 0 0 0 0 0 0 0 0 0 0 0 0
39643 - 6 6 6 18 18 18 42 42 42 82 82 82
39644 - 26 26 26 2 2 6 2 2 6 2 2 6
39645 - 2 2 6 2 2 6 2 2 6 2 2 6
39646 - 2 2 6 2 2 6 2 2 6 14 14 14
39647 - 46 46 46 34 34 34 6 6 6 2 2 6
39648 - 42 42 42 78 78 78 42 42 42 18 18 18
39649 - 6 6 6 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 0 0 0
39651 - 0 0 0 0 0 0 0 0 0 0 0 0
39652 - 0 0 0 0 0 0 0 0 0 0 0 0
39653 - 0 0 0 0 0 0 0 0 0 0 0 0
39654 - 0 0 0 0 0 0 0 0 0 0 0 0
39655 - 0 0 0 0 0 0 0 0 0 0 0 0
39656 - 0 0 0 0 0 0 0 0 0 0 0 0
39657 - 0 0 1 0 0 0 0 0 1 0 0 0
39658 - 0 0 0 0 0 0 0 0 0 0 0 0
39659 - 0 0 0 0 0 0 0 0 0 0 0 0
39660 - 0 0 0 0 0 0 0 0 0 0 0 0
39661 - 0 0 0 0 0 0 0 0 0 0 0 0
39662 - 0 0 0 0 0 0 0 0 0 0 0 0
39663 - 10 10 10 30 30 30 66 66 66 58 58 58
39664 - 2 2 6 2 2 6 2 2 6 2 2 6
39665 - 2 2 6 2 2 6 2 2 6 2 2 6
39666 - 2 2 6 2 2 6 2 2 6 26 26 26
39667 - 86 86 86 101 101 101 46 46 46 10 10 10
39668 - 2 2 6 58 58 58 70 70 70 34 34 34
39669 - 10 10 10 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 0 0 0 0 0 0
39671 - 0 0 0 0 0 0 0 0 0 0 0 0
39672 - 0 0 0 0 0 0 0 0 0 0 0 0
39673 - 0 0 0 0 0 0 0 0 0 0 0 0
39674 - 0 0 0 0 0 0 0 0 0 0 0 0
39675 - 0 0 0 0 0 0 0 0 0 0 0 0
39676 - 0 0 0 0 0 0 0 0 0 0 0 0
39677 - 0 0 1 0 0 1 0 0 1 0 0 0
39678 - 0 0 0 0 0 0 0 0 0 0 0 0
39679 - 0 0 0 0 0 0 0 0 0 0 0 0
39680 - 0 0 0 0 0 0 0 0 0 0 0 0
39681 - 0 0 0 0 0 0 0 0 0 0 0 0
39682 - 0 0 0 0 0 0 0 0 0 0 0 0
39683 - 14 14 14 42 42 42 86 86 86 10 10 10
39684 - 2 2 6 2 2 6 2 2 6 2 2 6
39685 - 2 2 6 2 2 6 2 2 6 2 2 6
39686 - 2 2 6 2 2 6 2 2 6 30 30 30
39687 - 94 94 94 94 94 94 58 58 58 26 26 26
39688 - 2 2 6 6 6 6 78 78 78 54 54 54
39689 - 22 22 22 6 6 6 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 0 0 0 0 0 0
39691 - 0 0 0 0 0 0 0 0 0 0 0 0
39692 - 0 0 0 0 0 0 0 0 0 0 0 0
39693 - 0 0 0 0 0 0 0 0 0 0 0 0
39694 - 0 0 0 0 0 0 0 0 0 0 0 0
39695 - 0 0 0 0 0 0 0 0 0 0 0 0
39696 - 0 0 0 0 0 0 0 0 0 0 0 0
39697 - 0 0 0 0 0 0 0 0 0 0 0 0
39698 - 0 0 0 0 0 0 0 0 0 0 0 0
39699 - 0 0 0 0 0 0 0 0 0 0 0 0
39700 - 0 0 0 0 0 0 0 0 0 0 0 0
39701 - 0 0 0 0 0 0 0 0 0 0 0 0
39702 - 0 0 0 0 0 0 0 0 0 6 6 6
39703 - 22 22 22 62 62 62 62 62 62 2 2 6
39704 - 2 2 6 2 2 6 2 2 6 2 2 6
39705 - 2 2 6 2 2 6 2 2 6 2 2 6
39706 - 2 2 6 2 2 6 2 2 6 26 26 26
39707 - 54 54 54 38 38 38 18 18 18 10 10 10
39708 - 2 2 6 2 2 6 34 34 34 82 82 82
39709 - 38 38 38 14 14 14 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 0 0 0 0 0 0
39711 - 0 0 0 0 0 0 0 0 0 0 0 0
39712 - 0 0 0 0 0 0 0 0 0 0 0 0
39713 - 0 0 0 0 0 0 0 0 0 0 0 0
39714 - 0 0 0 0 0 0 0 0 0 0 0 0
39715 - 0 0 0 0 0 0 0 0 0 0 0 0
39716 - 0 0 0 0 0 0 0 0 0 0 0 0
39717 - 0 0 0 0 0 1 0 0 1 0 0 0
39718 - 0 0 0 0 0 0 0 0 0 0 0 0
39719 - 0 0 0 0 0 0 0 0 0 0 0 0
39720 - 0 0 0 0 0 0 0 0 0 0 0 0
39721 - 0 0 0 0 0 0 0 0 0 0 0 0
39722 - 0 0 0 0 0 0 0 0 0 6 6 6
39723 - 30 30 30 78 78 78 30 30 30 2 2 6
39724 - 2 2 6 2 2 6 2 2 6 2 2 6
39725 - 2 2 6 2 2 6 2 2 6 2 2 6
39726 - 2 2 6 2 2 6 2 2 6 10 10 10
39727 - 10 10 10 2 2 6 2 2 6 2 2 6
39728 - 2 2 6 2 2 6 2 2 6 78 78 78
39729 - 50 50 50 18 18 18 6 6 6 0 0 0
39730 - 0 0 0 0 0 0 0 0 0 0 0 0
39731 - 0 0 0 0 0 0 0 0 0 0 0 0
39732 - 0 0 0 0 0 0 0 0 0 0 0 0
39733 - 0 0 0 0 0 0 0 0 0 0 0 0
39734 - 0 0 0 0 0 0 0 0 0 0 0 0
39735 - 0 0 0 0 0 0 0 0 0 0 0 0
39736 - 0 0 0 0 0 0 0 0 0 0 0 0
39737 - 0 0 1 0 0 0 0 0 0 0 0 0
39738 - 0 0 0 0 0 0 0 0 0 0 0 0
39739 - 0 0 0 0 0 0 0 0 0 0 0 0
39740 - 0 0 0 0 0 0 0 0 0 0 0 0
39741 - 0 0 0 0 0 0 0 0 0 0 0 0
39742 - 0 0 0 0 0 0 0 0 0 10 10 10
39743 - 38 38 38 86 86 86 14 14 14 2 2 6
39744 - 2 2 6 2 2 6 2 2 6 2 2 6
39745 - 2 2 6 2 2 6 2 2 6 2 2 6
39746 - 2 2 6 2 2 6 2 2 6 2 2 6
39747 - 2 2 6 2 2 6 2 2 6 2 2 6
39748 - 2 2 6 2 2 6 2 2 6 54 54 54
39749 - 66 66 66 26 26 26 6 6 6 0 0 0
39750 - 0 0 0 0 0 0 0 0 0 0 0 0
39751 - 0 0 0 0 0 0 0 0 0 0 0 0
39752 - 0 0 0 0 0 0 0 0 0 0 0 0
39753 - 0 0 0 0 0 0 0 0 0 0 0 0
39754 - 0 0 0 0 0 0 0 0 0 0 0 0
39755 - 0 0 0 0 0 0 0 0 0 0 0 0
39756 - 0 0 0 0 0 0 0 0 0 0 0 0
39757 - 0 0 0 0 0 1 0 0 1 0 0 0
39758 - 0 0 0 0 0 0 0 0 0 0 0 0
39759 - 0 0 0 0 0 0 0 0 0 0 0 0
39760 - 0 0 0 0 0 0 0 0 0 0 0 0
39761 - 0 0 0 0 0 0 0 0 0 0 0 0
39762 - 0 0 0 0 0 0 0 0 0 14 14 14
39763 - 42 42 42 82 82 82 2 2 6 2 2 6
39764 - 2 2 6 6 6 6 10 10 10 2 2 6
39765 - 2 2 6 2 2 6 2 2 6 2 2 6
39766 - 2 2 6 2 2 6 2 2 6 6 6 6
39767 - 14 14 14 10 10 10 2 2 6 2 2 6
39768 - 2 2 6 2 2 6 2 2 6 18 18 18
39769 - 82 82 82 34 34 34 10 10 10 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 0 0 0 0 0 0 0 0 0
39772 - 0 0 0 0 0 0 0 0 0 0 0 0
39773 - 0 0 0 0 0 0 0 0 0 0 0 0
39774 - 0 0 0 0 0 0 0 0 0 0 0 0
39775 - 0 0 0 0 0 0 0 0 0 0 0 0
39776 - 0 0 0 0 0 0 0 0 0 0 0 0
39777 - 0 0 1 0 0 0 0 0 0 0 0 0
39778 - 0 0 0 0 0 0 0 0 0 0 0 0
39779 - 0 0 0 0 0 0 0 0 0 0 0 0
39780 - 0 0 0 0 0 0 0 0 0 0 0 0
39781 - 0 0 0 0 0 0 0 0 0 0 0 0
39782 - 0 0 0 0 0 0 0 0 0 14 14 14
39783 - 46 46 46 86 86 86 2 2 6 2 2 6
39784 - 6 6 6 6 6 6 22 22 22 34 34 34
39785 - 6 6 6 2 2 6 2 2 6 2 2 6
39786 - 2 2 6 2 2 6 18 18 18 34 34 34
39787 - 10 10 10 50 50 50 22 22 22 2 2 6
39788 - 2 2 6 2 2 6 2 2 6 10 10 10
39789 - 86 86 86 42 42 42 14 14 14 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 0 0 0 0 0 0
39792 - 0 0 0 0 0 0 0 0 0 0 0 0
39793 - 0 0 0 0 0 0 0 0 0 0 0 0
39794 - 0 0 0 0 0 0 0 0 0 0 0 0
39795 - 0 0 0 0 0 0 0 0 0 0 0 0
39796 - 0 0 0 0 0 0 0 0 0 0 0 0
39797 - 0 0 1 0 0 1 0 0 1 0 0 0
39798 - 0 0 0 0 0 0 0 0 0 0 0 0
39799 - 0 0 0 0 0 0 0 0 0 0 0 0
39800 - 0 0 0 0 0 0 0 0 0 0 0 0
39801 - 0 0 0 0 0 0 0 0 0 0 0 0
39802 - 0 0 0 0 0 0 0 0 0 14 14 14
39803 - 46 46 46 86 86 86 2 2 6 2 2 6
39804 - 38 38 38 116 116 116 94 94 94 22 22 22
39805 - 22 22 22 2 2 6 2 2 6 2 2 6
39806 - 14 14 14 86 86 86 138 138 138 162 162 162
39807 -154 154 154 38 38 38 26 26 26 6 6 6
39808 - 2 2 6 2 2 6 2 2 6 2 2 6
39809 - 86 86 86 46 46 46 14 14 14 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 0 0 0 0 0 0 0 0 0 0 0 0
39813 - 0 0 0 0 0 0 0 0 0 0 0 0
39814 - 0 0 0 0 0 0 0 0 0 0 0 0
39815 - 0 0 0 0 0 0 0 0 0 0 0 0
39816 - 0 0 0 0 0 0 0 0 0 0 0 0
39817 - 0 0 0 0 0 0 0 0 0 0 0 0
39818 - 0 0 0 0 0 0 0 0 0 0 0 0
39819 - 0 0 0 0 0 0 0 0 0 0 0 0
39820 - 0 0 0 0 0 0 0 0 0 0 0 0
39821 - 0 0 0 0 0 0 0 0 0 0 0 0
39822 - 0 0 0 0 0 0 0 0 0 14 14 14
39823 - 46 46 46 86 86 86 2 2 6 14 14 14
39824 -134 134 134 198 198 198 195 195 195 116 116 116
39825 - 10 10 10 2 2 6 2 2 6 6 6 6
39826 -101 98 89 187 187 187 210 210 210 218 218 218
39827 -214 214 214 134 134 134 14 14 14 6 6 6
39828 - 2 2 6 2 2 6 2 2 6 2 2 6
39829 - 86 86 86 50 50 50 18 18 18 6 6 6
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 0 0 0 0 0 0 0 0 0
39832 - 0 0 0 0 0 0 0 0 0 0 0 0
39833 - 0 0 0 0 0 0 0 0 0 0 0 0
39834 - 0 0 0 0 0 0 0 0 0 0 0 0
39835 - 0 0 0 0 0 0 0 0 0 0 0 0
39836 - 0 0 0 0 0 0 0 0 1 0 0 0
39837 - 0 0 1 0 0 1 0 0 1 0 0 0
39838 - 0 0 0 0 0 0 0 0 0 0 0 0
39839 - 0 0 0 0 0 0 0 0 0 0 0 0
39840 - 0 0 0 0 0 0 0 0 0 0 0 0
39841 - 0 0 0 0 0 0 0 0 0 0 0 0
39842 - 0 0 0 0 0 0 0 0 0 14 14 14
39843 - 46 46 46 86 86 86 2 2 6 54 54 54
39844 -218 218 218 195 195 195 226 226 226 246 246 246
39845 - 58 58 58 2 2 6 2 2 6 30 30 30
39846 -210 210 210 253 253 253 174 174 174 123 123 123
39847 -221 221 221 234 234 234 74 74 74 2 2 6
39848 - 2 2 6 2 2 6 2 2 6 2 2 6
39849 - 70 70 70 58 58 58 22 22 22 6 6 6
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 0 0 0 0 0 0 0 0 0 0 0 0
39852 - 0 0 0 0 0 0 0 0 0 0 0 0
39853 - 0 0 0 0 0 0 0 0 0 0 0 0
39854 - 0 0 0 0 0 0 0 0 0 0 0 0
39855 - 0 0 0 0 0 0 0 0 0 0 0 0
39856 - 0 0 0 0 0 0 0 0 0 0 0 0
39857 - 0 0 0 0 0 0 0 0 0 0 0 0
39858 - 0 0 0 0 0 0 0 0 0 0 0 0
39859 - 0 0 0 0 0 0 0 0 0 0 0 0
39860 - 0 0 0 0 0 0 0 0 0 0 0 0
39861 - 0 0 0 0 0 0 0 0 0 0 0 0
39862 - 0 0 0 0 0 0 0 0 0 14 14 14
39863 - 46 46 46 82 82 82 2 2 6 106 106 106
39864 -170 170 170 26 26 26 86 86 86 226 226 226
39865 -123 123 123 10 10 10 14 14 14 46 46 46
39866 -231 231 231 190 190 190 6 6 6 70 70 70
39867 - 90 90 90 238 238 238 158 158 158 2 2 6
39868 - 2 2 6 2 2 6 2 2 6 2 2 6
39869 - 70 70 70 58 58 58 22 22 22 6 6 6
39870 - 0 0 0 0 0 0 0 0 0 0 0 0
39871 - 0 0 0 0 0 0 0 0 0 0 0 0
39872 - 0 0 0 0 0 0 0 0 0 0 0 0
39873 - 0 0 0 0 0 0 0 0 0 0 0 0
39874 - 0 0 0 0 0 0 0 0 0 0 0 0
39875 - 0 0 0 0 0 0 0 0 0 0 0 0
39876 - 0 0 0 0 0 0 0 0 1 0 0 0
39877 - 0 0 1 0 0 1 0 0 1 0 0 0
39878 - 0 0 0 0 0 0 0 0 0 0 0 0
39879 - 0 0 0 0 0 0 0 0 0 0 0 0
39880 - 0 0 0 0 0 0 0 0 0 0 0 0
39881 - 0 0 0 0 0 0 0 0 0 0 0 0
39882 - 0 0 0 0 0 0 0 0 0 14 14 14
39883 - 42 42 42 86 86 86 6 6 6 116 116 116
39884 -106 106 106 6 6 6 70 70 70 149 149 149
39885 -128 128 128 18 18 18 38 38 38 54 54 54
39886 -221 221 221 106 106 106 2 2 6 14 14 14
39887 - 46 46 46 190 190 190 198 198 198 2 2 6
39888 - 2 2 6 2 2 6 2 2 6 2 2 6
39889 - 74 74 74 62 62 62 22 22 22 6 6 6
39890 - 0 0 0 0 0 0 0 0 0 0 0 0
39891 - 0 0 0 0 0 0 0 0 0 0 0 0
39892 - 0 0 0 0 0 0 0 0 0 0 0 0
39893 - 0 0 0 0 0 0 0 0 0 0 0 0
39894 - 0 0 0 0 0 0 0 0 0 0 0 0
39895 - 0 0 0 0 0 0 0 0 0 0 0 0
39896 - 0 0 0 0 0 0 0 0 1 0 0 0
39897 - 0 0 1 0 0 0 0 0 1 0 0 0
39898 - 0 0 0 0 0 0 0 0 0 0 0 0
39899 - 0 0 0 0 0 0 0 0 0 0 0 0
39900 - 0 0 0 0 0 0 0 0 0 0 0 0
39901 - 0 0 0 0 0 0 0 0 0 0 0 0
39902 - 0 0 0 0 0 0 0 0 0 14 14 14
39903 - 42 42 42 94 94 94 14 14 14 101 101 101
39904 -128 128 128 2 2 6 18 18 18 116 116 116
39905 -118 98 46 121 92 8 121 92 8 98 78 10
39906 -162 162 162 106 106 106 2 2 6 2 2 6
39907 - 2 2 6 195 195 195 195 195 195 6 6 6
39908 - 2 2 6 2 2 6 2 2 6 2 2 6
39909 - 74 74 74 62 62 62 22 22 22 6 6 6
39910 - 0 0 0 0 0 0 0 0 0 0 0 0
39911 - 0 0 0 0 0 0 0 0 0 0 0 0
39912 - 0 0 0 0 0 0 0 0 0 0 0 0
39913 - 0 0 0 0 0 0 0 0 0 0 0 0
39914 - 0 0 0 0 0 0 0 0 0 0 0 0
39915 - 0 0 0 0 0 0 0 0 0 0 0 0
39916 - 0 0 0 0 0 0 0 0 1 0 0 1
39917 - 0 0 1 0 0 0 0 0 1 0 0 0
39918 - 0 0 0 0 0 0 0 0 0 0 0 0
39919 - 0 0 0 0 0 0 0 0 0 0 0 0
39920 - 0 0 0 0 0 0 0 0 0 0 0 0
39921 - 0 0 0 0 0 0 0 0 0 0 0 0
39922 - 0 0 0 0 0 0 0 0 0 10 10 10
39923 - 38 38 38 90 90 90 14 14 14 58 58 58
39924 -210 210 210 26 26 26 54 38 6 154 114 10
39925 -226 170 11 236 186 11 225 175 15 184 144 12
39926 -215 174 15 175 146 61 37 26 9 2 2 6
39927 - 70 70 70 246 246 246 138 138 138 2 2 6
39928 - 2 2 6 2 2 6 2 2 6 2 2 6
39929 - 70 70 70 66 66 66 26 26 26 6 6 6
39930 - 0 0 0 0 0 0 0 0 0 0 0 0
39931 - 0 0 0 0 0 0 0 0 0 0 0 0
39932 - 0 0 0 0 0 0 0 0 0 0 0 0
39933 - 0 0 0 0 0 0 0 0 0 0 0 0
39934 - 0 0 0 0 0 0 0 0 0 0 0 0
39935 - 0 0 0 0 0 0 0 0 0 0 0 0
39936 - 0 0 0 0 0 0 0 0 0 0 0 0
39937 - 0 0 0 0 0 0 0 0 0 0 0 0
39938 - 0 0 0 0 0 0 0 0 0 0 0 0
39939 - 0 0 0 0 0 0 0 0 0 0 0 0
39940 - 0 0 0 0 0 0 0 0 0 0 0 0
39941 - 0 0 0 0 0 0 0 0 0 0 0 0
39942 - 0 0 0 0 0 0 0 0 0 10 10 10
39943 - 38 38 38 86 86 86 14 14 14 10 10 10
39944 -195 195 195 188 164 115 192 133 9 225 175 15
39945 -239 182 13 234 190 10 232 195 16 232 200 30
39946 -245 207 45 241 208 19 232 195 16 184 144 12
39947 -218 194 134 211 206 186 42 42 42 2 2 6
39948 - 2 2 6 2 2 6 2 2 6 2 2 6
39949 - 50 50 50 74 74 74 30 30 30 6 6 6
39950 - 0 0 0 0 0 0 0 0 0 0 0 0
39951 - 0 0 0 0 0 0 0 0 0 0 0 0
39952 - 0 0 0 0 0 0 0 0 0 0 0 0
39953 - 0 0 0 0 0 0 0 0 0 0 0 0
39954 - 0 0 0 0 0 0 0 0 0 0 0 0
39955 - 0 0 0 0 0 0 0 0 0 0 0 0
39956 - 0 0 0 0 0 0 0 0 0 0 0 0
39957 - 0 0 0 0 0 0 0 0 0 0 0 0
39958 - 0 0 0 0 0 0 0 0 0 0 0 0
39959 - 0 0 0 0 0 0 0 0 0 0 0 0
39960 - 0 0 0 0 0 0 0 0 0 0 0 0
39961 - 0 0 0 0 0 0 0 0 0 0 0 0
39962 - 0 0 0 0 0 0 0 0 0 10 10 10
39963 - 34 34 34 86 86 86 14 14 14 2 2 6
39964 -121 87 25 192 133 9 219 162 10 239 182 13
39965 -236 186 11 232 195 16 241 208 19 244 214 54
39966 -246 218 60 246 218 38 246 215 20 241 208 19
39967 -241 208 19 226 184 13 121 87 25 2 2 6
39968 - 2 2 6 2 2 6 2 2 6 2 2 6
39969 - 50 50 50 82 82 82 34 34 34 10 10 10
39970 - 0 0 0 0 0 0 0 0 0 0 0 0
39971 - 0 0 0 0 0 0 0 0 0 0 0 0
39972 - 0 0 0 0 0 0 0 0 0 0 0 0
39973 - 0 0 0 0 0 0 0 0 0 0 0 0
39974 - 0 0 0 0 0 0 0 0 0 0 0 0
39975 - 0 0 0 0 0 0 0 0 0 0 0 0
39976 - 0 0 0 0 0 0 0 0 0 0 0 0
39977 - 0 0 0 0 0 0 0 0 0 0 0 0
39978 - 0 0 0 0 0 0 0 0 0 0 0 0
39979 - 0 0 0 0 0 0 0 0 0 0 0 0
39980 - 0 0 0 0 0 0 0 0 0 0 0 0
39981 - 0 0 0 0 0 0 0 0 0 0 0 0
39982 - 0 0 0 0 0 0 0 0 0 10 10 10
39983 - 34 34 34 82 82 82 30 30 30 61 42 6
39984 -180 123 7 206 145 10 230 174 11 239 182 13
39985 -234 190 10 238 202 15 241 208 19 246 218 74
39986 -246 218 38 246 215 20 246 215 20 246 215 20
39987 -226 184 13 215 174 15 184 144 12 6 6 6
39988 - 2 2 6 2 2 6 2 2 6 2 2 6
39989 - 26 26 26 94 94 94 42 42 42 14 14 14
39990 - 0 0 0 0 0 0 0 0 0 0 0 0
39991 - 0 0 0 0 0 0 0 0 0 0 0 0
39992 - 0 0 0 0 0 0 0 0 0 0 0 0
39993 - 0 0 0 0 0 0 0 0 0 0 0 0
39994 - 0 0 0 0 0 0 0 0 0 0 0 0
39995 - 0 0 0 0 0 0 0 0 0 0 0 0
39996 - 0 0 0 0 0 0 0 0 0 0 0 0
39997 - 0 0 0 0 0 0 0 0 0 0 0 0
39998 - 0 0 0 0 0 0 0 0 0 0 0 0
39999 - 0 0 0 0 0 0 0 0 0 0 0 0
40000 - 0 0 0 0 0 0 0 0 0 0 0 0
40001 - 0 0 0 0 0 0 0 0 0 0 0 0
40002 - 0 0 0 0 0 0 0 0 0 10 10 10
40003 - 30 30 30 78 78 78 50 50 50 104 69 6
40004 -192 133 9 216 158 10 236 178 12 236 186 11
40005 -232 195 16 241 208 19 244 214 54 245 215 43
40006 -246 215 20 246 215 20 241 208 19 198 155 10
40007 -200 144 11 216 158 10 156 118 10 2 2 6
40008 - 2 2 6 2 2 6 2 2 6 2 2 6
40009 - 6 6 6 90 90 90 54 54 54 18 18 18
40010 - 6 6 6 0 0 0 0 0 0 0 0 0
40011 - 0 0 0 0 0 0 0 0 0 0 0 0
40012 - 0 0 0 0 0 0 0 0 0 0 0 0
40013 - 0 0 0 0 0 0 0 0 0 0 0 0
40014 - 0 0 0 0 0 0 0 0 0 0 0 0
40015 - 0 0 0 0 0 0 0 0 0 0 0 0
40016 - 0 0 0 0 0 0 0 0 0 0 0 0
40017 - 0 0 0 0 0 0 0 0 0 0 0 0
40018 - 0 0 0 0 0 0 0 0 0 0 0 0
40019 - 0 0 0 0 0 0 0 0 0 0 0 0
40020 - 0 0 0 0 0 0 0 0 0 0 0 0
40021 - 0 0 0 0 0 0 0 0 0 0 0 0
40022 - 0 0 0 0 0 0 0 0 0 10 10 10
40023 - 30 30 30 78 78 78 46 46 46 22 22 22
40024 -137 92 6 210 162 10 239 182 13 238 190 10
40025 -238 202 15 241 208 19 246 215 20 246 215 20
40026 -241 208 19 203 166 17 185 133 11 210 150 10
40027 -216 158 10 210 150 10 102 78 10 2 2 6
40028 - 6 6 6 54 54 54 14 14 14 2 2 6
40029 - 2 2 6 62 62 62 74 74 74 30 30 30
40030 - 10 10 10 0 0 0 0 0 0 0 0 0
40031 - 0 0 0 0 0 0 0 0 0 0 0 0
40032 - 0 0 0 0 0 0 0 0 0 0 0 0
40033 - 0 0 0 0 0 0 0 0 0 0 0 0
40034 - 0 0 0 0 0 0 0 0 0 0 0 0
40035 - 0 0 0 0 0 0 0 0 0 0 0 0
40036 - 0 0 0 0 0 0 0 0 0 0 0 0
40037 - 0 0 0 0 0 0 0 0 0 0 0 0
40038 - 0 0 0 0 0 0 0 0 0 0 0 0
40039 - 0 0 0 0 0 0 0 0 0 0 0 0
40040 - 0 0 0 0 0 0 0 0 0 0 0 0
40041 - 0 0 0 0 0 0 0 0 0 0 0 0
40042 - 0 0 0 0 0 0 0 0 0 10 10 10
40043 - 34 34 34 78 78 78 50 50 50 6 6 6
40044 - 94 70 30 139 102 15 190 146 13 226 184 13
40045 -232 200 30 232 195 16 215 174 15 190 146 13
40046 -168 122 10 192 133 9 210 150 10 213 154 11
40047 -202 150 34 182 157 106 101 98 89 2 2 6
40048 - 2 2 6 78 78 78 116 116 116 58 58 58
40049 - 2 2 6 22 22 22 90 90 90 46 46 46
40050 - 18 18 18 6 6 6 0 0 0 0 0 0
40051 - 0 0 0 0 0 0 0 0 0 0 0 0
40052 - 0 0 0 0 0 0 0 0 0 0 0 0
40053 - 0 0 0 0 0 0 0 0 0 0 0 0
40054 - 0 0 0 0 0 0 0 0 0 0 0 0
40055 - 0 0 0 0 0 0 0 0 0 0 0 0
40056 - 0 0 0 0 0 0 0 0 0 0 0 0
40057 - 0 0 0 0 0 0 0 0 0 0 0 0
40058 - 0 0 0 0 0 0 0 0 0 0 0 0
40059 - 0 0 0 0 0 0 0 0 0 0 0 0
40060 - 0 0 0 0 0 0 0 0 0 0 0 0
40061 - 0 0 0 0 0 0 0 0 0 0 0 0
40062 - 0 0 0 0 0 0 0 0 0 10 10 10
40063 - 38 38 38 86 86 86 50 50 50 6 6 6
40064 -128 128 128 174 154 114 156 107 11 168 122 10
40065 -198 155 10 184 144 12 197 138 11 200 144 11
40066 -206 145 10 206 145 10 197 138 11 188 164 115
40067 -195 195 195 198 198 198 174 174 174 14 14 14
40068 - 2 2 6 22 22 22 116 116 116 116 116 116
40069 - 22 22 22 2 2 6 74 74 74 70 70 70
40070 - 30 30 30 10 10 10 0 0 0 0 0 0
40071 - 0 0 0 0 0 0 0 0 0 0 0 0
40072 - 0 0 0 0 0 0 0 0 0 0 0 0
40073 - 0 0 0 0 0 0 0 0 0 0 0 0
40074 - 0 0 0 0 0 0 0 0 0 0 0 0
40075 - 0 0 0 0 0 0 0 0 0 0 0 0
40076 - 0 0 0 0 0 0 0 0 0 0 0 0
40077 - 0 0 0 0 0 0 0 0 0 0 0 0
40078 - 0 0 0 0 0 0 0 0 0 0 0 0
40079 - 0 0 0 0 0 0 0 0 0 0 0 0
40080 - 0 0 0 0 0 0 0 0 0 0 0 0
40081 - 0 0 0 0 0 0 0 0 0 0 0 0
40082 - 0 0 0 0 0 0 6 6 6 18 18 18
40083 - 50 50 50 101 101 101 26 26 26 10 10 10
40084 -138 138 138 190 190 190 174 154 114 156 107 11
40085 -197 138 11 200 144 11 197 138 11 192 133 9
40086 -180 123 7 190 142 34 190 178 144 187 187 187
40087 -202 202 202 221 221 221 214 214 214 66 66 66
40088 - 2 2 6 2 2 6 50 50 50 62 62 62
40089 - 6 6 6 2 2 6 10 10 10 90 90 90
40090 - 50 50 50 18 18 18 6 6 6 0 0 0
40091 - 0 0 0 0 0 0 0 0 0 0 0 0
40092 - 0 0 0 0 0 0 0 0 0 0 0 0
40093 - 0 0 0 0 0 0 0 0 0 0 0 0
40094 - 0 0 0 0 0 0 0 0 0 0 0 0
40095 - 0 0 0 0 0 0 0 0 0 0 0 0
40096 - 0 0 0 0 0 0 0 0 0 0 0 0
40097 - 0 0 0 0 0 0 0 0 0 0 0 0
40098 - 0 0 0 0 0 0 0 0 0 0 0 0
40099 - 0 0 0 0 0 0 0 0 0 0 0 0
40100 - 0 0 0 0 0 0 0 0 0 0 0 0
40101 - 0 0 0 0 0 0 0 0 0 0 0 0
40102 - 0 0 0 0 0 0 10 10 10 34 34 34
40103 - 74 74 74 74 74 74 2 2 6 6 6 6
40104 -144 144 144 198 198 198 190 190 190 178 166 146
40105 -154 121 60 156 107 11 156 107 11 168 124 44
40106 -174 154 114 187 187 187 190 190 190 210 210 210
40107 -246 246 246 253 253 253 253 253 253 182 182 182
40108 - 6 6 6 2 2 6 2 2 6 2 2 6
40109 - 2 2 6 2 2 6 2 2 6 62 62 62
40110 - 74 74 74 34 34 34 14 14 14 0 0 0
40111 - 0 0 0 0 0 0 0 0 0 0 0 0
40112 - 0 0 0 0 0 0 0 0 0 0 0 0
40113 - 0 0 0 0 0 0 0 0 0 0 0 0
40114 - 0 0 0 0 0 0 0 0 0 0 0 0
40115 - 0 0 0 0 0 0 0 0 0 0 0 0
40116 - 0 0 0 0 0 0 0 0 0 0 0 0
40117 - 0 0 0 0 0 0 0 0 0 0 0 0
40118 - 0 0 0 0 0 0 0 0 0 0 0 0
40119 - 0 0 0 0 0 0 0 0 0 0 0 0
40120 - 0 0 0 0 0 0 0 0 0 0 0 0
40121 - 0 0 0 0 0 0 0 0 0 0 0 0
40122 - 0 0 0 10 10 10 22 22 22 54 54 54
40123 - 94 94 94 18 18 18 2 2 6 46 46 46
40124 -234 234 234 221 221 221 190 190 190 190 190 190
40125 -190 190 190 187 187 187 187 187 187 190 190 190
40126 -190 190 190 195 195 195 214 214 214 242 242 242
40127 -253 253 253 253 253 253 253 253 253 253 253 253
40128 - 82 82 82 2 2 6 2 2 6 2 2 6
40129 - 2 2 6 2 2 6 2 2 6 14 14 14
40130 - 86 86 86 54 54 54 22 22 22 6 6 6
40131 - 0 0 0 0 0 0 0 0 0 0 0 0
40132 - 0 0 0 0 0 0 0 0 0 0 0 0
40133 - 0 0 0 0 0 0 0 0 0 0 0 0
40134 - 0 0 0 0 0 0 0 0 0 0 0 0
40135 - 0 0 0 0 0 0 0 0 0 0 0 0
40136 - 0 0 0 0 0 0 0 0 0 0 0 0
40137 - 0 0 0 0 0 0 0 0 0 0 0 0
40138 - 0 0 0 0 0 0 0 0 0 0 0 0
40139 - 0 0 0 0 0 0 0 0 0 0 0 0
40140 - 0 0 0 0 0 0 0 0 0 0 0 0
40141 - 0 0 0 0 0 0 0 0 0 0 0 0
40142 - 6 6 6 18 18 18 46 46 46 90 90 90
40143 - 46 46 46 18 18 18 6 6 6 182 182 182
40144 -253 253 253 246 246 246 206 206 206 190 190 190
40145 -190 190 190 190 190 190 190 190 190 190 190 190
40146 -206 206 206 231 231 231 250 250 250 253 253 253
40147 -253 253 253 253 253 253 253 253 253 253 253 253
40148 -202 202 202 14 14 14 2 2 6 2 2 6
40149 - 2 2 6 2 2 6 2 2 6 2 2 6
40150 - 42 42 42 86 86 86 42 42 42 18 18 18
40151 - 6 6 6 0 0 0 0 0 0 0 0 0
40152 - 0 0 0 0 0 0 0 0 0 0 0 0
40153 - 0 0 0 0 0 0 0 0 0 0 0 0
40154 - 0 0 0 0 0 0 0 0 0 0 0 0
40155 - 0 0 0 0 0 0 0 0 0 0 0 0
40156 - 0 0 0 0 0 0 0 0 0 0 0 0
40157 - 0 0 0 0 0 0 0 0 0 0 0 0
40158 - 0 0 0 0 0 0 0 0 0 0 0 0
40159 - 0 0 0 0 0 0 0 0 0 0 0 0
40160 - 0 0 0 0 0 0 0 0 0 0 0 0
40161 - 0 0 0 0 0 0 0 0 0 6 6 6
40162 - 14 14 14 38 38 38 74 74 74 66 66 66
40163 - 2 2 6 6 6 6 90 90 90 250 250 250
40164 -253 253 253 253 253 253 238 238 238 198 198 198
40165 -190 190 190 190 190 190 195 195 195 221 221 221
40166 -246 246 246 253 253 253 253 253 253 253 253 253
40167 -253 253 253 253 253 253 253 253 253 253 253 253
40168 -253 253 253 82 82 82 2 2 6 2 2 6
40169 - 2 2 6 2 2 6 2 2 6 2 2 6
40170 - 2 2 6 78 78 78 70 70 70 34 34 34
40171 - 14 14 14 6 6 6 0 0 0 0 0 0
40172 - 0 0 0 0 0 0 0 0 0 0 0 0
40173 - 0 0 0 0 0 0 0 0 0 0 0 0
40174 - 0 0 0 0 0 0 0 0 0 0 0 0
40175 - 0 0 0 0 0 0 0 0 0 0 0 0
40176 - 0 0 0 0 0 0 0 0 0 0 0 0
40177 - 0 0 0 0 0 0 0 0 0 0 0 0
40178 - 0 0 0 0 0 0 0 0 0 0 0 0
40179 - 0 0 0 0 0 0 0 0 0 0 0 0
40180 - 0 0 0 0 0 0 0 0 0 0 0 0
40181 - 0 0 0 0 0 0 0 0 0 14 14 14
40182 - 34 34 34 66 66 66 78 78 78 6 6 6
40183 - 2 2 6 18 18 18 218 218 218 253 253 253
40184 -253 253 253 253 253 253 253 253 253 246 246 246
40185 -226 226 226 231 231 231 246 246 246 253 253 253
40186 -253 253 253 253 253 253 253 253 253 253 253 253
40187 -253 253 253 253 253 253 253 253 253 253 253 253
40188 -253 253 253 178 178 178 2 2 6 2 2 6
40189 - 2 2 6 2 2 6 2 2 6 2 2 6
40190 - 2 2 6 18 18 18 90 90 90 62 62 62
40191 - 30 30 30 10 10 10 0 0 0 0 0 0
40192 - 0 0 0 0 0 0 0 0 0 0 0 0
40193 - 0 0 0 0 0 0 0 0 0 0 0 0
40194 - 0 0 0 0 0 0 0 0 0 0 0 0
40195 - 0 0 0 0 0 0 0 0 0 0 0 0
40196 - 0 0 0 0 0 0 0 0 0 0 0 0
40197 - 0 0 0 0 0 0 0 0 0 0 0 0
40198 - 0 0 0 0 0 0 0 0 0 0 0 0
40199 - 0 0 0 0 0 0 0 0 0 0 0 0
40200 - 0 0 0 0 0 0 0 0 0 0 0 0
40201 - 0 0 0 0 0 0 10 10 10 26 26 26
40202 - 58 58 58 90 90 90 18 18 18 2 2 6
40203 - 2 2 6 110 110 110 253 253 253 253 253 253
40204 -253 253 253 253 253 253 253 253 253 253 253 253
40205 -250 250 250 253 253 253 253 253 253 253 253 253
40206 -253 253 253 253 253 253 253 253 253 253 253 253
40207 -253 253 253 253 253 253 253 253 253 253 253 253
40208 -253 253 253 231 231 231 18 18 18 2 2 6
40209 - 2 2 6 2 2 6 2 2 6 2 2 6
40210 - 2 2 6 2 2 6 18 18 18 94 94 94
40211 - 54 54 54 26 26 26 10 10 10 0 0 0
40212 - 0 0 0 0 0 0 0 0 0 0 0 0
40213 - 0 0 0 0 0 0 0 0 0 0 0 0
40214 - 0 0 0 0 0 0 0 0 0 0 0 0
40215 - 0 0 0 0 0 0 0 0 0 0 0 0
40216 - 0 0 0 0 0 0 0 0 0 0 0 0
40217 - 0 0 0 0 0 0 0 0 0 0 0 0
40218 - 0 0 0 0 0 0 0 0 0 0 0 0
40219 - 0 0 0 0 0 0 0 0 0 0 0 0
40220 - 0 0 0 0 0 0 0 0 0 0 0 0
40221 - 0 0 0 6 6 6 22 22 22 50 50 50
40222 - 90 90 90 26 26 26 2 2 6 2 2 6
40223 - 14 14 14 195 195 195 250 250 250 253 253 253
40224 -253 253 253 253 253 253 253 253 253 253 253 253
40225 -253 253 253 253 253 253 253 253 253 253 253 253
40226 -253 253 253 253 253 253 253 253 253 253 253 253
40227 -253 253 253 253 253 253 253 253 253 253 253 253
40228 -250 250 250 242 242 242 54 54 54 2 2 6
40229 - 2 2 6 2 2 6 2 2 6 2 2 6
40230 - 2 2 6 2 2 6 2 2 6 38 38 38
40231 - 86 86 86 50 50 50 22 22 22 6 6 6
40232 - 0 0 0 0 0 0 0 0 0 0 0 0
40233 - 0 0 0 0 0 0 0 0 0 0 0 0
40234 - 0 0 0 0 0 0 0 0 0 0 0 0
40235 - 0 0 0 0 0 0 0 0 0 0 0 0
40236 - 0 0 0 0 0 0 0 0 0 0 0 0
40237 - 0 0 0 0 0 0 0 0 0 0 0 0
40238 - 0 0 0 0 0 0 0 0 0 0 0 0
40239 - 0 0 0 0 0 0 0 0 0 0 0 0
40240 - 0 0 0 0 0 0 0 0 0 0 0 0
40241 - 6 6 6 14 14 14 38 38 38 82 82 82
40242 - 34 34 34 2 2 6 2 2 6 2 2 6
40243 - 42 42 42 195 195 195 246 246 246 253 253 253
40244 -253 253 253 253 253 253 253 253 253 250 250 250
40245 -242 242 242 242 242 242 250 250 250 253 253 253
40246 -253 253 253 253 253 253 253 253 253 253 253 253
40247 -253 253 253 250 250 250 246 246 246 238 238 238
40248 -226 226 226 231 231 231 101 101 101 6 6 6
40249 - 2 2 6 2 2 6 2 2 6 2 2 6
40250 - 2 2 6 2 2 6 2 2 6 2 2 6
40251 - 38 38 38 82 82 82 42 42 42 14 14 14
40252 - 6 6 6 0 0 0 0 0 0 0 0 0
40253 - 0 0 0 0 0 0 0 0 0 0 0 0
40254 - 0 0 0 0 0 0 0 0 0 0 0 0
40255 - 0 0 0 0 0 0 0 0 0 0 0 0
40256 - 0 0 0 0 0 0 0 0 0 0 0 0
40257 - 0 0 0 0 0 0 0 0 0 0 0 0
40258 - 0 0 0 0 0 0 0 0 0 0 0 0
40259 - 0 0 0 0 0 0 0 0 0 0 0 0
40260 - 0 0 0 0 0 0 0 0 0 0 0 0
40261 - 10 10 10 26 26 26 62 62 62 66 66 66
40262 - 2 2 6 2 2 6 2 2 6 6 6 6
40263 - 70 70 70 170 170 170 206 206 206 234 234 234
40264 -246 246 246 250 250 250 250 250 250 238 238 238
40265 -226 226 226 231 231 231 238 238 238 250 250 250
40266 -250 250 250 250 250 250 246 246 246 231 231 231
40267 -214 214 214 206 206 206 202 202 202 202 202 202
40268 -198 198 198 202 202 202 182 182 182 18 18 18
40269 - 2 2 6 2 2 6 2 2 6 2 2 6
40270 - 2 2 6 2 2 6 2 2 6 2 2 6
40271 - 2 2 6 62 62 62 66 66 66 30 30 30
40272 - 10 10 10 0 0 0 0 0 0 0 0 0
40273 - 0 0 0 0 0 0 0 0 0 0 0 0
40274 - 0 0 0 0 0 0 0 0 0 0 0 0
40275 - 0 0 0 0 0 0 0 0 0 0 0 0
40276 - 0 0 0 0 0 0 0 0 0 0 0 0
40277 - 0 0 0 0 0 0 0 0 0 0 0 0
40278 - 0 0 0 0 0 0 0 0 0 0 0 0
40279 - 0 0 0 0 0 0 0 0 0 0 0 0
40280 - 0 0 0 0 0 0 0 0 0 0 0 0
40281 - 14 14 14 42 42 42 82 82 82 18 18 18
40282 - 2 2 6 2 2 6 2 2 6 10 10 10
40283 - 94 94 94 182 182 182 218 218 218 242 242 242
40284 -250 250 250 253 253 253 253 253 253 250 250 250
40285 -234 234 234 253 253 253 253 253 253 253 253 253
40286 -253 253 253 253 253 253 253 253 253 246 246 246
40287 -238 238 238 226 226 226 210 210 210 202 202 202
40288 -195 195 195 195 195 195 210 210 210 158 158 158
40289 - 6 6 6 14 14 14 50 50 50 14 14 14
40290 - 2 2 6 2 2 6 2 2 6 2 2 6
40291 - 2 2 6 6 6 6 86 86 86 46 46 46
40292 - 18 18 18 6 6 6 0 0 0 0 0 0
40293 - 0 0 0 0 0 0 0 0 0 0 0 0
40294 - 0 0 0 0 0 0 0 0 0 0 0 0
40295 - 0 0 0 0 0 0 0 0 0 0 0 0
40296 - 0 0 0 0 0 0 0 0 0 0 0 0
40297 - 0 0 0 0 0 0 0 0 0 0 0 0
40298 - 0 0 0 0 0 0 0 0 0 0 0 0
40299 - 0 0 0 0 0 0 0 0 0 0 0 0
40300 - 0 0 0 0 0 0 0 0 0 6 6 6
40301 - 22 22 22 54 54 54 70 70 70 2 2 6
40302 - 2 2 6 10 10 10 2 2 6 22 22 22
40303 -166 166 166 231 231 231 250 250 250 253 253 253
40304 -253 253 253 253 253 253 253 253 253 250 250 250
40305 -242 242 242 253 253 253 253 253 253 253 253 253
40306 -253 253 253 253 253 253 253 253 253 253 253 253
40307 -253 253 253 253 253 253 253 253 253 246 246 246
40308 -231 231 231 206 206 206 198 198 198 226 226 226
40309 - 94 94 94 2 2 6 6 6 6 38 38 38
40310 - 30 30 30 2 2 6 2 2 6 2 2 6
40311 - 2 2 6 2 2 6 62 62 62 66 66 66
40312 - 26 26 26 10 10 10 0 0 0 0 0 0
40313 - 0 0 0 0 0 0 0 0 0 0 0 0
40314 - 0 0 0 0 0 0 0 0 0 0 0 0
40315 - 0 0 0 0 0 0 0 0 0 0 0 0
40316 - 0 0 0 0 0 0 0 0 0 0 0 0
40317 - 0 0 0 0 0 0 0 0 0 0 0 0
40318 - 0 0 0 0 0 0 0 0 0 0 0 0
40319 - 0 0 0 0 0 0 0 0 0 0 0 0
40320 - 0 0 0 0 0 0 0 0 0 10 10 10
40321 - 30 30 30 74 74 74 50 50 50 2 2 6
40322 - 26 26 26 26 26 26 2 2 6 106 106 106
40323 -238 238 238 253 253 253 253 253 253 253 253 253
40324 -253 253 253 253 253 253 253 253 253 253 253 253
40325 -253 253 253 253 253 253 253 253 253 253 253 253
40326 -253 253 253 253 253 253 253 253 253 253 253 253
40327 -253 253 253 253 253 253 253 253 253 253 253 253
40328 -253 253 253 246 246 246 218 218 218 202 202 202
40329 -210 210 210 14 14 14 2 2 6 2 2 6
40330 - 30 30 30 22 22 22 2 2 6 2 2 6
40331 - 2 2 6 2 2 6 18 18 18 86 86 86
40332 - 42 42 42 14 14 14 0 0 0 0 0 0
40333 - 0 0 0 0 0 0 0 0 0 0 0 0
40334 - 0 0 0 0 0 0 0 0 0 0 0 0
40335 - 0 0 0 0 0 0 0 0 0 0 0 0
40336 - 0 0 0 0 0 0 0 0 0 0 0 0
40337 - 0 0 0 0 0 0 0 0 0 0 0 0
40338 - 0 0 0 0 0 0 0 0 0 0 0 0
40339 - 0 0 0 0 0 0 0 0 0 0 0 0
40340 - 0 0 0 0 0 0 0 0 0 14 14 14
40341 - 42 42 42 90 90 90 22 22 22 2 2 6
40342 - 42 42 42 2 2 6 18 18 18 218 218 218
40343 -253 253 253 253 253 253 253 253 253 253 253 253
40344 -253 253 253 253 253 253 253 253 253 253 253 253
40345 -253 253 253 253 253 253 253 253 253 253 253 253
40346 -253 253 253 253 253 253 253 253 253 253 253 253
40347 -253 253 253 253 253 253 253 253 253 253 253 253
40348 -253 253 253 253 253 253 250 250 250 221 221 221
40349 -218 218 218 101 101 101 2 2 6 14 14 14
40350 - 18 18 18 38 38 38 10 10 10 2 2 6
40351 - 2 2 6 2 2 6 2 2 6 78 78 78
40352 - 58 58 58 22 22 22 6 6 6 0 0 0
40353 - 0 0 0 0 0 0 0 0 0 0 0 0
40354 - 0 0 0 0 0 0 0 0 0 0 0 0
40355 - 0 0 0 0 0 0 0 0 0 0 0 0
40356 - 0 0 0 0 0 0 0 0 0 0 0 0
40357 - 0 0 0 0 0 0 0 0 0 0 0 0
40358 - 0 0 0 0 0 0 0 0 0 0 0 0
40359 - 0 0 0 0 0 0 0 0 0 0 0 0
40360 - 0 0 0 0 0 0 6 6 6 18 18 18
40361 - 54 54 54 82 82 82 2 2 6 26 26 26
40362 - 22 22 22 2 2 6 123 123 123 253 253 253
40363 -253 253 253 253 253 253 253 253 253 253 253 253
40364 -253 253 253 253 253 253 253 253 253 253 253 253
40365 -253 253 253 253 253 253 253 253 253 253 253 253
40366 -253 253 253 253 253 253 253 253 253 253 253 253
40367 -253 253 253 253 253 253 253 253 253 253 253 253
40368 -253 253 253 253 253 253 253 253 253 250 250 250
40369 -238 238 238 198 198 198 6 6 6 38 38 38
40370 - 58 58 58 26 26 26 38 38 38 2 2 6
40371 - 2 2 6 2 2 6 2 2 6 46 46 46
40372 - 78 78 78 30 30 30 10 10 10 0 0 0
40373 - 0 0 0 0 0 0 0 0 0 0 0 0
40374 - 0 0 0 0 0 0 0 0 0 0 0 0
40375 - 0 0 0 0 0 0 0 0 0 0 0 0
40376 - 0 0 0 0 0 0 0 0 0 0 0 0
40377 - 0 0 0 0 0 0 0 0 0 0 0 0
40378 - 0 0 0 0 0 0 0 0 0 0 0 0
40379 - 0 0 0 0 0 0 0 0 0 0 0 0
40380 - 0 0 0 0 0 0 10 10 10 30 30 30
40381 - 74 74 74 58 58 58 2 2 6 42 42 42
40382 - 2 2 6 22 22 22 231 231 231 253 253 253
40383 -253 253 253 253 253 253 253 253 253 253 253 253
40384 -253 253 253 253 253 253 253 253 253 250 250 250
40385 -253 253 253 253 253 253 253 253 253 253 253 253
40386 -253 253 253 253 253 253 253 253 253 253 253 253
40387 -253 253 253 253 253 253 253 253 253 253 253 253
40388 -253 253 253 253 253 253 253 253 253 253 253 253
40389 -253 253 253 246 246 246 46 46 46 38 38 38
40390 - 42 42 42 14 14 14 38 38 38 14 14 14
40391 - 2 2 6 2 2 6 2 2 6 6 6 6
40392 - 86 86 86 46 46 46 14 14 14 0 0 0
40393 - 0 0 0 0 0 0 0 0 0 0 0 0
40394 - 0 0 0 0 0 0 0 0 0 0 0 0
40395 - 0 0 0 0 0 0 0 0 0 0 0 0
40396 - 0 0 0 0 0 0 0 0 0 0 0 0
40397 - 0 0 0 0 0 0 0 0 0 0 0 0
40398 - 0 0 0 0 0 0 0 0 0 0 0 0
40399 - 0 0 0 0 0 0 0 0 0 0 0 0
40400 - 0 0 0 6 6 6 14 14 14 42 42 42
40401 - 90 90 90 18 18 18 18 18 18 26 26 26
40402 - 2 2 6 116 116 116 253 253 253 253 253 253
40403 -253 253 253 253 253 253 253 253 253 253 253 253
40404 -253 253 253 253 253 253 250 250 250 238 238 238
40405 -253 253 253 253 253 253 253 253 253 253 253 253
40406 -253 253 253 253 253 253 253 253 253 253 253 253
40407 -253 253 253 253 253 253 253 253 253 253 253 253
40408 -253 253 253 253 253 253 253 253 253 253 253 253
40409 -253 253 253 253 253 253 94 94 94 6 6 6
40410 - 2 2 6 2 2 6 10 10 10 34 34 34
40411 - 2 2 6 2 2 6 2 2 6 2 2 6
40412 - 74 74 74 58 58 58 22 22 22 6 6 6
40413 - 0 0 0 0 0 0 0 0 0 0 0 0
40414 - 0 0 0 0 0 0 0 0 0 0 0 0
40415 - 0 0 0 0 0 0 0 0 0 0 0 0
40416 - 0 0 0 0 0 0 0 0 0 0 0 0
40417 - 0 0 0 0 0 0 0 0 0 0 0 0
40418 - 0 0 0 0 0 0 0 0 0 0 0 0
40419 - 0 0 0 0 0 0 0 0 0 0 0 0
40420 - 0 0 0 10 10 10 26 26 26 66 66 66
40421 - 82 82 82 2 2 6 38 38 38 6 6 6
40422 - 14 14 14 210 210 210 253 253 253 253 253 253
40423 -253 253 253 253 253 253 253 253 253 253 253 253
40424 -253 253 253 253 253 253 246 246 246 242 242 242
40425 -253 253 253 253 253 253 253 253 253 253 253 253
40426 -253 253 253 253 253 253 253 253 253 253 253 253
40427 -253 253 253 253 253 253 253 253 253 253 253 253
40428 -253 253 253 253 253 253 253 253 253 253 253 253
40429 -253 253 253 253 253 253 144 144 144 2 2 6
40430 - 2 2 6 2 2 6 2 2 6 46 46 46
40431 - 2 2 6 2 2 6 2 2 6 2 2 6
40432 - 42 42 42 74 74 74 30 30 30 10 10 10
40433 - 0 0 0 0 0 0 0 0 0 0 0 0
40434 - 0 0 0 0 0 0 0 0 0 0 0 0
40435 - 0 0 0 0 0 0 0 0 0 0 0 0
40436 - 0 0 0 0 0 0 0 0 0 0 0 0
40437 - 0 0 0 0 0 0 0 0 0 0 0 0
40438 - 0 0 0 0 0 0 0 0 0 0 0 0
40439 - 0 0 0 0 0 0 0 0 0 0 0 0
40440 - 6 6 6 14 14 14 42 42 42 90 90 90
40441 - 26 26 26 6 6 6 42 42 42 2 2 6
40442 - 74 74 74 250 250 250 253 253 253 253 253 253
40443 -253 253 253 253 253 253 253 253 253 253 253 253
40444 -253 253 253 253 253 253 242 242 242 242 242 242
40445 -253 253 253 253 253 253 253 253 253 253 253 253
40446 -253 253 253 253 253 253 253 253 253 253 253 253
40447 -253 253 253 253 253 253 253 253 253 253 253 253
40448 -253 253 253 253 253 253 253 253 253 253 253 253
40449 -253 253 253 253 253 253 182 182 182 2 2 6
40450 - 2 2 6 2 2 6 2 2 6 46 46 46
40451 - 2 2 6 2 2 6 2 2 6 2 2 6
40452 - 10 10 10 86 86 86 38 38 38 10 10 10
40453 - 0 0 0 0 0 0 0 0 0 0 0 0
40454 - 0 0 0 0 0 0 0 0 0 0 0 0
40455 - 0 0 0 0 0 0 0 0 0 0 0 0
40456 - 0 0 0 0 0 0 0 0 0 0 0 0
40457 - 0 0 0 0 0 0 0 0 0 0 0 0
40458 - 0 0 0 0 0 0 0 0 0 0 0 0
40459 - 0 0 0 0 0 0 0 0 0 0 0 0
40460 - 10 10 10 26 26 26 66 66 66 82 82 82
40461 - 2 2 6 22 22 22 18 18 18 2 2 6
40462 -149 149 149 253 253 253 253 253 253 253 253 253
40463 -253 253 253 253 253 253 253 253 253 253 253 253
40464 -253 253 253 253 253 253 234 234 234 242 242 242
40465 -253 253 253 253 253 253 253 253 253 253 253 253
40466 -253 253 253 253 253 253 253 253 253 253 253 253
40467 -253 253 253 253 253 253 253 253 253 253 253 253
40468 -253 253 253 253 253 253 253 253 253 253 253 253
40469 -253 253 253 253 253 253 206 206 206 2 2 6
40470 - 2 2 6 2 2 6 2 2 6 38 38 38
40471 - 2 2 6 2 2 6 2 2 6 2 2 6
40472 - 6 6 6 86 86 86 46 46 46 14 14 14
40473 - 0 0 0 0 0 0 0 0 0 0 0 0
40474 - 0 0 0 0 0 0 0 0 0 0 0 0
40475 - 0 0 0 0 0 0 0 0 0 0 0 0
40476 - 0 0 0 0 0 0 0 0 0 0 0 0
40477 - 0 0 0 0 0 0 0 0 0 0 0 0
40478 - 0 0 0 0 0 0 0 0 0 0 0 0
40479 - 0 0 0 0 0 0 0 0 0 6 6 6
40480 - 18 18 18 46 46 46 86 86 86 18 18 18
40481 - 2 2 6 34 34 34 10 10 10 6 6 6
40482 -210 210 210 253 253 253 253 253 253 253 253 253
40483 -253 253 253 253 253 253 253 253 253 253 253 253
40484 -253 253 253 253 253 253 234 234 234 242 242 242
40485 -253 253 253 253 253 253 253 253 253 253 253 253
40486 -253 253 253 253 253 253 253 253 253 253 253 253
40487 -253 253 253 253 253 253 253 253 253 253 253 253
40488 -253 253 253 253 253 253 253 253 253 253 253 253
40489 -253 253 253 253 253 253 221 221 221 6 6 6
40490 - 2 2 6 2 2 6 6 6 6 30 30 30
40491 - 2 2 6 2 2 6 2 2 6 2 2 6
40492 - 2 2 6 82 82 82 54 54 54 18 18 18
40493 - 6 6 6 0 0 0 0 0 0 0 0 0
40494 - 0 0 0 0 0 0 0 0 0 0 0 0
40495 - 0 0 0 0 0 0 0 0 0 0 0 0
40496 - 0 0 0 0 0 0 0 0 0 0 0 0
40497 - 0 0 0 0 0 0 0 0 0 0 0 0
40498 - 0 0 0 0 0 0 0 0 0 0 0 0
40499 - 0 0 0 0 0 0 0 0 0 10 10 10
40500 - 26 26 26 66 66 66 62 62 62 2 2 6
40501 - 2 2 6 38 38 38 10 10 10 26 26 26
40502 -238 238 238 253 253 253 253 253 253 253 253 253
40503 -253 253 253 253 253 253 253 253 253 253 253 253
40504 -253 253 253 253 253 253 231 231 231 238 238 238
40505 -253 253 253 253 253 253 253 253 253 253 253 253
40506 -253 253 253 253 253 253 253 253 253 253 253 253
40507 -253 253 253 253 253 253 253 253 253 253 253 253
40508 -253 253 253 253 253 253 253 253 253 253 253 253
40509 -253 253 253 253 253 253 231 231 231 6 6 6
40510 - 2 2 6 2 2 6 10 10 10 30 30 30
40511 - 2 2 6 2 2 6 2 2 6 2 2 6
40512 - 2 2 6 66 66 66 58 58 58 22 22 22
40513 - 6 6 6 0 0 0 0 0 0 0 0 0
40514 - 0 0 0 0 0 0 0 0 0 0 0 0
40515 - 0 0 0 0 0 0 0 0 0 0 0 0
40516 - 0 0 0 0 0 0 0 0 0 0 0 0
40517 - 0 0 0 0 0 0 0 0 0 0 0 0
40518 - 0 0 0 0 0 0 0 0 0 0 0 0
40519 - 0 0 0 0 0 0 0 0 0 10 10 10
40520 - 38 38 38 78 78 78 6 6 6 2 2 6
40521 - 2 2 6 46 46 46 14 14 14 42 42 42
40522 -246 246 246 253 253 253 253 253 253 253 253 253
40523 -253 253 253 253 253 253 253 253 253 253 253 253
40524 -253 253 253 253 253 253 231 231 231 242 242 242
40525 -253 253 253 253 253 253 253 253 253 253 253 253
40526 -253 253 253 253 253 253 253 253 253 253 253 253
40527 -253 253 253 253 253 253 253 253 253 253 253 253
40528 -253 253 253 253 253 253 253 253 253 253 253 253
40529 -253 253 253 253 253 253 234 234 234 10 10 10
40530 - 2 2 6 2 2 6 22 22 22 14 14 14
40531 - 2 2 6 2 2 6 2 2 6 2 2 6
40532 - 2 2 6 66 66 66 62 62 62 22 22 22
40533 - 6 6 6 0 0 0 0 0 0 0 0 0
40534 - 0 0 0 0 0 0 0 0 0 0 0 0
40535 - 0 0 0 0 0 0 0 0 0 0 0 0
40536 - 0 0 0 0 0 0 0 0 0 0 0 0
40537 - 0 0 0 0 0 0 0 0 0 0 0 0
40538 - 0 0 0 0 0 0 0 0 0 0 0 0
40539 - 0 0 0 0 0 0 6 6 6 18 18 18
40540 - 50 50 50 74 74 74 2 2 6 2 2 6
40541 - 14 14 14 70 70 70 34 34 34 62 62 62
40542 -250 250 250 253 253 253 253 253 253 253 253 253
40543 -253 253 253 253 253 253 253 253 253 253 253 253
40544 -253 253 253 253 253 253 231 231 231 246 246 246
40545 -253 253 253 253 253 253 253 253 253 253 253 253
40546 -253 253 253 253 253 253 253 253 253 253 253 253
40547 -253 253 253 253 253 253 253 253 253 253 253 253
40548 -253 253 253 253 253 253 253 253 253 253 253 253
40549 -253 253 253 253 253 253 234 234 234 14 14 14
40550 - 2 2 6 2 2 6 30 30 30 2 2 6
40551 - 2 2 6 2 2 6 2 2 6 2 2 6
40552 - 2 2 6 66 66 66 62 62 62 22 22 22
40553 - 6 6 6 0 0 0 0 0 0 0 0 0
40554 - 0 0 0 0 0 0 0 0 0 0 0 0
40555 - 0 0 0 0 0 0 0 0 0 0 0 0
40556 - 0 0 0 0 0 0 0 0 0 0 0 0
40557 - 0 0 0 0 0 0 0 0 0 0 0 0
40558 - 0 0 0 0 0 0 0 0 0 0 0 0
40559 - 0 0 0 0 0 0 6 6 6 18 18 18
40560 - 54 54 54 62 62 62 2 2 6 2 2 6
40561 - 2 2 6 30 30 30 46 46 46 70 70 70
40562 -250 250 250 253 253 253 253 253 253 253 253 253
40563 -253 253 253 253 253 253 253 253 253 253 253 253
40564 -253 253 253 253 253 253 231 231 231 246 246 246
40565 -253 253 253 253 253 253 253 253 253 253 253 253
40566 -253 253 253 253 253 253 253 253 253 253 253 253
40567 -253 253 253 253 253 253 253 253 253 253 253 253
40568 -253 253 253 253 253 253 253 253 253 253 253 253
40569 -253 253 253 253 253 253 226 226 226 10 10 10
40570 - 2 2 6 6 6 6 30 30 30 2 2 6
40571 - 2 2 6 2 2 6 2 2 6 2 2 6
40572 - 2 2 6 66 66 66 58 58 58 22 22 22
40573 - 6 6 6 0 0 0 0 0 0 0 0 0
40574 - 0 0 0 0 0 0 0 0 0 0 0 0
40575 - 0 0 0 0 0 0 0 0 0 0 0 0
40576 - 0 0 0 0 0 0 0 0 0 0 0 0
40577 - 0 0 0 0 0 0 0 0 0 0 0 0
40578 - 0 0 0 0 0 0 0 0 0 0 0 0
40579 - 0 0 0 0 0 0 6 6 6 22 22 22
40580 - 58 58 58 62 62 62 2 2 6 2 2 6
40581 - 2 2 6 2 2 6 30 30 30 78 78 78
40582 -250 250 250 253 253 253 253 253 253 253 253 253
40583 -253 253 253 253 253 253 253 253 253 253 253 253
40584 -253 253 253 253 253 253 231 231 231 246 246 246
40585 -253 253 253 253 253 253 253 253 253 253 253 253
40586 -253 253 253 253 253 253 253 253 253 253 253 253
40587 -253 253 253 253 253 253 253 253 253 253 253 253
40588 -253 253 253 253 253 253 253 253 253 253 253 253
40589 -253 253 253 253 253 253 206 206 206 2 2 6
40590 - 22 22 22 34 34 34 18 14 6 22 22 22
40591 - 26 26 26 18 18 18 6 6 6 2 2 6
40592 - 2 2 6 82 82 82 54 54 54 18 18 18
40593 - 6 6 6 0 0 0 0 0 0 0 0 0
40594 - 0 0 0 0 0 0 0 0 0 0 0 0
40595 - 0 0 0 0 0 0 0 0 0 0 0 0
40596 - 0 0 0 0 0 0 0 0 0 0 0 0
40597 - 0 0 0 0 0 0 0 0 0 0 0 0
40598 - 0 0 0 0 0 0 0 0 0 0 0 0
40599 - 0 0 0 0 0 0 6 6 6 26 26 26
40600 - 62 62 62 106 106 106 74 54 14 185 133 11
40601 -210 162 10 121 92 8 6 6 6 62 62 62
40602 -238 238 238 253 253 253 253 253 253 253 253 253
40603 -253 253 253 253 253 253 253 253 253 253 253 253
40604 -253 253 253 253 253 253 231 231 231 246 246 246
40605 -253 253 253 253 253 253 253 253 253 253 253 253
40606 -253 253 253 253 253 253 253 253 253 253 253 253
40607 -253 253 253 253 253 253 253 253 253 253 253 253
40608 -253 253 253 253 253 253 253 253 253 253 253 253
40609 -253 253 253 253 253 253 158 158 158 18 18 18
40610 - 14 14 14 2 2 6 2 2 6 2 2 6
40611 - 6 6 6 18 18 18 66 66 66 38 38 38
40612 - 6 6 6 94 94 94 50 50 50 18 18 18
40613 - 6 6 6 0 0 0 0 0 0 0 0 0
40614 - 0 0 0 0 0 0 0 0 0 0 0 0
40615 - 0 0 0 0 0 0 0 0 0 0 0 0
40616 - 0 0 0 0 0 0 0 0 0 0 0 0
40617 - 0 0 0 0 0 0 0 0 0 0 0 0
40618 - 0 0 0 0 0 0 0 0 0 6 6 6
40619 - 10 10 10 10 10 10 18 18 18 38 38 38
40620 - 78 78 78 142 134 106 216 158 10 242 186 14
40621 -246 190 14 246 190 14 156 118 10 10 10 10
40622 - 90 90 90 238 238 238 253 253 253 253 253 253
40623 -253 253 253 253 253 253 253 253 253 253 253 253
40624 -253 253 253 253 253 253 231 231 231 250 250 250
40625 -253 253 253 253 253 253 253 253 253 253 253 253
40626 -253 253 253 253 253 253 253 253 253 253 253 253
40627 -253 253 253 253 253 253 253 253 253 253 253 253
40628 -253 253 253 253 253 253 253 253 253 246 230 190
40629 -238 204 91 238 204 91 181 142 44 37 26 9
40630 - 2 2 6 2 2 6 2 2 6 2 2 6
40631 - 2 2 6 2 2 6 38 38 38 46 46 46
40632 - 26 26 26 106 106 106 54 54 54 18 18 18
40633 - 6 6 6 0 0 0 0 0 0 0 0 0
40634 - 0 0 0 0 0 0 0 0 0 0 0 0
40635 - 0 0 0 0 0 0 0 0 0 0 0 0
40636 - 0 0 0 0 0 0 0 0 0 0 0 0
40637 - 0 0 0 0 0 0 0 0 0 0 0 0
40638 - 0 0 0 6 6 6 14 14 14 22 22 22
40639 - 30 30 30 38 38 38 50 50 50 70 70 70
40640 -106 106 106 190 142 34 226 170 11 242 186 14
40641 -246 190 14 246 190 14 246 190 14 154 114 10
40642 - 6 6 6 74 74 74 226 226 226 253 253 253
40643 -253 253 253 253 253 253 253 253 253 253 253 253
40644 -253 253 253 253 253 253 231 231 231 250 250 250
40645 -253 253 253 253 253 253 253 253 253 253 253 253
40646 -253 253 253 253 253 253 253 253 253 253 253 253
40647 -253 253 253 253 253 253 253 253 253 253 253 253
40648 -253 253 253 253 253 253 253 253 253 228 184 62
40649 -241 196 14 241 208 19 232 195 16 38 30 10
40650 - 2 2 6 2 2 6 2 2 6 2 2 6
40651 - 2 2 6 6 6 6 30 30 30 26 26 26
40652 -203 166 17 154 142 90 66 66 66 26 26 26
40653 - 6 6 6 0 0 0 0 0 0 0 0 0
40654 - 0 0 0 0 0 0 0 0 0 0 0 0
40655 - 0 0 0 0 0 0 0 0 0 0 0 0
40656 - 0 0 0 0 0 0 0 0 0 0 0 0
40657 - 0 0 0 0 0 0 0 0 0 0 0 0
40658 - 6 6 6 18 18 18 38 38 38 58 58 58
40659 - 78 78 78 86 86 86 101 101 101 123 123 123
40660 -175 146 61 210 150 10 234 174 13 246 186 14
40661 -246 190 14 246 190 14 246 190 14 238 190 10
40662 -102 78 10 2 2 6 46 46 46 198 198 198
40663 -253 253 253 253 253 253 253 253 253 253 253 253
40664 -253 253 253 253 253 253 234 234 234 242 242 242
40665 -253 253 253 253 253 253 253 253 253 253 253 253
40666 -253 253 253 253 253 253 253 253 253 253 253 253
40667 -253 253 253 253 253 253 253 253 253 253 253 253
40668 -253 253 253 253 253 253 253 253 253 224 178 62
40669 -242 186 14 241 196 14 210 166 10 22 18 6
40670 - 2 2 6 2 2 6 2 2 6 2 2 6
40671 - 2 2 6 2 2 6 6 6 6 121 92 8
40672 -238 202 15 232 195 16 82 82 82 34 34 34
40673 - 10 10 10 0 0 0 0 0 0 0 0 0
40674 - 0 0 0 0 0 0 0 0 0 0 0 0
40675 - 0 0 0 0 0 0 0 0 0 0 0 0
40676 - 0 0 0 0 0 0 0 0 0 0 0 0
40677 - 0 0 0 0 0 0 0 0 0 0 0 0
40678 - 14 14 14 38 38 38 70 70 70 154 122 46
40679 -190 142 34 200 144 11 197 138 11 197 138 11
40680 -213 154 11 226 170 11 242 186 14 246 190 14
40681 -246 190 14 246 190 14 246 190 14 246 190 14
40682 -225 175 15 46 32 6 2 2 6 22 22 22
40683 -158 158 158 250 250 250 253 253 253 253 253 253
40684 -253 253 253 253 253 253 253 253 253 253 253 253
40685 -253 253 253 253 253 253 253 253 253 253 253 253
40686 -253 253 253 253 253 253 253 253 253 253 253 253
40687 -253 253 253 253 253 253 253 253 253 253 253 253
40688 -253 253 253 250 250 250 242 242 242 224 178 62
40689 -239 182 13 236 186 11 213 154 11 46 32 6
40690 - 2 2 6 2 2 6 2 2 6 2 2 6
40691 - 2 2 6 2 2 6 61 42 6 225 175 15
40692 -238 190 10 236 186 11 112 100 78 42 42 42
40693 - 14 14 14 0 0 0 0 0 0 0 0 0
40694 - 0 0 0 0 0 0 0 0 0 0 0 0
40695 - 0 0 0 0 0 0 0 0 0 0 0 0
40696 - 0 0 0 0 0 0 0 0 0 0 0 0
40697 - 0 0 0 0 0 0 0 0 0 6 6 6
40698 - 22 22 22 54 54 54 154 122 46 213 154 11
40699 -226 170 11 230 174 11 226 170 11 226 170 11
40700 -236 178 12 242 186 14 246 190 14 246 190 14
40701 -246 190 14 246 190 14 246 190 14 246 190 14
40702 -241 196 14 184 144 12 10 10 10 2 2 6
40703 - 6 6 6 116 116 116 242 242 242 253 253 253
40704 -253 253 253 253 253 253 253 253 253 253 253 253
40705 -253 253 253 253 253 253 253 253 253 253 253 253
40706 -253 253 253 253 253 253 253 253 253 253 253 253
40707 -253 253 253 253 253 253 253 253 253 253 253 253
40708 -253 253 253 231 231 231 198 198 198 214 170 54
40709 -236 178 12 236 178 12 210 150 10 137 92 6
40710 - 18 14 6 2 2 6 2 2 6 2 2 6
40711 - 6 6 6 70 47 6 200 144 11 236 178 12
40712 -239 182 13 239 182 13 124 112 88 58 58 58
40713 - 22 22 22 6 6 6 0 0 0 0 0 0
40714 - 0 0 0 0 0 0 0 0 0 0 0 0
40715 - 0 0 0 0 0 0 0 0 0 0 0 0
40716 - 0 0 0 0 0 0 0 0 0 0 0 0
40717 - 0 0 0 0 0 0 0 0 0 10 10 10
40718 - 30 30 30 70 70 70 180 133 36 226 170 11
40719 -239 182 13 242 186 14 242 186 14 246 186 14
40720 -246 190 14 246 190 14 246 190 14 246 190 14
40721 -246 190 14 246 190 14 246 190 14 246 190 14
40722 -246 190 14 232 195 16 98 70 6 2 2 6
40723 - 2 2 6 2 2 6 66 66 66 221 221 221
40724 -253 253 253 253 253 253 253 253 253 253 253 253
40725 -253 253 253 253 253 253 253 253 253 253 253 253
40726 -253 253 253 253 253 253 253 253 253 253 253 253
40727 -253 253 253 253 253 253 253 253 253 253 253 253
40728 -253 253 253 206 206 206 198 198 198 214 166 58
40729 -230 174 11 230 174 11 216 158 10 192 133 9
40730 -163 110 8 116 81 8 102 78 10 116 81 8
40731 -167 114 7 197 138 11 226 170 11 239 182 13
40732 -242 186 14 242 186 14 162 146 94 78 78 78
40733 - 34 34 34 14 14 14 6 6 6 0 0 0
40734 - 0 0 0 0 0 0 0 0 0 0 0 0
40735 - 0 0 0 0 0 0 0 0 0 0 0 0
40736 - 0 0 0 0 0 0 0 0 0 0 0 0
40737 - 0 0 0 0 0 0 0 0 0 6 6 6
40738 - 30 30 30 78 78 78 190 142 34 226 170 11
40739 -239 182 13 246 190 14 246 190 14 246 190 14
40740 -246 190 14 246 190 14 246 190 14 246 190 14
40741 -246 190 14 246 190 14 246 190 14 246 190 14
40742 -246 190 14 241 196 14 203 166 17 22 18 6
40743 - 2 2 6 2 2 6 2 2 6 38 38 38
40744 -218 218 218 253 253 253 253 253 253 253 253 253
40745 -253 253 253 253 253 253 253 253 253 253 253 253
40746 -253 253 253 253 253 253 253 253 253 253 253 253
40747 -253 253 253 253 253 253 253 253 253 253 253 253
40748 -250 250 250 206 206 206 198 198 198 202 162 69
40749 -226 170 11 236 178 12 224 166 10 210 150 10
40750 -200 144 11 197 138 11 192 133 9 197 138 11
40751 -210 150 10 226 170 11 242 186 14 246 190 14
40752 -246 190 14 246 186 14 225 175 15 124 112 88
40753 - 62 62 62 30 30 30 14 14 14 6 6 6
40754 - 0 0 0 0 0 0 0 0 0 0 0 0
40755 - 0 0 0 0 0 0 0 0 0 0 0 0
40756 - 0 0 0 0 0 0 0 0 0 0 0 0
40757 - 0 0 0 0 0 0 0 0 0 10 10 10
40758 - 30 30 30 78 78 78 174 135 50 224 166 10
40759 -239 182 13 246 190 14 246 190 14 246 190 14
40760 -246 190 14 246 190 14 246 190 14 246 190 14
40761 -246 190 14 246 190 14 246 190 14 246 190 14
40762 -246 190 14 246 190 14 241 196 14 139 102 15
40763 - 2 2 6 2 2 6 2 2 6 2 2 6
40764 - 78 78 78 250 250 250 253 253 253 253 253 253
40765 -253 253 253 253 253 253 253 253 253 253 253 253
40766 -253 253 253 253 253 253 253 253 253 253 253 253
40767 -253 253 253 253 253 253 253 253 253 253 253 253
40768 -250 250 250 214 214 214 198 198 198 190 150 46
40769 -219 162 10 236 178 12 234 174 13 224 166 10
40770 -216 158 10 213 154 11 213 154 11 216 158 10
40771 -226 170 11 239 182 13 246 190 14 246 190 14
40772 -246 190 14 246 190 14 242 186 14 206 162 42
40773 -101 101 101 58 58 58 30 30 30 14 14 14
40774 - 6 6 6 0 0 0 0 0 0 0 0 0
40775 - 0 0 0 0 0 0 0 0 0 0 0 0
40776 - 0 0 0 0 0 0 0 0 0 0 0 0
40777 - 0 0 0 0 0 0 0 0 0 10 10 10
40778 - 30 30 30 74 74 74 174 135 50 216 158 10
40779 -236 178 12 246 190 14 246 190 14 246 190 14
40780 -246 190 14 246 190 14 246 190 14 246 190 14
40781 -246 190 14 246 190 14 246 190 14 246 190 14
40782 -246 190 14 246 190 14 241 196 14 226 184 13
40783 - 61 42 6 2 2 6 2 2 6 2 2 6
40784 - 22 22 22 238 238 238 253 253 253 253 253 253
40785 -253 253 253 253 253 253 253 253 253 253 253 253
40786 -253 253 253 253 253 253 253 253 253 253 253 253
40787 -253 253 253 253 253 253 253 253 253 253 253 253
40788 -253 253 253 226 226 226 187 187 187 180 133 36
40789 -216 158 10 236 178 12 239 182 13 236 178 12
40790 -230 174 11 226 170 11 226 170 11 230 174 11
40791 -236 178 12 242 186 14 246 190 14 246 190 14
40792 -246 190 14 246 190 14 246 186 14 239 182 13
40793 -206 162 42 106 106 106 66 66 66 34 34 34
40794 - 14 14 14 6 6 6 0 0 0 0 0 0
40795 - 0 0 0 0 0 0 0 0 0 0 0 0
40796 - 0 0 0 0 0 0 0 0 0 0 0 0
40797 - 0 0 0 0 0 0 0 0 0 6 6 6
40798 - 26 26 26 70 70 70 163 133 67 213 154 11
40799 -236 178 12 246 190 14 246 190 14 246 190 14
40800 -246 190 14 246 190 14 246 190 14 246 190 14
40801 -246 190 14 246 190 14 246 190 14 246 190 14
40802 -246 190 14 246 190 14 246 190 14 241 196 14
40803 -190 146 13 18 14 6 2 2 6 2 2 6
40804 - 46 46 46 246 246 246 253 253 253 253 253 253
40805 -253 253 253 253 253 253 253 253 253 253 253 253
40806 -253 253 253 253 253 253 253 253 253 253 253 253
40807 -253 253 253 253 253 253 253 253 253 253 253 253
40808 -253 253 253 221 221 221 86 86 86 156 107 11
40809 -216 158 10 236 178 12 242 186 14 246 186 14
40810 -242 186 14 239 182 13 239 182 13 242 186 14
40811 -242 186 14 246 186 14 246 190 14 246 190 14
40812 -246 190 14 246 190 14 246 190 14 246 190 14
40813 -242 186 14 225 175 15 142 122 72 66 66 66
40814 - 30 30 30 10 10 10 0 0 0 0 0 0
40815 - 0 0 0 0 0 0 0 0 0 0 0 0
40816 - 0 0 0 0 0 0 0 0 0 0 0 0
40817 - 0 0 0 0 0 0 0 0 0 6 6 6
40818 - 26 26 26 70 70 70 163 133 67 210 150 10
40819 -236 178 12 246 190 14 246 190 14 246 190 14
40820 -246 190 14 246 190 14 246 190 14 246 190 14
40821 -246 190 14 246 190 14 246 190 14 246 190 14
40822 -246 190 14 246 190 14 246 190 14 246 190 14
40823 -232 195 16 121 92 8 34 34 34 106 106 106
40824 -221 221 221 253 253 253 253 253 253 253 253 253
40825 -253 253 253 253 253 253 253 253 253 253 253 253
40826 -253 253 253 253 253 253 253 253 253 253 253 253
40827 -253 253 253 253 253 253 253 253 253 253 253 253
40828 -242 242 242 82 82 82 18 14 6 163 110 8
40829 -216 158 10 236 178 12 242 186 14 246 190 14
40830 -246 190 14 246 190 14 246 190 14 246 190 14
40831 -246 190 14 246 190 14 246 190 14 246 190 14
40832 -246 190 14 246 190 14 246 190 14 246 190 14
40833 -246 190 14 246 190 14 242 186 14 163 133 67
40834 - 46 46 46 18 18 18 6 6 6 0 0 0
40835 - 0 0 0 0 0 0 0 0 0 0 0 0
40836 - 0 0 0 0 0 0 0 0 0 0 0 0
40837 - 0 0 0 0 0 0 0 0 0 10 10 10
40838 - 30 30 30 78 78 78 163 133 67 210 150 10
40839 -236 178 12 246 186 14 246 190 14 246 190 14
40840 -246 190 14 246 190 14 246 190 14 246 190 14
40841 -246 190 14 246 190 14 246 190 14 246 190 14
40842 -246 190 14 246 190 14 246 190 14 246 190 14
40843 -241 196 14 215 174 15 190 178 144 253 253 253
40844 -253 253 253 253 253 253 253 253 253 253 253 253
40845 -253 253 253 253 253 253 253 253 253 253 253 253
40846 -253 253 253 253 253 253 253 253 253 253 253 253
40847 -253 253 253 253 253 253 253 253 253 218 218 218
40848 - 58 58 58 2 2 6 22 18 6 167 114 7
40849 -216 158 10 236 178 12 246 186 14 246 190 14
40850 -246 190 14 246 190 14 246 190 14 246 190 14
40851 -246 190 14 246 190 14 246 190 14 246 190 14
40852 -246 190 14 246 190 14 246 190 14 246 190 14
40853 -246 190 14 246 186 14 242 186 14 190 150 46
40854 - 54 54 54 22 22 22 6 6 6 0 0 0
40855 - 0 0 0 0 0 0 0 0 0 0 0 0
40856 - 0 0 0 0 0 0 0 0 0 0 0 0
40857 - 0 0 0 0 0 0 0 0 0 14 14 14
40858 - 38 38 38 86 86 86 180 133 36 213 154 11
40859 -236 178 12 246 186 14 246 190 14 246 190 14
40860 -246 190 14 246 190 14 246 190 14 246 190 14
40861 -246 190 14 246 190 14 246 190 14 246 190 14
40862 -246 190 14 246 190 14 246 190 14 246 190 14
40863 -246 190 14 232 195 16 190 146 13 214 214 214
40864 -253 253 253 253 253 253 253 253 253 253 253 253
40865 -253 253 253 253 253 253 253 253 253 253 253 253
40866 -253 253 253 253 253 253 253 253 253 253 253 253
40867 -253 253 253 250 250 250 170 170 170 26 26 26
40868 - 2 2 6 2 2 6 37 26 9 163 110 8
40869 -219 162 10 239 182 13 246 186 14 246 190 14
40870 -246 190 14 246 190 14 246 190 14 246 190 14
40871 -246 190 14 246 190 14 246 190 14 246 190 14
40872 -246 190 14 246 190 14 246 190 14 246 190 14
40873 -246 186 14 236 178 12 224 166 10 142 122 72
40874 - 46 46 46 18 18 18 6 6 6 0 0 0
40875 - 0 0 0 0 0 0 0 0 0 0 0 0
40876 - 0 0 0 0 0 0 0 0 0 0 0 0
40877 - 0 0 0 0 0 0 6 6 6 18 18 18
40878 - 50 50 50 109 106 95 192 133 9 224 166 10
40879 -242 186 14 246 190 14 246 190 14 246 190 14
40880 -246 190 14 246 190 14 246 190 14 246 190 14
40881 -246 190 14 246 190 14 246 190 14 246 190 14
40882 -246 190 14 246 190 14 246 190 14 246 190 14
40883 -242 186 14 226 184 13 210 162 10 142 110 46
40884 -226 226 226 253 253 253 253 253 253 253 253 253
40885 -253 253 253 253 253 253 253 253 253 253 253 253
40886 -253 253 253 253 253 253 253 253 253 253 253 253
40887 -198 198 198 66 66 66 2 2 6 2 2 6
40888 - 2 2 6 2 2 6 50 34 6 156 107 11
40889 -219 162 10 239 182 13 246 186 14 246 190 14
40890 -246 190 14 246 190 14 246 190 14 246 190 14
40891 -246 190 14 246 190 14 246 190 14 246 190 14
40892 -246 190 14 246 190 14 246 190 14 242 186 14
40893 -234 174 13 213 154 11 154 122 46 66 66 66
40894 - 30 30 30 10 10 10 0 0 0 0 0 0
40895 - 0 0 0 0 0 0 0 0 0 0 0 0
40896 - 0 0 0 0 0 0 0 0 0 0 0 0
40897 - 0 0 0 0 0 0 6 6 6 22 22 22
40898 - 58 58 58 154 121 60 206 145 10 234 174 13
40899 -242 186 14 246 186 14 246 190 14 246 190 14
40900 -246 190 14 246 190 14 246 190 14 246 190 14
40901 -246 190 14 246 190 14 246 190 14 246 190 14
40902 -246 190 14 246 190 14 246 190 14 246 190 14
40903 -246 186 14 236 178 12 210 162 10 163 110 8
40904 - 61 42 6 138 138 138 218 218 218 250 250 250
40905 -253 253 253 253 253 253 253 253 253 250 250 250
40906 -242 242 242 210 210 210 144 144 144 66 66 66
40907 - 6 6 6 2 2 6 2 2 6 2 2 6
40908 - 2 2 6 2 2 6 61 42 6 163 110 8
40909 -216 158 10 236 178 12 246 190 14 246 190 14
40910 -246 190 14 246 190 14 246 190 14 246 190 14
40911 -246 190 14 246 190 14 246 190 14 246 190 14
40912 -246 190 14 239 182 13 230 174 11 216 158 10
40913 -190 142 34 124 112 88 70 70 70 38 38 38
40914 - 18 18 18 6 6 6 0 0 0 0 0 0
40915 - 0 0 0 0 0 0 0 0 0 0 0 0
40916 - 0 0 0 0 0 0 0 0 0 0 0 0
40917 - 0 0 0 0 0 0 6 6 6 22 22 22
40918 - 62 62 62 168 124 44 206 145 10 224 166 10
40919 -236 178 12 239 182 13 242 186 14 242 186 14
40920 -246 186 14 246 190 14 246 190 14 246 190 14
40921 -246 190 14 246 190 14 246 190 14 246 190 14
40922 -246 190 14 246 190 14 246 190 14 246 190 14
40923 -246 190 14 236 178 12 216 158 10 175 118 6
40924 - 80 54 7 2 2 6 6 6 6 30 30 30
40925 - 54 54 54 62 62 62 50 50 50 38 38 38
40926 - 14 14 14 2 2 6 2 2 6 2 2 6
40927 - 2 2 6 2 2 6 2 2 6 2 2 6
40928 - 2 2 6 6 6 6 80 54 7 167 114 7
40929 -213 154 11 236 178 12 246 190 14 246 190 14
40930 -246 190 14 246 190 14 246 190 14 246 190 14
40931 -246 190 14 242 186 14 239 182 13 239 182 13
40932 -230 174 11 210 150 10 174 135 50 124 112 88
40933 - 82 82 82 54 54 54 34 34 34 18 18 18
40934 - 6 6 6 0 0 0 0 0 0 0 0 0
40935 - 0 0 0 0 0 0 0 0 0 0 0 0
40936 - 0 0 0 0 0 0 0 0 0 0 0 0
40937 - 0 0 0 0 0 0 6 6 6 18 18 18
40938 - 50 50 50 158 118 36 192 133 9 200 144 11
40939 -216 158 10 219 162 10 224 166 10 226 170 11
40940 -230 174 11 236 178 12 239 182 13 239 182 13
40941 -242 186 14 246 186 14 246 190 14 246 190 14
40942 -246 190 14 246 190 14 246 190 14 246 190 14
40943 -246 186 14 230 174 11 210 150 10 163 110 8
40944 -104 69 6 10 10 10 2 2 6 2 2 6
40945 - 2 2 6 2 2 6 2 2 6 2 2 6
40946 - 2 2 6 2 2 6 2 2 6 2 2 6
40947 - 2 2 6 2 2 6 2 2 6 2 2 6
40948 - 2 2 6 6 6 6 91 60 6 167 114 7
40949 -206 145 10 230 174 11 242 186 14 246 190 14
40950 -246 190 14 246 190 14 246 186 14 242 186 14
40951 -239 182 13 230 174 11 224 166 10 213 154 11
40952 -180 133 36 124 112 88 86 86 86 58 58 58
40953 - 38 38 38 22 22 22 10 10 10 6 6 6
40954 - 0 0 0 0 0 0 0 0 0 0 0 0
40955 - 0 0 0 0 0 0 0 0 0 0 0 0
40956 - 0 0 0 0 0 0 0 0 0 0 0 0
40957 - 0 0 0 0 0 0 0 0 0 14 14 14
40958 - 34 34 34 70 70 70 138 110 50 158 118 36
40959 -167 114 7 180 123 7 192 133 9 197 138 11
40960 -200 144 11 206 145 10 213 154 11 219 162 10
40961 -224 166 10 230 174 11 239 182 13 242 186 14
40962 -246 186 14 246 186 14 246 186 14 246 186 14
40963 -239 182 13 216 158 10 185 133 11 152 99 6
40964 -104 69 6 18 14 6 2 2 6 2 2 6
40965 - 2 2 6 2 2 6 2 2 6 2 2 6
40966 - 2 2 6 2 2 6 2 2 6 2 2 6
40967 - 2 2 6 2 2 6 2 2 6 2 2 6
40968 - 2 2 6 6 6 6 80 54 7 152 99 6
40969 -192 133 9 219 162 10 236 178 12 239 182 13
40970 -246 186 14 242 186 14 239 182 13 236 178 12
40971 -224 166 10 206 145 10 192 133 9 154 121 60
40972 - 94 94 94 62 62 62 42 42 42 22 22 22
40973 - 14 14 14 6 6 6 0 0 0 0 0 0
40974 - 0 0 0 0 0 0 0 0 0 0 0 0
40975 - 0 0 0 0 0 0 0 0 0 0 0 0
40976 - 0 0 0 0 0 0 0 0 0 0 0 0
40977 - 0 0 0 0 0 0 0 0 0 6 6 6
40978 - 18 18 18 34 34 34 58 58 58 78 78 78
40979 -101 98 89 124 112 88 142 110 46 156 107 11
40980 -163 110 8 167 114 7 175 118 6 180 123 7
40981 -185 133 11 197 138 11 210 150 10 219 162 10
40982 -226 170 11 236 178 12 236 178 12 234 174 13
40983 -219 162 10 197 138 11 163 110 8 130 83 6
40984 - 91 60 6 10 10 10 2 2 6 2 2 6
40985 - 18 18 18 38 38 38 38 38 38 38 38 38
40986 - 38 38 38 38 38 38 38 38 38 38 38 38
40987 - 38 38 38 38 38 38 26 26 26 2 2 6
40988 - 2 2 6 6 6 6 70 47 6 137 92 6
40989 -175 118 6 200 144 11 219 162 10 230 174 11
40990 -234 174 13 230 174 11 219 162 10 210 150 10
40991 -192 133 9 163 110 8 124 112 88 82 82 82
40992 - 50 50 50 30 30 30 14 14 14 6 6 6
40993 - 0 0 0 0 0 0 0 0 0 0 0 0
40994 - 0 0 0 0 0 0 0 0 0 0 0 0
40995 - 0 0 0 0 0 0 0 0 0 0 0 0
40996 - 0 0 0 0 0 0 0 0 0 0 0 0
40997 - 0 0 0 0 0 0 0 0 0 0 0 0
40998 - 6 6 6 14 14 14 22 22 22 34 34 34
40999 - 42 42 42 58 58 58 74 74 74 86 86 86
41000 -101 98 89 122 102 70 130 98 46 121 87 25
41001 -137 92 6 152 99 6 163 110 8 180 123 7
41002 -185 133 11 197 138 11 206 145 10 200 144 11
41003 -180 123 7 156 107 11 130 83 6 104 69 6
41004 - 50 34 6 54 54 54 110 110 110 101 98 89
41005 - 86 86 86 82 82 82 78 78 78 78 78 78
41006 - 78 78 78 78 78 78 78 78 78 78 78 78
41007 - 78 78 78 82 82 82 86 86 86 94 94 94
41008 -106 106 106 101 101 101 86 66 34 124 80 6
41009 -156 107 11 180 123 7 192 133 9 200 144 11
41010 -206 145 10 200 144 11 192 133 9 175 118 6
41011 -139 102 15 109 106 95 70 70 70 42 42 42
41012 - 22 22 22 10 10 10 0 0 0 0 0 0
41013 - 0 0 0 0 0 0 0 0 0 0 0 0
41014 - 0 0 0 0 0 0 0 0 0 0 0 0
41015 - 0 0 0 0 0 0 0 0 0 0 0 0
41016 - 0 0 0 0 0 0 0 0 0 0 0 0
41017 - 0 0 0 0 0 0 0 0 0 0 0 0
41018 - 0 0 0 0 0 0 6 6 6 10 10 10
41019 - 14 14 14 22 22 22 30 30 30 38 38 38
41020 - 50 50 50 62 62 62 74 74 74 90 90 90
41021 -101 98 89 112 100 78 121 87 25 124 80 6
41022 -137 92 6 152 99 6 152 99 6 152 99 6
41023 -138 86 6 124 80 6 98 70 6 86 66 30
41024 -101 98 89 82 82 82 58 58 58 46 46 46
41025 - 38 38 38 34 34 34 34 34 34 34 34 34
41026 - 34 34 34 34 34 34 34 34 34 34 34 34
41027 - 34 34 34 34 34 34 38 38 38 42 42 42
41028 - 54 54 54 82 82 82 94 86 76 91 60 6
41029 -134 86 6 156 107 11 167 114 7 175 118 6
41030 -175 118 6 167 114 7 152 99 6 121 87 25
41031 -101 98 89 62 62 62 34 34 34 18 18 18
41032 - 6 6 6 0 0 0 0 0 0 0 0 0
41033 - 0 0 0 0 0 0 0 0 0 0 0 0
41034 - 0 0 0 0 0 0 0 0 0 0 0 0
41035 - 0 0 0 0 0 0 0 0 0 0 0 0
41036 - 0 0 0 0 0 0 0 0 0 0 0 0
41037 - 0 0 0 0 0 0 0 0 0 0 0 0
41038 - 0 0 0 0 0 0 0 0 0 0 0 0
41039 - 0 0 0 6 6 6 6 6 6 10 10 10
41040 - 18 18 18 22 22 22 30 30 30 42 42 42
41041 - 50 50 50 66 66 66 86 86 86 101 98 89
41042 -106 86 58 98 70 6 104 69 6 104 69 6
41043 -104 69 6 91 60 6 82 62 34 90 90 90
41044 - 62 62 62 38 38 38 22 22 22 14 14 14
41045 - 10 10 10 10 10 10 10 10 10 10 10 10
41046 - 10 10 10 10 10 10 6 6 6 10 10 10
41047 - 10 10 10 10 10 10 10 10 10 14 14 14
41048 - 22 22 22 42 42 42 70 70 70 89 81 66
41049 - 80 54 7 104 69 6 124 80 6 137 92 6
41050 -134 86 6 116 81 8 100 82 52 86 86 86
41051 - 58 58 58 30 30 30 14 14 14 6 6 6
41052 - 0 0 0 0 0 0 0 0 0 0 0 0
41053 - 0 0 0 0 0 0 0 0 0 0 0 0
41054 - 0 0 0 0 0 0 0 0 0 0 0 0
41055 - 0 0 0 0 0 0 0 0 0 0 0 0
41056 - 0 0 0 0 0 0 0 0 0 0 0 0
41057 - 0 0 0 0 0 0 0 0 0 0 0 0
41058 - 0 0 0 0 0 0 0 0 0 0 0 0
41059 - 0 0 0 0 0 0 0 0 0 0 0 0
41060 - 0 0 0 6 6 6 10 10 10 14 14 14
41061 - 18 18 18 26 26 26 38 38 38 54 54 54
41062 - 70 70 70 86 86 86 94 86 76 89 81 66
41063 - 89 81 66 86 86 86 74 74 74 50 50 50
41064 - 30 30 30 14 14 14 6 6 6 0 0 0
41065 - 0 0 0 0 0 0 0 0 0 0 0 0
41066 - 0 0 0 0 0 0 0 0 0 0 0 0
41067 - 0 0 0 0 0 0 0 0 0 0 0 0
41068 - 6 6 6 18 18 18 34 34 34 58 58 58
41069 - 82 82 82 89 81 66 89 81 66 89 81 66
41070 - 94 86 66 94 86 76 74 74 74 50 50 50
41071 - 26 26 26 14 14 14 6 6 6 0 0 0
41072 - 0 0 0 0 0 0 0 0 0 0 0 0
41073 - 0 0 0 0 0 0 0 0 0 0 0 0
41074 - 0 0 0 0 0 0 0 0 0 0 0 0
41075 - 0 0 0 0 0 0 0 0 0 0 0 0
41076 - 0 0 0 0 0 0 0 0 0 0 0 0
41077 - 0 0 0 0 0 0 0 0 0 0 0 0
41078 - 0 0 0 0 0 0 0 0 0 0 0 0
41079 - 0 0 0 0 0 0 0 0 0 0 0 0
41080 - 0 0 0 0 0 0 0 0 0 0 0 0
41081 - 6 6 6 6 6 6 14 14 14 18 18 18
41082 - 30 30 30 38 38 38 46 46 46 54 54 54
41083 - 50 50 50 42 42 42 30 30 30 18 18 18
41084 - 10 10 10 0 0 0 0 0 0 0 0 0
41085 - 0 0 0 0 0 0 0 0 0 0 0 0
41086 - 0 0 0 0 0 0 0 0 0 0 0 0
41087 - 0 0 0 0 0 0 0 0 0 0 0 0
41088 - 0 0 0 6 6 6 14 14 14 26 26 26
41089 - 38 38 38 50 50 50 58 58 58 58 58 58
41090 - 54 54 54 42 42 42 30 30 30 18 18 18
41091 - 10 10 10 0 0 0 0 0 0 0 0 0
41092 - 0 0 0 0 0 0 0 0 0 0 0 0
41093 - 0 0 0 0 0 0 0 0 0 0 0 0
41094 - 0 0 0 0 0 0 0 0 0 0 0 0
41095 - 0 0 0 0 0 0 0 0 0 0 0 0
41096 - 0 0 0 0 0 0 0 0 0 0 0 0
41097 - 0 0 0 0 0 0 0 0 0 0 0 0
41098 - 0 0 0 0 0 0 0 0 0 0 0 0
41099 - 0 0 0 0 0 0 0 0 0 0 0 0
41100 - 0 0 0 0 0 0 0 0 0 0 0 0
41101 - 0 0 0 0 0 0 0 0 0 6 6 6
41102 - 6 6 6 10 10 10 14 14 14 18 18 18
41103 - 18 18 18 14 14 14 10 10 10 6 6 6
41104 - 0 0 0 0 0 0 0 0 0 0 0 0
41105 - 0 0 0 0 0 0 0 0 0 0 0 0
41106 - 0 0 0 0 0 0 0 0 0 0 0 0
41107 - 0 0 0 0 0 0 0 0 0 0 0 0
41108 - 0 0 0 0 0 0 0 0 0 6 6 6
41109 - 14 14 14 18 18 18 22 22 22 22 22 22
41110 - 18 18 18 14 14 14 10 10 10 6 6 6
41111 - 0 0 0 0 0 0 0 0 0 0 0 0
41112 - 0 0 0 0 0 0 0 0 0 0 0 0
41113 - 0 0 0 0 0 0 0 0 0 0 0 0
41114 - 0 0 0 0 0 0 0 0 0 0 0 0
41115 - 0 0 0 0 0 0 0 0 0 0 0 0
41116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41129 +4 4 4 4 4 4
41130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41143 +4 4 4 4 4 4
41144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41157 +4 4 4 4 4 4
41158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41171 +4 4 4 4 4 4
41172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41185 +4 4 4 4 4 4
41186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41199 +4 4 4 4 4 4
41200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41204 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
41205 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
41206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41210 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41211 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41213 +4 4 4 4 4 4
41214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41218 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41219 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41220 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41224 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41225 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41226 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41227 +4 4 4 4 4 4
41228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41232 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41233 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41234 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41238 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41239 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41240 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41241 +4 4 4 4 4 4
41242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41245 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41246 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41247 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41248 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41251 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41252 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41253 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41254 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41255 +4 4 4 4 4 4
41256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41259 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41260 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41261 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41262 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41263 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41265 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41266 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41267 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41268 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41269 +4 4 4 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41273 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41274 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41275 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41276 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41277 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41278 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41279 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41280 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41281 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41282 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41283 +4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41287 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41288 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41289 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41290 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41291 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41292 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41293 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41294 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41295 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41296 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41297 +4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41301 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41302 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41303 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41304 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41305 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41306 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41307 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41308 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41309 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41310 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41311 +4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41315 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41316 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41317 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41318 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41319 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41320 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41321 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41322 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41323 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41324 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41325 +4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41329 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41330 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41331 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41332 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41333 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41334 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41335 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41336 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41337 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41338 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41339 +4 4 4 4 4 4
41340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41342 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41343 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41344 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41345 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41346 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41347 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41348 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41349 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41350 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41351 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41352 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41353 +4 4 4 4 4 4
41354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41356 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41357 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41358 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41359 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41360 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41361 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41362 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41363 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41364 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41365 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41366 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41367 +0 0 0 4 4 4
41368 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41369 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41370 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41371 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41372 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41373 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41374 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41375 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41376 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41377 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41378 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41379 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41380 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41381 +2 0 0 0 0 0
41382 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41383 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41384 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41385 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41386 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41387 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41388 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41389 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41390 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41391 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41392 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41393 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41394 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41395 +37 38 37 0 0 0
41396 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41397 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41398 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41399 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41400 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41401 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41402 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41403 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41404 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41405 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41406 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41407 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41408 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41409 +85 115 134 4 0 0
41410 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41411 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41412 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41413 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41414 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41415 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41416 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41417 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41418 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41419 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41420 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41421 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41422 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41423 +60 73 81 4 0 0
41424 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41425 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41426 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41427 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41428 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41429 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41430 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41431 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41432 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41433 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41434 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41435 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41436 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41437 +16 19 21 4 0 0
41438 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41439 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41440 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41441 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41442 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41443 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41444 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41445 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41446 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41447 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41448 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41449 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41450 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41451 +4 0 0 4 3 3
41452 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41453 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41454 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41456 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41457 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41458 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41459 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41460 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41461 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41462 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41463 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41464 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41465 +3 2 2 4 4 4
41466 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41467 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41468 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41469 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41470 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41471 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41472 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41473 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41474 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41475 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41476 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41477 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41478 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41479 +4 4 4 4 4 4
41480 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41481 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41482 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41483 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41484 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41485 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41486 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41487 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41488 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41489 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41490 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41491 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41492 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41493 +4 4 4 4 4 4
41494 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41495 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41496 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41497 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41498 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41499 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41500 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41501 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41502 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41503 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41504 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41505 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41506 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41507 +5 5 5 5 5 5
41508 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41509 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41510 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41511 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41512 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41513 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41514 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41515 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41516 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41517 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41518 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41519 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41520 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41521 +5 5 5 4 4 4
41522 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41523 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41524 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41525 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41526 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41527 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41528 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41529 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41530 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41531 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41532 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41533 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41535 +4 4 4 4 4 4
41536 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41537 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41538 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41539 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41540 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41541 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41542 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41543 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41544 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41545 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41546 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41547 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41549 +4 4 4 4 4 4
41550 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41551 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41552 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41553 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41554 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41555 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41556 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41557 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41558 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41559 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41560 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41563 +4 4 4 4 4 4
41564 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41565 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41566 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41567 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41568 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41569 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41570 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41571 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41572 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41573 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41574 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41577 +4 4 4 4 4 4
41578 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41579 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41580 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41581 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41582 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41583 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41584 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41585 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41586 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41587 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41588 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41591 +4 4 4 4 4 4
41592 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41593 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41594 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41595 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41596 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41597 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41598 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41599 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41600 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41601 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41602 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41605 +4 4 4 4 4 4
41606 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41607 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41608 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41609 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41610 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41611 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41612 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41613 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41614 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41615 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41616 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41619 +4 4 4 4 4 4
41620 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41621 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41622 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41623 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41624 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41625 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41626 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41627 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41628 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41629 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41630 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41633 +4 4 4 4 4 4
41634 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41635 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41636 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41637 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41638 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41639 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41640 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41641 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41642 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41643 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41644 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41647 +4 4 4 4 4 4
41648 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41649 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41650 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41651 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41652 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41653 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41654 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41655 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41656 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41657 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41658 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41661 +4 4 4 4 4 4
41662 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41663 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41664 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41665 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41666 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41667 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41668 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41669 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41670 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41671 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41672 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41675 +4 4 4 4 4 4
41676 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41677 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41678 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41679 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41680 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41681 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41682 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41683 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41684 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41685 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41686 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41689 +4 4 4 4 4 4
41690 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41691 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41692 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41693 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41694 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41695 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41696 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41697 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41698 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41699 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41700 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41703 +4 4 4 4 4 4
41704 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41705 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41706 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41707 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41708 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41709 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41710 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41711 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41712 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41713 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41714 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41717 +4 4 4 4 4 4
41718 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41719 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41720 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41721 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41722 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41723 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41724 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41725 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41726 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41727 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41728 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41731 +4 4 4 4 4 4
41732 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41733 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41734 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41735 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41736 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41737 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41738 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41739 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41740 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41741 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41742 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41745 +4 4 4 4 4 4
41746 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41747 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41748 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41749 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41750 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41751 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41752 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41753 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41754 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41755 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41756 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41759 +4 4 4 4 4 4
41760 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41761 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41762 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41763 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41764 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41765 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41766 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41767 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41768 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41769 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41770 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41773 +4 4 4 4 4 4
41774 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41775 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41776 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41777 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41778 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41779 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41780 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41781 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41782 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41783 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41784 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41787 +4 4 4 4 4 4
41788 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41789 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41790 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41791 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41792 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41793 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41794 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41795 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41796 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41797 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41798 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41801 +4 4 4 4 4 4
41802 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41803 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41804 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41805 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41806 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41807 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41808 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41809 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41810 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41811 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41812 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41815 +4 4 4 4 4 4
41816 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41817 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41818 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41819 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41820 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41821 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41822 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41823 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41824 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41825 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41826 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41829 +4 4 4 4 4 4
41830 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41831 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41832 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41833 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41834 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41835 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41836 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41837 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41838 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41839 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41840 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41843 +4 4 4 4 4 4
41844 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41845 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41846 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41847 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41848 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41849 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41850 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41851 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41852 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41853 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41854 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41857 +4 4 4 4 4 4
41858 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41859 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41860 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41861 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41862 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41863 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41864 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41865 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41866 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41867 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41868 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41871 +4 4 4 4 4 4
41872 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41873 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41874 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41875 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41876 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41877 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41878 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41879 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41880 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41881 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41882 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41885 +4 4 4 4 4 4
41886 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41887 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41888 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41889 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41890 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41891 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41892 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41893 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41894 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41895 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41896 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41899 +4 4 4 4 4 4
41900 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41901 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41902 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41903 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41904 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41905 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41906 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41907 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41908 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41909 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41910 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41913 +4 4 4 4 4 4
41914 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41915 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41916 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41917 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41918 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41919 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41920 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41921 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41922 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41923 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41924 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41927 +4 4 4 4 4 4
41928 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41929 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41930 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41931 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41932 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41933 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41934 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41935 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41936 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41937 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41941 +4 4 4 4 4 4
41942 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41943 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41944 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41945 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41946 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41947 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41948 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41949 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41950 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41951 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41955 +4 4 4 4 4 4
41956 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41957 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41958 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41959 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41960 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41961 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41962 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41963 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41964 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41965 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41969 +4 4 4 4 4 4
41970 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41971 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41972 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41973 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41974 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41975 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41976 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41977 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41978 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41979 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41983 +4 4 4 4 4 4
41984 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41985 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41986 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41987 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41988 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41989 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41990 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41991 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41992 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41997 +4 4 4 4 4 4
41998 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41999 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
42000 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
42001 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
42002 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
42003 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
42004 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
42005 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
42006 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
42007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42011 +4 4 4 4 4 4
42012 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42013 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
42014 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
42015 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
42016 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
42017 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
42018 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
42019 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
42020 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42025 +4 4 4 4 4 4
42026 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
42027 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
42028 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42029 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
42030 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
42031 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
42032 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
42033 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
42034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42039 +4 4 4 4 4 4
42040 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42041 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
42042 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
42043 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
42044 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
42045 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
42046 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
42047 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
42048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42053 +4 4 4 4 4 4
42054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42055 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
42056 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42057 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
42058 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
42059 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
42060 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
42061 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
42062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42067 +4 4 4 4 4 4
42068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42069 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
42070 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
42071 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
42072 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
42073 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
42074 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
42075 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
42076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42081 +4 4 4 4 4 4
42082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42083 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
42084 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
42085 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42086 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
42087 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
42088 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
42089 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42095 +4 4 4 4 4 4
42096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42098 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42099 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
42100 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
42101 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
42102 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
42103 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42109 +4 4 4 4 4 4
42110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42113 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42114 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
42115 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
42116 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
42117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42123 +4 4 4 4 4 4
42124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42127 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42128 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42129 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
42130 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
42131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42137 +4 4 4 4 4 4
42138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42141 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42142 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42143 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42144 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
42145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42151 +4 4 4 4 4 4
42152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42155 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
42156 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
42157 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
42158 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
42159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42165 +4 4 4 4 4 4
42166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42170 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
42171 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42172 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42179 +4 4 4 4 4 4
42180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42184 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
42185 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
42186 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42193 +4 4 4 4 4 4
42194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42198 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
42199 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
42200 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42207 +4 4 4 4 4 4
42208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42212 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
42213 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
42214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42221 +4 4 4 4 4 4
42222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42226 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42227 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
42228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42235 +4 4 4 4 4 4
42236 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
42237 index 8af6414..658c030 100644
42238 --- a/drivers/video/udlfb.c
42239 +++ b/drivers/video/udlfb.c
42240 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
42241 dlfb_urb_completion(urb);
42242
42243 error:
42244 - atomic_add(bytes_sent, &dev->bytes_sent);
42245 - atomic_add(bytes_identical, &dev->bytes_identical);
42246 - atomic_add(width*height*2, &dev->bytes_rendered);
42247 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42248 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42249 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
42250 end_cycles = get_cycles();
42251 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
42252 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42253 >> 10)), /* Kcycles */
42254 &dev->cpu_kcycles_used);
42255
42256 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
42257 dlfb_urb_completion(urb);
42258
42259 error:
42260 - atomic_add(bytes_sent, &dev->bytes_sent);
42261 - atomic_add(bytes_identical, &dev->bytes_identical);
42262 - atomic_add(bytes_rendered, &dev->bytes_rendered);
42263 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42264 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42265 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
42266 end_cycles = get_cycles();
42267 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
42268 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42269 >> 10)), /* Kcycles */
42270 &dev->cpu_kcycles_used);
42271 }
42272 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
42273 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42274 struct dlfb_data *dev = fb_info->par;
42275 return snprintf(buf, PAGE_SIZE, "%u\n",
42276 - atomic_read(&dev->bytes_rendered));
42277 + atomic_read_unchecked(&dev->bytes_rendered));
42278 }
42279
42280 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42281 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42282 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42283 struct dlfb_data *dev = fb_info->par;
42284 return snprintf(buf, PAGE_SIZE, "%u\n",
42285 - atomic_read(&dev->bytes_identical));
42286 + atomic_read_unchecked(&dev->bytes_identical));
42287 }
42288
42289 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42290 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42291 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42292 struct dlfb_data *dev = fb_info->par;
42293 return snprintf(buf, PAGE_SIZE, "%u\n",
42294 - atomic_read(&dev->bytes_sent));
42295 + atomic_read_unchecked(&dev->bytes_sent));
42296 }
42297
42298 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42299 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42300 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42301 struct dlfb_data *dev = fb_info->par;
42302 return snprintf(buf, PAGE_SIZE, "%u\n",
42303 - atomic_read(&dev->cpu_kcycles_used));
42304 + atomic_read_unchecked(&dev->cpu_kcycles_used));
42305 }
42306
42307 static ssize_t edid_show(
42308 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
42309 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42310 struct dlfb_data *dev = fb_info->par;
42311
42312 - atomic_set(&dev->bytes_rendered, 0);
42313 - atomic_set(&dev->bytes_identical, 0);
42314 - atomic_set(&dev->bytes_sent, 0);
42315 - atomic_set(&dev->cpu_kcycles_used, 0);
42316 + atomic_set_unchecked(&dev->bytes_rendered, 0);
42317 + atomic_set_unchecked(&dev->bytes_identical, 0);
42318 + atomic_set_unchecked(&dev->bytes_sent, 0);
42319 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42320
42321 return count;
42322 }
42323 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
42324 index b0e2a42..e2df3ad 100644
42325 --- a/drivers/video/uvesafb.c
42326 +++ b/drivers/video/uvesafb.c
42327 @@ -19,6 +19,7 @@
42328 #include <linux/io.h>
42329 #include <linux/mutex.h>
42330 #include <linux/slab.h>
42331 +#include <linux/moduleloader.h>
42332 #include <video/edid.h>
42333 #include <video/uvesafb.h>
42334 #ifdef CONFIG_X86
42335 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
42336 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42337 par->pmi_setpal = par->ypan = 0;
42338 } else {
42339 +
42340 +#ifdef CONFIG_PAX_KERNEXEC
42341 +#ifdef CONFIG_MODULES
42342 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42343 +#endif
42344 + if (!par->pmi_code) {
42345 + par->pmi_setpal = par->ypan = 0;
42346 + return 0;
42347 + }
42348 +#endif
42349 +
42350 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42351 + task->t.regs.edi);
42352 +
42353 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42354 + pax_open_kernel();
42355 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42356 + pax_close_kernel();
42357 +
42358 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42359 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42360 +#else
42361 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42362 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42363 +#endif
42364 +
42365 printk(KERN_INFO "uvesafb: protected mode interface info at "
42366 "%04x:%04x\n",
42367 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42368 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42369 par->ypan = ypan;
42370
42371 if (par->pmi_setpal || par->ypan) {
42372 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42373 if (__supported_pte_mask & _PAGE_NX) {
42374 par->pmi_setpal = par->ypan = 0;
42375 printk(KERN_WARNING "uvesafb: NX protection is actively."
42376 "We have better not to use the PMI.\n");
42377 - } else {
42378 + } else
42379 +#endif
42380 uvesafb_vbe_getpmi(task, par);
42381 - }
42382 }
42383 #else
42384 /* The protected mode interface is not available on non-x86. */
42385 @@ -1836,6 +1860,11 @@ out:
42386 if (par->vbe_modes)
42387 kfree(par->vbe_modes);
42388
42389 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42390 + if (par->pmi_code)
42391 + module_free_exec(NULL, par->pmi_code);
42392 +#endif
42393 +
42394 framebuffer_release(info);
42395 return err;
42396 }
42397 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
42398 kfree(par->vbe_state_orig);
42399 if (par->vbe_state_saved)
42400 kfree(par->vbe_state_saved);
42401 +
42402 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42403 + if (par->pmi_code)
42404 + module_free_exec(NULL, par->pmi_code);
42405 +#endif
42406 +
42407 }
42408
42409 framebuffer_release(info);
42410 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42411 index 501b340..86bd4cf 100644
42412 --- a/drivers/video/vesafb.c
42413 +++ b/drivers/video/vesafb.c
42414 @@ -9,6 +9,7 @@
42415 */
42416
42417 #include <linux/module.h>
42418 +#include <linux/moduleloader.h>
42419 #include <linux/kernel.h>
42420 #include <linux/errno.h>
42421 #include <linux/string.h>
42422 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
42423 static int vram_total __initdata; /* Set total amount of memory */
42424 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42425 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42426 -static void (*pmi_start)(void) __read_mostly;
42427 -static void (*pmi_pal) (void) __read_mostly;
42428 +static void (*pmi_start)(void) __read_only;
42429 +static void (*pmi_pal) (void) __read_only;
42430 static int depth __read_mostly;
42431 static int vga_compat __read_mostly;
42432 /* --------------------------------------------------------------------- */
42433 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
42434 unsigned int size_vmode;
42435 unsigned int size_remap;
42436 unsigned int size_total;
42437 + void *pmi_code = NULL;
42438
42439 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42440 return -ENODEV;
42441 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
42442 size_remap = size_total;
42443 vesafb_fix.smem_len = size_remap;
42444
42445 -#ifndef __i386__
42446 - screen_info.vesapm_seg = 0;
42447 -#endif
42448 -
42449 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42450 printk(KERN_WARNING
42451 "vesafb: cannot reserve video memory at 0x%lx\n",
42452 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
42453 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42454 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42455
42456 +#ifdef __i386__
42457 +
42458 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42459 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
42460 + if (!pmi_code)
42461 +#elif !defined(CONFIG_PAX_KERNEXEC)
42462 + if (0)
42463 +#endif
42464 +
42465 +#endif
42466 + screen_info.vesapm_seg = 0;
42467 +
42468 if (screen_info.vesapm_seg) {
42469 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42470 - screen_info.vesapm_seg,screen_info.vesapm_off);
42471 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42472 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42473 }
42474
42475 if (screen_info.vesapm_seg < 0xc000)
42476 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
42477
42478 if (ypan || pmi_setpal) {
42479 unsigned short *pmi_base;
42480 +
42481 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42482 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42483 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42484 +
42485 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42486 + pax_open_kernel();
42487 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42488 +#else
42489 + pmi_code = pmi_base;
42490 +#endif
42491 +
42492 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42493 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42494 +
42495 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42496 + pmi_start = ktva_ktla(pmi_start);
42497 + pmi_pal = ktva_ktla(pmi_pal);
42498 + pax_close_kernel();
42499 +#endif
42500 +
42501 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42502 if (pmi_base[3]) {
42503 printk(KERN_INFO "vesafb: pmi: ports = ");
42504 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
42505 info->node, info->fix.id);
42506 return 0;
42507 err:
42508 +
42509 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42510 + module_free_exec(NULL, pmi_code);
42511 +#endif
42512 +
42513 if (info->screen_base)
42514 iounmap(info->screen_base);
42515 framebuffer_release(info);
42516 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42517 index 88714ae..16c2e11 100644
42518 --- a/drivers/video/via/via_clock.h
42519 +++ b/drivers/video/via/via_clock.h
42520 @@ -56,7 +56,7 @@ struct via_clock {
42521
42522 void (*set_engine_pll_state)(u8 state);
42523 void (*set_engine_pll)(struct via_pll_config config);
42524 -};
42525 +} __no_const;
42526
42527
42528 static inline u32 get_pll_internal_frequency(u32 ref_freq,
42529 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42530 index e56c934..fc22f4b 100644
42531 --- a/drivers/xen/xen-pciback/conf_space.h
42532 +++ b/drivers/xen/xen-pciback/conf_space.h
42533 @@ -44,15 +44,15 @@ struct config_field {
42534 struct {
42535 conf_dword_write write;
42536 conf_dword_read read;
42537 - } dw;
42538 + } __no_const dw;
42539 struct {
42540 conf_word_write write;
42541 conf_word_read read;
42542 - } w;
42543 + } __no_const w;
42544 struct {
42545 conf_byte_write write;
42546 conf_byte_read read;
42547 - } b;
42548 + } __no_const b;
42549 } u;
42550 struct list_head list;
42551 };
42552 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42553 index 57ccb75..f6d05f8 100644
42554 --- a/fs/9p/vfs_inode.c
42555 +++ b/fs/9p/vfs_inode.c
42556 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42557 void
42558 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42559 {
42560 - char *s = nd_get_link(nd);
42561 + const char *s = nd_get_link(nd);
42562
42563 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42564 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
42565 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42566 index 0225742..1cd4732 100644
42567 --- a/fs/Kconfig.binfmt
42568 +++ b/fs/Kconfig.binfmt
42569 @@ -89,7 +89,7 @@ config HAVE_AOUT
42570
42571 config BINFMT_AOUT
42572 tristate "Kernel support for a.out and ECOFF binaries"
42573 - depends on HAVE_AOUT
42574 + depends on HAVE_AOUT && BROKEN
42575 ---help---
42576 A.out (Assembler.OUTput) is a set of formats for libraries and
42577 executables used in the earliest versions of UNIX. Linux used
42578 diff --git a/fs/aio.c b/fs/aio.c
42579 index 55c4c76..11aee6f 100644
42580 --- a/fs/aio.c
42581 +++ b/fs/aio.c
42582 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42583 size += sizeof(struct io_event) * nr_events;
42584 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42585
42586 - if (nr_pages < 0)
42587 + if (nr_pages <= 0)
42588 return -EINVAL;
42589
42590 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42591 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42592 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42593 {
42594 ssize_t ret;
42595 + struct iovec iovstack;
42596
42597 #ifdef CONFIG_COMPAT
42598 if (compat)
42599 ret = compat_rw_copy_check_uvector(type,
42600 (struct compat_iovec __user *)kiocb->ki_buf,
42601 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42602 + kiocb->ki_nbytes, 1, &iovstack,
42603 &kiocb->ki_iovec);
42604 else
42605 #endif
42606 ret = rw_copy_check_uvector(type,
42607 (struct iovec __user *)kiocb->ki_buf,
42608 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42609 + kiocb->ki_nbytes, 1, &iovstack,
42610 &kiocb->ki_iovec);
42611 if (ret < 0)
42612 goto out;
42613 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42614 if (ret < 0)
42615 goto out;
42616
42617 + if (kiocb->ki_iovec == &iovstack) {
42618 + kiocb->ki_inline_vec = iovstack;
42619 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
42620 + }
42621 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42622 kiocb->ki_cur_seg = 0;
42623 /* ki_nbytes/left now reflect bytes instead of segs */
42624 diff --git a/fs/attr.c b/fs/attr.c
42625 index 0da9095..1386693 100644
42626 --- a/fs/attr.c
42627 +++ b/fs/attr.c
42628 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42629 unsigned long limit;
42630
42631 limit = rlimit(RLIMIT_FSIZE);
42632 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42633 if (limit != RLIM_INFINITY && offset > limit)
42634 goto out_sig;
42635 if (offset > inode->i_sb->s_maxbytes)
42636 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42637 index da8876d..4456166 100644
42638 --- a/fs/autofs4/waitq.c
42639 +++ b/fs/autofs4/waitq.c
42640 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
42641 {
42642 unsigned long sigpipe, flags;
42643 mm_segment_t fs;
42644 - const char *data = (const char *)addr;
42645 + const char __user *data = (const char __force_user *)addr;
42646 ssize_t wr = 0;
42647
42648 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
42649 @@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
42650 return 1;
42651 }
42652
42653 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42654 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
42655 +#endif
42656 +
42657 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42658 enum autofs_notify notify)
42659 {
42660 @@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42661
42662 /* If this is a direct mount request create a dummy name */
42663 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
42664 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42665 + /* this name does get written to userland via autofs4_write() */
42666 + qstr.len = sprintf(name, "%08lx", atomic_inc_return_unchecked(&autofs_dummy_name_id));
42667 +#else
42668 qstr.len = sprintf(name, "%p", dentry);
42669 +#endif
42670 else {
42671 qstr.len = autofs4_getpath(sbi, dentry, &name);
42672 if (!qstr.len) {
42673 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42674 index e18da23..affc30e 100644
42675 --- a/fs/befs/linuxvfs.c
42676 +++ b/fs/befs/linuxvfs.c
42677 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42678 {
42679 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42680 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42681 - char *link = nd_get_link(nd);
42682 + const char *link = nd_get_link(nd);
42683 if (!IS_ERR(link))
42684 kfree(link);
42685 }
42686 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42687 index d146e18..12d1bd1 100644
42688 --- a/fs/binfmt_aout.c
42689 +++ b/fs/binfmt_aout.c
42690 @@ -16,6 +16,7 @@
42691 #include <linux/string.h>
42692 #include <linux/fs.h>
42693 #include <linux/file.h>
42694 +#include <linux/security.h>
42695 #include <linux/stat.h>
42696 #include <linux/fcntl.h>
42697 #include <linux/ptrace.h>
42698 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42699 #endif
42700 # define START_STACK(u) ((void __user *)u.start_stack)
42701
42702 + memset(&dump, 0, sizeof(dump));
42703 +
42704 fs = get_fs();
42705 set_fs(KERNEL_DS);
42706 has_dumped = 1;
42707 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42708
42709 /* If the size of the dump file exceeds the rlimit, then see what would happen
42710 if we wrote the stack, but not the data area. */
42711 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42712 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42713 dump.u_dsize = 0;
42714
42715 /* Make sure we have enough room to write the stack and data areas. */
42716 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42717 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42718 dump.u_ssize = 0;
42719
42720 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42721 rlim = rlimit(RLIMIT_DATA);
42722 if (rlim >= RLIM_INFINITY)
42723 rlim = ~0;
42724 +
42725 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42726 if (ex.a_data + ex.a_bss > rlim)
42727 return -ENOMEM;
42728
42729 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42730
42731 install_exec_creds(bprm);
42732
42733 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42734 + current->mm->pax_flags = 0UL;
42735 +#endif
42736 +
42737 +#ifdef CONFIG_PAX_PAGEEXEC
42738 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42739 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42740 +
42741 +#ifdef CONFIG_PAX_EMUTRAMP
42742 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42743 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42744 +#endif
42745 +
42746 +#ifdef CONFIG_PAX_MPROTECT
42747 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42748 + current->mm->pax_flags |= MF_PAX_MPROTECT;
42749 +#endif
42750 +
42751 + }
42752 +#endif
42753 +
42754 if (N_MAGIC(ex) == OMAGIC) {
42755 unsigned long text_addr, map_size;
42756 loff_t pos;
42757 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42758 }
42759
42760 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42761 - PROT_READ | PROT_WRITE | PROT_EXEC,
42762 + PROT_READ | PROT_WRITE,
42763 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42764 fd_offset + ex.a_text);
42765 if (error != N_DATADDR(ex)) {
42766 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42767 index 1b52956..725eca7 100644
42768 --- a/fs/binfmt_elf.c
42769 +++ b/fs/binfmt_elf.c
42770 @@ -32,6 +32,7 @@
42771 #include <linux/elf.h>
42772 #include <linux/utsname.h>
42773 #include <linux/coredump.h>
42774 +#include <linux/xattr.h>
42775 #include <asm/uaccess.h>
42776 #include <asm/param.h>
42777 #include <asm/page.h>
42778 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42779 #define elf_core_dump NULL
42780 #endif
42781
42782 +#ifdef CONFIG_PAX_MPROTECT
42783 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42784 +#endif
42785 +
42786 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42787 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42788 #else
42789 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
42790 .load_binary = load_elf_binary,
42791 .load_shlib = load_elf_library,
42792 .core_dump = elf_core_dump,
42793 +
42794 +#ifdef CONFIG_PAX_MPROTECT
42795 + .handle_mprotect= elf_handle_mprotect,
42796 +#endif
42797 +
42798 .min_coredump = ELF_EXEC_PAGESIZE,
42799 };
42800
42801 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
42802
42803 static int set_brk(unsigned long start, unsigned long end)
42804 {
42805 + unsigned long e = end;
42806 +
42807 start = ELF_PAGEALIGN(start);
42808 end = ELF_PAGEALIGN(end);
42809 if (end > start) {
42810 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
42811 if (BAD_ADDR(addr))
42812 return addr;
42813 }
42814 - current->mm->start_brk = current->mm->brk = end;
42815 + current->mm->start_brk = current->mm->brk = e;
42816 return 0;
42817 }
42818
42819 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42820 elf_addr_t __user *u_rand_bytes;
42821 const char *k_platform = ELF_PLATFORM;
42822 const char *k_base_platform = ELF_BASE_PLATFORM;
42823 - unsigned char k_rand_bytes[16];
42824 + u32 k_rand_bytes[4];
42825 int items;
42826 elf_addr_t *elf_info;
42827 int ei_index = 0;
42828 const struct cred *cred = current_cred();
42829 struct vm_area_struct *vma;
42830 + unsigned long saved_auxv[AT_VECTOR_SIZE];
42831
42832 /*
42833 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42834 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42835 * Generate 16 random bytes for userspace PRNG seeding.
42836 */
42837 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42838 - u_rand_bytes = (elf_addr_t __user *)
42839 - STACK_ALLOC(p, sizeof(k_rand_bytes));
42840 + srandom32(k_rand_bytes[0] ^ random32());
42841 + srandom32(k_rand_bytes[1] ^ random32());
42842 + srandom32(k_rand_bytes[2] ^ random32());
42843 + srandom32(k_rand_bytes[3] ^ random32());
42844 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42845 + u_rand_bytes = (elf_addr_t __user *) p;
42846 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42847 return -EFAULT;
42848
42849 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42850 return -EFAULT;
42851 current->mm->env_end = p;
42852
42853 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42854 +
42855 /* Put the elf_info on the stack in the right place. */
42856 sp = (elf_addr_t __user *)envp + 1;
42857 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42858 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42859 return -EFAULT;
42860 return 0;
42861 }
42862 @@ -378,10 +397,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42863 {
42864 struct elf_phdr *elf_phdata;
42865 struct elf_phdr *eppnt;
42866 - unsigned long load_addr = 0;
42867 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42868 int load_addr_set = 0;
42869 unsigned long last_bss = 0, elf_bss = 0;
42870 - unsigned long error = ~0UL;
42871 + unsigned long error = -EINVAL;
42872 unsigned long total_size;
42873 int retval, i, size;
42874
42875 @@ -427,6 +446,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42876 goto out_close;
42877 }
42878
42879 +#ifdef CONFIG_PAX_SEGMEXEC
42880 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42881 + pax_task_size = SEGMEXEC_TASK_SIZE;
42882 +#endif
42883 +
42884 eppnt = elf_phdata;
42885 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42886 if (eppnt->p_type == PT_LOAD) {
42887 @@ -470,8 +494,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42888 k = load_addr + eppnt->p_vaddr;
42889 if (BAD_ADDR(k) ||
42890 eppnt->p_filesz > eppnt->p_memsz ||
42891 - eppnt->p_memsz > TASK_SIZE ||
42892 - TASK_SIZE - eppnt->p_memsz < k) {
42893 + eppnt->p_memsz > pax_task_size ||
42894 + pax_task_size - eppnt->p_memsz < k) {
42895 error = -ENOMEM;
42896 goto out_close;
42897 }
42898 @@ -523,6 +547,311 @@ out:
42899 return error;
42900 }
42901
42902 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42903 +#ifdef CONFIG_PAX_SOFTMODE
42904 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42905 +{
42906 + unsigned long pax_flags = 0UL;
42907 +
42908 +#ifdef CONFIG_PAX_PAGEEXEC
42909 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42910 + pax_flags |= MF_PAX_PAGEEXEC;
42911 +#endif
42912 +
42913 +#ifdef CONFIG_PAX_SEGMEXEC
42914 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42915 + pax_flags |= MF_PAX_SEGMEXEC;
42916 +#endif
42917 +
42918 +#ifdef CONFIG_PAX_EMUTRAMP
42919 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
42920 + pax_flags |= MF_PAX_EMUTRAMP;
42921 +#endif
42922 +
42923 +#ifdef CONFIG_PAX_MPROTECT
42924 + if (elf_phdata->p_flags & PF_MPROTECT)
42925 + pax_flags |= MF_PAX_MPROTECT;
42926 +#endif
42927 +
42928 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42929 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42930 + pax_flags |= MF_PAX_RANDMMAP;
42931 +#endif
42932 +
42933 + return pax_flags;
42934 +}
42935 +#endif
42936 +
42937 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42938 +{
42939 + unsigned long pax_flags = 0UL;
42940 +
42941 +#ifdef CONFIG_PAX_PAGEEXEC
42942 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42943 + pax_flags |= MF_PAX_PAGEEXEC;
42944 +#endif
42945 +
42946 +#ifdef CONFIG_PAX_SEGMEXEC
42947 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42948 + pax_flags |= MF_PAX_SEGMEXEC;
42949 +#endif
42950 +
42951 +#ifdef CONFIG_PAX_EMUTRAMP
42952 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42953 + pax_flags |= MF_PAX_EMUTRAMP;
42954 +#endif
42955 +
42956 +#ifdef CONFIG_PAX_MPROTECT
42957 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42958 + pax_flags |= MF_PAX_MPROTECT;
42959 +#endif
42960 +
42961 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42962 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42963 + pax_flags |= MF_PAX_RANDMMAP;
42964 +#endif
42965 +
42966 + return pax_flags;
42967 +}
42968 +#endif
42969 +
42970 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42971 +#ifdef CONFIG_PAX_SOFTMODE
42972 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42973 +{
42974 + unsigned long pax_flags = 0UL;
42975 +
42976 +#ifdef CONFIG_PAX_PAGEEXEC
42977 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42978 + pax_flags |= MF_PAX_PAGEEXEC;
42979 +#endif
42980 +
42981 +#ifdef CONFIG_PAX_SEGMEXEC
42982 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42983 + pax_flags |= MF_PAX_SEGMEXEC;
42984 +#endif
42985 +
42986 +#ifdef CONFIG_PAX_EMUTRAMP
42987 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42988 + pax_flags |= MF_PAX_EMUTRAMP;
42989 +#endif
42990 +
42991 +#ifdef CONFIG_PAX_MPROTECT
42992 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42993 + pax_flags |= MF_PAX_MPROTECT;
42994 +#endif
42995 +
42996 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42997 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42998 + pax_flags |= MF_PAX_RANDMMAP;
42999 +#endif
43000 +
43001 + return pax_flags;
43002 +}
43003 +#endif
43004 +
43005 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
43006 +{
43007 + unsigned long pax_flags = 0UL;
43008 +
43009 +#ifdef CONFIG_PAX_PAGEEXEC
43010 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
43011 + pax_flags |= MF_PAX_PAGEEXEC;
43012 +#endif
43013 +
43014 +#ifdef CONFIG_PAX_SEGMEXEC
43015 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
43016 + pax_flags |= MF_PAX_SEGMEXEC;
43017 +#endif
43018 +
43019 +#ifdef CONFIG_PAX_EMUTRAMP
43020 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
43021 + pax_flags |= MF_PAX_EMUTRAMP;
43022 +#endif
43023 +
43024 +#ifdef CONFIG_PAX_MPROTECT
43025 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
43026 + pax_flags |= MF_PAX_MPROTECT;
43027 +#endif
43028 +
43029 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43030 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
43031 + pax_flags |= MF_PAX_RANDMMAP;
43032 +#endif
43033 +
43034 + return pax_flags;
43035 +}
43036 +#endif
43037 +
43038 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43039 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
43040 +{
43041 + unsigned long pax_flags = 0UL;
43042 +
43043 +#ifdef CONFIG_PAX_EI_PAX
43044 +
43045 +#ifdef CONFIG_PAX_PAGEEXEC
43046 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
43047 + pax_flags |= MF_PAX_PAGEEXEC;
43048 +#endif
43049 +
43050 +#ifdef CONFIG_PAX_SEGMEXEC
43051 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
43052 + pax_flags |= MF_PAX_SEGMEXEC;
43053 +#endif
43054 +
43055 +#ifdef CONFIG_PAX_EMUTRAMP
43056 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
43057 + pax_flags |= MF_PAX_EMUTRAMP;
43058 +#endif
43059 +
43060 +#ifdef CONFIG_PAX_MPROTECT
43061 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
43062 + pax_flags |= MF_PAX_MPROTECT;
43063 +#endif
43064 +
43065 +#ifdef CONFIG_PAX_ASLR
43066 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
43067 + pax_flags |= MF_PAX_RANDMMAP;
43068 +#endif
43069 +
43070 +#else
43071 +
43072 +#ifdef CONFIG_PAX_PAGEEXEC
43073 + pax_flags |= MF_PAX_PAGEEXEC;
43074 +#endif
43075 +
43076 +#ifdef CONFIG_PAX_SEGMEXEC
43077 + pax_flags |= MF_PAX_SEGMEXEC;
43078 +#endif
43079 +
43080 +#ifdef CONFIG_PAX_MPROTECT
43081 + pax_flags |= MF_PAX_MPROTECT;
43082 +#endif
43083 +
43084 +#ifdef CONFIG_PAX_RANDMMAP
43085 + if (randomize_va_space)
43086 + pax_flags |= MF_PAX_RANDMMAP;
43087 +#endif
43088 +
43089 +#endif
43090 +
43091 + return pax_flags;
43092 +}
43093 +
43094 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
43095 +{
43096 +
43097 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
43098 + unsigned long i;
43099 +
43100 + for (i = 0UL; i < elf_ex->e_phnum; i++)
43101 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
43102 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
43103 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
43104 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
43105 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
43106 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
43107 + return ~0UL;
43108 +
43109 +#ifdef CONFIG_PAX_SOFTMODE
43110 + if (pax_softmode)
43111 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
43112 + else
43113 +#endif
43114 +
43115 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
43116 + break;
43117 + }
43118 +#endif
43119 +
43120 + return ~0UL;
43121 +}
43122 +
43123 +static unsigned long pax_parse_xattr_pax(struct file * const file)
43124 +{
43125 +
43126 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43127 + ssize_t xattr_size, i;
43128 + unsigned char xattr_value[5];
43129 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
43130 +
43131 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
43132 + if (xattr_size <= 0)
43133 + return ~0UL;
43134 +
43135 + for (i = 0; i < xattr_size; i++)
43136 + switch (xattr_value[i]) {
43137 + default:
43138 + return ~0UL;
43139 +
43140 +#define parse_flag(option1, option2, flag) \
43141 + case option1: \
43142 + pax_flags_hardmode |= MF_PAX_##flag; \
43143 + break; \
43144 + case option2: \
43145 + pax_flags_softmode |= MF_PAX_##flag; \
43146 + break;
43147 +
43148 + parse_flag('p', 'P', PAGEEXEC);
43149 + parse_flag('e', 'E', EMUTRAMP);
43150 + parse_flag('m', 'M', MPROTECT);
43151 + parse_flag('r', 'R', RANDMMAP);
43152 + parse_flag('s', 'S', SEGMEXEC);
43153 +
43154 +#undef parse_flag
43155 + }
43156 +
43157 + if (pax_flags_hardmode & pax_flags_softmode)
43158 + return ~0UL;
43159 +
43160 +#ifdef CONFIG_PAX_SOFTMODE
43161 + if (pax_softmode)
43162 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
43163 + else
43164 +#endif
43165 +
43166 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
43167 +#else
43168 + return ~0UL;
43169 +#endif
43170 +
43171 +}
43172 +
43173 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
43174 +{
43175 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
43176 +
43177 + pax_flags = pax_parse_ei_pax(elf_ex);
43178 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
43179 + xattr_pax_flags = pax_parse_xattr_pax(file);
43180 +
43181 + if (pt_pax_flags == ~0UL)
43182 + pt_pax_flags = xattr_pax_flags;
43183 + else if (xattr_pax_flags == ~0UL)
43184 + xattr_pax_flags = pt_pax_flags;
43185 + if (pt_pax_flags != xattr_pax_flags)
43186 + return -EINVAL;
43187 + if (pt_pax_flags != ~0UL)
43188 + pax_flags = pt_pax_flags;
43189 +
43190 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43191 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43192 + if ((__supported_pte_mask & _PAGE_NX))
43193 + pax_flags &= ~MF_PAX_SEGMEXEC;
43194 + else
43195 + pax_flags &= ~MF_PAX_PAGEEXEC;
43196 + }
43197 +#endif
43198 +
43199 + if (0 > pax_check_flags(&pax_flags))
43200 + return -EINVAL;
43201 +
43202 + current->mm->pax_flags = pax_flags;
43203 + return 0;
43204 +}
43205 +#endif
43206 +
43207 /*
43208 * These are the functions used to load ELF style executables and shared
43209 * libraries. There is no binary dependent code anywhere else.
43210 @@ -539,6 +868,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
43211 {
43212 unsigned int random_variable = 0;
43213
43214 +#ifdef CONFIG_PAX_RANDUSTACK
43215 + if (randomize_va_space)
43216 + return stack_top - current->mm->delta_stack;
43217 +#endif
43218 +
43219 if ((current->flags & PF_RANDOMIZE) &&
43220 !(current->personality & ADDR_NO_RANDOMIZE)) {
43221 random_variable = get_random_int() & STACK_RND_MASK;
43222 @@ -557,7 +891,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43223 unsigned long load_addr = 0, load_bias = 0;
43224 int load_addr_set = 0;
43225 char * elf_interpreter = NULL;
43226 - unsigned long error;
43227 + unsigned long error = 0;
43228 struct elf_phdr *elf_ppnt, *elf_phdata;
43229 unsigned long elf_bss, elf_brk;
43230 int retval, i;
43231 @@ -567,11 +901,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43232 unsigned long start_code, end_code, start_data, end_data;
43233 unsigned long reloc_func_desc __maybe_unused = 0;
43234 int executable_stack = EXSTACK_DEFAULT;
43235 - unsigned long def_flags = 0;
43236 struct {
43237 struct elfhdr elf_ex;
43238 struct elfhdr interp_elf_ex;
43239 } *loc;
43240 + unsigned long pax_task_size = TASK_SIZE;
43241
43242 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
43243 if (!loc) {
43244 @@ -707,11 +1041,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43245 goto out_free_dentry;
43246
43247 /* OK, This is the point of no return */
43248 - current->mm->def_flags = def_flags;
43249 +
43250 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43251 + current->mm->pax_flags = 0UL;
43252 +#endif
43253 +
43254 +#ifdef CONFIG_PAX_DLRESOLVE
43255 + current->mm->call_dl_resolve = 0UL;
43256 +#endif
43257 +
43258 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
43259 + current->mm->call_syscall = 0UL;
43260 +#endif
43261 +
43262 +#ifdef CONFIG_PAX_ASLR
43263 + current->mm->delta_mmap = 0UL;
43264 + current->mm->delta_stack = 0UL;
43265 +#endif
43266 +
43267 + current->mm->def_flags = 0;
43268 +
43269 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43270 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
43271 + send_sig(SIGKILL, current, 0);
43272 + goto out_free_dentry;
43273 + }
43274 +#endif
43275 +
43276 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43277 + pax_set_initial_flags(bprm);
43278 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43279 + if (pax_set_initial_flags_func)
43280 + (pax_set_initial_flags_func)(bprm);
43281 +#endif
43282 +
43283 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43284 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
43285 + current->mm->context.user_cs_limit = PAGE_SIZE;
43286 + current->mm->def_flags |= VM_PAGEEXEC;
43287 + }
43288 +#endif
43289 +
43290 +#ifdef CONFIG_PAX_SEGMEXEC
43291 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43292 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43293 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43294 + pax_task_size = SEGMEXEC_TASK_SIZE;
43295 + current->mm->def_flags |= VM_NOHUGEPAGE;
43296 + }
43297 +#endif
43298 +
43299 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43300 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43301 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43302 + put_cpu();
43303 + }
43304 +#endif
43305
43306 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43307 may depend on the personality. */
43308 SET_PERSONALITY(loc->elf_ex);
43309 +
43310 +#ifdef CONFIG_PAX_ASLR
43311 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43312 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43313 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43314 + }
43315 +#endif
43316 +
43317 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43318 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43319 + executable_stack = EXSTACK_DISABLE_X;
43320 + current->personality &= ~READ_IMPLIES_EXEC;
43321 + } else
43322 +#endif
43323 +
43324 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43325 current->personality |= READ_IMPLIES_EXEC;
43326
43327 @@ -802,6 +1206,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43328 #else
43329 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43330 #endif
43331 +
43332 +#ifdef CONFIG_PAX_RANDMMAP
43333 + /* PaX: randomize base address at the default exe base if requested */
43334 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43335 +#ifdef CONFIG_SPARC64
43336 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43337 +#else
43338 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43339 +#endif
43340 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43341 + elf_flags |= MAP_FIXED;
43342 + }
43343 +#endif
43344 +
43345 }
43346
43347 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43348 @@ -834,9 +1252,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43349 * allowed task size. Note that p_filesz must always be
43350 * <= p_memsz so it is only necessary to check p_memsz.
43351 */
43352 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43353 - elf_ppnt->p_memsz > TASK_SIZE ||
43354 - TASK_SIZE - elf_ppnt->p_memsz < k) {
43355 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43356 + elf_ppnt->p_memsz > pax_task_size ||
43357 + pax_task_size - elf_ppnt->p_memsz < k) {
43358 /* set_brk can never work. Avoid overflows. */
43359 send_sig(SIGKILL, current, 0);
43360 retval = -EINVAL;
43361 @@ -875,11 +1293,41 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43362 goto out_free_dentry;
43363 }
43364 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43365 - send_sig(SIGSEGV, current, 0);
43366 - retval = -EFAULT; /* Nobody gets to see this, but.. */
43367 - goto out_free_dentry;
43368 + /*
43369 + * This bss-zeroing can fail if the ELF
43370 + * file specifies odd protections. So
43371 + * we don't check the return value
43372 + */
43373 }
43374
43375 +#ifdef CONFIG_PAX_RANDMMAP
43376 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43377 + unsigned long start, size;
43378 +
43379 + start = ELF_PAGEALIGN(elf_brk);
43380 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43381 + down_read(&current->mm->mmap_sem);
43382 + retval = -ENOMEM;
43383 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43384 + unsigned long prot = PROT_NONE;
43385 +
43386 + up_read(&current->mm->mmap_sem);
43387 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43388 +// if (current->personality & ADDR_NO_RANDOMIZE)
43389 +// prot = PROT_READ;
43390 + start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43391 + retval = IS_ERR_VALUE(start) ? start : 0;
43392 + } else
43393 + up_read(&current->mm->mmap_sem);
43394 + if (retval == 0)
43395 + retval = set_brk(start + size, start + size + PAGE_SIZE);
43396 + if (retval < 0) {
43397 + send_sig(SIGKILL, current, 0);
43398 + goto out_free_dentry;
43399 + }
43400 + }
43401 +#endif
43402 +
43403 if (elf_interpreter) {
43404 unsigned long uninitialized_var(interp_map_addr);
43405
43406 @@ -1107,7 +1555,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
43407 * Decide what to dump of a segment, part, all or none.
43408 */
43409 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43410 - unsigned long mm_flags)
43411 + unsigned long mm_flags, long signr)
43412 {
43413 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43414
43415 @@ -1144,7 +1592,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
43416 if (vma->vm_file == NULL)
43417 return 0;
43418
43419 - if (FILTER(MAPPED_PRIVATE))
43420 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43421 goto whole;
43422
43423 /*
43424 @@ -1366,9 +1814,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
43425 {
43426 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43427 int i = 0;
43428 - do
43429 + do {
43430 i += 2;
43431 - while (auxv[i - 2] != AT_NULL);
43432 + } while (auxv[i - 2] != AT_NULL);
43433 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43434 }
43435
43436 @@ -1890,14 +2338,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
43437 }
43438
43439 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43440 - unsigned long mm_flags)
43441 + struct coredump_params *cprm)
43442 {
43443 struct vm_area_struct *vma;
43444 size_t size = 0;
43445
43446 for (vma = first_vma(current, gate_vma); vma != NULL;
43447 vma = next_vma(vma, gate_vma))
43448 - size += vma_dump_size(vma, mm_flags);
43449 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43450 return size;
43451 }
43452
43453 @@ -1991,7 +2439,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43454
43455 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43456
43457 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43458 + offset += elf_core_vma_data_size(gate_vma, cprm);
43459 offset += elf_core_extra_data_size();
43460 e_shoff = offset;
43461
43462 @@ -2005,10 +2453,12 @@ static int elf_core_dump(struct coredump_params *cprm)
43463 offset = dataoff;
43464
43465 size += sizeof(*elf);
43466 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43467 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43468 goto end_coredump;
43469
43470 size += sizeof(*phdr4note);
43471 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43472 if (size > cprm->limit
43473 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43474 goto end_coredump;
43475 @@ -2022,7 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43476 phdr.p_offset = offset;
43477 phdr.p_vaddr = vma->vm_start;
43478 phdr.p_paddr = 0;
43479 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43480 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43481 phdr.p_memsz = vma->vm_end - vma->vm_start;
43482 offset += phdr.p_filesz;
43483 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43484 @@ -2033,6 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43485 phdr.p_align = ELF_EXEC_PAGESIZE;
43486
43487 size += sizeof(phdr);
43488 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43489 if (size > cprm->limit
43490 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43491 goto end_coredump;
43492 @@ -2057,7 +2508,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43493 unsigned long addr;
43494 unsigned long end;
43495
43496 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43497 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43498
43499 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43500 struct page *page;
43501 @@ -2066,6 +2517,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43502 page = get_dump_page(addr);
43503 if (page) {
43504 void *kaddr = kmap(page);
43505 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43506 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43507 !dump_write(cprm->file, kaddr,
43508 PAGE_SIZE);
43509 @@ -2083,6 +2535,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43510
43511 if (e_phnum == PN_XNUM) {
43512 size += sizeof(*shdr4extnum);
43513 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43514 if (size > cprm->limit
43515 || !dump_write(cprm->file, shdr4extnum,
43516 sizeof(*shdr4extnum)))
43517 @@ -2103,6 +2556,97 @@ out:
43518
43519 #endif /* CONFIG_ELF_CORE */
43520
43521 +#ifdef CONFIG_PAX_MPROTECT
43522 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
43523 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43524 + * we'll remove VM_MAYWRITE for good on RELRO segments.
43525 + *
43526 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43527 + * basis because we want to allow the common case and not the special ones.
43528 + */
43529 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43530 +{
43531 + struct elfhdr elf_h;
43532 + struct elf_phdr elf_p;
43533 + unsigned long i;
43534 + unsigned long oldflags;
43535 + bool is_textrel_rw, is_textrel_rx, is_relro;
43536 +
43537 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43538 + return;
43539 +
43540 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43541 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43542 +
43543 +#ifdef CONFIG_PAX_ELFRELOCS
43544 + /* possible TEXTREL */
43545 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43546 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43547 +#else
43548 + is_textrel_rw = false;
43549 + is_textrel_rx = false;
43550 +#endif
43551 +
43552 + /* possible RELRO */
43553 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43554 +
43555 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43556 + return;
43557 +
43558 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43559 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43560 +
43561 +#ifdef CONFIG_PAX_ETEXECRELOCS
43562 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43563 +#else
43564 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43565 +#endif
43566 +
43567 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43568 + !elf_check_arch(&elf_h) ||
43569 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43570 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43571 + return;
43572 +
43573 + for (i = 0UL; i < elf_h.e_phnum; i++) {
43574 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43575 + return;
43576 + switch (elf_p.p_type) {
43577 + case PT_DYNAMIC:
43578 + if (!is_textrel_rw && !is_textrel_rx)
43579 + continue;
43580 + i = 0UL;
43581 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43582 + elf_dyn dyn;
43583 +
43584 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43585 + return;
43586 + if (dyn.d_tag == DT_NULL)
43587 + return;
43588 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43589 + gr_log_textrel(vma);
43590 + if (is_textrel_rw)
43591 + vma->vm_flags |= VM_MAYWRITE;
43592 + else
43593 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43594 + vma->vm_flags &= ~VM_MAYWRITE;
43595 + return;
43596 + }
43597 + i++;
43598 + }
43599 + return;
43600 +
43601 + case PT_GNU_RELRO:
43602 + if (!is_relro)
43603 + continue;
43604 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43605 + vma->vm_flags &= ~VM_MAYWRITE;
43606 + return;
43607 + }
43608 + }
43609 +}
43610 +#endif
43611 +
43612 static int __init init_elf_binfmt(void)
43613 {
43614 register_binfmt(&elf_format);
43615 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43616 index 178cb70..8972997 100644
43617 --- a/fs/binfmt_flat.c
43618 +++ b/fs/binfmt_flat.c
43619 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43620 realdatastart = (unsigned long) -ENOMEM;
43621 printk("Unable to allocate RAM for process data, errno %d\n",
43622 (int)-realdatastart);
43623 + down_write(&current->mm->mmap_sem);
43624 vm_munmap(textpos, text_len);
43625 + up_write(&current->mm->mmap_sem);
43626 ret = realdatastart;
43627 goto err;
43628 }
43629 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43630 }
43631 if (IS_ERR_VALUE(result)) {
43632 printk("Unable to read data+bss, errno %d\n", (int)-result);
43633 + down_write(&current->mm->mmap_sem);
43634 vm_munmap(textpos, text_len);
43635 vm_munmap(realdatastart, len);
43636 + up_write(&current->mm->mmap_sem);
43637 ret = result;
43638 goto err;
43639 }
43640 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43641 }
43642 if (IS_ERR_VALUE(result)) {
43643 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43644 + down_write(&current->mm->mmap_sem);
43645 vm_munmap(textpos, text_len + data_len + extra +
43646 MAX_SHARED_LIBS * sizeof(unsigned long));
43647 + up_write(&current->mm->mmap_sem);
43648 ret = result;
43649 goto err;
43650 }
43651 diff --git a/fs/bio.c b/fs/bio.c
43652 index 73922ab..16642dd 100644
43653 --- a/fs/bio.c
43654 +++ b/fs/bio.c
43655 @@ -841,7 +841,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43656 /*
43657 * Overflow, abort
43658 */
43659 - if (end < start)
43660 + if (end < start || end - start > INT_MAX - nr_pages)
43661 return ERR_PTR(-EINVAL);
43662
43663 nr_pages += end - start;
43664 @@ -975,7 +975,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
43665 /*
43666 * Overflow, abort
43667 */
43668 - if (end < start)
43669 + if (end < start || end - start > INT_MAX - nr_pages)
43670 return ERR_PTR(-EINVAL);
43671
43672 nr_pages += end - start;
43673 @@ -1237,7 +1237,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43674 const int read = bio_data_dir(bio) == READ;
43675 struct bio_map_data *bmd = bio->bi_private;
43676 int i;
43677 - char *p = bmd->sgvecs[0].iov_base;
43678 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43679
43680 __bio_for_each_segment(bvec, bio, i, 0) {
43681 char *addr = page_address(bvec->bv_page);
43682 diff --git a/fs/block_dev.c b/fs/block_dev.c
43683 index c2bbe1f..9dfbc23 100644
43684 --- a/fs/block_dev.c
43685 +++ b/fs/block_dev.c
43686 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43687 else if (bdev->bd_contains == bdev)
43688 return true; /* is a whole device which isn't held */
43689
43690 - else if (whole->bd_holder == bd_may_claim)
43691 + else if (whole->bd_holder == (void *)bd_may_claim)
43692 return true; /* is a partition of a device that is being partitioned */
43693 else if (whole->bd_holder != NULL)
43694 return false; /* is a partition of a held device */
43695 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43696 index da6e936..1598dd0 100644
43697 --- a/fs/btrfs/check-integrity.c
43698 +++ b/fs/btrfs/check-integrity.c
43699 @@ -155,7 +155,7 @@ struct btrfsic_block {
43700 union {
43701 bio_end_io_t *bio;
43702 bh_end_io_t *bh;
43703 - } orig_bio_bh_end_io;
43704 + } __no_const orig_bio_bh_end_io;
43705 int submit_bio_bh_rw;
43706 u64 flush_gen; /* only valid if !never_written */
43707 };
43708 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43709 index 8206b39..06d5654 100644
43710 --- a/fs/btrfs/ctree.c
43711 +++ b/fs/btrfs/ctree.c
43712 @@ -973,9 +973,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43713 free_extent_buffer(buf);
43714 add_root_to_dirty_list(root);
43715 } else {
43716 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43717 - parent_start = parent->start;
43718 - else
43719 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43720 + if (parent)
43721 + parent_start = parent->start;
43722 + else
43723 + parent_start = 0;
43724 + } else
43725 parent_start = 0;
43726
43727 WARN_ON(trans->transid != btrfs_header_generation(parent));
43728 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43729 index a7d1921..a32dba2 100644
43730 --- a/fs/btrfs/inode.c
43731 +++ b/fs/btrfs/inode.c
43732 @@ -7111,7 +7111,7 @@ fail:
43733 return -ENOMEM;
43734 }
43735
43736 -static int btrfs_getattr(struct vfsmount *mnt,
43737 +int btrfs_getattr(struct vfsmount *mnt,
43738 struct dentry *dentry, struct kstat *stat)
43739 {
43740 struct inode *inode = dentry->d_inode;
43741 @@ -7125,6 +7125,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43742 return 0;
43743 }
43744
43745 +EXPORT_SYMBOL(btrfs_getattr);
43746 +
43747 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
43748 +{
43749 + return BTRFS_I(inode)->root->anon_dev;
43750 +}
43751 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43752 +
43753 /*
43754 * If a file is moved, it will inherit the cow and compression flags of the new
43755 * directory.
43756 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43757 index 0e92e57..8b560de 100644
43758 --- a/fs/btrfs/ioctl.c
43759 +++ b/fs/btrfs/ioctl.c
43760 @@ -2902,9 +2902,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43761 for (i = 0; i < num_types; i++) {
43762 struct btrfs_space_info *tmp;
43763
43764 + /* Don't copy in more than we allocated */
43765 if (!slot_count)
43766 break;
43767
43768 + slot_count--;
43769 +
43770 info = NULL;
43771 rcu_read_lock();
43772 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43773 @@ -2926,10 +2929,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43774 memcpy(dest, &space, sizeof(space));
43775 dest++;
43776 space_args.total_spaces++;
43777 - slot_count--;
43778 }
43779 - if (!slot_count)
43780 - break;
43781 }
43782 up_read(&info->groups_sem);
43783 }
43784 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43785 index 646ee21..f020f87 100644
43786 --- a/fs/btrfs/relocation.c
43787 +++ b/fs/btrfs/relocation.c
43788 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43789 }
43790 spin_unlock(&rc->reloc_root_tree.lock);
43791
43792 - BUG_ON((struct btrfs_root *)node->data != root);
43793 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
43794
43795 if (!del) {
43796 spin_lock(&rc->reloc_root_tree.lock);
43797 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43798 index 622f469..e8d2d55 100644
43799 --- a/fs/cachefiles/bind.c
43800 +++ b/fs/cachefiles/bind.c
43801 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43802 args);
43803
43804 /* start by checking things over */
43805 - ASSERT(cache->fstop_percent >= 0 &&
43806 - cache->fstop_percent < cache->fcull_percent &&
43807 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
43808 cache->fcull_percent < cache->frun_percent &&
43809 cache->frun_percent < 100);
43810
43811 - ASSERT(cache->bstop_percent >= 0 &&
43812 - cache->bstop_percent < cache->bcull_percent &&
43813 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
43814 cache->bcull_percent < cache->brun_percent &&
43815 cache->brun_percent < 100);
43816
43817 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43818 index 0a1467b..6a53245 100644
43819 --- a/fs/cachefiles/daemon.c
43820 +++ b/fs/cachefiles/daemon.c
43821 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43822 if (n > buflen)
43823 return -EMSGSIZE;
43824
43825 - if (copy_to_user(_buffer, buffer, n) != 0)
43826 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43827 return -EFAULT;
43828
43829 return n;
43830 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43831 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43832 return -EIO;
43833
43834 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
43835 + if (datalen > PAGE_SIZE - 1)
43836 return -EOPNOTSUPP;
43837
43838 /* drag the command string into the kernel so we can parse it */
43839 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43840 if (args[0] != '%' || args[1] != '\0')
43841 return -EINVAL;
43842
43843 - if (fstop < 0 || fstop >= cache->fcull_percent)
43844 + if (fstop >= cache->fcull_percent)
43845 return cachefiles_daemon_range_error(cache, args);
43846
43847 cache->fstop_percent = fstop;
43848 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43849 if (args[0] != '%' || args[1] != '\0')
43850 return -EINVAL;
43851
43852 - if (bstop < 0 || bstop >= cache->bcull_percent)
43853 + if (bstop >= cache->bcull_percent)
43854 return cachefiles_daemon_range_error(cache, args);
43855
43856 cache->bstop_percent = bstop;
43857 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43858 index bd6bc1b..b627b53 100644
43859 --- a/fs/cachefiles/internal.h
43860 +++ b/fs/cachefiles/internal.h
43861 @@ -57,7 +57,7 @@ struct cachefiles_cache {
43862 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43863 struct rb_root active_nodes; /* active nodes (can't be culled) */
43864 rwlock_t active_lock; /* lock for active_nodes */
43865 - atomic_t gravecounter; /* graveyard uniquifier */
43866 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43867 unsigned frun_percent; /* when to stop culling (% files) */
43868 unsigned fcull_percent; /* when to start culling (% files) */
43869 unsigned fstop_percent; /* when to stop allocating (% files) */
43870 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43871 * proc.c
43872 */
43873 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43874 -extern atomic_t cachefiles_lookup_histogram[HZ];
43875 -extern atomic_t cachefiles_mkdir_histogram[HZ];
43876 -extern atomic_t cachefiles_create_histogram[HZ];
43877 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43878 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43879 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43880
43881 extern int __init cachefiles_proc_init(void);
43882 extern void cachefiles_proc_cleanup(void);
43883 static inline
43884 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43885 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43886 {
43887 unsigned long jif = jiffies - start_jif;
43888 if (jif >= HZ)
43889 jif = HZ - 1;
43890 - atomic_inc(&histogram[jif]);
43891 + atomic_inc_unchecked(&histogram[jif]);
43892 }
43893
43894 #else
43895 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43896 index 7f0771d..87d4f36 100644
43897 --- a/fs/cachefiles/namei.c
43898 +++ b/fs/cachefiles/namei.c
43899 @@ -318,7 +318,7 @@ try_again:
43900 /* first step is to make up a grave dentry in the graveyard */
43901 sprintf(nbuffer, "%08x%08x",
43902 (uint32_t) get_seconds(),
43903 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43904 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43905
43906 /* do the multiway lock magic */
43907 trap = lock_rename(cache->graveyard, dir);
43908 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43909 index eccd339..4c1d995 100644
43910 --- a/fs/cachefiles/proc.c
43911 +++ b/fs/cachefiles/proc.c
43912 @@ -14,9 +14,9 @@
43913 #include <linux/seq_file.h>
43914 #include "internal.h"
43915
43916 -atomic_t cachefiles_lookup_histogram[HZ];
43917 -atomic_t cachefiles_mkdir_histogram[HZ];
43918 -atomic_t cachefiles_create_histogram[HZ];
43919 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43920 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43921 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43922
43923 /*
43924 * display the latency histogram
43925 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43926 return 0;
43927 default:
43928 index = (unsigned long) v - 3;
43929 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43930 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43931 - z = atomic_read(&cachefiles_create_histogram[index]);
43932 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43933 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43934 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43935 if (x == 0 && y == 0 && z == 0)
43936 return 0;
43937
43938 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43939 index 0e3c092..818480e 100644
43940 --- a/fs/cachefiles/rdwr.c
43941 +++ b/fs/cachefiles/rdwr.c
43942 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43943 old_fs = get_fs();
43944 set_fs(KERNEL_DS);
43945 ret = file->f_op->write(
43946 - file, (const void __user *) data, len, &pos);
43947 + file, (const void __force_user *) data, len, &pos);
43948 set_fs(old_fs);
43949 kunmap(page);
43950 if (ret != len)
43951 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43952 index 3e8094b..cb3ff3d 100644
43953 --- a/fs/ceph/dir.c
43954 +++ b/fs/ceph/dir.c
43955 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43956 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43957 struct ceph_mds_client *mdsc = fsc->mdsc;
43958 unsigned frag = fpos_frag(filp->f_pos);
43959 - int off = fpos_off(filp->f_pos);
43960 + unsigned int off = fpos_off(filp->f_pos);
43961 int err;
43962 u32 ftype;
43963 struct ceph_mds_reply_info_parsed *rinfo;
43964 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43965 if (nd &&
43966 (nd->flags & LOOKUP_OPEN) &&
43967 !(nd->intent.open.flags & O_CREAT)) {
43968 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43969 + int mode = nd->intent.open.create_mode & ~current_umask();
43970 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43971 }
43972
43973 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43974 index e814052..28dcdf7 100644
43975 --- a/fs/cifs/cifs_debug.c
43976 +++ b/fs/cifs/cifs_debug.c
43977 @@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43978
43979 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43980 #ifdef CONFIG_CIFS_STATS2
43981 - atomic_set(&totBufAllocCount, 0);
43982 - atomic_set(&totSmBufAllocCount, 0);
43983 + atomic_set_unchecked(&totBufAllocCount, 0);
43984 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43985 #endif /* CONFIG_CIFS_STATS2 */
43986 spin_lock(&cifs_tcp_ses_lock);
43987 list_for_each(tmp1, &cifs_tcp_ses_list) {
43988 @@ -281,25 +281,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43989 tcon = list_entry(tmp3,
43990 struct cifs_tcon,
43991 tcon_list);
43992 - atomic_set(&tcon->num_smbs_sent, 0);
43993 - atomic_set(&tcon->num_writes, 0);
43994 - atomic_set(&tcon->num_reads, 0);
43995 - atomic_set(&tcon->num_oplock_brks, 0);
43996 - atomic_set(&tcon->num_opens, 0);
43997 - atomic_set(&tcon->num_posixopens, 0);
43998 - atomic_set(&tcon->num_posixmkdirs, 0);
43999 - atomic_set(&tcon->num_closes, 0);
44000 - atomic_set(&tcon->num_deletes, 0);
44001 - atomic_set(&tcon->num_mkdirs, 0);
44002 - atomic_set(&tcon->num_rmdirs, 0);
44003 - atomic_set(&tcon->num_renames, 0);
44004 - atomic_set(&tcon->num_t2renames, 0);
44005 - atomic_set(&tcon->num_ffirst, 0);
44006 - atomic_set(&tcon->num_fnext, 0);
44007 - atomic_set(&tcon->num_fclose, 0);
44008 - atomic_set(&tcon->num_hardlinks, 0);
44009 - atomic_set(&tcon->num_symlinks, 0);
44010 - atomic_set(&tcon->num_locks, 0);
44011 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
44012 + atomic_set_unchecked(&tcon->num_writes, 0);
44013 + atomic_set_unchecked(&tcon->num_reads, 0);
44014 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
44015 + atomic_set_unchecked(&tcon->num_opens, 0);
44016 + atomic_set_unchecked(&tcon->num_posixopens, 0);
44017 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
44018 + atomic_set_unchecked(&tcon->num_closes, 0);
44019 + atomic_set_unchecked(&tcon->num_deletes, 0);
44020 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
44021 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
44022 + atomic_set_unchecked(&tcon->num_renames, 0);
44023 + atomic_set_unchecked(&tcon->num_t2renames, 0);
44024 + atomic_set_unchecked(&tcon->num_ffirst, 0);
44025 + atomic_set_unchecked(&tcon->num_fnext, 0);
44026 + atomic_set_unchecked(&tcon->num_fclose, 0);
44027 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
44028 + atomic_set_unchecked(&tcon->num_symlinks, 0);
44029 + atomic_set_unchecked(&tcon->num_locks, 0);
44030 }
44031 }
44032 }
44033 @@ -329,8 +329,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44034 smBufAllocCount.counter, cifs_min_small);
44035 #ifdef CONFIG_CIFS_STATS2
44036 seq_printf(m, "Total Large %d Small %d Allocations\n",
44037 - atomic_read(&totBufAllocCount),
44038 - atomic_read(&totSmBufAllocCount));
44039 + atomic_read_unchecked(&totBufAllocCount),
44040 + atomic_read_unchecked(&totSmBufAllocCount));
44041 #endif /* CONFIG_CIFS_STATS2 */
44042
44043 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
44044 @@ -359,41 +359,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44045 if (tcon->need_reconnect)
44046 seq_puts(m, "\tDISCONNECTED ");
44047 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
44048 - atomic_read(&tcon->num_smbs_sent),
44049 - atomic_read(&tcon->num_oplock_brks));
44050 + atomic_read_unchecked(&tcon->num_smbs_sent),
44051 + atomic_read_unchecked(&tcon->num_oplock_brks));
44052 seq_printf(m, "\nReads: %d Bytes: %lld",
44053 - atomic_read(&tcon->num_reads),
44054 + atomic_read_unchecked(&tcon->num_reads),
44055 (long long)(tcon->bytes_read));
44056 seq_printf(m, "\nWrites: %d Bytes: %lld",
44057 - atomic_read(&tcon->num_writes),
44058 + atomic_read_unchecked(&tcon->num_writes),
44059 (long long)(tcon->bytes_written));
44060 seq_printf(m, "\nFlushes: %d",
44061 - atomic_read(&tcon->num_flushes));
44062 + atomic_read_unchecked(&tcon->num_flushes));
44063 seq_printf(m, "\nLocks: %d HardLinks: %d "
44064 "Symlinks: %d",
44065 - atomic_read(&tcon->num_locks),
44066 - atomic_read(&tcon->num_hardlinks),
44067 - atomic_read(&tcon->num_symlinks));
44068 + atomic_read_unchecked(&tcon->num_locks),
44069 + atomic_read_unchecked(&tcon->num_hardlinks),
44070 + atomic_read_unchecked(&tcon->num_symlinks));
44071 seq_printf(m, "\nOpens: %d Closes: %d "
44072 "Deletes: %d",
44073 - atomic_read(&tcon->num_opens),
44074 - atomic_read(&tcon->num_closes),
44075 - atomic_read(&tcon->num_deletes));
44076 + atomic_read_unchecked(&tcon->num_opens),
44077 + atomic_read_unchecked(&tcon->num_closes),
44078 + atomic_read_unchecked(&tcon->num_deletes));
44079 seq_printf(m, "\nPosix Opens: %d "
44080 "Posix Mkdirs: %d",
44081 - atomic_read(&tcon->num_posixopens),
44082 - atomic_read(&tcon->num_posixmkdirs));
44083 + atomic_read_unchecked(&tcon->num_posixopens),
44084 + atomic_read_unchecked(&tcon->num_posixmkdirs));
44085 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
44086 - atomic_read(&tcon->num_mkdirs),
44087 - atomic_read(&tcon->num_rmdirs));
44088 + atomic_read_unchecked(&tcon->num_mkdirs),
44089 + atomic_read_unchecked(&tcon->num_rmdirs));
44090 seq_printf(m, "\nRenames: %d T2 Renames %d",
44091 - atomic_read(&tcon->num_renames),
44092 - atomic_read(&tcon->num_t2renames));
44093 + atomic_read_unchecked(&tcon->num_renames),
44094 + atomic_read_unchecked(&tcon->num_t2renames));
44095 seq_printf(m, "\nFindFirst: %d FNext %d "
44096 "FClose %d",
44097 - atomic_read(&tcon->num_ffirst),
44098 - atomic_read(&tcon->num_fnext),
44099 - atomic_read(&tcon->num_fclose));
44100 + atomic_read_unchecked(&tcon->num_ffirst),
44101 + atomic_read_unchecked(&tcon->num_fnext),
44102 + atomic_read_unchecked(&tcon->num_fclose));
44103 }
44104 }
44105 }
44106 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
44107 index 8b6e344..303a662 100644
44108 --- a/fs/cifs/cifsfs.c
44109 +++ b/fs/cifs/cifsfs.c
44110 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
44111 cifs_req_cachep = kmem_cache_create("cifs_request",
44112 CIFSMaxBufSize +
44113 MAX_CIFS_HDR_SIZE, 0,
44114 - SLAB_HWCACHE_ALIGN, NULL);
44115 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
44116 if (cifs_req_cachep == NULL)
44117 return -ENOMEM;
44118
44119 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
44120 efficient to alloc 1 per page off the slab compared to 17K (5page)
44121 alloc of large cifs buffers even when page debugging is on */
44122 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
44123 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
44124 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
44125 NULL);
44126 if (cifs_sm_req_cachep == NULL) {
44127 mempool_destroy(cifs_req_poolp);
44128 @@ -1106,8 +1106,8 @@ init_cifs(void)
44129 atomic_set(&bufAllocCount, 0);
44130 atomic_set(&smBufAllocCount, 0);
44131 #ifdef CONFIG_CIFS_STATS2
44132 - atomic_set(&totBufAllocCount, 0);
44133 - atomic_set(&totSmBufAllocCount, 0);
44134 + atomic_set_unchecked(&totBufAllocCount, 0);
44135 + atomic_set_unchecked(&totSmBufAllocCount, 0);
44136 #endif /* CONFIG_CIFS_STATS2 */
44137
44138 atomic_set(&midCount, 0);
44139 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
44140 index d86ba9f..e80049d 100644
44141 --- a/fs/cifs/cifsglob.h
44142 +++ b/fs/cifs/cifsglob.h
44143 @@ -491,28 +491,28 @@ struct cifs_tcon {
44144 __u16 Flags; /* optional support bits */
44145 enum statusEnum tidStatus;
44146 #ifdef CONFIG_CIFS_STATS
44147 - atomic_t num_smbs_sent;
44148 - atomic_t num_writes;
44149 - atomic_t num_reads;
44150 - atomic_t num_flushes;
44151 - atomic_t num_oplock_brks;
44152 - atomic_t num_opens;
44153 - atomic_t num_closes;
44154 - atomic_t num_deletes;
44155 - atomic_t num_mkdirs;
44156 - atomic_t num_posixopens;
44157 - atomic_t num_posixmkdirs;
44158 - atomic_t num_rmdirs;
44159 - atomic_t num_renames;
44160 - atomic_t num_t2renames;
44161 - atomic_t num_ffirst;
44162 - atomic_t num_fnext;
44163 - atomic_t num_fclose;
44164 - atomic_t num_hardlinks;
44165 - atomic_t num_symlinks;
44166 - atomic_t num_locks;
44167 - atomic_t num_acl_get;
44168 - atomic_t num_acl_set;
44169 + atomic_unchecked_t num_smbs_sent;
44170 + atomic_unchecked_t num_writes;
44171 + atomic_unchecked_t num_reads;
44172 + atomic_unchecked_t num_flushes;
44173 + atomic_unchecked_t num_oplock_brks;
44174 + atomic_unchecked_t num_opens;
44175 + atomic_unchecked_t num_closes;
44176 + atomic_unchecked_t num_deletes;
44177 + atomic_unchecked_t num_mkdirs;
44178 + atomic_unchecked_t num_posixopens;
44179 + atomic_unchecked_t num_posixmkdirs;
44180 + atomic_unchecked_t num_rmdirs;
44181 + atomic_unchecked_t num_renames;
44182 + atomic_unchecked_t num_t2renames;
44183 + atomic_unchecked_t num_ffirst;
44184 + atomic_unchecked_t num_fnext;
44185 + atomic_unchecked_t num_fclose;
44186 + atomic_unchecked_t num_hardlinks;
44187 + atomic_unchecked_t num_symlinks;
44188 + atomic_unchecked_t num_locks;
44189 + atomic_unchecked_t num_acl_get;
44190 + atomic_unchecked_t num_acl_set;
44191 #ifdef CONFIG_CIFS_STATS2
44192 unsigned long long time_writes;
44193 unsigned long long time_reads;
44194 @@ -735,7 +735,7 @@ convert_delimiter(char *path, char delim)
44195 }
44196
44197 #ifdef CONFIG_CIFS_STATS
44198 -#define cifs_stats_inc atomic_inc
44199 +#define cifs_stats_inc atomic_inc_unchecked
44200
44201 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
44202 unsigned int bytes)
44203 @@ -1093,8 +1093,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
44204 /* Various Debug counters */
44205 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
44206 #ifdef CONFIG_CIFS_STATS2
44207 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
44208 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
44209 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
44210 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
44211 #endif
44212 GLOBAL_EXTERN atomic_t smBufAllocCount;
44213 GLOBAL_EXTERN atomic_t midCount;
44214 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
44215 index 6b0e064..94e6c3c 100644
44216 --- a/fs/cifs/link.c
44217 +++ b/fs/cifs/link.c
44218 @@ -600,7 +600,7 @@ symlink_exit:
44219
44220 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
44221 {
44222 - char *p = nd_get_link(nd);
44223 + const char *p = nd_get_link(nd);
44224 if (!IS_ERR(p))
44225 kfree(p);
44226 }
44227 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
44228 index 557506a..2fd3816 100644
44229 --- a/fs/cifs/misc.c
44230 +++ b/fs/cifs/misc.c
44231 @@ -156,7 +156,7 @@ cifs_buf_get(void)
44232 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
44233 atomic_inc(&bufAllocCount);
44234 #ifdef CONFIG_CIFS_STATS2
44235 - atomic_inc(&totBufAllocCount);
44236 + atomic_inc_unchecked(&totBufAllocCount);
44237 #endif /* CONFIG_CIFS_STATS2 */
44238 }
44239
44240 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
44241 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
44242 atomic_inc(&smBufAllocCount);
44243 #ifdef CONFIG_CIFS_STATS2
44244 - atomic_inc(&totSmBufAllocCount);
44245 + atomic_inc_unchecked(&totSmBufAllocCount);
44246 #endif /* CONFIG_CIFS_STATS2 */
44247
44248 }
44249 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
44250 index 6901578..d402eb5 100644
44251 --- a/fs/coda/cache.c
44252 +++ b/fs/coda/cache.c
44253 @@ -24,7 +24,7 @@
44254 #include "coda_linux.h"
44255 #include "coda_cache.h"
44256
44257 -static atomic_t permission_epoch = ATOMIC_INIT(0);
44258 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44259
44260 /* replace or extend an acl cache hit */
44261 void coda_cache_enter(struct inode *inode, int mask)
44262 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
44263 struct coda_inode_info *cii = ITOC(inode);
44264
44265 spin_lock(&cii->c_lock);
44266 - cii->c_cached_epoch = atomic_read(&permission_epoch);
44267 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44268 if (cii->c_uid != current_fsuid()) {
44269 cii->c_uid = current_fsuid();
44270 cii->c_cached_perm = mask;
44271 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
44272 {
44273 struct coda_inode_info *cii = ITOC(inode);
44274 spin_lock(&cii->c_lock);
44275 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44276 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44277 spin_unlock(&cii->c_lock);
44278 }
44279
44280 /* remove all acl caches */
44281 void coda_cache_clear_all(struct super_block *sb)
44282 {
44283 - atomic_inc(&permission_epoch);
44284 + atomic_inc_unchecked(&permission_epoch);
44285 }
44286
44287
44288 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
44289 spin_lock(&cii->c_lock);
44290 hit = (mask & cii->c_cached_perm) == mask &&
44291 cii->c_uid == current_fsuid() &&
44292 - cii->c_cached_epoch == atomic_read(&permission_epoch);
44293 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44294 spin_unlock(&cii->c_lock);
44295
44296 return hit;
44297 diff --git a/fs/compat.c b/fs/compat.c
44298 index 1bdb350..9f28287 100644
44299 --- a/fs/compat.c
44300 +++ b/fs/compat.c
44301 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
44302
44303 set_fs(KERNEL_DS);
44304 /* The __user pointer cast is valid because of the set_fs() */
44305 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44306 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44307 set_fs(oldfs);
44308 /* truncating is ok because it's a user address */
44309 if (!ret)
44310 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
44311 goto out;
44312
44313 ret = -EINVAL;
44314 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44315 + if (nr_segs > UIO_MAXIOV)
44316 goto out;
44317 if (nr_segs > fast_segs) {
44318 ret = -ENOMEM;
44319 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
44320
44321 struct compat_readdir_callback {
44322 struct compat_old_linux_dirent __user *dirent;
44323 + struct file * file;
44324 int result;
44325 };
44326
44327 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
44328 buf->result = -EOVERFLOW;
44329 return -EOVERFLOW;
44330 }
44331 +
44332 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44333 + return 0;
44334 +
44335 buf->result++;
44336 dirent = buf->dirent;
44337 if (!access_ok(VERIFY_WRITE, dirent,
44338 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
44339
44340 buf.result = 0;
44341 buf.dirent = dirent;
44342 + buf.file = file;
44343
44344 error = vfs_readdir(file, compat_fillonedir, &buf);
44345 if (buf.result)
44346 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
44347 struct compat_getdents_callback {
44348 struct compat_linux_dirent __user *current_dir;
44349 struct compat_linux_dirent __user *previous;
44350 + struct file * file;
44351 int count;
44352 int error;
44353 };
44354 @@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
44355 buf->error = -EOVERFLOW;
44356 return -EOVERFLOW;
44357 }
44358 +
44359 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44360 + return 0;
44361 +
44362 dirent = buf->previous;
44363 if (dirent) {
44364 if (__put_user(offset, &dirent->d_off))
44365 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44366 buf.previous = NULL;
44367 buf.count = count;
44368 buf.error = 0;
44369 + buf.file = file;
44370
44371 error = vfs_readdir(file, compat_filldir, &buf);
44372 if (error >= 0)
44373 @@ -986,6 +998,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44374 struct compat_getdents_callback64 {
44375 struct linux_dirent64 __user *current_dir;
44376 struct linux_dirent64 __user *previous;
44377 + struct file * file;
44378 int count;
44379 int error;
44380 };
44381 @@ -1002,6 +1015,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44382 buf->error = -EINVAL; /* only used if we fail.. */
44383 if (reclen > buf->count)
44384 return -EINVAL;
44385 +
44386 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44387 + return 0;
44388 +
44389 dirent = buf->previous;
44390
44391 if (dirent) {
44392 @@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44393 buf.previous = NULL;
44394 buf.count = count;
44395 buf.error = 0;
44396 + buf.file = file;
44397
44398 error = vfs_readdir(file, compat_filldir64, &buf);
44399 if (error >= 0)
44400 error = buf.error;
44401 lastdirent = buf.previous;
44402 if (lastdirent) {
44403 - typeof(lastdirent->d_off) d_off = file->f_pos;
44404 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44405 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44406 error = -EFAULT;
44407 else
44408 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44409 index 112e45a..b59845b 100644
44410 --- a/fs/compat_binfmt_elf.c
44411 +++ b/fs/compat_binfmt_elf.c
44412 @@ -30,11 +30,13 @@
44413 #undef elf_phdr
44414 #undef elf_shdr
44415 #undef elf_note
44416 +#undef elf_dyn
44417 #undef elf_addr_t
44418 #define elfhdr elf32_hdr
44419 #define elf_phdr elf32_phdr
44420 #define elf_shdr elf32_shdr
44421 #define elf_note elf32_note
44422 +#define elf_dyn Elf32_Dyn
44423 #define elf_addr_t Elf32_Addr
44424
44425 /*
44426 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
44427 index debdfe0..75d31d4 100644
44428 --- a/fs/compat_ioctl.c
44429 +++ b/fs/compat_ioctl.c
44430 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44431
44432 err = get_user(palp, &up->palette);
44433 err |= get_user(length, &up->length);
44434 + if (err)
44435 + return -EFAULT;
44436
44437 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44438 err = put_user(compat_ptr(palp), &up_native->palette);
44439 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44440 return -EFAULT;
44441 if (__get_user(udata, &ss32->iomem_base))
44442 return -EFAULT;
44443 - ss.iomem_base = compat_ptr(udata);
44444 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44445 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44446 __get_user(ss.port_high, &ss32->port_high))
44447 return -EFAULT;
44448 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
44449 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44450 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44451 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44452 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44453 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44454 return -EFAULT;
44455
44456 return ioctl_preallocate(file, p);
44457 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44458 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44459 {
44460 unsigned int a, b;
44461 - a = *(unsigned int *)p;
44462 - b = *(unsigned int *)q;
44463 + a = *(const unsigned int *)p;
44464 + b = *(const unsigned int *)q;
44465 if (a > b)
44466 return 1;
44467 if (a < b)
44468 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
44469 index 7e6c52d..94bc756 100644
44470 --- a/fs/configfs/dir.c
44471 +++ b/fs/configfs/dir.c
44472 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44473 }
44474 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44475 struct configfs_dirent *next;
44476 - const char * name;
44477 + const unsigned char * name;
44478 + char d_name[sizeof(next->s_dentry->d_iname)];
44479 int len;
44480 struct inode *inode = NULL;
44481
44482 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44483 continue;
44484
44485 name = configfs_get_name(next);
44486 - len = strlen(name);
44487 + if (next->s_dentry && name == next->s_dentry->d_iname) {
44488 + len = next->s_dentry->d_name.len;
44489 + memcpy(d_name, name, len);
44490 + name = d_name;
44491 + } else
44492 + len = strlen(name);
44493
44494 /*
44495 * We'll have a dentry and an inode for
44496 diff --git a/fs/dcache.c b/fs/dcache.c
44497 index 4046904..a58db1d 100644
44498 --- a/fs/dcache.c
44499 +++ b/fs/dcache.c
44500 @@ -389,7 +389,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
44501 * Inform try_to_ascend() that we are no longer attached to the
44502 * dentry tree
44503 */
44504 - dentry->d_flags |= DCACHE_DISCONNECTED;
44505 + dentry->d_flags |= DCACHE_DENTRY_KILLED;
44506 if (parent)
44507 spin_unlock(&parent->d_lock);
44508 dentry_iput(dentry);
44509 @@ -1046,7 +1046,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
44510 * or deletion
44511 */
44512 if (new != old->d_parent ||
44513 - (old->d_flags & DCACHE_DISCONNECTED) ||
44514 + (old->d_flags & DCACHE_DENTRY_KILLED) ||
44515 (!locked && read_seqretry(&rename_lock, seq))) {
44516 spin_unlock(&new->d_lock);
44517 new = NULL;
44518 @@ -3154,7 +3154,7 @@ void __init vfs_caches_init(unsigned long mempages)
44519 mempages -= reserve;
44520
44521 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44522 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44523 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44524
44525 dcache_init();
44526 inode_init();
44527 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
44528 index b80bc84..0d46d1a 100644
44529 --- a/fs/debugfs/inode.c
44530 +++ b/fs/debugfs/inode.c
44531 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44532 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44533 {
44534 return debugfs_create_file(name,
44535 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44536 + S_IFDIR | S_IRWXU,
44537 +#else
44538 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44539 +#endif
44540 parent, NULL, NULL);
44541 }
44542 EXPORT_SYMBOL_GPL(debugfs_create_dir);
44543 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
44544 index a07441a..5c47fa2 100644
44545 --- a/fs/ecryptfs/inode.c
44546 +++ b/fs/ecryptfs/inode.c
44547 @@ -621,6 +621,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44548 struct dentry *lower_old_dir_dentry;
44549 struct dentry *lower_new_dir_dentry;
44550 struct dentry *trap = NULL;
44551 + struct inode *target_inode;
44552
44553 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
44554 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
44555 @@ -628,6 +629,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44556 dget(lower_new_dentry);
44557 lower_old_dir_dentry = dget_parent(lower_old_dentry);
44558 lower_new_dir_dentry = dget_parent(lower_new_dentry);
44559 + target_inode = new_dentry->d_inode;
44560 trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
44561 /* source should not be ancestor of target */
44562 if (trap == lower_old_dentry) {
44563 @@ -643,6 +645,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44564 lower_new_dir_dentry->d_inode, lower_new_dentry);
44565 if (rc)
44566 goto out_lock;
44567 + if (target_inode)
44568 + fsstack_copy_attr_all(target_inode,
44569 + ecryptfs_inode_to_lower(target_inode));
44570 fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
44571 if (new_dir != old_dir)
44572 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
44573 @@ -671,7 +676,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44574 old_fs = get_fs();
44575 set_fs(get_ds());
44576 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44577 - (char __user *)lower_buf,
44578 + (char __force_user *)lower_buf,
44579 PATH_MAX);
44580 set_fs(old_fs);
44581 if (rc < 0)
44582 @@ -703,7 +708,7 @@ out:
44583 static void
44584 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44585 {
44586 - char *buf = nd_get_link(nd);
44587 + const char *buf = nd_get_link(nd);
44588 if (!IS_ERR(buf)) {
44589 /* Free the char* */
44590 kfree(buf);
44591 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
44592 index c0038f6..47ab347 100644
44593 --- a/fs/ecryptfs/miscdev.c
44594 +++ b/fs/ecryptfs/miscdev.c
44595 @@ -355,7 +355,7 @@ check_list:
44596 goto out_unlock_msg_ctx;
44597 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44598 if (msg_ctx->msg) {
44599 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
44600 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44601 goto out_unlock_msg_ctx;
44602 i += packet_length_size;
44603 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
44604 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
44605 index b2a34a1..162fa69 100644
44606 --- a/fs/ecryptfs/read_write.c
44607 +++ b/fs/ecryptfs/read_write.c
44608 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44609 return -EIO;
44610 fs_save = get_fs();
44611 set_fs(get_ds());
44612 - rc = vfs_write(lower_file, data, size, &offset);
44613 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44614 set_fs(fs_save);
44615 mark_inode_dirty_sync(ecryptfs_inode);
44616 return rc;
44617 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44618 return -EIO;
44619 fs_save = get_fs();
44620 set_fs(get_ds());
44621 - rc = vfs_read(lower_file, data, size, &offset);
44622 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44623 set_fs(fs_save);
44624 return rc;
44625 }
44626 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
44627 index 1c8b556..eedec84 100644
44628 --- a/fs/eventpoll.c
44629 +++ b/fs/eventpoll.c
44630 @@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
44631 error = PTR_ERR(file);
44632 goto out_free_fd;
44633 }
44634 - fd_install(fd, file);
44635 ep->file = file;
44636 + fd_install(fd, file);
44637 return fd;
44638
44639 out_free_fd:
44640 diff --git a/fs/exec.c b/fs/exec.c
44641 index e95aeed..a943469 100644
44642 --- a/fs/exec.c
44643 +++ b/fs/exec.c
44644 @@ -55,6 +55,15 @@
44645 #include <linux/pipe_fs_i.h>
44646 #include <linux/oom.h>
44647 #include <linux/compat.h>
44648 +#include <linux/random.h>
44649 +#include <linux/seq_file.h>
44650 +
44651 +#ifdef CONFIG_PAX_REFCOUNT
44652 +#include <linux/kallsyms.h>
44653 +#include <linux/kdebug.h>
44654 +#endif
44655 +
44656 +#include <trace/events/fs.h>
44657
44658 #include <asm/uaccess.h>
44659 #include <asm/mmu_context.h>
44660 @@ -66,6 +75,18 @@
44661
44662 #include <trace/events/sched.h>
44663
44664 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44665 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
44666 +{
44667 + WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
44668 +}
44669 +#endif
44670 +
44671 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44672 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44673 +EXPORT_SYMBOL(pax_set_initial_flags_func);
44674 +#endif
44675 +
44676 int core_uses_pid;
44677 char core_pattern[CORENAME_MAX_SIZE] = "core";
44678 unsigned int core_pipe_limit;
44679 @@ -75,7 +96,7 @@ struct core_name {
44680 char *corename;
44681 int used, size;
44682 };
44683 -static atomic_t call_count = ATOMIC_INIT(1);
44684 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44685
44686 /* The maximal length of core_pattern is also specified in sysctl.c */
44687
44688 @@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44689 int write)
44690 {
44691 struct page *page;
44692 - int ret;
44693
44694 -#ifdef CONFIG_STACK_GROWSUP
44695 - if (write) {
44696 - ret = expand_downwards(bprm->vma, pos);
44697 - if (ret < 0)
44698 - return NULL;
44699 - }
44700 -#endif
44701 - ret = get_user_pages(current, bprm->mm, pos,
44702 - 1, write, 1, &page, NULL);
44703 - if (ret <= 0)
44704 + if (0 > expand_downwards(bprm->vma, pos))
44705 + return NULL;
44706 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44707 return NULL;
44708
44709 if (write) {
44710 @@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44711 if (size <= ARG_MAX)
44712 return page;
44713
44714 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44715 + // only allow 512KB for argv+env on suid/sgid binaries
44716 + // to prevent easy ASLR exhaustion
44717 + if (((bprm->cred->euid != current_euid()) ||
44718 + (bprm->cred->egid != current_egid())) &&
44719 + (size > (512 * 1024))) {
44720 + put_page(page);
44721 + return NULL;
44722 + }
44723 +#endif
44724 +
44725 /*
44726 * Limit to 1/4-th the stack size for the argv+env strings.
44727 * This ensures that:
44728 @@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44729 vma->vm_end = STACK_TOP_MAX;
44730 vma->vm_start = vma->vm_end - PAGE_SIZE;
44731 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44732 +
44733 +#ifdef CONFIG_PAX_SEGMEXEC
44734 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44735 +#endif
44736 +
44737 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44738 INIT_LIST_HEAD(&vma->anon_vma_chain);
44739
44740 @@ -287,6 +316,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44741 mm->stack_vm = mm->total_vm = 1;
44742 up_write(&mm->mmap_sem);
44743 bprm->p = vma->vm_end - sizeof(void *);
44744 +
44745 +#ifdef CONFIG_PAX_RANDUSTACK
44746 + if (randomize_va_space)
44747 + bprm->p ^= random32() & ~PAGE_MASK;
44748 +#endif
44749 +
44750 return 0;
44751 err:
44752 up_write(&mm->mmap_sem);
44753 @@ -395,19 +430,7 @@ err:
44754 return err;
44755 }
44756
44757 -struct user_arg_ptr {
44758 -#ifdef CONFIG_COMPAT
44759 - bool is_compat;
44760 -#endif
44761 - union {
44762 - const char __user *const __user *native;
44763 -#ifdef CONFIG_COMPAT
44764 - compat_uptr_t __user *compat;
44765 -#endif
44766 - } ptr;
44767 -};
44768 -
44769 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44770 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44771 {
44772 const char __user *native;
44773
44774 @@ -416,14 +439,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44775 compat_uptr_t compat;
44776
44777 if (get_user(compat, argv.ptr.compat + nr))
44778 - return ERR_PTR(-EFAULT);
44779 + return (const char __force_user *)ERR_PTR(-EFAULT);
44780
44781 return compat_ptr(compat);
44782 }
44783 #endif
44784
44785 if (get_user(native, argv.ptr.native + nr))
44786 - return ERR_PTR(-EFAULT);
44787 + return (const char __force_user *)ERR_PTR(-EFAULT);
44788
44789 return native;
44790 }
44791 @@ -442,7 +465,7 @@ static int count(struct user_arg_ptr argv, int max)
44792 if (!p)
44793 break;
44794
44795 - if (IS_ERR(p))
44796 + if (IS_ERR((const char __force_kernel *)p))
44797 return -EFAULT;
44798
44799 if (i++ >= max)
44800 @@ -476,7 +499,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44801
44802 ret = -EFAULT;
44803 str = get_user_arg_ptr(argv, argc);
44804 - if (IS_ERR(str))
44805 + if (IS_ERR((const char __force_kernel *)str))
44806 goto out;
44807
44808 len = strnlen_user(str, MAX_ARG_STRLEN);
44809 @@ -558,7 +581,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44810 int r;
44811 mm_segment_t oldfs = get_fs();
44812 struct user_arg_ptr argv = {
44813 - .ptr.native = (const char __user *const __user *)__argv,
44814 + .ptr.native = (const char __force_user *const __force_user *)__argv,
44815 };
44816
44817 set_fs(KERNEL_DS);
44818 @@ -593,7 +616,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44819 unsigned long new_end = old_end - shift;
44820 struct mmu_gather tlb;
44821
44822 - BUG_ON(new_start > new_end);
44823 + if (new_start >= new_end || new_start < mmap_min_addr)
44824 + return -ENOMEM;
44825
44826 /*
44827 * ensure there are no vmas between where we want to go
44828 @@ -602,6 +626,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44829 if (vma != find_vma(mm, new_start))
44830 return -EFAULT;
44831
44832 +#ifdef CONFIG_PAX_SEGMEXEC
44833 + BUG_ON(pax_find_mirror_vma(vma));
44834 +#endif
44835 +
44836 /*
44837 * cover the whole range: [new_start, old_end)
44838 */
44839 @@ -682,10 +710,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44840 stack_top = arch_align_stack(stack_top);
44841 stack_top = PAGE_ALIGN(stack_top);
44842
44843 - if (unlikely(stack_top < mmap_min_addr) ||
44844 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44845 - return -ENOMEM;
44846 -
44847 stack_shift = vma->vm_end - stack_top;
44848
44849 bprm->p -= stack_shift;
44850 @@ -697,8 +721,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44851 bprm->exec -= stack_shift;
44852
44853 down_write(&mm->mmap_sem);
44854 +
44855 + /* Move stack pages down in memory. */
44856 + if (stack_shift) {
44857 + ret = shift_arg_pages(vma, stack_shift);
44858 + if (ret)
44859 + goto out_unlock;
44860 + }
44861 +
44862 vm_flags = VM_STACK_FLAGS;
44863
44864 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44865 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44866 + vm_flags &= ~VM_EXEC;
44867 +
44868 +#ifdef CONFIG_PAX_MPROTECT
44869 + if (mm->pax_flags & MF_PAX_MPROTECT)
44870 + vm_flags &= ~VM_MAYEXEC;
44871 +#endif
44872 +
44873 + }
44874 +#endif
44875 +
44876 /*
44877 * Adjust stack execute permissions; explicitly enable for
44878 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44879 @@ -717,13 +761,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44880 goto out_unlock;
44881 BUG_ON(prev != vma);
44882
44883 - /* Move stack pages down in memory. */
44884 - if (stack_shift) {
44885 - ret = shift_arg_pages(vma, stack_shift);
44886 - if (ret)
44887 - goto out_unlock;
44888 - }
44889 -
44890 /* mprotect_fixup is overkill to remove the temporary stack flags */
44891 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44892
44893 @@ -781,6 +818,8 @@ struct file *open_exec(const char *name)
44894
44895 fsnotify_open(file);
44896
44897 + trace_open_exec(name);
44898 +
44899 err = deny_write_access(file);
44900 if (err)
44901 goto exit;
44902 @@ -804,7 +843,7 @@ int kernel_read(struct file *file, loff_t offset,
44903 old_fs = get_fs();
44904 set_fs(get_ds());
44905 /* The cast to a user pointer is valid due to the set_fs() */
44906 - result = vfs_read(file, (void __user *)addr, count, &pos);
44907 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
44908 set_fs(old_fs);
44909 return result;
44910 }
44911 @@ -1257,7 +1296,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44912 }
44913 rcu_read_unlock();
44914
44915 - if (p->fs->users > n_fs) {
44916 + if (atomic_read(&p->fs->users) > n_fs) {
44917 bprm->unsafe |= LSM_UNSAFE_SHARE;
44918 } else {
44919 res = -EAGAIN;
44920 @@ -1460,6 +1499,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44921
44922 EXPORT_SYMBOL(search_binary_handler);
44923
44924 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44925 +static DEFINE_PER_CPU(u64, exec_counter);
44926 +static int __init init_exec_counters(void)
44927 +{
44928 + unsigned int cpu;
44929 +
44930 + for_each_possible_cpu(cpu) {
44931 + per_cpu(exec_counter, cpu) = (u64)cpu;
44932 + }
44933 +
44934 + return 0;
44935 +}
44936 +early_initcall(init_exec_counters);
44937 +static inline void increment_exec_counter(void)
44938 +{
44939 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44940 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44941 +}
44942 +#else
44943 +static inline void increment_exec_counter(void) {}
44944 +#endif
44945 +
44946 /*
44947 * sys_execve() executes a new program.
44948 */
44949 @@ -1468,6 +1529,11 @@ static int do_execve_common(const char *filename,
44950 struct user_arg_ptr envp,
44951 struct pt_regs *regs)
44952 {
44953 +#ifdef CONFIG_GRKERNSEC
44954 + struct file *old_exec_file;
44955 + struct acl_subject_label *old_acl;
44956 + struct rlimit old_rlim[RLIM_NLIMITS];
44957 +#endif
44958 struct linux_binprm *bprm;
44959 struct file *file;
44960 struct files_struct *displaced;
44961 @@ -1475,6 +1541,8 @@ static int do_execve_common(const char *filename,
44962 int retval;
44963 const struct cred *cred = current_cred();
44964
44965 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44966 +
44967 /*
44968 * We move the actual failure in case of RLIMIT_NPROC excess from
44969 * set*uid() to execve() because too many poorly written programs
44970 @@ -1515,12 +1583,27 @@ static int do_execve_common(const char *filename,
44971 if (IS_ERR(file))
44972 goto out_unmark;
44973
44974 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44975 + retval = -EPERM;
44976 + goto out_file;
44977 + }
44978 +
44979 sched_exec();
44980
44981 bprm->file = file;
44982 bprm->filename = filename;
44983 bprm->interp = filename;
44984
44985 + if (gr_process_user_ban()) {
44986 + retval = -EPERM;
44987 + goto out_file;
44988 + }
44989 +
44990 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44991 + retval = -EACCES;
44992 + goto out_file;
44993 + }
44994 +
44995 retval = bprm_mm_init(bprm);
44996 if (retval)
44997 goto out_file;
44998 @@ -1537,24 +1620,65 @@ static int do_execve_common(const char *filename,
44999 if (retval < 0)
45000 goto out;
45001
45002 +#ifdef CONFIG_GRKERNSEC
45003 + old_acl = current->acl;
45004 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
45005 + old_exec_file = current->exec_file;
45006 + get_file(file);
45007 + current->exec_file = file;
45008 +#endif
45009 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45010 + /* limit suid stack to 8MB
45011 + we saved the old limits above and will restore them if this exec fails
45012 + */
45013 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
45014 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
45015 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
45016 +#endif
45017 +
45018 + if (!gr_tpe_allow(file)) {
45019 + retval = -EACCES;
45020 + goto out_fail;
45021 + }
45022 +
45023 + if (gr_check_crash_exec(file)) {
45024 + retval = -EACCES;
45025 + goto out_fail;
45026 + }
45027 +
45028 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
45029 + bprm->unsafe);
45030 + if (retval < 0)
45031 + goto out_fail;
45032 +
45033 retval = copy_strings_kernel(1, &bprm->filename, bprm);
45034 if (retval < 0)
45035 - goto out;
45036 + goto out_fail;
45037
45038 bprm->exec = bprm->p;
45039 retval = copy_strings(bprm->envc, envp, bprm);
45040 if (retval < 0)
45041 - goto out;
45042 + goto out_fail;
45043
45044 retval = copy_strings(bprm->argc, argv, bprm);
45045 if (retval < 0)
45046 - goto out;
45047 + goto out_fail;
45048 +
45049 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
45050 +
45051 + gr_handle_exec_args(bprm, argv);
45052
45053 retval = search_binary_handler(bprm,regs);
45054 if (retval < 0)
45055 - goto out;
45056 + goto out_fail;
45057 +#ifdef CONFIG_GRKERNSEC
45058 + if (old_exec_file)
45059 + fput(old_exec_file);
45060 +#endif
45061
45062 /* execve succeeded */
45063 +
45064 + increment_exec_counter();
45065 current->fs->in_exec = 0;
45066 current->in_execve = 0;
45067 acct_update_integrals(current);
45068 @@ -1563,6 +1687,14 @@ static int do_execve_common(const char *filename,
45069 put_files_struct(displaced);
45070 return retval;
45071
45072 +out_fail:
45073 +#ifdef CONFIG_GRKERNSEC
45074 + current->acl = old_acl;
45075 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
45076 + fput(current->exec_file);
45077 + current->exec_file = old_exec_file;
45078 +#endif
45079 +
45080 out:
45081 if (bprm->mm) {
45082 acct_arg_size(bprm, 0);
45083 @@ -1636,7 +1768,7 @@ static int expand_corename(struct core_name *cn)
45084 {
45085 char *old_corename = cn->corename;
45086
45087 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
45088 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
45089 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
45090
45091 if (!cn->corename) {
45092 @@ -1733,7 +1865,7 @@ static int format_corename(struct core_name *cn, long signr)
45093 int pid_in_pattern = 0;
45094 int err = 0;
45095
45096 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
45097 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
45098 cn->corename = kmalloc(cn->size, GFP_KERNEL);
45099 cn->used = 0;
45100
45101 @@ -1830,6 +1962,250 @@ out:
45102 return ispipe;
45103 }
45104
45105 +int pax_check_flags(unsigned long *flags)
45106 +{
45107 + int retval = 0;
45108 +
45109 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
45110 + if (*flags & MF_PAX_SEGMEXEC)
45111 + {
45112 + *flags &= ~MF_PAX_SEGMEXEC;
45113 + retval = -EINVAL;
45114 + }
45115 +#endif
45116 +
45117 + if ((*flags & MF_PAX_PAGEEXEC)
45118 +
45119 +#ifdef CONFIG_PAX_PAGEEXEC
45120 + && (*flags & MF_PAX_SEGMEXEC)
45121 +#endif
45122 +
45123 + )
45124 + {
45125 + *flags &= ~MF_PAX_PAGEEXEC;
45126 + retval = -EINVAL;
45127 + }
45128 +
45129 + if ((*flags & MF_PAX_MPROTECT)
45130 +
45131 +#ifdef CONFIG_PAX_MPROTECT
45132 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45133 +#endif
45134 +
45135 + )
45136 + {
45137 + *flags &= ~MF_PAX_MPROTECT;
45138 + retval = -EINVAL;
45139 + }
45140 +
45141 + if ((*flags & MF_PAX_EMUTRAMP)
45142 +
45143 +#ifdef CONFIG_PAX_EMUTRAMP
45144 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45145 +#endif
45146 +
45147 + )
45148 + {
45149 + *flags &= ~MF_PAX_EMUTRAMP;
45150 + retval = -EINVAL;
45151 + }
45152 +
45153 + return retval;
45154 +}
45155 +
45156 +EXPORT_SYMBOL(pax_check_flags);
45157 +
45158 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45159 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
45160 +{
45161 + struct task_struct *tsk = current;
45162 + struct mm_struct *mm = current->mm;
45163 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
45164 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
45165 + char *path_exec = NULL;
45166 + char *path_fault = NULL;
45167 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
45168 +
45169 + if (buffer_exec && buffer_fault) {
45170 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
45171 +
45172 + down_read(&mm->mmap_sem);
45173 + vma = mm->mmap;
45174 + while (vma && (!vma_exec || !vma_fault)) {
45175 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
45176 + vma_exec = vma;
45177 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
45178 + vma_fault = vma;
45179 + vma = vma->vm_next;
45180 + }
45181 + if (vma_exec) {
45182 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
45183 + if (IS_ERR(path_exec))
45184 + path_exec = "<path too long>";
45185 + else {
45186 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
45187 + if (path_exec) {
45188 + *path_exec = 0;
45189 + path_exec = buffer_exec;
45190 + } else
45191 + path_exec = "<path too long>";
45192 + }
45193 + }
45194 + if (vma_fault) {
45195 + start = vma_fault->vm_start;
45196 + end = vma_fault->vm_end;
45197 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
45198 + if (vma_fault->vm_file) {
45199 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
45200 + if (IS_ERR(path_fault))
45201 + path_fault = "<path too long>";
45202 + else {
45203 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
45204 + if (path_fault) {
45205 + *path_fault = 0;
45206 + path_fault = buffer_fault;
45207 + } else
45208 + path_fault = "<path too long>";
45209 + }
45210 + } else
45211 + path_fault = "<anonymous mapping>";
45212 + }
45213 + up_read(&mm->mmap_sem);
45214 + }
45215 + if (tsk->signal->curr_ip)
45216 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
45217 + else
45218 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
45219 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
45220 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
45221 + task_uid(tsk), task_euid(tsk), pc, sp);
45222 + free_page((unsigned long)buffer_exec);
45223 + free_page((unsigned long)buffer_fault);
45224 + pax_report_insns(regs, pc, sp);
45225 + do_coredump(SIGKILL, SIGKILL, regs);
45226 +}
45227 +#endif
45228 +
45229 +#ifdef CONFIG_PAX_REFCOUNT
45230 +void pax_report_refcount_overflow(struct pt_regs *regs)
45231 +{
45232 + if (current->signal->curr_ip)
45233 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45234 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
45235 + else
45236 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45237 + current->comm, task_pid_nr(current), current_uid(), current_euid());
45238 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
45239 + show_regs(regs);
45240 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
45241 +}
45242 +#endif
45243 +
45244 +#ifdef CONFIG_PAX_USERCOPY
45245 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
45246 +static noinline int check_stack_object(const void *obj, unsigned long len)
45247 +{
45248 + const void * const stack = task_stack_page(current);
45249 + const void * const stackend = stack + THREAD_SIZE;
45250 +
45251 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45252 + const void *frame = NULL;
45253 + const void *oldframe;
45254 +#endif
45255 +
45256 + if (obj + len < obj)
45257 + return -1;
45258 +
45259 + if (obj + len <= stack || stackend <= obj)
45260 + return 0;
45261 +
45262 + if (obj < stack || stackend < obj + len)
45263 + return -1;
45264 +
45265 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45266 + oldframe = __builtin_frame_address(1);
45267 + if (oldframe)
45268 + frame = __builtin_frame_address(2);
45269 + /*
45270 + low ----------------------------------------------> high
45271 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
45272 + ^----------------^
45273 + allow copies only within here
45274 + */
45275 + while (stack <= frame && frame < stackend) {
45276 + /* if obj + len extends past the last frame, this
45277 + check won't pass and the next frame will be 0,
45278 + causing us to bail out and correctly report
45279 + the copy as invalid
45280 + */
45281 + if (obj + len <= frame)
45282 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
45283 + oldframe = frame;
45284 + frame = *(const void * const *)frame;
45285 + }
45286 + return -1;
45287 +#else
45288 + return 1;
45289 +#endif
45290 +}
45291 +
45292 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
45293 +{
45294 + if (current->signal->curr_ip)
45295 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45296 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45297 + else
45298 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45299 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45300 + dump_stack();
45301 + gr_handle_kernel_exploit();
45302 + do_group_exit(SIGKILL);
45303 +}
45304 +#endif
45305 +
45306 +void check_object_size(const void *ptr, unsigned long n, bool to)
45307 +{
45308 +
45309 +#ifdef CONFIG_PAX_USERCOPY
45310 + const char *type;
45311 +
45312 + if (!n)
45313 + return;
45314 +
45315 + type = check_heap_object(ptr, n, to);
45316 + if (!type) {
45317 + if (check_stack_object(ptr, n) != -1)
45318 + return;
45319 + type = "<process stack>";
45320 + }
45321 +
45322 + pax_report_usercopy(ptr, n, to, type);
45323 +#endif
45324 +
45325 +}
45326 +EXPORT_SYMBOL(check_object_size);
45327 +
45328 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45329 +void pax_track_stack(void)
45330 +{
45331 + unsigned long sp = (unsigned long)&sp;
45332 + if (sp < current_thread_info()->lowest_stack &&
45333 + sp > (unsigned long)task_stack_page(current))
45334 + current_thread_info()->lowest_stack = sp;
45335 +}
45336 +EXPORT_SYMBOL(pax_track_stack);
45337 +#endif
45338 +
45339 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
45340 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
45341 +{
45342 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
45343 + dump_stack();
45344 + do_group_exit(SIGKILL);
45345 +}
45346 +EXPORT_SYMBOL(report_size_overflow);
45347 +#endif
45348 +
45349 static int zap_process(struct task_struct *start, int exit_code)
45350 {
45351 struct task_struct *t;
45352 @@ -2002,17 +2378,17 @@ static void coredump_finish(struct mm_struct *mm)
45353 void set_dumpable(struct mm_struct *mm, int value)
45354 {
45355 switch (value) {
45356 - case 0:
45357 + case SUID_DUMPABLE_DISABLED:
45358 clear_bit(MMF_DUMPABLE, &mm->flags);
45359 smp_wmb();
45360 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45361 break;
45362 - case 1:
45363 + case SUID_DUMPABLE_ENABLED:
45364 set_bit(MMF_DUMPABLE, &mm->flags);
45365 smp_wmb();
45366 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45367 break;
45368 - case 2:
45369 + case SUID_DUMPABLE_SAFE:
45370 set_bit(MMF_DUMP_SECURELY, &mm->flags);
45371 smp_wmb();
45372 set_bit(MMF_DUMPABLE, &mm->flags);
45373 @@ -2025,7 +2401,7 @@ static int __get_dumpable(unsigned long mm_flags)
45374 int ret;
45375
45376 ret = mm_flags & MMF_DUMPABLE_MASK;
45377 - return (ret >= 2) ? 2 : ret;
45378 + return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
45379 }
45380
45381 int get_dumpable(struct mm_struct *mm)
45382 @@ -2040,17 +2416,17 @@ static void wait_for_dump_helpers(struct file *file)
45383 pipe = file->f_path.dentry->d_inode->i_pipe;
45384
45385 pipe_lock(pipe);
45386 - pipe->readers++;
45387 - pipe->writers--;
45388 + atomic_inc(&pipe->readers);
45389 + atomic_dec(&pipe->writers);
45390
45391 - while ((pipe->readers > 1) && (!signal_pending(current))) {
45392 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45393 wake_up_interruptible_sync(&pipe->wait);
45394 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45395 pipe_wait(pipe);
45396 }
45397
45398 - pipe->readers--;
45399 - pipe->writers++;
45400 + atomic_dec(&pipe->readers);
45401 + atomic_inc(&pipe->writers);
45402 pipe_unlock(pipe);
45403
45404 }
45405 @@ -2111,7 +2487,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45406 int retval = 0;
45407 int flag = 0;
45408 int ispipe;
45409 - static atomic_t core_dump_count = ATOMIC_INIT(0);
45410 + bool need_nonrelative = false;
45411 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45412 struct coredump_params cprm = {
45413 .signr = signr,
45414 .regs = regs,
45415 @@ -2126,6 +2503,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45416
45417 audit_core_dumps(signr);
45418
45419 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45420 + gr_handle_brute_attach(current, cprm.mm_flags);
45421 +
45422 binfmt = mm->binfmt;
45423 if (!binfmt || !binfmt->core_dump)
45424 goto fail;
45425 @@ -2136,14 +2516,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45426 if (!cred)
45427 goto fail;
45428 /*
45429 - * We cannot trust fsuid as being the "true" uid of the
45430 - * process nor do we know its entire history. We only know it
45431 - * was tainted so we dump it as root in mode 2.
45432 + * We cannot trust fsuid as being the "true" uid of the process
45433 + * nor do we know its entire history. We only know it was tainted
45434 + * so we dump it as root in mode 2, and only into a controlled
45435 + * environment (pipe handler or fully qualified path).
45436 */
45437 - if (__get_dumpable(cprm.mm_flags) == 2) {
45438 + if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
45439 /* Setuid core dump mode */
45440 flag = O_EXCL; /* Stop rewrite attacks */
45441 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
45442 + need_nonrelative = true;
45443 }
45444
45445 retval = coredump_wait(exit_code, &core_state);
45446 @@ -2193,7 +2575,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45447 }
45448 cprm.limit = RLIM_INFINITY;
45449
45450 - dump_count = atomic_inc_return(&core_dump_count);
45451 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
45452 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45453 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45454 task_tgid_vnr(current), current->comm);
45455 @@ -2220,9 +2602,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45456 } else {
45457 struct inode *inode;
45458
45459 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45460 +
45461 if (cprm.limit < binfmt->min_coredump)
45462 goto fail_unlock;
45463
45464 + if (need_nonrelative && cn.corename[0] != '/') {
45465 + printk(KERN_WARNING "Pid %d(%s) can only dump core "\
45466 + "to fully qualified path!\n",
45467 + task_tgid_vnr(current), current->comm);
45468 + printk(KERN_WARNING "Skipping core dump\n");
45469 + goto fail_unlock;
45470 + }
45471 +
45472 cprm.file = filp_open(cn.corename,
45473 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
45474 0600);
45475 @@ -2263,7 +2655,7 @@ close_fail:
45476 filp_close(cprm.file, NULL);
45477 fail_dropcount:
45478 if (ispipe)
45479 - atomic_dec(&core_dump_count);
45480 + atomic_dec_unchecked(&core_dump_count);
45481 fail_unlock:
45482 kfree(cn.corename);
45483 fail_corename:
45484 @@ -2282,7 +2674,7 @@ fail:
45485 */
45486 int dump_write(struct file *file, const void *addr, int nr)
45487 {
45488 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45489 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45490 }
45491 EXPORT_SYMBOL(dump_write);
45492
45493 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
45494 index 1c36139..cf6b350 100644
45495 --- a/fs/ext2/balloc.c
45496 +++ b/fs/ext2/balloc.c
45497 @@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45498
45499 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45500 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45501 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45502 + if (free_blocks < root_blocks + 1 &&
45503 !uid_eq(sbi->s_resuid, current_fsuid()) &&
45504 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45505 - !in_group_p (sbi->s_resgid))) {
45506 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45507 return 0;
45508 }
45509 return 1;
45510 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
45511 index 25cd608..9ed5294 100644
45512 --- a/fs/ext3/balloc.c
45513 +++ b/fs/ext3/balloc.c
45514 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45515
45516 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45517 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45518 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45519 + if (free_blocks < root_blocks + 1 &&
45520 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
45521 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45522 - !in_group_p (sbi->s_resgid))) {
45523 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45524 return 0;
45525 }
45526 return 1;
45527 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
45528 index 1b50890..e56c5ad 100644
45529 --- a/fs/ext4/balloc.c
45530 +++ b/fs/ext4/balloc.c
45531 @@ -500,8 +500,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
45532 /* Hm, nope. Are (enough) root reserved clusters available? */
45533 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
45534 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
45535 - capable(CAP_SYS_RESOURCE) ||
45536 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45537 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45538 + capable_nolog(CAP_SYS_RESOURCE)) {
45539
45540 if (free_clusters >= (nclusters + dirty_clusters))
45541 return 1;
45542 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
45543 index 01434f2..bd995b4 100644
45544 --- a/fs/ext4/ext4.h
45545 +++ b/fs/ext4/ext4.h
45546 @@ -1246,19 +1246,19 @@ struct ext4_sb_info {
45547 unsigned long s_mb_last_start;
45548
45549 /* stats for buddy allocator */
45550 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45551 - atomic_t s_bal_success; /* we found long enough chunks */
45552 - atomic_t s_bal_allocated; /* in blocks */
45553 - atomic_t s_bal_ex_scanned; /* total extents scanned */
45554 - atomic_t s_bal_goals; /* goal hits */
45555 - atomic_t s_bal_breaks; /* too long searches */
45556 - atomic_t s_bal_2orders; /* 2^order hits */
45557 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45558 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45559 + atomic_unchecked_t s_bal_allocated; /* in blocks */
45560 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45561 + atomic_unchecked_t s_bal_goals; /* goal hits */
45562 + atomic_unchecked_t s_bal_breaks; /* too long searches */
45563 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45564 spinlock_t s_bal_lock;
45565 unsigned long s_mb_buddies_generated;
45566 unsigned long long s_mb_generation_time;
45567 - atomic_t s_mb_lost_chunks;
45568 - atomic_t s_mb_preallocated;
45569 - atomic_t s_mb_discarded;
45570 + atomic_unchecked_t s_mb_lost_chunks;
45571 + atomic_unchecked_t s_mb_preallocated;
45572 + atomic_unchecked_t s_mb_discarded;
45573 atomic_t s_lock_busy;
45574
45575 /* locality groups */
45576 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
45577 index 1cd6994..5799d45 100644
45578 --- a/fs/ext4/mballoc.c
45579 +++ b/fs/ext4/mballoc.c
45580 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45581 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45582
45583 if (EXT4_SB(sb)->s_mb_stats)
45584 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45585 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45586
45587 break;
45588 }
45589 @@ -2041,7 +2041,7 @@ repeat:
45590 ac->ac_status = AC_STATUS_CONTINUE;
45591 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45592 cr = 3;
45593 - atomic_inc(&sbi->s_mb_lost_chunks);
45594 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45595 goto repeat;
45596 }
45597 }
45598 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
45599 if (sbi->s_mb_stats) {
45600 ext4_msg(sb, KERN_INFO,
45601 "mballoc: %u blocks %u reqs (%u success)",
45602 - atomic_read(&sbi->s_bal_allocated),
45603 - atomic_read(&sbi->s_bal_reqs),
45604 - atomic_read(&sbi->s_bal_success));
45605 + atomic_read_unchecked(&sbi->s_bal_allocated),
45606 + atomic_read_unchecked(&sbi->s_bal_reqs),
45607 + atomic_read_unchecked(&sbi->s_bal_success));
45608 ext4_msg(sb, KERN_INFO,
45609 "mballoc: %u extents scanned, %u goal hits, "
45610 "%u 2^N hits, %u breaks, %u lost",
45611 - atomic_read(&sbi->s_bal_ex_scanned),
45612 - atomic_read(&sbi->s_bal_goals),
45613 - atomic_read(&sbi->s_bal_2orders),
45614 - atomic_read(&sbi->s_bal_breaks),
45615 - atomic_read(&sbi->s_mb_lost_chunks));
45616 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45617 + atomic_read_unchecked(&sbi->s_bal_goals),
45618 + atomic_read_unchecked(&sbi->s_bal_2orders),
45619 + atomic_read_unchecked(&sbi->s_bal_breaks),
45620 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45621 ext4_msg(sb, KERN_INFO,
45622 "mballoc: %lu generated and it took %Lu",
45623 sbi->s_mb_buddies_generated,
45624 sbi->s_mb_generation_time);
45625 ext4_msg(sb, KERN_INFO,
45626 "mballoc: %u preallocated, %u discarded",
45627 - atomic_read(&sbi->s_mb_preallocated),
45628 - atomic_read(&sbi->s_mb_discarded));
45629 + atomic_read_unchecked(&sbi->s_mb_preallocated),
45630 + atomic_read_unchecked(&sbi->s_mb_discarded));
45631 }
45632
45633 free_percpu(sbi->s_locality_groups);
45634 @@ -3047,16 +3047,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45635 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45636
45637 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45638 - atomic_inc(&sbi->s_bal_reqs);
45639 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45640 + atomic_inc_unchecked(&sbi->s_bal_reqs);
45641 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45642 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45643 - atomic_inc(&sbi->s_bal_success);
45644 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45645 + atomic_inc_unchecked(&sbi->s_bal_success);
45646 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45647 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45648 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45649 - atomic_inc(&sbi->s_bal_goals);
45650 + atomic_inc_unchecked(&sbi->s_bal_goals);
45651 if (ac->ac_found > sbi->s_mb_max_to_scan)
45652 - atomic_inc(&sbi->s_bal_breaks);
45653 + atomic_inc_unchecked(&sbi->s_bal_breaks);
45654 }
45655
45656 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45657 @@ -3456,7 +3456,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45658 trace_ext4_mb_new_inode_pa(ac, pa);
45659
45660 ext4_mb_use_inode_pa(ac, pa);
45661 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45662 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45663
45664 ei = EXT4_I(ac->ac_inode);
45665 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45666 @@ -3516,7 +3516,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45667 trace_ext4_mb_new_group_pa(ac, pa);
45668
45669 ext4_mb_use_group_pa(ac, pa);
45670 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45671 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45672
45673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45674 lg = ac->ac_lg;
45675 @@ -3605,7 +3605,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45676 * from the bitmap and continue.
45677 */
45678 }
45679 - atomic_add(free, &sbi->s_mb_discarded);
45680 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
45681
45682 return err;
45683 }
45684 @@ -3623,7 +3623,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45685 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45686 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45687 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45688 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45689 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45690 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45691
45692 return 0;
45693 diff --git a/fs/fcntl.c b/fs/fcntl.c
45694 index 81b70e6..d9ae6cf 100644
45695 --- a/fs/fcntl.c
45696 +++ b/fs/fcntl.c
45697 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45698 if (err)
45699 return err;
45700
45701 + if (gr_handle_chroot_fowner(pid, type))
45702 + return -ENOENT;
45703 + if (gr_check_protected_task_fowner(pid, type))
45704 + return -EACCES;
45705 +
45706 f_modown(filp, pid, type, force);
45707 return 0;
45708 }
45709 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45710
45711 static int f_setown_ex(struct file *filp, unsigned long arg)
45712 {
45713 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45714 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45715 struct f_owner_ex owner;
45716 struct pid *pid;
45717 int type;
45718 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45719
45720 static int f_getown_ex(struct file *filp, unsigned long arg)
45721 {
45722 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45723 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45724 struct f_owner_ex owner;
45725 int ret = 0;
45726
45727 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45728 switch (cmd) {
45729 case F_DUPFD:
45730 case F_DUPFD_CLOEXEC:
45731 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45732 if (arg >= rlimit(RLIMIT_NOFILE))
45733 break;
45734 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45735 diff --git a/fs/fifo.c b/fs/fifo.c
45736 index cf6f434..3d7942c 100644
45737 --- a/fs/fifo.c
45738 +++ b/fs/fifo.c
45739 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45740 */
45741 filp->f_op = &read_pipefifo_fops;
45742 pipe->r_counter++;
45743 - if (pipe->readers++ == 0)
45744 + if (atomic_inc_return(&pipe->readers) == 1)
45745 wake_up_partner(inode);
45746
45747 - if (!pipe->writers) {
45748 + if (!atomic_read(&pipe->writers)) {
45749 if ((filp->f_flags & O_NONBLOCK)) {
45750 /* suppress POLLHUP until we have
45751 * seen a writer */
45752 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45753 * errno=ENXIO when there is no process reading the FIFO.
45754 */
45755 ret = -ENXIO;
45756 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45757 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45758 goto err;
45759
45760 filp->f_op = &write_pipefifo_fops;
45761 pipe->w_counter++;
45762 - if (!pipe->writers++)
45763 + if (atomic_inc_return(&pipe->writers) == 1)
45764 wake_up_partner(inode);
45765
45766 - if (!pipe->readers) {
45767 + if (!atomic_read(&pipe->readers)) {
45768 if (wait_for_partner(inode, &pipe->r_counter))
45769 goto err_wr;
45770 }
45771 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45772 */
45773 filp->f_op = &rdwr_pipefifo_fops;
45774
45775 - pipe->readers++;
45776 - pipe->writers++;
45777 + atomic_inc(&pipe->readers);
45778 + atomic_inc(&pipe->writers);
45779 pipe->r_counter++;
45780 pipe->w_counter++;
45781 - if (pipe->readers == 1 || pipe->writers == 1)
45782 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45783 wake_up_partner(inode);
45784 break;
45785
45786 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45787 return 0;
45788
45789 err_rd:
45790 - if (!--pipe->readers)
45791 + if (atomic_dec_and_test(&pipe->readers))
45792 wake_up_interruptible(&pipe->wait);
45793 ret = -ERESTARTSYS;
45794 goto err;
45795
45796 err_wr:
45797 - if (!--pipe->writers)
45798 + if (atomic_dec_and_test(&pipe->writers))
45799 wake_up_interruptible(&pipe->wait);
45800 ret = -ERESTARTSYS;
45801 goto err;
45802
45803 err:
45804 - if (!pipe->readers && !pipe->writers)
45805 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45806 free_pipe_info(inode);
45807
45808 err_nocleanup:
45809 diff --git a/fs/file.c b/fs/file.c
45810 index ba3f605..fade102 100644
45811 --- a/fs/file.c
45812 +++ b/fs/file.c
45813 @@ -15,6 +15,7 @@
45814 #include <linux/slab.h>
45815 #include <linux/vmalloc.h>
45816 #include <linux/file.h>
45817 +#include <linux/security.h>
45818 #include <linux/fdtable.h>
45819 #include <linux/bitops.h>
45820 #include <linux/interrupt.h>
45821 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
45822 * N.B. For clone tasks sharing a files structure, this test
45823 * will limit the total number of files that can be opened.
45824 */
45825 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45826 if (nr >= rlimit(RLIMIT_NOFILE))
45827 return -EMFILE;
45828
45829 diff --git a/fs/filesystems.c b/fs/filesystems.c
45830 index 96f2428..f5eeb8e 100644
45831 --- a/fs/filesystems.c
45832 +++ b/fs/filesystems.c
45833 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45834 int len = dot ? dot - name : strlen(name);
45835
45836 fs = __get_fs_type(name, len);
45837 +
45838 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
45839 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45840 +#else
45841 if (!fs && (request_module("%.*s", len, name) == 0))
45842 +#endif
45843 fs = __get_fs_type(name, len);
45844
45845 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45846 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45847 index e159e68..e7d2a6f 100644
45848 --- a/fs/fs_struct.c
45849 +++ b/fs/fs_struct.c
45850 @@ -4,6 +4,7 @@
45851 #include <linux/path.h>
45852 #include <linux/slab.h>
45853 #include <linux/fs_struct.h>
45854 +#include <linux/grsecurity.h>
45855 #include "internal.h"
45856
45857 static inline void path_get_longterm(struct path *path)
45858 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45859 write_seqcount_begin(&fs->seq);
45860 old_root = fs->root;
45861 fs->root = *path;
45862 + gr_set_chroot_entries(current, path);
45863 write_seqcount_end(&fs->seq);
45864 spin_unlock(&fs->lock);
45865 if (old_root.dentry)
45866 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
45867 return 1;
45868 }
45869
45870 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
45871 +{
45872 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
45873 + return 0;
45874 + *p = *new;
45875 +
45876 + gr_set_chroot_entries(task, new);
45877 +
45878 + return 1;
45879 +}
45880 +
45881 void chroot_fs_refs(struct path *old_root, struct path *new_root)
45882 {
45883 struct task_struct *g, *p;
45884 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45885 int hits = 0;
45886 spin_lock(&fs->lock);
45887 write_seqcount_begin(&fs->seq);
45888 - hits += replace_path(&fs->root, old_root, new_root);
45889 + hits += replace_root_path(p, &fs->root, old_root, new_root);
45890 hits += replace_path(&fs->pwd, old_root, new_root);
45891 write_seqcount_end(&fs->seq);
45892 while (hits--) {
45893 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
45894 task_lock(tsk);
45895 spin_lock(&fs->lock);
45896 tsk->fs = NULL;
45897 - kill = !--fs->users;
45898 + gr_clear_chroot_entries(tsk);
45899 + kill = !atomic_dec_return(&fs->users);
45900 spin_unlock(&fs->lock);
45901 task_unlock(tsk);
45902 if (kill)
45903 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45904 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45905 /* We don't need to lock fs - think why ;-) */
45906 if (fs) {
45907 - fs->users = 1;
45908 + atomic_set(&fs->users, 1);
45909 fs->in_exec = 0;
45910 spin_lock_init(&fs->lock);
45911 seqcount_init(&fs->seq);
45912 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45913 spin_lock(&old->lock);
45914 fs->root = old->root;
45915 path_get_longterm(&fs->root);
45916 + /* instead of calling gr_set_chroot_entries here,
45917 + we call it from every caller of this function
45918 + */
45919 fs->pwd = old->pwd;
45920 path_get_longterm(&fs->pwd);
45921 spin_unlock(&old->lock);
45922 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
45923
45924 task_lock(current);
45925 spin_lock(&fs->lock);
45926 - kill = !--fs->users;
45927 + kill = !atomic_dec_return(&fs->users);
45928 current->fs = new_fs;
45929 + gr_set_chroot_entries(current, &new_fs->root);
45930 spin_unlock(&fs->lock);
45931 task_unlock(current);
45932
45933 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45934
45935 int current_umask(void)
45936 {
45937 - return current->fs->umask;
45938 + return current->fs->umask | gr_acl_umask();
45939 }
45940 EXPORT_SYMBOL(current_umask);
45941
45942 /* to be mentioned only in INIT_TASK */
45943 struct fs_struct init_fs = {
45944 - .users = 1,
45945 + .users = ATOMIC_INIT(1),
45946 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45947 .seq = SEQCNT_ZERO,
45948 .umask = 0022,
45949 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
45950 task_lock(current);
45951
45952 spin_lock(&init_fs.lock);
45953 - init_fs.users++;
45954 + atomic_inc(&init_fs.users);
45955 spin_unlock(&init_fs.lock);
45956
45957 spin_lock(&fs->lock);
45958 current->fs = &init_fs;
45959 - kill = !--fs->users;
45960 + gr_set_chroot_entries(current, &current->fs->root);
45961 + kill = !atomic_dec_return(&fs->users);
45962 spin_unlock(&fs->lock);
45963
45964 task_unlock(current);
45965 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45966 index 9905350..02eaec4 100644
45967 --- a/fs/fscache/cookie.c
45968 +++ b/fs/fscache/cookie.c
45969 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45970 parent ? (char *) parent->def->name : "<no-parent>",
45971 def->name, netfs_data);
45972
45973 - fscache_stat(&fscache_n_acquires);
45974 + fscache_stat_unchecked(&fscache_n_acquires);
45975
45976 /* if there's no parent cookie, then we don't create one here either */
45977 if (!parent) {
45978 - fscache_stat(&fscache_n_acquires_null);
45979 + fscache_stat_unchecked(&fscache_n_acquires_null);
45980 _leave(" [no parent]");
45981 return NULL;
45982 }
45983 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45984 /* allocate and initialise a cookie */
45985 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45986 if (!cookie) {
45987 - fscache_stat(&fscache_n_acquires_oom);
45988 + fscache_stat_unchecked(&fscache_n_acquires_oom);
45989 _leave(" [ENOMEM]");
45990 return NULL;
45991 }
45992 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45993
45994 switch (cookie->def->type) {
45995 case FSCACHE_COOKIE_TYPE_INDEX:
45996 - fscache_stat(&fscache_n_cookie_index);
45997 + fscache_stat_unchecked(&fscache_n_cookie_index);
45998 break;
45999 case FSCACHE_COOKIE_TYPE_DATAFILE:
46000 - fscache_stat(&fscache_n_cookie_data);
46001 + fscache_stat_unchecked(&fscache_n_cookie_data);
46002 break;
46003 default:
46004 - fscache_stat(&fscache_n_cookie_special);
46005 + fscache_stat_unchecked(&fscache_n_cookie_special);
46006 break;
46007 }
46008
46009 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
46010 if (fscache_acquire_non_index_cookie(cookie) < 0) {
46011 atomic_dec(&parent->n_children);
46012 __fscache_cookie_put(cookie);
46013 - fscache_stat(&fscache_n_acquires_nobufs);
46014 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
46015 _leave(" = NULL");
46016 return NULL;
46017 }
46018 }
46019
46020 - fscache_stat(&fscache_n_acquires_ok);
46021 + fscache_stat_unchecked(&fscache_n_acquires_ok);
46022 _leave(" = %p", cookie);
46023 return cookie;
46024 }
46025 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
46026 cache = fscache_select_cache_for_object(cookie->parent);
46027 if (!cache) {
46028 up_read(&fscache_addremove_sem);
46029 - fscache_stat(&fscache_n_acquires_no_cache);
46030 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
46031 _leave(" = -ENOMEDIUM [no cache]");
46032 return -ENOMEDIUM;
46033 }
46034 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
46035 object = cache->ops->alloc_object(cache, cookie);
46036 fscache_stat_d(&fscache_n_cop_alloc_object);
46037 if (IS_ERR(object)) {
46038 - fscache_stat(&fscache_n_object_no_alloc);
46039 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
46040 ret = PTR_ERR(object);
46041 goto error;
46042 }
46043
46044 - fscache_stat(&fscache_n_object_alloc);
46045 + fscache_stat_unchecked(&fscache_n_object_alloc);
46046
46047 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
46048
46049 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
46050 struct fscache_object *object;
46051 struct hlist_node *_p;
46052
46053 - fscache_stat(&fscache_n_updates);
46054 + fscache_stat_unchecked(&fscache_n_updates);
46055
46056 if (!cookie) {
46057 - fscache_stat(&fscache_n_updates_null);
46058 + fscache_stat_unchecked(&fscache_n_updates_null);
46059 _leave(" [no cookie]");
46060 return;
46061 }
46062 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46063 struct fscache_object *object;
46064 unsigned long event;
46065
46066 - fscache_stat(&fscache_n_relinquishes);
46067 + fscache_stat_unchecked(&fscache_n_relinquishes);
46068 if (retire)
46069 - fscache_stat(&fscache_n_relinquishes_retire);
46070 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
46071
46072 if (!cookie) {
46073 - fscache_stat(&fscache_n_relinquishes_null);
46074 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
46075 _leave(" [no cookie]");
46076 return;
46077 }
46078 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46079
46080 /* wait for the cookie to finish being instantiated (or to fail) */
46081 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
46082 - fscache_stat(&fscache_n_relinquishes_waitcrt);
46083 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
46084 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
46085 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
46086 }
46087 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
46088 index f6aad48..88dcf26 100644
46089 --- a/fs/fscache/internal.h
46090 +++ b/fs/fscache/internal.h
46091 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
46092 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
46093 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
46094
46095 -extern atomic_t fscache_n_op_pend;
46096 -extern atomic_t fscache_n_op_run;
46097 -extern atomic_t fscache_n_op_enqueue;
46098 -extern atomic_t fscache_n_op_deferred_release;
46099 -extern atomic_t fscache_n_op_release;
46100 -extern atomic_t fscache_n_op_gc;
46101 -extern atomic_t fscache_n_op_cancelled;
46102 -extern atomic_t fscache_n_op_rejected;
46103 +extern atomic_unchecked_t fscache_n_op_pend;
46104 +extern atomic_unchecked_t fscache_n_op_run;
46105 +extern atomic_unchecked_t fscache_n_op_enqueue;
46106 +extern atomic_unchecked_t fscache_n_op_deferred_release;
46107 +extern atomic_unchecked_t fscache_n_op_release;
46108 +extern atomic_unchecked_t fscache_n_op_gc;
46109 +extern atomic_unchecked_t fscache_n_op_cancelled;
46110 +extern atomic_unchecked_t fscache_n_op_rejected;
46111
46112 -extern atomic_t fscache_n_attr_changed;
46113 -extern atomic_t fscache_n_attr_changed_ok;
46114 -extern atomic_t fscache_n_attr_changed_nobufs;
46115 -extern atomic_t fscache_n_attr_changed_nomem;
46116 -extern atomic_t fscache_n_attr_changed_calls;
46117 +extern atomic_unchecked_t fscache_n_attr_changed;
46118 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
46119 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
46120 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
46121 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
46122
46123 -extern atomic_t fscache_n_allocs;
46124 -extern atomic_t fscache_n_allocs_ok;
46125 -extern atomic_t fscache_n_allocs_wait;
46126 -extern atomic_t fscache_n_allocs_nobufs;
46127 -extern atomic_t fscache_n_allocs_intr;
46128 -extern atomic_t fscache_n_allocs_object_dead;
46129 -extern atomic_t fscache_n_alloc_ops;
46130 -extern atomic_t fscache_n_alloc_op_waits;
46131 +extern atomic_unchecked_t fscache_n_allocs;
46132 +extern atomic_unchecked_t fscache_n_allocs_ok;
46133 +extern atomic_unchecked_t fscache_n_allocs_wait;
46134 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
46135 +extern atomic_unchecked_t fscache_n_allocs_intr;
46136 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
46137 +extern atomic_unchecked_t fscache_n_alloc_ops;
46138 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
46139
46140 -extern atomic_t fscache_n_retrievals;
46141 -extern atomic_t fscache_n_retrievals_ok;
46142 -extern atomic_t fscache_n_retrievals_wait;
46143 -extern atomic_t fscache_n_retrievals_nodata;
46144 -extern atomic_t fscache_n_retrievals_nobufs;
46145 -extern atomic_t fscache_n_retrievals_intr;
46146 -extern atomic_t fscache_n_retrievals_nomem;
46147 -extern atomic_t fscache_n_retrievals_object_dead;
46148 -extern atomic_t fscache_n_retrieval_ops;
46149 -extern atomic_t fscache_n_retrieval_op_waits;
46150 +extern atomic_unchecked_t fscache_n_retrievals;
46151 +extern atomic_unchecked_t fscache_n_retrievals_ok;
46152 +extern atomic_unchecked_t fscache_n_retrievals_wait;
46153 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
46154 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
46155 +extern atomic_unchecked_t fscache_n_retrievals_intr;
46156 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
46157 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
46158 +extern atomic_unchecked_t fscache_n_retrieval_ops;
46159 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
46160
46161 -extern atomic_t fscache_n_stores;
46162 -extern atomic_t fscache_n_stores_ok;
46163 -extern atomic_t fscache_n_stores_again;
46164 -extern atomic_t fscache_n_stores_nobufs;
46165 -extern atomic_t fscache_n_stores_oom;
46166 -extern atomic_t fscache_n_store_ops;
46167 -extern atomic_t fscache_n_store_calls;
46168 -extern atomic_t fscache_n_store_pages;
46169 -extern atomic_t fscache_n_store_radix_deletes;
46170 -extern atomic_t fscache_n_store_pages_over_limit;
46171 +extern atomic_unchecked_t fscache_n_stores;
46172 +extern atomic_unchecked_t fscache_n_stores_ok;
46173 +extern atomic_unchecked_t fscache_n_stores_again;
46174 +extern atomic_unchecked_t fscache_n_stores_nobufs;
46175 +extern atomic_unchecked_t fscache_n_stores_oom;
46176 +extern atomic_unchecked_t fscache_n_store_ops;
46177 +extern atomic_unchecked_t fscache_n_store_calls;
46178 +extern atomic_unchecked_t fscache_n_store_pages;
46179 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
46180 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
46181
46182 -extern atomic_t fscache_n_store_vmscan_not_storing;
46183 -extern atomic_t fscache_n_store_vmscan_gone;
46184 -extern atomic_t fscache_n_store_vmscan_busy;
46185 -extern atomic_t fscache_n_store_vmscan_cancelled;
46186 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46187 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
46188 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
46189 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46190
46191 -extern atomic_t fscache_n_marks;
46192 -extern atomic_t fscache_n_uncaches;
46193 +extern atomic_unchecked_t fscache_n_marks;
46194 +extern atomic_unchecked_t fscache_n_uncaches;
46195
46196 -extern atomic_t fscache_n_acquires;
46197 -extern atomic_t fscache_n_acquires_null;
46198 -extern atomic_t fscache_n_acquires_no_cache;
46199 -extern atomic_t fscache_n_acquires_ok;
46200 -extern atomic_t fscache_n_acquires_nobufs;
46201 -extern atomic_t fscache_n_acquires_oom;
46202 +extern atomic_unchecked_t fscache_n_acquires;
46203 +extern atomic_unchecked_t fscache_n_acquires_null;
46204 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
46205 +extern atomic_unchecked_t fscache_n_acquires_ok;
46206 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
46207 +extern atomic_unchecked_t fscache_n_acquires_oom;
46208
46209 -extern atomic_t fscache_n_updates;
46210 -extern atomic_t fscache_n_updates_null;
46211 -extern atomic_t fscache_n_updates_run;
46212 +extern atomic_unchecked_t fscache_n_updates;
46213 +extern atomic_unchecked_t fscache_n_updates_null;
46214 +extern atomic_unchecked_t fscache_n_updates_run;
46215
46216 -extern atomic_t fscache_n_relinquishes;
46217 -extern atomic_t fscache_n_relinquishes_null;
46218 -extern atomic_t fscache_n_relinquishes_waitcrt;
46219 -extern atomic_t fscache_n_relinquishes_retire;
46220 +extern atomic_unchecked_t fscache_n_relinquishes;
46221 +extern atomic_unchecked_t fscache_n_relinquishes_null;
46222 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46223 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
46224
46225 -extern atomic_t fscache_n_cookie_index;
46226 -extern atomic_t fscache_n_cookie_data;
46227 -extern atomic_t fscache_n_cookie_special;
46228 +extern atomic_unchecked_t fscache_n_cookie_index;
46229 +extern atomic_unchecked_t fscache_n_cookie_data;
46230 +extern atomic_unchecked_t fscache_n_cookie_special;
46231
46232 -extern atomic_t fscache_n_object_alloc;
46233 -extern atomic_t fscache_n_object_no_alloc;
46234 -extern atomic_t fscache_n_object_lookups;
46235 -extern atomic_t fscache_n_object_lookups_negative;
46236 -extern atomic_t fscache_n_object_lookups_positive;
46237 -extern atomic_t fscache_n_object_lookups_timed_out;
46238 -extern atomic_t fscache_n_object_created;
46239 -extern atomic_t fscache_n_object_avail;
46240 -extern atomic_t fscache_n_object_dead;
46241 +extern atomic_unchecked_t fscache_n_object_alloc;
46242 +extern atomic_unchecked_t fscache_n_object_no_alloc;
46243 +extern atomic_unchecked_t fscache_n_object_lookups;
46244 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
46245 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
46246 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
46247 +extern atomic_unchecked_t fscache_n_object_created;
46248 +extern atomic_unchecked_t fscache_n_object_avail;
46249 +extern atomic_unchecked_t fscache_n_object_dead;
46250
46251 -extern atomic_t fscache_n_checkaux_none;
46252 -extern atomic_t fscache_n_checkaux_okay;
46253 -extern atomic_t fscache_n_checkaux_update;
46254 -extern atomic_t fscache_n_checkaux_obsolete;
46255 +extern atomic_unchecked_t fscache_n_checkaux_none;
46256 +extern atomic_unchecked_t fscache_n_checkaux_okay;
46257 +extern atomic_unchecked_t fscache_n_checkaux_update;
46258 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
46259
46260 extern atomic_t fscache_n_cop_alloc_object;
46261 extern atomic_t fscache_n_cop_lookup_object;
46262 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
46263 atomic_inc(stat);
46264 }
46265
46266 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
46267 +{
46268 + atomic_inc_unchecked(stat);
46269 +}
46270 +
46271 static inline void fscache_stat_d(atomic_t *stat)
46272 {
46273 atomic_dec(stat);
46274 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
46275
46276 #define __fscache_stat(stat) (NULL)
46277 #define fscache_stat(stat) do {} while (0)
46278 +#define fscache_stat_unchecked(stat) do {} while (0)
46279 #define fscache_stat_d(stat) do {} while (0)
46280 #endif
46281
46282 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
46283 index b6b897c..0ffff9c 100644
46284 --- a/fs/fscache/object.c
46285 +++ b/fs/fscache/object.c
46286 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46287 /* update the object metadata on disk */
46288 case FSCACHE_OBJECT_UPDATING:
46289 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
46290 - fscache_stat(&fscache_n_updates_run);
46291 + fscache_stat_unchecked(&fscache_n_updates_run);
46292 fscache_stat(&fscache_n_cop_update_object);
46293 object->cache->ops->update_object(object);
46294 fscache_stat_d(&fscache_n_cop_update_object);
46295 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46296 spin_lock(&object->lock);
46297 object->state = FSCACHE_OBJECT_DEAD;
46298 spin_unlock(&object->lock);
46299 - fscache_stat(&fscache_n_object_dead);
46300 + fscache_stat_unchecked(&fscache_n_object_dead);
46301 goto terminal_transit;
46302
46303 /* handle the parent cache of this object being withdrawn from
46304 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46305 spin_lock(&object->lock);
46306 object->state = FSCACHE_OBJECT_DEAD;
46307 spin_unlock(&object->lock);
46308 - fscache_stat(&fscache_n_object_dead);
46309 + fscache_stat_unchecked(&fscache_n_object_dead);
46310 goto terminal_transit;
46311
46312 /* complain about the object being woken up once it is
46313 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46314 parent->cookie->def->name, cookie->def->name,
46315 object->cache->tag->name);
46316
46317 - fscache_stat(&fscache_n_object_lookups);
46318 + fscache_stat_unchecked(&fscache_n_object_lookups);
46319 fscache_stat(&fscache_n_cop_lookup_object);
46320 ret = object->cache->ops->lookup_object(object);
46321 fscache_stat_d(&fscache_n_cop_lookup_object);
46322 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46323 if (ret == -ETIMEDOUT) {
46324 /* probably stuck behind another object, so move this one to
46325 * the back of the queue */
46326 - fscache_stat(&fscache_n_object_lookups_timed_out);
46327 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
46328 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46329 }
46330
46331 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
46332
46333 spin_lock(&object->lock);
46334 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46335 - fscache_stat(&fscache_n_object_lookups_negative);
46336 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
46337
46338 /* transit here to allow write requests to begin stacking up
46339 * and read requests to begin returning ENODATA */
46340 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
46341 * result, in which case there may be data available */
46342 spin_lock(&object->lock);
46343 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46344 - fscache_stat(&fscache_n_object_lookups_positive);
46345 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
46346
46347 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
46348
46349 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
46350 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46351 } else {
46352 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
46353 - fscache_stat(&fscache_n_object_created);
46354 + fscache_stat_unchecked(&fscache_n_object_created);
46355
46356 object->state = FSCACHE_OBJECT_AVAILABLE;
46357 spin_unlock(&object->lock);
46358 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
46359 fscache_enqueue_dependents(object);
46360
46361 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
46362 - fscache_stat(&fscache_n_object_avail);
46363 + fscache_stat_unchecked(&fscache_n_object_avail);
46364
46365 _leave("");
46366 }
46367 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46368 enum fscache_checkaux result;
46369
46370 if (!object->cookie->def->check_aux) {
46371 - fscache_stat(&fscache_n_checkaux_none);
46372 + fscache_stat_unchecked(&fscache_n_checkaux_none);
46373 return FSCACHE_CHECKAUX_OKAY;
46374 }
46375
46376 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46377 switch (result) {
46378 /* entry okay as is */
46379 case FSCACHE_CHECKAUX_OKAY:
46380 - fscache_stat(&fscache_n_checkaux_okay);
46381 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
46382 break;
46383
46384 /* entry requires update */
46385 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
46386 - fscache_stat(&fscache_n_checkaux_update);
46387 + fscache_stat_unchecked(&fscache_n_checkaux_update);
46388 break;
46389
46390 /* entry requires deletion */
46391 case FSCACHE_CHECKAUX_OBSOLETE:
46392 - fscache_stat(&fscache_n_checkaux_obsolete);
46393 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
46394 break;
46395
46396 default:
46397 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
46398 index 30afdfa..2256596 100644
46399 --- a/fs/fscache/operation.c
46400 +++ b/fs/fscache/operation.c
46401 @@ -17,7 +17,7 @@
46402 #include <linux/slab.h>
46403 #include "internal.h"
46404
46405 -atomic_t fscache_op_debug_id;
46406 +atomic_unchecked_t fscache_op_debug_id;
46407 EXPORT_SYMBOL(fscache_op_debug_id);
46408
46409 /**
46410 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46411 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46412 ASSERTCMP(atomic_read(&op->usage), >, 0);
46413
46414 - fscache_stat(&fscache_n_op_enqueue);
46415 + fscache_stat_unchecked(&fscache_n_op_enqueue);
46416 switch (op->flags & FSCACHE_OP_TYPE) {
46417 case FSCACHE_OP_ASYNC:
46418 _debug("queue async");
46419 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46420 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46421 if (op->processor)
46422 fscache_enqueue_operation(op);
46423 - fscache_stat(&fscache_n_op_run);
46424 + fscache_stat_unchecked(&fscache_n_op_run);
46425 }
46426
46427 /*
46428 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46429 if (object->n_ops > 1) {
46430 atomic_inc(&op->usage);
46431 list_add_tail(&op->pend_link, &object->pending_ops);
46432 - fscache_stat(&fscache_n_op_pend);
46433 + fscache_stat_unchecked(&fscache_n_op_pend);
46434 } else if (!list_empty(&object->pending_ops)) {
46435 atomic_inc(&op->usage);
46436 list_add_tail(&op->pend_link, &object->pending_ops);
46437 - fscache_stat(&fscache_n_op_pend);
46438 + fscache_stat_unchecked(&fscache_n_op_pend);
46439 fscache_start_operations(object);
46440 } else {
46441 ASSERTCMP(object->n_in_progress, ==, 0);
46442 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46443 object->n_exclusive++; /* reads and writes must wait */
46444 atomic_inc(&op->usage);
46445 list_add_tail(&op->pend_link, &object->pending_ops);
46446 - fscache_stat(&fscache_n_op_pend);
46447 + fscache_stat_unchecked(&fscache_n_op_pend);
46448 ret = 0;
46449 } else {
46450 /* not allowed to submit ops in any other state */
46451 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46452 if (object->n_exclusive > 0) {
46453 atomic_inc(&op->usage);
46454 list_add_tail(&op->pend_link, &object->pending_ops);
46455 - fscache_stat(&fscache_n_op_pend);
46456 + fscache_stat_unchecked(&fscache_n_op_pend);
46457 } else if (!list_empty(&object->pending_ops)) {
46458 atomic_inc(&op->usage);
46459 list_add_tail(&op->pend_link, &object->pending_ops);
46460 - fscache_stat(&fscache_n_op_pend);
46461 + fscache_stat_unchecked(&fscache_n_op_pend);
46462 fscache_start_operations(object);
46463 } else {
46464 ASSERTCMP(object->n_exclusive, ==, 0);
46465 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46466 object->n_ops++;
46467 atomic_inc(&op->usage);
46468 list_add_tail(&op->pend_link, &object->pending_ops);
46469 - fscache_stat(&fscache_n_op_pend);
46470 + fscache_stat_unchecked(&fscache_n_op_pend);
46471 ret = 0;
46472 } else if (object->state == FSCACHE_OBJECT_DYING ||
46473 object->state == FSCACHE_OBJECT_LC_DYING ||
46474 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46475 - fscache_stat(&fscache_n_op_rejected);
46476 + fscache_stat_unchecked(&fscache_n_op_rejected);
46477 ret = -ENOBUFS;
46478 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46479 fscache_report_unexpected_submission(object, op, ostate);
46480 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46481
46482 ret = -EBUSY;
46483 if (!list_empty(&op->pend_link)) {
46484 - fscache_stat(&fscache_n_op_cancelled);
46485 + fscache_stat_unchecked(&fscache_n_op_cancelled);
46486 list_del_init(&op->pend_link);
46487 object->n_ops--;
46488 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46489 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46490 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46491 BUG();
46492
46493 - fscache_stat(&fscache_n_op_release);
46494 + fscache_stat_unchecked(&fscache_n_op_release);
46495
46496 if (op->release) {
46497 op->release(op);
46498 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46499 * lock, and defer it otherwise */
46500 if (!spin_trylock(&object->lock)) {
46501 _debug("defer put");
46502 - fscache_stat(&fscache_n_op_deferred_release);
46503 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
46504
46505 cache = object->cache;
46506 spin_lock(&cache->op_gc_list_lock);
46507 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46508
46509 _debug("GC DEFERRED REL OBJ%x OP%x",
46510 object->debug_id, op->debug_id);
46511 - fscache_stat(&fscache_n_op_gc);
46512 + fscache_stat_unchecked(&fscache_n_op_gc);
46513
46514 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46515
46516 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46517 index 3f7a59b..cf196cc 100644
46518 --- a/fs/fscache/page.c
46519 +++ b/fs/fscache/page.c
46520 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46521 val = radix_tree_lookup(&cookie->stores, page->index);
46522 if (!val) {
46523 rcu_read_unlock();
46524 - fscache_stat(&fscache_n_store_vmscan_not_storing);
46525 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46526 __fscache_uncache_page(cookie, page);
46527 return true;
46528 }
46529 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46530 spin_unlock(&cookie->stores_lock);
46531
46532 if (xpage) {
46533 - fscache_stat(&fscache_n_store_vmscan_cancelled);
46534 - fscache_stat(&fscache_n_store_radix_deletes);
46535 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46536 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46537 ASSERTCMP(xpage, ==, page);
46538 } else {
46539 - fscache_stat(&fscache_n_store_vmscan_gone);
46540 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46541 }
46542
46543 wake_up_bit(&cookie->flags, 0);
46544 @@ -107,7 +107,7 @@ page_busy:
46545 /* we might want to wait here, but that could deadlock the allocator as
46546 * the work threads writing to the cache may all end up sleeping
46547 * on memory allocation */
46548 - fscache_stat(&fscache_n_store_vmscan_busy);
46549 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46550 return false;
46551 }
46552 EXPORT_SYMBOL(__fscache_maybe_release_page);
46553 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46554 FSCACHE_COOKIE_STORING_TAG);
46555 if (!radix_tree_tag_get(&cookie->stores, page->index,
46556 FSCACHE_COOKIE_PENDING_TAG)) {
46557 - fscache_stat(&fscache_n_store_radix_deletes);
46558 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46559 xpage = radix_tree_delete(&cookie->stores, page->index);
46560 }
46561 spin_unlock(&cookie->stores_lock);
46562 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46563
46564 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46565
46566 - fscache_stat(&fscache_n_attr_changed_calls);
46567 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46568
46569 if (fscache_object_is_active(object)) {
46570 fscache_stat(&fscache_n_cop_attr_changed);
46571 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46572
46573 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46574
46575 - fscache_stat(&fscache_n_attr_changed);
46576 + fscache_stat_unchecked(&fscache_n_attr_changed);
46577
46578 op = kzalloc(sizeof(*op), GFP_KERNEL);
46579 if (!op) {
46580 - fscache_stat(&fscache_n_attr_changed_nomem);
46581 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46582 _leave(" = -ENOMEM");
46583 return -ENOMEM;
46584 }
46585 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46586 if (fscache_submit_exclusive_op(object, op) < 0)
46587 goto nobufs;
46588 spin_unlock(&cookie->lock);
46589 - fscache_stat(&fscache_n_attr_changed_ok);
46590 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46591 fscache_put_operation(op);
46592 _leave(" = 0");
46593 return 0;
46594 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46595 nobufs:
46596 spin_unlock(&cookie->lock);
46597 kfree(op);
46598 - fscache_stat(&fscache_n_attr_changed_nobufs);
46599 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46600 _leave(" = %d", -ENOBUFS);
46601 return -ENOBUFS;
46602 }
46603 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46604 /* allocate a retrieval operation and attempt to submit it */
46605 op = kzalloc(sizeof(*op), GFP_NOIO);
46606 if (!op) {
46607 - fscache_stat(&fscache_n_retrievals_nomem);
46608 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46609 return NULL;
46610 }
46611
46612 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46613 return 0;
46614 }
46615
46616 - fscache_stat(&fscache_n_retrievals_wait);
46617 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
46618
46619 jif = jiffies;
46620 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46621 fscache_wait_bit_interruptible,
46622 TASK_INTERRUPTIBLE) != 0) {
46623 - fscache_stat(&fscache_n_retrievals_intr);
46624 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46625 _leave(" = -ERESTARTSYS");
46626 return -ERESTARTSYS;
46627 }
46628 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46629 */
46630 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46631 struct fscache_retrieval *op,
46632 - atomic_t *stat_op_waits,
46633 - atomic_t *stat_object_dead)
46634 + atomic_unchecked_t *stat_op_waits,
46635 + atomic_unchecked_t *stat_object_dead)
46636 {
46637 int ret;
46638
46639 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46640 goto check_if_dead;
46641
46642 _debug(">>> WT");
46643 - fscache_stat(stat_op_waits);
46644 + fscache_stat_unchecked(stat_op_waits);
46645 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46646 fscache_wait_bit_interruptible,
46647 TASK_INTERRUPTIBLE) < 0) {
46648 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46649
46650 check_if_dead:
46651 if (unlikely(fscache_object_is_dead(object))) {
46652 - fscache_stat(stat_object_dead);
46653 + fscache_stat_unchecked(stat_object_dead);
46654 return -ENOBUFS;
46655 }
46656 return 0;
46657 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46658
46659 _enter("%p,%p,,,", cookie, page);
46660
46661 - fscache_stat(&fscache_n_retrievals);
46662 + fscache_stat_unchecked(&fscache_n_retrievals);
46663
46664 if (hlist_empty(&cookie->backing_objects))
46665 goto nobufs;
46666 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46667 goto nobufs_unlock;
46668 spin_unlock(&cookie->lock);
46669
46670 - fscache_stat(&fscache_n_retrieval_ops);
46671 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46672
46673 /* pin the netfs read context in case we need to do the actual netfs
46674 * read because we've encountered a cache read failure */
46675 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46676
46677 error:
46678 if (ret == -ENOMEM)
46679 - fscache_stat(&fscache_n_retrievals_nomem);
46680 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46681 else if (ret == -ERESTARTSYS)
46682 - fscache_stat(&fscache_n_retrievals_intr);
46683 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46684 else if (ret == -ENODATA)
46685 - fscache_stat(&fscache_n_retrievals_nodata);
46686 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46687 else if (ret < 0)
46688 - fscache_stat(&fscache_n_retrievals_nobufs);
46689 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46690 else
46691 - fscache_stat(&fscache_n_retrievals_ok);
46692 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46693
46694 fscache_put_retrieval(op);
46695 _leave(" = %d", ret);
46696 @@ -429,7 +429,7 @@ nobufs_unlock:
46697 spin_unlock(&cookie->lock);
46698 kfree(op);
46699 nobufs:
46700 - fscache_stat(&fscache_n_retrievals_nobufs);
46701 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46702 _leave(" = -ENOBUFS");
46703 return -ENOBUFS;
46704 }
46705 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46706
46707 _enter("%p,,%d,,,", cookie, *nr_pages);
46708
46709 - fscache_stat(&fscache_n_retrievals);
46710 + fscache_stat_unchecked(&fscache_n_retrievals);
46711
46712 if (hlist_empty(&cookie->backing_objects))
46713 goto nobufs;
46714 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46715 goto nobufs_unlock;
46716 spin_unlock(&cookie->lock);
46717
46718 - fscache_stat(&fscache_n_retrieval_ops);
46719 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46720
46721 /* pin the netfs read context in case we need to do the actual netfs
46722 * read because we've encountered a cache read failure */
46723 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46724
46725 error:
46726 if (ret == -ENOMEM)
46727 - fscache_stat(&fscache_n_retrievals_nomem);
46728 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46729 else if (ret == -ERESTARTSYS)
46730 - fscache_stat(&fscache_n_retrievals_intr);
46731 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46732 else if (ret == -ENODATA)
46733 - fscache_stat(&fscache_n_retrievals_nodata);
46734 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46735 else if (ret < 0)
46736 - fscache_stat(&fscache_n_retrievals_nobufs);
46737 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46738 else
46739 - fscache_stat(&fscache_n_retrievals_ok);
46740 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46741
46742 fscache_put_retrieval(op);
46743 _leave(" = %d", ret);
46744 @@ -545,7 +545,7 @@ nobufs_unlock:
46745 spin_unlock(&cookie->lock);
46746 kfree(op);
46747 nobufs:
46748 - fscache_stat(&fscache_n_retrievals_nobufs);
46749 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46750 _leave(" = -ENOBUFS");
46751 return -ENOBUFS;
46752 }
46753 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46754
46755 _enter("%p,%p,,,", cookie, page);
46756
46757 - fscache_stat(&fscache_n_allocs);
46758 + fscache_stat_unchecked(&fscache_n_allocs);
46759
46760 if (hlist_empty(&cookie->backing_objects))
46761 goto nobufs;
46762 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46763 goto nobufs_unlock;
46764 spin_unlock(&cookie->lock);
46765
46766 - fscache_stat(&fscache_n_alloc_ops);
46767 + fscache_stat_unchecked(&fscache_n_alloc_ops);
46768
46769 ret = fscache_wait_for_retrieval_activation(
46770 object, op,
46771 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46772
46773 error:
46774 if (ret == -ERESTARTSYS)
46775 - fscache_stat(&fscache_n_allocs_intr);
46776 + fscache_stat_unchecked(&fscache_n_allocs_intr);
46777 else if (ret < 0)
46778 - fscache_stat(&fscache_n_allocs_nobufs);
46779 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46780 else
46781 - fscache_stat(&fscache_n_allocs_ok);
46782 + fscache_stat_unchecked(&fscache_n_allocs_ok);
46783
46784 fscache_put_retrieval(op);
46785 _leave(" = %d", ret);
46786 @@ -625,7 +625,7 @@ nobufs_unlock:
46787 spin_unlock(&cookie->lock);
46788 kfree(op);
46789 nobufs:
46790 - fscache_stat(&fscache_n_allocs_nobufs);
46791 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46792 _leave(" = -ENOBUFS");
46793 return -ENOBUFS;
46794 }
46795 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46796
46797 spin_lock(&cookie->stores_lock);
46798
46799 - fscache_stat(&fscache_n_store_calls);
46800 + fscache_stat_unchecked(&fscache_n_store_calls);
46801
46802 /* find a page to store */
46803 page = NULL;
46804 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46805 page = results[0];
46806 _debug("gang %d [%lx]", n, page->index);
46807 if (page->index > op->store_limit) {
46808 - fscache_stat(&fscache_n_store_pages_over_limit);
46809 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46810 goto superseded;
46811 }
46812
46813 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46814 spin_unlock(&cookie->stores_lock);
46815 spin_unlock(&object->lock);
46816
46817 - fscache_stat(&fscache_n_store_pages);
46818 + fscache_stat_unchecked(&fscache_n_store_pages);
46819 fscache_stat(&fscache_n_cop_write_page);
46820 ret = object->cache->ops->write_page(op, page);
46821 fscache_stat_d(&fscache_n_cop_write_page);
46822 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46823 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46824 ASSERT(PageFsCache(page));
46825
46826 - fscache_stat(&fscache_n_stores);
46827 + fscache_stat_unchecked(&fscache_n_stores);
46828
46829 op = kzalloc(sizeof(*op), GFP_NOIO);
46830 if (!op)
46831 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46832 spin_unlock(&cookie->stores_lock);
46833 spin_unlock(&object->lock);
46834
46835 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46836 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46837 op->store_limit = object->store_limit;
46838
46839 if (fscache_submit_op(object, &op->op) < 0)
46840 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46841
46842 spin_unlock(&cookie->lock);
46843 radix_tree_preload_end();
46844 - fscache_stat(&fscache_n_store_ops);
46845 - fscache_stat(&fscache_n_stores_ok);
46846 + fscache_stat_unchecked(&fscache_n_store_ops);
46847 + fscache_stat_unchecked(&fscache_n_stores_ok);
46848
46849 /* the work queue now carries its own ref on the object */
46850 fscache_put_operation(&op->op);
46851 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46852 return 0;
46853
46854 already_queued:
46855 - fscache_stat(&fscache_n_stores_again);
46856 + fscache_stat_unchecked(&fscache_n_stores_again);
46857 already_pending:
46858 spin_unlock(&cookie->stores_lock);
46859 spin_unlock(&object->lock);
46860 spin_unlock(&cookie->lock);
46861 radix_tree_preload_end();
46862 kfree(op);
46863 - fscache_stat(&fscache_n_stores_ok);
46864 + fscache_stat_unchecked(&fscache_n_stores_ok);
46865 _leave(" = 0");
46866 return 0;
46867
46868 @@ -851,14 +851,14 @@ nobufs:
46869 spin_unlock(&cookie->lock);
46870 radix_tree_preload_end();
46871 kfree(op);
46872 - fscache_stat(&fscache_n_stores_nobufs);
46873 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
46874 _leave(" = -ENOBUFS");
46875 return -ENOBUFS;
46876
46877 nomem_free:
46878 kfree(op);
46879 nomem:
46880 - fscache_stat(&fscache_n_stores_oom);
46881 + fscache_stat_unchecked(&fscache_n_stores_oom);
46882 _leave(" = -ENOMEM");
46883 return -ENOMEM;
46884 }
46885 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46886 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46887 ASSERTCMP(page, !=, NULL);
46888
46889 - fscache_stat(&fscache_n_uncaches);
46890 + fscache_stat_unchecked(&fscache_n_uncaches);
46891
46892 /* cache withdrawal may beat us to it */
46893 if (!PageFsCache(page))
46894 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46895 unsigned long loop;
46896
46897 #ifdef CONFIG_FSCACHE_STATS
46898 - atomic_add(pagevec->nr, &fscache_n_marks);
46899 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46900 #endif
46901
46902 for (loop = 0; loop < pagevec->nr; loop++) {
46903 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46904 index 4765190..2a067f2 100644
46905 --- a/fs/fscache/stats.c
46906 +++ b/fs/fscache/stats.c
46907 @@ -18,95 +18,95 @@
46908 /*
46909 * operation counters
46910 */
46911 -atomic_t fscache_n_op_pend;
46912 -atomic_t fscache_n_op_run;
46913 -atomic_t fscache_n_op_enqueue;
46914 -atomic_t fscache_n_op_requeue;
46915 -atomic_t fscache_n_op_deferred_release;
46916 -atomic_t fscache_n_op_release;
46917 -atomic_t fscache_n_op_gc;
46918 -atomic_t fscache_n_op_cancelled;
46919 -atomic_t fscache_n_op_rejected;
46920 +atomic_unchecked_t fscache_n_op_pend;
46921 +atomic_unchecked_t fscache_n_op_run;
46922 +atomic_unchecked_t fscache_n_op_enqueue;
46923 +atomic_unchecked_t fscache_n_op_requeue;
46924 +atomic_unchecked_t fscache_n_op_deferred_release;
46925 +atomic_unchecked_t fscache_n_op_release;
46926 +atomic_unchecked_t fscache_n_op_gc;
46927 +atomic_unchecked_t fscache_n_op_cancelled;
46928 +atomic_unchecked_t fscache_n_op_rejected;
46929
46930 -atomic_t fscache_n_attr_changed;
46931 -atomic_t fscache_n_attr_changed_ok;
46932 -atomic_t fscache_n_attr_changed_nobufs;
46933 -atomic_t fscache_n_attr_changed_nomem;
46934 -atomic_t fscache_n_attr_changed_calls;
46935 +atomic_unchecked_t fscache_n_attr_changed;
46936 +atomic_unchecked_t fscache_n_attr_changed_ok;
46937 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
46938 +atomic_unchecked_t fscache_n_attr_changed_nomem;
46939 +atomic_unchecked_t fscache_n_attr_changed_calls;
46940
46941 -atomic_t fscache_n_allocs;
46942 -atomic_t fscache_n_allocs_ok;
46943 -atomic_t fscache_n_allocs_wait;
46944 -atomic_t fscache_n_allocs_nobufs;
46945 -atomic_t fscache_n_allocs_intr;
46946 -atomic_t fscache_n_allocs_object_dead;
46947 -atomic_t fscache_n_alloc_ops;
46948 -atomic_t fscache_n_alloc_op_waits;
46949 +atomic_unchecked_t fscache_n_allocs;
46950 +atomic_unchecked_t fscache_n_allocs_ok;
46951 +atomic_unchecked_t fscache_n_allocs_wait;
46952 +atomic_unchecked_t fscache_n_allocs_nobufs;
46953 +atomic_unchecked_t fscache_n_allocs_intr;
46954 +atomic_unchecked_t fscache_n_allocs_object_dead;
46955 +atomic_unchecked_t fscache_n_alloc_ops;
46956 +atomic_unchecked_t fscache_n_alloc_op_waits;
46957
46958 -atomic_t fscache_n_retrievals;
46959 -atomic_t fscache_n_retrievals_ok;
46960 -atomic_t fscache_n_retrievals_wait;
46961 -atomic_t fscache_n_retrievals_nodata;
46962 -atomic_t fscache_n_retrievals_nobufs;
46963 -atomic_t fscache_n_retrievals_intr;
46964 -atomic_t fscache_n_retrievals_nomem;
46965 -atomic_t fscache_n_retrievals_object_dead;
46966 -atomic_t fscache_n_retrieval_ops;
46967 -atomic_t fscache_n_retrieval_op_waits;
46968 +atomic_unchecked_t fscache_n_retrievals;
46969 +atomic_unchecked_t fscache_n_retrievals_ok;
46970 +atomic_unchecked_t fscache_n_retrievals_wait;
46971 +atomic_unchecked_t fscache_n_retrievals_nodata;
46972 +atomic_unchecked_t fscache_n_retrievals_nobufs;
46973 +atomic_unchecked_t fscache_n_retrievals_intr;
46974 +atomic_unchecked_t fscache_n_retrievals_nomem;
46975 +atomic_unchecked_t fscache_n_retrievals_object_dead;
46976 +atomic_unchecked_t fscache_n_retrieval_ops;
46977 +atomic_unchecked_t fscache_n_retrieval_op_waits;
46978
46979 -atomic_t fscache_n_stores;
46980 -atomic_t fscache_n_stores_ok;
46981 -atomic_t fscache_n_stores_again;
46982 -atomic_t fscache_n_stores_nobufs;
46983 -atomic_t fscache_n_stores_oom;
46984 -atomic_t fscache_n_store_ops;
46985 -atomic_t fscache_n_store_calls;
46986 -atomic_t fscache_n_store_pages;
46987 -atomic_t fscache_n_store_radix_deletes;
46988 -atomic_t fscache_n_store_pages_over_limit;
46989 +atomic_unchecked_t fscache_n_stores;
46990 +atomic_unchecked_t fscache_n_stores_ok;
46991 +atomic_unchecked_t fscache_n_stores_again;
46992 +atomic_unchecked_t fscache_n_stores_nobufs;
46993 +atomic_unchecked_t fscache_n_stores_oom;
46994 +atomic_unchecked_t fscache_n_store_ops;
46995 +atomic_unchecked_t fscache_n_store_calls;
46996 +atomic_unchecked_t fscache_n_store_pages;
46997 +atomic_unchecked_t fscache_n_store_radix_deletes;
46998 +atomic_unchecked_t fscache_n_store_pages_over_limit;
46999
47000 -atomic_t fscache_n_store_vmscan_not_storing;
47001 -atomic_t fscache_n_store_vmscan_gone;
47002 -atomic_t fscache_n_store_vmscan_busy;
47003 -atomic_t fscache_n_store_vmscan_cancelled;
47004 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47005 +atomic_unchecked_t fscache_n_store_vmscan_gone;
47006 +atomic_unchecked_t fscache_n_store_vmscan_busy;
47007 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47008
47009 -atomic_t fscache_n_marks;
47010 -atomic_t fscache_n_uncaches;
47011 +atomic_unchecked_t fscache_n_marks;
47012 +atomic_unchecked_t fscache_n_uncaches;
47013
47014 -atomic_t fscache_n_acquires;
47015 -atomic_t fscache_n_acquires_null;
47016 -atomic_t fscache_n_acquires_no_cache;
47017 -atomic_t fscache_n_acquires_ok;
47018 -atomic_t fscache_n_acquires_nobufs;
47019 -atomic_t fscache_n_acquires_oom;
47020 +atomic_unchecked_t fscache_n_acquires;
47021 +atomic_unchecked_t fscache_n_acquires_null;
47022 +atomic_unchecked_t fscache_n_acquires_no_cache;
47023 +atomic_unchecked_t fscache_n_acquires_ok;
47024 +atomic_unchecked_t fscache_n_acquires_nobufs;
47025 +atomic_unchecked_t fscache_n_acquires_oom;
47026
47027 -atomic_t fscache_n_updates;
47028 -atomic_t fscache_n_updates_null;
47029 -atomic_t fscache_n_updates_run;
47030 +atomic_unchecked_t fscache_n_updates;
47031 +atomic_unchecked_t fscache_n_updates_null;
47032 +atomic_unchecked_t fscache_n_updates_run;
47033
47034 -atomic_t fscache_n_relinquishes;
47035 -atomic_t fscache_n_relinquishes_null;
47036 -atomic_t fscache_n_relinquishes_waitcrt;
47037 -atomic_t fscache_n_relinquishes_retire;
47038 +atomic_unchecked_t fscache_n_relinquishes;
47039 +atomic_unchecked_t fscache_n_relinquishes_null;
47040 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47041 +atomic_unchecked_t fscache_n_relinquishes_retire;
47042
47043 -atomic_t fscache_n_cookie_index;
47044 -atomic_t fscache_n_cookie_data;
47045 -atomic_t fscache_n_cookie_special;
47046 +atomic_unchecked_t fscache_n_cookie_index;
47047 +atomic_unchecked_t fscache_n_cookie_data;
47048 +atomic_unchecked_t fscache_n_cookie_special;
47049
47050 -atomic_t fscache_n_object_alloc;
47051 -atomic_t fscache_n_object_no_alloc;
47052 -atomic_t fscache_n_object_lookups;
47053 -atomic_t fscache_n_object_lookups_negative;
47054 -atomic_t fscache_n_object_lookups_positive;
47055 -atomic_t fscache_n_object_lookups_timed_out;
47056 -atomic_t fscache_n_object_created;
47057 -atomic_t fscache_n_object_avail;
47058 -atomic_t fscache_n_object_dead;
47059 +atomic_unchecked_t fscache_n_object_alloc;
47060 +atomic_unchecked_t fscache_n_object_no_alloc;
47061 +atomic_unchecked_t fscache_n_object_lookups;
47062 +atomic_unchecked_t fscache_n_object_lookups_negative;
47063 +atomic_unchecked_t fscache_n_object_lookups_positive;
47064 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
47065 +atomic_unchecked_t fscache_n_object_created;
47066 +atomic_unchecked_t fscache_n_object_avail;
47067 +atomic_unchecked_t fscache_n_object_dead;
47068
47069 -atomic_t fscache_n_checkaux_none;
47070 -atomic_t fscache_n_checkaux_okay;
47071 -atomic_t fscache_n_checkaux_update;
47072 -atomic_t fscache_n_checkaux_obsolete;
47073 +atomic_unchecked_t fscache_n_checkaux_none;
47074 +atomic_unchecked_t fscache_n_checkaux_okay;
47075 +atomic_unchecked_t fscache_n_checkaux_update;
47076 +atomic_unchecked_t fscache_n_checkaux_obsolete;
47077
47078 atomic_t fscache_n_cop_alloc_object;
47079 atomic_t fscache_n_cop_lookup_object;
47080 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
47081 seq_puts(m, "FS-Cache statistics\n");
47082
47083 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
47084 - atomic_read(&fscache_n_cookie_index),
47085 - atomic_read(&fscache_n_cookie_data),
47086 - atomic_read(&fscache_n_cookie_special));
47087 + atomic_read_unchecked(&fscache_n_cookie_index),
47088 + atomic_read_unchecked(&fscache_n_cookie_data),
47089 + atomic_read_unchecked(&fscache_n_cookie_special));
47090
47091 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
47092 - atomic_read(&fscache_n_object_alloc),
47093 - atomic_read(&fscache_n_object_no_alloc),
47094 - atomic_read(&fscache_n_object_avail),
47095 - atomic_read(&fscache_n_object_dead));
47096 + atomic_read_unchecked(&fscache_n_object_alloc),
47097 + atomic_read_unchecked(&fscache_n_object_no_alloc),
47098 + atomic_read_unchecked(&fscache_n_object_avail),
47099 + atomic_read_unchecked(&fscache_n_object_dead));
47100 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
47101 - atomic_read(&fscache_n_checkaux_none),
47102 - atomic_read(&fscache_n_checkaux_okay),
47103 - atomic_read(&fscache_n_checkaux_update),
47104 - atomic_read(&fscache_n_checkaux_obsolete));
47105 + atomic_read_unchecked(&fscache_n_checkaux_none),
47106 + atomic_read_unchecked(&fscache_n_checkaux_okay),
47107 + atomic_read_unchecked(&fscache_n_checkaux_update),
47108 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
47109
47110 seq_printf(m, "Pages : mrk=%u unc=%u\n",
47111 - atomic_read(&fscache_n_marks),
47112 - atomic_read(&fscache_n_uncaches));
47113 + atomic_read_unchecked(&fscache_n_marks),
47114 + atomic_read_unchecked(&fscache_n_uncaches));
47115
47116 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
47117 " oom=%u\n",
47118 - atomic_read(&fscache_n_acquires),
47119 - atomic_read(&fscache_n_acquires_null),
47120 - atomic_read(&fscache_n_acquires_no_cache),
47121 - atomic_read(&fscache_n_acquires_ok),
47122 - atomic_read(&fscache_n_acquires_nobufs),
47123 - atomic_read(&fscache_n_acquires_oom));
47124 + atomic_read_unchecked(&fscache_n_acquires),
47125 + atomic_read_unchecked(&fscache_n_acquires_null),
47126 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
47127 + atomic_read_unchecked(&fscache_n_acquires_ok),
47128 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
47129 + atomic_read_unchecked(&fscache_n_acquires_oom));
47130
47131 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
47132 - atomic_read(&fscache_n_object_lookups),
47133 - atomic_read(&fscache_n_object_lookups_negative),
47134 - atomic_read(&fscache_n_object_lookups_positive),
47135 - atomic_read(&fscache_n_object_created),
47136 - atomic_read(&fscache_n_object_lookups_timed_out));
47137 + atomic_read_unchecked(&fscache_n_object_lookups),
47138 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
47139 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
47140 + atomic_read_unchecked(&fscache_n_object_created),
47141 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
47142
47143 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
47144 - atomic_read(&fscache_n_updates),
47145 - atomic_read(&fscache_n_updates_null),
47146 - atomic_read(&fscache_n_updates_run));
47147 + atomic_read_unchecked(&fscache_n_updates),
47148 + atomic_read_unchecked(&fscache_n_updates_null),
47149 + atomic_read_unchecked(&fscache_n_updates_run));
47150
47151 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
47152 - atomic_read(&fscache_n_relinquishes),
47153 - atomic_read(&fscache_n_relinquishes_null),
47154 - atomic_read(&fscache_n_relinquishes_waitcrt),
47155 - atomic_read(&fscache_n_relinquishes_retire));
47156 + atomic_read_unchecked(&fscache_n_relinquishes),
47157 + atomic_read_unchecked(&fscache_n_relinquishes_null),
47158 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
47159 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
47160
47161 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
47162 - atomic_read(&fscache_n_attr_changed),
47163 - atomic_read(&fscache_n_attr_changed_ok),
47164 - atomic_read(&fscache_n_attr_changed_nobufs),
47165 - atomic_read(&fscache_n_attr_changed_nomem),
47166 - atomic_read(&fscache_n_attr_changed_calls));
47167 + atomic_read_unchecked(&fscache_n_attr_changed),
47168 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
47169 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
47170 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
47171 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
47172
47173 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
47174 - atomic_read(&fscache_n_allocs),
47175 - atomic_read(&fscache_n_allocs_ok),
47176 - atomic_read(&fscache_n_allocs_wait),
47177 - atomic_read(&fscache_n_allocs_nobufs),
47178 - atomic_read(&fscache_n_allocs_intr));
47179 + atomic_read_unchecked(&fscache_n_allocs),
47180 + atomic_read_unchecked(&fscache_n_allocs_ok),
47181 + atomic_read_unchecked(&fscache_n_allocs_wait),
47182 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
47183 + atomic_read_unchecked(&fscache_n_allocs_intr));
47184 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
47185 - atomic_read(&fscache_n_alloc_ops),
47186 - atomic_read(&fscache_n_alloc_op_waits),
47187 - atomic_read(&fscache_n_allocs_object_dead));
47188 + atomic_read_unchecked(&fscache_n_alloc_ops),
47189 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
47190 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
47191
47192 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
47193 " int=%u oom=%u\n",
47194 - atomic_read(&fscache_n_retrievals),
47195 - atomic_read(&fscache_n_retrievals_ok),
47196 - atomic_read(&fscache_n_retrievals_wait),
47197 - atomic_read(&fscache_n_retrievals_nodata),
47198 - atomic_read(&fscache_n_retrievals_nobufs),
47199 - atomic_read(&fscache_n_retrievals_intr),
47200 - atomic_read(&fscache_n_retrievals_nomem));
47201 + atomic_read_unchecked(&fscache_n_retrievals),
47202 + atomic_read_unchecked(&fscache_n_retrievals_ok),
47203 + atomic_read_unchecked(&fscache_n_retrievals_wait),
47204 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
47205 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
47206 + atomic_read_unchecked(&fscache_n_retrievals_intr),
47207 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
47208 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
47209 - atomic_read(&fscache_n_retrieval_ops),
47210 - atomic_read(&fscache_n_retrieval_op_waits),
47211 - atomic_read(&fscache_n_retrievals_object_dead));
47212 + atomic_read_unchecked(&fscache_n_retrieval_ops),
47213 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
47214 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
47215
47216 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
47217 - atomic_read(&fscache_n_stores),
47218 - atomic_read(&fscache_n_stores_ok),
47219 - atomic_read(&fscache_n_stores_again),
47220 - atomic_read(&fscache_n_stores_nobufs),
47221 - atomic_read(&fscache_n_stores_oom));
47222 + atomic_read_unchecked(&fscache_n_stores),
47223 + atomic_read_unchecked(&fscache_n_stores_ok),
47224 + atomic_read_unchecked(&fscache_n_stores_again),
47225 + atomic_read_unchecked(&fscache_n_stores_nobufs),
47226 + atomic_read_unchecked(&fscache_n_stores_oom));
47227 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
47228 - atomic_read(&fscache_n_store_ops),
47229 - atomic_read(&fscache_n_store_calls),
47230 - atomic_read(&fscache_n_store_pages),
47231 - atomic_read(&fscache_n_store_radix_deletes),
47232 - atomic_read(&fscache_n_store_pages_over_limit));
47233 + atomic_read_unchecked(&fscache_n_store_ops),
47234 + atomic_read_unchecked(&fscache_n_store_calls),
47235 + atomic_read_unchecked(&fscache_n_store_pages),
47236 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
47237 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
47238
47239 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
47240 - atomic_read(&fscache_n_store_vmscan_not_storing),
47241 - atomic_read(&fscache_n_store_vmscan_gone),
47242 - atomic_read(&fscache_n_store_vmscan_busy),
47243 - atomic_read(&fscache_n_store_vmscan_cancelled));
47244 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
47245 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
47246 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
47247 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
47248
47249 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
47250 - atomic_read(&fscache_n_op_pend),
47251 - atomic_read(&fscache_n_op_run),
47252 - atomic_read(&fscache_n_op_enqueue),
47253 - atomic_read(&fscache_n_op_cancelled),
47254 - atomic_read(&fscache_n_op_rejected));
47255 + atomic_read_unchecked(&fscache_n_op_pend),
47256 + atomic_read_unchecked(&fscache_n_op_run),
47257 + atomic_read_unchecked(&fscache_n_op_enqueue),
47258 + atomic_read_unchecked(&fscache_n_op_cancelled),
47259 + atomic_read_unchecked(&fscache_n_op_rejected));
47260 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
47261 - atomic_read(&fscache_n_op_deferred_release),
47262 - atomic_read(&fscache_n_op_release),
47263 - atomic_read(&fscache_n_op_gc));
47264 + atomic_read_unchecked(&fscache_n_op_deferred_release),
47265 + atomic_read_unchecked(&fscache_n_op_release),
47266 + atomic_read_unchecked(&fscache_n_op_gc));
47267
47268 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
47269 atomic_read(&fscache_n_cop_alloc_object),
47270 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
47271 index 3426521..3b75162 100644
47272 --- a/fs/fuse/cuse.c
47273 +++ b/fs/fuse/cuse.c
47274 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
47275 INIT_LIST_HEAD(&cuse_conntbl[i]);
47276
47277 /* inherit and extend fuse_dev_operations */
47278 - cuse_channel_fops = fuse_dev_operations;
47279 - cuse_channel_fops.owner = THIS_MODULE;
47280 - cuse_channel_fops.open = cuse_channel_open;
47281 - cuse_channel_fops.release = cuse_channel_release;
47282 + pax_open_kernel();
47283 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
47284 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
47285 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
47286 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
47287 + pax_close_kernel();
47288
47289 cuse_class = class_create(THIS_MODULE, "cuse");
47290 if (IS_ERR(cuse_class))
47291 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
47292 index f4246cf..b4aed1d 100644
47293 --- a/fs/fuse/dev.c
47294 +++ b/fs/fuse/dev.c
47295 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
47296 ret = 0;
47297 pipe_lock(pipe);
47298
47299 - if (!pipe->readers) {
47300 + if (!atomic_read(&pipe->readers)) {
47301 send_sig(SIGPIPE, current, 0);
47302 if (!ret)
47303 ret = -EPIPE;
47304 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
47305 index 334e0b1..fc571e8 100644
47306 --- a/fs/fuse/dir.c
47307 +++ b/fs/fuse/dir.c
47308 @@ -1189,7 +1189,7 @@ static char *read_link(struct dentry *dentry)
47309 return link;
47310 }
47311
47312 -static void free_link(char *link)
47313 +static void free_link(const char *link)
47314 {
47315 if (!IS_ERR(link))
47316 free_page((unsigned long) link);
47317 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
47318 index a9ba244..d9df391 100644
47319 --- a/fs/gfs2/inode.c
47320 +++ b/fs/gfs2/inode.c
47321 @@ -1496,7 +1496,7 @@ out:
47322
47323 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47324 {
47325 - char *s = nd_get_link(nd);
47326 + const char *s = nd_get_link(nd);
47327 if (!IS_ERR(s))
47328 kfree(s);
47329 }
47330 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
47331 index cc9281b..58996fb 100644
47332 --- a/fs/hugetlbfs/inode.c
47333 +++ b/fs/hugetlbfs/inode.c
47334 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
47335 .kill_sb = kill_litter_super,
47336 };
47337
47338 -static struct vfsmount *hugetlbfs_vfsmount;
47339 +struct vfsmount *hugetlbfs_vfsmount;
47340
47341 static int can_do_hugetlb_shm(void)
47342 {
47343 diff --git a/fs/inode.c b/fs/inode.c
47344 index c99163b..a11ad40 100644
47345 --- a/fs/inode.c
47346 +++ b/fs/inode.c
47347 @@ -867,8 +867,8 @@ unsigned int get_next_ino(void)
47348
47349 #ifdef CONFIG_SMP
47350 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
47351 - static atomic_t shared_last_ino;
47352 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
47353 + static atomic_unchecked_t shared_last_ino;
47354 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
47355
47356 res = next - LAST_INO_BATCH;
47357 }
47358 diff --git a/fs/isofs/export.c b/fs/isofs/export.c
47359 index aa4356d..1d38044 100644
47360 --- a/fs/isofs/export.c
47361 +++ b/fs/isofs/export.c
47362 @@ -134,6 +134,7 @@ isofs_export_encode_fh(struct inode *inode,
47363 len = 3;
47364 fh32[0] = ei->i_iget5_block;
47365 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
47366 + fh16[3] = 0; /* avoid leaking uninitialized data */
47367 fh32[2] = inode->i_generation;
47368 if (parent) {
47369 struct iso_inode_info *eparent;
47370 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
47371 index 4a6cf28..d3a29d3 100644
47372 --- a/fs/jffs2/erase.c
47373 +++ b/fs/jffs2/erase.c
47374 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
47375 struct jffs2_unknown_node marker = {
47376 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47377 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47378 - .totlen = cpu_to_je32(c->cleanmarker_size)
47379 + .totlen = cpu_to_je32(c->cleanmarker_size),
47380 + .hdr_crc = cpu_to_je32(0)
47381 };
47382
47383 jffs2_prealloc_raw_node_refs(c, jeb, 1);
47384 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
47385 index 6f4529d..bf12806 100644
47386 --- a/fs/jffs2/wbuf.c
47387 +++ b/fs/jffs2/wbuf.c
47388 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
47389 {
47390 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47391 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47392 - .totlen = constant_cpu_to_je32(8)
47393 + .totlen = constant_cpu_to_je32(8),
47394 + .hdr_crc = constant_cpu_to_je32(0)
47395 };
47396
47397 /*
47398 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
47399 index 4a82950..bcaa0cb 100644
47400 --- a/fs/jfs/super.c
47401 +++ b/fs/jfs/super.c
47402 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
47403
47404 jfs_inode_cachep =
47405 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47406 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47407 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47408 init_once);
47409 if (jfs_inode_cachep == NULL)
47410 return -ENOMEM;
47411 diff --git a/fs/libfs.c b/fs/libfs.c
47412 index f86ec27..4734776 100644
47413 --- a/fs/libfs.c
47414 +++ b/fs/libfs.c
47415 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47416
47417 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47418 struct dentry *next;
47419 + char d_name[sizeof(next->d_iname)];
47420 + const unsigned char *name;
47421 +
47422 next = list_entry(p, struct dentry, d_u.d_child);
47423 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47424 if (!simple_positive(next)) {
47425 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47426
47427 spin_unlock(&next->d_lock);
47428 spin_unlock(&dentry->d_lock);
47429 - if (filldir(dirent, next->d_name.name,
47430 + name = next->d_name.name;
47431 + if (name == next->d_iname) {
47432 + memcpy(d_name, name, next->d_name.len);
47433 + name = d_name;
47434 + }
47435 + if (filldir(dirent, name,
47436 next->d_name.len, filp->f_pos,
47437 next->d_inode->i_ino,
47438 dt_type(next->d_inode)) < 0)
47439 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
47440 index 8392cb8..80d6193 100644
47441 --- a/fs/lockd/clntproc.c
47442 +++ b/fs/lockd/clntproc.c
47443 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
47444 /*
47445 * Cookie counter for NLM requests
47446 */
47447 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47448 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47449
47450 void nlmclnt_next_cookie(struct nlm_cookie *c)
47451 {
47452 - u32 cookie = atomic_inc_return(&nlm_cookie);
47453 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47454
47455 memcpy(c->data, &cookie, 4);
47456 c->len=4;
47457 diff --git a/fs/locks.c b/fs/locks.c
47458 index 82c3533..34e929c 100644
47459 --- a/fs/locks.c
47460 +++ b/fs/locks.c
47461 @@ -2076,16 +2076,16 @@ void locks_remove_flock(struct file *filp)
47462 return;
47463
47464 if (filp->f_op && filp->f_op->flock) {
47465 - struct file_lock fl = {
47466 + struct file_lock flock = {
47467 .fl_pid = current->tgid,
47468 .fl_file = filp,
47469 .fl_flags = FL_FLOCK,
47470 .fl_type = F_UNLCK,
47471 .fl_end = OFFSET_MAX,
47472 };
47473 - filp->f_op->flock(filp, F_SETLKW, &fl);
47474 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
47475 - fl.fl_ops->fl_release_private(&fl);
47476 + filp->f_op->flock(filp, F_SETLKW, &flock);
47477 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
47478 + flock.fl_ops->fl_release_private(&flock);
47479 }
47480
47481 lock_flocks();
47482 diff --git a/fs/namei.c b/fs/namei.c
47483 index 7d69419..c7a09f0 100644
47484 --- a/fs/namei.c
47485 +++ b/fs/namei.c
47486 @@ -265,16 +265,32 @@ int generic_permission(struct inode *inode, int mask)
47487 if (ret != -EACCES)
47488 return ret;
47489
47490 +#ifdef CONFIG_GRKERNSEC
47491 + /* we'll block if we have to log due to a denied capability use */
47492 + if (mask & MAY_NOT_BLOCK)
47493 + return -ECHILD;
47494 +#endif
47495 +
47496 if (S_ISDIR(inode->i_mode)) {
47497 /* DACs are overridable for directories */
47498 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
47499 - return 0;
47500 if (!(mask & MAY_WRITE))
47501 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47502 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47503 + inode_capable(inode, CAP_DAC_READ_SEARCH))
47504 return 0;
47505 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
47506 + return 0;
47507 return -EACCES;
47508 }
47509 /*
47510 + * Searching includes executable on directories, else just read.
47511 + */
47512 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47513 + if (mask == MAY_READ)
47514 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47515 + inode_capable(inode, CAP_DAC_READ_SEARCH))
47516 + return 0;
47517 +
47518 + /*
47519 * Read/write DACs are always overridable.
47520 * Executable DACs are overridable when there is
47521 * at least one exec bit set.
47522 @@ -283,14 +299,6 @@ int generic_permission(struct inode *inode, int mask)
47523 if (inode_capable(inode, CAP_DAC_OVERRIDE))
47524 return 0;
47525
47526 - /*
47527 - * Searching includes executable on directories, else just read.
47528 - */
47529 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47530 - if (mask == MAY_READ)
47531 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47532 - return 0;
47533 -
47534 return -EACCES;
47535 }
47536
47537 @@ -639,11 +647,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47538 return error;
47539 }
47540
47541 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
47542 + dentry->d_inode, dentry, nd->path.mnt)) {
47543 + error = -EACCES;
47544 + *p = ERR_PTR(error); /* no ->put_link(), please */
47545 + path_put(&nd->path);
47546 + return error;
47547 + }
47548 +
47549 nd->last_type = LAST_BIND;
47550 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47551 error = PTR_ERR(*p);
47552 if (!IS_ERR(*p)) {
47553 - char *s = nd_get_link(nd);
47554 + const char *s = nd_get_link(nd);
47555 error = 0;
47556 if (s)
47557 error = __vfs_follow_link(nd, s);
47558 @@ -1386,6 +1402,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
47559 if (!res)
47560 res = walk_component(nd, path, &nd->last,
47561 nd->last_type, LOOKUP_FOLLOW);
47562 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
47563 + res = -EACCES;
47564 put_link(nd, &link, cookie);
47565 } while (res > 0);
47566
47567 @@ -1779,6 +1797,8 @@ static int path_lookupat(int dfd, const char *name,
47568 err = follow_link(&link, nd, &cookie);
47569 if (!err)
47570 err = lookup_last(nd, &path);
47571 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
47572 + err = -EACCES;
47573 put_link(nd, &link, cookie);
47574 }
47575 }
47576 @@ -1786,6 +1806,21 @@ static int path_lookupat(int dfd, const char *name,
47577 if (!err)
47578 err = complete_walk(nd);
47579
47580 + if (!(nd->flags & LOOKUP_PARENT)) {
47581 +#ifdef CONFIG_GRKERNSEC
47582 + if (flags & LOOKUP_RCU) {
47583 + if (!err)
47584 + path_put(&nd->path);
47585 + err = -ECHILD;
47586 + } else
47587 +#endif
47588 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47589 + if (!err)
47590 + path_put(&nd->path);
47591 + err = -ENOENT;
47592 + }
47593 + }
47594 +
47595 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47596 if (!nd->inode->i_op->lookup) {
47597 path_put(&nd->path);
47598 @@ -1813,6 +1848,15 @@ static int do_path_lookup(int dfd, const char *name,
47599 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47600
47601 if (likely(!retval)) {
47602 + if (*name != '/' && nd->path.dentry && nd->inode) {
47603 +#ifdef CONFIG_GRKERNSEC
47604 + if (flags & LOOKUP_RCU)
47605 + return -ECHILD;
47606 +#endif
47607 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47608 + return -ENOENT;
47609 + }
47610 +
47611 if (unlikely(!audit_dummy_context())) {
47612 if (nd->path.dentry && nd->inode)
47613 audit_inode(name, nd->path.dentry);
47614 @@ -2155,6 +2199,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
47615 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47616 return -EPERM;
47617
47618 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47619 + return -EPERM;
47620 + if (gr_handle_rawio(inode))
47621 + return -EPERM;
47622 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47623 + return -EACCES;
47624 +
47625 return 0;
47626 }
47627
47628 @@ -2190,7 +2241,7 @@ static inline int open_to_namei_flags(int flag)
47629 /*
47630 * Handle the last step of open()
47631 */
47632 -static struct file *do_last(struct nameidata *nd, struct path *path,
47633 +static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
47634 const struct open_flags *op, const char *pathname)
47635 {
47636 struct dentry *dir = nd->path.dentry;
47637 @@ -2220,16 +2271,44 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47638 error = complete_walk(nd);
47639 if (error)
47640 return ERR_PTR(error);
47641 +#ifdef CONFIG_GRKERNSEC
47642 + if (nd->flags & LOOKUP_RCU) {
47643 + error = -ECHILD;
47644 + goto exit;
47645 + }
47646 +#endif
47647 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47648 + error = -ENOENT;
47649 + goto exit;
47650 + }
47651 audit_inode(pathname, nd->path.dentry);
47652 if (open_flag & O_CREAT) {
47653 error = -EISDIR;
47654 goto exit;
47655 }
47656 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47657 + error = -EACCES;
47658 + goto exit;
47659 + }
47660 goto ok;
47661 case LAST_BIND:
47662 error = complete_walk(nd);
47663 if (error)
47664 return ERR_PTR(error);
47665 +#ifdef CONFIG_GRKERNSEC
47666 + if (nd->flags & LOOKUP_RCU) {
47667 + error = -ECHILD;
47668 + goto exit;
47669 + }
47670 +#endif
47671 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47672 + error = -ENOENT;
47673 + goto exit;
47674 + }
47675 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47676 + error = -EACCES;
47677 + goto exit;
47678 + }
47679 audit_inode(pathname, dir);
47680 goto ok;
47681 }
47682 @@ -2285,6 +2364,17 @@ retry_lookup:
47683 /* Negative dentry, just create the file */
47684 if (!dentry->d_inode) {
47685 umode_t mode = op->mode;
47686 +
47687 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
47688 + error = -EACCES;
47689 + goto exit_mutex_unlock;
47690 + }
47691 +
47692 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47693 + error = -EACCES;
47694 + goto exit_mutex_unlock;
47695 + }
47696 +
47697 if (!IS_POSIXACL(dir->d_inode))
47698 mode &= ~current_umask();
47699 /*
47700 @@ -2308,6 +2398,8 @@ retry_lookup:
47701 error = vfs_create(dir->d_inode, dentry, mode, nd);
47702 if (error)
47703 goto exit_mutex_unlock;
47704 + else
47705 + gr_handle_create(path->dentry, path->mnt);
47706 mutex_unlock(&dir->d_inode->i_mutex);
47707 dput(nd->path.dentry);
47708 nd->path.dentry = dentry;
47709 @@ -2317,6 +2409,23 @@ retry_lookup:
47710 /*
47711 * It already exists.
47712 */
47713 +
47714 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47715 + error = -ENOENT;
47716 + goto exit_mutex_unlock;
47717 + }
47718 + if (link && gr_handle_symlink_owner(link, dentry->d_inode)) {
47719 + error = -EACCES;
47720 + goto exit_mutex_unlock;
47721 + }
47722 +
47723 + /* only check if O_CREAT is specified, all other checks need to go
47724 + into may_open */
47725 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47726 + error = -EACCES;
47727 + goto exit_mutex_unlock;
47728 + }
47729 +
47730 mutex_unlock(&dir->d_inode->i_mutex);
47731 audit_inode(pathname, path->dentry);
47732
47733 @@ -2349,6 +2458,11 @@ finish_lookup:
47734 }
47735 }
47736 BUG_ON(inode != path->dentry->d_inode);
47737 + /* if we're resolving a symlink to another symlink */
47738 + if (link && gr_handle_symlink_owner(link, inode)) {
47739 + error = -EACCES;
47740 + goto exit;
47741 + }
47742 return NULL;
47743 }
47744
47745 @@ -2358,7 +2472,6 @@ finish_lookup:
47746 save_parent.dentry = nd->path.dentry;
47747 save_parent.mnt = mntget(path->mnt);
47748 nd->path.dentry = path->dentry;
47749 -
47750 }
47751 nd->inode = inode;
47752 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
47753 @@ -2367,6 +2480,21 @@ finish_lookup:
47754 path_put(&save_parent);
47755 return ERR_PTR(error);
47756 }
47757 +#ifdef CONFIG_GRKERNSEC
47758 + if (nd->flags & LOOKUP_RCU) {
47759 + error = -ECHILD;
47760 + goto exit;
47761 + }
47762 +#endif
47763 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47764 + error = -ENOENT;
47765 + goto exit;
47766 + }
47767 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47768 + error = -EACCES;
47769 + goto exit;
47770 + }
47771 +
47772 error = -EISDIR;
47773 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
47774 goto exit;
47775 @@ -2461,7 +2589,7 @@ static struct file *path_openat(int dfd, const char *pathname,
47776 if (unlikely(error))
47777 goto out_filp;
47778
47779 - filp = do_last(nd, &path, op, pathname);
47780 + filp = do_last(nd, &path, NULL, op, pathname);
47781 while (unlikely(!filp)) { /* trailing symlink */
47782 struct path link = path;
47783 void *cookie;
47784 @@ -2476,8 +2604,9 @@ static struct file *path_openat(int dfd, const char *pathname,
47785 error = follow_link(&link, nd, &cookie);
47786 if (unlikely(error))
47787 filp = ERR_PTR(error);
47788 - else
47789 - filp = do_last(nd, &path, op, pathname);
47790 + else {
47791 + filp = do_last(nd, &path, &link, op, pathname);
47792 + }
47793 put_link(nd, &link, cookie);
47794 }
47795 out:
47796 @@ -2577,6 +2706,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47797 *path = nd.path;
47798 return dentry;
47799 eexist:
47800 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47801 + dput(dentry);
47802 + dentry = ERR_PTR(-ENOENT);
47803 + goto fail;
47804 + }
47805 dput(dentry);
47806 dentry = ERR_PTR(-EEXIST);
47807 fail:
47808 @@ -2599,6 +2733,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47809 }
47810 EXPORT_SYMBOL(user_path_create);
47811
47812 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47813 +{
47814 + char *tmp = getname(pathname);
47815 + struct dentry *res;
47816 + if (IS_ERR(tmp))
47817 + return ERR_CAST(tmp);
47818 + res = kern_path_create(dfd, tmp, path, is_dir);
47819 + if (IS_ERR(res))
47820 + putname(tmp);
47821 + else
47822 + *to = tmp;
47823 + return res;
47824 +}
47825 +
47826 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
47827 {
47828 int error = may_create(dir, dentry);
47829 @@ -2665,6 +2813,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47830 error = mnt_want_write(path.mnt);
47831 if (error)
47832 goto out_dput;
47833 +
47834 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47835 + error = -EPERM;
47836 + goto out_drop_write;
47837 + }
47838 +
47839 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47840 + error = -EACCES;
47841 + goto out_drop_write;
47842 + }
47843 +
47844 error = security_path_mknod(&path, dentry, mode, dev);
47845 if (error)
47846 goto out_drop_write;
47847 @@ -2682,6 +2841,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47848 }
47849 out_drop_write:
47850 mnt_drop_write(path.mnt);
47851 +
47852 + if (!error)
47853 + gr_handle_create(dentry, path.mnt);
47854 out_dput:
47855 dput(dentry);
47856 mutex_unlock(&path.dentry->d_inode->i_mutex);
47857 @@ -2735,12 +2897,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
47858 error = mnt_want_write(path.mnt);
47859 if (error)
47860 goto out_dput;
47861 +
47862 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47863 + error = -EACCES;
47864 + goto out_drop_write;
47865 + }
47866 +
47867 error = security_path_mkdir(&path, dentry, mode);
47868 if (error)
47869 goto out_drop_write;
47870 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47871 out_drop_write:
47872 mnt_drop_write(path.mnt);
47873 +
47874 + if (!error)
47875 + gr_handle_create(dentry, path.mnt);
47876 out_dput:
47877 dput(dentry);
47878 mutex_unlock(&path.dentry->d_inode->i_mutex);
47879 @@ -2820,6 +2991,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47880 char * name;
47881 struct dentry *dentry;
47882 struct nameidata nd;
47883 + ino_t saved_ino = 0;
47884 + dev_t saved_dev = 0;
47885
47886 error = user_path_parent(dfd, pathname, &nd, &name);
47887 if (error)
47888 @@ -2848,6 +3021,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47889 error = -ENOENT;
47890 goto exit3;
47891 }
47892 +
47893 + saved_ino = dentry->d_inode->i_ino;
47894 + saved_dev = gr_get_dev_from_dentry(dentry);
47895 +
47896 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47897 + error = -EACCES;
47898 + goto exit3;
47899 + }
47900 +
47901 error = mnt_want_write(nd.path.mnt);
47902 if (error)
47903 goto exit3;
47904 @@ -2855,6 +3037,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47905 if (error)
47906 goto exit4;
47907 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47908 + if (!error && (saved_dev || saved_ino))
47909 + gr_handle_delete(saved_ino, saved_dev);
47910 exit4:
47911 mnt_drop_write(nd.path.mnt);
47912 exit3:
47913 @@ -2917,6 +3101,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47914 struct dentry *dentry;
47915 struct nameidata nd;
47916 struct inode *inode = NULL;
47917 + ino_t saved_ino = 0;
47918 + dev_t saved_dev = 0;
47919
47920 error = user_path_parent(dfd, pathname, &nd, &name);
47921 if (error)
47922 @@ -2939,6 +3125,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47923 if (!inode)
47924 goto slashes;
47925 ihold(inode);
47926 +
47927 + if (inode->i_nlink <= 1) {
47928 + saved_ino = inode->i_ino;
47929 + saved_dev = gr_get_dev_from_dentry(dentry);
47930 + }
47931 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47932 + error = -EACCES;
47933 + goto exit2;
47934 + }
47935 +
47936 error = mnt_want_write(nd.path.mnt);
47937 if (error)
47938 goto exit2;
47939 @@ -2946,6 +3142,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47940 if (error)
47941 goto exit3;
47942 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47943 + if (!error && (saved_ino || saved_dev))
47944 + gr_handle_delete(saved_ino, saved_dev);
47945 exit3:
47946 mnt_drop_write(nd.path.mnt);
47947 exit2:
47948 @@ -3021,10 +3219,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47949 error = mnt_want_write(path.mnt);
47950 if (error)
47951 goto out_dput;
47952 +
47953 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47954 + error = -EACCES;
47955 + goto out_drop_write;
47956 + }
47957 +
47958 error = security_path_symlink(&path, dentry, from);
47959 if (error)
47960 goto out_drop_write;
47961 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47962 + if (!error)
47963 + gr_handle_create(dentry, path.mnt);
47964 out_drop_write:
47965 mnt_drop_write(path.mnt);
47966 out_dput:
47967 @@ -3099,6 +3305,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47968 {
47969 struct dentry *new_dentry;
47970 struct path old_path, new_path;
47971 + char *to = NULL;
47972 int how = 0;
47973 int error;
47974
47975 @@ -3122,7 +3329,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47976 if (error)
47977 return error;
47978
47979 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47980 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47981 error = PTR_ERR(new_dentry);
47982 if (IS_ERR(new_dentry))
47983 goto out;
47984 @@ -3133,13 +3340,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47985 error = mnt_want_write(new_path.mnt);
47986 if (error)
47987 goto out_dput;
47988 +
47989 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47990 + old_path.dentry->d_inode,
47991 + old_path.dentry->d_inode->i_mode, to)) {
47992 + error = -EACCES;
47993 + goto out_drop_write;
47994 + }
47995 +
47996 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47997 + old_path.dentry, old_path.mnt, to)) {
47998 + error = -EACCES;
47999 + goto out_drop_write;
48000 + }
48001 +
48002 error = security_path_link(old_path.dentry, &new_path, new_dentry);
48003 if (error)
48004 goto out_drop_write;
48005 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
48006 + if (!error)
48007 + gr_handle_create(new_dentry, new_path.mnt);
48008 out_drop_write:
48009 mnt_drop_write(new_path.mnt);
48010 out_dput:
48011 + putname(to);
48012 dput(new_dentry);
48013 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
48014 path_put(&new_path);
48015 @@ -3373,6 +3597,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
48016 if (new_dentry == trap)
48017 goto exit5;
48018
48019 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
48020 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
48021 + to);
48022 + if (error)
48023 + goto exit5;
48024 +
48025 error = mnt_want_write(oldnd.path.mnt);
48026 if (error)
48027 goto exit5;
48028 @@ -3382,6 +3612,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
48029 goto exit6;
48030 error = vfs_rename(old_dir->d_inode, old_dentry,
48031 new_dir->d_inode, new_dentry);
48032 + if (!error)
48033 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
48034 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
48035 exit6:
48036 mnt_drop_write(oldnd.path.mnt);
48037 exit5:
48038 @@ -3407,6 +3640,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
48039
48040 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
48041 {
48042 + char tmpbuf[64];
48043 + const char *newlink;
48044 int len;
48045
48046 len = PTR_ERR(link);
48047 @@ -3416,7 +3651,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
48048 len = strlen(link);
48049 if (len > (unsigned) buflen)
48050 len = buflen;
48051 - if (copy_to_user(buffer, link, len))
48052 +
48053 + if (len < sizeof(tmpbuf)) {
48054 + memcpy(tmpbuf, link, len);
48055 + newlink = tmpbuf;
48056 + } else
48057 + newlink = link;
48058 +
48059 + if (copy_to_user(buffer, newlink, len))
48060 len = -EFAULT;
48061 out:
48062 return len;
48063 diff --git a/fs/namespace.c b/fs/namespace.c
48064 index 1e4a5fe..a5ce747 100644
48065 --- a/fs/namespace.c
48066 +++ b/fs/namespace.c
48067 @@ -1157,6 +1157,9 @@ static int do_umount(struct mount *mnt, int flags)
48068 if (!(sb->s_flags & MS_RDONLY))
48069 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
48070 up_write(&sb->s_umount);
48071 +
48072 + gr_log_remount(mnt->mnt_devname, retval);
48073 +
48074 return retval;
48075 }
48076
48077 @@ -1176,6 +1179,9 @@ static int do_umount(struct mount *mnt, int flags)
48078 br_write_unlock(&vfsmount_lock);
48079 up_write(&namespace_sem);
48080 release_mounts(&umount_list);
48081 +
48082 + gr_log_unmount(mnt->mnt_devname, retval);
48083 +
48084 return retval;
48085 }
48086
48087 @@ -2177,6 +2183,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
48088 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
48089 MS_STRICTATIME);
48090
48091 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
48092 + retval = -EPERM;
48093 + goto dput_out;
48094 + }
48095 +
48096 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
48097 + retval = -EPERM;
48098 + goto dput_out;
48099 + }
48100 +
48101 if (flags & MS_REMOUNT)
48102 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
48103 data_page);
48104 @@ -2191,6 +2207,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
48105 dev_name, data_page);
48106 dput_out:
48107 path_put(&path);
48108 +
48109 + gr_log_mount(dev_name, dir_name, retval);
48110 +
48111 return retval;
48112 }
48113
48114 @@ -2472,6 +2491,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
48115 if (error)
48116 goto out2;
48117
48118 + if (gr_handle_chroot_pivot()) {
48119 + error = -EPERM;
48120 + goto out2;
48121 + }
48122 +
48123 get_fs_root(current->fs, &root);
48124 error = lock_mount(&old);
48125 if (error)
48126 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
48127 index f729698..2bac081 100644
48128 --- a/fs/nfs/inode.c
48129 +++ b/fs/nfs/inode.c
48130 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
48131 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
48132 nfsi->attrtimeo_timestamp = jiffies;
48133
48134 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
48135 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
48136 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
48137 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
48138 else
48139 @@ -1008,16 +1008,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
48140 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
48141 }
48142
48143 -static atomic_long_t nfs_attr_generation_counter;
48144 +static atomic_long_unchecked_t nfs_attr_generation_counter;
48145
48146 static unsigned long nfs_read_attr_generation_counter(void)
48147 {
48148 - return atomic_long_read(&nfs_attr_generation_counter);
48149 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
48150 }
48151
48152 unsigned long nfs_inc_attr_generation_counter(void)
48153 {
48154 - return atomic_long_inc_return(&nfs_attr_generation_counter);
48155 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
48156 }
48157
48158 void nfs_fattr_init(struct nfs_fattr *fattr)
48159 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
48160 index c8bd9c3..4f83416 100644
48161 --- a/fs/nfsd/vfs.c
48162 +++ b/fs/nfsd/vfs.c
48163 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48164 } else {
48165 oldfs = get_fs();
48166 set_fs(KERNEL_DS);
48167 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
48168 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
48169 set_fs(oldfs);
48170 }
48171
48172 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48173
48174 /* Write the data. */
48175 oldfs = get_fs(); set_fs(KERNEL_DS);
48176 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
48177 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
48178 set_fs(oldfs);
48179 if (host_err < 0)
48180 goto out_nfserr;
48181 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
48182 */
48183
48184 oldfs = get_fs(); set_fs(KERNEL_DS);
48185 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
48186 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
48187 set_fs(oldfs);
48188
48189 if (host_err < 0)
48190 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
48191 index 3568c8a..e0240d8 100644
48192 --- a/fs/notify/fanotify/fanotify_user.c
48193 +++ b/fs/notify/fanotify/fanotify_user.c
48194 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
48195 goto out_close_fd;
48196
48197 ret = -EFAULT;
48198 - if (copy_to_user(buf, &fanotify_event_metadata,
48199 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
48200 + copy_to_user(buf, &fanotify_event_metadata,
48201 fanotify_event_metadata.event_len))
48202 goto out_kill_access_response;
48203
48204 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
48205 index c887b13..0fdf472 100644
48206 --- a/fs/notify/notification.c
48207 +++ b/fs/notify/notification.c
48208 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
48209 * get set to 0 so it will never get 'freed'
48210 */
48211 static struct fsnotify_event *q_overflow_event;
48212 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48213 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48214
48215 /**
48216 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
48217 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48218 */
48219 u32 fsnotify_get_cookie(void)
48220 {
48221 - return atomic_inc_return(&fsnotify_sync_cookie);
48222 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
48223 }
48224 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
48225
48226 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
48227 index 99e3610..02c1068 100644
48228 --- a/fs/ntfs/dir.c
48229 +++ b/fs/ntfs/dir.c
48230 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
48231 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
48232 ~(s64)(ndir->itype.index.block_size - 1)));
48233 /* Bounds checks. */
48234 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48235 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48236 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48237 "inode 0x%lx or driver bug.", vdir->i_ino);
48238 goto err_out;
48239 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
48240 index 7389d2d..dfd5dbe 100644
48241 --- a/fs/ntfs/file.c
48242 +++ b/fs/ntfs/file.c
48243 @@ -2231,6 +2231,6 @@ const struct inode_operations ntfs_file_inode_ops = {
48244 #endif /* NTFS_RW */
48245 };
48246
48247 -const struct file_operations ntfs_empty_file_ops = {};
48248 +const struct file_operations ntfs_empty_file_ops __read_only;
48249
48250 -const struct inode_operations ntfs_empty_inode_ops = {};
48251 +const struct inode_operations ntfs_empty_inode_ops __read_only;
48252 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
48253 index 210c352..a174f83 100644
48254 --- a/fs/ocfs2/localalloc.c
48255 +++ b/fs/ocfs2/localalloc.c
48256 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
48257 goto bail;
48258 }
48259
48260 - atomic_inc(&osb->alloc_stats.moves);
48261 + atomic_inc_unchecked(&osb->alloc_stats.moves);
48262
48263 bail:
48264 if (handle)
48265 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
48266 index d355e6e..578d905 100644
48267 --- a/fs/ocfs2/ocfs2.h
48268 +++ b/fs/ocfs2/ocfs2.h
48269 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
48270
48271 struct ocfs2_alloc_stats
48272 {
48273 - atomic_t moves;
48274 - atomic_t local_data;
48275 - atomic_t bitmap_data;
48276 - atomic_t bg_allocs;
48277 - atomic_t bg_extends;
48278 + atomic_unchecked_t moves;
48279 + atomic_unchecked_t local_data;
48280 + atomic_unchecked_t bitmap_data;
48281 + atomic_unchecked_t bg_allocs;
48282 + atomic_unchecked_t bg_extends;
48283 };
48284
48285 enum ocfs2_local_alloc_state
48286 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
48287 index f169da4..9112253 100644
48288 --- a/fs/ocfs2/suballoc.c
48289 +++ b/fs/ocfs2/suballoc.c
48290 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
48291 mlog_errno(status);
48292 goto bail;
48293 }
48294 - atomic_inc(&osb->alloc_stats.bg_extends);
48295 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48296
48297 /* You should never ask for this much metadata */
48298 BUG_ON(bits_wanted >
48299 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
48300 mlog_errno(status);
48301 goto bail;
48302 }
48303 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48304 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48305
48306 *suballoc_loc = res.sr_bg_blkno;
48307 *suballoc_bit_start = res.sr_bit_offset;
48308 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
48309 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
48310 res->sr_bits);
48311
48312 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48313 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48314
48315 BUG_ON(res->sr_bits != 1);
48316
48317 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
48318 mlog_errno(status);
48319 goto bail;
48320 }
48321 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48322 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48323
48324 BUG_ON(res.sr_bits != 1);
48325
48326 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48327 cluster_start,
48328 num_clusters);
48329 if (!status)
48330 - atomic_inc(&osb->alloc_stats.local_data);
48331 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
48332 } else {
48333 if (min_clusters > (osb->bitmap_cpg - 1)) {
48334 /* The only paths asking for contiguousness
48335 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48336 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
48337 res.sr_bg_blkno,
48338 res.sr_bit_offset);
48339 - atomic_inc(&osb->alloc_stats.bitmap_data);
48340 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
48341 *num_clusters = res.sr_bits;
48342 }
48343 }
48344 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
48345 index 68f4541..89cfe6a 100644
48346 --- a/fs/ocfs2/super.c
48347 +++ b/fs/ocfs2/super.c
48348 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
48349 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48350 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48351 "Stats",
48352 - atomic_read(&osb->alloc_stats.bitmap_data),
48353 - atomic_read(&osb->alloc_stats.local_data),
48354 - atomic_read(&osb->alloc_stats.bg_allocs),
48355 - atomic_read(&osb->alloc_stats.moves),
48356 - atomic_read(&osb->alloc_stats.bg_extends));
48357 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48358 + atomic_read_unchecked(&osb->alloc_stats.local_data),
48359 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48360 + atomic_read_unchecked(&osb->alloc_stats.moves),
48361 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48362
48363 out += snprintf(buf + out, len - out,
48364 "%10s => State: %u Descriptor: %llu Size: %u bits "
48365 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
48366 spin_lock_init(&osb->osb_xattr_lock);
48367 ocfs2_init_steal_slots(osb);
48368
48369 - atomic_set(&osb->alloc_stats.moves, 0);
48370 - atomic_set(&osb->alloc_stats.local_data, 0);
48371 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
48372 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
48373 - atomic_set(&osb->alloc_stats.bg_extends, 0);
48374 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48375 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48376 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48377 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48378 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48379
48380 /* Copy the blockcheck stats from the superblock probe */
48381 osb->osb_ecc_stats = *stats;
48382 diff --git a/fs/open.c b/fs/open.c
48383 index 5d9c71b..adb5b19 100644
48384 --- a/fs/open.c
48385 +++ b/fs/open.c
48386 @@ -31,6 +31,8 @@
48387 #include <linux/ima.h>
48388 #include <linux/dnotify.h>
48389
48390 +#define CREATE_TRACE_POINTS
48391 +#include <trace/events/fs.h>
48392 #include "internal.h"
48393
48394 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
48395 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
48396 error = locks_verify_truncate(inode, NULL, length);
48397 if (!error)
48398 error = security_path_truncate(&path);
48399 +
48400 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48401 + error = -EACCES;
48402 +
48403 if (!error)
48404 error = do_truncate(path.dentry, length, 0, NULL);
48405
48406 @@ -359,6 +365,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
48407 if (__mnt_is_readonly(path.mnt))
48408 res = -EROFS;
48409
48410 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48411 + res = -EACCES;
48412 +
48413 out_path_release:
48414 path_put(&path);
48415 out:
48416 @@ -385,6 +394,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
48417 if (error)
48418 goto dput_and_out;
48419
48420 + gr_log_chdir(path.dentry, path.mnt);
48421 +
48422 set_fs_pwd(current->fs, &path);
48423
48424 dput_and_out:
48425 @@ -411,6 +422,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
48426 goto out_putf;
48427
48428 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
48429 +
48430 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48431 + error = -EPERM;
48432 +
48433 + if (!error)
48434 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48435 +
48436 if (!error)
48437 set_fs_pwd(current->fs, &file->f_path);
48438 out_putf:
48439 @@ -439,7 +457,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
48440 if (error)
48441 goto dput_and_out;
48442
48443 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48444 + goto dput_and_out;
48445 +
48446 set_fs_root(current->fs, &path);
48447 +
48448 + gr_handle_chroot_chdir(&path);
48449 +
48450 error = 0;
48451 dput_and_out:
48452 path_put(&path);
48453 @@ -457,6 +481,16 @@ static int chmod_common(struct path *path, umode_t mode)
48454 if (error)
48455 return error;
48456 mutex_lock(&inode->i_mutex);
48457 +
48458 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
48459 + error = -EACCES;
48460 + goto out_unlock;
48461 + }
48462 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
48463 + error = -EACCES;
48464 + goto out_unlock;
48465 + }
48466 +
48467 error = security_path_chmod(path, mode);
48468 if (error)
48469 goto out_unlock;
48470 @@ -512,6 +546,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
48471 uid = make_kuid(current_user_ns(), user);
48472 gid = make_kgid(current_user_ns(), group);
48473
48474 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
48475 + return -EACCES;
48476 +
48477 newattrs.ia_valid = ATTR_CTIME;
48478 if (user != (uid_t) -1) {
48479 if (!uid_valid(uid))
48480 @@ -1036,6 +1073,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
48481 } else {
48482 fsnotify_open(f);
48483 fd_install(fd, f);
48484 + trace_do_sys_open(tmp, flags, mode);
48485 }
48486 }
48487 putname(tmp);
48488 diff --git a/fs/pipe.c b/fs/pipe.c
48489 index 49c1065..13b9e12 100644
48490 --- a/fs/pipe.c
48491 +++ b/fs/pipe.c
48492 @@ -438,9 +438,9 @@ redo:
48493 }
48494 if (bufs) /* More to do? */
48495 continue;
48496 - if (!pipe->writers)
48497 + if (!atomic_read(&pipe->writers))
48498 break;
48499 - if (!pipe->waiting_writers) {
48500 + if (!atomic_read(&pipe->waiting_writers)) {
48501 /* syscall merging: Usually we must not sleep
48502 * if O_NONBLOCK is set, or if we got some data.
48503 * But if a writer sleeps in kernel space, then
48504 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48505 mutex_lock(&inode->i_mutex);
48506 pipe = inode->i_pipe;
48507
48508 - if (!pipe->readers) {
48509 + if (!atomic_read(&pipe->readers)) {
48510 send_sig(SIGPIPE, current, 0);
48511 ret = -EPIPE;
48512 goto out;
48513 @@ -553,7 +553,7 @@ redo1:
48514 for (;;) {
48515 int bufs;
48516
48517 - if (!pipe->readers) {
48518 + if (!atomic_read(&pipe->readers)) {
48519 send_sig(SIGPIPE, current, 0);
48520 if (!ret)
48521 ret = -EPIPE;
48522 @@ -644,9 +644,9 @@ redo2:
48523 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48524 do_wakeup = 0;
48525 }
48526 - pipe->waiting_writers++;
48527 + atomic_inc(&pipe->waiting_writers);
48528 pipe_wait(pipe);
48529 - pipe->waiting_writers--;
48530 + atomic_dec(&pipe->waiting_writers);
48531 }
48532 out:
48533 mutex_unlock(&inode->i_mutex);
48534 @@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48535 mask = 0;
48536 if (filp->f_mode & FMODE_READ) {
48537 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48538 - if (!pipe->writers && filp->f_version != pipe->w_counter)
48539 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48540 mask |= POLLHUP;
48541 }
48542
48543 @@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48544 * Most Unices do not set POLLERR for FIFOs but on Linux they
48545 * behave exactly like pipes for poll().
48546 */
48547 - if (!pipe->readers)
48548 + if (!atomic_read(&pipe->readers))
48549 mask |= POLLERR;
48550 }
48551
48552 @@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48553
48554 mutex_lock(&inode->i_mutex);
48555 pipe = inode->i_pipe;
48556 - pipe->readers -= decr;
48557 - pipe->writers -= decw;
48558 + atomic_sub(decr, &pipe->readers);
48559 + atomic_sub(decw, &pipe->writers);
48560
48561 - if (!pipe->readers && !pipe->writers) {
48562 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48563 free_pipe_info(inode);
48564 } else {
48565 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48566 @@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48567
48568 if (inode->i_pipe) {
48569 ret = 0;
48570 - inode->i_pipe->readers++;
48571 + atomic_inc(&inode->i_pipe->readers);
48572 }
48573
48574 mutex_unlock(&inode->i_mutex);
48575 @@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48576
48577 if (inode->i_pipe) {
48578 ret = 0;
48579 - inode->i_pipe->writers++;
48580 + atomic_inc(&inode->i_pipe->writers);
48581 }
48582
48583 mutex_unlock(&inode->i_mutex);
48584 @@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48585 if (inode->i_pipe) {
48586 ret = 0;
48587 if (filp->f_mode & FMODE_READ)
48588 - inode->i_pipe->readers++;
48589 + atomic_inc(&inode->i_pipe->readers);
48590 if (filp->f_mode & FMODE_WRITE)
48591 - inode->i_pipe->writers++;
48592 + atomic_inc(&inode->i_pipe->writers);
48593 }
48594
48595 mutex_unlock(&inode->i_mutex);
48596 @@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
48597 inode->i_pipe = NULL;
48598 }
48599
48600 -static struct vfsmount *pipe_mnt __read_mostly;
48601 +struct vfsmount *pipe_mnt __read_mostly;
48602
48603 /*
48604 * pipefs_dname() is called from d_path().
48605 @@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
48606 goto fail_iput;
48607 inode->i_pipe = pipe;
48608
48609 - pipe->readers = pipe->writers = 1;
48610 + atomic_set(&pipe->readers, 1);
48611 + atomic_set(&pipe->writers, 1);
48612 inode->i_fop = &rdwr_pipefifo_fops;
48613
48614 /*
48615 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48616 index 15af622..0e9f4467 100644
48617 --- a/fs/proc/Kconfig
48618 +++ b/fs/proc/Kconfig
48619 @@ -30,12 +30,12 @@ config PROC_FS
48620
48621 config PROC_KCORE
48622 bool "/proc/kcore support" if !ARM
48623 - depends on PROC_FS && MMU
48624 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48625
48626 config PROC_VMCORE
48627 bool "/proc/vmcore support"
48628 - depends on PROC_FS && CRASH_DUMP
48629 - default y
48630 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48631 + default n
48632 help
48633 Exports the dump image of crashed kernel in ELF format.
48634
48635 @@ -59,8 +59,8 @@ config PROC_SYSCTL
48636 limited in memory.
48637
48638 config PROC_PAGE_MONITOR
48639 - default y
48640 - depends on PROC_FS && MMU
48641 + default n
48642 + depends on PROC_FS && MMU && !GRKERNSEC
48643 bool "Enable /proc page monitoring" if EXPERT
48644 help
48645 Various /proc files exist to monitor process memory utilization:
48646 diff --git a/fs/proc/array.c b/fs/proc/array.c
48647 index c1c207c..5179411 100644
48648 --- a/fs/proc/array.c
48649 +++ b/fs/proc/array.c
48650 @@ -60,6 +60,7 @@
48651 #include <linux/tty.h>
48652 #include <linux/string.h>
48653 #include <linux/mman.h>
48654 +#include <linux/grsecurity.h>
48655 #include <linux/proc_fs.h>
48656 #include <linux/ioport.h>
48657 #include <linux/uaccess.h>
48658 @@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48659 seq_putc(m, '\n');
48660 }
48661
48662 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48663 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
48664 +{
48665 + if (p->mm)
48666 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48667 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48668 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48669 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48670 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48671 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48672 + else
48673 + seq_printf(m, "PaX:\t-----\n");
48674 +}
48675 +#endif
48676 +
48677 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48678 struct pid *pid, struct task_struct *task)
48679 {
48680 @@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48681 task_cpus_allowed(m, task);
48682 cpuset_task_status_allowed(m, task);
48683 task_context_switch_counts(m, task);
48684 +
48685 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48686 + task_pax(m, task);
48687 +#endif
48688 +
48689 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48690 + task_grsec_rbac(m, task);
48691 +#endif
48692 +
48693 return 0;
48694 }
48695
48696 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48697 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48698 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48699 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48700 +#endif
48701 +
48702 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48703 struct pid *pid, struct task_struct *task, int whole)
48704 {
48705 @@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48706 char tcomm[sizeof(task->comm)];
48707 unsigned long flags;
48708
48709 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48710 + if (current->exec_id != m->exec_id) {
48711 + gr_log_badprocpid("stat");
48712 + return 0;
48713 + }
48714 +#endif
48715 +
48716 state = *get_task_state(task);
48717 vsize = eip = esp = 0;
48718 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48719 @@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48720 gtime = task->gtime;
48721 }
48722
48723 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48724 + if (PAX_RAND_FLAGS(mm)) {
48725 + eip = 0;
48726 + esp = 0;
48727 + wchan = 0;
48728 + }
48729 +#endif
48730 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48731 + wchan = 0;
48732 + eip =0;
48733 + esp =0;
48734 +#endif
48735 +
48736 /* scale priority and nice values from timeslices to -20..20 */
48737 /* to make it look like a "normal" Unix priority/nice value */
48738 priority = task_prio(task);
48739 @@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48740 seq_put_decimal_ull(m, ' ', vsize);
48741 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
48742 seq_put_decimal_ull(m, ' ', rsslim);
48743 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48744 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
48745 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
48746 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
48747 +#else
48748 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
48749 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
48750 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
48751 +#endif
48752 seq_put_decimal_ull(m, ' ', esp);
48753 seq_put_decimal_ull(m, ' ', eip);
48754 /* The signal information here is obsolete.
48755 @@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48756 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
48757 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
48758
48759 - if (mm && permitted) {
48760 + if (mm && permitted
48761 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48762 + && !PAX_RAND_FLAGS(mm)
48763 +#endif
48764 + ) {
48765 seq_put_decimal_ull(m, ' ', mm->start_data);
48766 seq_put_decimal_ull(m, ' ', mm->end_data);
48767 seq_put_decimal_ull(m, ' ', mm->start_brk);
48768 @@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48769 struct pid *pid, struct task_struct *task)
48770 {
48771 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48772 - struct mm_struct *mm = get_task_mm(task);
48773 + struct mm_struct *mm;
48774
48775 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48776 + if (current->exec_id != m->exec_id) {
48777 + gr_log_badprocpid("statm");
48778 + return 0;
48779 + }
48780 +#endif
48781 + mm = get_task_mm(task);
48782 if (mm) {
48783 size = task_statm(mm, &shared, &text, &data, &resident);
48784 mmput(mm);
48785 @@ -580,6 +648,21 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48786 return 0;
48787 }
48788
48789 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48790 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48791 +{
48792 + u32 curr_ip = 0;
48793 + unsigned long flags;
48794 +
48795 + if (lock_task_sighand(task, &flags)) {
48796 + curr_ip = task->signal->curr_ip;
48797 + unlock_task_sighand(task, &flags);
48798 + }
48799 +
48800 + return sprintf(buffer, "%pI4\n", &curr_ip);
48801 +}
48802 +#endif
48803 +
48804 #ifdef CONFIG_CHECKPOINT_RESTORE
48805 static struct pid *
48806 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
48807 diff --git a/fs/proc/base.c b/fs/proc/base.c
48808 index 437195f..cd2210d 100644
48809 --- a/fs/proc/base.c
48810 +++ b/fs/proc/base.c
48811 @@ -110,6 +110,14 @@ struct pid_entry {
48812 union proc_op op;
48813 };
48814
48815 +struct getdents_callback {
48816 + struct linux_dirent __user * current_dir;
48817 + struct linux_dirent __user * previous;
48818 + struct file * file;
48819 + int count;
48820 + int error;
48821 +};
48822 +
48823 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48824 .name = (NAME), \
48825 .len = sizeof(NAME) - 1, \
48826 @@ -209,6 +217,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48827 if (!mm->arg_end)
48828 goto out_mm; /* Shh! No looking before we're done */
48829
48830 + if (gr_acl_handle_procpidmem(task))
48831 + goto out_mm;
48832 +
48833 len = mm->arg_end - mm->arg_start;
48834
48835 if (len > PAGE_SIZE)
48836 @@ -236,12 +247,28 @@ out:
48837 return res;
48838 }
48839
48840 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48841 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48842 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48843 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48844 +#endif
48845 +
48846 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48847 {
48848 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
48849 int res = PTR_ERR(mm);
48850 if (mm && !IS_ERR(mm)) {
48851 unsigned int nwords = 0;
48852 +
48853 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48854 + /* allow if we're currently ptracing this task */
48855 + if (PAX_RAND_FLAGS(mm) &&
48856 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48857 + mmput(mm);
48858 + return 0;
48859 + }
48860 +#endif
48861 +
48862 do {
48863 nwords += 2;
48864 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48865 @@ -255,7 +282,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48866 }
48867
48868
48869 -#ifdef CONFIG_KALLSYMS
48870 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48871 /*
48872 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48873 * Returns the resolved symbol. If that fails, simply return the address.
48874 @@ -294,7 +321,7 @@ static void unlock_trace(struct task_struct *task)
48875 mutex_unlock(&task->signal->cred_guard_mutex);
48876 }
48877
48878 -#ifdef CONFIG_STACKTRACE
48879 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48880
48881 #define MAX_STACK_TRACE_DEPTH 64
48882
48883 @@ -486,7 +513,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48884 return count;
48885 }
48886
48887 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48888 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48889 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48890 {
48891 long nr;
48892 @@ -515,7 +542,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48893 /************************************************************************/
48894
48895 /* permission checks */
48896 -static int proc_fd_access_allowed(struct inode *inode)
48897 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48898 {
48899 struct task_struct *task;
48900 int allowed = 0;
48901 @@ -525,7 +552,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48902 */
48903 task = get_proc_task(inode);
48904 if (task) {
48905 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48906 + if (log)
48907 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48908 + else
48909 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48910 put_task_struct(task);
48911 }
48912 return allowed;
48913 @@ -563,10 +593,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
48914 struct task_struct *task,
48915 int hide_pid_min)
48916 {
48917 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48918 + return false;
48919 +
48920 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48921 + rcu_read_lock();
48922 + {
48923 + const struct cred *tmpcred = current_cred();
48924 + const struct cred *cred = __task_cred(task);
48925 +
48926 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48927 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48928 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48929 +#endif
48930 + ) {
48931 + rcu_read_unlock();
48932 + return true;
48933 + }
48934 + }
48935 + rcu_read_unlock();
48936 +
48937 + if (!pid->hide_pid)
48938 + return false;
48939 +#endif
48940 +
48941 if (pid->hide_pid < hide_pid_min)
48942 return true;
48943 if (in_group_p(pid->pid_gid))
48944 return true;
48945 +
48946 return ptrace_may_access(task, PTRACE_MODE_READ);
48947 }
48948
48949 @@ -584,7 +639,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48950 put_task_struct(task);
48951
48952 if (!has_perms) {
48953 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48954 + {
48955 +#else
48956 if (pid->hide_pid == 2) {
48957 +#endif
48958 /*
48959 * Let's make getdents(), stat(), and open()
48960 * consistent with each other. If a process
48961 @@ -682,6 +741,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
48962 if (!task)
48963 return -ESRCH;
48964
48965 + if (gr_acl_handle_procpidmem(task)) {
48966 + put_task_struct(task);
48967 + return -EPERM;
48968 + }
48969 +
48970 mm = mm_access(task, mode);
48971 put_task_struct(task);
48972
48973 @@ -695,16 +759,24 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
48974 mmput(mm);
48975 }
48976
48977 - /* OK to pass negative loff_t, we can catch out-of-range */
48978 - file->f_mode |= FMODE_UNSIGNED_OFFSET;
48979 file->private_data = mm;
48980
48981 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48982 + file->f_version = current->exec_id;
48983 +#endif
48984 +
48985 return 0;
48986 }
48987
48988 static int mem_open(struct inode *inode, struct file *file)
48989 {
48990 - return __mem_open(inode, file, PTRACE_MODE_ATTACH);
48991 + int ret;
48992 + ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
48993 +
48994 + /* OK to pass negative loff_t, we can catch out-of-range */
48995 + file->f_mode |= FMODE_UNSIGNED_OFFSET;
48996 +
48997 + return ret;
48998 }
48999
49000 static ssize_t mem_rw(struct file *file, char __user *buf,
49001 @@ -715,6 +787,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
49002 ssize_t copied;
49003 char *page;
49004
49005 +#ifdef CONFIG_GRKERNSEC
49006 + if (write)
49007 + return -EPERM;
49008 +#endif
49009 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49010 + if (file->f_version != current->exec_id) {
49011 + gr_log_badprocpid("mem");
49012 + return 0;
49013 + }
49014 +#endif
49015 +
49016 if (!mm)
49017 return 0;
49018
49019 @@ -819,6 +902,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
49020 if (!mm)
49021 return 0;
49022
49023 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49024 + if (file->f_version != current->exec_id) {
49025 + gr_log_badprocpid("environ");
49026 + return 0;
49027 + }
49028 +#endif
49029 +
49030 page = (char *)__get_free_page(GFP_TEMPORARY);
49031 if (!page)
49032 return -ENOMEM;
49033 @@ -827,15 +917,17 @@ static ssize_t environ_read(struct file *file, char __user *buf,
49034 if (!atomic_inc_not_zero(&mm->mm_users))
49035 goto free;
49036 while (count > 0) {
49037 - int this_len, retval, max_len;
49038 + size_t this_len, max_len;
49039 + int retval;
49040 +
49041 + if (src >= (mm->env_end - mm->env_start))
49042 + break;
49043
49044 this_len = mm->env_end - (mm->env_start + src);
49045
49046 - if (this_len <= 0)
49047 - break;
49048
49049 - max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
49050 - this_len = (this_len > max_len) ? max_len : this_len;
49051 + max_len = min_t(size_t, PAGE_SIZE, count);
49052 + this_len = min(max_len, this_len);
49053
49054 retval = access_remote_vm(mm, (mm->env_start + src),
49055 page, this_len, 0);
49056 @@ -1433,7 +1525,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
49057 path_put(&nd->path);
49058
49059 /* Are we allowed to snoop on the tasks file descriptors? */
49060 - if (!proc_fd_access_allowed(inode))
49061 + if (!proc_fd_access_allowed(inode, 0))
49062 goto out;
49063
49064 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
49065 @@ -1472,8 +1564,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
49066 struct path path;
49067
49068 /* Are we allowed to snoop on the tasks file descriptors? */
49069 - if (!proc_fd_access_allowed(inode))
49070 - goto out;
49071 + /* logging this is needed for learning on chromium to work properly,
49072 + but we don't want to flood the logs from 'ps' which does a readlink
49073 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
49074 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
49075 + */
49076 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
49077 + if (!proc_fd_access_allowed(inode,0))
49078 + goto out;
49079 + } else {
49080 + if (!proc_fd_access_allowed(inode,1))
49081 + goto out;
49082 + }
49083
49084 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
49085 if (error)
49086 @@ -1538,7 +1640,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
49087 rcu_read_lock();
49088 cred = __task_cred(task);
49089 inode->i_uid = cred->euid;
49090 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49091 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49092 +#else
49093 inode->i_gid = cred->egid;
49094 +#endif
49095 rcu_read_unlock();
49096 }
49097 security_task_to_inode(task, inode);
49098 @@ -1574,10 +1680,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
49099 return -ENOENT;
49100 }
49101 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
49102 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49103 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
49104 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49105 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
49106 +#endif
49107 task_dumpable(task)) {
49108 cred = __task_cred(task);
49109 stat->uid = cred->euid;
49110 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49111 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
49112 +#else
49113 stat->gid = cred->egid;
49114 +#endif
49115 }
49116 }
49117 rcu_read_unlock();
49118 @@ -1615,11 +1730,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
49119
49120 if (task) {
49121 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
49122 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49123 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
49124 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49125 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
49126 +#endif
49127 task_dumpable(task)) {
49128 rcu_read_lock();
49129 cred = __task_cred(task);
49130 inode->i_uid = cred->euid;
49131 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49132 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49133 +#else
49134 inode->i_gid = cred->egid;
49135 +#endif
49136 rcu_read_unlock();
49137 } else {
49138 inode->i_uid = GLOBAL_ROOT_UID;
49139 @@ -1737,7 +1861,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
49140 int fd = proc_fd(inode);
49141
49142 if (task) {
49143 - files = get_files_struct(task);
49144 + if (!gr_acl_handle_procpidmem(task))
49145 + files = get_files_struct(task);
49146 put_task_struct(task);
49147 }
49148 if (files) {
49149 @@ -2336,11 +2461,21 @@ static const struct file_operations proc_map_files_operations = {
49150 */
49151 static int proc_fd_permission(struct inode *inode, int mask)
49152 {
49153 + struct task_struct *task;
49154 int rv = generic_permission(inode, mask);
49155 - if (rv == 0)
49156 - return 0;
49157 +
49158 if (task_pid(current) == proc_pid(inode))
49159 rv = 0;
49160 +
49161 + task = get_proc_task(inode);
49162 + if (task == NULL)
49163 + return rv;
49164 +
49165 + if (gr_acl_handle_procpidmem(task))
49166 + rv = -EACCES;
49167 +
49168 + put_task_struct(task);
49169 +
49170 return rv;
49171 }
49172
49173 @@ -2450,6 +2585,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
49174 if (!task)
49175 goto out_no_task;
49176
49177 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49178 + goto out;
49179 +
49180 /*
49181 * Yes, it does not scale. And it should not. Don't add
49182 * new entries into /proc/<tgid>/ without very good reasons.
49183 @@ -2494,6 +2632,9 @@ static int proc_pident_readdir(struct file *filp,
49184 if (!task)
49185 goto out_no_task;
49186
49187 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49188 + goto out;
49189 +
49190 ret = 0;
49191 i = filp->f_pos;
49192 switch (i) {
49193 @@ -2764,7 +2905,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
49194 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
49195 void *cookie)
49196 {
49197 - char *s = nd_get_link(nd);
49198 + const char *s = nd_get_link(nd);
49199 if (!IS_ERR(s))
49200 __putname(s);
49201 }
49202 @@ -3033,7 +3174,7 @@ static const struct pid_entry tgid_base_stuff[] = {
49203 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
49204 #endif
49205 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49206 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49207 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49208 INF("syscall", S_IRUGO, proc_pid_syscall),
49209 #endif
49210 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49211 @@ -3058,10 +3199,10 @@ static const struct pid_entry tgid_base_stuff[] = {
49212 #ifdef CONFIG_SECURITY
49213 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49214 #endif
49215 -#ifdef CONFIG_KALLSYMS
49216 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49217 INF("wchan", S_IRUGO, proc_pid_wchan),
49218 #endif
49219 -#ifdef CONFIG_STACKTRACE
49220 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49221 ONE("stack", S_IRUGO, proc_pid_stack),
49222 #endif
49223 #ifdef CONFIG_SCHEDSTATS
49224 @@ -3095,6 +3236,9 @@ static const struct pid_entry tgid_base_stuff[] = {
49225 #ifdef CONFIG_HARDWALL
49226 INF("hardwall", S_IRUGO, proc_pid_hardwall),
49227 #endif
49228 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49229 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
49230 +#endif
49231 #ifdef CONFIG_USER_NS
49232 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
49233 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
49234 @@ -3225,7 +3369,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
49235 if (!inode)
49236 goto out;
49237
49238 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49239 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
49240 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49241 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49242 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
49243 +#else
49244 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
49245 +#endif
49246 inode->i_op = &proc_tgid_base_inode_operations;
49247 inode->i_fop = &proc_tgid_base_operations;
49248 inode->i_flags|=S_IMMUTABLE;
49249 @@ -3267,7 +3418,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
49250 if (!task)
49251 goto out;
49252
49253 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49254 + goto out_put_task;
49255 +
49256 result = proc_pid_instantiate(dir, dentry, task, NULL);
49257 +out_put_task:
49258 put_task_struct(task);
49259 out:
49260 return result;
49261 @@ -3330,6 +3485,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
49262 static int fake_filldir(void *buf, const char *name, int namelen,
49263 loff_t offset, u64 ino, unsigned d_type)
49264 {
49265 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
49266 + __buf->error = -EINVAL;
49267 return 0;
49268 }
49269
49270 @@ -3396,7 +3553,7 @@ static const struct pid_entry tid_base_stuff[] = {
49271 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49272 #endif
49273 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49274 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49275 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49276 INF("syscall", S_IRUGO, proc_pid_syscall),
49277 #endif
49278 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49279 @@ -3423,10 +3580,10 @@ static const struct pid_entry tid_base_stuff[] = {
49280 #ifdef CONFIG_SECURITY
49281 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49282 #endif
49283 -#ifdef CONFIG_KALLSYMS
49284 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49285 INF("wchan", S_IRUGO, proc_pid_wchan),
49286 #endif
49287 -#ifdef CONFIG_STACKTRACE
49288 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49289 ONE("stack", S_IRUGO, proc_pid_stack),
49290 #endif
49291 #ifdef CONFIG_SCHEDSTATS
49292 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49293 index 82676e3..5f8518a 100644
49294 --- a/fs/proc/cmdline.c
49295 +++ b/fs/proc/cmdline.c
49296 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
49297
49298 static int __init proc_cmdline_init(void)
49299 {
49300 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49301 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49302 +#else
49303 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49304 +#endif
49305 return 0;
49306 }
49307 module_init(proc_cmdline_init);
49308 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49309 index b143471..bb105e5 100644
49310 --- a/fs/proc/devices.c
49311 +++ b/fs/proc/devices.c
49312 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
49313
49314 static int __init proc_devices_init(void)
49315 {
49316 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49317 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49318 +#else
49319 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49320 +#endif
49321 return 0;
49322 }
49323 module_init(proc_devices_init);
49324 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
49325 index 7ac817b..abab1a5 100644
49326 --- a/fs/proc/inode.c
49327 +++ b/fs/proc/inode.c
49328 @@ -21,11 +21,17 @@
49329 #include <linux/seq_file.h>
49330 #include <linux/slab.h>
49331 #include <linux/mount.h>
49332 +#include <linux/grsecurity.h>
49333
49334 #include <asm/uaccess.h>
49335
49336 #include "internal.h"
49337
49338 +#ifdef CONFIG_PROC_SYSCTL
49339 +extern const struct inode_operations proc_sys_inode_operations;
49340 +extern const struct inode_operations proc_sys_dir_operations;
49341 +#endif
49342 +
49343 static void proc_evict_inode(struct inode *inode)
49344 {
49345 struct proc_dir_entry *de;
49346 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
49347 ns_ops = PROC_I(inode)->ns_ops;
49348 if (ns_ops && ns_ops->put)
49349 ns_ops->put(PROC_I(inode)->ns);
49350 +
49351 +#ifdef CONFIG_PROC_SYSCTL
49352 + if (inode->i_op == &proc_sys_inode_operations ||
49353 + inode->i_op == &proc_sys_dir_operations)
49354 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49355 +#endif
49356 +
49357 }
49358
49359 static struct kmem_cache * proc_inode_cachep;
49360 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
49361 if (de->mode) {
49362 inode->i_mode = de->mode;
49363 inode->i_uid = de->uid;
49364 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49365 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49366 +#else
49367 inode->i_gid = de->gid;
49368 +#endif
49369 }
49370 if (de->size)
49371 inode->i_size = de->size;
49372 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
49373 index eca4aca..19166b2 100644
49374 --- a/fs/proc/internal.h
49375 +++ b/fs/proc/internal.h
49376 @@ -52,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
49377 struct pid *pid, struct task_struct *task);
49378 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49379 struct pid *pid, struct task_struct *task);
49380 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49381 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49382 +#endif
49383 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49384
49385 extern const struct file_operations proc_tid_children_operations;
49386 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
49387 index 86c67ee..cdca321 100644
49388 --- a/fs/proc/kcore.c
49389 +++ b/fs/proc/kcore.c
49390 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49391 * the addresses in the elf_phdr on our list.
49392 */
49393 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49394 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49395 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49396 + if (tsz > buflen)
49397 tsz = buflen;
49398 -
49399 +
49400 while (buflen) {
49401 struct kcore_list *m;
49402
49403 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49404 kfree(elf_buf);
49405 } else {
49406 if (kern_addr_valid(start)) {
49407 - unsigned long n;
49408 + char *elf_buf;
49409 + mm_segment_t oldfs;
49410
49411 - n = copy_to_user(buffer, (char *)start, tsz);
49412 - /*
49413 - * We cannot distinguish between fault on source
49414 - * and fault on destination. When this happens
49415 - * we clear too and hope it will trigger the
49416 - * EFAULT again.
49417 - */
49418 - if (n) {
49419 - if (clear_user(buffer + tsz - n,
49420 - n))
49421 + elf_buf = kmalloc(tsz, GFP_KERNEL);
49422 + if (!elf_buf)
49423 + return -ENOMEM;
49424 + oldfs = get_fs();
49425 + set_fs(KERNEL_DS);
49426 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49427 + set_fs(oldfs);
49428 + if (copy_to_user(buffer, elf_buf, tsz)) {
49429 + kfree(elf_buf);
49430 return -EFAULT;
49431 + }
49432 }
49433 + set_fs(oldfs);
49434 + kfree(elf_buf);
49435 } else {
49436 if (clear_user(buffer, tsz))
49437 return -EFAULT;
49438 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49439
49440 static int open_kcore(struct inode *inode, struct file *filp)
49441 {
49442 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49443 + return -EPERM;
49444 +#endif
49445 if (!capable(CAP_SYS_RAWIO))
49446 return -EPERM;
49447 if (kcore_need_update)
49448 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
49449 index 80e4645..53e5fcf 100644
49450 --- a/fs/proc/meminfo.c
49451 +++ b/fs/proc/meminfo.c
49452 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49453 vmi.used >> 10,
49454 vmi.largest_chunk >> 10
49455 #ifdef CONFIG_MEMORY_FAILURE
49456 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49457 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49458 #endif
49459 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49460 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
49461 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49462 index b1822dd..df622cb 100644
49463 --- a/fs/proc/nommu.c
49464 +++ b/fs/proc/nommu.c
49465 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
49466 if (len < 1)
49467 len = 1;
49468 seq_printf(m, "%*c", len, ' ');
49469 - seq_path(m, &file->f_path, "");
49470 + seq_path(m, &file->f_path, "\n\\");
49471 }
49472
49473 seq_putc(m, '\n');
49474 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
49475 index 06e1cc1..177cd98 100644
49476 --- a/fs/proc/proc_net.c
49477 +++ b/fs/proc/proc_net.c
49478 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
49479 struct task_struct *task;
49480 struct nsproxy *ns;
49481 struct net *net = NULL;
49482 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49483 + const struct cred *cred = current_cred();
49484 +#endif
49485 +
49486 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49487 + if (cred->fsuid)
49488 + return net;
49489 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49490 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49491 + return net;
49492 +#endif
49493
49494 rcu_read_lock();
49495 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49496 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
49497 index 3476bca..75e1775 100644
49498 --- a/fs/proc/proc_sysctl.c
49499 +++ b/fs/proc/proc_sysctl.c
49500 @@ -12,11 +12,15 @@
49501 #include <linux/module.h>
49502 #include "internal.h"
49503
49504 +extern int gr_handle_chroot_sysctl(const int op);
49505 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
49506 + const int op);
49507 +
49508 static const struct dentry_operations proc_sys_dentry_operations;
49509 static const struct file_operations proc_sys_file_operations;
49510 -static const struct inode_operations proc_sys_inode_operations;
49511 +const struct inode_operations proc_sys_inode_operations;
49512 static const struct file_operations proc_sys_dir_file_operations;
49513 -static const struct inode_operations proc_sys_dir_operations;
49514 +const struct inode_operations proc_sys_dir_operations;
49515
49516 void proc_sys_poll_notify(struct ctl_table_poll *poll)
49517 {
49518 @@ -462,17 +466,22 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
49519
49520 err = ERR_PTR(-ENOMEM);
49521 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
49522 - if (h)
49523 - sysctl_head_finish(h);
49524 -
49525 if (!inode)
49526 goto out;
49527
49528 err = NULL;
49529 d_set_d_op(dentry, &proc_sys_dentry_operations);
49530 +
49531 + gr_handle_proc_create(dentry, inode);
49532 +
49533 d_add(dentry, inode);
49534
49535 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
49536 + err = ERR_PTR(-ENOENT);
49537 +
49538 out:
49539 + if (h)
49540 + sysctl_head_finish(h);
49541 sysctl_head_finish(head);
49542 return err;
49543 }
49544 @@ -483,18 +492,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49545 struct inode *inode = filp->f_path.dentry->d_inode;
49546 struct ctl_table_header *head = grab_header(inode);
49547 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
49548 + int op = write ? MAY_WRITE : MAY_READ;
49549 ssize_t error;
49550 size_t res;
49551
49552 if (IS_ERR(head))
49553 return PTR_ERR(head);
49554
49555 +
49556 /*
49557 * At this point we know that the sysctl was not unregistered
49558 * and won't be until we finish.
49559 */
49560 error = -EPERM;
49561 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
49562 + if (sysctl_perm(head->root, table, op))
49563 goto out;
49564
49565 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
49566 @@ -502,6 +513,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49567 if (!table->proc_handler)
49568 goto out;
49569
49570 +#ifdef CONFIG_GRKERNSEC
49571 + error = -EPERM;
49572 + if (gr_handle_chroot_sysctl(op))
49573 + goto out;
49574 + dget(filp->f_path.dentry);
49575 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
49576 + dput(filp->f_path.dentry);
49577 + goto out;
49578 + }
49579 + dput(filp->f_path.dentry);
49580 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
49581 + goto out;
49582 + if (write && !capable(CAP_SYS_ADMIN))
49583 + goto out;
49584 +#endif
49585 +
49586 /* careful: calling conventions are nasty here */
49587 res = count;
49588 error = table->proc_handler(table, write, buf, &res, ppos);
49589 @@ -599,6 +626,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
49590 return -ENOMEM;
49591 } else {
49592 d_set_d_op(child, &proc_sys_dentry_operations);
49593 +
49594 + gr_handle_proc_create(child, inode);
49595 +
49596 d_add(child, inode);
49597 }
49598 } else {
49599 @@ -642,6 +672,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
49600 if ((*pos)++ < file->f_pos)
49601 return 0;
49602
49603 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
49604 + return 0;
49605 +
49606 if (unlikely(S_ISLNK(table->mode)))
49607 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
49608 else
49609 @@ -759,6 +792,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49610 if (IS_ERR(head))
49611 return PTR_ERR(head);
49612
49613 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
49614 + return -ENOENT;
49615 +
49616 generic_fillattr(inode, stat);
49617 if (table)
49618 stat->mode = (stat->mode & S_IFMT) | table->mode;
49619 @@ -781,13 +817,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
49620 .llseek = generic_file_llseek,
49621 };
49622
49623 -static const struct inode_operations proc_sys_inode_operations = {
49624 +const struct inode_operations proc_sys_inode_operations = {
49625 .permission = proc_sys_permission,
49626 .setattr = proc_sys_setattr,
49627 .getattr = proc_sys_getattr,
49628 };
49629
49630 -static const struct inode_operations proc_sys_dir_operations = {
49631 +const struct inode_operations proc_sys_dir_operations = {
49632 .lookup = proc_sys_lookup,
49633 .permission = proc_sys_permission,
49634 .setattr = proc_sys_setattr,
49635 diff --git a/fs/proc/root.c b/fs/proc/root.c
49636 index 7c30fce..b3d3aa2 100644
49637 --- a/fs/proc/root.c
49638 +++ b/fs/proc/root.c
49639 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
49640 #ifdef CONFIG_PROC_DEVICETREE
49641 proc_device_tree_init();
49642 #endif
49643 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49644 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49645 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49646 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49647 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49648 +#endif
49649 +#else
49650 proc_mkdir("bus", NULL);
49651 +#endif
49652 proc_sys_init();
49653 }
49654
49655 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49656 index 4540b8f..1b9772f 100644
49657 --- a/fs/proc/task_mmu.c
49658 +++ b/fs/proc/task_mmu.c
49659 @@ -11,12 +11,19 @@
49660 #include <linux/rmap.h>
49661 #include <linux/swap.h>
49662 #include <linux/swapops.h>
49663 +#include <linux/grsecurity.h>
49664
49665 #include <asm/elf.h>
49666 #include <asm/uaccess.h>
49667 #include <asm/tlbflush.h>
49668 #include "internal.h"
49669
49670 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49671 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49672 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
49673 + _mm->pax_flags & MF_PAX_SEGMEXEC))
49674 +#endif
49675 +
49676 void task_mem(struct seq_file *m, struct mm_struct *mm)
49677 {
49678 unsigned long data, text, lib, swap;
49679 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49680 "VmExe:\t%8lu kB\n"
49681 "VmLib:\t%8lu kB\n"
49682 "VmPTE:\t%8lu kB\n"
49683 - "VmSwap:\t%8lu kB\n",
49684 - hiwater_vm << (PAGE_SHIFT-10),
49685 + "VmSwap:\t%8lu kB\n"
49686 +
49687 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49688 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49689 +#endif
49690 +
49691 + ,hiwater_vm << (PAGE_SHIFT-10),
49692 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49693 mm->locked_vm << (PAGE_SHIFT-10),
49694 mm->pinned_vm << (PAGE_SHIFT-10),
49695 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49696 data << (PAGE_SHIFT-10),
49697 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49698 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49699 - swap << (PAGE_SHIFT-10));
49700 + swap << (PAGE_SHIFT-10)
49701 +
49702 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49703 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49704 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
49705 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
49706 +#else
49707 + , mm->context.user_cs_base
49708 + , mm->context.user_cs_limit
49709 +#endif
49710 +#endif
49711 +
49712 + );
49713 }
49714
49715 unsigned long task_vsize(struct mm_struct *mm)
49716 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49717 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49718 }
49719
49720 - /* We don't show the stack guard page in /proc/maps */
49721 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49722 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49723 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49724 +#else
49725 start = vma->vm_start;
49726 - if (stack_guard_page_start(vma, start))
49727 - start += PAGE_SIZE;
49728 end = vma->vm_end;
49729 - if (stack_guard_page_end(vma, end))
49730 - end -= PAGE_SIZE;
49731 +#endif
49732
49733 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49734 start,
49735 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49736 flags & VM_WRITE ? 'w' : '-',
49737 flags & VM_EXEC ? 'x' : '-',
49738 flags & VM_MAYSHARE ? 's' : 'p',
49739 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49740 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49741 +#else
49742 pgoff,
49743 +#endif
49744 MAJOR(dev), MINOR(dev), ino, &len);
49745
49746 /*
49747 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49748 */
49749 if (file) {
49750 pad_len_spaces(m, len);
49751 - seq_path(m, &file->f_path, "\n");
49752 + seq_path(m, &file->f_path, "\n\\");
49753 goto done;
49754 }
49755
49756 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49757 * Thread stack in /proc/PID/task/TID/maps or
49758 * the main process stack.
49759 */
49760 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
49761 - vma->vm_end >= mm->start_stack)) {
49762 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49763 + (vma->vm_start <= mm->start_stack &&
49764 + vma->vm_end >= mm->start_stack)) {
49765 name = "[stack]";
49766 } else {
49767 /* Thread stack in /proc/PID/maps */
49768 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
49769 struct proc_maps_private *priv = m->private;
49770 struct task_struct *task = priv->task;
49771
49772 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49773 + if (current->exec_id != m->exec_id) {
49774 + gr_log_badprocpid("maps");
49775 + return 0;
49776 + }
49777 +#endif
49778 +
49779 show_map_vma(m, vma, is_pid);
49780
49781 if (m->count < m->size) /* vma is copied successfully */
49782 @@ -492,12 +528,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49783 .private = &mss,
49784 };
49785
49786 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49787 + if (current->exec_id != m->exec_id) {
49788 + gr_log_badprocpid("smaps");
49789 + return 0;
49790 + }
49791 +#endif
49792 memset(&mss, 0, sizeof mss);
49793 - mss.vma = vma;
49794 - /* mmap_sem is held in m_start */
49795 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49796 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49797 -
49798 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49799 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49800 +#endif
49801 + mss.vma = vma;
49802 + /* mmap_sem is held in m_start */
49803 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49804 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49805 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49806 + }
49807 +#endif
49808 show_map_vma(m, vma, is_pid);
49809
49810 seq_printf(m,
49811 @@ -515,7 +562,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49812 "KernelPageSize: %8lu kB\n"
49813 "MMUPageSize: %8lu kB\n"
49814 "Locked: %8lu kB\n",
49815 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49816 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49817 +#else
49818 (vma->vm_end - vma->vm_start) >> 10,
49819 +#endif
49820 mss.resident >> 10,
49821 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49822 mss.shared_clean >> 10,
49823 @@ -1164,6 +1215,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49824 int n;
49825 char buffer[50];
49826
49827 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49828 + if (current->exec_id != m->exec_id) {
49829 + gr_log_badprocpid("numa_maps");
49830 + return 0;
49831 + }
49832 +#endif
49833 +
49834 if (!mm)
49835 return 0;
49836
49837 @@ -1181,11 +1239,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49838 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49839 mpol_cond_put(pol);
49840
49841 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49842 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49843 +#else
49844 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49845 +#endif
49846
49847 if (file) {
49848 seq_printf(m, " file=");
49849 - seq_path(m, &file->f_path, "\n\t= ");
49850 + seq_path(m, &file->f_path, "\n\t\\= ");
49851 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49852 seq_printf(m, " heap");
49853 } else {
49854 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49855 index 1ccfa53..0848f95 100644
49856 --- a/fs/proc/task_nommu.c
49857 +++ b/fs/proc/task_nommu.c
49858 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49859 else
49860 bytes += kobjsize(mm);
49861
49862 - if (current->fs && current->fs->users > 1)
49863 + if (current->fs && atomic_read(&current->fs->users) > 1)
49864 sbytes += kobjsize(current->fs);
49865 else
49866 bytes += kobjsize(current->fs);
49867 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
49868
49869 if (file) {
49870 pad_len_spaces(m, len);
49871 - seq_path(m, &file->f_path, "");
49872 + seq_path(m, &file->f_path, "\n\\");
49873 } else if (mm) {
49874 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
49875
49876 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49877 index d67908b..d13f6a6 100644
49878 --- a/fs/quota/netlink.c
49879 +++ b/fs/quota/netlink.c
49880 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49881 void quota_send_warning(short type, unsigned int id, dev_t dev,
49882 const char warntype)
49883 {
49884 - static atomic_t seq;
49885 + static atomic_unchecked_t seq;
49886 struct sk_buff *skb;
49887 void *msg_head;
49888 int ret;
49889 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49890 "VFS: Not enough memory to send quota warning.\n");
49891 return;
49892 }
49893 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49894 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49895 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49896 if (!msg_head) {
49897 printk(KERN_ERR
49898 diff --git a/fs/readdir.c b/fs/readdir.c
49899 index 39e3370..20d446d 100644
49900 --- a/fs/readdir.c
49901 +++ b/fs/readdir.c
49902 @@ -17,6 +17,7 @@
49903 #include <linux/security.h>
49904 #include <linux/syscalls.h>
49905 #include <linux/unistd.h>
49906 +#include <linux/namei.h>
49907
49908 #include <asm/uaccess.h>
49909
49910 @@ -67,6 +68,7 @@ struct old_linux_dirent {
49911
49912 struct readdir_callback {
49913 struct old_linux_dirent __user * dirent;
49914 + struct file * file;
49915 int result;
49916 };
49917
49918 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49919 buf->result = -EOVERFLOW;
49920 return -EOVERFLOW;
49921 }
49922 +
49923 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49924 + return 0;
49925 +
49926 buf->result++;
49927 dirent = buf->dirent;
49928 if (!access_ok(VERIFY_WRITE, dirent,
49929 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49930
49931 buf.result = 0;
49932 buf.dirent = dirent;
49933 + buf.file = file;
49934
49935 error = vfs_readdir(file, fillonedir, &buf);
49936 if (buf.result)
49937 @@ -141,6 +148,7 @@ struct linux_dirent {
49938 struct getdents_callback {
49939 struct linux_dirent __user * current_dir;
49940 struct linux_dirent __user * previous;
49941 + struct file * file;
49942 int count;
49943 int error;
49944 };
49945 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49946 buf->error = -EOVERFLOW;
49947 return -EOVERFLOW;
49948 }
49949 +
49950 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49951 + return 0;
49952 +
49953 dirent = buf->previous;
49954 if (dirent) {
49955 if (__put_user(offset, &dirent->d_off))
49956 @@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49957 buf.previous = NULL;
49958 buf.count = count;
49959 buf.error = 0;
49960 + buf.file = file;
49961
49962 error = vfs_readdir(file, filldir, &buf);
49963 if (error >= 0)
49964 @@ -226,6 +239,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49965 struct getdents_callback64 {
49966 struct linux_dirent64 __user * current_dir;
49967 struct linux_dirent64 __user * previous;
49968 + struct file *file;
49969 int count;
49970 int error;
49971 };
49972 @@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49973 buf->error = -EINVAL; /* only used if we fail.. */
49974 if (reclen > buf->count)
49975 return -EINVAL;
49976 +
49977 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49978 + return 0;
49979 +
49980 dirent = buf->previous;
49981 if (dirent) {
49982 if (__put_user(offset, &dirent->d_off))
49983 @@ -287,6 +305,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49984
49985 buf.current_dir = dirent;
49986 buf.previous = NULL;
49987 + buf.file = file;
49988 buf.count = count;
49989 buf.error = 0;
49990
49991 @@ -295,7 +314,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49992 error = buf.error;
49993 lastdirent = buf.previous;
49994 if (lastdirent) {
49995 - typeof(lastdirent->d_off) d_off = file->f_pos;
49996 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49997 if (__put_user(d_off, &lastdirent->d_off))
49998 error = -EFAULT;
49999 else
50000 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
50001 index 2b7882b..1c5ef48 100644
50002 --- a/fs/reiserfs/do_balan.c
50003 +++ b/fs/reiserfs/do_balan.c
50004 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
50005 return;
50006 }
50007
50008 - atomic_inc(&(fs_generation(tb->tb_sb)));
50009 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
50010 do_balance_starts(tb);
50011
50012 /* balance leaf returns 0 except if combining L R and S into
50013 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
50014 index 2c1ade6..8c59d8d 100644
50015 --- a/fs/reiserfs/procfs.c
50016 +++ b/fs/reiserfs/procfs.c
50017 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
50018 "SMALL_TAILS " : "NO_TAILS ",
50019 replay_only(sb) ? "REPLAY_ONLY " : "",
50020 convert_reiserfs(sb) ? "CONV " : "",
50021 - atomic_read(&r->s_generation_counter),
50022 + atomic_read_unchecked(&r->s_generation_counter),
50023 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
50024 SF(s_do_balance), SF(s_unneeded_left_neighbor),
50025 SF(s_good_search_by_key_reada), SF(s_bmaps),
50026 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
50027 index 33215f5..c5d427a 100644
50028 --- a/fs/reiserfs/reiserfs.h
50029 +++ b/fs/reiserfs/reiserfs.h
50030 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
50031 /* Comment? -Hans */
50032 wait_queue_head_t s_wait;
50033 /* To be obsoleted soon by per buffer seals.. -Hans */
50034 - atomic_t s_generation_counter; // increased by one every time the
50035 + atomic_unchecked_t s_generation_counter; // increased by one every time the
50036 // tree gets re-balanced
50037 unsigned long s_properties; /* File system properties. Currently holds
50038 on-disk FS format */
50039 @@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
50040 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
50041
50042 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
50043 -#define get_generation(s) atomic_read (&fs_generation(s))
50044 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
50045 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
50046 #define __fs_changed(gen,s) (gen != get_generation (s))
50047 #define fs_changed(gen,s) \
50048 diff --git a/fs/select.c b/fs/select.c
50049 index db14c78..3aae1bd 100644
50050 --- a/fs/select.c
50051 +++ b/fs/select.c
50052 @@ -20,6 +20,7 @@
50053 #include <linux/export.h>
50054 #include <linux/slab.h>
50055 #include <linux/poll.h>
50056 +#include <linux/security.h>
50057 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
50058 #include <linux/file.h>
50059 #include <linux/fdtable.h>
50060 @@ -831,6 +832,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
50061 struct poll_list *walk = head;
50062 unsigned long todo = nfds;
50063
50064 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
50065 if (nfds > rlimit(RLIMIT_NOFILE))
50066 return -EINVAL;
50067
50068 diff --git a/fs/seq_file.c b/fs/seq_file.c
50069 index 0cbd049..64e705c 100644
50070 --- a/fs/seq_file.c
50071 +++ b/fs/seq_file.c
50072 @@ -9,6 +9,7 @@
50073 #include <linux/export.h>
50074 #include <linux/seq_file.h>
50075 #include <linux/slab.h>
50076 +#include <linux/sched.h>
50077
50078 #include <asm/uaccess.h>
50079 #include <asm/page.h>
50080 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
50081 memset(p, 0, sizeof(*p));
50082 mutex_init(&p->lock);
50083 p->op = op;
50084 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50085 + p->exec_id = current->exec_id;
50086 +#endif
50087
50088 /*
50089 * Wrappers around seq_open(e.g. swaps_open) need to be
50090 @@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
50091 return 0;
50092 }
50093 if (!m->buf) {
50094 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
50095 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
50096 if (!m->buf)
50097 return -ENOMEM;
50098 }
50099 @@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
50100 Eoverflow:
50101 m->op->stop(m, p);
50102 kfree(m->buf);
50103 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
50104 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
50105 return !m->buf ? -ENOMEM : -EAGAIN;
50106 }
50107
50108 @@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
50109
50110 /* grab buffer if we didn't have one */
50111 if (!m->buf) {
50112 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
50113 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
50114 if (!m->buf)
50115 goto Enomem;
50116 }
50117 @@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
50118 goto Fill;
50119 m->op->stop(m, p);
50120 kfree(m->buf);
50121 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
50122 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
50123 if (!m->buf)
50124 goto Enomem;
50125 m->count = 0;
50126 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
50127 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
50128 void *data)
50129 {
50130 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
50131 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
50132 int res = -ENOMEM;
50133
50134 if (op) {
50135 diff --git a/fs/splice.c b/fs/splice.c
50136 index 7bf08fa..eb35c2f 100644
50137 --- a/fs/splice.c
50138 +++ b/fs/splice.c
50139 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
50140 pipe_lock(pipe);
50141
50142 for (;;) {
50143 - if (!pipe->readers) {
50144 + if (!atomic_read(&pipe->readers)) {
50145 send_sig(SIGPIPE, current, 0);
50146 if (!ret)
50147 ret = -EPIPE;
50148 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
50149 do_wakeup = 0;
50150 }
50151
50152 - pipe->waiting_writers++;
50153 + atomic_inc(&pipe->waiting_writers);
50154 pipe_wait(pipe);
50155 - pipe->waiting_writers--;
50156 + atomic_dec(&pipe->waiting_writers);
50157 }
50158
50159 pipe_unlock(pipe);
50160 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
50161 old_fs = get_fs();
50162 set_fs(get_ds());
50163 /* The cast to a user pointer is valid due to the set_fs() */
50164 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
50165 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
50166 set_fs(old_fs);
50167
50168 return res;
50169 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
50170 old_fs = get_fs();
50171 set_fs(get_ds());
50172 /* The cast to a user pointer is valid due to the set_fs() */
50173 - res = vfs_write(file, (const char __user *)buf, count, &pos);
50174 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
50175 set_fs(old_fs);
50176
50177 return res;
50178 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
50179 goto err;
50180
50181 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
50182 - vec[i].iov_base = (void __user *) page_address(page);
50183 + vec[i].iov_base = (void __force_user *) page_address(page);
50184 vec[i].iov_len = this_len;
50185 spd.pages[i] = page;
50186 spd.nr_pages++;
50187 @@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
50188 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
50189 {
50190 while (!pipe->nrbufs) {
50191 - if (!pipe->writers)
50192 + if (!atomic_read(&pipe->writers))
50193 return 0;
50194
50195 - if (!pipe->waiting_writers && sd->num_spliced)
50196 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
50197 return 0;
50198
50199 if (sd->flags & SPLICE_F_NONBLOCK)
50200 @@ -1187,7 +1187,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
50201 * out of the pipe right after the splice_to_pipe(). So set
50202 * PIPE_READERS appropriately.
50203 */
50204 - pipe->readers = 1;
50205 + atomic_set(&pipe->readers, 1);
50206
50207 current->splice_pipe = pipe;
50208 }
50209 @@ -1740,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50210 ret = -ERESTARTSYS;
50211 break;
50212 }
50213 - if (!pipe->writers)
50214 + if (!atomic_read(&pipe->writers))
50215 break;
50216 - if (!pipe->waiting_writers) {
50217 + if (!atomic_read(&pipe->waiting_writers)) {
50218 if (flags & SPLICE_F_NONBLOCK) {
50219 ret = -EAGAIN;
50220 break;
50221 @@ -1774,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50222 pipe_lock(pipe);
50223
50224 while (pipe->nrbufs >= pipe->buffers) {
50225 - if (!pipe->readers) {
50226 + if (!atomic_read(&pipe->readers)) {
50227 send_sig(SIGPIPE, current, 0);
50228 ret = -EPIPE;
50229 break;
50230 @@ -1787,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50231 ret = -ERESTARTSYS;
50232 break;
50233 }
50234 - pipe->waiting_writers++;
50235 + atomic_inc(&pipe->waiting_writers);
50236 pipe_wait(pipe);
50237 - pipe->waiting_writers--;
50238 + atomic_dec(&pipe->waiting_writers);
50239 }
50240
50241 pipe_unlock(pipe);
50242 @@ -1825,14 +1825,14 @@ retry:
50243 pipe_double_lock(ipipe, opipe);
50244
50245 do {
50246 - if (!opipe->readers) {
50247 + if (!atomic_read(&opipe->readers)) {
50248 send_sig(SIGPIPE, current, 0);
50249 if (!ret)
50250 ret = -EPIPE;
50251 break;
50252 }
50253
50254 - if (!ipipe->nrbufs && !ipipe->writers)
50255 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
50256 break;
50257
50258 /*
50259 @@ -1929,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50260 pipe_double_lock(ipipe, opipe);
50261
50262 do {
50263 - if (!opipe->readers) {
50264 + if (!atomic_read(&opipe->readers)) {
50265 send_sig(SIGPIPE, current, 0);
50266 if (!ret)
50267 ret = -EPIPE;
50268 @@ -1974,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50269 * return EAGAIN if we have the potential of some data in the
50270 * future, otherwise just return 0
50271 */
50272 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
50273 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
50274 ret = -EAGAIN;
50275
50276 pipe_unlock(ipipe);
50277 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
50278 index e6bb9b2..d8e3951 100644
50279 --- a/fs/sysfs/dir.c
50280 +++ b/fs/sysfs/dir.c
50281 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
50282 struct sysfs_dirent *sd;
50283 int rc;
50284
50285 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50286 + const char *parent_name = parent_sd->s_name;
50287 +
50288 + mode = S_IFDIR | S_IRWXU;
50289 +
50290 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
50291 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
50292 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
50293 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
50294 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
50295 +#endif
50296 +
50297 /* allocate */
50298 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
50299 if (!sd)
50300 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
50301 index 00012e3..8392349 100644
50302 --- a/fs/sysfs/file.c
50303 +++ b/fs/sysfs/file.c
50304 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
50305
50306 struct sysfs_open_dirent {
50307 atomic_t refcnt;
50308 - atomic_t event;
50309 + atomic_unchecked_t event;
50310 wait_queue_head_t poll;
50311 struct list_head buffers; /* goes through sysfs_buffer.list */
50312 };
50313 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
50314 if (!sysfs_get_active(attr_sd))
50315 return -ENODEV;
50316
50317 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50318 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50319 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50320
50321 sysfs_put_active(attr_sd);
50322 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
50323 return -ENOMEM;
50324
50325 atomic_set(&new_od->refcnt, 0);
50326 - atomic_set(&new_od->event, 1);
50327 + atomic_set_unchecked(&new_od->event, 1);
50328 init_waitqueue_head(&new_od->poll);
50329 INIT_LIST_HEAD(&new_od->buffers);
50330 goto retry;
50331 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
50332
50333 sysfs_put_active(attr_sd);
50334
50335 - if (buffer->event != atomic_read(&od->event))
50336 + if (buffer->event != atomic_read_unchecked(&od->event))
50337 goto trigger;
50338
50339 return DEFAULT_POLLMASK;
50340 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
50341
50342 od = sd->s_attr.open;
50343 if (od) {
50344 - atomic_inc(&od->event);
50345 + atomic_inc_unchecked(&od->event);
50346 wake_up_interruptible(&od->poll);
50347 }
50348
50349 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50350 index a7ac78f..02158e1 100644
50351 --- a/fs/sysfs/symlink.c
50352 +++ b/fs/sysfs/symlink.c
50353 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50354
50355 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50356 {
50357 - char *page = nd_get_link(nd);
50358 + const char *page = nd_get_link(nd);
50359 if (!IS_ERR(page))
50360 free_page((unsigned long)page);
50361 }
50362 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
50363 index c175b4d..8f36a16 100644
50364 --- a/fs/udf/misc.c
50365 +++ b/fs/udf/misc.c
50366 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
50367
50368 u8 udf_tag_checksum(const struct tag *t)
50369 {
50370 - u8 *data = (u8 *)t;
50371 + const u8 *data = (const u8 *)t;
50372 u8 checksum = 0;
50373 int i;
50374 for (i = 0; i < sizeof(struct tag); ++i)
50375 diff --git a/fs/udf/namei.c b/fs/udf/namei.c
50376 index 1802417..c31deb3 100644
50377 --- a/fs/udf/namei.c
50378 +++ b/fs/udf/namei.c
50379 @@ -1279,6 +1279,7 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
50380 *lenp = 3;
50381 fid->udf.block = location.logicalBlockNum;
50382 fid->udf.partref = location.partitionReferenceNum;
50383 + fid->udf.parent_partref = 0;
50384 fid->udf.generation = inode->i_generation;
50385
50386 if (parent) {
50387 diff --git a/fs/utimes.c b/fs/utimes.c
50388 index fa4dbe4..e12d1b9 100644
50389 --- a/fs/utimes.c
50390 +++ b/fs/utimes.c
50391 @@ -1,6 +1,7 @@
50392 #include <linux/compiler.h>
50393 #include <linux/file.h>
50394 #include <linux/fs.h>
50395 +#include <linux/security.h>
50396 #include <linux/linkage.h>
50397 #include <linux/mount.h>
50398 #include <linux/namei.h>
50399 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
50400 goto mnt_drop_write_and_out;
50401 }
50402 }
50403 +
50404 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50405 + error = -EACCES;
50406 + goto mnt_drop_write_and_out;
50407 + }
50408 +
50409 mutex_lock(&inode->i_mutex);
50410 error = notify_change(path->dentry, &newattrs);
50411 mutex_unlock(&inode->i_mutex);
50412 diff --git a/fs/xattr.c b/fs/xattr.c
50413 index 1d7ac37..23cb9ec 100644
50414 --- a/fs/xattr.c
50415 +++ b/fs/xattr.c
50416 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50417 * Extended attribute SET operations
50418 */
50419 static long
50420 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
50421 +setxattr(struct path *path, const char __user *name, const void __user *value,
50422 size_t size, int flags)
50423 {
50424 int error;
50425 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
50426 }
50427 }
50428
50429 - error = vfs_setxattr(d, kname, kvalue, size, flags);
50430 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50431 + error = -EACCES;
50432 + goto out;
50433 + }
50434 +
50435 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50436 out:
50437 if (vvalue)
50438 vfree(vvalue);
50439 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
50440 return error;
50441 error = mnt_want_write(path.mnt);
50442 if (!error) {
50443 - error = setxattr(path.dentry, name, value, size, flags);
50444 + error = setxattr(&path, name, value, size, flags);
50445 mnt_drop_write(path.mnt);
50446 }
50447 path_put(&path);
50448 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
50449 return error;
50450 error = mnt_want_write(path.mnt);
50451 if (!error) {
50452 - error = setxattr(path.dentry, name, value, size, flags);
50453 + error = setxattr(&path, name, value, size, flags);
50454 mnt_drop_write(path.mnt);
50455 }
50456 path_put(&path);
50457 @@ -401,17 +406,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
50458 {
50459 int fput_needed;
50460 struct file *f;
50461 - struct dentry *dentry;
50462 int error = -EBADF;
50463
50464 f = fget_light(fd, &fput_needed);
50465 if (!f)
50466 return error;
50467 - dentry = f->f_path.dentry;
50468 - audit_inode(NULL, dentry);
50469 + audit_inode(NULL, f->f_path.dentry);
50470 error = mnt_want_write_file(f);
50471 if (!error) {
50472 - error = setxattr(dentry, name, value, size, flags);
50473 + error = setxattr(&f->f_path, name, value, size, flags);
50474 mnt_drop_write_file(f);
50475 }
50476 fput_light(f, fput_needed);
50477 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50478 index 69d06b0..c0996e5 100644
50479 --- a/fs/xattr_acl.c
50480 +++ b/fs/xattr_acl.c
50481 @@ -17,8 +17,8 @@
50482 struct posix_acl *
50483 posix_acl_from_xattr(const void *value, size_t size)
50484 {
50485 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50486 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50487 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50488 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50489 int count;
50490 struct posix_acl *acl;
50491 struct posix_acl_entry *acl_e;
50492 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
50493 index 58b815e..595ddee 100644
50494 --- a/fs/xfs/xfs_bmap.c
50495 +++ b/fs/xfs/xfs_bmap.c
50496 @@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
50497 int nmap,
50498 int ret_nmap);
50499 #else
50500 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50501 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50502 #endif /* DEBUG */
50503
50504 STATIC int
50505 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50506 index 19bf0c5..9f26b02 100644
50507 --- a/fs/xfs/xfs_dir2_sf.c
50508 +++ b/fs/xfs/xfs_dir2_sf.c
50509 @@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
50510 }
50511
50512 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
50513 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50514 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50515 + char name[sfep->namelen];
50516 + memcpy(name, sfep->name, sfep->namelen);
50517 + if (filldir(dirent, name, sfep->namelen,
50518 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
50519 + *offset = off & 0x7fffffff;
50520 + return 0;
50521 + }
50522 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50523 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50524 *offset = off & 0x7fffffff;
50525 return 0;
50526 diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
50527 index f9c3fe3..69cf4fc 100644
50528 --- a/fs/xfs/xfs_discard.c
50529 +++ b/fs/xfs/xfs_discard.c
50530 @@ -179,12 +179,14 @@ xfs_ioc_trim(
50531 * used by the fstrim application. In the end it really doesn't
50532 * matter as trimming blocks is an advisory interface.
50533 */
50534 + if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
50535 + range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
50536 + return -XFS_ERROR(EINVAL);
50537 +
50538 start = BTOBB(range.start);
50539 end = start + BTOBBT(range.len) - 1;
50540 minlen = BTOBB(max_t(u64, granularity, range.minlen));
50541
50542 - if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
50543 - return -XFS_ERROR(EINVAL);
50544 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
50545 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
50546
50547 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
50548 index 3a05a41..320bec6 100644
50549 --- a/fs/xfs/xfs_ioctl.c
50550 +++ b/fs/xfs/xfs_ioctl.c
50551 @@ -126,7 +126,7 @@ xfs_find_handle(
50552 }
50553
50554 error = -EFAULT;
50555 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50556 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50557 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50558 goto out_put;
50559
50560 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
50561 index 1a25fd8..e935581 100644
50562 --- a/fs/xfs/xfs_iops.c
50563 +++ b/fs/xfs/xfs_iops.c
50564 @@ -394,7 +394,7 @@ xfs_vn_put_link(
50565 struct nameidata *nd,
50566 void *p)
50567 {
50568 - char *s = nd_get_link(nd);
50569 + const char *s = nd_get_link(nd);
50570
50571 if (!IS_ERR(s))
50572 kfree(s);
50573 diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
50574 index 92d4331..ca28a4b 100644
50575 --- a/fs/xfs/xfs_rtalloc.c
50576 +++ b/fs/xfs/xfs_rtalloc.c
50577 @@ -857,7 +857,7 @@ xfs_rtbuf_get(
50578 xfs_buf_t *bp; /* block buffer, result */
50579 xfs_inode_t *ip; /* bitmap or summary inode */
50580 xfs_bmbt_irec_t map;
50581 - int nmap;
50582 + int nmap = 1;
50583 int error; /* error value */
50584
50585 ip = issum ? mp->m_rsumip : mp->m_rbmip;
50586 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50587 new file mode 100644
50588 index 0000000..4d533f1
50589 --- /dev/null
50590 +++ b/grsecurity/Kconfig
50591 @@ -0,0 +1,941 @@
50592 +#
50593 +# grecurity configuration
50594 +#
50595 +menu "Memory Protections"
50596 +depends on GRKERNSEC
50597 +
50598 +config GRKERNSEC_KMEM
50599 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50600 + default y if GRKERNSEC_CONFIG_AUTO
50601 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50602 + help
50603 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50604 + be written to or read from to modify or leak the contents of the running
50605 + kernel. /dev/port will also not be allowed to be opened. If you have module
50606 + support disabled, enabling this will close up four ways that are
50607 + currently used to insert malicious code into the running kernel.
50608 + Even with all these features enabled, we still highly recommend that
50609 + you use the RBAC system, as it is still possible for an attacker to
50610 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50611 + If you are not using XFree86, you may be able to stop this additional
50612 + case by enabling the 'Disable privileged I/O' option. Though nothing
50613 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50614 + but only to video memory, which is the only writing we allow in this
50615 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50616 + not be allowed to mprotect it with PROT_WRITE later.
50617 + It is highly recommended that you say Y here if you meet all the
50618 + conditions above.
50619 +
50620 +config GRKERNSEC_VM86
50621 + bool "Restrict VM86 mode"
50622 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50623 + depends on X86_32
50624 +
50625 + help
50626 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50627 + make use of a special execution mode on 32bit x86 processors called
50628 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50629 + video cards and will still work with this option enabled. The purpose
50630 + of the option is to prevent exploitation of emulation errors in
50631 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50632 + Nearly all users should be able to enable this option.
50633 +
50634 +config GRKERNSEC_IO
50635 + bool "Disable privileged I/O"
50636 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50637 + depends on X86
50638 + select RTC_CLASS
50639 + select RTC_INTF_DEV
50640 + select RTC_DRV_CMOS
50641 +
50642 + help
50643 + If you say Y here, all ioperm and iopl calls will return an error.
50644 + Ioperm and iopl can be used to modify the running kernel.
50645 + Unfortunately, some programs need this access to operate properly,
50646 + the most notable of which are XFree86 and hwclock. hwclock can be
50647 + remedied by having RTC support in the kernel, so real-time
50648 + clock support is enabled if this option is enabled, to ensure
50649 + that hwclock operates correctly. XFree86 still will not
50650 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50651 + IF YOU USE XFree86. If you use XFree86 and you still want to
50652 + protect your kernel against modification, use the RBAC system.
50653 +
50654 +config GRKERNSEC_PROC_MEMMAP
50655 + bool "Harden ASLR against information leaks and entropy reduction"
50656 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
50657 + depends on PAX_NOEXEC || PAX_ASLR
50658 + help
50659 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50660 + give no information about the addresses of its mappings if
50661 + PaX features that rely on random addresses are enabled on the task.
50662 + In addition to sanitizing this information and disabling other
50663 + dangerous sources of information, this option causes reads of sensitive
50664 + /proc/<pid> entries where the file descriptor was opened in a different
50665 + task than the one performing the read. Such attempts are logged.
50666 + This option also limits argv/env strings for suid/sgid binaries
50667 + to 512KB to prevent a complete exhaustion of the stack entropy provided
50668 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50669 + binaries to prevent alternative mmap layouts from being abused.
50670 +
50671 + If you use PaX it is essential that you say Y here as it closes up
50672 + several holes that make full ASLR useless locally.
50673 +
50674 +config GRKERNSEC_BRUTE
50675 + bool "Deter exploit bruteforcing"
50676 + default y if GRKERNSEC_CONFIG_AUTO
50677 + help
50678 + If you say Y here, attempts to bruteforce exploits against forking
50679 + daemons such as apache or sshd, as well as against suid/sgid binaries
50680 + will be deterred. When a child of a forking daemon is killed by PaX
50681 + or crashes due to an illegal instruction or other suspicious signal,
50682 + the parent process will be delayed 30 seconds upon every subsequent
50683 + fork until the administrator is able to assess the situation and
50684 + restart the daemon.
50685 + In the suid/sgid case, the attempt is logged, the user has all their
50686 + processes terminated, and they are prevented from executing any further
50687 + processes for 15 minutes.
50688 + It is recommended that you also enable signal logging in the auditing
50689 + section so that logs are generated when a process triggers a suspicious
50690 + signal.
50691 + If the sysctl option is enabled, a sysctl option with name
50692 + "deter_bruteforce" is created.
50693 +
50694 +
50695 +config GRKERNSEC_MODHARDEN
50696 + bool "Harden module auto-loading"
50697 + default y if GRKERNSEC_CONFIG_AUTO
50698 + depends on MODULES
50699 + help
50700 + If you say Y here, module auto-loading in response to use of some
50701 + feature implemented by an unloaded module will be restricted to
50702 + root users. Enabling this option helps defend against attacks
50703 + by unprivileged users who abuse the auto-loading behavior to
50704 + cause a vulnerable module to load that is then exploited.
50705 +
50706 + If this option prevents a legitimate use of auto-loading for a
50707 + non-root user, the administrator can execute modprobe manually
50708 + with the exact name of the module mentioned in the alert log.
50709 + Alternatively, the administrator can add the module to the list
50710 + of modules loaded at boot by modifying init scripts.
50711 +
50712 + Modification of init scripts will most likely be needed on
50713 + Ubuntu servers with encrypted home directory support enabled,
50714 + as the first non-root user logging in will cause the ecb(aes),
50715 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50716 +
50717 +config GRKERNSEC_HIDESYM
50718 + bool "Hide kernel symbols"
50719 + default y if GRKERNSEC_CONFIG_AUTO
50720 + select PAX_USERCOPY_SLABS
50721 + help
50722 + If you say Y here, getting information on loaded modules, and
50723 + displaying all kernel symbols through a syscall will be restricted
50724 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50725 + /proc/kallsyms will be restricted to the root user. The RBAC
50726 + system can hide that entry even from root.
50727 +
50728 + This option also prevents leaking of kernel addresses through
50729 + several /proc entries.
50730 +
50731 + Note that this option is only effective provided the following
50732 + conditions are met:
50733 + 1) The kernel using grsecurity is not precompiled by some distribution
50734 + 2) You have also enabled GRKERNSEC_DMESG
50735 + 3) You are using the RBAC system and hiding other files such as your
50736 + kernel image and System.map. Alternatively, enabling this option
50737 + causes the permissions on /boot, /lib/modules, and the kernel
50738 + source directory to change at compile time to prevent
50739 + reading by non-root users.
50740 + If the above conditions are met, this option will aid in providing a
50741 + useful protection against local kernel exploitation of overflows
50742 + and arbitrary read/write vulnerabilities.
50743 +
50744 +config GRKERNSEC_KERN_LOCKOUT
50745 + bool "Active kernel exploit response"
50746 + default y if GRKERNSEC_CONFIG_AUTO
50747 + depends on X86 || ARM || PPC || SPARC
50748 + help
50749 + If you say Y here, when a PaX alert is triggered due to suspicious
50750 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50751 + or an OOPS occurs due to bad memory accesses, instead of just
50752 + terminating the offending process (and potentially allowing
50753 + a subsequent exploit from the same user), we will take one of two
50754 + actions:
50755 + If the user was root, we will panic the system
50756 + If the user was non-root, we will log the attempt, terminate
50757 + all processes owned by the user, then prevent them from creating
50758 + any new processes until the system is restarted
50759 + This deters repeated kernel exploitation/bruteforcing attempts
50760 + and is useful for later forensics.
50761 +
50762 +endmenu
50763 +menu "Role Based Access Control Options"
50764 +depends on GRKERNSEC
50765 +
50766 +config GRKERNSEC_RBAC_DEBUG
50767 + bool
50768 +
50769 +config GRKERNSEC_NO_RBAC
50770 + bool "Disable RBAC system"
50771 + help
50772 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50773 + preventing the RBAC system from being enabled. You should only say Y
50774 + here if you have no intention of using the RBAC system, so as to prevent
50775 + an attacker with root access from misusing the RBAC system to hide files
50776 + and processes when loadable module support and /dev/[k]mem have been
50777 + locked down.
50778 +
50779 +config GRKERNSEC_ACL_HIDEKERN
50780 + bool "Hide kernel processes"
50781 + help
50782 + If you say Y here, all kernel threads will be hidden to all
50783 + processes but those whose subject has the "view hidden processes"
50784 + flag.
50785 +
50786 +config GRKERNSEC_ACL_MAXTRIES
50787 + int "Maximum tries before password lockout"
50788 + default 3
50789 + help
50790 + This option enforces the maximum number of times a user can attempt
50791 + to authorize themselves with the grsecurity RBAC system before being
50792 + denied the ability to attempt authorization again for a specified time.
50793 + The lower the number, the harder it will be to brute-force a password.
50794 +
50795 +config GRKERNSEC_ACL_TIMEOUT
50796 + int "Time to wait after max password tries, in seconds"
50797 + default 30
50798 + help
50799 + This option specifies the time the user must wait after attempting to
50800 + authorize to the RBAC system with the maximum number of invalid
50801 + passwords. The higher the number, the harder it will be to brute-force
50802 + a password.
50803 +
50804 +endmenu
50805 +menu "Filesystem Protections"
50806 +depends on GRKERNSEC
50807 +
50808 +config GRKERNSEC_PROC
50809 + bool "Proc restrictions"
50810 + default y if GRKERNSEC_CONFIG_AUTO
50811 + help
50812 + If you say Y here, the permissions of the /proc filesystem
50813 + will be altered to enhance system security and privacy. You MUST
50814 + choose either a user only restriction or a user and group restriction.
50815 + Depending upon the option you choose, you can either restrict users to
50816 + see only the processes they themselves run, or choose a group that can
50817 + view all processes and files normally restricted to root if you choose
50818 + the "restrict to user only" option. NOTE: If you're running identd or
50819 + ntpd as a non-root user, you will have to run it as the group you
50820 + specify here.
50821 +
50822 +config GRKERNSEC_PROC_USER
50823 + bool "Restrict /proc to user only"
50824 + depends on GRKERNSEC_PROC
50825 + help
50826 + If you say Y here, non-root users will only be able to view their own
50827 + processes, and restricts them from viewing network-related information,
50828 + and viewing kernel symbol and module information.
50829 +
50830 +config GRKERNSEC_PROC_USERGROUP
50831 + bool "Allow special group"
50832 + default y if GRKERNSEC_CONFIG_AUTO
50833 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50834 + help
50835 + If you say Y here, you will be able to select a group that will be
50836 + able to view all processes and network-related information. If you've
50837 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50838 + remain hidden. This option is useful if you want to run identd as
50839 + a non-root user.
50840 +
50841 +config GRKERNSEC_PROC_GID
50842 + int "GID for special group"
50843 + depends on GRKERNSEC_PROC_USERGROUP
50844 + default 1001
50845 +
50846 +config GRKERNSEC_PROC_ADD
50847 + bool "Additional restrictions"
50848 + default y if GRKERNSEC_CONFIG_AUTO
50849 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50850 + help
50851 + If you say Y here, additional restrictions will be placed on
50852 + /proc that keep normal users from viewing device information and
50853 + slabinfo information that could be useful for exploits.
50854 +
50855 +config GRKERNSEC_LINK
50856 + bool "Linking restrictions"
50857 + default y if GRKERNSEC_CONFIG_AUTO
50858 + help
50859 + If you say Y here, /tmp race exploits will be prevented, since users
50860 + will no longer be able to follow symlinks owned by other users in
50861 + world-writable +t directories (e.g. /tmp), unless the owner of the
50862 + symlink is the owner of the directory. users will also not be
50863 + able to hardlink to files they do not own. If the sysctl option is
50864 + enabled, a sysctl option with name "linking_restrictions" is created.
50865 +
50866 +config GRKERNSEC_SYMLINKOWN
50867 + bool "Kernel-enforced SymlinksIfOwnerMatch"
50868 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50869 + help
50870 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
50871 + that prevents it from being used as a security feature. As Apache
50872 + verifies the symlink by performing a stat() against the target of
50873 + the symlink before it is followed, an attacker can setup a symlink
50874 + to point to a same-owned file, then replace the symlink with one
50875 + that targets another user's file just after Apache "validates" the
50876 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
50877 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
50878 + will be in place for the group you specify. If the sysctl option
50879 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
50880 + created.
50881 +
50882 +config GRKERNSEC_SYMLINKOWN_GID
50883 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
50884 + depends on GRKERNSEC_SYMLINKOWN
50885 + default 1006
50886 + help
50887 + Setting this GID determines what group kernel-enforced
50888 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
50889 + is enabled, a sysctl option with name "symlinkown_gid" is created.
50890 +
50891 +config GRKERNSEC_FIFO
50892 + bool "FIFO restrictions"
50893 + default y if GRKERNSEC_CONFIG_AUTO
50894 + help
50895 + If you say Y here, users will not be able to write to FIFOs they don't
50896 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50897 + the FIFO is the same owner of the directory it's held in. If the sysctl
50898 + option is enabled, a sysctl option with name "fifo_restrictions" is
50899 + created.
50900 +
50901 +config GRKERNSEC_SYSFS_RESTRICT
50902 + bool "Sysfs/debugfs restriction"
50903 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50904 + depends on SYSFS
50905 + help
50906 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50907 + any filesystem normally mounted under it (e.g. debugfs) will be
50908 + mostly accessible only by root. These filesystems generally provide access
50909 + to hardware and debug information that isn't appropriate for unprivileged
50910 + users of the system. Sysfs and debugfs have also become a large source
50911 + of new vulnerabilities, ranging from infoleaks to local compromise.
50912 + There has been very little oversight with an eye toward security involved
50913 + in adding new exporters of information to these filesystems, so their
50914 + use is discouraged.
50915 + For reasons of compatibility, a few directories have been whitelisted
50916 + for access by non-root users:
50917 + /sys/fs/selinux
50918 + /sys/fs/fuse
50919 + /sys/devices/system/cpu
50920 +
50921 +config GRKERNSEC_ROFS
50922 + bool "Runtime read-only mount protection"
50923 + help
50924 + If you say Y here, a sysctl option with name "romount_protect" will
50925 + be created. By setting this option to 1 at runtime, filesystems
50926 + will be protected in the following ways:
50927 + * No new writable mounts will be allowed
50928 + * Existing read-only mounts won't be able to be remounted read/write
50929 + * Write operations will be denied on all block devices
50930 + This option acts independently of grsec_lock: once it is set to 1,
50931 + it cannot be turned off. Therefore, please be mindful of the resulting
50932 + behavior if this option is enabled in an init script on a read-only
50933 + filesystem. This feature is mainly intended for secure embedded systems.
50934 +
50935 +config GRKERNSEC_CHROOT
50936 + bool "Chroot jail restrictions"
50937 + default y if GRKERNSEC_CONFIG_AUTO
50938 + help
50939 + If you say Y here, you will be able to choose several options that will
50940 + make breaking out of a chrooted jail much more difficult. If you
50941 + encounter no software incompatibilities with the following options, it
50942 + is recommended that you enable each one.
50943 +
50944 +config GRKERNSEC_CHROOT_MOUNT
50945 + bool "Deny mounts"
50946 + default y if GRKERNSEC_CONFIG_AUTO
50947 + depends on GRKERNSEC_CHROOT
50948 + help
50949 + If you say Y here, processes inside a chroot will not be able to
50950 + mount or remount filesystems. If the sysctl option is enabled, a
50951 + sysctl option with name "chroot_deny_mount" is created.
50952 +
50953 +config GRKERNSEC_CHROOT_DOUBLE
50954 + bool "Deny double-chroots"
50955 + default y if GRKERNSEC_CONFIG_AUTO
50956 + depends on GRKERNSEC_CHROOT
50957 + help
50958 + If you say Y here, processes inside a chroot will not be able to chroot
50959 + again outside the chroot. This is a widely used method of breaking
50960 + out of a chroot jail and should not be allowed. If the sysctl
50961 + option is enabled, a sysctl option with name
50962 + "chroot_deny_chroot" is created.
50963 +
50964 +config GRKERNSEC_CHROOT_PIVOT
50965 + bool "Deny pivot_root in chroot"
50966 + default y if GRKERNSEC_CONFIG_AUTO
50967 + depends on GRKERNSEC_CHROOT
50968 + help
50969 + If you say Y here, processes inside a chroot will not be able to use
50970 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50971 + works similar to chroot in that it changes the root filesystem. This
50972 + function could be misused in a chrooted process to attempt to break out
50973 + of the chroot, and therefore should not be allowed. If the sysctl
50974 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50975 + created.
50976 +
50977 +config GRKERNSEC_CHROOT_CHDIR
50978 + bool "Enforce chdir(\"/\") on all chroots"
50979 + default y if GRKERNSEC_CONFIG_AUTO
50980 + depends on GRKERNSEC_CHROOT
50981 + help
50982 + If you say Y here, the current working directory of all newly-chrooted
50983 + applications will be set to the root directory of the chroot.
50984 + The man page on chroot(2) states:
50985 + Note that this call does not change the current working
50986 + directory, so that `.' can be outside the tree rooted at
50987 + `/'. In particular, the super-user can escape from a
50988 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50989 +
50990 + It is recommended that you say Y here, since it's not known to break
50991 + any software. If the sysctl option is enabled, a sysctl option with
50992 + name "chroot_enforce_chdir" is created.
50993 +
50994 +config GRKERNSEC_CHROOT_CHMOD
50995 + bool "Deny (f)chmod +s"
50996 + default y if GRKERNSEC_CONFIG_AUTO
50997 + depends on GRKERNSEC_CHROOT
50998 + help
50999 + If you say Y here, processes inside a chroot will not be able to chmod
51000 + or fchmod files to make them have suid or sgid bits. This protects
51001 + against another published method of breaking a chroot. If the sysctl
51002 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
51003 + created.
51004 +
51005 +config GRKERNSEC_CHROOT_FCHDIR
51006 + bool "Deny fchdir out of chroot"
51007 + default y if GRKERNSEC_CONFIG_AUTO
51008 + depends on GRKERNSEC_CHROOT
51009 + help
51010 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
51011 + to a file descriptor of the chrooting process that points to a directory
51012 + outside the filesystem will be stopped. If the sysctl option
51013 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
51014 +
51015 +config GRKERNSEC_CHROOT_MKNOD
51016 + bool "Deny mknod"
51017 + default y if GRKERNSEC_CONFIG_AUTO
51018 + depends on GRKERNSEC_CHROOT
51019 + help
51020 + If you say Y here, processes inside a chroot will not be allowed to
51021 + mknod. The problem with using mknod inside a chroot is that it
51022 + would allow an attacker to create a device entry that is the same
51023 + as one on the physical root of your system, which could be
51024 + anything from the console device to a device for your hard drive (which
51025 + they could then use to wipe the drive or steal data). It is recommended
51026 + that you say Y here, unless you run into software incompatibilities.
51027 + If the sysctl option is enabled, a sysctl option with name
51028 + "chroot_deny_mknod" is created.
51029 +
51030 +config GRKERNSEC_CHROOT_SHMAT
51031 + bool "Deny shmat() out of chroot"
51032 + default y if GRKERNSEC_CONFIG_AUTO
51033 + depends on GRKERNSEC_CHROOT
51034 + help
51035 + If you say Y here, processes inside a chroot will not be able to attach
51036 + to shared memory segments that were created outside of the chroot jail.
51037 + It is recommended that you say Y here. If the sysctl option is enabled,
51038 + a sysctl option with name "chroot_deny_shmat" is created.
51039 +
51040 +config GRKERNSEC_CHROOT_UNIX
51041 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
51042 + default y if GRKERNSEC_CONFIG_AUTO
51043 + depends on GRKERNSEC_CHROOT
51044 + help
51045 + If you say Y here, processes inside a chroot will not be able to
51046 + connect to abstract (meaning not belonging to a filesystem) Unix
51047 + domain sockets that were bound outside of a chroot. It is recommended
51048 + that you say Y here. If the sysctl option is enabled, a sysctl option
51049 + with name "chroot_deny_unix" is created.
51050 +
51051 +config GRKERNSEC_CHROOT_FINDTASK
51052 + bool "Protect outside processes"
51053 + default y if GRKERNSEC_CONFIG_AUTO
51054 + depends on GRKERNSEC_CHROOT
51055 + help
51056 + If you say Y here, processes inside a chroot will not be able to
51057 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
51058 + getsid, or view any process outside of the chroot. If the sysctl
51059 + option is enabled, a sysctl option with name "chroot_findtask" is
51060 + created.
51061 +
51062 +config GRKERNSEC_CHROOT_NICE
51063 + bool "Restrict priority changes"
51064 + default y if GRKERNSEC_CONFIG_AUTO
51065 + depends on GRKERNSEC_CHROOT
51066 + help
51067 + If you say Y here, processes inside a chroot will not be able to raise
51068 + the priority of processes in the chroot, or alter the priority of
51069 + processes outside the chroot. This provides more security than simply
51070 + removing CAP_SYS_NICE from the process' capability set. If the
51071 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
51072 + is created.
51073 +
51074 +config GRKERNSEC_CHROOT_SYSCTL
51075 + bool "Deny sysctl writes"
51076 + default y if GRKERNSEC_CONFIG_AUTO
51077 + depends on GRKERNSEC_CHROOT
51078 + help
51079 + If you say Y here, an attacker in a chroot will not be able to
51080 + write to sysctl entries, either by sysctl(2) or through a /proc
51081 + interface. It is strongly recommended that you say Y here. If the
51082 + sysctl option is enabled, a sysctl option with name
51083 + "chroot_deny_sysctl" is created.
51084 +
51085 +config GRKERNSEC_CHROOT_CAPS
51086 + bool "Capability restrictions"
51087 + default y if GRKERNSEC_CONFIG_AUTO
51088 + depends on GRKERNSEC_CHROOT
51089 + help
51090 + If you say Y here, the capabilities on all processes within a
51091 + chroot jail will be lowered to stop module insertion, raw i/o,
51092 + system and net admin tasks, rebooting the system, modifying immutable
51093 + files, modifying IPC owned by another, and changing the system time.
51094 + This is left an option because it can break some apps. Disable this
51095 + if your chrooted apps are having problems performing those kinds of
51096 + tasks. If the sysctl option is enabled, a sysctl option with
51097 + name "chroot_caps" is created.
51098 +
51099 +endmenu
51100 +menu "Kernel Auditing"
51101 +depends on GRKERNSEC
51102 +
51103 +config GRKERNSEC_AUDIT_GROUP
51104 + bool "Single group for auditing"
51105 + help
51106 + If you say Y here, the exec, chdir, and (un)mount logging features
51107 + will only operate on a group you specify. This option is recommended
51108 + if you only want to watch certain users instead of having a large
51109 + amount of logs from the entire system. If the sysctl option is enabled,
51110 + a sysctl option with name "audit_group" is created.
51111 +
51112 +config GRKERNSEC_AUDIT_GID
51113 + int "GID for auditing"
51114 + depends on GRKERNSEC_AUDIT_GROUP
51115 + default 1007
51116 +
51117 +config GRKERNSEC_EXECLOG
51118 + bool "Exec logging"
51119 + help
51120 + If you say Y here, all execve() calls will be logged (since the
51121 + other exec*() calls are frontends to execve(), all execution
51122 + will be logged). Useful for shell-servers that like to keep track
51123 + of their users. If the sysctl option is enabled, a sysctl option with
51124 + name "exec_logging" is created.
51125 + WARNING: This option when enabled will produce a LOT of logs, especially
51126 + on an active system.
51127 +
51128 +config GRKERNSEC_RESLOG
51129 + bool "Resource logging"
51130 + default y if GRKERNSEC_CONFIG_AUTO
51131 + help
51132 + If you say Y here, all attempts to overstep resource limits will
51133 + be logged with the resource name, the requested size, and the current
51134 + limit. It is highly recommended that you say Y here. If the sysctl
51135 + option is enabled, a sysctl option with name "resource_logging" is
51136 + created. If the RBAC system is enabled, the sysctl value is ignored.
51137 +
51138 +config GRKERNSEC_CHROOT_EXECLOG
51139 + bool "Log execs within chroot"
51140 + help
51141 + If you say Y here, all executions inside a chroot jail will be logged
51142 + to syslog. This can cause a large amount of logs if certain
51143 + applications (eg. djb's daemontools) are installed on the system, and
51144 + is therefore left as an option. If the sysctl option is enabled, a
51145 + sysctl option with name "chroot_execlog" is created.
51146 +
51147 +config GRKERNSEC_AUDIT_PTRACE
51148 + bool "Ptrace logging"
51149 + help
51150 + If you say Y here, all attempts to attach to a process via ptrace
51151 + will be logged. If the sysctl option is enabled, a sysctl option
51152 + with name "audit_ptrace" is created.
51153 +
51154 +config GRKERNSEC_AUDIT_CHDIR
51155 + bool "Chdir logging"
51156 + help
51157 + If you say Y here, all chdir() calls will be logged. If the sysctl
51158 + option is enabled, a sysctl option with name "audit_chdir" is created.
51159 +
51160 +config GRKERNSEC_AUDIT_MOUNT
51161 + bool "(Un)Mount logging"
51162 + help
51163 + If you say Y here, all mounts and unmounts will be logged. If the
51164 + sysctl option is enabled, a sysctl option with name "audit_mount" is
51165 + created.
51166 +
51167 +config GRKERNSEC_SIGNAL
51168 + bool "Signal logging"
51169 + default y if GRKERNSEC_CONFIG_AUTO
51170 + help
51171 + If you say Y here, certain important signals will be logged, such as
51172 + SIGSEGV, which will as a result inform you when an error in a program
51173 + occurred, which in some cases could mean a possible exploit attempt.
51174 + If the sysctl option is enabled, a sysctl option with name
51175 + "signal_logging" is created.
51176 +
51177 +config GRKERNSEC_FORKFAIL
51178 + bool "Fork failure logging"
51179 + help
51180 + If you say Y here, all failed fork() attempts will be logged.
51181 + This could suggest a fork bomb, or someone attempting to overstep
51182 + their process limit. If the sysctl option is enabled, a sysctl option
51183 + with name "forkfail_logging" is created.
51184 +
51185 +config GRKERNSEC_TIME
51186 + bool "Time change logging"
51187 + default y if GRKERNSEC_CONFIG_AUTO
51188 + help
51189 + If you say Y here, any changes of the system clock will be logged.
51190 + If the sysctl option is enabled, a sysctl option with name
51191 + "timechange_logging" is created.
51192 +
51193 +config GRKERNSEC_PROC_IPADDR
51194 + bool "/proc/<pid>/ipaddr support"
51195 + default y if GRKERNSEC_CONFIG_AUTO
51196 + help
51197 + If you say Y here, a new entry will be added to each /proc/<pid>
51198 + directory that contains the IP address of the person using the task.
51199 + The IP is carried across local TCP and AF_UNIX stream sockets.
51200 + This information can be useful for IDS/IPSes to perform remote response
51201 + to a local attack. The entry is readable by only the owner of the
51202 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
51203 + the RBAC system), and thus does not create privacy concerns.
51204 +
51205 +config GRKERNSEC_RWXMAP_LOG
51206 + bool 'Denied RWX mmap/mprotect logging'
51207 + default y if GRKERNSEC_CONFIG_AUTO
51208 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
51209 + help
51210 + If you say Y here, calls to mmap() and mprotect() with explicit
51211 + usage of PROT_WRITE and PROT_EXEC together will be logged when
51212 + denied by the PAX_MPROTECT feature. If the sysctl option is
51213 + enabled, a sysctl option with name "rwxmap_logging" is created.
51214 +
51215 +config GRKERNSEC_AUDIT_TEXTREL
51216 + bool 'ELF text relocations logging (READ HELP)'
51217 + depends on PAX_MPROTECT
51218 + help
51219 + If you say Y here, text relocations will be logged with the filename
51220 + of the offending library or binary. The purpose of the feature is
51221 + to help Linux distribution developers get rid of libraries and
51222 + binaries that need text relocations which hinder the future progress
51223 + of PaX. Only Linux distribution developers should say Y here, and
51224 + never on a production machine, as this option creates an information
51225 + leak that could aid an attacker in defeating the randomization of
51226 + a single memory region. If the sysctl option is enabled, a sysctl
51227 + option with name "audit_textrel" is created.
51228 +
51229 +endmenu
51230 +
51231 +menu "Executable Protections"
51232 +depends on GRKERNSEC
51233 +
51234 +config GRKERNSEC_DMESG
51235 + bool "Dmesg(8) restriction"
51236 + default y if GRKERNSEC_CONFIG_AUTO
51237 + help
51238 + If you say Y here, non-root users will not be able to use dmesg(8)
51239 + to view up to the last 4kb of messages in the kernel's log buffer.
51240 + The kernel's log buffer often contains kernel addresses and other
51241 + identifying information useful to an attacker in fingerprinting a
51242 + system for a targeted exploit.
51243 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
51244 + created.
51245 +
51246 +config GRKERNSEC_HARDEN_PTRACE
51247 + bool "Deter ptrace-based process snooping"
51248 + default y if GRKERNSEC_CONFIG_AUTO
51249 + help
51250 + If you say Y here, TTY sniffers and other malicious monitoring
51251 + programs implemented through ptrace will be defeated. If you
51252 + have been using the RBAC system, this option has already been
51253 + enabled for several years for all users, with the ability to make
51254 + fine-grained exceptions.
51255 +
51256 + This option only affects the ability of non-root users to ptrace
51257 + processes that are not a descendant of the ptracing process.
51258 + This means that strace ./binary and gdb ./binary will still work,
51259 + but attaching to arbitrary processes will not. If the sysctl
51260 + option is enabled, a sysctl option with name "harden_ptrace" is
51261 + created.
51262 +
51263 +config GRKERNSEC_PTRACE_READEXEC
51264 + bool "Require read access to ptrace sensitive binaries"
51265 + default y if GRKERNSEC_CONFIG_AUTO
51266 + help
51267 + If you say Y here, unprivileged users will not be able to ptrace unreadable
51268 + binaries. This option is useful in environments that
51269 + remove the read bits (e.g. file mode 4711) from suid binaries to
51270 + prevent infoleaking of their contents. This option adds
51271 + consistency to the use of that file mode, as the binary could normally
51272 + be read out when run without privileges while ptracing.
51273 +
51274 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
51275 + is created.
51276 +
51277 +config GRKERNSEC_SETXID
51278 + bool "Enforce consistent multithreaded privileges"
51279 + default y if GRKERNSEC_CONFIG_AUTO
51280 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
51281 + help
51282 + If you say Y here, a change from a root uid to a non-root uid
51283 + in a multithreaded application will cause the resulting uids,
51284 + gids, supplementary groups, and capabilities in that thread
51285 + to be propagated to the other threads of the process. In most
51286 + cases this is unnecessary, as glibc will emulate this behavior
51287 + on behalf of the application. Other libcs do not act in the
51288 + same way, allowing the other threads of the process to continue
51289 + running with root privileges. If the sysctl option is enabled,
51290 + a sysctl option with name "consistent_setxid" is created.
51291 +
51292 +config GRKERNSEC_TPE
51293 + bool "Trusted Path Execution (TPE)"
51294 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
51295 + help
51296 + If you say Y here, you will be able to choose a gid to add to the
51297 + supplementary groups of users you want to mark as "untrusted."
51298 + These users will not be able to execute any files that are not in
51299 + root-owned directories writable only by root. If the sysctl option
51300 + is enabled, a sysctl option with name "tpe" is created.
51301 +
51302 +config GRKERNSEC_TPE_ALL
51303 + bool "Partially restrict all non-root users"
51304 + depends on GRKERNSEC_TPE
51305 + help
51306 + If you say Y here, all non-root users will be covered under
51307 + a weaker TPE restriction. This is separate from, and in addition to,
51308 + the main TPE options that you have selected elsewhere. Thus, if a
51309 + "trusted" GID is chosen, this restriction applies to even that GID.
51310 + Under this restriction, all non-root users will only be allowed to
51311 + execute files in directories they own that are not group or
51312 + world-writable, or in directories owned by root and writable only by
51313 + root. If the sysctl option is enabled, a sysctl option with name
51314 + "tpe_restrict_all" is created.
51315 +
51316 +config GRKERNSEC_TPE_INVERT
51317 + bool "Invert GID option"
51318 + depends on GRKERNSEC_TPE
51319 + help
51320 + If you say Y here, the group you specify in the TPE configuration will
51321 + decide what group TPE restrictions will be *disabled* for. This
51322 + option is useful if you want TPE restrictions to be applied to most
51323 + users on the system. If the sysctl option is enabled, a sysctl option
51324 + with name "tpe_invert" is created. Unlike other sysctl options, this
51325 + entry will default to on for backward-compatibility.
51326 +
51327 +config GRKERNSEC_TPE_GID
51328 + int "GID for untrusted users"
51329 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51330 + default 1005
51331 + help
51332 + Setting this GID determines what group TPE restrictions will be
51333 + *enabled* for. If the sysctl option is enabled, a sysctl option
51334 + with name "tpe_gid" is created.
51335 +
51336 +config GRKERNSEC_TPE_GID
51337 + int "GID for trusted users"
51338 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51339 + default 1005
51340 + help
51341 + Setting this GID determines what group TPE restrictions will be
51342 + *disabled* for. If the sysctl option is enabled, a sysctl option
51343 + with name "tpe_gid" is created.
51344 +
51345 +endmenu
51346 +menu "Network Protections"
51347 +depends on GRKERNSEC
51348 +
51349 +config GRKERNSEC_RANDNET
51350 + bool "Larger entropy pools"
51351 + default y if GRKERNSEC_CONFIG_AUTO
51352 + help
51353 + If you say Y here, the entropy pools used for many features of Linux
51354 + and grsecurity will be doubled in size. Since several grsecurity
51355 + features use additional randomness, it is recommended that you say Y
51356 + here. Saying Y here has a similar effect as modifying
51357 + /proc/sys/kernel/random/poolsize.
51358 +
51359 +config GRKERNSEC_BLACKHOLE
51360 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51361 + default y if GRKERNSEC_CONFIG_AUTO
51362 + depends on NET
51363 + help
51364 + If you say Y here, neither TCP resets nor ICMP
51365 + destination-unreachable packets will be sent in response to packets
51366 + sent to ports for which no associated listening process exists.
51367 + This feature supports both IPv4 and IPv6 and exempts the
51368 + loopback interface from blackholing. Enabling this feature
51369 + makes a host more resilient to DoS attacks and reduces network
51370 + visibility against scanners.
51371 +
51372 + The blackhole feature as-implemented is equivalent to the FreeBSD
51373 + blackhole feature, as it prevents RST responses to all packets, not
51374 + just SYNs. Under most application behavior this causes no
51375 + problems, but applications (like haproxy) may not close certain
51376 + connections in a way that cleanly terminates them on the remote
51377 + end, leaving the remote host in LAST_ACK state. Because of this
51378 + side-effect and to prevent intentional LAST_ACK DoSes, this
51379 + feature also adds automatic mitigation against such attacks.
51380 + The mitigation drastically reduces the amount of time a socket
51381 + can spend in LAST_ACK state. If you're using haproxy and not
51382 + all servers it connects to have this option enabled, consider
51383 + disabling this feature on the haproxy host.
51384 +
51385 + If the sysctl option is enabled, two sysctl options with names
51386 + "ip_blackhole" and "lastack_retries" will be created.
51387 + While "ip_blackhole" takes the standard zero/non-zero on/off
51388 + toggle, "lastack_retries" uses the same kinds of values as
51389 + "tcp_retries1" and "tcp_retries2". The default value of 4
51390 + prevents a socket from lasting more than 45 seconds in LAST_ACK
51391 + state.
51392 +
51393 +config GRKERNSEC_SOCKET
51394 + bool "Socket restrictions"
51395 + depends on NET
51396 + help
51397 + If you say Y here, you will be able to choose from several options.
51398 + If you assign a GID on your system and add it to the supplementary
51399 + groups of users you want to restrict socket access to, this patch
51400 + will perform up to three things, based on the option(s) you choose.
51401 +
51402 +config GRKERNSEC_SOCKET_ALL
51403 + bool "Deny any sockets to group"
51404 + depends on GRKERNSEC_SOCKET
51405 + help
51406 + If you say Y here, you will be able to choose a GID whose users will
51407 + be unable to connect to other hosts from your machine or run server
51408 + applications from your machine. If the sysctl option is enabled, a
51409 + sysctl option with name "socket_all" is created.
51410 +
51411 +config GRKERNSEC_SOCKET_ALL_GID
51412 + int "GID to deny all sockets for"
51413 + depends on GRKERNSEC_SOCKET_ALL
51414 + default 1004
51415 + help
51416 + Here you can choose the GID to disable socket access for. Remember to
51417 + add the users you want socket access disabled for to the GID
51418 + specified here. If the sysctl option is enabled, a sysctl option
51419 + with name "socket_all_gid" is created.
51420 +
51421 +config GRKERNSEC_SOCKET_CLIENT
51422 + bool "Deny client sockets to group"
51423 + depends on GRKERNSEC_SOCKET
51424 + help
51425 + If you say Y here, you will be able to choose a GID whose users will
51426 + be unable to connect to other hosts from your machine, but will be
51427 + able to run servers. If this option is enabled, all users in the group
51428 + you specify will have to use passive mode when initiating ftp transfers
51429 + from the shell on your machine. If the sysctl option is enabled, a
51430 + sysctl option with name "socket_client" is created.
51431 +
51432 +config GRKERNSEC_SOCKET_CLIENT_GID
51433 + int "GID to deny client sockets for"
51434 + depends on GRKERNSEC_SOCKET_CLIENT
51435 + default 1003
51436 + help
51437 + Here you can choose the GID to disable client socket access for.
51438 + Remember to add the users you want client socket access disabled for to
51439 + the GID specified here. If the sysctl option is enabled, a sysctl
51440 + option with name "socket_client_gid" is created.
51441 +
51442 +config GRKERNSEC_SOCKET_SERVER
51443 + bool "Deny server sockets to group"
51444 + depends on GRKERNSEC_SOCKET
51445 + help
51446 + If you say Y here, you will be able to choose a GID whose users will
51447 + be unable to run server applications from your machine. If the sysctl
51448 + option is enabled, a sysctl option with name "socket_server" is created.
51449 +
51450 +config GRKERNSEC_SOCKET_SERVER_GID
51451 + int "GID to deny server sockets for"
51452 + depends on GRKERNSEC_SOCKET_SERVER
51453 + default 1002
51454 + help
51455 + Here you can choose the GID to disable server socket access for.
51456 + Remember to add the users you want server socket access disabled for to
51457 + the GID specified here. If the sysctl option is enabled, a sysctl
51458 + option with name "socket_server_gid" is created.
51459 +
51460 +endmenu
51461 +menu "Sysctl Support"
51462 +depends on GRKERNSEC && SYSCTL
51463 +
51464 +config GRKERNSEC_SYSCTL
51465 + bool "Sysctl support"
51466 + default y if GRKERNSEC_CONFIG_AUTO
51467 + help
51468 + If you say Y here, you will be able to change the options that
51469 + grsecurity runs with at bootup, without having to recompile your
51470 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51471 + to enable (1) or disable (0) various features. All the sysctl entries
51472 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51473 + All features enabled in the kernel configuration are disabled at boot
51474 + if you do not say Y to the "Turn on features by default" option.
51475 + All options should be set at startup, and the grsec_lock entry should
51476 + be set to a non-zero value after all the options are set.
51477 + *THIS IS EXTREMELY IMPORTANT*
51478 +
51479 +config GRKERNSEC_SYSCTL_DISTRO
51480 + bool "Extra sysctl support for distro makers (READ HELP)"
51481 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51482 + help
51483 + If you say Y here, additional sysctl options will be created
51484 + for features that affect processes running as root. Therefore,
51485 + it is critical when using this option that the grsec_lock entry be
51486 + enabled after boot. Only distros with prebuilt kernel packages
51487 + with this option enabled that can ensure grsec_lock is enabled
51488 + after boot should use this option.
51489 + *Failure to set grsec_lock after boot makes all grsec features
51490 + this option covers useless*
51491 +
51492 + Currently this option creates the following sysctl entries:
51493 + "Disable Privileged I/O": "disable_priv_io"
51494 +
51495 +config GRKERNSEC_SYSCTL_ON
51496 + bool "Turn on features by default"
51497 + default y if GRKERNSEC_CONFIG_AUTO
51498 + depends on GRKERNSEC_SYSCTL
51499 + help
51500 + If you say Y here, instead of having all features enabled in the
51501 + kernel configuration disabled at boot time, the features will be
51502 + enabled at boot time. It is recommended you say Y here unless
51503 + there is some reason you would want all sysctl-tunable features to
51504 + be disabled by default. As mentioned elsewhere, it is important
51505 + to enable the grsec_lock entry once you have finished modifying
51506 + the sysctl entries.
51507 +
51508 +endmenu
51509 +menu "Logging Options"
51510 +depends on GRKERNSEC
51511 +
51512 +config GRKERNSEC_FLOODTIME
51513 + int "Seconds in between log messages (minimum)"
51514 + default 10
51515 + help
51516 + This option allows you to enforce the number of seconds between
51517 + grsecurity log messages. The default should be suitable for most
51518 + people, however, if you choose to change it, choose a value small enough
51519 + to allow informative logs to be produced, but large enough to
51520 + prevent flooding.
51521 +
51522 +config GRKERNSEC_FLOODBURST
51523 + int "Number of messages in a burst (maximum)"
51524 + default 6
51525 + help
51526 + This option allows you to choose the maximum number of messages allowed
51527 + within the flood time interval you chose in a separate option. The
51528 + default should be suitable for most people, however if you find that
51529 + many of your logs are being interpreted as flooding, you may want to
51530 + raise this value.
51531 +
51532 +endmenu
51533 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51534 new file mode 100644
51535 index 0000000..1b9afa9
51536 --- /dev/null
51537 +++ b/grsecurity/Makefile
51538 @@ -0,0 +1,38 @@
51539 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51540 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51541 +# into an RBAC system
51542 +#
51543 +# All code in this directory and various hooks inserted throughout the kernel
51544 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51545 +# under the GPL v2 or higher
51546 +
51547 +KBUILD_CFLAGS += -Werror
51548 +
51549 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51550 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51551 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51552 +
51553 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51554 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51555 + gracl_learn.o grsec_log.o
51556 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51557 +
51558 +ifdef CONFIG_NET
51559 +obj-y += grsec_sock.o
51560 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51561 +endif
51562 +
51563 +ifndef CONFIG_GRKERNSEC
51564 +obj-y += grsec_disabled.o
51565 +endif
51566 +
51567 +ifdef CONFIG_GRKERNSEC_HIDESYM
51568 +extra-y := grsec_hidesym.o
51569 +$(obj)/grsec_hidesym.o:
51570 + @-chmod -f 500 /boot
51571 + @-chmod -f 500 /lib/modules
51572 + @-chmod -f 500 /lib64/modules
51573 + @-chmod -f 500 /lib32/modules
51574 + @-chmod -f 700 .
51575 + @echo ' grsec: protected kernel image paths'
51576 +endif
51577 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51578 new file mode 100644
51579 index 0000000..1561617
51580 --- /dev/null
51581 +++ b/grsecurity/gracl.c
51582 @@ -0,0 +1,4017 @@
51583 +#include <linux/kernel.h>
51584 +#include <linux/module.h>
51585 +#include <linux/sched.h>
51586 +#include <linux/mm.h>
51587 +#include <linux/file.h>
51588 +#include <linux/fs.h>
51589 +#include <linux/namei.h>
51590 +#include <linux/mount.h>
51591 +#include <linux/tty.h>
51592 +#include <linux/proc_fs.h>
51593 +#include <linux/lglock.h>
51594 +#include <linux/slab.h>
51595 +#include <linux/vmalloc.h>
51596 +#include <linux/types.h>
51597 +#include <linux/sysctl.h>
51598 +#include <linux/netdevice.h>
51599 +#include <linux/ptrace.h>
51600 +#include <linux/gracl.h>
51601 +#include <linux/gralloc.h>
51602 +#include <linux/security.h>
51603 +#include <linux/grinternal.h>
51604 +#include <linux/pid_namespace.h>
51605 +#include <linux/stop_machine.h>
51606 +#include <linux/fdtable.h>
51607 +#include <linux/percpu.h>
51608 +#include <linux/lglock.h>
51609 +#include "../fs/mount.h"
51610 +
51611 +#include <asm/uaccess.h>
51612 +#include <asm/errno.h>
51613 +#include <asm/mman.h>
51614 +
51615 +extern struct lglock vfsmount_lock;
51616 +
51617 +static struct acl_role_db acl_role_set;
51618 +static struct name_db name_set;
51619 +static struct inodev_db inodev_set;
51620 +
51621 +/* for keeping track of userspace pointers used for subjects, so we
51622 + can share references in the kernel as well
51623 +*/
51624 +
51625 +static struct path real_root;
51626 +
51627 +static struct acl_subj_map_db subj_map_set;
51628 +
51629 +static struct acl_role_label *default_role;
51630 +
51631 +static struct acl_role_label *role_list;
51632 +
51633 +static u16 acl_sp_role_value;
51634 +
51635 +extern char *gr_shared_page[4];
51636 +static DEFINE_MUTEX(gr_dev_mutex);
51637 +DEFINE_RWLOCK(gr_inode_lock);
51638 +
51639 +struct gr_arg *gr_usermode;
51640 +
51641 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
51642 +
51643 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51644 +extern void gr_clear_learn_entries(void);
51645 +
51646 +#ifdef CONFIG_GRKERNSEC_RESLOG
51647 +extern void gr_log_resource(const struct task_struct *task,
51648 + const int res, const unsigned long wanted, const int gt);
51649 +#endif
51650 +
51651 +unsigned char *gr_system_salt;
51652 +unsigned char *gr_system_sum;
51653 +
51654 +static struct sprole_pw **acl_special_roles = NULL;
51655 +static __u16 num_sprole_pws = 0;
51656 +
51657 +static struct acl_role_label *kernel_role = NULL;
51658 +
51659 +static unsigned int gr_auth_attempts = 0;
51660 +static unsigned long gr_auth_expires = 0UL;
51661 +
51662 +#ifdef CONFIG_NET
51663 +extern struct vfsmount *sock_mnt;
51664 +#endif
51665 +
51666 +extern struct vfsmount *pipe_mnt;
51667 +extern struct vfsmount *shm_mnt;
51668 +#ifdef CONFIG_HUGETLBFS
51669 +extern struct vfsmount *hugetlbfs_vfsmount;
51670 +#endif
51671 +
51672 +static struct acl_object_label *fakefs_obj_rw;
51673 +static struct acl_object_label *fakefs_obj_rwx;
51674 +
51675 +extern int gr_init_uidset(void);
51676 +extern void gr_free_uidset(void);
51677 +extern void gr_remove_uid(uid_t uid);
51678 +extern int gr_find_uid(uid_t uid);
51679 +
51680 +__inline__ int
51681 +gr_acl_is_enabled(void)
51682 +{
51683 + return (gr_status & GR_READY);
51684 +}
51685 +
51686 +#ifdef CONFIG_BTRFS_FS
51687 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51688 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51689 +#endif
51690 +
51691 +static inline dev_t __get_dev(const struct dentry *dentry)
51692 +{
51693 +#ifdef CONFIG_BTRFS_FS
51694 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51695 + return get_btrfs_dev_from_inode(dentry->d_inode);
51696 + else
51697 +#endif
51698 + return dentry->d_inode->i_sb->s_dev;
51699 +}
51700 +
51701 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51702 +{
51703 + return __get_dev(dentry);
51704 +}
51705 +
51706 +static char gr_task_roletype_to_char(struct task_struct *task)
51707 +{
51708 + switch (task->role->roletype &
51709 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51710 + GR_ROLE_SPECIAL)) {
51711 + case GR_ROLE_DEFAULT:
51712 + return 'D';
51713 + case GR_ROLE_USER:
51714 + return 'U';
51715 + case GR_ROLE_GROUP:
51716 + return 'G';
51717 + case GR_ROLE_SPECIAL:
51718 + return 'S';
51719 + }
51720 +
51721 + return 'X';
51722 +}
51723 +
51724 +char gr_roletype_to_char(void)
51725 +{
51726 + return gr_task_roletype_to_char(current);
51727 +}
51728 +
51729 +__inline__ int
51730 +gr_acl_tpe_check(void)
51731 +{
51732 + if (unlikely(!(gr_status & GR_READY)))
51733 + return 0;
51734 + if (current->role->roletype & GR_ROLE_TPE)
51735 + return 1;
51736 + else
51737 + return 0;
51738 +}
51739 +
51740 +int
51741 +gr_handle_rawio(const struct inode *inode)
51742 +{
51743 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51744 + if (inode && S_ISBLK(inode->i_mode) &&
51745 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51746 + !capable(CAP_SYS_RAWIO))
51747 + return 1;
51748 +#endif
51749 + return 0;
51750 +}
51751 +
51752 +static int
51753 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51754 +{
51755 + if (likely(lena != lenb))
51756 + return 0;
51757 +
51758 + return !memcmp(a, b, lena);
51759 +}
51760 +
51761 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51762 +{
51763 + *buflen -= namelen;
51764 + if (*buflen < 0)
51765 + return -ENAMETOOLONG;
51766 + *buffer -= namelen;
51767 + memcpy(*buffer, str, namelen);
51768 + return 0;
51769 +}
51770 +
51771 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51772 +{
51773 + return prepend(buffer, buflen, name->name, name->len);
51774 +}
51775 +
51776 +static int prepend_path(const struct path *path, struct path *root,
51777 + char **buffer, int *buflen)
51778 +{
51779 + struct dentry *dentry = path->dentry;
51780 + struct vfsmount *vfsmnt = path->mnt;
51781 + struct mount *mnt = real_mount(vfsmnt);
51782 + bool slash = false;
51783 + int error = 0;
51784 +
51785 + while (dentry != root->dentry || vfsmnt != root->mnt) {
51786 + struct dentry * parent;
51787 +
51788 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51789 + /* Global root? */
51790 + if (!mnt_has_parent(mnt)) {
51791 + goto out;
51792 + }
51793 + dentry = mnt->mnt_mountpoint;
51794 + mnt = mnt->mnt_parent;
51795 + vfsmnt = &mnt->mnt;
51796 + continue;
51797 + }
51798 + parent = dentry->d_parent;
51799 + prefetch(parent);
51800 + spin_lock(&dentry->d_lock);
51801 + error = prepend_name(buffer, buflen, &dentry->d_name);
51802 + spin_unlock(&dentry->d_lock);
51803 + if (!error)
51804 + error = prepend(buffer, buflen, "/", 1);
51805 + if (error)
51806 + break;
51807 +
51808 + slash = true;
51809 + dentry = parent;
51810 + }
51811 +
51812 +out:
51813 + if (!error && !slash)
51814 + error = prepend(buffer, buflen, "/", 1);
51815 +
51816 + return error;
51817 +}
51818 +
51819 +/* this must be called with vfsmount_lock and rename_lock held */
51820 +
51821 +static char *__our_d_path(const struct path *path, struct path *root,
51822 + char *buf, int buflen)
51823 +{
51824 + char *res = buf + buflen;
51825 + int error;
51826 +
51827 + prepend(&res, &buflen, "\0", 1);
51828 + error = prepend_path(path, root, &res, &buflen);
51829 + if (error)
51830 + return ERR_PTR(error);
51831 +
51832 + return res;
51833 +}
51834 +
51835 +static char *
51836 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51837 +{
51838 + char *retval;
51839 +
51840 + retval = __our_d_path(path, root, buf, buflen);
51841 + if (unlikely(IS_ERR(retval)))
51842 + retval = strcpy(buf, "<path too long>");
51843 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51844 + retval[1] = '\0';
51845 +
51846 + return retval;
51847 +}
51848 +
51849 +static char *
51850 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51851 + char *buf, int buflen)
51852 +{
51853 + struct path path;
51854 + char *res;
51855 +
51856 + path.dentry = (struct dentry *)dentry;
51857 + path.mnt = (struct vfsmount *)vfsmnt;
51858 +
51859 + /* we can use real_root.dentry, real_root.mnt, because this is only called
51860 + by the RBAC system */
51861 + res = gen_full_path(&path, &real_root, buf, buflen);
51862 +
51863 + return res;
51864 +}
51865 +
51866 +static char *
51867 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51868 + char *buf, int buflen)
51869 +{
51870 + char *res;
51871 + struct path path;
51872 + struct path root;
51873 + struct task_struct *reaper = init_pid_ns.child_reaper;
51874 +
51875 + path.dentry = (struct dentry *)dentry;
51876 + path.mnt = (struct vfsmount *)vfsmnt;
51877 +
51878 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51879 + get_fs_root(reaper->fs, &root);
51880 +
51881 + write_seqlock(&rename_lock);
51882 + br_read_lock(&vfsmount_lock);
51883 + res = gen_full_path(&path, &root, buf, buflen);
51884 + br_read_unlock(&vfsmount_lock);
51885 + write_sequnlock(&rename_lock);
51886 +
51887 + path_put(&root);
51888 + return res;
51889 +}
51890 +
51891 +static char *
51892 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51893 +{
51894 + char *ret;
51895 + write_seqlock(&rename_lock);
51896 + br_read_lock(&vfsmount_lock);
51897 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51898 + PAGE_SIZE);
51899 + br_read_unlock(&vfsmount_lock);
51900 + write_sequnlock(&rename_lock);
51901 + return ret;
51902 +}
51903 +
51904 +static char *
51905 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51906 +{
51907 + char *ret;
51908 + char *buf;
51909 + int buflen;
51910 +
51911 + write_seqlock(&rename_lock);
51912 + br_read_lock(&vfsmount_lock);
51913 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51914 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51915 + buflen = (int)(ret - buf);
51916 + if (buflen >= 5)
51917 + prepend(&ret, &buflen, "/proc", 5);
51918 + else
51919 + ret = strcpy(buf, "<path too long>");
51920 + br_read_unlock(&vfsmount_lock);
51921 + write_sequnlock(&rename_lock);
51922 + return ret;
51923 +}
51924 +
51925 +char *
51926 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51927 +{
51928 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51929 + PAGE_SIZE);
51930 +}
51931 +
51932 +char *
51933 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51934 +{
51935 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51936 + PAGE_SIZE);
51937 +}
51938 +
51939 +char *
51940 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51941 +{
51942 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51943 + PAGE_SIZE);
51944 +}
51945 +
51946 +char *
51947 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51948 +{
51949 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51950 + PAGE_SIZE);
51951 +}
51952 +
51953 +char *
51954 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51955 +{
51956 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51957 + PAGE_SIZE);
51958 +}
51959 +
51960 +__inline__ __u32
51961 +to_gr_audit(const __u32 reqmode)
51962 +{
51963 + /* masks off auditable permission flags, then shifts them to create
51964 + auditing flags, and adds the special case of append auditing if
51965 + we're requesting write */
51966 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51967 +}
51968 +
51969 +struct acl_subject_label *
51970 +lookup_subject_map(const struct acl_subject_label *userp)
51971 +{
51972 + unsigned int index = shash(userp, subj_map_set.s_size);
51973 + struct subject_map *match;
51974 +
51975 + match = subj_map_set.s_hash[index];
51976 +
51977 + while (match && match->user != userp)
51978 + match = match->next;
51979 +
51980 + if (match != NULL)
51981 + return match->kernel;
51982 + else
51983 + return NULL;
51984 +}
51985 +
51986 +static void
51987 +insert_subj_map_entry(struct subject_map *subjmap)
51988 +{
51989 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51990 + struct subject_map **curr;
51991 +
51992 + subjmap->prev = NULL;
51993 +
51994 + curr = &subj_map_set.s_hash[index];
51995 + if (*curr != NULL)
51996 + (*curr)->prev = subjmap;
51997 +
51998 + subjmap->next = *curr;
51999 + *curr = subjmap;
52000 +
52001 + return;
52002 +}
52003 +
52004 +static struct acl_role_label *
52005 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
52006 + const gid_t gid)
52007 +{
52008 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
52009 + struct acl_role_label *match;
52010 + struct role_allowed_ip *ipp;
52011 + unsigned int x;
52012 + u32 curr_ip = task->signal->curr_ip;
52013 +
52014 + task->signal->saved_ip = curr_ip;
52015 +
52016 + match = acl_role_set.r_hash[index];
52017 +
52018 + while (match) {
52019 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
52020 + for (x = 0; x < match->domain_child_num; x++) {
52021 + if (match->domain_children[x] == uid)
52022 + goto found;
52023 + }
52024 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
52025 + break;
52026 + match = match->next;
52027 + }
52028 +found:
52029 + if (match == NULL) {
52030 + try_group:
52031 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
52032 + match = acl_role_set.r_hash[index];
52033 +
52034 + while (match) {
52035 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
52036 + for (x = 0; x < match->domain_child_num; x++) {
52037 + if (match->domain_children[x] == gid)
52038 + goto found2;
52039 + }
52040 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
52041 + break;
52042 + match = match->next;
52043 + }
52044 +found2:
52045 + if (match == NULL)
52046 + match = default_role;
52047 + if (match->allowed_ips == NULL)
52048 + return match;
52049 + else {
52050 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
52051 + if (likely
52052 + ((ntohl(curr_ip) & ipp->netmask) ==
52053 + (ntohl(ipp->addr) & ipp->netmask)))
52054 + return match;
52055 + }
52056 + match = default_role;
52057 + }
52058 + } else if (match->allowed_ips == NULL) {
52059 + return match;
52060 + } else {
52061 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
52062 + if (likely
52063 + ((ntohl(curr_ip) & ipp->netmask) ==
52064 + (ntohl(ipp->addr) & ipp->netmask)))
52065 + return match;
52066 + }
52067 + goto try_group;
52068 + }
52069 +
52070 + return match;
52071 +}
52072 +
52073 +struct acl_subject_label *
52074 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
52075 + const struct acl_role_label *role)
52076 +{
52077 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
52078 + struct acl_subject_label *match;
52079 +
52080 + match = role->subj_hash[index];
52081 +
52082 + while (match && (match->inode != ino || match->device != dev ||
52083 + (match->mode & GR_DELETED))) {
52084 + match = match->next;
52085 + }
52086 +
52087 + if (match && !(match->mode & GR_DELETED))
52088 + return match;
52089 + else
52090 + return NULL;
52091 +}
52092 +
52093 +struct acl_subject_label *
52094 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
52095 + const struct acl_role_label *role)
52096 +{
52097 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
52098 + struct acl_subject_label *match;
52099 +
52100 + match = role->subj_hash[index];
52101 +
52102 + while (match && (match->inode != ino || match->device != dev ||
52103 + !(match->mode & GR_DELETED))) {
52104 + match = match->next;
52105 + }
52106 +
52107 + if (match && (match->mode & GR_DELETED))
52108 + return match;
52109 + else
52110 + return NULL;
52111 +}
52112 +
52113 +static struct acl_object_label *
52114 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
52115 + const struct acl_subject_label *subj)
52116 +{
52117 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
52118 + struct acl_object_label *match;
52119 +
52120 + match = subj->obj_hash[index];
52121 +
52122 + while (match && (match->inode != ino || match->device != dev ||
52123 + (match->mode & GR_DELETED))) {
52124 + match = match->next;
52125 + }
52126 +
52127 + if (match && !(match->mode & GR_DELETED))
52128 + return match;
52129 + else
52130 + return NULL;
52131 +}
52132 +
52133 +static struct acl_object_label *
52134 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
52135 + const struct acl_subject_label *subj)
52136 +{
52137 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
52138 + struct acl_object_label *match;
52139 +
52140 + match = subj->obj_hash[index];
52141 +
52142 + while (match && (match->inode != ino || match->device != dev ||
52143 + !(match->mode & GR_DELETED))) {
52144 + match = match->next;
52145 + }
52146 +
52147 + if (match && (match->mode & GR_DELETED))
52148 + return match;
52149 +
52150 + match = subj->obj_hash[index];
52151 +
52152 + while (match && (match->inode != ino || match->device != dev ||
52153 + (match->mode & GR_DELETED))) {
52154 + match = match->next;
52155 + }
52156 +
52157 + if (match && !(match->mode & GR_DELETED))
52158 + return match;
52159 + else
52160 + return NULL;
52161 +}
52162 +
52163 +static struct name_entry *
52164 +lookup_name_entry(const char *name)
52165 +{
52166 + unsigned int len = strlen(name);
52167 + unsigned int key = full_name_hash(name, len);
52168 + unsigned int index = key % name_set.n_size;
52169 + struct name_entry *match;
52170 +
52171 + match = name_set.n_hash[index];
52172 +
52173 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
52174 + match = match->next;
52175 +
52176 + return match;
52177 +}
52178 +
52179 +static struct name_entry *
52180 +lookup_name_entry_create(const char *name)
52181 +{
52182 + unsigned int len = strlen(name);
52183 + unsigned int key = full_name_hash(name, len);
52184 + unsigned int index = key % name_set.n_size;
52185 + struct name_entry *match;
52186 +
52187 + match = name_set.n_hash[index];
52188 +
52189 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52190 + !match->deleted))
52191 + match = match->next;
52192 +
52193 + if (match && match->deleted)
52194 + return match;
52195 +
52196 + match = name_set.n_hash[index];
52197 +
52198 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52199 + match->deleted))
52200 + match = match->next;
52201 +
52202 + if (match && !match->deleted)
52203 + return match;
52204 + else
52205 + return NULL;
52206 +}
52207 +
52208 +static struct inodev_entry *
52209 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
52210 +{
52211 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
52212 + struct inodev_entry *match;
52213 +
52214 + match = inodev_set.i_hash[index];
52215 +
52216 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
52217 + match = match->next;
52218 +
52219 + return match;
52220 +}
52221 +
52222 +static void
52223 +insert_inodev_entry(struct inodev_entry *entry)
52224 +{
52225 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
52226 + inodev_set.i_size);
52227 + struct inodev_entry **curr;
52228 +
52229 + entry->prev = NULL;
52230 +
52231 + curr = &inodev_set.i_hash[index];
52232 + if (*curr != NULL)
52233 + (*curr)->prev = entry;
52234 +
52235 + entry->next = *curr;
52236 + *curr = entry;
52237 +
52238 + return;
52239 +}
52240 +
52241 +static void
52242 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52243 +{
52244 + unsigned int index =
52245 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52246 + struct acl_role_label **curr;
52247 + struct acl_role_label *tmp, *tmp2;
52248 +
52249 + curr = &acl_role_set.r_hash[index];
52250 +
52251 + /* simple case, slot is empty, just set it to our role */
52252 + if (*curr == NULL) {
52253 + *curr = role;
52254 + } else {
52255 + /* example:
52256 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
52257 + 2 -> 3
52258 + */
52259 + /* first check to see if we can already be reached via this slot */
52260 + tmp = *curr;
52261 + while (tmp && tmp != role)
52262 + tmp = tmp->next;
52263 + if (tmp == role) {
52264 + /* we don't need to add ourselves to this slot's chain */
52265 + return;
52266 + }
52267 + /* we need to add ourselves to this chain, two cases */
52268 + if (role->next == NULL) {
52269 + /* simple case, append the current chain to our role */
52270 + role->next = *curr;
52271 + *curr = role;
52272 + } else {
52273 + /* 1 -> 2 -> 3 -> 4
52274 + 2 -> 3 -> 4
52275 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
52276 + */
52277 + /* trickier case: walk our role's chain until we find
52278 + the role for the start of the current slot's chain */
52279 + tmp = role;
52280 + tmp2 = *curr;
52281 + while (tmp->next && tmp->next != tmp2)
52282 + tmp = tmp->next;
52283 + if (tmp->next == tmp2) {
52284 + /* from example above, we found 3, so just
52285 + replace this slot's chain with ours */
52286 + *curr = role;
52287 + } else {
52288 + /* we didn't find a subset of our role's chain
52289 + in the current slot's chain, so append their
52290 + chain to ours, and set us as the first role in
52291 + the slot's chain
52292 +
52293 + we could fold this case with the case above,
52294 + but making it explicit for clarity
52295 + */
52296 + tmp->next = tmp2;
52297 + *curr = role;
52298 + }
52299 + }
52300 + }
52301 +
52302 + return;
52303 +}
52304 +
52305 +static void
52306 +insert_acl_role_label(struct acl_role_label *role)
52307 +{
52308 + int i;
52309 +
52310 + if (role_list == NULL) {
52311 + role_list = role;
52312 + role->prev = NULL;
52313 + } else {
52314 + role->prev = role_list;
52315 + role_list = role;
52316 + }
52317 +
52318 + /* used for hash chains */
52319 + role->next = NULL;
52320 +
52321 + if (role->roletype & GR_ROLE_DOMAIN) {
52322 + for (i = 0; i < role->domain_child_num; i++)
52323 + __insert_acl_role_label(role, role->domain_children[i]);
52324 + } else
52325 + __insert_acl_role_label(role, role->uidgid);
52326 +}
52327 +
52328 +static int
52329 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52330 +{
52331 + struct name_entry **curr, *nentry;
52332 + struct inodev_entry *ientry;
52333 + unsigned int len = strlen(name);
52334 + unsigned int key = full_name_hash(name, len);
52335 + unsigned int index = key % name_set.n_size;
52336 +
52337 + curr = &name_set.n_hash[index];
52338 +
52339 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52340 + curr = &((*curr)->next);
52341 +
52342 + if (*curr != NULL)
52343 + return 1;
52344 +
52345 + nentry = acl_alloc(sizeof (struct name_entry));
52346 + if (nentry == NULL)
52347 + return 0;
52348 + ientry = acl_alloc(sizeof (struct inodev_entry));
52349 + if (ientry == NULL)
52350 + return 0;
52351 + ientry->nentry = nentry;
52352 +
52353 + nentry->key = key;
52354 + nentry->name = name;
52355 + nentry->inode = inode;
52356 + nentry->device = device;
52357 + nentry->len = len;
52358 + nentry->deleted = deleted;
52359 +
52360 + nentry->prev = NULL;
52361 + curr = &name_set.n_hash[index];
52362 + if (*curr != NULL)
52363 + (*curr)->prev = nentry;
52364 + nentry->next = *curr;
52365 + *curr = nentry;
52366 +
52367 + /* insert us into the table searchable by inode/dev */
52368 + insert_inodev_entry(ientry);
52369 +
52370 + return 1;
52371 +}
52372 +
52373 +static void
52374 +insert_acl_obj_label(struct acl_object_label *obj,
52375 + struct acl_subject_label *subj)
52376 +{
52377 + unsigned int index =
52378 + fhash(obj->inode, obj->device, subj->obj_hash_size);
52379 + struct acl_object_label **curr;
52380 +
52381 +
52382 + obj->prev = NULL;
52383 +
52384 + curr = &subj->obj_hash[index];
52385 + if (*curr != NULL)
52386 + (*curr)->prev = obj;
52387 +
52388 + obj->next = *curr;
52389 + *curr = obj;
52390 +
52391 + return;
52392 +}
52393 +
52394 +static void
52395 +insert_acl_subj_label(struct acl_subject_label *obj,
52396 + struct acl_role_label *role)
52397 +{
52398 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52399 + struct acl_subject_label **curr;
52400 +
52401 + obj->prev = NULL;
52402 +
52403 + curr = &role->subj_hash[index];
52404 + if (*curr != NULL)
52405 + (*curr)->prev = obj;
52406 +
52407 + obj->next = *curr;
52408 + *curr = obj;
52409 +
52410 + return;
52411 +}
52412 +
52413 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52414 +
52415 +static void *
52416 +create_table(__u32 * len, int elementsize)
52417 +{
52418 + unsigned int table_sizes[] = {
52419 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52420 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52421 + 4194301, 8388593, 16777213, 33554393, 67108859
52422 + };
52423 + void *newtable = NULL;
52424 + unsigned int pwr = 0;
52425 +
52426 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52427 + table_sizes[pwr] <= *len)
52428 + pwr++;
52429 +
52430 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52431 + return newtable;
52432 +
52433 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52434 + newtable =
52435 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52436 + else
52437 + newtable = vmalloc(table_sizes[pwr] * elementsize);
52438 +
52439 + *len = table_sizes[pwr];
52440 +
52441 + return newtable;
52442 +}
52443 +
52444 +static int
52445 +init_variables(const struct gr_arg *arg)
52446 +{
52447 + struct task_struct *reaper = init_pid_ns.child_reaper;
52448 + unsigned int stacksize;
52449 +
52450 + subj_map_set.s_size = arg->role_db.num_subjects;
52451 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52452 + name_set.n_size = arg->role_db.num_objects;
52453 + inodev_set.i_size = arg->role_db.num_objects;
52454 +
52455 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
52456 + !name_set.n_size || !inodev_set.i_size)
52457 + return 1;
52458 +
52459 + if (!gr_init_uidset())
52460 + return 1;
52461 +
52462 + /* set up the stack that holds allocation info */
52463 +
52464 + stacksize = arg->role_db.num_pointers + 5;
52465 +
52466 + if (!acl_alloc_stack_init(stacksize))
52467 + return 1;
52468 +
52469 + /* grab reference for the real root dentry and vfsmount */
52470 + get_fs_root(reaper->fs, &real_root);
52471 +
52472 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52473 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52474 +#endif
52475 +
52476 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52477 + if (fakefs_obj_rw == NULL)
52478 + return 1;
52479 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52480 +
52481 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52482 + if (fakefs_obj_rwx == NULL)
52483 + return 1;
52484 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
52485 +
52486 + subj_map_set.s_hash =
52487 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52488 + acl_role_set.r_hash =
52489 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52490 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52491 + inodev_set.i_hash =
52492 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52493 +
52494 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52495 + !name_set.n_hash || !inodev_set.i_hash)
52496 + return 1;
52497 +
52498 + memset(subj_map_set.s_hash, 0,
52499 + sizeof(struct subject_map *) * subj_map_set.s_size);
52500 + memset(acl_role_set.r_hash, 0,
52501 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
52502 + memset(name_set.n_hash, 0,
52503 + sizeof (struct name_entry *) * name_set.n_size);
52504 + memset(inodev_set.i_hash, 0,
52505 + sizeof (struct inodev_entry *) * inodev_set.i_size);
52506 +
52507 + return 0;
52508 +}
52509 +
52510 +/* free information not needed after startup
52511 + currently contains user->kernel pointer mappings for subjects
52512 +*/
52513 +
52514 +static void
52515 +free_init_variables(void)
52516 +{
52517 + __u32 i;
52518 +
52519 + if (subj_map_set.s_hash) {
52520 + for (i = 0; i < subj_map_set.s_size; i++) {
52521 + if (subj_map_set.s_hash[i]) {
52522 + kfree(subj_map_set.s_hash[i]);
52523 + subj_map_set.s_hash[i] = NULL;
52524 + }
52525 + }
52526 +
52527 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52528 + PAGE_SIZE)
52529 + kfree(subj_map_set.s_hash);
52530 + else
52531 + vfree(subj_map_set.s_hash);
52532 + }
52533 +
52534 + return;
52535 +}
52536 +
52537 +static void
52538 +free_variables(void)
52539 +{
52540 + struct acl_subject_label *s;
52541 + struct acl_role_label *r;
52542 + struct task_struct *task, *task2;
52543 + unsigned int x;
52544 +
52545 + gr_clear_learn_entries();
52546 +
52547 + read_lock(&tasklist_lock);
52548 + do_each_thread(task2, task) {
52549 + task->acl_sp_role = 0;
52550 + task->acl_role_id = 0;
52551 + task->acl = NULL;
52552 + task->role = NULL;
52553 + } while_each_thread(task2, task);
52554 + read_unlock(&tasklist_lock);
52555 +
52556 + /* release the reference to the real root dentry and vfsmount */
52557 + path_put(&real_root);
52558 + memset(&real_root, 0, sizeof(real_root));
52559 +
52560 + /* free all object hash tables */
52561 +
52562 + FOR_EACH_ROLE_START(r)
52563 + if (r->subj_hash == NULL)
52564 + goto next_role;
52565 + FOR_EACH_SUBJECT_START(r, s, x)
52566 + if (s->obj_hash == NULL)
52567 + break;
52568 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52569 + kfree(s->obj_hash);
52570 + else
52571 + vfree(s->obj_hash);
52572 + FOR_EACH_SUBJECT_END(s, x)
52573 + FOR_EACH_NESTED_SUBJECT_START(r, s)
52574 + if (s->obj_hash == NULL)
52575 + break;
52576 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52577 + kfree(s->obj_hash);
52578 + else
52579 + vfree(s->obj_hash);
52580 + FOR_EACH_NESTED_SUBJECT_END(s)
52581 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52582 + kfree(r->subj_hash);
52583 + else
52584 + vfree(r->subj_hash);
52585 + r->subj_hash = NULL;
52586 +next_role:
52587 + FOR_EACH_ROLE_END(r)
52588 +
52589 + acl_free_all();
52590 +
52591 + if (acl_role_set.r_hash) {
52592 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52593 + PAGE_SIZE)
52594 + kfree(acl_role_set.r_hash);
52595 + else
52596 + vfree(acl_role_set.r_hash);
52597 + }
52598 + if (name_set.n_hash) {
52599 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
52600 + PAGE_SIZE)
52601 + kfree(name_set.n_hash);
52602 + else
52603 + vfree(name_set.n_hash);
52604 + }
52605 +
52606 + if (inodev_set.i_hash) {
52607 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52608 + PAGE_SIZE)
52609 + kfree(inodev_set.i_hash);
52610 + else
52611 + vfree(inodev_set.i_hash);
52612 + }
52613 +
52614 + gr_free_uidset();
52615 +
52616 + memset(&name_set, 0, sizeof (struct name_db));
52617 + memset(&inodev_set, 0, sizeof (struct inodev_db));
52618 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52619 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52620 +
52621 + default_role = NULL;
52622 + kernel_role = NULL;
52623 + role_list = NULL;
52624 +
52625 + return;
52626 +}
52627 +
52628 +static __u32
52629 +count_user_objs(struct acl_object_label *userp)
52630 +{
52631 + struct acl_object_label o_tmp;
52632 + __u32 num = 0;
52633 +
52634 + while (userp) {
52635 + if (copy_from_user(&o_tmp, userp,
52636 + sizeof (struct acl_object_label)))
52637 + break;
52638 +
52639 + userp = o_tmp.prev;
52640 + num++;
52641 + }
52642 +
52643 + return num;
52644 +}
52645 +
52646 +static struct acl_subject_label *
52647 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52648 +
52649 +static int
52650 +copy_user_glob(struct acl_object_label *obj)
52651 +{
52652 + struct acl_object_label *g_tmp, **guser;
52653 + unsigned int len;
52654 + char *tmp;
52655 +
52656 + if (obj->globbed == NULL)
52657 + return 0;
52658 +
52659 + guser = &obj->globbed;
52660 + while (*guser) {
52661 + g_tmp = (struct acl_object_label *)
52662 + acl_alloc(sizeof (struct acl_object_label));
52663 + if (g_tmp == NULL)
52664 + return -ENOMEM;
52665 +
52666 + if (copy_from_user(g_tmp, *guser,
52667 + sizeof (struct acl_object_label)))
52668 + return -EFAULT;
52669 +
52670 + len = strnlen_user(g_tmp->filename, PATH_MAX);
52671 +
52672 + if (!len || len >= PATH_MAX)
52673 + return -EINVAL;
52674 +
52675 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52676 + return -ENOMEM;
52677 +
52678 + if (copy_from_user(tmp, g_tmp->filename, len))
52679 + return -EFAULT;
52680 + tmp[len-1] = '\0';
52681 + g_tmp->filename = tmp;
52682 +
52683 + *guser = g_tmp;
52684 + guser = &(g_tmp->next);
52685 + }
52686 +
52687 + return 0;
52688 +}
52689 +
52690 +static int
52691 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52692 + struct acl_role_label *role)
52693 +{
52694 + struct acl_object_label *o_tmp;
52695 + unsigned int len;
52696 + int ret;
52697 + char *tmp;
52698 +
52699 + while (userp) {
52700 + if ((o_tmp = (struct acl_object_label *)
52701 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
52702 + return -ENOMEM;
52703 +
52704 + if (copy_from_user(o_tmp, userp,
52705 + sizeof (struct acl_object_label)))
52706 + return -EFAULT;
52707 +
52708 + userp = o_tmp->prev;
52709 +
52710 + len = strnlen_user(o_tmp->filename, PATH_MAX);
52711 +
52712 + if (!len || len >= PATH_MAX)
52713 + return -EINVAL;
52714 +
52715 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52716 + return -ENOMEM;
52717 +
52718 + if (copy_from_user(tmp, o_tmp->filename, len))
52719 + return -EFAULT;
52720 + tmp[len-1] = '\0';
52721 + o_tmp->filename = tmp;
52722 +
52723 + insert_acl_obj_label(o_tmp, subj);
52724 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52725 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52726 + return -ENOMEM;
52727 +
52728 + ret = copy_user_glob(o_tmp);
52729 + if (ret)
52730 + return ret;
52731 +
52732 + if (o_tmp->nested) {
52733 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52734 + if (IS_ERR(o_tmp->nested))
52735 + return PTR_ERR(o_tmp->nested);
52736 +
52737 + /* insert into nested subject list */
52738 + o_tmp->nested->next = role->hash->first;
52739 + role->hash->first = o_tmp->nested;
52740 + }
52741 + }
52742 +
52743 + return 0;
52744 +}
52745 +
52746 +static __u32
52747 +count_user_subjs(struct acl_subject_label *userp)
52748 +{
52749 + struct acl_subject_label s_tmp;
52750 + __u32 num = 0;
52751 +
52752 + while (userp) {
52753 + if (copy_from_user(&s_tmp, userp,
52754 + sizeof (struct acl_subject_label)))
52755 + break;
52756 +
52757 + userp = s_tmp.prev;
52758 + /* do not count nested subjects against this count, since
52759 + they are not included in the hash table, but are
52760 + attached to objects. We have already counted
52761 + the subjects in userspace for the allocation
52762 + stack
52763 + */
52764 + if (!(s_tmp.mode & GR_NESTED))
52765 + num++;
52766 + }
52767 +
52768 + return num;
52769 +}
52770 +
52771 +static int
52772 +copy_user_allowedips(struct acl_role_label *rolep)
52773 +{
52774 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52775 +
52776 + ruserip = rolep->allowed_ips;
52777 +
52778 + while (ruserip) {
52779 + rlast = rtmp;
52780 +
52781 + if ((rtmp = (struct role_allowed_ip *)
52782 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52783 + return -ENOMEM;
52784 +
52785 + if (copy_from_user(rtmp, ruserip,
52786 + sizeof (struct role_allowed_ip)))
52787 + return -EFAULT;
52788 +
52789 + ruserip = rtmp->prev;
52790 +
52791 + if (!rlast) {
52792 + rtmp->prev = NULL;
52793 + rolep->allowed_ips = rtmp;
52794 + } else {
52795 + rlast->next = rtmp;
52796 + rtmp->prev = rlast;
52797 + }
52798 +
52799 + if (!ruserip)
52800 + rtmp->next = NULL;
52801 + }
52802 +
52803 + return 0;
52804 +}
52805 +
52806 +static int
52807 +copy_user_transitions(struct acl_role_label *rolep)
52808 +{
52809 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
52810 +
52811 + unsigned int len;
52812 + char *tmp;
52813 +
52814 + rusertp = rolep->transitions;
52815 +
52816 + while (rusertp) {
52817 + rlast = rtmp;
52818 +
52819 + if ((rtmp = (struct role_transition *)
52820 + acl_alloc(sizeof (struct role_transition))) == NULL)
52821 + return -ENOMEM;
52822 +
52823 + if (copy_from_user(rtmp, rusertp,
52824 + sizeof (struct role_transition)))
52825 + return -EFAULT;
52826 +
52827 + rusertp = rtmp->prev;
52828 +
52829 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52830 +
52831 + if (!len || len >= GR_SPROLE_LEN)
52832 + return -EINVAL;
52833 +
52834 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52835 + return -ENOMEM;
52836 +
52837 + if (copy_from_user(tmp, rtmp->rolename, len))
52838 + return -EFAULT;
52839 + tmp[len-1] = '\0';
52840 + rtmp->rolename = tmp;
52841 +
52842 + if (!rlast) {
52843 + rtmp->prev = NULL;
52844 + rolep->transitions = rtmp;
52845 + } else {
52846 + rlast->next = rtmp;
52847 + rtmp->prev = rlast;
52848 + }
52849 +
52850 + if (!rusertp)
52851 + rtmp->next = NULL;
52852 + }
52853 +
52854 + return 0;
52855 +}
52856 +
52857 +static struct acl_subject_label *
52858 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52859 +{
52860 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52861 + unsigned int len;
52862 + char *tmp;
52863 + __u32 num_objs;
52864 + struct acl_ip_label **i_tmp, *i_utmp2;
52865 + struct gr_hash_struct ghash;
52866 + struct subject_map *subjmap;
52867 + unsigned int i_num;
52868 + int err;
52869 +
52870 + s_tmp = lookup_subject_map(userp);
52871 +
52872 + /* we've already copied this subject into the kernel, just return
52873 + the reference to it, and don't copy it over again
52874 + */
52875 + if (s_tmp)
52876 + return(s_tmp);
52877 +
52878 + if ((s_tmp = (struct acl_subject_label *)
52879 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52880 + return ERR_PTR(-ENOMEM);
52881 +
52882 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52883 + if (subjmap == NULL)
52884 + return ERR_PTR(-ENOMEM);
52885 +
52886 + subjmap->user = userp;
52887 + subjmap->kernel = s_tmp;
52888 + insert_subj_map_entry(subjmap);
52889 +
52890 + if (copy_from_user(s_tmp, userp,
52891 + sizeof (struct acl_subject_label)))
52892 + return ERR_PTR(-EFAULT);
52893 +
52894 + len = strnlen_user(s_tmp->filename, PATH_MAX);
52895 +
52896 + if (!len || len >= PATH_MAX)
52897 + return ERR_PTR(-EINVAL);
52898 +
52899 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52900 + return ERR_PTR(-ENOMEM);
52901 +
52902 + if (copy_from_user(tmp, s_tmp->filename, len))
52903 + return ERR_PTR(-EFAULT);
52904 + tmp[len-1] = '\0';
52905 + s_tmp->filename = tmp;
52906 +
52907 + if (!strcmp(s_tmp->filename, "/"))
52908 + role->root_label = s_tmp;
52909 +
52910 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52911 + return ERR_PTR(-EFAULT);
52912 +
52913 + /* copy user and group transition tables */
52914 +
52915 + if (s_tmp->user_trans_num) {
52916 + uid_t *uidlist;
52917 +
52918 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52919 + if (uidlist == NULL)
52920 + return ERR_PTR(-ENOMEM);
52921 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52922 + return ERR_PTR(-EFAULT);
52923 +
52924 + s_tmp->user_transitions = uidlist;
52925 + }
52926 +
52927 + if (s_tmp->group_trans_num) {
52928 + gid_t *gidlist;
52929 +
52930 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52931 + if (gidlist == NULL)
52932 + return ERR_PTR(-ENOMEM);
52933 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52934 + return ERR_PTR(-EFAULT);
52935 +
52936 + s_tmp->group_transitions = gidlist;
52937 + }
52938 +
52939 + /* set up object hash table */
52940 + num_objs = count_user_objs(ghash.first);
52941 +
52942 + s_tmp->obj_hash_size = num_objs;
52943 + s_tmp->obj_hash =
52944 + (struct acl_object_label **)
52945 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52946 +
52947 + if (!s_tmp->obj_hash)
52948 + return ERR_PTR(-ENOMEM);
52949 +
52950 + memset(s_tmp->obj_hash, 0,
52951 + s_tmp->obj_hash_size *
52952 + sizeof (struct acl_object_label *));
52953 +
52954 + /* add in objects */
52955 + err = copy_user_objs(ghash.first, s_tmp, role);
52956 +
52957 + if (err)
52958 + return ERR_PTR(err);
52959 +
52960 + /* set pointer for parent subject */
52961 + if (s_tmp->parent_subject) {
52962 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52963 +
52964 + if (IS_ERR(s_tmp2))
52965 + return s_tmp2;
52966 +
52967 + s_tmp->parent_subject = s_tmp2;
52968 + }
52969 +
52970 + /* add in ip acls */
52971 +
52972 + if (!s_tmp->ip_num) {
52973 + s_tmp->ips = NULL;
52974 + goto insert;
52975 + }
52976 +
52977 + i_tmp =
52978 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52979 + sizeof (struct acl_ip_label *));
52980 +
52981 + if (!i_tmp)
52982 + return ERR_PTR(-ENOMEM);
52983 +
52984 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52985 + *(i_tmp + i_num) =
52986 + (struct acl_ip_label *)
52987 + acl_alloc(sizeof (struct acl_ip_label));
52988 + if (!*(i_tmp + i_num))
52989 + return ERR_PTR(-ENOMEM);
52990 +
52991 + if (copy_from_user
52992 + (&i_utmp2, s_tmp->ips + i_num,
52993 + sizeof (struct acl_ip_label *)))
52994 + return ERR_PTR(-EFAULT);
52995 +
52996 + if (copy_from_user
52997 + (*(i_tmp + i_num), i_utmp2,
52998 + sizeof (struct acl_ip_label)))
52999 + return ERR_PTR(-EFAULT);
53000 +
53001 + if ((*(i_tmp + i_num))->iface == NULL)
53002 + continue;
53003 +
53004 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
53005 + if (!len || len >= IFNAMSIZ)
53006 + return ERR_PTR(-EINVAL);
53007 + tmp = acl_alloc(len);
53008 + if (tmp == NULL)
53009 + return ERR_PTR(-ENOMEM);
53010 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
53011 + return ERR_PTR(-EFAULT);
53012 + (*(i_tmp + i_num))->iface = tmp;
53013 + }
53014 +
53015 + s_tmp->ips = i_tmp;
53016 +
53017 +insert:
53018 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
53019 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
53020 + return ERR_PTR(-ENOMEM);
53021 +
53022 + return s_tmp;
53023 +}
53024 +
53025 +static int
53026 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
53027 +{
53028 + struct acl_subject_label s_pre;
53029 + struct acl_subject_label * ret;
53030 + int err;
53031 +
53032 + while (userp) {
53033 + if (copy_from_user(&s_pre, userp,
53034 + sizeof (struct acl_subject_label)))
53035 + return -EFAULT;
53036 +
53037 + /* do not add nested subjects here, add
53038 + while parsing objects
53039 + */
53040 +
53041 + if (s_pre.mode & GR_NESTED) {
53042 + userp = s_pre.prev;
53043 + continue;
53044 + }
53045 +
53046 + ret = do_copy_user_subj(userp, role);
53047 +
53048 + err = PTR_ERR(ret);
53049 + if (IS_ERR(ret))
53050 + return err;
53051 +
53052 + insert_acl_subj_label(ret, role);
53053 +
53054 + userp = s_pre.prev;
53055 + }
53056 +
53057 + return 0;
53058 +}
53059 +
53060 +static int
53061 +copy_user_acl(struct gr_arg *arg)
53062 +{
53063 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
53064 + struct sprole_pw *sptmp;
53065 + struct gr_hash_struct *ghash;
53066 + uid_t *domainlist;
53067 + unsigned int r_num;
53068 + unsigned int len;
53069 + char *tmp;
53070 + int err = 0;
53071 + __u16 i;
53072 + __u32 num_subjs;
53073 +
53074 + /* we need a default and kernel role */
53075 + if (arg->role_db.num_roles < 2)
53076 + return -EINVAL;
53077 +
53078 + /* copy special role authentication info from userspace */
53079 +
53080 + num_sprole_pws = arg->num_sprole_pws;
53081 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
53082 +
53083 + if (!acl_special_roles && num_sprole_pws)
53084 + return -ENOMEM;
53085 +
53086 + for (i = 0; i < num_sprole_pws; i++) {
53087 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
53088 + if (!sptmp)
53089 + return -ENOMEM;
53090 + if (copy_from_user(sptmp, arg->sprole_pws + i,
53091 + sizeof (struct sprole_pw)))
53092 + return -EFAULT;
53093 +
53094 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
53095 +
53096 + if (!len || len >= GR_SPROLE_LEN)
53097 + return -EINVAL;
53098 +
53099 + if ((tmp = (char *) acl_alloc(len)) == NULL)
53100 + return -ENOMEM;
53101 +
53102 + if (copy_from_user(tmp, sptmp->rolename, len))
53103 + return -EFAULT;
53104 +
53105 + tmp[len-1] = '\0';
53106 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53107 + printk(KERN_ALERT "Copying special role %s\n", tmp);
53108 +#endif
53109 + sptmp->rolename = tmp;
53110 + acl_special_roles[i] = sptmp;
53111 + }
53112 +
53113 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
53114 +
53115 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
53116 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
53117 +
53118 + if (!r_tmp)
53119 + return -ENOMEM;
53120 +
53121 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
53122 + sizeof (struct acl_role_label *)))
53123 + return -EFAULT;
53124 +
53125 + if (copy_from_user(r_tmp, r_utmp2,
53126 + sizeof (struct acl_role_label)))
53127 + return -EFAULT;
53128 +
53129 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
53130 +
53131 + if (!len || len >= PATH_MAX)
53132 + return -EINVAL;
53133 +
53134 + if ((tmp = (char *) acl_alloc(len)) == NULL)
53135 + return -ENOMEM;
53136 +
53137 + if (copy_from_user(tmp, r_tmp->rolename, len))
53138 + return -EFAULT;
53139 +
53140 + tmp[len-1] = '\0';
53141 + r_tmp->rolename = tmp;
53142 +
53143 + if (!strcmp(r_tmp->rolename, "default")
53144 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
53145 + default_role = r_tmp;
53146 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
53147 + kernel_role = r_tmp;
53148 + }
53149 +
53150 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
53151 + return -ENOMEM;
53152 +
53153 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
53154 + return -EFAULT;
53155 +
53156 + r_tmp->hash = ghash;
53157 +
53158 + num_subjs = count_user_subjs(r_tmp->hash->first);
53159 +
53160 + r_tmp->subj_hash_size = num_subjs;
53161 + r_tmp->subj_hash =
53162 + (struct acl_subject_label **)
53163 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
53164 +
53165 + if (!r_tmp->subj_hash)
53166 + return -ENOMEM;
53167 +
53168 + err = copy_user_allowedips(r_tmp);
53169 + if (err)
53170 + return err;
53171 +
53172 + /* copy domain info */
53173 + if (r_tmp->domain_children != NULL) {
53174 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
53175 + if (domainlist == NULL)
53176 + return -ENOMEM;
53177 +
53178 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
53179 + return -EFAULT;
53180 +
53181 + r_tmp->domain_children = domainlist;
53182 + }
53183 +
53184 + err = copy_user_transitions(r_tmp);
53185 + if (err)
53186 + return err;
53187 +
53188 + memset(r_tmp->subj_hash, 0,
53189 + r_tmp->subj_hash_size *
53190 + sizeof (struct acl_subject_label *));
53191 +
53192 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
53193 +
53194 + if (err)
53195 + return err;
53196 +
53197 + /* set nested subject list to null */
53198 + r_tmp->hash->first = NULL;
53199 +
53200 + insert_acl_role_label(r_tmp);
53201 + }
53202 +
53203 + if (default_role == NULL || kernel_role == NULL)
53204 + return -EINVAL;
53205 +
53206 + return err;
53207 +}
53208 +
53209 +static int
53210 +gracl_init(struct gr_arg *args)
53211 +{
53212 + int error = 0;
53213 +
53214 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
53215 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
53216 +
53217 + if (init_variables(args)) {
53218 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
53219 + error = -ENOMEM;
53220 + free_variables();
53221 + goto out;
53222 + }
53223 +
53224 + error = copy_user_acl(args);
53225 + free_init_variables();
53226 + if (error) {
53227 + free_variables();
53228 + goto out;
53229 + }
53230 +
53231 + if ((error = gr_set_acls(0))) {
53232 + free_variables();
53233 + goto out;
53234 + }
53235 +
53236 + pax_open_kernel();
53237 + gr_status |= GR_READY;
53238 + pax_close_kernel();
53239 +
53240 + out:
53241 + return error;
53242 +}
53243 +
53244 +/* derived from glibc fnmatch() 0: match, 1: no match*/
53245 +
53246 +static int
53247 +glob_match(const char *p, const char *n)
53248 +{
53249 + char c;
53250 +
53251 + while ((c = *p++) != '\0') {
53252 + switch (c) {
53253 + case '?':
53254 + if (*n == '\0')
53255 + return 1;
53256 + else if (*n == '/')
53257 + return 1;
53258 + break;
53259 + case '\\':
53260 + if (*n != c)
53261 + return 1;
53262 + break;
53263 + case '*':
53264 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
53265 + if (*n == '/')
53266 + return 1;
53267 + else if (c == '?') {
53268 + if (*n == '\0')
53269 + return 1;
53270 + else
53271 + ++n;
53272 + }
53273 + }
53274 + if (c == '\0') {
53275 + return 0;
53276 + } else {
53277 + const char *endp;
53278 +
53279 + if ((endp = strchr(n, '/')) == NULL)
53280 + endp = n + strlen(n);
53281 +
53282 + if (c == '[') {
53283 + for (--p; n < endp; ++n)
53284 + if (!glob_match(p, n))
53285 + return 0;
53286 + } else if (c == '/') {
53287 + while (*n != '\0' && *n != '/')
53288 + ++n;
53289 + if (*n == '/' && !glob_match(p, n + 1))
53290 + return 0;
53291 + } else {
53292 + for (--p; n < endp; ++n)
53293 + if (*n == c && !glob_match(p, n))
53294 + return 0;
53295 + }
53296 +
53297 + return 1;
53298 + }
53299 + case '[':
53300 + {
53301 + int not;
53302 + char cold;
53303 +
53304 + if (*n == '\0' || *n == '/')
53305 + return 1;
53306 +
53307 + not = (*p == '!' || *p == '^');
53308 + if (not)
53309 + ++p;
53310 +
53311 + c = *p++;
53312 + for (;;) {
53313 + unsigned char fn = (unsigned char)*n;
53314 +
53315 + if (c == '\0')
53316 + return 1;
53317 + else {
53318 + if (c == fn)
53319 + goto matched;
53320 + cold = c;
53321 + c = *p++;
53322 +
53323 + if (c == '-' && *p != ']') {
53324 + unsigned char cend = *p++;
53325 +
53326 + if (cend == '\0')
53327 + return 1;
53328 +
53329 + if (cold <= fn && fn <= cend)
53330 + goto matched;
53331 +
53332 + c = *p++;
53333 + }
53334 + }
53335 +
53336 + if (c == ']')
53337 + break;
53338 + }
53339 + if (!not)
53340 + return 1;
53341 + break;
53342 + matched:
53343 + while (c != ']') {
53344 + if (c == '\0')
53345 + return 1;
53346 +
53347 + c = *p++;
53348 + }
53349 + if (not)
53350 + return 1;
53351 + }
53352 + break;
53353 + default:
53354 + if (c != *n)
53355 + return 1;
53356 + }
53357 +
53358 + ++n;
53359 + }
53360 +
53361 + if (*n == '\0')
53362 + return 0;
53363 +
53364 + if (*n == '/')
53365 + return 0;
53366 +
53367 + return 1;
53368 +}
53369 +
53370 +static struct acl_object_label *
53371 +chk_glob_label(struct acl_object_label *globbed,
53372 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
53373 +{
53374 + struct acl_object_label *tmp;
53375 +
53376 + if (*path == NULL)
53377 + *path = gr_to_filename_nolock(dentry, mnt);
53378 +
53379 + tmp = globbed;
53380 +
53381 + while (tmp) {
53382 + if (!glob_match(tmp->filename, *path))
53383 + return tmp;
53384 + tmp = tmp->next;
53385 + }
53386 +
53387 + return NULL;
53388 +}
53389 +
53390 +static struct acl_object_label *
53391 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53392 + const ino_t curr_ino, const dev_t curr_dev,
53393 + const struct acl_subject_label *subj, char **path, const int checkglob)
53394 +{
53395 + struct acl_subject_label *tmpsubj;
53396 + struct acl_object_label *retval;
53397 + struct acl_object_label *retval2;
53398 +
53399 + tmpsubj = (struct acl_subject_label *) subj;
53400 + read_lock(&gr_inode_lock);
53401 + do {
53402 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53403 + if (retval) {
53404 + if (checkglob && retval->globbed) {
53405 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
53406 + if (retval2)
53407 + retval = retval2;
53408 + }
53409 + break;
53410 + }
53411 + } while ((tmpsubj = tmpsubj->parent_subject));
53412 + read_unlock(&gr_inode_lock);
53413 +
53414 + return retval;
53415 +}
53416 +
53417 +static __inline__ struct acl_object_label *
53418 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53419 + struct dentry *curr_dentry,
53420 + const struct acl_subject_label *subj, char **path, const int checkglob)
53421 +{
53422 + int newglob = checkglob;
53423 + ino_t inode;
53424 + dev_t device;
53425 +
53426 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
53427 + as we don't want a / * rule to match instead of the / object
53428 + don't do this for create lookups that call this function though, since they're looking up
53429 + on the parent and thus need globbing checks on all paths
53430 + */
53431 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53432 + newglob = GR_NO_GLOB;
53433 +
53434 + spin_lock(&curr_dentry->d_lock);
53435 + inode = curr_dentry->d_inode->i_ino;
53436 + device = __get_dev(curr_dentry);
53437 + spin_unlock(&curr_dentry->d_lock);
53438 +
53439 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
53440 +}
53441 +
53442 +static struct acl_object_label *
53443 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53444 + const struct acl_subject_label *subj, char *path, const int checkglob)
53445 +{
53446 + struct dentry *dentry = (struct dentry *) l_dentry;
53447 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53448 + struct mount *real_mnt = real_mount(mnt);
53449 + struct acl_object_label *retval;
53450 + struct dentry *parent;
53451 +
53452 + write_seqlock(&rename_lock);
53453 + br_read_lock(&vfsmount_lock);
53454 +
53455 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53456 +#ifdef CONFIG_NET
53457 + mnt == sock_mnt ||
53458 +#endif
53459 +#ifdef CONFIG_HUGETLBFS
53460 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
53461 +#endif
53462 + /* ignore Eric Biederman */
53463 + IS_PRIVATE(l_dentry->d_inode))) {
53464 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
53465 + goto out;
53466 + }
53467 +
53468 + for (;;) {
53469 + if (dentry == real_root.dentry && mnt == real_root.mnt)
53470 + break;
53471 +
53472 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53473 + if (!mnt_has_parent(real_mnt))
53474 + break;
53475 +
53476 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53477 + if (retval != NULL)
53478 + goto out;
53479 +
53480 + dentry = real_mnt->mnt_mountpoint;
53481 + real_mnt = real_mnt->mnt_parent;
53482 + mnt = &real_mnt->mnt;
53483 + continue;
53484 + }
53485 +
53486 + parent = dentry->d_parent;
53487 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53488 + if (retval != NULL)
53489 + goto out;
53490 +
53491 + dentry = parent;
53492 + }
53493 +
53494 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53495 +
53496 + /* real_root is pinned so we don't have to hold a reference */
53497 + if (retval == NULL)
53498 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
53499 +out:
53500 + br_read_unlock(&vfsmount_lock);
53501 + write_sequnlock(&rename_lock);
53502 +
53503 + BUG_ON(retval == NULL);
53504 +
53505 + return retval;
53506 +}
53507 +
53508 +static __inline__ struct acl_object_label *
53509 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53510 + const struct acl_subject_label *subj)
53511 +{
53512 + char *path = NULL;
53513 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
53514 +}
53515 +
53516 +static __inline__ struct acl_object_label *
53517 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53518 + const struct acl_subject_label *subj)
53519 +{
53520 + char *path = NULL;
53521 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
53522 +}
53523 +
53524 +static __inline__ struct acl_object_label *
53525 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53526 + const struct acl_subject_label *subj, char *path)
53527 +{
53528 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
53529 +}
53530 +
53531 +static struct acl_subject_label *
53532 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53533 + const struct acl_role_label *role)
53534 +{
53535 + struct dentry *dentry = (struct dentry *) l_dentry;
53536 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53537 + struct mount *real_mnt = real_mount(mnt);
53538 + struct acl_subject_label *retval;
53539 + struct dentry *parent;
53540 +
53541 + write_seqlock(&rename_lock);
53542 + br_read_lock(&vfsmount_lock);
53543 +
53544 + for (;;) {
53545 + if (dentry == real_root.dentry && mnt == real_root.mnt)
53546 + break;
53547 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53548 + if (!mnt_has_parent(real_mnt))
53549 + break;
53550 +
53551 + spin_lock(&dentry->d_lock);
53552 + read_lock(&gr_inode_lock);
53553 + retval =
53554 + lookup_acl_subj_label(dentry->d_inode->i_ino,
53555 + __get_dev(dentry), role);
53556 + read_unlock(&gr_inode_lock);
53557 + spin_unlock(&dentry->d_lock);
53558 + if (retval != NULL)
53559 + goto out;
53560 +
53561 + dentry = real_mnt->mnt_mountpoint;
53562 + real_mnt = real_mnt->mnt_parent;
53563 + mnt = &real_mnt->mnt;
53564 + continue;
53565 + }
53566 +
53567 + spin_lock(&dentry->d_lock);
53568 + read_lock(&gr_inode_lock);
53569 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53570 + __get_dev(dentry), role);
53571 + read_unlock(&gr_inode_lock);
53572 + parent = dentry->d_parent;
53573 + spin_unlock(&dentry->d_lock);
53574 +
53575 + if (retval != NULL)
53576 + goto out;
53577 +
53578 + dentry = parent;
53579 + }
53580 +
53581 + spin_lock(&dentry->d_lock);
53582 + read_lock(&gr_inode_lock);
53583 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53584 + __get_dev(dentry), role);
53585 + read_unlock(&gr_inode_lock);
53586 + spin_unlock(&dentry->d_lock);
53587 +
53588 + if (unlikely(retval == NULL)) {
53589 + /* real_root is pinned, we don't need to hold a reference */
53590 + read_lock(&gr_inode_lock);
53591 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53592 + __get_dev(real_root.dentry), role);
53593 + read_unlock(&gr_inode_lock);
53594 + }
53595 +out:
53596 + br_read_unlock(&vfsmount_lock);
53597 + write_sequnlock(&rename_lock);
53598 +
53599 + BUG_ON(retval == NULL);
53600 +
53601 + return retval;
53602 +}
53603 +
53604 +static void
53605 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53606 +{
53607 + struct task_struct *task = current;
53608 + const struct cred *cred = current_cred();
53609 +
53610 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53611 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53612 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53613 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53614 +
53615 + return;
53616 +}
53617 +
53618 +static void
53619 +gr_log_learn_id_change(const char type, const unsigned int real,
53620 + const unsigned int effective, const unsigned int fs)
53621 +{
53622 + struct task_struct *task = current;
53623 + const struct cred *cred = current_cred();
53624 +
53625 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53626 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53627 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53628 + type, real, effective, fs, &task->signal->saved_ip);
53629 +
53630 + return;
53631 +}
53632 +
53633 +__u32
53634 +gr_search_file(const struct dentry * dentry, const __u32 mode,
53635 + const struct vfsmount * mnt)
53636 +{
53637 + __u32 retval = mode;
53638 + struct acl_subject_label *curracl;
53639 + struct acl_object_label *currobj;
53640 +
53641 + if (unlikely(!(gr_status & GR_READY)))
53642 + return (mode & ~GR_AUDITS);
53643 +
53644 + curracl = current->acl;
53645 +
53646 + currobj = chk_obj_label(dentry, mnt, curracl);
53647 + retval = currobj->mode & mode;
53648 +
53649 + /* if we're opening a specified transfer file for writing
53650 + (e.g. /dev/initctl), then transfer our role to init
53651 + */
53652 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53653 + current->role->roletype & GR_ROLE_PERSIST)) {
53654 + struct task_struct *task = init_pid_ns.child_reaper;
53655 +
53656 + if (task->role != current->role) {
53657 + task->acl_sp_role = 0;
53658 + task->acl_role_id = current->acl_role_id;
53659 + task->role = current->role;
53660 + rcu_read_lock();
53661 + read_lock(&grsec_exec_file_lock);
53662 + gr_apply_subject_to_task(task);
53663 + read_unlock(&grsec_exec_file_lock);
53664 + rcu_read_unlock();
53665 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53666 + }
53667 + }
53668 +
53669 + if (unlikely
53670 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53671 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53672 + __u32 new_mode = mode;
53673 +
53674 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53675 +
53676 + retval = new_mode;
53677 +
53678 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53679 + new_mode |= GR_INHERIT;
53680 +
53681 + if (!(mode & GR_NOLEARN))
53682 + gr_log_learn(dentry, mnt, new_mode);
53683 + }
53684 +
53685 + return retval;
53686 +}
53687 +
53688 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53689 + const struct dentry *parent,
53690 + const struct vfsmount *mnt)
53691 +{
53692 + struct name_entry *match;
53693 + struct acl_object_label *matchpo;
53694 + struct acl_subject_label *curracl;
53695 + char *path;
53696 +
53697 + if (unlikely(!(gr_status & GR_READY)))
53698 + return NULL;
53699 +
53700 + preempt_disable();
53701 + path = gr_to_filename_rbac(new_dentry, mnt);
53702 + match = lookup_name_entry_create(path);
53703 +
53704 + curracl = current->acl;
53705 +
53706 + if (match) {
53707 + read_lock(&gr_inode_lock);
53708 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53709 + read_unlock(&gr_inode_lock);
53710 +
53711 + if (matchpo) {
53712 + preempt_enable();
53713 + return matchpo;
53714 + }
53715 + }
53716 +
53717 + // lookup parent
53718 +
53719 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53720 +
53721 + preempt_enable();
53722 + return matchpo;
53723 +}
53724 +
53725 +__u32
53726 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53727 + const struct vfsmount * mnt, const __u32 mode)
53728 +{
53729 + struct acl_object_label *matchpo;
53730 + __u32 retval;
53731 +
53732 + if (unlikely(!(gr_status & GR_READY)))
53733 + return (mode & ~GR_AUDITS);
53734 +
53735 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
53736 +
53737 + retval = matchpo->mode & mode;
53738 +
53739 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53740 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53741 + __u32 new_mode = mode;
53742 +
53743 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53744 +
53745 + gr_log_learn(new_dentry, mnt, new_mode);
53746 + return new_mode;
53747 + }
53748 +
53749 + return retval;
53750 +}
53751 +
53752 +__u32
53753 +gr_check_link(const struct dentry * new_dentry,
53754 + const struct dentry * parent_dentry,
53755 + const struct vfsmount * parent_mnt,
53756 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53757 +{
53758 + struct acl_object_label *obj;
53759 + __u32 oldmode, newmode;
53760 + __u32 needmode;
53761 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53762 + GR_DELETE | GR_INHERIT;
53763 +
53764 + if (unlikely(!(gr_status & GR_READY)))
53765 + return (GR_CREATE | GR_LINK);
53766 +
53767 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53768 + oldmode = obj->mode;
53769 +
53770 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53771 + newmode = obj->mode;
53772 +
53773 + needmode = newmode & checkmodes;
53774 +
53775 + // old name for hardlink must have at least the permissions of the new name
53776 + if ((oldmode & needmode) != needmode)
53777 + goto bad;
53778 +
53779 + // if old name had restrictions/auditing, make sure the new name does as well
53780 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53781 +
53782 + // don't allow hardlinking of suid/sgid/fcapped files without permission
53783 + if (is_privileged_binary(old_dentry))
53784 + needmode |= GR_SETID;
53785 +
53786 + if ((newmode & needmode) != needmode)
53787 + goto bad;
53788 +
53789 + // enforce minimum permissions
53790 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53791 + return newmode;
53792 +bad:
53793 + needmode = oldmode;
53794 + if (is_privileged_binary(old_dentry))
53795 + needmode |= GR_SETID;
53796 +
53797 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53798 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53799 + return (GR_CREATE | GR_LINK);
53800 + } else if (newmode & GR_SUPPRESS)
53801 + return GR_SUPPRESS;
53802 + else
53803 + return 0;
53804 +}
53805 +
53806 +int
53807 +gr_check_hidden_task(const struct task_struct *task)
53808 +{
53809 + if (unlikely(!(gr_status & GR_READY)))
53810 + return 0;
53811 +
53812 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53813 + return 1;
53814 +
53815 + return 0;
53816 +}
53817 +
53818 +int
53819 +gr_check_protected_task(const struct task_struct *task)
53820 +{
53821 + if (unlikely(!(gr_status & GR_READY) || !task))
53822 + return 0;
53823 +
53824 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53825 + task->acl != current->acl)
53826 + return 1;
53827 +
53828 + return 0;
53829 +}
53830 +
53831 +int
53832 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53833 +{
53834 + struct task_struct *p;
53835 + int ret = 0;
53836 +
53837 + if (unlikely(!(gr_status & GR_READY) || !pid))
53838 + return ret;
53839 +
53840 + read_lock(&tasklist_lock);
53841 + do_each_pid_task(pid, type, p) {
53842 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53843 + p->acl != current->acl) {
53844 + ret = 1;
53845 + goto out;
53846 + }
53847 + } while_each_pid_task(pid, type, p);
53848 +out:
53849 + read_unlock(&tasklist_lock);
53850 +
53851 + return ret;
53852 +}
53853 +
53854 +void
53855 +gr_copy_label(struct task_struct *tsk)
53856 +{
53857 + tsk->signal->used_accept = 0;
53858 + tsk->acl_sp_role = 0;
53859 + tsk->acl_role_id = current->acl_role_id;
53860 + tsk->acl = current->acl;
53861 + tsk->role = current->role;
53862 + tsk->signal->curr_ip = current->signal->curr_ip;
53863 + tsk->signal->saved_ip = current->signal->saved_ip;
53864 + if (current->exec_file)
53865 + get_file(current->exec_file);
53866 + tsk->exec_file = current->exec_file;
53867 + tsk->is_writable = current->is_writable;
53868 + if (unlikely(current->signal->used_accept)) {
53869 + current->signal->curr_ip = 0;
53870 + current->signal->saved_ip = 0;
53871 + }
53872 +
53873 + return;
53874 +}
53875 +
53876 +static void
53877 +gr_set_proc_res(struct task_struct *task)
53878 +{
53879 + struct acl_subject_label *proc;
53880 + unsigned short i;
53881 +
53882 + proc = task->acl;
53883 +
53884 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53885 + return;
53886 +
53887 + for (i = 0; i < RLIM_NLIMITS; i++) {
53888 + if (!(proc->resmask & (1 << i)))
53889 + continue;
53890 +
53891 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53892 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53893 + }
53894 +
53895 + return;
53896 +}
53897 +
53898 +extern int __gr_process_user_ban(struct user_struct *user);
53899 +
53900 +int
53901 +gr_check_user_change(int real, int effective, int fs)
53902 +{
53903 + unsigned int i;
53904 + __u16 num;
53905 + uid_t *uidlist;
53906 + int curuid;
53907 + int realok = 0;
53908 + int effectiveok = 0;
53909 + int fsok = 0;
53910 +
53911 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53912 + struct user_struct *user;
53913 +
53914 + if (real == -1)
53915 + goto skipit;
53916 +
53917 + user = find_user(real);
53918 + if (user == NULL)
53919 + goto skipit;
53920 +
53921 + if (__gr_process_user_ban(user)) {
53922 + /* for find_user */
53923 + free_uid(user);
53924 + return 1;
53925 + }
53926 +
53927 + /* for find_user */
53928 + free_uid(user);
53929 +
53930 +skipit:
53931 +#endif
53932 +
53933 + if (unlikely(!(gr_status & GR_READY)))
53934 + return 0;
53935 +
53936 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53937 + gr_log_learn_id_change('u', real, effective, fs);
53938 +
53939 + num = current->acl->user_trans_num;
53940 + uidlist = current->acl->user_transitions;
53941 +
53942 + if (uidlist == NULL)
53943 + return 0;
53944 +
53945 + if (real == -1)
53946 + realok = 1;
53947 + if (effective == -1)
53948 + effectiveok = 1;
53949 + if (fs == -1)
53950 + fsok = 1;
53951 +
53952 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53953 + for (i = 0; i < num; i++) {
53954 + curuid = (int)uidlist[i];
53955 + if (real == curuid)
53956 + realok = 1;
53957 + if (effective == curuid)
53958 + effectiveok = 1;
53959 + if (fs == curuid)
53960 + fsok = 1;
53961 + }
53962 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53963 + for (i = 0; i < num; i++) {
53964 + curuid = (int)uidlist[i];
53965 + if (real == curuid)
53966 + break;
53967 + if (effective == curuid)
53968 + break;
53969 + if (fs == curuid)
53970 + break;
53971 + }
53972 + /* not in deny list */
53973 + if (i == num) {
53974 + realok = 1;
53975 + effectiveok = 1;
53976 + fsok = 1;
53977 + }
53978 + }
53979 +
53980 + if (realok && effectiveok && fsok)
53981 + return 0;
53982 + else {
53983 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53984 + return 1;
53985 + }
53986 +}
53987 +
53988 +int
53989 +gr_check_group_change(int real, int effective, int fs)
53990 +{
53991 + unsigned int i;
53992 + __u16 num;
53993 + gid_t *gidlist;
53994 + int curgid;
53995 + int realok = 0;
53996 + int effectiveok = 0;
53997 + int fsok = 0;
53998 +
53999 + if (unlikely(!(gr_status & GR_READY)))
54000 + return 0;
54001 +
54002 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54003 + gr_log_learn_id_change('g', real, effective, fs);
54004 +
54005 + num = current->acl->group_trans_num;
54006 + gidlist = current->acl->group_transitions;
54007 +
54008 + if (gidlist == NULL)
54009 + return 0;
54010 +
54011 + if (real == -1)
54012 + realok = 1;
54013 + if (effective == -1)
54014 + effectiveok = 1;
54015 + if (fs == -1)
54016 + fsok = 1;
54017 +
54018 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
54019 + for (i = 0; i < num; i++) {
54020 + curgid = (int)gidlist[i];
54021 + if (real == curgid)
54022 + realok = 1;
54023 + if (effective == curgid)
54024 + effectiveok = 1;
54025 + if (fs == curgid)
54026 + fsok = 1;
54027 + }
54028 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
54029 + for (i = 0; i < num; i++) {
54030 + curgid = (int)gidlist[i];
54031 + if (real == curgid)
54032 + break;
54033 + if (effective == curgid)
54034 + break;
54035 + if (fs == curgid)
54036 + break;
54037 + }
54038 + /* not in deny list */
54039 + if (i == num) {
54040 + realok = 1;
54041 + effectiveok = 1;
54042 + fsok = 1;
54043 + }
54044 + }
54045 +
54046 + if (realok && effectiveok && fsok)
54047 + return 0;
54048 + else {
54049 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
54050 + return 1;
54051 + }
54052 +}
54053 +
54054 +extern int gr_acl_is_capable(const int cap);
54055 +
54056 +void
54057 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
54058 +{
54059 + struct acl_role_label *role = task->role;
54060 + struct acl_subject_label *subj = NULL;
54061 + struct acl_object_label *obj;
54062 + struct file *filp;
54063 +
54064 + if (unlikely(!(gr_status & GR_READY)))
54065 + return;
54066 +
54067 + filp = task->exec_file;
54068 +
54069 + /* kernel process, we'll give them the kernel role */
54070 + if (unlikely(!filp)) {
54071 + task->role = kernel_role;
54072 + task->acl = kernel_role->root_label;
54073 + return;
54074 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
54075 + role = lookup_acl_role_label(task, uid, gid);
54076 +
54077 + /* don't change the role if we're not a privileged process */
54078 + if (role && task->role != role &&
54079 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
54080 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
54081 + return;
54082 +
54083 + /* perform subject lookup in possibly new role
54084 + we can use this result below in the case where role == task->role
54085 + */
54086 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
54087 +
54088 + /* if we changed uid/gid, but result in the same role
54089 + and are using inheritance, don't lose the inherited subject
54090 + if current subject is other than what normal lookup
54091 + would result in, we arrived via inheritance, don't
54092 + lose subject
54093 + */
54094 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
54095 + (subj == task->acl)))
54096 + task->acl = subj;
54097 +
54098 + task->role = role;
54099 +
54100 + task->is_writable = 0;
54101 +
54102 + /* ignore additional mmap checks for processes that are writable
54103 + by the default ACL */
54104 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54105 + if (unlikely(obj->mode & GR_WRITE))
54106 + task->is_writable = 1;
54107 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54108 + if (unlikely(obj->mode & GR_WRITE))
54109 + task->is_writable = 1;
54110 +
54111 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54112 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54113 +#endif
54114 +
54115 + gr_set_proc_res(task);
54116 +
54117 + return;
54118 +}
54119 +
54120 +int
54121 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54122 + const int unsafe_flags)
54123 +{
54124 + struct task_struct *task = current;
54125 + struct acl_subject_label *newacl;
54126 + struct acl_object_label *obj;
54127 + __u32 retmode;
54128 +
54129 + if (unlikely(!(gr_status & GR_READY)))
54130 + return 0;
54131 +
54132 + newacl = chk_subj_label(dentry, mnt, task->role);
54133 +
54134 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
54135 + did an exec
54136 + */
54137 + rcu_read_lock();
54138 + read_lock(&tasklist_lock);
54139 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
54140 + (task->parent->acl->mode & GR_POVERRIDE))) {
54141 + read_unlock(&tasklist_lock);
54142 + rcu_read_unlock();
54143 + goto skip_check;
54144 + }
54145 + read_unlock(&tasklist_lock);
54146 + rcu_read_unlock();
54147 +
54148 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
54149 + !(task->role->roletype & GR_ROLE_GOD) &&
54150 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
54151 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
54152 + if (unsafe_flags & LSM_UNSAFE_SHARE)
54153 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
54154 + else
54155 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
54156 + return -EACCES;
54157 + }
54158 +
54159 +skip_check:
54160 +
54161 + obj = chk_obj_label(dentry, mnt, task->acl);
54162 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
54163 +
54164 + if (!(task->acl->mode & GR_INHERITLEARN) &&
54165 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
54166 + if (obj->nested)
54167 + task->acl = obj->nested;
54168 + else
54169 + task->acl = newacl;
54170 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
54171 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
54172 +
54173 + task->is_writable = 0;
54174 +
54175 + /* ignore additional mmap checks for processes that are writable
54176 + by the default ACL */
54177 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
54178 + if (unlikely(obj->mode & GR_WRITE))
54179 + task->is_writable = 1;
54180 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
54181 + if (unlikely(obj->mode & GR_WRITE))
54182 + task->is_writable = 1;
54183 +
54184 + gr_set_proc_res(task);
54185 +
54186 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54187 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54188 +#endif
54189 + return 0;
54190 +}
54191 +
54192 +/* always called with valid inodev ptr */
54193 +static void
54194 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
54195 +{
54196 + struct acl_object_label *matchpo;
54197 + struct acl_subject_label *matchps;
54198 + struct acl_subject_label *subj;
54199 + struct acl_role_label *role;
54200 + unsigned int x;
54201 +
54202 + FOR_EACH_ROLE_START(role)
54203 + FOR_EACH_SUBJECT_START(role, subj, x)
54204 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
54205 + matchpo->mode |= GR_DELETED;
54206 + FOR_EACH_SUBJECT_END(subj,x)
54207 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
54208 + if (subj->inode == ino && subj->device == dev)
54209 + subj->mode |= GR_DELETED;
54210 + FOR_EACH_NESTED_SUBJECT_END(subj)
54211 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
54212 + matchps->mode |= GR_DELETED;
54213 + FOR_EACH_ROLE_END(role)
54214 +
54215 + inodev->nentry->deleted = 1;
54216 +
54217 + return;
54218 +}
54219 +
54220 +void
54221 +gr_handle_delete(const ino_t ino, const dev_t dev)
54222 +{
54223 + struct inodev_entry *inodev;
54224 +
54225 + if (unlikely(!(gr_status & GR_READY)))
54226 + return;
54227 +
54228 + write_lock(&gr_inode_lock);
54229 + inodev = lookup_inodev_entry(ino, dev);
54230 + if (inodev != NULL)
54231 + do_handle_delete(inodev, ino, dev);
54232 + write_unlock(&gr_inode_lock);
54233 +
54234 + return;
54235 +}
54236 +
54237 +static void
54238 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54239 + const ino_t newinode, const dev_t newdevice,
54240 + struct acl_subject_label *subj)
54241 +{
54242 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54243 + struct acl_object_label *match;
54244 +
54245 + match = subj->obj_hash[index];
54246 +
54247 + while (match && (match->inode != oldinode ||
54248 + match->device != olddevice ||
54249 + !(match->mode & GR_DELETED)))
54250 + match = match->next;
54251 +
54252 + if (match && (match->inode == oldinode)
54253 + && (match->device == olddevice)
54254 + && (match->mode & GR_DELETED)) {
54255 + if (match->prev == NULL) {
54256 + subj->obj_hash[index] = match->next;
54257 + if (match->next != NULL)
54258 + match->next->prev = NULL;
54259 + } else {
54260 + match->prev->next = match->next;
54261 + if (match->next != NULL)
54262 + match->next->prev = match->prev;
54263 + }
54264 + match->prev = NULL;
54265 + match->next = NULL;
54266 + match->inode = newinode;
54267 + match->device = newdevice;
54268 + match->mode &= ~GR_DELETED;
54269 +
54270 + insert_acl_obj_label(match, subj);
54271 + }
54272 +
54273 + return;
54274 +}
54275 +
54276 +static void
54277 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54278 + const ino_t newinode, const dev_t newdevice,
54279 + struct acl_role_label *role)
54280 +{
54281 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54282 + struct acl_subject_label *match;
54283 +
54284 + match = role->subj_hash[index];
54285 +
54286 + while (match && (match->inode != oldinode ||
54287 + match->device != olddevice ||
54288 + !(match->mode & GR_DELETED)))
54289 + match = match->next;
54290 +
54291 + if (match && (match->inode == oldinode)
54292 + && (match->device == olddevice)
54293 + && (match->mode & GR_DELETED)) {
54294 + if (match->prev == NULL) {
54295 + role->subj_hash[index] = match->next;
54296 + if (match->next != NULL)
54297 + match->next->prev = NULL;
54298 + } else {
54299 + match->prev->next = match->next;
54300 + if (match->next != NULL)
54301 + match->next->prev = match->prev;
54302 + }
54303 + match->prev = NULL;
54304 + match->next = NULL;
54305 + match->inode = newinode;
54306 + match->device = newdevice;
54307 + match->mode &= ~GR_DELETED;
54308 +
54309 + insert_acl_subj_label(match, role);
54310 + }
54311 +
54312 + return;
54313 +}
54314 +
54315 +static void
54316 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54317 + const ino_t newinode, const dev_t newdevice)
54318 +{
54319 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54320 + struct inodev_entry *match;
54321 +
54322 + match = inodev_set.i_hash[index];
54323 +
54324 + while (match && (match->nentry->inode != oldinode ||
54325 + match->nentry->device != olddevice || !match->nentry->deleted))
54326 + match = match->next;
54327 +
54328 + if (match && (match->nentry->inode == oldinode)
54329 + && (match->nentry->device == olddevice) &&
54330 + match->nentry->deleted) {
54331 + if (match->prev == NULL) {
54332 + inodev_set.i_hash[index] = match->next;
54333 + if (match->next != NULL)
54334 + match->next->prev = NULL;
54335 + } else {
54336 + match->prev->next = match->next;
54337 + if (match->next != NULL)
54338 + match->next->prev = match->prev;
54339 + }
54340 + match->prev = NULL;
54341 + match->next = NULL;
54342 + match->nentry->inode = newinode;
54343 + match->nentry->device = newdevice;
54344 + match->nentry->deleted = 0;
54345 +
54346 + insert_inodev_entry(match);
54347 + }
54348 +
54349 + return;
54350 +}
54351 +
54352 +static void
54353 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
54354 +{
54355 + struct acl_subject_label *subj;
54356 + struct acl_role_label *role;
54357 + unsigned int x;
54358 +
54359 + FOR_EACH_ROLE_START(role)
54360 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
54361 +
54362 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
54363 + if ((subj->inode == ino) && (subj->device == dev)) {
54364 + subj->inode = ino;
54365 + subj->device = dev;
54366 + }
54367 + FOR_EACH_NESTED_SUBJECT_END(subj)
54368 + FOR_EACH_SUBJECT_START(role, subj, x)
54369 + update_acl_obj_label(matchn->inode, matchn->device,
54370 + ino, dev, subj);
54371 + FOR_EACH_SUBJECT_END(subj,x)
54372 + FOR_EACH_ROLE_END(role)
54373 +
54374 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
54375 +
54376 + return;
54377 +}
54378 +
54379 +static void
54380 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54381 + const struct vfsmount *mnt)
54382 +{
54383 + ino_t ino = dentry->d_inode->i_ino;
54384 + dev_t dev = __get_dev(dentry);
54385 +
54386 + __do_handle_create(matchn, ino, dev);
54387 +
54388 + return;
54389 +}
54390 +
54391 +void
54392 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54393 +{
54394 + struct name_entry *matchn;
54395 +
54396 + if (unlikely(!(gr_status & GR_READY)))
54397 + return;
54398 +
54399 + preempt_disable();
54400 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54401 +
54402 + if (unlikely((unsigned long)matchn)) {
54403 + write_lock(&gr_inode_lock);
54404 + do_handle_create(matchn, dentry, mnt);
54405 + write_unlock(&gr_inode_lock);
54406 + }
54407 + preempt_enable();
54408 +
54409 + return;
54410 +}
54411 +
54412 +void
54413 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54414 +{
54415 + struct name_entry *matchn;
54416 +
54417 + if (unlikely(!(gr_status & GR_READY)))
54418 + return;
54419 +
54420 + preempt_disable();
54421 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54422 +
54423 + if (unlikely((unsigned long)matchn)) {
54424 + write_lock(&gr_inode_lock);
54425 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54426 + write_unlock(&gr_inode_lock);
54427 + }
54428 + preempt_enable();
54429 +
54430 + return;
54431 +}
54432 +
54433 +void
54434 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54435 + struct dentry *old_dentry,
54436 + struct dentry *new_dentry,
54437 + struct vfsmount *mnt, const __u8 replace)
54438 +{
54439 + struct name_entry *matchn;
54440 + struct inodev_entry *inodev;
54441 + struct inode *inode = new_dentry->d_inode;
54442 + ino_t old_ino = old_dentry->d_inode->i_ino;
54443 + dev_t old_dev = __get_dev(old_dentry);
54444 +
54445 + /* vfs_rename swaps the name and parent link for old_dentry and
54446 + new_dentry
54447 + at this point, old_dentry has the new name, parent link, and inode
54448 + for the renamed file
54449 + if a file is being replaced by a rename, new_dentry has the inode
54450 + and name for the replaced file
54451 + */
54452 +
54453 + if (unlikely(!(gr_status & GR_READY)))
54454 + return;
54455 +
54456 + preempt_disable();
54457 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54458 +
54459 + /* we wouldn't have to check d_inode if it weren't for
54460 + NFS silly-renaming
54461 + */
54462 +
54463 + write_lock(&gr_inode_lock);
54464 + if (unlikely(replace && inode)) {
54465 + ino_t new_ino = inode->i_ino;
54466 + dev_t new_dev = __get_dev(new_dentry);
54467 +
54468 + inodev = lookup_inodev_entry(new_ino, new_dev);
54469 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
54470 + do_handle_delete(inodev, new_ino, new_dev);
54471 + }
54472 +
54473 + inodev = lookup_inodev_entry(old_ino, old_dev);
54474 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
54475 + do_handle_delete(inodev, old_ino, old_dev);
54476 +
54477 + if (unlikely((unsigned long)matchn))
54478 + do_handle_create(matchn, old_dentry, mnt);
54479 +
54480 + write_unlock(&gr_inode_lock);
54481 + preempt_enable();
54482 +
54483 + return;
54484 +}
54485 +
54486 +static int
54487 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54488 + unsigned char **sum)
54489 +{
54490 + struct acl_role_label *r;
54491 + struct role_allowed_ip *ipp;
54492 + struct role_transition *trans;
54493 + unsigned int i;
54494 + int found = 0;
54495 + u32 curr_ip = current->signal->curr_ip;
54496 +
54497 + current->signal->saved_ip = curr_ip;
54498 +
54499 + /* check transition table */
54500 +
54501 + for (trans = current->role->transitions; trans; trans = trans->next) {
54502 + if (!strcmp(rolename, trans->rolename)) {
54503 + found = 1;
54504 + break;
54505 + }
54506 + }
54507 +
54508 + if (!found)
54509 + return 0;
54510 +
54511 + /* handle special roles that do not require authentication
54512 + and check ip */
54513 +
54514 + FOR_EACH_ROLE_START(r)
54515 + if (!strcmp(rolename, r->rolename) &&
54516 + (r->roletype & GR_ROLE_SPECIAL)) {
54517 + found = 0;
54518 + if (r->allowed_ips != NULL) {
54519 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
54520 + if ((ntohl(curr_ip) & ipp->netmask) ==
54521 + (ntohl(ipp->addr) & ipp->netmask))
54522 + found = 1;
54523 + }
54524 + } else
54525 + found = 2;
54526 + if (!found)
54527 + return 0;
54528 +
54529 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54530 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54531 + *salt = NULL;
54532 + *sum = NULL;
54533 + return 1;
54534 + }
54535 + }
54536 + FOR_EACH_ROLE_END(r)
54537 +
54538 + for (i = 0; i < num_sprole_pws; i++) {
54539 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54540 + *salt = acl_special_roles[i]->salt;
54541 + *sum = acl_special_roles[i]->sum;
54542 + return 1;
54543 + }
54544 + }
54545 +
54546 + return 0;
54547 +}
54548 +
54549 +static void
54550 +assign_special_role(char *rolename)
54551 +{
54552 + struct acl_object_label *obj;
54553 + struct acl_role_label *r;
54554 + struct acl_role_label *assigned = NULL;
54555 + struct task_struct *tsk;
54556 + struct file *filp;
54557 +
54558 + FOR_EACH_ROLE_START(r)
54559 + if (!strcmp(rolename, r->rolename) &&
54560 + (r->roletype & GR_ROLE_SPECIAL)) {
54561 + assigned = r;
54562 + break;
54563 + }
54564 + FOR_EACH_ROLE_END(r)
54565 +
54566 + if (!assigned)
54567 + return;
54568 +
54569 + read_lock(&tasklist_lock);
54570 + read_lock(&grsec_exec_file_lock);
54571 +
54572 + tsk = current->real_parent;
54573 + if (tsk == NULL)
54574 + goto out_unlock;
54575 +
54576 + filp = tsk->exec_file;
54577 + if (filp == NULL)
54578 + goto out_unlock;
54579 +
54580 + tsk->is_writable = 0;
54581 +
54582 + tsk->acl_sp_role = 1;
54583 + tsk->acl_role_id = ++acl_sp_role_value;
54584 + tsk->role = assigned;
54585 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54586 +
54587 + /* ignore additional mmap checks for processes that are writable
54588 + by the default ACL */
54589 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54590 + if (unlikely(obj->mode & GR_WRITE))
54591 + tsk->is_writable = 1;
54592 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54593 + if (unlikely(obj->mode & GR_WRITE))
54594 + tsk->is_writable = 1;
54595 +
54596 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54597 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54598 +#endif
54599 +
54600 +out_unlock:
54601 + read_unlock(&grsec_exec_file_lock);
54602 + read_unlock(&tasklist_lock);
54603 + return;
54604 +}
54605 +
54606 +int gr_check_secure_terminal(struct task_struct *task)
54607 +{
54608 + struct task_struct *p, *p2, *p3;
54609 + struct files_struct *files;
54610 + struct fdtable *fdt;
54611 + struct file *our_file = NULL, *file;
54612 + int i;
54613 +
54614 + if (task->signal->tty == NULL)
54615 + return 1;
54616 +
54617 + files = get_files_struct(task);
54618 + if (files != NULL) {
54619 + rcu_read_lock();
54620 + fdt = files_fdtable(files);
54621 + for (i=0; i < fdt->max_fds; i++) {
54622 + file = fcheck_files(files, i);
54623 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54624 + get_file(file);
54625 + our_file = file;
54626 + }
54627 + }
54628 + rcu_read_unlock();
54629 + put_files_struct(files);
54630 + }
54631 +
54632 + if (our_file == NULL)
54633 + return 1;
54634 +
54635 + read_lock(&tasklist_lock);
54636 + do_each_thread(p2, p) {
54637 + files = get_files_struct(p);
54638 + if (files == NULL ||
54639 + (p->signal && p->signal->tty == task->signal->tty)) {
54640 + if (files != NULL)
54641 + put_files_struct(files);
54642 + continue;
54643 + }
54644 + rcu_read_lock();
54645 + fdt = files_fdtable(files);
54646 + for (i=0; i < fdt->max_fds; i++) {
54647 + file = fcheck_files(files, i);
54648 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54649 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54650 + p3 = task;
54651 + while (p3->pid > 0) {
54652 + if (p3 == p)
54653 + break;
54654 + p3 = p3->real_parent;
54655 + }
54656 + if (p3 == p)
54657 + break;
54658 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54659 + gr_handle_alertkill(p);
54660 + rcu_read_unlock();
54661 + put_files_struct(files);
54662 + read_unlock(&tasklist_lock);
54663 + fput(our_file);
54664 + return 0;
54665 + }
54666 + }
54667 + rcu_read_unlock();
54668 + put_files_struct(files);
54669 + } while_each_thread(p2, p);
54670 + read_unlock(&tasklist_lock);
54671 +
54672 + fput(our_file);
54673 + return 1;
54674 +}
54675 +
54676 +static int gr_rbac_disable(void *unused)
54677 +{
54678 + pax_open_kernel();
54679 + gr_status &= ~GR_READY;
54680 + pax_close_kernel();
54681 +
54682 + return 0;
54683 +}
54684 +
54685 +ssize_t
54686 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54687 +{
54688 + struct gr_arg_wrapper uwrap;
54689 + unsigned char *sprole_salt = NULL;
54690 + unsigned char *sprole_sum = NULL;
54691 + int error = sizeof (struct gr_arg_wrapper);
54692 + int error2 = 0;
54693 +
54694 + mutex_lock(&gr_dev_mutex);
54695 +
54696 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54697 + error = -EPERM;
54698 + goto out;
54699 + }
54700 +
54701 + if (count != sizeof (struct gr_arg_wrapper)) {
54702 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54703 + error = -EINVAL;
54704 + goto out;
54705 + }
54706 +
54707 +
54708 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54709 + gr_auth_expires = 0;
54710 + gr_auth_attempts = 0;
54711 + }
54712 +
54713 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54714 + error = -EFAULT;
54715 + goto out;
54716 + }
54717 +
54718 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54719 + error = -EINVAL;
54720 + goto out;
54721 + }
54722 +
54723 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54724 + error = -EFAULT;
54725 + goto out;
54726 + }
54727 +
54728 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54729 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54730 + time_after(gr_auth_expires, get_seconds())) {
54731 + error = -EBUSY;
54732 + goto out;
54733 + }
54734 +
54735 + /* if non-root trying to do anything other than use a special role,
54736 + do not attempt authentication, do not count towards authentication
54737 + locking
54738 + */
54739 +
54740 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54741 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54742 + !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
54743 + error = -EPERM;
54744 + goto out;
54745 + }
54746 +
54747 + /* ensure pw and special role name are null terminated */
54748 +
54749 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54750 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54751 +
54752 + /* Okay.
54753 + * We have our enough of the argument structure..(we have yet
54754 + * to copy_from_user the tables themselves) . Copy the tables
54755 + * only if we need them, i.e. for loading operations. */
54756 +
54757 + switch (gr_usermode->mode) {
54758 + case GR_STATUS:
54759 + if (gr_status & GR_READY) {
54760 + error = 1;
54761 + if (!gr_check_secure_terminal(current))
54762 + error = 3;
54763 + } else
54764 + error = 2;
54765 + goto out;
54766 + case GR_SHUTDOWN:
54767 + if ((gr_status & GR_READY)
54768 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54769 + stop_machine(gr_rbac_disable, NULL, NULL);
54770 + free_variables();
54771 + memset(gr_usermode, 0, sizeof (struct gr_arg));
54772 + memset(gr_system_salt, 0, GR_SALT_LEN);
54773 + memset(gr_system_sum, 0, GR_SHA_LEN);
54774 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54775 + } else if (gr_status & GR_READY) {
54776 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54777 + error = -EPERM;
54778 + } else {
54779 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54780 + error = -EAGAIN;
54781 + }
54782 + break;
54783 + case GR_ENABLE:
54784 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54785 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54786 + else {
54787 + if (gr_status & GR_READY)
54788 + error = -EAGAIN;
54789 + else
54790 + error = error2;
54791 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54792 + }
54793 + break;
54794 + case GR_RELOAD:
54795 + if (!(gr_status & GR_READY)) {
54796 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54797 + error = -EAGAIN;
54798 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54799 + stop_machine(gr_rbac_disable, NULL, NULL);
54800 + free_variables();
54801 + error2 = gracl_init(gr_usermode);
54802 + if (!error2)
54803 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54804 + else {
54805 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54806 + error = error2;
54807 + }
54808 + } else {
54809 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54810 + error = -EPERM;
54811 + }
54812 + break;
54813 + case GR_SEGVMOD:
54814 + if (unlikely(!(gr_status & GR_READY))) {
54815 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54816 + error = -EAGAIN;
54817 + break;
54818 + }
54819 +
54820 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54821 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54822 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54823 + struct acl_subject_label *segvacl;
54824 + segvacl =
54825 + lookup_acl_subj_label(gr_usermode->segv_inode,
54826 + gr_usermode->segv_device,
54827 + current->role);
54828 + if (segvacl) {
54829 + segvacl->crashes = 0;
54830 + segvacl->expires = 0;
54831 + }
54832 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54833 + gr_remove_uid(gr_usermode->segv_uid);
54834 + }
54835 + } else {
54836 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54837 + error = -EPERM;
54838 + }
54839 + break;
54840 + case GR_SPROLE:
54841 + case GR_SPROLEPAM:
54842 + if (unlikely(!(gr_status & GR_READY))) {
54843 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54844 + error = -EAGAIN;
54845 + break;
54846 + }
54847 +
54848 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54849 + current->role->expires = 0;
54850 + current->role->auth_attempts = 0;
54851 + }
54852 +
54853 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54854 + time_after(current->role->expires, get_seconds())) {
54855 + error = -EBUSY;
54856 + goto out;
54857 + }
54858 +
54859 + if (lookup_special_role_auth
54860 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54861 + && ((!sprole_salt && !sprole_sum)
54862 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54863 + char *p = "";
54864 + assign_special_role(gr_usermode->sp_role);
54865 + read_lock(&tasklist_lock);
54866 + if (current->real_parent)
54867 + p = current->real_parent->role->rolename;
54868 + read_unlock(&tasklist_lock);
54869 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54870 + p, acl_sp_role_value);
54871 + } else {
54872 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54873 + error = -EPERM;
54874 + if(!(current->role->auth_attempts++))
54875 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54876 +
54877 + goto out;
54878 + }
54879 + break;
54880 + case GR_UNSPROLE:
54881 + if (unlikely(!(gr_status & GR_READY))) {
54882 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54883 + error = -EAGAIN;
54884 + break;
54885 + }
54886 +
54887 + if (current->role->roletype & GR_ROLE_SPECIAL) {
54888 + char *p = "";
54889 + int i = 0;
54890 +
54891 + read_lock(&tasklist_lock);
54892 + if (current->real_parent) {
54893 + p = current->real_parent->role->rolename;
54894 + i = current->real_parent->acl_role_id;
54895 + }
54896 + read_unlock(&tasklist_lock);
54897 +
54898 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54899 + gr_set_acls(1);
54900 + } else {
54901 + error = -EPERM;
54902 + goto out;
54903 + }
54904 + break;
54905 + default:
54906 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54907 + error = -EINVAL;
54908 + break;
54909 + }
54910 +
54911 + if (error != -EPERM)
54912 + goto out;
54913 +
54914 + if(!(gr_auth_attempts++))
54915 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54916 +
54917 + out:
54918 + mutex_unlock(&gr_dev_mutex);
54919 + return error;
54920 +}
54921 +
54922 +/* must be called with
54923 + rcu_read_lock();
54924 + read_lock(&tasklist_lock);
54925 + read_lock(&grsec_exec_file_lock);
54926 +*/
54927 +int gr_apply_subject_to_task(struct task_struct *task)
54928 +{
54929 + struct acl_object_label *obj;
54930 + char *tmpname;
54931 + struct acl_subject_label *tmpsubj;
54932 + struct file *filp;
54933 + struct name_entry *nmatch;
54934 +
54935 + filp = task->exec_file;
54936 + if (filp == NULL)
54937 + return 0;
54938 +
54939 + /* the following is to apply the correct subject
54940 + on binaries running when the RBAC system
54941 + is enabled, when the binaries have been
54942 + replaced or deleted since their execution
54943 + -----
54944 + when the RBAC system starts, the inode/dev
54945 + from exec_file will be one the RBAC system
54946 + is unaware of. It only knows the inode/dev
54947 + of the present file on disk, or the absence
54948 + of it.
54949 + */
54950 + preempt_disable();
54951 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54952 +
54953 + nmatch = lookup_name_entry(tmpname);
54954 + preempt_enable();
54955 + tmpsubj = NULL;
54956 + if (nmatch) {
54957 + if (nmatch->deleted)
54958 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54959 + else
54960 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54961 + if (tmpsubj != NULL)
54962 + task->acl = tmpsubj;
54963 + }
54964 + if (tmpsubj == NULL)
54965 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54966 + task->role);
54967 + if (task->acl) {
54968 + task->is_writable = 0;
54969 + /* ignore additional mmap checks for processes that are writable
54970 + by the default ACL */
54971 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54972 + if (unlikely(obj->mode & GR_WRITE))
54973 + task->is_writable = 1;
54974 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54975 + if (unlikely(obj->mode & GR_WRITE))
54976 + task->is_writable = 1;
54977 +
54978 + gr_set_proc_res(task);
54979 +
54980 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54981 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54982 +#endif
54983 + } else {
54984 + return 1;
54985 + }
54986 +
54987 + return 0;
54988 +}
54989 +
54990 +int
54991 +gr_set_acls(const int type)
54992 +{
54993 + struct task_struct *task, *task2;
54994 + struct acl_role_label *role = current->role;
54995 + __u16 acl_role_id = current->acl_role_id;
54996 + const struct cred *cred;
54997 + int ret;
54998 +
54999 + rcu_read_lock();
55000 + read_lock(&tasklist_lock);
55001 + read_lock(&grsec_exec_file_lock);
55002 + do_each_thread(task2, task) {
55003 + /* check to see if we're called from the exit handler,
55004 + if so, only replace ACLs that have inherited the admin
55005 + ACL */
55006 +
55007 + if (type && (task->role != role ||
55008 + task->acl_role_id != acl_role_id))
55009 + continue;
55010 +
55011 + task->acl_role_id = 0;
55012 + task->acl_sp_role = 0;
55013 +
55014 + if (task->exec_file) {
55015 + cred = __task_cred(task);
55016 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
55017 + ret = gr_apply_subject_to_task(task);
55018 + if (ret) {
55019 + read_unlock(&grsec_exec_file_lock);
55020 + read_unlock(&tasklist_lock);
55021 + rcu_read_unlock();
55022 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
55023 + return ret;
55024 + }
55025 + } else {
55026 + // it's a kernel process
55027 + task->role = kernel_role;
55028 + task->acl = kernel_role->root_label;
55029 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
55030 + task->acl->mode &= ~GR_PROCFIND;
55031 +#endif
55032 + }
55033 + } while_each_thread(task2, task);
55034 + read_unlock(&grsec_exec_file_lock);
55035 + read_unlock(&tasklist_lock);
55036 + rcu_read_unlock();
55037 +
55038 + return 0;
55039 +}
55040 +
55041 +void
55042 +gr_learn_resource(const struct task_struct *task,
55043 + const int res, const unsigned long wanted, const int gt)
55044 +{
55045 + struct acl_subject_label *acl;
55046 + const struct cred *cred;
55047 +
55048 + if (unlikely((gr_status & GR_READY) &&
55049 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
55050 + goto skip_reslog;
55051 +
55052 +#ifdef CONFIG_GRKERNSEC_RESLOG
55053 + gr_log_resource(task, res, wanted, gt);
55054 +#endif
55055 + skip_reslog:
55056 +
55057 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
55058 + return;
55059 +
55060 + acl = task->acl;
55061 +
55062 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
55063 + !(acl->resmask & (1 << (unsigned short) res))))
55064 + return;
55065 +
55066 + if (wanted >= acl->res[res].rlim_cur) {
55067 + unsigned long res_add;
55068 +
55069 + res_add = wanted;
55070 + switch (res) {
55071 + case RLIMIT_CPU:
55072 + res_add += GR_RLIM_CPU_BUMP;
55073 + break;
55074 + case RLIMIT_FSIZE:
55075 + res_add += GR_RLIM_FSIZE_BUMP;
55076 + break;
55077 + case RLIMIT_DATA:
55078 + res_add += GR_RLIM_DATA_BUMP;
55079 + break;
55080 + case RLIMIT_STACK:
55081 + res_add += GR_RLIM_STACK_BUMP;
55082 + break;
55083 + case RLIMIT_CORE:
55084 + res_add += GR_RLIM_CORE_BUMP;
55085 + break;
55086 + case RLIMIT_RSS:
55087 + res_add += GR_RLIM_RSS_BUMP;
55088 + break;
55089 + case RLIMIT_NPROC:
55090 + res_add += GR_RLIM_NPROC_BUMP;
55091 + break;
55092 + case RLIMIT_NOFILE:
55093 + res_add += GR_RLIM_NOFILE_BUMP;
55094 + break;
55095 + case RLIMIT_MEMLOCK:
55096 + res_add += GR_RLIM_MEMLOCK_BUMP;
55097 + break;
55098 + case RLIMIT_AS:
55099 + res_add += GR_RLIM_AS_BUMP;
55100 + break;
55101 + case RLIMIT_LOCKS:
55102 + res_add += GR_RLIM_LOCKS_BUMP;
55103 + break;
55104 + case RLIMIT_SIGPENDING:
55105 + res_add += GR_RLIM_SIGPENDING_BUMP;
55106 + break;
55107 + case RLIMIT_MSGQUEUE:
55108 + res_add += GR_RLIM_MSGQUEUE_BUMP;
55109 + break;
55110 + case RLIMIT_NICE:
55111 + res_add += GR_RLIM_NICE_BUMP;
55112 + break;
55113 + case RLIMIT_RTPRIO:
55114 + res_add += GR_RLIM_RTPRIO_BUMP;
55115 + break;
55116 + case RLIMIT_RTTIME:
55117 + res_add += GR_RLIM_RTTIME_BUMP;
55118 + break;
55119 + }
55120 +
55121 + acl->res[res].rlim_cur = res_add;
55122 +
55123 + if (wanted > acl->res[res].rlim_max)
55124 + acl->res[res].rlim_max = res_add;
55125 +
55126 + /* only log the subject filename, since resource logging is supported for
55127 + single-subject learning only */
55128 + rcu_read_lock();
55129 + cred = __task_cred(task);
55130 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55131 + task->role->roletype, cred->uid, cred->gid, acl->filename,
55132 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
55133 + "", (unsigned long) res, &task->signal->saved_ip);
55134 + rcu_read_unlock();
55135 + }
55136 +
55137 + return;
55138 +}
55139 +
55140 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
55141 +void
55142 +pax_set_initial_flags(struct linux_binprm *bprm)
55143 +{
55144 + struct task_struct *task = current;
55145 + struct acl_subject_label *proc;
55146 + unsigned long flags;
55147 +
55148 + if (unlikely(!(gr_status & GR_READY)))
55149 + return;
55150 +
55151 + flags = pax_get_flags(task);
55152 +
55153 + proc = task->acl;
55154 +
55155 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
55156 + flags &= ~MF_PAX_PAGEEXEC;
55157 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
55158 + flags &= ~MF_PAX_SEGMEXEC;
55159 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
55160 + flags &= ~MF_PAX_RANDMMAP;
55161 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
55162 + flags &= ~MF_PAX_EMUTRAMP;
55163 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
55164 + flags &= ~MF_PAX_MPROTECT;
55165 +
55166 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
55167 + flags |= MF_PAX_PAGEEXEC;
55168 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
55169 + flags |= MF_PAX_SEGMEXEC;
55170 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
55171 + flags |= MF_PAX_RANDMMAP;
55172 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
55173 + flags |= MF_PAX_EMUTRAMP;
55174 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
55175 + flags |= MF_PAX_MPROTECT;
55176 +
55177 + pax_set_flags(task, flags);
55178 +
55179 + return;
55180 +}
55181 +#endif
55182 +
55183 +int
55184 +gr_handle_proc_ptrace(struct task_struct *task)
55185 +{
55186 + struct file *filp;
55187 + struct task_struct *tmp = task;
55188 + struct task_struct *curtemp = current;
55189 + __u32 retmode;
55190 +
55191 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55192 + if (unlikely(!(gr_status & GR_READY)))
55193 + return 0;
55194 +#endif
55195 +
55196 + read_lock(&tasklist_lock);
55197 + read_lock(&grsec_exec_file_lock);
55198 + filp = task->exec_file;
55199 +
55200 + while (tmp->pid > 0) {
55201 + if (tmp == curtemp)
55202 + break;
55203 + tmp = tmp->real_parent;
55204 + }
55205 +
55206 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
55207 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
55208 + read_unlock(&grsec_exec_file_lock);
55209 + read_unlock(&tasklist_lock);
55210 + return 1;
55211 + }
55212 +
55213 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55214 + if (!(gr_status & GR_READY)) {
55215 + read_unlock(&grsec_exec_file_lock);
55216 + read_unlock(&tasklist_lock);
55217 + return 0;
55218 + }
55219 +#endif
55220 +
55221 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
55222 + read_unlock(&grsec_exec_file_lock);
55223 + read_unlock(&tasklist_lock);
55224 +
55225 + if (retmode & GR_NOPTRACE)
55226 + return 1;
55227 +
55228 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
55229 + && (current->acl != task->acl || (current->acl != current->role->root_label
55230 + && current->pid != task->pid)))
55231 + return 1;
55232 +
55233 + return 0;
55234 +}
55235 +
55236 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
55237 +{
55238 + if (unlikely(!(gr_status & GR_READY)))
55239 + return;
55240 +
55241 + if (!(current->role->roletype & GR_ROLE_GOD))
55242 + return;
55243 +
55244 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55245 + p->role->rolename, gr_task_roletype_to_char(p),
55246 + p->acl->filename);
55247 +}
55248 +
55249 +int
55250 +gr_handle_ptrace(struct task_struct *task, const long request)
55251 +{
55252 + struct task_struct *tmp = task;
55253 + struct task_struct *curtemp = current;
55254 + __u32 retmode;
55255 +
55256 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55257 + if (unlikely(!(gr_status & GR_READY)))
55258 + return 0;
55259 +#endif
55260 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
55261 + read_lock(&tasklist_lock);
55262 + while (tmp->pid > 0) {
55263 + if (tmp == curtemp)
55264 + break;
55265 + tmp = tmp->real_parent;
55266 + }
55267 +
55268 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
55269 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55270 + read_unlock(&tasklist_lock);
55271 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55272 + return 1;
55273 + }
55274 + read_unlock(&tasklist_lock);
55275 + }
55276 +
55277 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55278 + if (!(gr_status & GR_READY))
55279 + return 0;
55280 +#endif
55281 +
55282 + read_lock(&grsec_exec_file_lock);
55283 + if (unlikely(!task->exec_file)) {
55284 + read_unlock(&grsec_exec_file_lock);
55285 + return 0;
55286 + }
55287 +
55288 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55289 + read_unlock(&grsec_exec_file_lock);
55290 +
55291 + if (retmode & GR_NOPTRACE) {
55292 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55293 + return 1;
55294 + }
55295 +
55296 + if (retmode & GR_PTRACERD) {
55297 + switch (request) {
55298 + case PTRACE_SEIZE:
55299 + case PTRACE_POKETEXT:
55300 + case PTRACE_POKEDATA:
55301 + case PTRACE_POKEUSR:
55302 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55303 + case PTRACE_SETREGS:
55304 + case PTRACE_SETFPREGS:
55305 +#endif
55306 +#ifdef CONFIG_X86
55307 + case PTRACE_SETFPXREGS:
55308 +#endif
55309 +#ifdef CONFIG_ALTIVEC
55310 + case PTRACE_SETVRREGS:
55311 +#endif
55312 + return 1;
55313 + default:
55314 + return 0;
55315 + }
55316 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
55317 + !(current->role->roletype & GR_ROLE_GOD) &&
55318 + (current->acl != task->acl)) {
55319 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55320 + return 1;
55321 + }
55322 +
55323 + return 0;
55324 +}
55325 +
55326 +static int is_writable_mmap(const struct file *filp)
55327 +{
55328 + struct task_struct *task = current;
55329 + struct acl_object_label *obj, *obj2;
55330 +
55331 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
55332 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
55333 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55334 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55335 + task->role->root_label);
55336 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55337 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55338 + return 1;
55339 + }
55340 + }
55341 + return 0;
55342 +}
55343 +
55344 +int
55345 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55346 +{
55347 + __u32 mode;
55348 +
55349 + if (unlikely(!file || !(prot & PROT_EXEC)))
55350 + return 1;
55351 +
55352 + if (is_writable_mmap(file))
55353 + return 0;
55354 +
55355 + mode =
55356 + gr_search_file(file->f_path.dentry,
55357 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55358 + file->f_path.mnt);
55359 +
55360 + if (!gr_tpe_allow(file))
55361 + return 0;
55362 +
55363 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55364 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55365 + return 0;
55366 + } else if (unlikely(!(mode & GR_EXEC))) {
55367 + return 0;
55368 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55369 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55370 + return 1;
55371 + }
55372 +
55373 + return 1;
55374 +}
55375 +
55376 +int
55377 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55378 +{
55379 + __u32 mode;
55380 +
55381 + if (unlikely(!file || !(prot & PROT_EXEC)))
55382 + return 1;
55383 +
55384 + if (is_writable_mmap(file))
55385 + return 0;
55386 +
55387 + mode =
55388 + gr_search_file(file->f_path.dentry,
55389 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55390 + file->f_path.mnt);
55391 +
55392 + if (!gr_tpe_allow(file))
55393 + return 0;
55394 +
55395 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55396 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55397 + return 0;
55398 + } else if (unlikely(!(mode & GR_EXEC))) {
55399 + return 0;
55400 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55401 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55402 + return 1;
55403 + }
55404 +
55405 + return 1;
55406 +}
55407 +
55408 +void
55409 +gr_acl_handle_psacct(struct task_struct *task, const long code)
55410 +{
55411 + unsigned long runtime;
55412 + unsigned long cputime;
55413 + unsigned int wday, cday;
55414 + __u8 whr, chr;
55415 + __u8 wmin, cmin;
55416 + __u8 wsec, csec;
55417 + struct timespec timeval;
55418 +
55419 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55420 + !(task->acl->mode & GR_PROCACCT)))
55421 + return;
55422 +
55423 + do_posix_clock_monotonic_gettime(&timeval);
55424 + runtime = timeval.tv_sec - task->start_time.tv_sec;
55425 + wday = runtime / (3600 * 24);
55426 + runtime -= wday * (3600 * 24);
55427 + whr = runtime / 3600;
55428 + runtime -= whr * 3600;
55429 + wmin = runtime / 60;
55430 + runtime -= wmin * 60;
55431 + wsec = runtime;
55432 +
55433 + cputime = (task->utime + task->stime) / HZ;
55434 + cday = cputime / (3600 * 24);
55435 + cputime -= cday * (3600 * 24);
55436 + chr = cputime / 3600;
55437 + cputime -= chr * 3600;
55438 + cmin = cputime / 60;
55439 + cputime -= cmin * 60;
55440 + csec = cputime;
55441 +
55442 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55443 +
55444 + return;
55445 +}
55446 +
55447 +void gr_set_kernel_label(struct task_struct *task)
55448 +{
55449 + if (gr_status & GR_READY) {
55450 + task->role = kernel_role;
55451 + task->acl = kernel_role->root_label;
55452 + }
55453 + return;
55454 +}
55455 +
55456 +#ifdef CONFIG_TASKSTATS
55457 +int gr_is_taskstats_denied(int pid)
55458 +{
55459 + struct task_struct *task;
55460 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55461 + const struct cred *cred;
55462 +#endif
55463 + int ret = 0;
55464 +
55465 + /* restrict taskstats viewing to un-chrooted root users
55466 + who have the 'view' subject flag if the RBAC system is enabled
55467 + */
55468 +
55469 + rcu_read_lock();
55470 + read_lock(&tasklist_lock);
55471 + task = find_task_by_vpid(pid);
55472 + if (task) {
55473 +#ifdef CONFIG_GRKERNSEC_CHROOT
55474 + if (proc_is_chrooted(task))
55475 + ret = -EACCES;
55476 +#endif
55477 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55478 + cred = __task_cred(task);
55479 +#ifdef CONFIG_GRKERNSEC_PROC_USER
55480 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
55481 + ret = -EACCES;
55482 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55483 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55484 + ret = -EACCES;
55485 +#endif
55486 +#endif
55487 + if (gr_status & GR_READY) {
55488 + if (!(task->acl->mode & GR_VIEW))
55489 + ret = -EACCES;
55490 + }
55491 + } else
55492 + ret = -ENOENT;
55493 +
55494 + read_unlock(&tasklist_lock);
55495 + rcu_read_unlock();
55496 +
55497 + return ret;
55498 +}
55499 +#endif
55500 +
55501 +/* AUXV entries are filled via a descendant of search_binary_handler
55502 + after we've already applied the subject for the target
55503 +*/
55504 +int gr_acl_enable_at_secure(void)
55505 +{
55506 + if (unlikely(!(gr_status & GR_READY)))
55507 + return 0;
55508 +
55509 + if (current->acl->mode & GR_ATSECURE)
55510 + return 1;
55511 +
55512 + return 0;
55513 +}
55514 +
55515 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55516 +{
55517 + struct task_struct *task = current;
55518 + struct dentry *dentry = file->f_path.dentry;
55519 + struct vfsmount *mnt = file->f_path.mnt;
55520 + struct acl_object_label *obj, *tmp;
55521 + struct acl_subject_label *subj;
55522 + unsigned int bufsize;
55523 + int is_not_root;
55524 + char *path;
55525 + dev_t dev = __get_dev(dentry);
55526 +
55527 + if (unlikely(!(gr_status & GR_READY)))
55528 + return 1;
55529 +
55530 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55531 + return 1;
55532 +
55533 + /* ignore Eric Biederman */
55534 + if (IS_PRIVATE(dentry->d_inode))
55535 + return 1;
55536 +
55537 + subj = task->acl;
55538 + read_lock(&gr_inode_lock);
55539 + do {
55540 + obj = lookup_acl_obj_label(ino, dev, subj);
55541 + if (obj != NULL) {
55542 + read_unlock(&gr_inode_lock);
55543 + return (obj->mode & GR_FIND) ? 1 : 0;
55544 + }
55545 + } while ((subj = subj->parent_subject));
55546 + read_unlock(&gr_inode_lock);
55547 +
55548 + /* this is purely an optimization since we're looking for an object
55549 + for the directory we're doing a readdir on
55550 + if it's possible for any globbed object to match the entry we're
55551 + filling into the directory, then the object we find here will be
55552 + an anchor point with attached globbed objects
55553 + */
55554 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55555 + if (obj->globbed == NULL)
55556 + return (obj->mode & GR_FIND) ? 1 : 0;
55557 +
55558 + is_not_root = ((obj->filename[0] == '/') &&
55559 + (obj->filename[1] == '\0')) ? 0 : 1;
55560 + bufsize = PAGE_SIZE - namelen - is_not_root;
55561 +
55562 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
55563 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55564 + return 1;
55565 +
55566 + preempt_disable();
55567 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55568 + bufsize);
55569 +
55570 + bufsize = strlen(path);
55571 +
55572 + /* if base is "/", don't append an additional slash */
55573 + if (is_not_root)
55574 + *(path + bufsize) = '/';
55575 + memcpy(path + bufsize + is_not_root, name, namelen);
55576 + *(path + bufsize + namelen + is_not_root) = '\0';
55577 +
55578 + tmp = obj->globbed;
55579 + while (tmp) {
55580 + if (!glob_match(tmp->filename, path)) {
55581 + preempt_enable();
55582 + return (tmp->mode & GR_FIND) ? 1 : 0;
55583 + }
55584 + tmp = tmp->next;
55585 + }
55586 + preempt_enable();
55587 + return (obj->mode & GR_FIND) ? 1 : 0;
55588 +}
55589 +
55590 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55591 +EXPORT_SYMBOL(gr_acl_is_enabled);
55592 +#endif
55593 +EXPORT_SYMBOL(gr_learn_resource);
55594 +EXPORT_SYMBOL(gr_set_kernel_label);
55595 +#ifdef CONFIG_SECURITY
55596 +EXPORT_SYMBOL(gr_check_user_change);
55597 +EXPORT_SYMBOL(gr_check_group_change);
55598 +#endif
55599 +
55600 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55601 new file mode 100644
55602 index 0000000..34fefda
55603 --- /dev/null
55604 +++ b/grsecurity/gracl_alloc.c
55605 @@ -0,0 +1,105 @@
55606 +#include <linux/kernel.h>
55607 +#include <linux/mm.h>
55608 +#include <linux/slab.h>
55609 +#include <linux/vmalloc.h>
55610 +#include <linux/gracl.h>
55611 +#include <linux/grsecurity.h>
55612 +
55613 +static unsigned long alloc_stack_next = 1;
55614 +static unsigned long alloc_stack_size = 1;
55615 +static void **alloc_stack;
55616 +
55617 +static __inline__ int
55618 +alloc_pop(void)
55619 +{
55620 + if (alloc_stack_next == 1)
55621 + return 0;
55622 +
55623 + kfree(alloc_stack[alloc_stack_next - 2]);
55624 +
55625 + alloc_stack_next--;
55626 +
55627 + return 1;
55628 +}
55629 +
55630 +static __inline__ int
55631 +alloc_push(void *buf)
55632 +{
55633 + if (alloc_stack_next >= alloc_stack_size)
55634 + return 1;
55635 +
55636 + alloc_stack[alloc_stack_next - 1] = buf;
55637 +
55638 + alloc_stack_next++;
55639 +
55640 + return 0;
55641 +}
55642 +
55643 +void *
55644 +acl_alloc(unsigned long len)
55645 +{
55646 + void *ret = NULL;
55647 +
55648 + if (!len || len > PAGE_SIZE)
55649 + goto out;
55650 +
55651 + ret = kmalloc(len, GFP_KERNEL);
55652 +
55653 + if (ret) {
55654 + if (alloc_push(ret)) {
55655 + kfree(ret);
55656 + ret = NULL;
55657 + }
55658 + }
55659 +
55660 +out:
55661 + return ret;
55662 +}
55663 +
55664 +void *
55665 +acl_alloc_num(unsigned long num, unsigned long len)
55666 +{
55667 + if (!len || (num > (PAGE_SIZE / len)))
55668 + return NULL;
55669 +
55670 + return acl_alloc(num * len);
55671 +}
55672 +
55673 +void
55674 +acl_free_all(void)
55675 +{
55676 + if (gr_acl_is_enabled() || !alloc_stack)
55677 + return;
55678 +
55679 + while (alloc_pop()) ;
55680 +
55681 + if (alloc_stack) {
55682 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55683 + kfree(alloc_stack);
55684 + else
55685 + vfree(alloc_stack);
55686 + }
55687 +
55688 + alloc_stack = NULL;
55689 + alloc_stack_size = 1;
55690 + alloc_stack_next = 1;
55691 +
55692 + return;
55693 +}
55694 +
55695 +int
55696 +acl_alloc_stack_init(unsigned long size)
55697 +{
55698 + if ((size * sizeof (void *)) <= PAGE_SIZE)
55699 + alloc_stack =
55700 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55701 + else
55702 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
55703 +
55704 + alloc_stack_size = size;
55705 +
55706 + if (!alloc_stack)
55707 + return 0;
55708 + else
55709 + return 1;
55710 +}
55711 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55712 new file mode 100644
55713 index 0000000..6d21049
55714 --- /dev/null
55715 +++ b/grsecurity/gracl_cap.c
55716 @@ -0,0 +1,110 @@
55717 +#include <linux/kernel.h>
55718 +#include <linux/module.h>
55719 +#include <linux/sched.h>
55720 +#include <linux/gracl.h>
55721 +#include <linux/grsecurity.h>
55722 +#include <linux/grinternal.h>
55723 +
55724 +extern const char *captab_log[];
55725 +extern int captab_log_entries;
55726 +
55727 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55728 +{
55729 + struct acl_subject_label *curracl;
55730 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55731 + kernel_cap_t cap_audit = __cap_empty_set;
55732 +
55733 + if (!gr_acl_is_enabled())
55734 + return 1;
55735 +
55736 + curracl = task->acl;
55737 +
55738 + cap_drop = curracl->cap_lower;
55739 + cap_mask = curracl->cap_mask;
55740 + cap_audit = curracl->cap_invert_audit;
55741 +
55742 + while ((curracl = curracl->parent_subject)) {
55743 + /* if the cap isn't specified in the current computed mask but is specified in the
55744 + current level subject, and is lowered in the current level subject, then add
55745 + it to the set of dropped capabilities
55746 + otherwise, add the current level subject's mask to the current computed mask
55747 + */
55748 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55749 + cap_raise(cap_mask, cap);
55750 + if (cap_raised(curracl->cap_lower, cap))
55751 + cap_raise(cap_drop, cap);
55752 + if (cap_raised(curracl->cap_invert_audit, cap))
55753 + cap_raise(cap_audit, cap);
55754 + }
55755 + }
55756 +
55757 + if (!cap_raised(cap_drop, cap)) {
55758 + if (cap_raised(cap_audit, cap))
55759 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55760 + return 1;
55761 + }
55762 +
55763 + curracl = task->acl;
55764 +
55765 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55766 + && cap_raised(cred->cap_effective, cap)) {
55767 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55768 + task->role->roletype, cred->uid,
55769 + cred->gid, task->exec_file ?
55770 + gr_to_filename(task->exec_file->f_path.dentry,
55771 + task->exec_file->f_path.mnt) : curracl->filename,
55772 + curracl->filename, 0UL,
55773 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55774 + return 1;
55775 + }
55776 +
55777 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55778 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55779 +
55780 + return 0;
55781 +}
55782 +
55783 +int
55784 +gr_acl_is_capable(const int cap)
55785 +{
55786 + return gr_task_acl_is_capable(current, current_cred(), cap);
55787 +}
55788 +
55789 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55790 +{
55791 + struct acl_subject_label *curracl;
55792 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55793 +
55794 + if (!gr_acl_is_enabled())
55795 + return 1;
55796 +
55797 + curracl = task->acl;
55798 +
55799 + cap_drop = curracl->cap_lower;
55800 + cap_mask = curracl->cap_mask;
55801 +
55802 + while ((curracl = curracl->parent_subject)) {
55803 + /* if the cap isn't specified in the current computed mask but is specified in the
55804 + current level subject, and is lowered in the current level subject, then add
55805 + it to the set of dropped capabilities
55806 + otherwise, add the current level subject's mask to the current computed mask
55807 + */
55808 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55809 + cap_raise(cap_mask, cap);
55810 + if (cap_raised(curracl->cap_lower, cap))
55811 + cap_raise(cap_drop, cap);
55812 + }
55813 + }
55814 +
55815 + if (!cap_raised(cap_drop, cap))
55816 + return 1;
55817 +
55818 + return 0;
55819 +}
55820 +
55821 +int
55822 +gr_acl_is_capable_nolog(const int cap)
55823 +{
55824 + return gr_task_acl_is_capable_nolog(current, cap);
55825 +}
55826 +
55827 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55828 new file mode 100644
55829 index 0000000..d28e241
55830 --- /dev/null
55831 +++ b/grsecurity/gracl_fs.c
55832 @@ -0,0 +1,437 @@
55833 +#include <linux/kernel.h>
55834 +#include <linux/sched.h>
55835 +#include <linux/types.h>
55836 +#include <linux/fs.h>
55837 +#include <linux/file.h>
55838 +#include <linux/stat.h>
55839 +#include <linux/grsecurity.h>
55840 +#include <linux/grinternal.h>
55841 +#include <linux/gracl.h>
55842 +
55843 +umode_t
55844 +gr_acl_umask(void)
55845 +{
55846 + if (unlikely(!gr_acl_is_enabled()))
55847 + return 0;
55848 +
55849 + return current->role->umask;
55850 +}
55851 +
55852 +__u32
55853 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55854 + const struct vfsmount * mnt)
55855 +{
55856 + __u32 mode;
55857 +
55858 + if (unlikely(!dentry->d_inode))
55859 + return GR_FIND;
55860 +
55861 + mode =
55862 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55863 +
55864 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55865 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55866 + return mode;
55867 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55868 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55869 + return 0;
55870 + } else if (unlikely(!(mode & GR_FIND)))
55871 + return 0;
55872 +
55873 + return GR_FIND;
55874 +}
55875 +
55876 +__u32
55877 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55878 + int acc_mode)
55879 +{
55880 + __u32 reqmode = GR_FIND;
55881 + __u32 mode;
55882 +
55883 + if (unlikely(!dentry->d_inode))
55884 + return reqmode;
55885 +
55886 + if (acc_mode & MAY_APPEND)
55887 + reqmode |= GR_APPEND;
55888 + else if (acc_mode & MAY_WRITE)
55889 + reqmode |= GR_WRITE;
55890 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55891 + reqmode |= GR_READ;
55892 +
55893 + mode =
55894 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55895 + mnt);
55896 +
55897 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55898 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55899 + reqmode & GR_READ ? " reading" : "",
55900 + reqmode & GR_WRITE ? " writing" : reqmode &
55901 + GR_APPEND ? " appending" : "");
55902 + return reqmode;
55903 + } else
55904 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55905 + {
55906 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55907 + reqmode & GR_READ ? " reading" : "",
55908 + reqmode & GR_WRITE ? " writing" : reqmode &
55909 + GR_APPEND ? " appending" : "");
55910 + return 0;
55911 + } else if (unlikely((mode & reqmode) != reqmode))
55912 + return 0;
55913 +
55914 + return reqmode;
55915 +}
55916 +
55917 +__u32
55918 +gr_acl_handle_creat(const struct dentry * dentry,
55919 + const struct dentry * p_dentry,
55920 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55921 + const int imode)
55922 +{
55923 + __u32 reqmode = GR_WRITE | GR_CREATE;
55924 + __u32 mode;
55925 +
55926 + if (acc_mode & MAY_APPEND)
55927 + reqmode |= GR_APPEND;
55928 + // if a directory was required or the directory already exists, then
55929 + // don't count this open as a read
55930 + if ((acc_mode & MAY_READ) &&
55931 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55932 + reqmode |= GR_READ;
55933 + if ((open_flags & O_CREAT) &&
55934 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
55935 + reqmode |= GR_SETID;
55936 +
55937 + mode =
55938 + gr_check_create(dentry, p_dentry, p_mnt,
55939 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55940 +
55941 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55942 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55943 + reqmode & GR_READ ? " reading" : "",
55944 + reqmode & GR_WRITE ? " writing" : reqmode &
55945 + GR_APPEND ? " appending" : "");
55946 + return reqmode;
55947 + } else
55948 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55949 + {
55950 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55951 + reqmode & GR_READ ? " reading" : "",
55952 + reqmode & GR_WRITE ? " writing" : reqmode &
55953 + GR_APPEND ? " appending" : "");
55954 + return 0;
55955 + } else if (unlikely((mode & reqmode) != reqmode))
55956 + return 0;
55957 +
55958 + return reqmode;
55959 +}
55960 +
55961 +__u32
55962 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55963 + const int fmode)
55964 +{
55965 + __u32 mode, reqmode = GR_FIND;
55966 +
55967 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55968 + reqmode |= GR_EXEC;
55969 + if (fmode & S_IWOTH)
55970 + reqmode |= GR_WRITE;
55971 + if (fmode & S_IROTH)
55972 + reqmode |= GR_READ;
55973 +
55974 + mode =
55975 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55976 + mnt);
55977 +
55978 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55979 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55980 + reqmode & GR_READ ? " reading" : "",
55981 + reqmode & GR_WRITE ? " writing" : "",
55982 + reqmode & GR_EXEC ? " executing" : "");
55983 + return reqmode;
55984 + } else
55985 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55986 + {
55987 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55988 + reqmode & GR_READ ? " reading" : "",
55989 + reqmode & GR_WRITE ? " writing" : "",
55990 + reqmode & GR_EXEC ? " executing" : "");
55991 + return 0;
55992 + } else if (unlikely((mode & reqmode) != reqmode))
55993 + return 0;
55994 +
55995 + return reqmode;
55996 +}
55997 +
55998 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55999 +{
56000 + __u32 mode;
56001 +
56002 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
56003 +
56004 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56005 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
56006 + return mode;
56007 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56008 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
56009 + return 0;
56010 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
56011 + return 0;
56012 +
56013 + return (reqmode);
56014 +}
56015 +
56016 +__u32
56017 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56018 +{
56019 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
56020 +}
56021 +
56022 +__u32
56023 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
56024 +{
56025 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
56026 +}
56027 +
56028 +__u32
56029 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
56030 +{
56031 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
56032 +}
56033 +
56034 +__u32
56035 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
56036 +{
56037 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
56038 +}
56039 +
56040 +__u32
56041 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
56042 + umode_t *modeptr)
56043 +{
56044 + umode_t mode;
56045 +
56046 + *modeptr &= ~gr_acl_umask();
56047 + mode = *modeptr;
56048 +
56049 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
56050 + return 1;
56051 +
56052 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
56053 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
56054 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56055 + GR_CHMOD_ACL_MSG);
56056 + } else {
56057 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
56058 + }
56059 +}
56060 +
56061 +__u32
56062 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
56063 +{
56064 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
56065 +}
56066 +
56067 +__u32
56068 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
56069 +{
56070 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
56071 +}
56072 +
56073 +__u32
56074 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
56075 +{
56076 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
56077 +}
56078 +
56079 +__u32
56080 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
56081 +{
56082 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
56083 + GR_UNIXCONNECT_ACL_MSG);
56084 +}
56085 +
56086 +/* hardlinks require at minimum create and link permission,
56087 + any additional privilege required is based on the
56088 + privilege of the file being linked to
56089 +*/
56090 +__u32
56091 +gr_acl_handle_link(const struct dentry * new_dentry,
56092 + const struct dentry * parent_dentry,
56093 + const struct vfsmount * parent_mnt,
56094 + const struct dentry * old_dentry,
56095 + const struct vfsmount * old_mnt, const char *to)
56096 +{
56097 + __u32 mode;
56098 + __u32 needmode = GR_CREATE | GR_LINK;
56099 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
56100 +
56101 + mode =
56102 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
56103 + old_mnt);
56104 +
56105 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
56106 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56107 + return mode;
56108 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56109 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56110 + return 0;
56111 + } else if (unlikely((mode & needmode) != needmode))
56112 + return 0;
56113 +
56114 + return 1;
56115 +}
56116 +
56117 +__u32
56118 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56119 + const struct dentry * parent_dentry,
56120 + const struct vfsmount * parent_mnt, const char *from)
56121 +{
56122 + __u32 needmode = GR_WRITE | GR_CREATE;
56123 + __u32 mode;
56124 +
56125 + mode =
56126 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
56127 + GR_CREATE | GR_AUDIT_CREATE |
56128 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
56129 +
56130 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
56131 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56132 + return mode;
56133 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56134 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56135 + return 0;
56136 + } else if (unlikely((mode & needmode) != needmode))
56137 + return 0;
56138 +
56139 + return (GR_WRITE | GR_CREATE);
56140 +}
56141 +
56142 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
56143 +{
56144 + __u32 mode;
56145 +
56146 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56147 +
56148 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56149 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
56150 + return mode;
56151 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56152 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
56153 + return 0;
56154 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
56155 + return 0;
56156 +
56157 + return (reqmode);
56158 +}
56159 +
56160 +__u32
56161 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56162 + const struct dentry * parent_dentry,
56163 + const struct vfsmount * parent_mnt,
56164 + const int mode)
56165 +{
56166 + __u32 reqmode = GR_WRITE | GR_CREATE;
56167 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
56168 + reqmode |= GR_SETID;
56169 +
56170 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56171 + reqmode, GR_MKNOD_ACL_MSG);
56172 +}
56173 +
56174 +__u32
56175 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
56176 + const struct dentry *parent_dentry,
56177 + const struct vfsmount *parent_mnt)
56178 +{
56179 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56180 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
56181 +}
56182 +
56183 +#define RENAME_CHECK_SUCCESS(old, new) \
56184 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
56185 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
56186 +
56187 +int
56188 +gr_acl_handle_rename(struct dentry *new_dentry,
56189 + struct dentry *parent_dentry,
56190 + const struct vfsmount *parent_mnt,
56191 + struct dentry *old_dentry,
56192 + struct inode *old_parent_inode,
56193 + struct vfsmount *old_mnt, const char *newname)
56194 +{
56195 + __u32 comp1, comp2;
56196 + int error = 0;
56197 +
56198 + if (unlikely(!gr_acl_is_enabled()))
56199 + return 0;
56200 +
56201 + if (!new_dentry->d_inode) {
56202 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
56203 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
56204 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
56205 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
56206 + GR_DELETE | GR_AUDIT_DELETE |
56207 + GR_AUDIT_READ | GR_AUDIT_WRITE |
56208 + GR_SUPPRESS, old_mnt);
56209 + } else {
56210 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
56211 + GR_CREATE | GR_DELETE |
56212 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
56213 + GR_AUDIT_READ | GR_AUDIT_WRITE |
56214 + GR_SUPPRESS, parent_mnt);
56215 + comp2 =
56216 + gr_search_file(old_dentry,
56217 + GR_READ | GR_WRITE | GR_AUDIT_READ |
56218 + GR_DELETE | GR_AUDIT_DELETE |
56219 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
56220 + }
56221 +
56222 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
56223 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
56224 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56225 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
56226 + && !(comp2 & GR_SUPPRESS)) {
56227 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56228 + error = -EACCES;
56229 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
56230 + error = -EACCES;
56231 +
56232 + return error;
56233 +}
56234 +
56235 +void
56236 +gr_acl_handle_exit(void)
56237 +{
56238 + u16 id;
56239 + char *rolename;
56240 + struct file *exec_file;
56241 +
56242 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56243 + !(current->role->roletype & GR_ROLE_PERSIST))) {
56244 + id = current->acl_role_id;
56245 + rolename = current->role->rolename;
56246 + gr_set_acls(1);
56247 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56248 + }
56249 +
56250 + write_lock(&grsec_exec_file_lock);
56251 + exec_file = current->exec_file;
56252 + current->exec_file = NULL;
56253 + write_unlock(&grsec_exec_file_lock);
56254 +
56255 + if (exec_file)
56256 + fput(exec_file);
56257 +}
56258 +
56259 +int
56260 +gr_acl_handle_procpidmem(const struct task_struct *task)
56261 +{
56262 + if (unlikely(!gr_acl_is_enabled()))
56263 + return 0;
56264 +
56265 + if (task != current && task->acl->mode & GR_PROTPROCFD)
56266 + return -EACCES;
56267 +
56268 + return 0;
56269 +}
56270 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56271 new file mode 100644
56272 index 0000000..58800a7
56273 --- /dev/null
56274 +++ b/grsecurity/gracl_ip.c
56275 @@ -0,0 +1,384 @@
56276 +#include <linux/kernel.h>
56277 +#include <asm/uaccess.h>
56278 +#include <asm/errno.h>
56279 +#include <net/sock.h>
56280 +#include <linux/file.h>
56281 +#include <linux/fs.h>
56282 +#include <linux/net.h>
56283 +#include <linux/in.h>
56284 +#include <linux/skbuff.h>
56285 +#include <linux/ip.h>
56286 +#include <linux/udp.h>
56287 +#include <linux/types.h>
56288 +#include <linux/sched.h>
56289 +#include <linux/netdevice.h>
56290 +#include <linux/inetdevice.h>
56291 +#include <linux/gracl.h>
56292 +#include <linux/grsecurity.h>
56293 +#include <linux/grinternal.h>
56294 +
56295 +#define GR_BIND 0x01
56296 +#define GR_CONNECT 0x02
56297 +#define GR_INVERT 0x04
56298 +#define GR_BINDOVERRIDE 0x08
56299 +#define GR_CONNECTOVERRIDE 0x10
56300 +#define GR_SOCK_FAMILY 0x20
56301 +
56302 +static const char * gr_protocols[IPPROTO_MAX] = {
56303 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56304 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56305 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56306 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56307 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56308 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56309 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56310 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56311 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56312 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56313 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56314 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56315 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56316 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56317 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56318 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56319 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56320 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56321 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56322 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56323 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56324 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56325 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56326 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56327 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56328 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56329 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56330 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56331 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56332 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56333 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56334 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56335 + };
56336 +
56337 +static const char * gr_socktypes[SOCK_MAX] = {
56338 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56339 + "unknown:7", "unknown:8", "unknown:9", "packet"
56340 + };
56341 +
56342 +static const char * gr_sockfamilies[AF_MAX+1] = {
56343 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56344 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
56345 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56346 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
56347 + };
56348 +
56349 +const char *
56350 +gr_proto_to_name(unsigned char proto)
56351 +{
56352 + return gr_protocols[proto];
56353 +}
56354 +
56355 +const char *
56356 +gr_socktype_to_name(unsigned char type)
56357 +{
56358 + return gr_socktypes[type];
56359 +}
56360 +
56361 +const char *
56362 +gr_sockfamily_to_name(unsigned char family)
56363 +{
56364 + return gr_sockfamilies[family];
56365 +}
56366 +
56367 +int
56368 +gr_search_socket(const int domain, const int type, const int protocol)
56369 +{
56370 + struct acl_subject_label *curr;
56371 + const struct cred *cred = current_cred();
56372 +
56373 + if (unlikely(!gr_acl_is_enabled()))
56374 + goto exit;
56375 +
56376 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
56377 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56378 + goto exit; // let the kernel handle it
56379 +
56380 + curr = current->acl;
56381 +
56382 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56383 + /* the family is allowed, if this is PF_INET allow it only if
56384 + the extra sock type/protocol checks pass */
56385 + if (domain == PF_INET)
56386 + goto inet_check;
56387 + goto exit;
56388 + } else {
56389 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56390 + __u32 fakeip = 0;
56391 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56392 + current->role->roletype, cred->uid,
56393 + cred->gid, current->exec_file ?
56394 + gr_to_filename(current->exec_file->f_path.dentry,
56395 + current->exec_file->f_path.mnt) :
56396 + curr->filename, curr->filename,
56397 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56398 + &current->signal->saved_ip);
56399 + goto exit;
56400 + }
56401 + goto exit_fail;
56402 + }
56403 +
56404 +inet_check:
56405 + /* the rest of this checking is for IPv4 only */
56406 + if (!curr->ips)
56407 + goto exit;
56408 +
56409 + if ((curr->ip_type & (1 << type)) &&
56410 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56411 + goto exit;
56412 +
56413 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56414 + /* we don't place acls on raw sockets , and sometimes
56415 + dgram/ip sockets are opened for ioctl and not
56416 + bind/connect, so we'll fake a bind learn log */
56417 + if (type == SOCK_RAW || type == SOCK_PACKET) {
56418 + __u32 fakeip = 0;
56419 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56420 + current->role->roletype, cred->uid,
56421 + cred->gid, current->exec_file ?
56422 + gr_to_filename(current->exec_file->f_path.dentry,
56423 + current->exec_file->f_path.mnt) :
56424 + curr->filename, curr->filename,
56425 + &fakeip, 0, type,
56426 + protocol, GR_CONNECT, &current->signal->saved_ip);
56427 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56428 + __u32 fakeip = 0;
56429 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56430 + current->role->roletype, cred->uid,
56431 + cred->gid, current->exec_file ?
56432 + gr_to_filename(current->exec_file->f_path.dentry,
56433 + current->exec_file->f_path.mnt) :
56434 + curr->filename, curr->filename,
56435 + &fakeip, 0, type,
56436 + protocol, GR_BIND, &current->signal->saved_ip);
56437 + }
56438 + /* we'll log when they use connect or bind */
56439 + goto exit;
56440 + }
56441 +
56442 +exit_fail:
56443 + if (domain == PF_INET)
56444 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56445 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
56446 + else
56447 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56448 + gr_socktype_to_name(type), protocol);
56449 +
56450 + return 0;
56451 +exit:
56452 + return 1;
56453 +}
56454 +
56455 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56456 +{
56457 + if ((ip->mode & mode) &&
56458 + (ip_port >= ip->low) &&
56459 + (ip_port <= ip->high) &&
56460 + ((ntohl(ip_addr) & our_netmask) ==
56461 + (ntohl(our_addr) & our_netmask))
56462 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56463 + && (ip->type & (1 << type))) {
56464 + if (ip->mode & GR_INVERT)
56465 + return 2; // specifically denied
56466 + else
56467 + return 1; // allowed
56468 + }
56469 +
56470 + return 0; // not specifically allowed, may continue parsing
56471 +}
56472 +
56473 +static int
56474 +gr_search_connectbind(const int full_mode, struct sock *sk,
56475 + struct sockaddr_in *addr, const int type)
56476 +{
56477 + char iface[IFNAMSIZ] = {0};
56478 + struct acl_subject_label *curr;
56479 + struct acl_ip_label *ip;
56480 + struct inet_sock *isk;
56481 + struct net_device *dev;
56482 + struct in_device *idev;
56483 + unsigned long i;
56484 + int ret;
56485 + int mode = full_mode & (GR_BIND | GR_CONNECT);
56486 + __u32 ip_addr = 0;
56487 + __u32 our_addr;
56488 + __u32 our_netmask;
56489 + char *p;
56490 + __u16 ip_port = 0;
56491 + const struct cred *cred = current_cred();
56492 +
56493 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56494 + return 0;
56495 +
56496 + curr = current->acl;
56497 + isk = inet_sk(sk);
56498 +
56499 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56500 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56501 + addr->sin_addr.s_addr = curr->inaddr_any_override;
56502 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56503 + struct sockaddr_in saddr;
56504 + int err;
56505 +
56506 + saddr.sin_family = AF_INET;
56507 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
56508 + saddr.sin_port = isk->inet_sport;
56509 +
56510 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56511 + if (err)
56512 + return err;
56513 +
56514 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56515 + if (err)
56516 + return err;
56517 + }
56518 +
56519 + if (!curr->ips)
56520 + return 0;
56521 +
56522 + ip_addr = addr->sin_addr.s_addr;
56523 + ip_port = ntohs(addr->sin_port);
56524 +
56525 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56526 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56527 + current->role->roletype, cred->uid,
56528 + cred->gid, current->exec_file ?
56529 + gr_to_filename(current->exec_file->f_path.dentry,
56530 + current->exec_file->f_path.mnt) :
56531 + curr->filename, curr->filename,
56532 + &ip_addr, ip_port, type,
56533 + sk->sk_protocol, mode, &current->signal->saved_ip);
56534 + return 0;
56535 + }
56536 +
56537 + for (i = 0; i < curr->ip_num; i++) {
56538 + ip = *(curr->ips + i);
56539 + if (ip->iface != NULL) {
56540 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
56541 + p = strchr(iface, ':');
56542 + if (p != NULL)
56543 + *p = '\0';
56544 + dev = dev_get_by_name(sock_net(sk), iface);
56545 + if (dev == NULL)
56546 + continue;
56547 + idev = in_dev_get(dev);
56548 + if (idev == NULL) {
56549 + dev_put(dev);
56550 + continue;
56551 + }
56552 + rcu_read_lock();
56553 + for_ifa(idev) {
56554 + if (!strcmp(ip->iface, ifa->ifa_label)) {
56555 + our_addr = ifa->ifa_address;
56556 + our_netmask = 0xffffffff;
56557 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56558 + if (ret == 1) {
56559 + rcu_read_unlock();
56560 + in_dev_put(idev);
56561 + dev_put(dev);
56562 + return 0;
56563 + } else if (ret == 2) {
56564 + rcu_read_unlock();
56565 + in_dev_put(idev);
56566 + dev_put(dev);
56567 + goto denied;
56568 + }
56569 + }
56570 + } endfor_ifa(idev);
56571 + rcu_read_unlock();
56572 + in_dev_put(idev);
56573 + dev_put(dev);
56574 + } else {
56575 + our_addr = ip->addr;
56576 + our_netmask = ip->netmask;
56577 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56578 + if (ret == 1)
56579 + return 0;
56580 + else if (ret == 2)
56581 + goto denied;
56582 + }
56583 + }
56584 +
56585 +denied:
56586 + if (mode == GR_BIND)
56587 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56588 + else if (mode == GR_CONNECT)
56589 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56590 +
56591 + return -EACCES;
56592 +}
56593 +
56594 +int
56595 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56596 +{
56597 + /* always allow disconnection of dgram sockets with connect */
56598 + if (addr->sin_family == AF_UNSPEC)
56599 + return 0;
56600 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56601 +}
56602 +
56603 +int
56604 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56605 +{
56606 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56607 +}
56608 +
56609 +int gr_search_listen(struct socket *sock)
56610 +{
56611 + struct sock *sk = sock->sk;
56612 + struct sockaddr_in addr;
56613 +
56614 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56615 + addr.sin_port = inet_sk(sk)->inet_sport;
56616 +
56617 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56618 +}
56619 +
56620 +int gr_search_accept(struct socket *sock)
56621 +{
56622 + struct sock *sk = sock->sk;
56623 + struct sockaddr_in addr;
56624 +
56625 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56626 + addr.sin_port = inet_sk(sk)->inet_sport;
56627 +
56628 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56629 +}
56630 +
56631 +int
56632 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56633 +{
56634 + if (addr)
56635 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56636 + else {
56637 + struct sockaddr_in sin;
56638 + const struct inet_sock *inet = inet_sk(sk);
56639 +
56640 + sin.sin_addr.s_addr = inet->inet_daddr;
56641 + sin.sin_port = inet->inet_dport;
56642 +
56643 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56644 + }
56645 +}
56646 +
56647 +int
56648 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56649 +{
56650 + struct sockaddr_in sin;
56651 +
56652 + if (unlikely(skb->len < sizeof (struct udphdr)))
56653 + return 0; // skip this packet
56654 +
56655 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56656 + sin.sin_port = udp_hdr(skb)->source;
56657 +
56658 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56659 +}
56660 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56661 new file mode 100644
56662 index 0000000..25f54ef
56663 --- /dev/null
56664 +++ b/grsecurity/gracl_learn.c
56665 @@ -0,0 +1,207 @@
56666 +#include <linux/kernel.h>
56667 +#include <linux/mm.h>
56668 +#include <linux/sched.h>
56669 +#include <linux/poll.h>
56670 +#include <linux/string.h>
56671 +#include <linux/file.h>
56672 +#include <linux/types.h>
56673 +#include <linux/vmalloc.h>
56674 +#include <linux/grinternal.h>
56675 +
56676 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56677 + size_t count, loff_t *ppos);
56678 +extern int gr_acl_is_enabled(void);
56679 +
56680 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56681 +static int gr_learn_attached;
56682 +
56683 +/* use a 512k buffer */
56684 +#define LEARN_BUFFER_SIZE (512 * 1024)
56685 +
56686 +static DEFINE_SPINLOCK(gr_learn_lock);
56687 +static DEFINE_MUTEX(gr_learn_user_mutex);
56688 +
56689 +/* we need to maintain two buffers, so that the kernel context of grlearn
56690 + uses a semaphore around the userspace copying, and the other kernel contexts
56691 + use a spinlock when copying into the buffer, since they cannot sleep
56692 +*/
56693 +static char *learn_buffer;
56694 +static char *learn_buffer_user;
56695 +static int learn_buffer_len;
56696 +static int learn_buffer_user_len;
56697 +
56698 +static ssize_t
56699 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56700 +{
56701 + DECLARE_WAITQUEUE(wait, current);
56702 + ssize_t retval = 0;
56703 +
56704 + add_wait_queue(&learn_wait, &wait);
56705 + set_current_state(TASK_INTERRUPTIBLE);
56706 + do {
56707 + mutex_lock(&gr_learn_user_mutex);
56708 + spin_lock(&gr_learn_lock);
56709 + if (learn_buffer_len)
56710 + break;
56711 + spin_unlock(&gr_learn_lock);
56712 + mutex_unlock(&gr_learn_user_mutex);
56713 + if (file->f_flags & O_NONBLOCK) {
56714 + retval = -EAGAIN;
56715 + goto out;
56716 + }
56717 + if (signal_pending(current)) {
56718 + retval = -ERESTARTSYS;
56719 + goto out;
56720 + }
56721 +
56722 + schedule();
56723 + } while (1);
56724 +
56725 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56726 + learn_buffer_user_len = learn_buffer_len;
56727 + retval = learn_buffer_len;
56728 + learn_buffer_len = 0;
56729 +
56730 + spin_unlock(&gr_learn_lock);
56731 +
56732 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56733 + retval = -EFAULT;
56734 +
56735 + mutex_unlock(&gr_learn_user_mutex);
56736 +out:
56737 + set_current_state(TASK_RUNNING);
56738 + remove_wait_queue(&learn_wait, &wait);
56739 + return retval;
56740 +}
56741 +
56742 +static unsigned int
56743 +poll_learn(struct file * file, poll_table * wait)
56744 +{
56745 + poll_wait(file, &learn_wait, wait);
56746 +
56747 + if (learn_buffer_len)
56748 + return (POLLIN | POLLRDNORM);
56749 +
56750 + return 0;
56751 +}
56752 +
56753 +void
56754 +gr_clear_learn_entries(void)
56755 +{
56756 + char *tmp;
56757 +
56758 + mutex_lock(&gr_learn_user_mutex);
56759 + spin_lock(&gr_learn_lock);
56760 + tmp = learn_buffer;
56761 + learn_buffer = NULL;
56762 + spin_unlock(&gr_learn_lock);
56763 + if (tmp)
56764 + vfree(tmp);
56765 + if (learn_buffer_user != NULL) {
56766 + vfree(learn_buffer_user);
56767 + learn_buffer_user = NULL;
56768 + }
56769 + learn_buffer_len = 0;
56770 + mutex_unlock(&gr_learn_user_mutex);
56771 +
56772 + return;
56773 +}
56774 +
56775 +void
56776 +gr_add_learn_entry(const char *fmt, ...)
56777 +{
56778 + va_list args;
56779 + unsigned int len;
56780 +
56781 + if (!gr_learn_attached)
56782 + return;
56783 +
56784 + spin_lock(&gr_learn_lock);
56785 +
56786 + /* leave a gap at the end so we know when it's "full" but don't have to
56787 + compute the exact length of the string we're trying to append
56788 + */
56789 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56790 + spin_unlock(&gr_learn_lock);
56791 + wake_up_interruptible(&learn_wait);
56792 + return;
56793 + }
56794 + if (learn_buffer == NULL) {
56795 + spin_unlock(&gr_learn_lock);
56796 + return;
56797 + }
56798 +
56799 + va_start(args, fmt);
56800 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56801 + va_end(args);
56802 +
56803 + learn_buffer_len += len + 1;
56804 +
56805 + spin_unlock(&gr_learn_lock);
56806 + wake_up_interruptible(&learn_wait);
56807 +
56808 + return;
56809 +}
56810 +
56811 +static int
56812 +open_learn(struct inode *inode, struct file *file)
56813 +{
56814 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56815 + return -EBUSY;
56816 + if (file->f_mode & FMODE_READ) {
56817 + int retval = 0;
56818 + mutex_lock(&gr_learn_user_mutex);
56819 + if (learn_buffer == NULL)
56820 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56821 + if (learn_buffer_user == NULL)
56822 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56823 + if (learn_buffer == NULL) {
56824 + retval = -ENOMEM;
56825 + goto out_error;
56826 + }
56827 + if (learn_buffer_user == NULL) {
56828 + retval = -ENOMEM;
56829 + goto out_error;
56830 + }
56831 + learn_buffer_len = 0;
56832 + learn_buffer_user_len = 0;
56833 + gr_learn_attached = 1;
56834 +out_error:
56835 + mutex_unlock(&gr_learn_user_mutex);
56836 + return retval;
56837 + }
56838 + return 0;
56839 +}
56840 +
56841 +static int
56842 +close_learn(struct inode *inode, struct file *file)
56843 +{
56844 + if (file->f_mode & FMODE_READ) {
56845 + char *tmp = NULL;
56846 + mutex_lock(&gr_learn_user_mutex);
56847 + spin_lock(&gr_learn_lock);
56848 + tmp = learn_buffer;
56849 + learn_buffer = NULL;
56850 + spin_unlock(&gr_learn_lock);
56851 + if (tmp)
56852 + vfree(tmp);
56853 + if (learn_buffer_user != NULL) {
56854 + vfree(learn_buffer_user);
56855 + learn_buffer_user = NULL;
56856 + }
56857 + learn_buffer_len = 0;
56858 + learn_buffer_user_len = 0;
56859 + gr_learn_attached = 0;
56860 + mutex_unlock(&gr_learn_user_mutex);
56861 + }
56862 +
56863 + return 0;
56864 +}
56865 +
56866 +const struct file_operations grsec_fops = {
56867 + .read = read_learn,
56868 + .write = write_grsec_handler,
56869 + .open = open_learn,
56870 + .release = close_learn,
56871 + .poll = poll_learn,
56872 +};
56873 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56874 new file mode 100644
56875 index 0000000..39645c9
56876 --- /dev/null
56877 +++ b/grsecurity/gracl_res.c
56878 @@ -0,0 +1,68 @@
56879 +#include <linux/kernel.h>
56880 +#include <linux/sched.h>
56881 +#include <linux/gracl.h>
56882 +#include <linux/grinternal.h>
56883 +
56884 +static const char *restab_log[] = {
56885 + [RLIMIT_CPU] = "RLIMIT_CPU",
56886 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56887 + [RLIMIT_DATA] = "RLIMIT_DATA",
56888 + [RLIMIT_STACK] = "RLIMIT_STACK",
56889 + [RLIMIT_CORE] = "RLIMIT_CORE",
56890 + [RLIMIT_RSS] = "RLIMIT_RSS",
56891 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56892 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56893 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56894 + [RLIMIT_AS] = "RLIMIT_AS",
56895 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56896 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56897 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56898 + [RLIMIT_NICE] = "RLIMIT_NICE",
56899 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56900 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56901 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56902 +};
56903 +
56904 +void
56905 +gr_log_resource(const struct task_struct *task,
56906 + const int res, const unsigned long wanted, const int gt)
56907 +{
56908 + const struct cred *cred;
56909 + unsigned long rlim;
56910 +
56911 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56912 + return;
56913 +
56914 + // not yet supported resource
56915 + if (unlikely(!restab_log[res]))
56916 + return;
56917 +
56918 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56919 + rlim = task_rlimit_max(task, res);
56920 + else
56921 + rlim = task_rlimit(task, res);
56922 +
56923 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56924 + return;
56925 +
56926 + rcu_read_lock();
56927 + cred = __task_cred(task);
56928 +
56929 + if (res == RLIMIT_NPROC &&
56930 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56931 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56932 + goto out_rcu_unlock;
56933 + else if (res == RLIMIT_MEMLOCK &&
56934 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56935 + goto out_rcu_unlock;
56936 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56937 + goto out_rcu_unlock;
56938 + rcu_read_unlock();
56939 +
56940 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56941 +
56942 + return;
56943 +out_rcu_unlock:
56944 + rcu_read_unlock();
56945 + return;
56946 +}
56947 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56948 new file mode 100644
56949 index 0000000..25197e9
56950 --- /dev/null
56951 +++ b/grsecurity/gracl_segv.c
56952 @@ -0,0 +1,299 @@
56953 +#include <linux/kernel.h>
56954 +#include <linux/mm.h>
56955 +#include <asm/uaccess.h>
56956 +#include <asm/errno.h>
56957 +#include <asm/mman.h>
56958 +#include <net/sock.h>
56959 +#include <linux/file.h>
56960 +#include <linux/fs.h>
56961 +#include <linux/net.h>
56962 +#include <linux/in.h>
56963 +#include <linux/slab.h>
56964 +#include <linux/types.h>
56965 +#include <linux/sched.h>
56966 +#include <linux/timer.h>
56967 +#include <linux/gracl.h>
56968 +#include <linux/grsecurity.h>
56969 +#include <linux/grinternal.h>
56970 +
56971 +static struct crash_uid *uid_set;
56972 +static unsigned short uid_used;
56973 +static DEFINE_SPINLOCK(gr_uid_lock);
56974 +extern rwlock_t gr_inode_lock;
56975 +extern struct acl_subject_label *
56976 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56977 + struct acl_role_label *role);
56978 +
56979 +#ifdef CONFIG_BTRFS_FS
56980 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56981 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56982 +#endif
56983 +
56984 +static inline dev_t __get_dev(const struct dentry *dentry)
56985 +{
56986 +#ifdef CONFIG_BTRFS_FS
56987 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56988 + return get_btrfs_dev_from_inode(dentry->d_inode);
56989 + else
56990 +#endif
56991 + return dentry->d_inode->i_sb->s_dev;
56992 +}
56993 +
56994 +int
56995 +gr_init_uidset(void)
56996 +{
56997 + uid_set =
56998 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56999 + uid_used = 0;
57000 +
57001 + return uid_set ? 1 : 0;
57002 +}
57003 +
57004 +void
57005 +gr_free_uidset(void)
57006 +{
57007 + if (uid_set)
57008 + kfree(uid_set);
57009 +
57010 + return;
57011 +}
57012 +
57013 +int
57014 +gr_find_uid(const uid_t uid)
57015 +{
57016 + struct crash_uid *tmp = uid_set;
57017 + uid_t buid;
57018 + int low = 0, high = uid_used - 1, mid;
57019 +
57020 + while (high >= low) {
57021 + mid = (low + high) >> 1;
57022 + buid = tmp[mid].uid;
57023 + if (buid == uid)
57024 + return mid;
57025 + if (buid > uid)
57026 + high = mid - 1;
57027 + if (buid < uid)
57028 + low = mid + 1;
57029 + }
57030 +
57031 + return -1;
57032 +}
57033 +
57034 +static __inline__ void
57035 +gr_insertsort(void)
57036 +{
57037 + unsigned short i, j;
57038 + struct crash_uid index;
57039 +
57040 + for (i = 1; i < uid_used; i++) {
57041 + index = uid_set[i];
57042 + j = i;
57043 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
57044 + uid_set[j] = uid_set[j - 1];
57045 + j--;
57046 + }
57047 + uid_set[j] = index;
57048 + }
57049 +
57050 + return;
57051 +}
57052 +
57053 +static __inline__ void
57054 +gr_insert_uid(const uid_t uid, const unsigned long expires)
57055 +{
57056 + int loc;
57057 +
57058 + if (uid_used == GR_UIDTABLE_MAX)
57059 + return;
57060 +
57061 + loc = gr_find_uid(uid);
57062 +
57063 + if (loc >= 0) {
57064 + uid_set[loc].expires = expires;
57065 + return;
57066 + }
57067 +
57068 + uid_set[uid_used].uid = uid;
57069 + uid_set[uid_used].expires = expires;
57070 + uid_used++;
57071 +
57072 + gr_insertsort();
57073 +
57074 + return;
57075 +}
57076 +
57077 +void
57078 +gr_remove_uid(const unsigned short loc)
57079 +{
57080 + unsigned short i;
57081 +
57082 + for (i = loc + 1; i < uid_used; i++)
57083 + uid_set[i - 1] = uid_set[i];
57084 +
57085 + uid_used--;
57086 +
57087 + return;
57088 +}
57089 +
57090 +int
57091 +gr_check_crash_uid(const uid_t uid)
57092 +{
57093 + int loc;
57094 + int ret = 0;
57095 +
57096 + if (unlikely(!gr_acl_is_enabled()))
57097 + return 0;
57098 +
57099 + spin_lock(&gr_uid_lock);
57100 + loc = gr_find_uid(uid);
57101 +
57102 + if (loc < 0)
57103 + goto out_unlock;
57104 +
57105 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
57106 + gr_remove_uid(loc);
57107 + else
57108 + ret = 1;
57109 +
57110 +out_unlock:
57111 + spin_unlock(&gr_uid_lock);
57112 + return ret;
57113 +}
57114 +
57115 +static __inline__ int
57116 +proc_is_setxid(const struct cred *cred)
57117 +{
57118 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
57119 + !uid_eq(cred->uid, cred->fsuid))
57120 + return 1;
57121 + if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
57122 + !uid_eq(cred->gid, cred->fsgid))
57123 + return 1;
57124 +
57125 + return 0;
57126 +}
57127 +
57128 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
57129 +
57130 +void
57131 +gr_handle_crash(struct task_struct *task, const int sig)
57132 +{
57133 + struct acl_subject_label *curr;
57134 + struct task_struct *tsk, *tsk2;
57135 + const struct cred *cred;
57136 + const struct cred *cred2;
57137 +
57138 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
57139 + return;
57140 +
57141 + if (unlikely(!gr_acl_is_enabled()))
57142 + return;
57143 +
57144 + curr = task->acl;
57145 +
57146 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
57147 + return;
57148 +
57149 + if (time_before_eq(curr->expires, get_seconds())) {
57150 + curr->expires = 0;
57151 + curr->crashes = 0;
57152 + }
57153 +
57154 + curr->crashes++;
57155 +
57156 + if (!curr->expires)
57157 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
57158 +
57159 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57160 + time_after(curr->expires, get_seconds())) {
57161 + rcu_read_lock();
57162 + cred = __task_cred(task);
57163 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
57164 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57165 + spin_lock(&gr_uid_lock);
57166 + gr_insert_uid(cred->uid, curr->expires);
57167 + spin_unlock(&gr_uid_lock);
57168 + curr->expires = 0;
57169 + curr->crashes = 0;
57170 + read_lock(&tasklist_lock);
57171 + do_each_thread(tsk2, tsk) {
57172 + cred2 = __task_cred(tsk);
57173 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
57174 + gr_fake_force_sig(SIGKILL, tsk);
57175 + } while_each_thread(tsk2, tsk);
57176 + read_unlock(&tasklist_lock);
57177 + } else {
57178 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57179 + read_lock(&tasklist_lock);
57180 + read_lock(&grsec_exec_file_lock);
57181 + do_each_thread(tsk2, tsk) {
57182 + if (likely(tsk != task)) {
57183 + // if this thread has the same subject as the one that triggered
57184 + // RES_CRASH and it's the same binary, kill it
57185 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
57186 + gr_fake_force_sig(SIGKILL, tsk);
57187 + }
57188 + } while_each_thread(tsk2, tsk);
57189 + read_unlock(&grsec_exec_file_lock);
57190 + read_unlock(&tasklist_lock);
57191 + }
57192 + rcu_read_unlock();
57193 + }
57194 +
57195 + return;
57196 +}
57197 +
57198 +int
57199 +gr_check_crash_exec(const struct file *filp)
57200 +{
57201 + struct acl_subject_label *curr;
57202 +
57203 + if (unlikely(!gr_acl_is_enabled()))
57204 + return 0;
57205 +
57206 + read_lock(&gr_inode_lock);
57207 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
57208 + __get_dev(filp->f_path.dentry),
57209 + current->role);
57210 + read_unlock(&gr_inode_lock);
57211 +
57212 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
57213 + (!curr->crashes && !curr->expires))
57214 + return 0;
57215 +
57216 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57217 + time_after(curr->expires, get_seconds()))
57218 + return 1;
57219 + else if (time_before_eq(curr->expires, get_seconds())) {
57220 + curr->crashes = 0;
57221 + curr->expires = 0;
57222 + }
57223 +
57224 + return 0;
57225 +}
57226 +
57227 +void
57228 +gr_handle_alertkill(struct task_struct *task)
57229 +{
57230 + struct acl_subject_label *curracl;
57231 + __u32 curr_ip;
57232 + struct task_struct *p, *p2;
57233 +
57234 + if (unlikely(!gr_acl_is_enabled()))
57235 + return;
57236 +
57237 + curracl = task->acl;
57238 + curr_ip = task->signal->curr_ip;
57239 +
57240 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57241 + read_lock(&tasklist_lock);
57242 + do_each_thread(p2, p) {
57243 + if (p->signal->curr_ip == curr_ip)
57244 + gr_fake_force_sig(SIGKILL, p);
57245 + } while_each_thread(p2, p);
57246 + read_unlock(&tasklist_lock);
57247 + } else if (curracl->mode & GR_KILLPROC)
57248 + gr_fake_force_sig(SIGKILL, task);
57249 +
57250 + return;
57251 +}
57252 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57253 new file mode 100644
57254 index 0000000..9d83a69
57255 --- /dev/null
57256 +++ b/grsecurity/gracl_shm.c
57257 @@ -0,0 +1,40 @@
57258 +#include <linux/kernel.h>
57259 +#include <linux/mm.h>
57260 +#include <linux/sched.h>
57261 +#include <linux/file.h>
57262 +#include <linux/ipc.h>
57263 +#include <linux/gracl.h>
57264 +#include <linux/grsecurity.h>
57265 +#include <linux/grinternal.h>
57266 +
57267 +int
57268 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57269 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57270 +{
57271 + struct task_struct *task;
57272 +
57273 + if (!gr_acl_is_enabled())
57274 + return 1;
57275 +
57276 + rcu_read_lock();
57277 + read_lock(&tasklist_lock);
57278 +
57279 + task = find_task_by_vpid(shm_cprid);
57280 +
57281 + if (unlikely(!task))
57282 + task = find_task_by_vpid(shm_lapid);
57283 +
57284 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57285 + (task->pid == shm_lapid)) &&
57286 + (task->acl->mode & GR_PROTSHM) &&
57287 + (task->acl != current->acl))) {
57288 + read_unlock(&tasklist_lock);
57289 + rcu_read_unlock();
57290 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57291 + return 0;
57292 + }
57293 + read_unlock(&tasklist_lock);
57294 + rcu_read_unlock();
57295 +
57296 + return 1;
57297 +}
57298 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57299 new file mode 100644
57300 index 0000000..bc0be01
57301 --- /dev/null
57302 +++ b/grsecurity/grsec_chdir.c
57303 @@ -0,0 +1,19 @@
57304 +#include <linux/kernel.h>
57305 +#include <linux/sched.h>
57306 +#include <linux/fs.h>
57307 +#include <linux/file.h>
57308 +#include <linux/grsecurity.h>
57309 +#include <linux/grinternal.h>
57310 +
57311 +void
57312 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57313 +{
57314 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57315 + if ((grsec_enable_chdir && grsec_enable_group &&
57316 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57317 + !grsec_enable_group)) {
57318 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57319 + }
57320 +#endif
57321 + return;
57322 +}
57323 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57324 new file mode 100644
57325 index 0000000..9807ee2
57326 --- /dev/null
57327 +++ b/grsecurity/grsec_chroot.c
57328 @@ -0,0 +1,368 @@
57329 +#include <linux/kernel.h>
57330 +#include <linux/module.h>
57331 +#include <linux/sched.h>
57332 +#include <linux/file.h>
57333 +#include <linux/fs.h>
57334 +#include <linux/mount.h>
57335 +#include <linux/types.h>
57336 +#include "../fs/mount.h"
57337 +#include <linux/grsecurity.h>
57338 +#include <linux/grinternal.h>
57339 +
57340 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57341 +{
57342 +#ifdef CONFIG_GRKERNSEC
57343 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
57344 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
57345 + task->gr_is_chrooted = 1;
57346 + else
57347 + task->gr_is_chrooted = 0;
57348 +
57349 + task->gr_chroot_dentry = path->dentry;
57350 +#endif
57351 + return;
57352 +}
57353 +
57354 +void gr_clear_chroot_entries(struct task_struct *task)
57355 +{
57356 +#ifdef CONFIG_GRKERNSEC
57357 + task->gr_is_chrooted = 0;
57358 + task->gr_chroot_dentry = NULL;
57359 +#endif
57360 + return;
57361 +}
57362 +
57363 +int
57364 +gr_handle_chroot_unix(const pid_t pid)
57365 +{
57366 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57367 + struct task_struct *p;
57368 +
57369 + if (unlikely(!grsec_enable_chroot_unix))
57370 + return 1;
57371 +
57372 + if (likely(!proc_is_chrooted(current)))
57373 + return 1;
57374 +
57375 + rcu_read_lock();
57376 + read_lock(&tasklist_lock);
57377 + p = find_task_by_vpid_unrestricted(pid);
57378 + if (unlikely(p && !have_same_root(current, p))) {
57379 + read_unlock(&tasklist_lock);
57380 + rcu_read_unlock();
57381 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57382 + return 0;
57383 + }
57384 + read_unlock(&tasklist_lock);
57385 + rcu_read_unlock();
57386 +#endif
57387 + return 1;
57388 +}
57389 +
57390 +int
57391 +gr_handle_chroot_nice(void)
57392 +{
57393 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57394 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57395 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57396 + return -EPERM;
57397 + }
57398 +#endif
57399 + return 0;
57400 +}
57401 +
57402 +int
57403 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57404 +{
57405 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57406 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57407 + && proc_is_chrooted(current)) {
57408 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57409 + return -EACCES;
57410 + }
57411 +#endif
57412 + return 0;
57413 +}
57414 +
57415 +int
57416 +gr_handle_chroot_rawio(const struct inode *inode)
57417 +{
57418 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57419 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57420 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57421 + return 1;
57422 +#endif
57423 + return 0;
57424 +}
57425 +
57426 +int
57427 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57428 +{
57429 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57430 + struct task_struct *p;
57431 + int ret = 0;
57432 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57433 + return ret;
57434 +
57435 + read_lock(&tasklist_lock);
57436 + do_each_pid_task(pid, type, p) {
57437 + if (!have_same_root(current, p)) {
57438 + ret = 1;
57439 + goto out;
57440 + }
57441 + } while_each_pid_task(pid, type, p);
57442 +out:
57443 + read_unlock(&tasklist_lock);
57444 + return ret;
57445 +#endif
57446 + return 0;
57447 +}
57448 +
57449 +int
57450 +gr_pid_is_chrooted(struct task_struct *p)
57451 +{
57452 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57453 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57454 + return 0;
57455 +
57456 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57457 + !have_same_root(current, p)) {
57458 + return 1;
57459 + }
57460 +#endif
57461 + return 0;
57462 +}
57463 +
57464 +EXPORT_SYMBOL(gr_pid_is_chrooted);
57465 +
57466 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57467 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57468 +{
57469 + struct path path, currentroot;
57470 + int ret = 0;
57471 +
57472 + path.dentry = (struct dentry *)u_dentry;
57473 + path.mnt = (struct vfsmount *)u_mnt;
57474 + get_fs_root(current->fs, &currentroot);
57475 + if (path_is_under(&path, &currentroot))
57476 + ret = 1;
57477 + path_put(&currentroot);
57478 +
57479 + return ret;
57480 +}
57481 +#endif
57482 +
57483 +int
57484 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57485 +{
57486 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57487 + if (!grsec_enable_chroot_fchdir)
57488 + return 1;
57489 +
57490 + if (!proc_is_chrooted(current))
57491 + return 1;
57492 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57493 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57494 + return 0;
57495 + }
57496 +#endif
57497 + return 1;
57498 +}
57499 +
57500 +int
57501 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57502 + const time_t shm_createtime)
57503 +{
57504 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57505 + struct task_struct *p;
57506 + time_t starttime;
57507 +
57508 + if (unlikely(!grsec_enable_chroot_shmat))
57509 + return 1;
57510 +
57511 + if (likely(!proc_is_chrooted(current)))
57512 + return 1;
57513 +
57514 + rcu_read_lock();
57515 + read_lock(&tasklist_lock);
57516 +
57517 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57518 + starttime = p->start_time.tv_sec;
57519 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57520 + if (have_same_root(current, p)) {
57521 + goto allow;
57522 + } else {
57523 + read_unlock(&tasklist_lock);
57524 + rcu_read_unlock();
57525 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57526 + return 0;
57527 + }
57528 + }
57529 + /* creator exited, pid reuse, fall through to next check */
57530 + }
57531 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57532 + if (unlikely(!have_same_root(current, p))) {
57533 + read_unlock(&tasklist_lock);
57534 + rcu_read_unlock();
57535 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57536 + return 0;
57537 + }
57538 + }
57539 +
57540 +allow:
57541 + read_unlock(&tasklist_lock);
57542 + rcu_read_unlock();
57543 +#endif
57544 + return 1;
57545 +}
57546 +
57547 +void
57548 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57549 +{
57550 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57551 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57552 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57553 +#endif
57554 + return;
57555 +}
57556 +
57557 +int
57558 +gr_handle_chroot_mknod(const struct dentry *dentry,
57559 + const struct vfsmount *mnt, const int mode)
57560 +{
57561 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57562 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57563 + proc_is_chrooted(current)) {
57564 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57565 + return -EPERM;
57566 + }
57567 +#endif
57568 + return 0;
57569 +}
57570 +
57571 +int
57572 +gr_handle_chroot_mount(const struct dentry *dentry,
57573 + const struct vfsmount *mnt, const char *dev_name)
57574 +{
57575 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57576 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57577 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57578 + return -EPERM;
57579 + }
57580 +#endif
57581 + return 0;
57582 +}
57583 +
57584 +int
57585 +gr_handle_chroot_pivot(void)
57586 +{
57587 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57588 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57589 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57590 + return -EPERM;
57591 + }
57592 +#endif
57593 + return 0;
57594 +}
57595 +
57596 +int
57597 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57598 +{
57599 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57600 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57601 + !gr_is_outside_chroot(dentry, mnt)) {
57602 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57603 + return -EPERM;
57604 + }
57605 +#endif
57606 + return 0;
57607 +}
57608 +
57609 +extern const char *captab_log[];
57610 +extern int captab_log_entries;
57611 +
57612 +int
57613 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57614 +{
57615 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57616 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57617 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57618 + if (cap_raised(chroot_caps, cap)) {
57619 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57620 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57621 + }
57622 + return 0;
57623 + }
57624 + }
57625 +#endif
57626 + return 1;
57627 +}
57628 +
57629 +int
57630 +gr_chroot_is_capable(const int cap)
57631 +{
57632 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57633 + return gr_task_chroot_is_capable(current, current_cred(), cap);
57634 +#endif
57635 + return 1;
57636 +}
57637 +
57638 +int
57639 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57640 +{
57641 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57642 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57643 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57644 + if (cap_raised(chroot_caps, cap)) {
57645 + return 0;
57646 + }
57647 + }
57648 +#endif
57649 + return 1;
57650 +}
57651 +
57652 +int
57653 +gr_chroot_is_capable_nolog(const int cap)
57654 +{
57655 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57656 + return gr_task_chroot_is_capable_nolog(current, cap);
57657 +#endif
57658 + return 1;
57659 +}
57660 +
57661 +int
57662 +gr_handle_chroot_sysctl(const int op)
57663 +{
57664 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57665 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57666 + proc_is_chrooted(current))
57667 + return -EACCES;
57668 +#endif
57669 + return 0;
57670 +}
57671 +
57672 +void
57673 +gr_handle_chroot_chdir(struct path *path)
57674 +{
57675 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57676 + if (grsec_enable_chroot_chdir)
57677 + set_fs_pwd(current->fs, path);
57678 +#endif
57679 + return;
57680 +}
57681 +
57682 +int
57683 +gr_handle_chroot_chmod(const struct dentry *dentry,
57684 + const struct vfsmount *mnt, const int mode)
57685 +{
57686 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57687 + /* allow chmod +s on directories, but not files */
57688 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57689 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57690 + proc_is_chrooted(current)) {
57691 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57692 + return -EPERM;
57693 + }
57694 +#endif
57695 + return 0;
57696 +}
57697 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57698 new file mode 100644
57699 index 0000000..213ad8b
57700 --- /dev/null
57701 +++ b/grsecurity/grsec_disabled.c
57702 @@ -0,0 +1,437 @@
57703 +#include <linux/kernel.h>
57704 +#include <linux/module.h>
57705 +#include <linux/sched.h>
57706 +#include <linux/file.h>
57707 +#include <linux/fs.h>
57708 +#include <linux/kdev_t.h>
57709 +#include <linux/net.h>
57710 +#include <linux/in.h>
57711 +#include <linux/ip.h>
57712 +#include <linux/skbuff.h>
57713 +#include <linux/sysctl.h>
57714 +
57715 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57716 +void
57717 +pax_set_initial_flags(struct linux_binprm *bprm)
57718 +{
57719 + return;
57720 +}
57721 +#endif
57722 +
57723 +#ifdef CONFIG_SYSCTL
57724 +__u32
57725 +gr_handle_sysctl(const struct ctl_table * table, const int op)
57726 +{
57727 + return 0;
57728 +}
57729 +#endif
57730 +
57731 +#ifdef CONFIG_TASKSTATS
57732 +int gr_is_taskstats_denied(int pid)
57733 +{
57734 + return 0;
57735 +}
57736 +#endif
57737 +
57738 +int
57739 +gr_acl_is_enabled(void)
57740 +{
57741 + return 0;
57742 +}
57743 +
57744 +void
57745 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57746 +{
57747 + return;
57748 +}
57749 +
57750 +int
57751 +gr_handle_rawio(const struct inode *inode)
57752 +{
57753 + return 0;
57754 +}
57755 +
57756 +void
57757 +gr_acl_handle_psacct(struct task_struct *task, const long code)
57758 +{
57759 + return;
57760 +}
57761 +
57762 +int
57763 +gr_handle_ptrace(struct task_struct *task, const long request)
57764 +{
57765 + return 0;
57766 +}
57767 +
57768 +int
57769 +gr_handle_proc_ptrace(struct task_struct *task)
57770 +{
57771 + return 0;
57772 +}
57773 +
57774 +void
57775 +gr_learn_resource(const struct task_struct *task,
57776 + const int res, const unsigned long wanted, const int gt)
57777 +{
57778 + return;
57779 +}
57780 +
57781 +int
57782 +gr_set_acls(const int type)
57783 +{
57784 + return 0;
57785 +}
57786 +
57787 +int
57788 +gr_check_hidden_task(const struct task_struct *tsk)
57789 +{
57790 + return 0;
57791 +}
57792 +
57793 +int
57794 +gr_check_protected_task(const struct task_struct *task)
57795 +{
57796 + return 0;
57797 +}
57798 +
57799 +int
57800 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57801 +{
57802 + return 0;
57803 +}
57804 +
57805 +void
57806 +gr_copy_label(struct task_struct *tsk)
57807 +{
57808 + return;
57809 +}
57810 +
57811 +void
57812 +gr_set_pax_flags(struct task_struct *task)
57813 +{
57814 + return;
57815 +}
57816 +
57817 +int
57818 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57819 + const int unsafe_share)
57820 +{
57821 + return 0;
57822 +}
57823 +
57824 +void
57825 +gr_handle_delete(const ino_t ino, const dev_t dev)
57826 +{
57827 + return;
57828 +}
57829 +
57830 +void
57831 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57832 +{
57833 + return;
57834 +}
57835 +
57836 +void
57837 +gr_handle_crash(struct task_struct *task, const int sig)
57838 +{
57839 + return;
57840 +}
57841 +
57842 +int
57843 +gr_check_crash_exec(const struct file *filp)
57844 +{
57845 + return 0;
57846 +}
57847 +
57848 +int
57849 +gr_check_crash_uid(const uid_t uid)
57850 +{
57851 + return 0;
57852 +}
57853 +
57854 +void
57855 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57856 + struct dentry *old_dentry,
57857 + struct dentry *new_dentry,
57858 + struct vfsmount *mnt, const __u8 replace)
57859 +{
57860 + return;
57861 +}
57862 +
57863 +int
57864 +gr_search_socket(const int family, const int type, const int protocol)
57865 +{
57866 + return 1;
57867 +}
57868 +
57869 +int
57870 +gr_search_connectbind(const int mode, const struct socket *sock,
57871 + const struct sockaddr_in *addr)
57872 +{
57873 + return 0;
57874 +}
57875 +
57876 +void
57877 +gr_handle_alertkill(struct task_struct *task)
57878 +{
57879 + return;
57880 +}
57881 +
57882 +__u32
57883 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57884 +{
57885 + return 1;
57886 +}
57887 +
57888 +__u32
57889 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57890 + const struct vfsmount * mnt)
57891 +{
57892 + return 1;
57893 +}
57894 +
57895 +__u32
57896 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57897 + int acc_mode)
57898 +{
57899 + return 1;
57900 +}
57901 +
57902 +__u32
57903 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57904 +{
57905 + return 1;
57906 +}
57907 +
57908 +__u32
57909 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57910 +{
57911 + return 1;
57912 +}
57913 +
57914 +int
57915 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57916 + unsigned int *vm_flags)
57917 +{
57918 + return 1;
57919 +}
57920 +
57921 +__u32
57922 +gr_acl_handle_truncate(const struct dentry * dentry,
57923 + const struct vfsmount * mnt)
57924 +{
57925 + return 1;
57926 +}
57927 +
57928 +__u32
57929 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57930 +{
57931 + return 1;
57932 +}
57933 +
57934 +__u32
57935 +gr_acl_handle_access(const struct dentry * dentry,
57936 + const struct vfsmount * mnt, const int fmode)
57937 +{
57938 + return 1;
57939 +}
57940 +
57941 +__u32
57942 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57943 + umode_t *mode)
57944 +{
57945 + return 1;
57946 +}
57947 +
57948 +__u32
57949 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57950 +{
57951 + return 1;
57952 +}
57953 +
57954 +__u32
57955 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57956 +{
57957 + return 1;
57958 +}
57959 +
57960 +void
57961 +grsecurity_init(void)
57962 +{
57963 + return;
57964 +}
57965 +
57966 +umode_t gr_acl_umask(void)
57967 +{
57968 + return 0;
57969 +}
57970 +
57971 +__u32
57972 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57973 + const struct dentry * parent_dentry,
57974 + const struct vfsmount * parent_mnt,
57975 + const int mode)
57976 +{
57977 + return 1;
57978 +}
57979 +
57980 +__u32
57981 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57982 + const struct dentry * parent_dentry,
57983 + const struct vfsmount * parent_mnt)
57984 +{
57985 + return 1;
57986 +}
57987 +
57988 +__u32
57989 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57990 + const struct dentry * parent_dentry,
57991 + const struct vfsmount * parent_mnt, const char *from)
57992 +{
57993 + return 1;
57994 +}
57995 +
57996 +__u32
57997 +gr_acl_handle_link(const struct dentry * new_dentry,
57998 + const struct dentry * parent_dentry,
57999 + const struct vfsmount * parent_mnt,
58000 + const struct dentry * old_dentry,
58001 + const struct vfsmount * old_mnt, const char *to)
58002 +{
58003 + return 1;
58004 +}
58005 +
58006 +int
58007 +gr_acl_handle_rename(const struct dentry *new_dentry,
58008 + const struct dentry *parent_dentry,
58009 + const struct vfsmount *parent_mnt,
58010 + const struct dentry *old_dentry,
58011 + const struct inode *old_parent_inode,
58012 + const struct vfsmount *old_mnt, const char *newname)
58013 +{
58014 + return 0;
58015 +}
58016 +
58017 +int
58018 +gr_acl_handle_filldir(const struct file *file, const char *name,
58019 + const int namelen, const ino_t ino)
58020 +{
58021 + return 1;
58022 +}
58023 +
58024 +int
58025 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58026 + const time_t shm_createtime, const uid_t cuid, const int shmid)
58027 +{
58028 + return 1;
58029 +}
58030 +
58031 +int
58032 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
58033 +{
58034 + return 0;
58035 +}
58036 +
58037 +int
58038 +gr_search_accept(const struct socket *sock)
58039 +{
58040 + return 0;
58041 +}
58042 +
58043 +int
58044 +gr_search_listen(const struct socket *sock)
58045 +{
58046 + return 0;
58047 +}
58048 +
58049 +int
58050 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
58051 +{
58052 + return 0;
58053 +}
58054 +
58055 +__u32
58056 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
58057 +{
58058 + return 1;
58059 +}
58060 +
58061 +__u32
58062 +gr_acl_handle_creat(const struct dentry * dentry,
58063 + const struct dentry * p_dentry,
58064 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58065 + const int imode)
58066 +{
58067 + return 1;
58068 +}
58069 +
58070 +void
58071 +gr_acl_handle_exit(void)
58072 +{
58073 + return;
58074 +}
58075 +
58076 +int
58077 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58078 +{
58079 + return 1;
58080 +}
58081 +
58082 +void
58083 +gr_set_role_label(const uid_t uid, const gid_t gid)
58084 +{
58085 + return;
58086 +}
58087 +
58088 +int
58089 +gr_acl_handle_procpidmem(const struct task_struct *task)
58090 +{
58091 + return 0;
58092 +}
58093 +
58094 +int
58095 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
58096 +{
58097 + return 0;
58098 +}
58099 +
58100 +int
58101 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
58102 +{
58103 + return 0;
58104 +}
58105 +
58106 +void
58107 +gr_set_kernel_label(struct task_struct *task)
58108 +{
58109 + return;
58110 +}
58111 +
58112 +int
58113 +gr_check_user_change(int real, int effective, int fs)
58114 +{
58115 + return 0;
58116 +}
58117 +
58118 +int
58119 +gr_check_group_change(int real, int effective, int fs)
58120 +{
58121 + return 0;
58122 +}
58123 +
58124 +int gr_acl_enable_at_secure(void)
58125 +{
58126 + return 0;
58127 +}
58128 +
58129 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58130 +{
58131 + return dentry->d_inode->i_sb->s_dev;
58132 +}
58133 +
58134 +EXPORT_SYMBOL(gr_learn_resource);
58135 +EXPORT_SYMBOL(gr_set_kernel_label);
58136 +#ifdef CONFIG_SECURITY
58137 +EXPORT_SYMBOL(gr_check_user_change);
58138 +EXPORT_SYMBOL(gr_check_group_change);
58139 +#endif
58140 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
58141 new file mode 100644
58142 index 0000000..abfa971
58143 --- /dev/null
58144 +++ b/grsecurity/grsec_exec.c
58145 @@ -0,0 +1,174 @@
58146 +#include <linux/kernel.h>
58147 +#include <linux/sched.h>
58148 +#include <linux/file.h>
58149 +#include <linux/binfmts.h>
58150 +#include <linux/fs.h>
58151 +#include <linux/types.h>
58152 +#include <linux/grdefs.h>
58153 +#include <linux/grsecurity.h>
58154 +#include <linux/grinternal.h>
58155 +#include <linux/capability.h>
58156 +#include <linux/module.h>
58157 +
58158 +#include <asm/uaccess.h>
58159 +
58160 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58161 +static char gr_exec_arg_buf[132];
58162 +static DEFINE_MUTEX(gr_exec_arg_mutex);
58163 +#endif
58164 +
58165 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58166 +
58167 +void
58168 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58169 +{
58170 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58171 + char *grarg = gr_exec_arg_buf;
58172 + unsigned int i, x, execlen = 0;
58173 + char c;
58174 +
58175 + if (!((grsec_enable_execlog && grsec_enable_group &&
58176 + in_group_p(grsec_audit_gid))
58177 + || (grsec_enable_execlog && !grsec_enable_group)))
58178 + return;
58179 +
58180 + mutex_lock(&gr_exec_arg_mutex);
58181 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
58182 +
58183 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
58184 + const char __user *p;
58185 + unsigned int len;
58186 +
58187 + p = get_user_arg_ptr(argv, i);
58188 + if (IS_ERR(p))
58189 + goto log;
58190 +
58191 + len = strnlen_user(p, 128 - execlen);
58192 + if (len > 128 - execlen)
58193 + len = 128 - execlen;
58194 + else if (len > 0)
58195 + len--;
58196 + if (copy_from_user(grarg + execlen, p, len))
58197 + goto log;
58198 +
58199 + /* rewrite unprintable characters */
58200 + for (x = 0; x < len; x++) {
58201 + c = *(grarg + execlen + x);
58202 + if (c < 32 || c > 126)
58203 + *(grarg + execlen + x) = ' ';
58204 + }
58205 +
58206 + execlen += len;
58207 + *(grarg + execlen) = ' ';
58208 + *(grarg + execlen + 1) = '\0';
58209 + execlen++;
58210 + }
58211 +
58212 + log:
58213 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
58214 + bprm->file->f_path.mnt, grarg);
58215 + mutex_unlock(&gr_exec_arg_mutex);
58216 +#endif
58217 + return;
58218 +}
58219 +
58220 +#ifdef CONFIG_GRKERNSEC
58221 +extern int gr_acl_is_capable(const int cap);
58222 +extern int gr_acl_is_capable_nolog(const int cap);
58223 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58224 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
58225 +extern int gr_chroot_is_capable(const int cap);
58226 +extern int gr_chroot_is_capable_nolog(const int cap);
58227 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58228 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
58229 +#endif
58230 +
58231 +const char *captab_log[] = {
58232 + "CAP_CHOWN",
58233 + "CAP_DAC_OVERRIDE",
58234 + "CAP_DAC_READ_SEARCH",
58235 + "CAP_FOWNER",
58236 + "CAP_FSETID",
58237 + "CAP_KILL",
58238 + "CAP_SETGID",
58239 + "CAP_SETUID",
58240 + "CAP_SETPCAP",
58241 + "CAP_LINUX_IMMUTABLE",
58242 + "CAP_NET_BIND_SERVICE",
58243 + "CAP_NET_BROADCAST",
58244 + "CAP_NET_ADMIN",
58245 + "CAP_NET_RAW",
58246 + "CAP_IPC_LOCK",
58247 + "CAP_IPC_OWNER",
58248 + "CAP_SYS_MODULE",
58249 + "CAP_SYS_RAWIO",
58250 + "CAP_SYS_CHROOT",
58251 + "CAP_SYS_PTRACE",
58252 + "CAP_SYS_PACCT",
58253 + "CAP_SYS_ADMIN",
58254 + "CAP_SYS_BOOT",
58255 + "CAP_SYS_NICE",
58256 + "CAP_SYS_RESOURCE",
58257 + "CAP_SYS_TIME",
58258 + "CAP_SYS_TTY_CONFIG",
58259 + "CAP_MKNOD",
58260 + "CAP_LEASE",
58261 + "CAP_AUDIT_WRITE",
58262 + "CAP_AUDIT_CONTROL",
58263 + "CAP_SETFCAP",
58264 + "CAP_MAC_OVERRIDE",
58265 + "CAP_MAC_ADMIN",
58266 + "CAP_SYSLOG",
58267 + "CAP_WAKE_ALARM"
58268 +};
58269 +
58270 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
58271 +
58272 +int gr_is_capable(const int cap)
58273 +{
58274 +#ifdef CONFIG_GRKERNSEC
58275 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58276 + return 1;
58277 + return 0;
58278 +#else
58279 + return 1;
58280 +#endif
58281 +}
58282 +
58283 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58284 +{
58285 +#ifdef CONFIG_GRKERNSEC
58286 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
58287 + return 1;
58288 + return 0;
58289 +#else
58290 + return 1;
58291 +#endif
58292 +}
58293 +
58294 +int gr_is_capable_nolog(const int cap)
58295 +{
58296 +#ifdef CONFIG_GRKERNSEC
58297 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58298 + return 1;
58299 + return 0;
58300 +#else
58301 + return 1;
58302 +#endif
58303 +}
58304 +
58305 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
58306 +{
58307 +#ifdef CONFIG_GRKERNSEC
58308 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
58309 + return 1;
58310 + return 0;
58311 +#else
58312 + return 1;
58313 +#endif
58314 +}
58315 +
58316 +EXPORT_SYMBOL(gr_is_capable);
58317 +EXPORT_SYMBOL(gr_is_capable_nolog);
58318 +EXPORT_SYMBOL(gr_task_is_capable);
58319 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
58320 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58321 new file mode 100644
58322 index 0000000..d3ee748
58323 --- /dev/null
58324 +++ b/grsecurity/grsec_fifo.c
58325 @@ -0,0 +1,24 @@
58326 +#include <linux/kernel.h>
58327 +#include <linux/sched.h>
58328 +#include <linux/fs.h>
58329 +#include <linux/file.h>
58330 +#include <linux/grinternal.h>
58331 +
58332 +int
58333 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58334 + const struct dentry *dir, const int flag, const int acc_mode)
58335 +{
58336 +#ifdef CONFIG_GRKERNSEC_FIFO
58337 + const struct cred *cred = current_cred();
58338 +
58339 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58340 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58341 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58342 + (cred->fsuid != dentry->d_inode->i_uid)) {
58343 + if (!inode_permission(dentry->d_inode, acc_mode))
58344 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58345 + return -EACCES;
58346 + }
58347 +#endif
58348 + return 0;
58349 +}
58350 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58351 new file mode 100644
58352 index 0000000..8ca18bf
58353 --- /dev/null
58354 +++ b/grsecurity/grsec_fork.c
58355 @@ -0,0 +1,23 @@
58356 +#include <linux/kernel.h>
58357 +#include <linux/sched.h>
58358 +#include <linux/grsecurity.h>
58359 +#include <linux/grinternal.h>
58360 +#include <linux/errno.h>
58361 +
58362 +void
58363 +gr_log_forkfail(const int retval)
58364 +{
58365 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58366 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58367 + switch (retval) {
58368 + case -EAGAIN:
58369 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58370 + break;
58371 + case -ENOMEM:
58372 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58373 + break;
58374 + }
58375 + }
58376 +#endif
58377 + return;
58378 +}
58379 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58380 new file mode 100644
58381 index 0000000..05a6015
58382 --- /dev/null
58383 +++ b/grsecurity/grsec_init.c
58384 @@ -0,0 +1,283 @@
58385 +#include <linux/kernel.h>
58386 +#include <linux/sched.h>
58387 +#include <linux/mm.h>
58388 +#include <linux/gracl.h>
58389 +#include <linux/slab.h>
58390 +#include <linux/vmalloc.h>
58391 +#include <linux/percpu.h>
58392 +#include <linux/module.h>
58393 +
58394 +int grsec_enable_ptrace_readexec;
58395 +int grsec_enable_setxid;
58396 +int grsec_enable_symlinkown;
58397 +int grsec_symlinkown_gid;
58398 +int grsec_enable_brute;
58399 +int grsec_enable_link;
58400 +int grsec_enable_dmesg;
58401 +int grsec_enable_harden_ptrace;
58402 +int grsec_enable_fifo;
58403 +int grsec_enable_execlog;
58404 +int grsec_enable_signal;
58405 +int grsec_enable_forkfail;
58406 +int grsec_enable_audit_ptrace;
58407 +int grsec_enable_time;
58408 +int grsec_enable_audit_textrel;
58409 +int grsec_enable_group;
58410 +int grsec_audit_gid;
58411 +int grsec_enable_chdir;
58412 +int grsec_enable_mount;
58413 +int grsec_enable_rofs;
58414 +int grsec_enable_chroot_findtask;
58415 +int grsec_enable_chroot_mount;
58416 +int grsec_enable_chroot_shmat;
58417 +int grsec_enable_chroot_fchdir;
58418 +int grsec_enable_chroot_double;
58419 +int grsec_enable_chroot_pivot;
58420 +int grsec_enable_chroot_chdir;
58421 +int grsec_enable_chroot_chmod;
58422 +int grsec_enable_chroot_mknod;
58423 +int grsec_enable_chroot_nice;
58424 +int grsec_enable_chroot_execlog;
58425 +int grsec_enable_chroot_caps;
58426 +int grsec_enable_chroot_sysctl;
58427 +int grsec_enable_chroot_unix;
58428 +int grsec_enable_tpe;
58429 +int grsec_tpe_gid;
58430 +int grsec_enable_blackhole;
58431 +#ifdef CONFIG_IPV6_MODULE
58432 +EXPORT_SYMBOL(grsec_enable_blackhole);
58433 +#endif
58434 +int grsec_lastack_retries;
58435 +int grsec_enable_tpe_all;
58436 +int grsec_enable_tpe_invert;
58437 +int grsec_enable_socket_all;
58438 +int grsec_socket_all_gid;
58439 +int grsec_enable_socket_client;
58440 +int grsec_socket_client_gid;
58441 +int grsec_enable_socket_server;
58442 +int grsec_socket_server_gid;
58443 +int grsec_resource_logging;
58444 +int grsec_disable_privio;
58445 +int grsec_enable_log_rwxmaps;
58446 +int grsec_lock;
58447 +
58448 +DEFINE_SPINLOCK(grsec_alert_lock);
58449 +unsigned long grsec_alert_wtime = 0;
58450 +unsigned long grsec_alert_fyet = 0;
58451 +
58452 +DEFINE_SPINLOCK(grsec_audit_lock);
58453 +
58454 +DEFINE_RWLOCK(grsec_exec_file_lock);
58455 +
58456 +char *gr_shared_page[4];
58457 +
58458 +char *gr_alert_log_fmt;
58459 +char *gr_audit_log_fmt;
58460 +char *gr_alert_log_buf;
58461 +char *gr_audit_log_buf;
58462 +
58463 +extern struct gr_arg *gr_usermode;
58464 +extern unsigned char *gr_system_salt;
58465 +extern unsigned char *gr_system_sum;
58466 +
58467 +void __init
58468 +grsecurity_init(void)
58469 +{
58470 + int j;
58471 + /* create the per-cpu shared pages */
58472 +
58473 +#ifdef CONFIG_X86
58474 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58475 +#endif
58476 +
58477 + for (j = 0; j < 4; j++) {
58478 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58479 + if (gr_shared_page[j] == NULL) {
58480 + panic("Unable to allocate grsecurity shared page");
58481 + return;
58482 + }
58483 + }
58484 +
58485 + /* allocate log buffers */
58486 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58487 + if (!gr_alert_log_fmt) {
58488 + panic("Unable to allocate grsecurity alert log format buffer");
58489 + return;
58490 + }
58491 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58492 + if (!gr_audit_log_fmt) {
58493 + panic("Unable to allocate grsecurity audit log format buffer");
58494 + return;
58495 + }
58496 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58497 + if (!gr_alert_log_buf) {
58498 + panic("Unable to allocate grsecurity alert log buffer");
58499 + return;
58500 + }
58501 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58502 + if (!gr_audit_log_buf) {
58503 + panic("Unable to allocate grsecurity audit log buffer");
58504 + return;
58505 + }
58506 +
58507 + /* allocate memory for authentication structure */
58508 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58509 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58510 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58511 +
58512 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58513 + panic("Unable to allocate grsecurity authentication structure");
58514 + return;
58515 + }
58516 +
58517 +
58518 +#ifdef CONFIG_GRKERNSEC_IO
58519 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58520 + grsec_disable_privio = 1;
58521 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58522 + grsec_disable_privio = 1;
58523 +#else
58524 + grsec_disable_privio = 0;
58525 +#endif
58526 +#endif
58527 +
58528 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58529 + /* for backward compatibility, tpe_invert always defaults to on if
58530 + enabled in the kernel
58531 + */
58532 + grsec_enable_tpe_invert = 1;
58533 +#endif
58534 +
58535 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58536 +#ifndef CONFIG_GRKERNSEC_SYSCTL
58537 + grsec_lock = 1;
58538 +#endif
58539 +
58540 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58541 + grsec_enable_audit_textrel = 1;
58542 +#endif
58543 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58544 + grsec_enable_log_rwxmaps = 1;
58545 +#endif
58546 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58547 + grsec_enable_group = 1;
58548 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58549 +#endif
58550 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58551 + grsec_enable_ptrace_readexec = 1;
58552 +#endif
58553 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58554 + grsec_enable_chdir = 1;
58555 +#endif
58556 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58557 + grsec_enable_harden_ptrace = 1;
58558 +#endif
58559 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58560 + grsec_enable_mount = 1;
58561 +#endif
58562 +#ifdef CONFIG_GRKERNSEC_LINK
58563 + grsec_enable_link = 1;
58564 +#endif
58565 +#ifdef CONFIG_GRKERNSEC_BRUTE
58566 + grsec_enable_brute = 1;
58567 +#endif
58568 +#ifdef CONFIG_GRKERNSEC_DMESG
58569 + grsec_enable_dmesg = 1;
58570 +#endif
58571 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58572 + grsec_enable_blackhole = 1;
58573 + grsec_lastack_retries = 4;
58574 +#endif
58575 +#ifdef CONFIG_GRKERNSEC_FIFO
58576 + grsec_enable_fifo = 1;
58577 +#endif
58578 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58579 + grsec_enable_execlog = 1;
58580 +#endif
58581 +#ifdef CONFIG_GRKERNSEC_SETXID
58582 + grsec_enable_setxid = 1;
58583 +#endif
58584 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58585 + grsec_enable_signal = 1;
58586 +#endif
58587 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58588 + grsec_enable_forkfail = 1;
58589 +#endif
58590 +#ifdef CONFIG_GRKERNSEC_TIME
58591 + grsec_enable_time = 1;
58592 +#endif
58593 +#ifdef CONFIG_GRKERNSEC_RESLOG
58594 + grsec_resource_logging = 1;
58595 +#endif
58596 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58597 + grsec_enable_chroot_findtask = 1;
58598 +#endif
58599 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58600 + grsec_enable_chroot_unix = 1;
58601 +#endif
58602 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58603 + grsec_enable_chroot_mount = 1;
58604 +#endif
58605 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58606 + grsec_enable_chroot_fchdir = 1;
58607 +#endif
58608 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58609 + grsec_enable_chroot_shmat = 1;
58610 +#endif
58611 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58612 + grsec_enable_audit_ptrace = 1;
58613 +#endif
58614 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58615 + grsec_enable_chroot_double = 1;
58616 +#endif
58617 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58618 + grsec_enable_chroot_pivot = 1;
58619 +#endif
58620 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58621 + grsec_enable_chroot_chdir = 1;
58622 +#endif
58623 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58624 + grsec_enable_chroot_chmod = 1;
58625 +#endif
58626 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58627 + grsec_enable_chroot_mknod = 1;
58628 +#endif
58629 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58630 + grsec_enable_chroot_nice = 1;
58631 +#endif
58632 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58633 + grsec_enable_chroot_execlog = 1;
58634 +#endif
58635 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58636 + grsec_enable_chroot_caps = 1;
58637 +#endif
58638 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58639 + grsec_enable_chroot_sysctl = 1;
58640 +#endif
58641 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58642 + grsec_enable_symlinkown = 1;
58643 + grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
58644 +#endif
58645 +#ifdef CONFIG_GRKERNSEC_TPE
58646 + grsec_enable_tpe = 1;
58647 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58648 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58649 + grsec_enable_tpe_all = 1;
58650 +#endif
58651 +#endif
58652 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58653 + grsec_enable_socket_all = 1;
58654 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58655 +#endif
58656 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58657 + grsec_enable_socket_client = 1;
58658 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58659 +#endif
58660 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58661 + grsec_enable_socket_server = 1;
58662 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58663 +#endif
58664 +#endif
58665 +
58666 + return;
58667 +}
58668 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58669 new file mode 100644
58670 index 0000000..589481f
58671 --- /dev/null
58672 +++ b/grsecurity/grsec_link.c
58673 @@ -0,0 +1,58 @@
58674 +#include <linux/kernel.h>
58675 +#include <linux/sched.h>
58676 +#include <linux/fs.h>
58677 +#include <linux/file.h>
58678 +#include <linux/grinternal.h>
58679 +
58680 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
58681 +{
58682 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58683 + const struct inode *link_inode = link->dentry->d_inode;
58684 +
58685 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
58686 + /* ignore root-owned links, e.g. /proc/self */
58687 + !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
58688 + !uid_eq(link_inode->i_uid, target->i_uid)) {
58689 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
58690 + return 1;
58691 + }
58692 +#endif
58693 + return 0;
58694 +}
58695 +
58696 +int
58697 +gr_handle_follow_link(const struct inode *parent,
58698 + const struct inode *inode,
58699 + const struct dentry *dentry, const struct vfsmount *mnt)
58700 +{
58701 +#ifdef CONFIG_GRKERNSEC_LINK
58702 + const struct cred *cred = current_cred();
58703 +
58704 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58705 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
58706 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
58707 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58708 + return -EACCES;
58709 + }
58710 +#endif
58711 + return 0;
58712 +}
58713 +
58714 +int
58715 +gr_handle_hardlink(const struct dentry *dentry,
58716 + const struct vfsmount *mnt,
58717 + struct inode *inode, const int mode, const char *to)
58718 +{
58719 +#ifdef CONFIG_GRKERNSEC_LINK
58720 + const struct cred *cred = current_cred();
58721 +
58722 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
58723 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
58724 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58725 + !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
58726 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58727 + return -EPERM;
58728 + }
58729 +#endif
58730 + return 0;
58731 +}
58732 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58733 new file mode 100644
58734 index 0000000..a45d2e9
58735 --- /dev/null
58736 +++ b/grsecurity/grsec_log.c
58737 @@ -0,0 +1,322 @@
58738 +#include <linux/kernel.h>
58739 +#include <linux/sched.h>
58740 +#include <linux/file.h>
58741 +#include <linux/tty.h>
58742 +#include <linux/fs.h>
58743 +#include <linux/grinternal.h>
58744 +
58745 +#ifdef CONFIG_TREE_PREEMPT_RCU
58746 +#define DISABLE_PREEMPT() preempt_disable()
58747 +#define ENABLE_PREEMPT() preempt_enable()
58748 +#else
58749 +#define DISABLE_PREEMPT()
58750 +#define ENABLE_PREEMPT()
58751 +#endif
58752 +
58753 +#define BEGIN_LOCKS(x) \
58754 + DISABLE_PREEMPT(); \
58755 + rcu_read_lock(); \
58756 + read_lock(&tasklist_lock); \
58757 + read_lock(&grsec_exec_file_lock); \
58758 + if (x != GR_DO_AUDIT) \
58759 + spin_lock(&grsec_alert_lock); \
58760 + else \
58761 + spin_lock(&grsec_audit_lock)
58762 +
58763 +#define END_LOCKS(x) \
58764 + if (x != GR_DO_AUDIT) \
58765 + spin_unlock(&grsec_alert_lock); \
58766 + else \
58767 + spin_unlock(&grsec_audit_lock); \
58768 + read_unlock(&grsec_exec_file_lock); \
58769 + read_unlock(&tasklist_lock); \
58770 + rcu_read_unlock(); \
58771 + ENABLE_PREEMPT(); \
58772 + if (x == GR_DONT_AUDIT) \
58773 + gr_handle_alertkill(current)
58774 +
58775 +enum {
58776 + FLOODING,
58777 + NO_FLOODING
58778 +};
58779 +
58780 +extern char *gr_alert_log_fmt;
58781 +extern char *gr_audit_log_fmt;
58782 +extern char *gr_alert_log_buf;
58783 +extern char *gr_audit_log_buf;
58784 +
58785 +static int gr_log_start(int audit)
58786 +{
58787 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58788 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58789 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58790 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58791 + unsigned long curr_secs = get_seconds();
58792 +
58793 + if (audit == GR_DO_AUDIT)
58794 + goto set_fmt;
58795 +
58796 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58797 + grsec_alert_wtime = curr_secs;
58798 + grsec_alert_fyet = 0;
58799 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58800 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58801 + grsec_alert_fyet++;
58802 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58803 + grsec_alert_wtime = curr_secs;
58804 + grsec_alert_fyet++;
58805 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58806 + return FLOODING;
58807 + }
58808 + else return FLOODING;
58809 +
58810 +set_fmt:
58811 +#endif
58812 + memset(buf, 0, PAGE_SIZE);
58813 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
58814 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58815 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58816 + } else if (current->signal->curr_ip) {
58817 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58818 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58819 + } else if (gr_acl_is_enabled()) {
58820 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58821 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58822 + } else {
58823 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58824 + strcpy(buf, fmt);
58825 + }
58826 +
58827 + return NO_FLOODING;
58828 +}
58829 +
58830 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58831 + __attribute__ ((format (printf, 2, 0)));
58832 +
58833 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58834 +{
58835 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58836 + unsigned int len = strlen(buf);
58837 +
58838 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58839 +
58840 + return;
58841 +}
58842 +
58843 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58844 + __attribute__ ((format (printf, 2, 3)));
58845 +
58846 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58847 +{
58848 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58849 + unsigned int len = strlen(buf);
58850 + va_list ap;
58851 +
58852 + va_start(ap, msg);
58853 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58854 + va_end(ap);
58855 +
58856 + return;
58857 +}
58858 +
58859 +static void gr_log_end(int audit, int append_default)
58860 +{
58861 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58862 +
58863 + if (append_default) {
58864 + unsigned int len = strlen(buf);
58865 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58866 + }
58867 +
58868 + printk("%s\n", buf);
58869 +
58870 + return;
58871 +}
58872 +
58873 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58874 +{
58875 + int logtype;
58876 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58877 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58878 + void *voidptr = NULL;
58879 + int num1 = 0, num2 = 0;
58880 + unsigned long ulong1 = 0, ulong2 = 0;
58881 + struct dentry *dentry = NULL;
58882 + struct vfsmount *mnt = NULL;
58883 + struct file *file = NULL;
58884 + struct task_struct *task = NULL;
58885 + const struct cred *cred, *pcred;
58886 + va_list ap;
58887 +
58888 + BEGIN_LOCKS(audit);
58889 + logtype = gr_log_start(audit);
58890 + if (logtype == FLOODING) {
58891 + END_LOCKS(audit);
58892 + return;
58893 + }
58894 + va_start(ap, argtypes);
58895 + switch (argtypes) {
58896 + case GR_TTYSNIFF:
58897 + task = va_arg(ap, struct task_struct *);
58898 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58899 + break;
58900 + case GR_SYSCTL_HIDDEN:
58901 + str1 = va_arg(ap, char *);
58902 + gr_log_middle_varargs(audit, msg, result, str1);
58903 + break;
58904 + case GR_RBAC:
58905 + dentry = va_arg(ap, struct dentry *);
58906 + mnt = va_arg(ap, struct vfsmount *);
58907 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58908 + break;
58909 + case GR_RBAC_STR:
58910 + dentry = va_arg(ap, struct dentry *);
58911 + mnt = va_arg(ap, struct vfsmount *);
58912 + str1 = va_arg(ap, char *);
58913 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58914 + break;
58915 + case GR_STR_RBAC:
58916 + str1 = va_arg(ap, char *);
58917 + dentry = va_arg(ap, struct dentry *);
58918 + mnt = va_arg(ap, struct vfsmount *);
58919 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58920 + break;
58921 + case GR_RBAC_MODE2:
58922 + dentry = va_arg(ap, struct dentry *);
58923 + mnt = va_arg(ap, struct vfsmount *);
58924 + str1 = va_arg(ap, char *);
58925 + str2 = va_arg(ap, char *);
58926 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58927 + break;
58928 + case GR_RBAC_MODE3:
58929 + dentry = va_arg(ap, struct dentry *);
58930 + mnt = va_arg(ap, struct vfsmount *);
58931 + str1 = va_arg(ap, char *);
58932 + str2 = va_arg(ap, char *);
58933 + str3 = va_arg(ap, char *);
58934 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58935 + break;
58936 + case GR_FILENAME:
58937 + dentry = va_arg(ap, struct dentry *);
58938 + mnt = va_arg(ap, struct vfsmount *);
58939 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58940 + break;
58941 + case GR_STR_FILENAME:
58942 + str1 = va_arg(ap, char *);
58943 + dentry = va_arg(ap, struct dentry *);
58944 + mnt = va_arg(ap, struct vfsmount *);
58945 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58946 + break;
58947 + case GR_FILENAME_STR:
58948 + dentry = va_arg(ap, struct dentry *);
58949 + mnt = va_arg(ap, struct vfsmount *);
58950 + str1 = va_arg(ap, char *);
58951 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58952 + break;
58953 + case GR_FILENAME_TWO_INT:
58954 + dentry = va_arg(ap, struct dentry *);
58955 + mnt = va_arg(ap, struct vfsmount *);
58956 + num1 = va_arg(ap, int);
58957 + num2 = va_arg(ap, int);
58958 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58959 + break;
58960 + case GR_FILENAME_TWO_INT_STR:
58961 + dentry = va_arg(ap, struct dentry *);
58962 + mnt = va_arg(ap, struct vfsmount *);
58963 + num1 = va_arg(ap, int);
58964 + num2 = va_arg(ap, int);
58965 + str1 = va_arg(ap, char *);
58966 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58967 + break;
58968 + case GR_TEXTREL:
58969 + file = va_arg(ap, struct file *);
58970 + ulong1 = va_arg(ap, unsigned long);
58971 + ulong2 = va_arg(ap, unsigned long);
58972 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58973 + break;
58974 + case GR_PTRACE:
58975 + task = va_arg(ap, struct task_struct *);
58976 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58977 + break;
58978 + case GR_RESOURCE:
58979 + task = va_arg(ap, struct task_struct *);
58980 + cred = __task_cred(task);
58981 + pcred = __task_cred(task->real_parent);
58982 + ulong1 = va_arg(ap, unsigned long);
58983 + str1 = va_arg(ap, char *);
58984 + ulong2 = va_arg(ap, unsigned long);
58985 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58986 + break;
58987 + case GR_CAP:
58988 + task = va_arg(ap, struct task_struct *);
58989 + cred = __task_cred(task);
58990 + pcred = __task_cred(task->real_parent);
58991 + str1 = va_arg(ap, char *);
58992 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58993 + break;
58994 + case GR_SIG:
58995 + str1 = va_arg(ap, char *);
58996 + voidptr = va_arg(ap, void *);
58997 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58998 + break;
58999 + case GR_SIG2:
59000 + task = va_arg(ap, struct task_struct *);
59001 + cred = __task_cred(task);
59002 + pcred = __task_cred(task->real_parent);
59003 + num1 = va_arg(ap, int);
59004 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59005 + break;
59006 + case GR_CRASH1:
59007 + task = va_arg(ap, struct task_struct *);
59008 + cred = __task_cred(task);
59009 + pcred = __task_cred(task->real_parent);
59010 + ulong1 = va_arg(ap, unsigned long);
59011 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
59012 + break;
59013 + case GR_CRASH2:
59014 + task = va_arg(ap, struct task_struct *);
59015 + cred = __task_cred(task);
59016 + pcred = __task_cred(task->real_parent);
59017 + ulong1 = va_arg(ap, unsigned long);
59018 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
59019 + break;
59020 + case GR_RWXMAP:
59021 + file = va_arg(ap, struct file *);
59022 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
59023 + break;
59024 + case GR_PSACCT:
59025 + {
59026 + unsigned int wday, cday;
59027 + __u8 whr, chr;
59028 + __u8 wmin, cmin;
59029 + __u8 wsec, csec;
59030 + char cur_tty[64] = { 0 };
59031 + char parent_tty[64] = { 0 };
59032 +
59033 + task = va_arg(ap, struct task_struct *);
59034 + wday = va_arg(ap, unsigned int);
59035 + cday = va_arg(ap, unsigned int);
59036 + whr = va_arg(ap, int);
59037 + chr = va_arg(ap, int);
59038 + wmin = va_arg(ap, int);
59039 + cmin = va_arg(ap, int);
59040 + wsec = va_arg(ap, int);
59041 + csec = va_arg(ap, int);
59042 + ulong1 = va_arg(ap, unsigned long);
59043 + cred = __task_cred(task);
59044 + pcred = __task_cred(task->real_parent);
59045 +
59046 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59047 + }
59048 + break;
59049 + default:
59050 + gr_log_middle(audit, msg, ap);
59051 + }
59052 + va_end(ap);
59053 + // these don't need DEFAULTSECARGS printed on the end
59054 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
59055 + gr_log_end(audit, 0);
59056 + else
59057 + gr_log_end(audit, 1);
59058 + END_LOCKS(audit);
59059 +}
59060 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
59061 new file mode 100644
59062 index 0000000..f536303
59063 --- /dev/null
59064 +++ b/grsecurity/grsec_mem.c
59065 @@ -0,0 +1,40 @@
59066 +#include <linux/kernel.h>
59067 +#include <linux/sched.h>
59068 +#include <linux/mm.h>
59069 +#include <linux/mman.h>
59070 +#include <linux/grinternal.h>
59071 +
59072 +void
59073 +gr_handle_ioperm(void)
59074 +{
59075 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
59076 + return;
59077 +}
59078 +
59079 +void
59080 +gr_handle_iopl(void)
59081 +{
59082 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
59083 + return;
59084 +}
59085 +
59086 +void
59087 +gr_handle_mem_readwrite(u64 from, u64 to)
59088 +{
59089 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
59090 + return;
59091 +}
59092 +
59093 +void
59094 +gr_handle_vm86(void)
59095 +{
59096 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
59097 + return;
59098 +}
59099 +
59100 +void
59101 +gr_log_badprocpid(const char *entry)
59102 +{
59103 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
59104 + return;
59105 +}
59106 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
59107 new file mode 100644
59108 index 0000000..2131422
59109 --- /dev/null
59110 +++ b/grsecurity/grsec_mount.c
59111 @@ -0,0 +1,62 @@
59112 +#include <linux/kernel.h>
59113 +#include <linux/sched.h>
59114 +#include <linux/mount.h>
59115 +#include <linux/grsecurity.h>
59116 +#include <linux/grinternal.h>
59117 +
59118 +void
59119 +gr_log_remount(const char *devname, const int retval)
59120 +{
59121 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59122 + if (grsec_enable_mount && (retval >= 0))
59123 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
59124 +#endif
59125 + return;
59126 +}
59127 +
59128 +void
59129 +gr_log_unmount(const char *devname, const int retval)
59130 +{
59131 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59132 + if (grsec_enable_mount && (retval >= 0))
59133 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
59134 +#endif
59135 + return;
59136 +}
59137 +
59138 +void
59139 +gr_log_mount(const char *from, const char *to, const int retval)
59140 +{
59141 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59142 + if (grsec_enable_mount && (retval >= 0))
59143 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
59144 +#endif
59145 + return;
59146 +}
59147 +
59148 +int
59149 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
59150 +{
59151 +#ifdef CONFIG_GRKERNSEC_ROFS
59152 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
59153 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
59154 + return -EPERM;
59155 + } else
59156 + return 0;
59157 +#endif
59158 + return 0;
59159 +}
59160 +
59161 +int
59162 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
59163 +{
59164 +#ifdef CONFIG_GRKERNSEC_ROFS
59165 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
59166 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
59167 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
59168 + return -EPERM;
59169 + } else
59170 + return 0;
59171 +#endif
59172 + return 0;
59173 +}
59174 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
59175 new file mode 100644
59176 index 0000000..a3b12a0
59177 --- /dev/null
59178 +++ b/grsecurity/grsec_pax.c
59179 @@ -0,0 +1,36 @@
59180 +#include <linux/kernel.h>
59181 +#include <linux/sched.h>
59182 +#include <linux/mm.h>
59183 +#include <linux/file.h>
59184 +#include <linux/grinternal.h>
59185 +#include <linux/grsecurity.h>
59186 +
59187 +void
59188 +gr_log_textrel(struct vm_area_struct * vma)
59189 +{
59190 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59191 + if (grsec_enable_audit_textrel)
59192 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
59193 +#endif
59194 + return;
59195 +}
59196 +
59197 +void
59198 +gr_log_rwxmmap(struct file *file)
59199 +{
59200 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59201 + if (grsec_enable_log_rwxmaps)
59202 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
59203 +#endif
59204 + return;
59205 +}
59206 +
59207 +void
59208 +gr_log_rwxmprotect(struct file *file)
59209 +{
59210 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59211 + if (grsec_enable_log_rwxmaps)
59212 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
59213 +#endif
59214 + return;
59215 +}
59216 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
59217 new file mode 100644
59218 index 0000000..f7f29aa
59219 --- /dev/null
59220 +++ b/grsecurity/grsec_ptrace.c
59221 @@ -0,0 +1,30 @@
59222 +#include <linux/kernel.h>
59223 +#include <linux/sched.h>
59224 +#include <linux/grinternal.h>
59225 +#include <linux/security.h>
59226 +
59227 +void
59228 +gr_audit_ptrace(struct task_struct *task)
59229 +{
59230 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59231 + if (grsec_enable_audit_ptrace)
59232 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
59233 +#endif
59234 + return;
59235 +}
59236 +
59237 +int
59238 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
59239 +{
59240 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59241 + const struct dentry *dentry = file->f_path.dentry;
59242 + const struct vfsmount *mnt = file->f_path.mnt;
59243 +
59244 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
59245 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
59246 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
59247 + return -EACCES;
59248 + }
59249 +#endif
59250 + return 0;
59251 +}
59252 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59253 new file mode 100644
59254 index 0000000..b4ac94c
59255 --- /dev/null
59256 +++ b/grsecurity/grsec_sig.c
59257 @@ -0,0 +1,209 @@
59258 +#include <linux/kernel.h>
59259 +#include <linux/sched.h>
59260 +#include <linux/delay.h>
59261 +#include <linux/grsecurity.h>
59262 +#include <linux/grinternal.h>
59263 +#include <linux/hardirq.h>
59264 +
59265 +char *signames[] = {
59266 + [SIGSEGV] = "Segmentation fault",
59267 + [SIGILL] = "Illegal instruction",
59268 + [SIGABRT] = "Abort",
59269 + [SIGBUS] = "Invalid alignment/Bus error"
59270 +};
59271 +
59272 +void
59273 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59274 +{
59275 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59276 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59277 + (sig == SIGABRT) || (sig == SIGBUS))) {
59278 + if (t->pid == current->pid) {
59279 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59280 + } else {
59281 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59282 + }
59283 + }
59284 +#endif
59285 + return;
59286 +}
59287 +
59288 +int
59289 +gr_handle_signal(const struct task_struct *p, const int sig)
59290 +{
59291 +#ifdef CONFIG_GRKERNSEC
59292 + /* ignore the 0 signal for protected task checks */
59293 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
59294 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59295 + return -EPERM;
59296 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59297 + return -EPERM;
59298 + }
59299 +#endif
59300 + return 0;
59301 +}
59302 +
59303 +#ifdef CONFIG_GRKERNSEC
59304 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59305 +
59306 +int gr_fake_force_sig(int sig, struct task_struct *t)
59307 +{
59308 + unsigned long int flags;
59309 + int ret, blocked, ignored;
59310 + struct k_sigaction *action;
59311 +
59312 + spin_lock_irqsave(&t->sighand->siglock, flags);
59313 + action = &t->sighand->action[sig-1];
59314 + ignored = action->sa.sa_handler == SIG_IGN;
59315 + blocked = sigismember(&t->blocked, sig);
59316 + if (blocked || ignored) {
59317 + action->sa.sa_handler = SIG_DFL;
59318 + if (blocked) {
59319 + sigdelset(&t->blocked, sig);
59320 + recalc_sigpending_and_wake(t);
59321 + }
59322 + }
59323 + if (action->sa.sa_handler == SIG_DFL)
59324 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
59325 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59326 +
59327 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
59328 +
59329 + return ret;
59330 +}
59331 +#endif
59332 +
59333 +#ifdef CONFIG_GRKERNSEC_BRUTE
59334 +#define GR_USER_BAN_TIME (15 * 60)
59335 +
59336 +static int __get_dumpable(unsigned long mm_flags)
59337 +{
59338 + int ret;
59339 +
59340 + ret = mm_flags & MMF_DUMPABLE_MASK;
59341 + return (ret >= 2) ? 2 : ret;
59342 +}
59343 +#endif
59344 +
59345 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
59346 +{
59347 +#ifdef CONFIG_GRKERNSEC_BRUTE
59348 + kuid_t uid = GLOBAL_ROOT_UID;
59349 +
59350 + if (!grsec_enable_brute)
59351 + return;
59352 +
59353 + rcu_read_lock();
59354 + read_lock(&tasklist_lock);
59355 + read_lock(&grsec_exec_file_lock);
59356 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59357 + p->real_parent->brute = 1;
59358 + else {
59359 + const struct cred *cred = __task_cred(p), *cred2;
59360 + struct task_struct *tsk, *tsk2;
59361 +
59362 + if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
59363 + struct user_struct *user;
59364 +
59365 + uid = cred->uid;
59366 +
59367 + /* this is put upon execution past expiration */
59368 + user = find_user(uid);
59369 + if (user == NULL)
59370 + goto unlock;
59371 + user->banned = 1;
59372 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59373 + if (user->ban_expires == ~0UL)
59374 + user->ban_expires--;
59375 +
59376 + do_each_thread(tsk2, tsk) {
59377 + cred2 = __task_cred(tsk);
59378 + if (tsk != p && uid_eq(cred2->uid, uid))
59379 + gr_fake_force_sig(SIGKILL, tsk);
59380 + } while_each_thread(tsk2, tsk);
59381 + }
59382 + }
59383 +unlock:
59384 + read_unlock(&grsec_exec_file_lock);
59385 + read_unlock(&tasklist_lock);
59386 + rcu_read_unlock();
59387 +
59388 + if (!uid_eq(uid, GLOBAL_ROOT_UID))
59389 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
59390 + from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
59391 +
59392 +#endif
59393 + return;
59394 +}
59395 +
59396 +void gr_handle_brute_check(void)
59397 +{
59398 +#ifdef CONFIG_GRKERNSEC_BRUTE
59399 + if (current->brute)
59400 + msleep(30 * 1000);
59401 +#endif
59402 + return;
59403 +}
59404 +
59405 +void gr_handle_kernel_exploit(void)
59406 +{
59407 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59408 + const struct cred *cred;
59409 + struct task_struct *tsk, *tsk2;
59410 + struct user_struct *user;
59411 + kuid_t uid;
59412 +
59413 + if (in_irq() || in_serving_softirq() || in_nmi())
59414 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59415 +
59416 + uid = current_uid();
59417 +
59418 + if (uid_eq(uid, GLOBAL_ROOT_UID))
59419 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
59420 + else {
59421 + /* kill all the processes of this user, hold a reference
59422 + to their creds struct, and prevent them from creating
59423 + another process until system reset
59424 + */
59425 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
59426 + from_kuid_munged(&init_user_ns, uid));
59427 + /* we intentionally leak this ref */
59428 + user = get_uid(current->cred->user);
59429 + if (user) {
59430 + user->banned = 1;
59431 + user->ban_expires = ~0UL;
59432 + }
59433 +
59434 + read_lock(&tasklist_lock);
59435 + do_each_thread(tsk2, tsk) {
59436 + cred = __task_cred(tsk);
59437 + if (uid_eq(cred->uid, uid))
59438 + gr_fake_force_sig(SIGKILL, tsk);
59439 + } while_each_thread(tsk2, tsk);
59440 + read_unlock(&tasklist_lock);
59441 + }
59442 +#endif
59443 +}
59444 +
59445 +int __gr_process_user_ban(struct user_struct *user)
59446 +{
59447 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59448 + if (unlikely(user->banned)) {
59449 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59450 + user->banned = 0;
59451 + user->ban_expires = 0;
59452 + free_uid(user);
59453 + } else
59454 + return -EPERM;
59455 + }
59456 +#endif
59457 + return 0;
59458 +}
59459 +
59460 +int gr_process_user_ban(void)
59461 +{
59462 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59463 + return __gr_process_user_ban(current->cred->user);
59464 +#endif
59465 + return 0;
59466 +}
59467 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59468 new file mode 100644
59469 index 0000000..4030d57
59470 --- /dev/null
59471 +++ b/grsecurity/grsec_sock.c
59472 @@ -0,0 +1,244 @@
59473 +#include <linux/kernel.h>
59474 +#include <linux/module.h>
59475 +#include <linux/sched.h>
59476 +#include <linux/file.h>
59477 +#include <linux/net.h>
59478 +#include <linux/in.h>
59479 +#include <linux/ip.h>
59480 +#include <net/sock.h>
59481 +#include <net/inet_sock.h>
59482 +#include <linux/grsecurity.h>
59483 +#include <linux/grinternal.h>
59484 +#include <linux/gracl.h>
59485 +
59486 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59487 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59488 +
59489 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
59490 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
59491 +
59492 +#ifdef CONFIG_UNIX_MODULE
59493 +EXPORT_SYMBOL(gr_acl_handle_unix);
59494 +EXPORT_SYMBOL(gr_acl_handle_mknod);
59495 +EXPORT_SYMBOL(gr_handle_chroot_unix);
59496 +EXPORT_SYMBOL(gr_handle_create);
59497 +#endif
59498 +
59499 +#ifdef CONFIG_GRKERNSEC
59500 +#define gr_conn_table_size 32749
59501 +struct conn_table_entry {
59502 + struct conn_table_entry *next;
59503 + struct signal_struct *sig;
59504 +};
59505 +
59506 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59507 +DEFINE_SPINLOCK(gr_conn_table_lock);
59508 +
59509 +extern const char * gr_socktype_to_name(unsigned char type);
59510 +extern const char * gr_proto_to_name(unsigned char proto);
59511 +extern const char * gr_sockfamily_to_name(unsigned char family);
59512 +
59513 +static __inline__ int
59514 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59515 +{
59516 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59517 +}
59518 +
59519 +static __inline__ int
59520 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59521 + __u16 sport, __u16 dport)
59522 +{
59523 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59524 + sig->gr_sport == sport && sig->gr_dport == dport))
59525 + return 1;
59526 + else
59527 + return 0;
59528 +}
59529 +
59530 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59531 +{
59532 + struct conn_table_entry **match;
59533 + unsigned int index;
59534 +
59535 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59536 + sig->gr_sport, sig->gr_dport,
59537 + gr_conn_table_size);
59538 +
59539 + newent->sig = sig;
59540 +
59541 + match = &gr_conn_table[index];
59542 + newent->next = *match;
59543 + *match = newent;
59544 +
59545 + return;
59546 +}
59547 +
59548 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59549 +{
59550 + struct conn_table_entry *match, *last = NULL;
59551 + unsigned int index;
59552 +
59553 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59554 + sig->gr_sport, sig->gr_dport,
59555 + gr_conn_table_size);
59556 +
59557 + match = gr_conn_table[index];
59558 + while (match && !conn_match(match->sig,
59559 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59560 + sig->gr_dport)) {
59561 + last = match;
59562 + match = match->next;
59563 + }
59564 +
59565 + if (match) {
59566 + if (last)
59567 + last->next = match->next;
59568 + else
59569 + gr_conn_table[index] = NULL;
59570 + kfree(match);
59571 + }
59572 +
59573 + return;
59574 +}
59575 +
59576 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59577 + __u16 sport, __u16 dport)
59578 +{
59579 + struct conn_table_entry *match;
59580 + unsigned int index;
59581 +
59582 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59583 +
59584 + match = gr_conn_table[index];
59585 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59586 + match = match->next;
59587 +
59588 + if (match)
59589 + return match->sig;
59590 + else
59591 + return NULL;
59592 +}
59593 +
59594 +#endif
59595 +
59596 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59597 +{
59598 +#ifdef CONFIG_GRKERNSEC
59599 + struct signal_struct *sig = task->signal;
59600 + struct conn_table_entry *newent;
59601 +
59602 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59603 + if (newent == NULL)
59604 + return;
59605 + /* no bh lock needed since we are called with bh disabled */
59606 + spin_lock(&gr_conn_table_lock);
59607 + gr_del_task_from_ip_table_nolock(sig);
59608 + sig->gr_saddr = inet->inet_rcv_saddr;
59609 + sig->gr_daddr = inet->inet_daddr;
59610 + sig->gr_sport = inet->inet_sport;
59611 + sig->gr_dport = inet->inet_dport;
59612 + gr_add_to_task_ip_table_nolock(sig, newent);
59613 + spin_unlock(&gr_conn_table_lock);
59614 +#endif
59615 + return;
59616 +}
59617 +
59618 +void gr_del_task_from_ip_table(struct task_struct *task)
59619 +{
59620 +#ifdef CONFIG_GRKERNSEC
59621 + spin_lock_bh(&gr_conn_table_lock);
59622 + gr_del_task_from_ip_table_nolock(task->signal);
59623 + spin_unlock_bh(&gr_conn_table_lock);
59624 +#endif
59625 + return;
59626 +}
59627 +
59628 +void
59629 +gr_attach_curr_ip(const struct sock *sk)
59630 +{
59631 +#ifdef CONFIG_GRKERNSEC
59632 + struct signal_struct *p, *set;
59633 + const struct inet_sock *inet = inet_sk(sk);
59634 +
59635 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59636 + return;
59637 +
59638 + set = current->signal;
59639 +
59640 + spin_lock_bh(&gr_conn_table_lock);
59641 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59642 + inet->inet_dport, inet->inet_sport);
59643 + if (unlikely(p != NULL)) {
59644 + set->curr_ip = p->curr_ip;
59645 + set->used_accept = 1;
59646 + gr_del_task_from_ip_table_nolock(p);
59647 + spin_unlock_bh(&gr_conn_table_lock);
59648 + return;
59649 + }
59650 + spin_unlock_bh(&gr_conn_table_lock);
59651 +
59652 + set->curr_ip = inet->inet_daddr;
59653 + set->used_accept = 1;
59654 +#endif
59655 + return;
59656 +}
59657 +
59658 +int
59659 +gr_handle_sock_all(const int family, const int type, const int protocol)
59660 +{
59661 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59662 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59663 + (family != AF_UNIX)) {
59664 + if (family == AF_INET)
59665 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59666 + else
59667 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59668 + return -EACCES;
59669 + }
59670 +#endif
59671 + return 0;
59672 +}
59673 +
59674 +int
59675 +gr_handle_sock_server(const struct sockaddr *sck)
59676 +{
59677 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59678 + if (grsec_enable_socket_server &&
59679 + in_group_p(grsec_socket_server_gid) &&
59680 + sck && (sck->sa_family != AF_UNIX) &&
59681 + (sck->sa_family != AF_LOCAL)) {
59682 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59683 + return -EACCES;
59684 + }
59685 +#endif
59686 + return 0;
59687 +}
59688 +
59689 +int
59690 +gr_handle_sock_server_other(const struct sock *sck)
59691 +{
59692 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59693 + if (grsec_enable_socket_server &&
59694 + in_group_p(grsec_socket_server_gid) &&
59695 + sck && (sck->sk_family != AF_UNIX) &&
59696 + (sck->sk_family != AF_LOCAL)) {
59697 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59698 + return -EACCES;
59699 + }
59700 +#endif
59701 + return 0;
59702 +}
59703 +
59704 +int
59705 +gr_handle_sock_client(const struct sockaddr *sck)
59706 +{
59707 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59708 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59709 + sck && (sck->sa_family != AF_UNIX) &&
59710 + (sck->sa_family != AF_LOCAL)) {
59711 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59712 + return -EACCES;
59713 + }
59714 +#endif
59715 + return 0;
59716 +}
59717 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59718 new file mode 100644
59719 index 0000000..f55ef0f
59720 --- /dev/null
59721 +++ b/grsecurity/grsec_sysctl.c
59722 @@ -0,0 +1,469 @@
59723 +#include <linux/kernel.h>
59724 +#include <linux/sched.h>
59725 +#include <linux/sysctl.h>
59726 +#include <linux/grsecurity.h>
59727 +#include <linux/grinternal.h>
59728 +
59729 +int
59730 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59731 +{
59732 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59733 + if (dirname == NULL || name == NULL)
59734 + return 0;
59735 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59736 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59737 + return -EACCES;
59738 + }
59739 +#endif
59740 + return 0;
59741 +}
59742 +
59743 +#ifdef CONFIG_GRKERNSEC_ROFS
59744 +static int __maybe_unused one = 1;
59745 +#endif
59746 +
59747 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59748 +struct ctl_table grsecurity_table[] = {
59749 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59750 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59751 +#ifdef CONFIG_GRKERNSEC_IO
59752 + {
59753 + .procname = "disable_priv_io",
59754 + .data = &grsec_disable_privio,
59755 + .maxlen = sizeof(int),
59756 + .mode = 0600,
59757 + .proc_handler = &proc_dointvec,
59758 + },
59759 +#endif
59760 +#endif
59761 +#ifdef CONFIG_GRKERNSEC_LINK
59762 + {
59763 + .procname = "linking_restrictions",
59764 + .data = &grsec_enable_link,
59765 + .maxlen = sizeof(int),
59766 + .mode = 0600,
59767 + .proc_handler = &proc_dointvec,
59768 + },
59769 +#endif
59770 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
59771 + {
59772 + .procname = "enforce_symlinksifowner",
59773 + .data = &grsec_enable_symlinkown,
59774 + .maxlen = sizeof(int),
59775 + .mode = 0600,
59776 + .proc_handler = &proc_dointvec,
59777 + },
59778 + {
59779 + .procname = "symlinkown_gid",
59780 + .data = &grsec_symlinkown_gid,
59781 + .maxlen = sizeof(int),
59782 + .mode = 0600,
59783 + .proc_handler = &proc_dointvec,
59784 + },
59785 +#endif
59786 +#ifdef CONFIG_GRKERNSEC_BRUTE
59787 + {
59788 + .procname = "deter_bruteforce",
59789 + .data = &grsec_enable_brute,
59790 + .maxlen = sizeof(int),
59791 + .mode = 0600,
59792 + .proc_handler = &proc_dointvec,
59793 + },
59794 +#endif
59795 +#ifdef CONFIG_GRKERNSEC_FIFO
59796 + {
59797 + .procname = "fifo_restrictions",
59798 + .data = &grsec_enable_fifo,
59799 + .maxlen = sizeof(int),
59800 + .mode = 0600,
59801 + .proc_handler = &proc_dointvec,
59802 + },
59803 +#endif
59804 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59805 + {
59806 + .procname = "ptrace_readexec",
59807 + .data = &grsec_enable_ptrace_readexec,
59808 + .maxlen = sizeof(int),
59809 + .mode = 0600,
59810 + .proc_handler = &proc_dointvec,
59811 + },
59812 +#endif
59813 +#ifdef CONFIG_GRKERNSEC_SETXID
59814 + {
59815 + .procname = "consistent_setxid",
59816 + .data = &grsec_enable_setxid,
59817 + .maxlen = sizeof(int),
59818 + .mode = 0600,
59819 + .proc_handler = &proc_dointvec,
59820 + },
59821 +#endif
59822 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59823 + {
59824 + .procname = "ip_blackhole",
59825 + .data = &grsec_enable_blackhole,
59826 + .maxlen = sizeof(int),
59827 + .mode = 0600,
59828 + .proc_handler = &proc_dointvec,
59829 + },
59830 + {
59831 + .procname = "lastack_retries",
59832 + .data = &grsec_lastack_retries,
59833 + .maxlen = sizeof(int),
59834 + .mode = 0600,
59835 + .proc_handler = &proc_dointvec,
59836 + },
59837 +#endif
59838 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59839 + {
59840 + .procname = "exec_logging",
59841 + .data = &grsec_enable_execlog,
59842 + .maxlen = sizeof(int),
59843 + .mode = 0600,
59844 + .proc_handler = &proc_dointvec,
59845 + },
59846 +#endif
59847 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59848 + {
59849 + .procname = "rwxmap_logging",
59850 + .data = &grsec_enable_log_rwxmaps,
59851 + .maxlen = sizeof(int),
59852 + .mode = 0600,
59853 + .proc_handler = &proc_dointvec,
59854 + },
59855 +#endif
59856 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59857 + {
59858 + .procname = "signal_logging",
59859 + .data = &grsec_enable_signal,
59860 + .maxlen = sizeof(int),
59861 + .mode = 0600,
59862 + .proc_handler = &proc_dointvec,
59863 + },
59864 +#endif
59865 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59866 + {
59867 + .procname = "forkfail_logging",
59868 + .data = &grsec_enable_forkfail,
59869 + .maxlen = sizeof(int),
59870 + .mode = 0600,
59871 + .proc_handler = &proc_dointvec,
59872 + },
59873 +#endif
59874 +#ifdef CONFIG_GRKERNSEC_TIME
59875 + {
59876 + .procname = "timechange_logging",
59877 + .data = &grsec_enable_time,
59878 + .maxlen = sizeof(int),
59879 + .mode = 0600,
59880 + .proc_handler = &proc_dointvec,
59881 + },
59882 +#endif
59883 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59884 + {
59885 + .procname = "chroot_deny_shmat",
59886 + .data = &grsec_enable_chroot_shmat,
59887 + .maxlen = sizeof(int),
59888 + .mode = 0600,
59889 + .proc_handler = &proc_dointvec,
59890 + },
59891 +#endif
59892 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59893 + {
59894 + .procname = "chroot_deny_unix",
59895 + .data = &grsec_enable_chroot_unix,
59896 + .maxlen = sizeof(int),
59897 + .mode = 0600,
59898 + .proc_handler = &proc_dointvec,
59899 + },
59900 +#endif
59901 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59902 + {
59903 + .procname = "chroot_deny_mount",
59904 + .data = &grsec_enable_chroot_mount,
59905 + .maxlen = sizeof(int),
59906 + .mode = 0600,
59907 + .proc_handler = &proc_dointvec,
59908 + },
59909 +#endif
59910 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59911 + {
59912 + .procname = "chroot_deny_fchdir",
59913 + .data = &grsec_enable_chroot_fchdir,
59914 + .maxlen = sizeof(int),
59915 + .mode = 0600,
59916 + .proc_handler = &proc_dointvec,
59917 + },
59918 +#endif
59919 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59920 + {
59921 + .procname = "chroot_deny_chroot",
59922 + .data = &grsec_enable_chroot_double,
59923 + .maxlen = sizeof(int),
59924 + .mode = 0600,
59925 + .proc_handler = &proc_dointvec,
59926 + },
59927 +#endif
59928 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59929 + {
59930 + .procname = "chroot_deny_pivot",
59931 + .data = &grsec_enable_chroot_pivot,
59932 + .maxlen = sizeof(int),
59933 + .mode = 0600,
59934 + .proc_handler = &proc_dointvec,
59935 + },
59936 +#endif
59937 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59938 + {
59939 + .procname = "chroot_enforce_chdir",
59940 + .data = &grsec_enable_chroot_chdir,
59941 + .maxlen = sizeof(int),
59942 + .mode = 0600,
59943 + .proc_handler = &proc_dointvec,
59944 + },
59945 +#endif
59946 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59947 + {
59948 + .procname = "chroot_deny_chmod",
59949 + .data = &grsec_enable_chroot_chmod,
59950 + .maxlen = sizeof(int),
59951 + .mode = 0600,
59952 + .proc_handler = &proc_dointvec,
59953 + },
59954 +#endif
59955 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59956 + {
59957 + .procname = "chroot_deny_mknod",
59958 + .data = &grsec_enable_chroot_mknod,
59959 + .maxlen = sizeof(int),
59960 + .mode = 0600,
59961 + .proc_handler = &proc_dointvec,
59962 + },
59963 +#endif
59964 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59965 + {
59966 + .procname = "chroot_restrict_nice",
59967 + .data = &grsec_enable_chroot_nice,
59968 + .maxlen = sizeof(int),
59969 + .mode = 0600,
59970 + .proc_handler = &proc_dointvec,
59971 + },
59972 +#endif
59973 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59974 + {
59975 + .procname = "chroot_execlog",
59976 + .data = &grsec_enable_chroot_execlog,
59977 + .maxlen = sizeof(int),
59978 + .mode = 0600,
59979 + .proc_handler = &proc_dointvec,
59980 + },
59981 +#endif
59982 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59983 + {
59984 + .procname = "chroot_caps",
59985 + .data = &grsec_enable_chroot_caps,
59986 + .maxlen = sizeof(int),
59987 + .mode = 0600,
59988 + .proc_handler = &proc_dointvec,
59989 + },
59990 +#endif
59991 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59992 + {
59993 + .procname = "chroot_deny_sysctl",
59994 + .data = &grsec_enable_chroot_sysctl,
59995 + .maxlen = sizeof(int),
59996 + .mode = 0600,
59997 + .proc_handler = &proc_dointvec,
59998 + },
59999 +#endif
60000 +#ifdef CONFIG_GRKERNSEC_TPE
60001 + {
60002 + .procname = "tpe",
60003 + .data = &grsec_enable_tpe,
60004 + .maxlen = sizeof(int),
60005 + .mode = 0600,
60006 + .proc_handler = &proc_dointvec,
60007 + },
60008 + {
60009 + .procname = "tpe_gid",
60010 + .data = &grsec_tpe_gid,
60011 + .maxlen = sizeof(int),
60012 + .mode = 0600,
60013 + .proc_handler = &proc_dointvec,
60014 + },
60015 +#endif
60016 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60017 + {
60018 + .procname = "tpe_invert",
60019 + .data = &grsec_enable_tpe_invert,
60020 + .maxlen = sizeof(int),
60021 + .mode = 0600,
60022 + .proc_handler = &proc_dointvec,
60023 + },
60024 +#endif
60025 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
60026 + {
60027 + .procname = "tpe_restrict_all",
60028 + .data = &grsec_enable_tpe_all,
60029 + .maxlen = sizeof(int),
60030 + .mode = 0600,
60031 + .proc_handler = &proc_dointvec,
60032 + },
60033 +#endif
60034 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60035 + {
60036 + .procname = "socket_all",
60037 + .data = &grsec_enable_socket_all,
60038 + .maxlen = sizeof(int),
60039 + .mode = 0600,
60040 + .proc_handler = &proc_dointvec,
60041 + },
60042 + {
60043 + .procname = "socket_all_gid",
60044 + .data = &grsec_socket_all_gid,
60045 + .maxlen = sizeof(int),
60046 + .mode = 0600,
60047 + .proc_handler = &proc_dointvec,
60048 + },
60049 +#endif
60050 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60051 + {
60052 + .procname = "socket_client",
60053 + .data = &grsec_enable_socket_client,
60054 + .maxlen = sizeof(int),
60055 + .mode = 0600,
60056 + .proc_handler = &proc_dointvec,
60057 + },
60058 + {
60059 + .procname = "socket_client_gid",
60060 + .data = &grsec_socket_client_gid,
60061 + .maxlen = sizeof(int),
60062 + .mode = 0600,
60063 + .proc_handler = &proc_dointvec,
60064 + },
60065 +#endif
60066 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60067 + {
60068 + .procname = "socket_server",
60069 + .data = &grsec_enable_socket_server,
60070 + .maxlen = sizeof(int),
60071 + .mode = 0600,
60072 + .proc_handler = &proc_dointvec,
60073 + },
60074 + {
60075 + .procname = "socket_server_gid",
60076 + .data = &grsec_socket_server_gid,
60077 + .maxlen = sizeof(int),
60078 + .mode = 0600,
60079 + .proc_handler = &proc_dointvec,
60080 + },
60081 +#endif
60082 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60083 + {
60084 + .procname = "audit_group",
60085 + .data = &grsec_enable_group,
60086 + .maxlen = sizeof(int),
60087 + .mode = 0600,
60088 + .proc_handler = &proc_dointvec,
60089 + },
60090 + {
60091 + .procname = "audit_gid",
60092 + .data = &grsec_audit_gid,
60093 + .maxlen = sizeof(int),
60094 + .mode = 0600,
60095 + .proc_handler = &proc_dointvec,
60096 + },
60097 +#endif
60098 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60099 + {
60100 + .procname = "audit_chdir",
60101 + .data = &grsec_enable_chdir,
60102 + .maxlen = sizeof(int),
60103 + .mode = 0600,
60104 + .proc_handler = &proc_dointvec,
60105 + },
60106 +#endif
60107 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60108 + {
60109 + .procname = "audit_mount",
60110 + .data = &grsec_enable_mount,
60111 + .maxlen = sizeof(int),
60112 + .mode = 0600,
60113 + .proc_handler = &proc_dointvec,
60114 + },
60115 +#endif
60116 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60117 + {
60118 + .procname = "audit_textrel",
60119 + .data = &grsec_enable_audit_textrel,
60120 + .maxlen = sizeof(int),
60121 + .mode = 0600,
60122 + .proc_handler = &proc_dointvec,
60123 + },
60124 +#endif
60125 +#ifdef CONFIG_GRKERNSEC_DMESG
60126 + {
60127 + .procname = "dmesg",
60128 + .data = &grsec_enable_dmesg,
60129 + .maxlen = sizeof(int),
60130 + .mode = 0600,
60131 + .proc_handler = &proc_dointvec,
60132 + },
60133 +#endif
60134 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60135 + {
60136 + .procname = "chroot_findtask",
60137 + .data = &grsec_enable_chroot_findtask,
60138 + .maxlen = sizeof(int),
60139 + .mode = 0600,
60140 + .proc_handler = &proc_dointvec,
60141 + },
60142 +#endif
60143 +#ifdef CONFIG_GRKERNSEC_RESLOG
60144 + {
60145 + .procname = "resource_logging",
60146 + .data = &grsec_resource_logging,
60147 + .maxlen = sizeof(int),
60148 + .mode = 0600,
60149 + .proc_handler = &proc_dointvec,
60150 + },
60151 +#endif
60152 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60153 + {
60154 + .procname = "audit_ptrace",
60155 + .data = &grsec_enable_audit_ptrace,
60156 + .maxlen = sizeof(int),
60157 + .mode = 0600,
60158 + .proc_handler = &proc_dointvec,
60159 + },
60160 +#endif
60161 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60162 + {
60163 + .procname = "harden_ptrace",
60164 + .data = &grsec_enable_harden_ptrace,
60165 + .maxlen = sizeof(int),
60166 + .mode = 0600,
60167 + .proc_handler = &proc_dointvec,
60168 + },
60169 +#endif
60170 + {
60171 + .procname = "grsec_lock",
60172 + .data = &grsec_lock,
60173 + .maxlen = sizeof(int),
60174 + .mode = 0600,
60175 + .proc_handler = &proc_dointvec,
60176 + },
60177 +#endif
60178 +#ifdef CONFIG_GRKERNSEC_ROFS
60179 + {
60180 + .procname = "romount_protect",
60181 + .data = &grsec_enable_rofs,
60182 + .maxlen = sizeof(int),
60183 + .mode = 0600,
60184 + .proc_handler = &proc_dointvec_minmax,
60185 + .extra1 = &one,
60186 + .extra2 = &one,
60187 + },
60188 +#endif
60189 + { }
60190 +};
60191 +#endif
60192 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
60193 new file mode 100644
60194 index 0000000..0dc13c3
60195 --- /dev/null
60196 +++ b/grsecurity/grsec_time.c
60197 @@ -0,0 +1,16 @@
60198 +#include <linux/kernel.h>
60199 +#include <linux/sched.h>
60200 +#include <linux/grinternal.h>
60201 +#include <linux/module.h>
60202 +
60203 +void
60204 +gr_log_timechange(void)
60205 +{
60206 +#ifdef CONFIG_GRKERNSEC_TIME
60207 + if (grsec_enable_time)
60208 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
60209 +#endif
60210 + return;
60211 +}
60212 +
60213 +EXPORT_SYMBOL(gr_log_timechange);
60214 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
60215 new file mode 100644
60216 index 0000000..07e0dc0
60217 --- /dev/null
60218 +++ b/grsecurity/grsec_tpe.c
60219 @@ -0,0 +1,73 @@
60220 +#include <linux/kernel.h>
60221 +#include <linux/sched.h>
60222 +#include <linux/file.h>
60223 +#include <linux/fs.h>
60224 +#include <linux/grinternal.h>
60225 +
60226 +extern int gr_acl_tpe_check(void);
60227 +
60228 +int
60229 +gr_tpe_allow(const struct file *file)
60230 +{
60231 +#ifdef CONFIG_GRKERNSEC
60232 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
60233 + const struct cred *cred = current_cred();
60234 + char *msg = NULL;
60235 + char *msg2 = NULL;
60236 +
60237 + // never restrict root
60238 + if (!cred->uid)
60239 + return 1;
60240 +
60241 + if (grsec_enable_tpe) {
60242 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60243 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
60244 + msg = "not being in trusted group";
60245 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
60246 + msg = "being in untrusted group";
60247 +#else
60248 + if (in_group_p(grsec_tpe_gid))
60249 + msg = "being in untrusted group";
60250 +#endif
60251 + }
60252 + if (!msg && gr_acl_tpe_check())
60253 + msg = "being in untrusted role";
60254 +
60255 + // not in any affected group/role
60256 + if (!msg)
60257 + goto next_check;
60258 +
60259 + if (inode->i_uid)
60260 + msg2 = "file in non-root-owned directory";
60261 + else if (inode->i_mode & S_IWOTH)
60262 + msg2 = "file in world-writable directory";
60263 + else if (inode->i_mode & S_IWGRP)
60264 + msg2 = "file in group-writable directory";
60265 +
60266 + if (msg && msg2) {
60267 + char fullmsg[70] = {0};
60268 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
60269 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
60270 + return 0;
60271 + }
60272 + msg = NULL;
60273 +next_check:
60274 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
60275 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
60276 + return 1;
60277 +
60278 + if (inode->i_uid && (inode->i_uid != cred->uid))
60279 + msg = "directory not owned by user";
60280 + else if (inode->i_mode & S_IWOTH)
60281 + msg = "file in world-writable directory";
60282 + else if (inode->i_mode & S_IWGRP)
60283 + msg = "file in group-writable directory";
60284 +
60285 + if (msg) {
60286 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
60287 + return 0;
60288 + }
60289 +#endif
60290 +#endif
60291 + return 1;
60292 +}
60293 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60294 new file mode 100644
60295 index 0000000..9f7b1ac
60296 --- /dev/null
60297 +++ b/grsecurity/grsum.c
60298 @@ -0,0 +1,61 @@
60299 +#include <linux/err.h>
60300 +#include <linux/kernel.h>
60301 +#include <linux/sched.h>
60302 +#include <linux/mm.h>
60303 +#include <linux/scatterlist.h>
60304 +#include <linux/crypto.h>
60305 +#include <linux/gracl.h>
60306 +
60307 +
60308 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60309 +#error "crypto and sha256 must be built into the kernel"
60310 +#endif
60311 +
60312 +int
60313 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60314 +{
60315 + char *p;
60316 + struct crypto_hash *tfm;
60317 + struct hash_desc desc;
60318 + struct scatterlist sg;
60319 + unsigned char temp_sum[GR_SHA_LEN];
60320 + volatile int retval = 0;
60321 + volatile int dummy = 0;
60322 + unsigned int i;
60323 +
60324 + sg_init_table(&sg, 1);
60325 +
60326 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60327 + if (IS_ERR(tfm)) {
60328 + /* should never happen, since sha256 should be built in */
60329 + return 1;
60330 + }
60331 +
60332 + desc.tfm = tfm;
60333 + desc.flags = 0;
60334 +
60335 + crypto_hash_init(&desc);
60336 +
60337 + p = salt;
60338 + sg_set_buf(&sg, p, GR_SALT_LEN);
60339 + crypto_hash_update(&desc, &sg, sg.length);
60340 +
60341 + p = entry->pw;
60342 + sg_set_buf(&sg, p, strlen(p));
60343 +
60344 + crypto_hash_update(&desc, &sg, sg.length);
60345 +
60346 + crypto_hash_final(&desc, temp_sum);
60347 +
60348 + memset(entry->pw, 0, GR_PW_LEN);
60349 +
60350 + for (i = 0; i < GR_SHA_LEN; i++)
60351 + if (sum[i] != temp_sum[i])
60352 + retval = 1;
60353 + else
60354 + dummy = 1; // waste a cycle
60355 +
60356 + crypto_free_hash(tfm);
60357 +
60358 + return retval;
60359 +}
60360 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60361 index 9e6e1c6..d47b906 100644
60362 --- a/include/acpi/acpi_bus.h
60363 +++ b/include/acpi/acpi_bus.h
60364 @@ -138,7 +138,7 @@ struct acpi_device_ops {
60365 acpi_op_bind bind;
60366 acpi_op_unbind unbind;
60367 acpi_op_notify notify;
60368 -};
60369 +} __no_const;
60370
60371 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60372
60373 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
60374 index 77ff547..181834f 100644
60375 --- a/include/asm-generic/4level-fixup.h
60376 +++ b/include/asm-generic/4level-fixup.h
60377 @@ -13,8 +13,10 @@
60378 #define pmd_alloc(mm, pud, address) \
60379 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
60380 NULL: pmd_offset(pud, address))
60381 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
60382
60383 #define pud_alloc(mm, pgd, address) (pgd)
60384 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
60385 #define pud_offset(pgd, start) (pgd)
60386 #define pud_none(pud) 0
60387 #define pud_bad(pud) 0
60388 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60389 index b7babf0..3ba8aee 100644
60390 --- a/include/asm-generic/atomic-long.h
60391 +++ b/include/asm-generic/atomic-long.h
60392 @@ -22,6 +22,12 @@
60393
60394 typedef atomic64_t atomic_long_t;
60395
60396 +#ifdef CONFIG_PAX_REFCOUNT
60397 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
60398 +#else
60399 +typedef atomic64_t atomic_long_unchecked_t;
60400 +#endif
60401 +
60402 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60403
60404 static inline long atomic_long_read(atomic_long_t *l)
60405 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60406 return (long)atomic64_read(v);
60407 }
60408
60409 +#ifdef CONFIG_PAX_REFCOUNT
60410 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60411 +{
60412 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60413 +
60414 + return (long)atomic64_read_unchecked(v);
60415 +}
60416 +#endif
60417 +
60418 static inline void atomic_long_set(atomic_long_t *l, long i)
60419 {
60420 atomic64_t *v = (atomic64_t *)l;
60421 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60422 atomic64_set(v, i);
60423 }
60424
60425 +#ifdef CONFIG_PAX_REFCOUNT
60426 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60427 +{
60428 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60429 +
60430 + atomic64_set_unchecked(v, i);
60431 +}
60432 +#endif
60433 +
60434 static inline void atomic_long_inc(atomic_long_t *l)
60435 {
60436 atomic64_t *v = (atomic64_t *)l;
60437 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60438 atomic64_inc(v);
60439 }
60440
60441 +#ifdef CONFIG_PAX_REFCOUNT
60442 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60443 +{
60444 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60445 +
60446 + atomic64_inc_unchecked(v);
60447 +}
60448 +#endif
60449 +
60450 static inline void atomic_long_dec(atomic_long_t *l)
60451 {
60452 atomic64_t *v = (atomic64_t *)l;
60453 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60454 atomic64_dec(v);
60455 }
60456
60457 +#ifdef CONFIG_PAX_REFCOUNT
60458 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60459 +{
60460 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60461 +
60462 + atomic64_dec_unchecked(v);
60463 +}
60464 +#endif
60465 +
60466 static inline void atomic_long_add(long i, atomic_long_t *l)
60467 {
60468 atomic64_t *v = (atomic64_t *)l;
60469 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60470 atomic64_add(i, v);
60471 }
60472
60473 +#ifdef CONFIG_PAX_REFCOUNT
60474 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60475 +{
60476 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60477 +
60478 + atomic64_add_unchecked(i, v);
60479 +}
60480 +#endif
60481 +
60482 static inline void atomic_long_sub(long i, atomic_long_t *l)
60483 {
60484 atomic64_t *v = (atomic64_t *)l;
60485 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60486 atomic64_sub(i, v);
60487 }
60488
60489 +#ifdef CONFIG_PAX_REFCOUNT
60490 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60491 +{
60492 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60493 +
60494 + atomic64_sub_unchecked(i, v);
60495 +}
60496 +#endif
60497 +
60498 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60499 {
60500 atomic64_t *v = (atomic64_t *)l;
60501 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60502 return (long)atomic64_inc_return(v);
60503 }
60504
60505 +#ifdef CONFIG_PAX_REFCOUNT
60506 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60507 +{
60508 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60509 +
60510 + return (long)atomic64_inc_return_unchecked(v);
60511 +}
60512 +#endif
60513 +
60514 static inline long atomic_long_dec_return(atomic_long_t *l)
60515 {
60516 atomic64_t *v = (atomic64_t *)l;
60517 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60518
60519 typedef atomic_t atomic_long_t;
60520
60521 +#ifdef CONFIG_PAX_REFCOUNT
60522 +typedef atomic_unchecked_t atomic_long_unchecked_t;
60523 +#else
60524 +typedef atomic_t atomic_long_unchecked_t;
60525 +#endif
60526 +
60527 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60528 static inline long atomic_long_read(atomic_long_t *l)
60529 {
60530 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60531 return (long)atomic_read(v);
60532 }
60533
60534 +#ifdef CONFIG_PAX_REFCOUNT
60535 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60536 +{
60537 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60538 +
60539 + return (long)atomic_read_unchecked(v);
60540 +}
60541 +#endif
60542 +
60543 static inline void atomic_long_set(atomic_long_t *l, long i)
60544 {
60545 atomic_t *v = (atomic_t *)l;
60546 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60547 atomic_set(v, i);
60548 }
60549
60550 +#ifdef CONFIG_PAX_REFCOUNT
60551 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60552 +{
60553 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60554 +
60555 + atomic_set_unchecked(v, i);
60556 +}
60557 +#endif
60558 +
60559 static inline void atomic_long_inc(atomic_long_t *l)
60560 {
60561 atomic_t *v = (atomic_t *)l;
60562 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60563 atomic_inc(v);
60564 }
60565
60566 +#ifdef CONFIG_PAX_REFCOUNT
60567 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60568 +{
60569 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60570 +
60571 + atomic_inc_unchecked(v);
60572 +}
60573 +#endif
60574 +
60575 static inline void atomic_long_dec(atomic_long_t *l)
60576 {
60577 atomic_t *v = (atomic_t *)l;
60578 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60579 atomic_dec(v);
60580 }
60581
60582 +#ifdef CONFIG_PAX_REFCOUNT
60583 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60584 +{
60585 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60586 +
60587 + atomic_dec_unchecked(v);
60588 +}
60589 +#endif
60590 +
60591 static inline void atomic_long_add(long i, atomic_long_t *l)
60592 {
60593 atomic_t *v = (atomic_t *)l;
60594 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60595 atomic_add(i, v);
60596 }
60597
60598 +#ifdef CONFIG_PAX_REFCOUNT
60599 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60600 +{
60601 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60602 +
60603 + atomic_add_unchecked(i, v);
60604 +}
60605 +#endif
60606 +
60607 static inline void atomic_long_sub(long i, atomic_long_t *l)
60608 {
60609 atomic_t *v = (atomic_t *)l;
60610 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60611 atomic_sub(i, v);
60612 }
60613
60614 +#ifdef CONFIG_PAX_REFCOUNT
60615 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60616 +{
60617 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60618 +
60619 + atomic_sub_unchecked(i, v);
60620 +}
60621 +#endif
60622 +
60623 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60624 {
60625 atomic_t *v = (atomic_t *)l;
60626 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60627 return (long)atomic_inc_return(v);
60628 }
60629
60630 +#ifdef CONFIG_PAX_REFCOUNT
60631 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60632 +{
60633 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60634 +
60635 + return (long)atomic_inc_return_unchecked(v);
60636 +}
60637 +#endif
60638 +
60639 static inline long atomic_long_dec_return(atomic_long_t *l)
60640 {
60641 atomic_t *v = (atomic_t *)l;
60642 @@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60643
60644 #endif /* BITS_PER_LONG == 64 */
60645
60646 +#ifdef CONFIG_PAX_REFCOUNT
60647 +static inline void pax_refcount_needs_these_functions(void)
60648 +{
60649 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
60650 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60651 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60652 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60653 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60654 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60655 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60656 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60657 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60658 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60659 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60660 +#ifdef CONFIG_X86
60661 + atomic_clear_mask_unchecked(0, NULL);
60662 + atomic_set_mask_unchecked(0, NULL);
60663 +#endif
60664 +
60665 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60666 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60667 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60668 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60669 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60670 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60671 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60672 +}
60673 +#else
60674 +#define atomic_read_unchecked(v) atomic_read(v)
60675 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60676 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60677 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60678 +#define atomic_inc_unchecked(v) atomic_inc(v)
60679 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60680 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60681 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60682 +#define atomic_dec_unchecked(v) atomic_dec(v)
60683 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60684 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60685 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
60686 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
60687 +
60688 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
60689 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60690 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60691 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60692 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60693 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60694 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60695 +#endif
60696 +
60697 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60698 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
60699 index 1ced641..c896ee8 100644
60700 --- a/include/asm-generic/atomic.h
60701 +++ b/include/asm-generic/atomic.h
60702 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
60703 * Atomically clears the bits set in @mask from @v
60704 */
60705 #ifndef atomic_clear_mask
60706 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
60707 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
60708 {
60709 unsigned long flags;
60710
60711 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60712 index b18ce4f..2ee2843 100644
60713 --- a/include/asm-generic/atomic64.h
60714 +++ b/include/asm-generic/atomic64.h
60715 @@ -16,6 +16,8 @@ typedef struct {
60716 long long counter;
60717 } atomic64_t;
60718
60719 +typedef atomic64_t atomic64_unchecked_t;
60720 +
60721 #define ATOMIC64_INIT(i) { (i) }
60722
60723 extern long long atomic64_read(const atomic64_t *v);
60724 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60725 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60726 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60727
60728 +#define atomic64_read_unchecked(v) atomic64_read(v)
60729 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60730 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60731 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60732 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60733 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
60734 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60735 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
60736 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60737 +
60738 #endif /* _ASM_GENERIC_ATOMIC64_H */
60739 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60740 index 1bfcfe5..e04c5c9 100644
60741 --- a/include/asm-generic/cache.h
60742 +++ b/include/asm-generic/cache.h
60743 @@ -6,7 +6,7 @@
60744 * cache lines need to provide their own cache.h.
60745 */
60746
60747 -#define L1_CACHE_SHIFT 5
60748 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60749 +#define L1_CACHE_SHIFT 5UL
60750 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60751
60752 #endif /* __ASM_GENERIC_CACHE_H */
60753 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60754 index 0d68a1e..b74a761 100644
60755 --- a/include/asm-generic/emergency-restart.h
60756 +++ b/include/asm-generic/emergency-restart.h
60757 @@ -1,7 +1,7 @@
60758 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60759 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60760
60761 -static inline void machine_emergency_restart(void)
60762 +static inline __noreturn void machine_emergency_restart(void)
60763 {
60764 machine_restart(NULL);
60765 }
60766 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60767 index 0232ccb..13d9165 100644
60768 --- a/include/asm-generic/kmap_types.h
60769 +++ b/include/asm-generic/kmap_types.h
60770 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60771 KMAP_D(17) KM_NMI,
60772 KMAP_D(18) KM_NMI_PTE,
60773 KMAP_D(19) KM_KDB,
60774 +KMAP_D(20) KM_CLEARPAGE,
60775 /*
60776 * Remember to update debug_kmap_atomic() when adding new kmap types!
60777 */
60778 -KMAP_D(20) KM_TYPE_NR
60779 +KMAP_D(21) KM_TYPE_NR
60780 };
60781
60782 #undef KMAP_D
60783 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60784 index 9ceb03b..2efbcbd 100644
60785 --- a/include/asm-generic/local.h
60786 +++ b/include/asm-generic/local.h
60787 @@ -39,6 +39,7 @@ typedef struct
60788 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60789 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60790 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60791 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60792
60793 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60794 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
60795 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60796 index 725612b..9cc513a 100644
60797 --- a/include/asm-generic/pgtable-nopmd.h
60798 +++ b/include/asm-generic/pgtable-nopmd.h
60799 @@ -1,14 +1,19 @@
60800 #ifndef _PGTABLE_NOPMD_H
60801 #define _PGTABLE_NOPMD_H
60802
60803 -#ifndef __ASSEMBLY__
60804 -
60805 #include <asm-generic/pgtable-nopud.h>
60806
60807 -struct mm_struct;
60808 -
60809 #define __PAGETABLE_PMD_FOLDED
60810
60811 +#define PMD_SHIFT PUD_SHIFT
60812 +#define PTRS_PER_PMD 1
60813 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60814 +#define PMD_MASK (~(PMD_SIZE-1))
60815 +
60816 +#ifndef __ASSEMBLY__
60817 +
60818 +struct mm_struct;
60819 +
60820 /*
60821 * Having the pmd type consist of a pud gets the size right, and allows
60822 * us to conceptually access the pud entry that this pmd is folded into
60823 @@ -16,11 +21,6 @@ struct mm_struct;
60824 */
60825 typedef struct { pud_t pud; } pmd_t;
60826
60827 -#define PMD_SHIFT PUD_SHIFT
60828 -#define PTRS_PER_PMD 1
60829 -#define PMD_SIZE (1UL << PMD_SHIFT)
60830 -#define PMD_MASK (~(PMD_SIZE-1))
60831 -
60832 /*
60833 * The "pud_xxx()" functions here are trivial for a folded two-level
60834 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60835 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60836 index 810431d..0ec4804f 100644
60837 --- a/include/asm-generic/pgtable-nopud.h
60838 +++ b/include/asm-generic/pgtable-nopud.h
60839 @@ -1,10 +1,15 @@
60840 #ifndef _PGTABLE_NOPUD_H
60841 #define _PGTABLE_NOPUD_H
60842
60843 -#ifndef __ASSEMBLY__
60844 -
60845 #define __PAGETABLE_PUD_FOLDED
60846
60847 +#define PUD_SHIFT PGDIR_SHIFT
60848 +#define PTRS_PER_PUD 1
60849 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60850 +#define PUD_MASK (~(PUD_SIZE-1))
60851 +
60852 +#ifndef __ASSEMBLY__
60853 +
60854 /*
60855 * Having the pud type consist of a pgd gets the size right, and allows
60856 * us to conceptually access the pgd entry that this pud is folded into
60857 @@ -12,11 +17,6 @@
60858 */
60859 typedef struct { pgd_t pgd; } pud_t;
60860
60861 -#define PUD_SHIFT PGDIR_SHIFT
60862 -#define PTRS_PER_PUD 1
60863 -#define PUD_SIZE (1UL << PUD_SHIFT)
60864 -#define PUD_MASK (~(PUD_SIZE-1))
60865 -
60866 /*
60867 * The "pgd_xxx()" functions here are trivial for a folded two-level
60868 * setup: the pud is never bad, and a pud always exists (as it's folded
60869 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
60870 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
60871
60872 #define pgd_populate(mm, pgd, pud) do { } while (0)
60873 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
60874 /*
60875 * (puds are folded into pgds so this doesn't get actually called,
60876 * but the define is needed for a generic inline function.)
60877 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60878 index ff4947b..f48183f 100644
60879 --- a/include/asm-generic/pgtable.h
60880 +++ b/include/asm-generic/pgtable.h
60881 @@ -530,6 +530,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60882 #endif
60883 }
60884
60885 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60886 +static inline unsigned long pax_open_kernel(void) { return 0; }
60887 +#endif
60888 +
60889 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60890 +static inline unsigned long pax_close_kernel(void) { return 0; }
60891 +#endif
60892 +
60893 #endif /* CONFIG_MMU */
60894
60895 #endif /* !__ASSEMBLY__ */
60896 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60897 index 4e2e1cc..12c266b 100644
60898 --- a/include/asm-generic/vmlinux.lds.h
60899 +++ b/include/asm-generic/vmlinux.lds.h
60900 @@ -218,6 +218,7 @@
60901 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60902 VMLINUX_SYMBOL(__start_rodata) = .; \
60903 *(.rodata) *(.rodata.*) \
60904 + *(.data..read_only) \
60905 *(__vermagic) /* Kernel version magic */ \
60906 . = ALIGN(8); \
60907 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60908 @@ -716,17 +717,18 @@
60909 * section in the linker script will go there too. @phdr should have
60910 * a leading colon.
60911 *
60912 - * Note that this macros defines __per_cpu_load as an absolute symbol.
60913 + * Note that this macros defines per_cpu_load as an absolute symbol.
60914 * If there is no need to put the percpu section at a predetermined
60915 * address, use PERCPU_SECTION.
60916 */
60917 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60918 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
60919 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60920 + per_cpu_load = .; \
60921 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60922 - LOAD_OFFSET) { \
60923 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60924 PERCPU_INPUT(cacheline) \
60925 } phdr \
60926 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60927 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60928
60929 /**
60930 * PERCPU_SECTION - define output section for percpu area, simple version
60931 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60932 index 31ad880..4e79884 100644
60933 --- a/include/drm/drmP.h
60934 +++ b/include/drm/drmP.h
60935 @@ -72,6 +72,7 @@
60936 #include <linux/workqueue.h>
60937 #include <linux/poll.h>
60938 #include <asm/pgalloc.h>
60939 +#include <asm/local.h>
60940 #include "drm.h"
60941
60942 #include <linux/idr.h>
60943 @@ -1074,7 +1075,7 @@ struct drm_device {
60944
60945 /** \name Usage Counters */
60946 /*@{ */
60947 - int open_count; /**< Outstanding files open */
60948 + local_t open_count; /**< Outstanding files open */
60949 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60950 atomic_t vma_count; /**< Outstanding vma areas open */
60951 int buf_use; /**< Buffers in use -- cannot alloc */
60952 @@ -1085,7 +1086,7 @@ struct drm_device {
60953 /*@{ */
60954 unsigned long counters;
60955 enum drm_stat_type types[15];
60956 - atomic_t counts[15];
60957 + atomic_unchecked_t counts[15];
60958 /*@} */
60959
60960 struct list_head filelist;
60961 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60962 index 7988e55..ec974c9 100644
60963 --- a/include/drm/drm_crtc_helper.h
60964 +++ b/include/drm/drm_crtc_helper.h
60965 @@ -81,7 +81,7 @@ struct drm_crtc_helper_funcs {
60966
60967 /* disable crtc when not in use - more explicit than dpms off */
60968 void (*disable)(struct drm_crtc *crtc);
60969 -};
60970 +} __no_const;
60971
60972 /**
60973 * drm_encoder_helper_funcs - helper operations for encoders
60974 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
60975 struct drm_connector *connector);
60976 /* disable encoder when not in use - more explicit than dpms off */
60977 void (*disable)(struct drm_encoder *encoder);
60978 -};
60979 +} __no_const;
60980
60981 /**
60982 * drm_connector_helper_funcs - helper operations for connectors
60983 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60984 index d6d1da4..fdd1ac5 100644
60985 --- a/include/drm/ttm/ttm_memory.h
60986 +++ b/include/drm/ttm/ttm_memory.h
60987 @@ -48,7 +48,7 @@
60988
60989 struct ttm_mem_shrink {
60990 int (*do_shrink) (struct ttm_mem_shrink *);
60991 -};
60992 +} __no_const;
60993
60994 /**
60995 * struct ttm_mem_global - Global memory accounting structure.
60996 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60997 index e86dfca..40cc55f 100644
60998 --- a/include/linux/a.out.h
60999 +++ b/include/linux/a.out.h
61000 @@ -39,6 +39,14 @@ enum machine_type {
61001 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
61002 };
61003
61004 +/* Constants for the N_FLAGS field */
61005 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61006 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
61007 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
61008 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
61009 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61010 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61011 +
61012 #if !defined (N_MAGIC)
61013 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
61014 #endif
61015 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
61016 index 06fd4bb..1caec0d 100644
61017 --- a/include/linux/atmdev.h
61018 +++ b/include/linux/atmdev.h
61019 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
61020 #endif
61021
61022 struct k_atm_aal_stats {
61023 -#define __HANDLE_ITEM(i) atomic_t i
61024 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61025 __AAL_STAT_ITEMS
61026 #undef __HANDLE_ITEM
61027 };
61028 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
61029 index 366422b..1fa7f84 100644
61030 --- a/include/linux/binfmts.h
61031 +++ b/include/linux/binfmts.h
61032 @@ -89,6 +89,7 @@ struct linux_binfmt {
61033 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
61034 int (*load_shlib)(struct file *);
61035 int (*core_dump)(struct coredump_params *cprm);
61036 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
61037 unsigned long min_coredump; /* minimal dump size */
61038 };
61039
61040 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
61041 index 07954b0..cb2ae71 100644
61042 --- a/include/linux/blkdev.h
61043 +++ b/include/linux/blkdev.h
61044 @@ -1393,7 +1393,7 @@ struct block_device_operations {
61045 /* this callback is with swap_lock and sometimes page table lock held */
61046 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
61047 struct module *owner;
61048 -};
61049 +} __do_const;
61050
61051 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
61052 unsigned long);
61053 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
61054 index 4d1a074..88f929a 100644
61055 --- a/include/linux/blktrace_api.h
61056 +++ b/include/linux/blktrace_api.h
61057 @@ -162,7 +162,7 @@ struct blk_trace {
61058 struct dentry *dir;
61059 struct dentry *dropped_file;
61060 struct dentry *msg_file;
61061 - atomic_t dropped;
61062 + atomic_unchecked_t dropped;
61063 };
61064
61065 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
61066 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
61067 index 83195fb..0b0f77d 100644
61068 --- a/include/linux/byteorder/little_endian.h
61069 +++ b/include/linux/byteorder/little_endian.h
61070 @@ -42,51 +42,51 @@
61071
61072 static inline __le64 __cpu_to_le64p(const __u64 *p)
61073 {
61074 - return (__force __le64)*p;
61075 + return (__force const __le64)*p;
61076 }
61077 static inline __u64 __le64_to_cpup(const __le64 *p)
61078 {
61079 - return (__force __u64)*p;
61080 + return (__force const __u64)*p;
61081 }
61082 static inline __le32 __cpu_to_le32p(const __u32 *p)
61083 {
61084 - return (__force __le32)*p;
61085 + return (__force const __le32)*p;
61086 }
61087 static inline __u32 __le32_to_cpup(const __le32 *p)
61088 {
61089 - return (__force __u32)*p;
61090 + return (__force const __u32)*p;
61091 }
61092 static inline __le16 __cpu_to_le16p(const __u16 *p)
61093 {
61094 - return (__force __le16)*p;
61095 + return (__force const __le16)*p;
61096 }
61097 static inline __u16 __le16_to_cpup(const __le16 *p)
61098 {
61099 - return (__force __u16)*p;
61100 + return (__force const __u16)*p;
61101 }
61102 static inline __be64 __cpu_to_be64p(const __u64 *p)
61103 {
61104 - return (__force __be64)__swab64p(p);
61105 + return (__force const __be64)__swab64p(p);
61106 }
61107 static inline __u64 __be64_to_cpup(const __be64 *p)
61108 {
61109 - return __swab64p((__u64 *)p);
61110 + return __swab64p((const __u64 *)p);
61111 }
61112 static inline __be32 __cpu_to_be32p(const __u32 *p)
61113 {
61114 - return (__force __be32)__swab32p(p);
61115 + return (__force const __be32)__swab32p(p);
61116 }
61117 static inline __u32 __be32_to_cpup(const __be32 *p)
61118 {
61119 - return __swab32p((__u32 *)p);
61120 + return __swab32p((const __u32 *)p);
61121 }
61122 static inline __be16 __cpu_to_be16p(const __u16 *p)
61123 {
61124 - return (__force __be16)__swab16p(p);
61125 + return (__force const __be16)__swab16p(p);
61126 }
61127 static inline __u16 __be16_to_cpup(const __be16 *p)
61128 {
61129 - return __swab16p((__u16 *)p);
61130 + return __swab16p((const __u16 *)p);
61131 }
61132 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
61133 #define __le64_to_cpus(x) do { (void)(x); } while (0)
61134 diff --git a/include/linux/cache.h b/include/linux/cache.h
61135 index 4c57065..4307975 100644
61136 --- a/include/linux/cache.h
61137 +++ b/include/linux/cache.h
61138 @@ -16,6 +16,10 @@
61139 #define __read_mostly
61140 #endif
61141
61142 +#ifndef __read_only
61143 +#define __read_only __read_mostly
61144 +#endif
61145 +
61146 #ifndef ____cacheline_aligned
61147 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
61148 #endif
61149 diff --git a/include/linux/capability.h b/include/linux/capability.h
61150 index d10b7ed..0288b79 100644
61151 --- a/include/linux/capability.h
61152 +++ b/include/linux/capability.h
61153 @@ -553,10 +553,15 @@ extern bool capable(int cap);
61154 extern bool ns_capable(struct user_namespace *ns, int cap);
61155 extern bool nsown_capable(int cap);
61156 extern bool inode_capable(const struct inode *inode, int cap);
61157 +extern bool capable_nolog(int cap);
61158 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
61159 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
61160
61161 /* audit system wants to get cap info from files as well */
61162 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
61163
61164 +extern int is_privileged_binary(const struct dentry *dentry);
61165 +
61166 #endif /* __KERNEL__ */
61167
61168 #endif /* !_LINUX_CAPABILITY_H */
61169 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
61170 index 42e55de..1cd0e66 100644
61171 --- a/include/linux/cleancache.h
61172 +++ b/include/linux/cleancache.h
61173 @@ -31,7 +31,7 @@ struct cleancache_ops {
61174 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
61175 void (*invalidate_inode)(int, struct cleancache_filekey);
61176 void (*invalidate_fs)(int);
61177 -};
61178 +} __no_const;
61179
61180 extern struct cleancache_ops
61181 cleancache_register_ops(struct cleancache_ops *ops);
61182 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
61183 index 4a0b483..f1f70ba 100644
61184 --- a/include/linux/clk-provider.h
61185 +++ b/include/linux/clk-provider.h
61186 @@ -110,6 +110,7 @@ struct clk_ops {
61187 unsigned long);
61188 void (*init)(struct clk_hw *hw);
61189 };
61190 +typedef struct clk_ops __no_const clk_ops_no_const;
61191
61192 /**
61193 * struct clk_init_data - holds init data that's common to all clocks and is
61194 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
61195 index 2f40791..938880e 100644
61196 --- a/include/linux/compiler-gcc4.h
61197 +++ b/include/linux/compiler-gcc4.h
61198 @@ -32,6 +32,21 @@
61199 #define __linktime_error(message) __attribute__((__error__(message)))
61200
61201 #if __GNUC_MINOR__ >= 5
61202 +
61203 +#ifdef CONSTIFY_PLUGIN
61204 +#define __no_const __attribute__((no_const))
61205 +#define __do_const __attribute__((do_const))
61206 +#endif
61207 +
61208 +#ifdef SIZE_OVERFLOW_PLUGIN
61209 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
61210 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
61211 +#endif
61212 +
61213 +#ifdef LATENT_ENTROPY_PLUGIN
61214 +#define __latent_entropy __attribute__((latent_entropy))
61215 +#endif
61216 +
61217 /*
61218 * Mark a position in code as unreachable. This can be used to
61219 * suppress control flow warnings after asm blocks that transfer
61220 @@ -47,6 +62,11 @@
61221 #define __noclone __attribute__((__noclone__))
61222
61223 #endif
61224 +
61225 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
61226 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
61227 +#define __bos0(ptr) __bos((ptr), 0)
61228 +#define __bos1(ptr) __bos((ptr), 1)
61229 #endif
61230
61231 #if __GNUC_MINOR__ > 0
61232 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
61233 index 923d093..3625de1 100644
61234 --- a/include/linux/compiler.h
61235 +++ b/include/linux/compiler.h
61236 @@ -5,31 +5,62 @@
61237
61238 #ifdef __CHECKER__
61239 # define __user __attribute__((noderef, address_space(1)))
61240 +# define __force_user __force __user
61241 # define __kernel __attribute__((address_space(0)))
61242 +# define __force_kernel __force __kernel
61243 # define __safe __attribute__((safe))
61244 # define __force __attribute__((force))
61245 # define __nocast __attribute__((nocast))
61246 # define __iomem __attribute__((noderef, address_space(2)))
61247 +# define __force_iomem __force __iomem
61248 # define __acquires(x) __attribute__((context(x,0,1)))
61249 # define __releases(x) __attribute__((context(x,1,0)))
61250 # define __acquire(x) __context__(x,1)
61251 # define __release(x) __context__(x,-1)
61252 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61253 # define __percpu __attribute__((noderef, address_space(3)))
61254 +# define __force_percpu __force __percpu
61255 #ifdef CONFIG_SPARSE_RCU_POINTER
61256 # define __rcu __attribute__((noderef, address_space(4)))
61257 +# define __force_rcu __force __rcu
61258 #else
61259 # define __rcu
61260 +# define __force_rcu
61261 #endif
61262 extern void __chk_user_ptr(const volatile void __user *);
61263 extern void __chk_io_ptr(const volatile void __iomem *);
61264 +#elif defined(CHECKER_PLUGIN)
61265 +//# define __user
61266 +//# define __force_user
61267 +//# define __kernel
61268 +//# define __force_kernel
61269 +# define __safe
61270 +# define __force
61271 +# define __nocast
61272 +# define __iomem
61273 +# define __force_iomem
61274 +# define __chk_user_ptr(x) (void)0
61275 +# define __chk_io_ptr(x) (void)0
61276 +# define __builtin_warning(x, y...) (1)
61277 +# define __acquires(x)
61278 +# define __releases(x)
61279 +# define __acquire(x) (void)0
61280 +# define __release(x) (void)0
61281 +# define __cond_lock(x,c) (c)
61282 +# define __percpu
61283 +# define __force_percpu
61284 +# define __rcu
61285 +# define __force_rcu
61286 #else
61287 # define __user
61288 +# define __force_user
61289 # define __kernel
61290 +# define __force_kernel
61291 # define __safe
61292 # define __force
61293 # define __nocast
61294 # define __iomem
61295 +# define __force_iomem
61296 # define __chk_user_ptr(x) (void)0
61297 # define __chk_io_ptr(x) (void)0
61298 # define __builtin_warning(x, y...) (1)
61299 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
61300 # define __release(x) (void)0
61301 # define __cond_lock(x,c) (c)
61302 # define __percpu
61303 +# define __force_percpu
61304 # define __rcu
61305 +# define __force_rcu
61306 #endif
61307
61308 #ifdef __KERNEL__
61309 @@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61310 # define __attribute_const__ /* unimplemented */
61311 #endif
61312
61313 +#ifndef __no_const
61314 +# define __no_const
61315 +#endif
61316 +
61317 +#ifndef __do_const
61318 +# define __do_const
61319 +#endif
61320 +
61321 +#ifndef __size_overflow
61322 +# define __size_overflow(...)
61323 +#endif
61324 +
61325 +#ifndef __latent_entropy
61326 +# define __latent_entropy
61327 +#endif
61328 +
61329 +#ifndef __intentional_overflow
61330 +# define __intentional_overflow(...)
61331 +#endif
61332 +
61333 /*
61334 * Tell gcc if a function is cold. The compiler will assume any path
61335 * directly leading to the call is unlikely.
61336 @@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61337 #define __cold
61338 #endif
61339
61340 +#ifndef __alloc_size
61341 +#define __alloc_size(...)
61342 +#endif
61343 +
61344 +#ifndef __bos
61345 +#define __bos(ptr, arg)
61346 +#endif
61347 +
61348 +#ifndef __bos0
61349 +#define __bos0(ptr)
61350 +#endif
61351 +
61352 +#ifndef __bos1
61353 +#define __bos1(ptr)
61354 +#endif
61355 +
61356 /* Simple shorthand for a section definition */
61357 #ifndef __section
61358 # define __section(S) __attribute__ ((__section__(#S)))
61359 @@ -308,6 +377,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61360 * use is to mediate communication between process-level code and irq/NMI
61361 * handlers, all running on the same CPU.
61362 */
61363 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61364 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61365 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61366
61367 #endif /* __LINUX_COMPILER_H */
61368 diff --git a/include/linux/cred.h b/include/linux/cred.h
61369 index ebbed2c..908cc2c 100644
61370 --- a/include/linux/cred.h
61371 +++ b/include/linux/cred.h
61372 @@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61373 static inline void validate_process_creds(void)
61374 {
61375 }
61376 +static inline void validate_task_creds(struct task_struct *task)
61377 +{
61378 +}
61379 #endif
61380
61381 /**
61382 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
61383 index b92eadf..b4ecdc1 100644
61384 --- a/include/linux/crypto.h
61385 +++ b/include/linux/crypto.h
61386 @@ -373,7 +373,7 @@ struct cipher_tfm {
61387 const u8 *key, unsigned int keylen);
61388 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61389 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61390 -};
61391 +} __no_const;
61392
61393 struct hash_tfm {
61394 int (*init)(struct hash_desc *desc);
61395 @@ -394,13 +394,13 @@ struct compress_tfm {
61396 int (*cot_decompress)(struct crypto_tfm *tfm,
61397 const u8 *src, unsigned int slen,
61398 u8 *dst, unsigned int *dlen);
61399 -};
61400 +} __no_const;
61401
61402 struct rng_tfm {
61403 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61404 unsigned int dlen);
61405 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61406 -};
61407 +} __no_const;
61408
61409 #define crt_ablkcipher crt_u.ablkcipher
61410 #define crt_aead crt_u.aead
61411 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
61412 index 094789f..525ab12 100644
61413 --- a/include/linux/dcache.h
61414 +++ b/include/linux/dcache.h
61415 @@ -206,6 +206,8 @@ struct dentry_operations {
61416 #define DCACHE_MANAGED_DENTRY \
61417 (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
61418
61419 +#define DCACHE_DENTRY_KILLED 0x100000
61420 +
61421 extern seqlock_t rename_lock;
61422
61423 static inline int dname_external(struct dentry *dentry)
61424 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61425 index 7925bf0..d5143d2 100644
61426 --- a/include/linux/decompress/mm.h
61427 +++ b/include/linux/decompress/mm.h
61428 @@ -77,7 +77,7 @@ static void free(void *where)
61429 * warnings when not needed (indeed large_malloc / large_free are not
61430 * needed by inflate */
61431
61432 -#define malloc(a) kmalloc(a, GFP_KERNEL)
61433 +#define malloc(a) kmalloc((a), GFP_KERNEL)
61434 #define free(a) kfree(a)
61435
61436 #define large_malloc(a) vmalloc(a)
61437 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61438 index dfc099e..e583e66 100644
61439 --- a/include/linux/dma-mapping.h
61440 +++ b/include/linux/dma-mapping.h
61441 @@ -51,7 +51,7 @@ struct dma_map_ops {
61442 u64 (*get_required_mask)(struct device *dev);
61443 #endif
61444 int is_phys;
61445 -};
61446 +} __do_const;
61447
61448 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61449
61450 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
61451 index 56377df..4eb4990 100644
61452 --- a/include/linux/dmaengine.h
61453 +++ b/include/linux/dmaengine.h
61454 @@ -1007,9 +1007,9 @@ struct dma_pinned_list {
61455 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
61456 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
61457
61458 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61459 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61460 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
61461 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61462 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61463 struct dma_pinned_list *pinned_list, struct page *page,
61464 unsigned int offset, size_t len);
61465
61466 diff --git a/include/linux/efi.h b/include/linux/efi.h
61467 index ec45ccd..9923c32 100644
61468 --- a/include/linux/efi.h
61469 +++ b/include/linux/efi.h
61470 @@ -635,7 +635,7 @@ struct efivar_operations {
61471 efi_get_variable_t *get_variable;
61472 efi_get_next_variable_t *get_next_variable;
61473 efi_set_variable_t *set_variable;
61474 -};
61475 +} __no_const;
61476
61477 struct efivars {
61478 /*
61479 diff --git a/include/linux/elf.h b/include/linux/elf.h
61480 index 999b4f5..57753b4 100644
61481 --- a/include/linux/elf.h
61482 +++ b/include/linux/elf.h
61483 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
61484 #define PT_GNU_EH_FRAME 0x6474e550
61485
61486 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61487 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61488 +
61489 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61490 +
61491 +/* Constants for the e_flags field */
61492 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61493 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61494 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61495 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61496 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61497 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61498
61499 /*
61500 * Extended Numbering
61501 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
61502 #define DT_DEBUG 21
61503 #define DT_TEXTREL 22
61504 #define DT_JMPREL 23
61505 +#define DT_FLAGS 30
61506 + #define DF_TEXTREL 0x00000004
61507 #define DT_ENCODING 32
61508 #define OLD_DT_LOOS 0x60000000
61509 #define DT_LOOS 0x6000000d
61510 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
61511 #define PF_W 0x2
61512 #define PF_X 0x1
61513
61514 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61515 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61516 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61517 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61518 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61519 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61520 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61521 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61522 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61523 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61524 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61525 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61526 +
61527 typedef struct elf32_phdr{
61528 Elf32_Word p_type;
61529 Elf32_Off p_offset;
61530 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
61531 #define EI_OSABI 7
61532 #define EI_PAD 8
61533
61534 +#define EI_PAX 14
61535 +
61536 #define ELFMAG0 0x7f /* EI_MAG */
61537 #define ELFMAG1 'E'
61538 #define ELFMAG2 'L'
61539 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
61540 #define elf_note elf32_note
61541 #define elf_addr_t Elf32_Off
61542 #define Elf_Half Elf32_Half
61543 +#define elf_dyn Elf32_Dyn
61544
61545 #else
61546
61547 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
61548 #define elf_note elf64_note
61549 #define elf_addr_t Elf64_Off
61550 #define Elf_Half Elf64_Half
61551 +#define elf_dyn Elf64_Dyn
61552
61553 #endif
61554
61555 diff --git a/include/linux/filter.h b/include/linux/filter.h
61556 index 82b0135..917914d 100644
61557 --- a/include/linux/filter.h
61558 +++ b/include/linux/filter.h
61559 @@ -146,6 +146,7 @@ struct compat_sock_fprog {
61560
61561 struct sk_buff;
61562 struct sock;
61563 +struct bpf_jit_work;
61564
61565 struct sk_filter
61566 {
61567 @@ -153,6 +154,9 @@ struct sk_filter
61568 unsigned int len; /* Number of filter blocks */
61569 unsigned int (*bpf_func)(const struct sk_buff *skb,
61570 const struct sock_filter *filter);
61571 +#ifdef CONFIG_BPF_JIT
61572 + struct bpf_jit_work *work;
61573 +#endif
61574 struct rcu_head rcu;
61575 struct sock_filter insns[0];
61576 };
61577 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61578 index 7edcf10..714d5e8 100644
61579 --- a/include/linux/firewire.h
61580 +++ b/include/linux/firewire.h
61581 @@ -430,7 +430,7 @@ struct fw_iso_context {
61582 union {
61583 fw_iso_callback_t sc;
61584 fw_iso_mc_callback_t mc;
61585 - } callback;
61586 + } __no_const callback;
61587 void *callback_data;
61588 };
61589
61590 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
61591 index 0e4e2ee..4ff4312 100644
61592 --- a/include/linux/frontswap.h
61593 +++ b/include/linux/frontswap.h
61594 @@ -11,7 +11,7 @@ struct frontswap_ops {
61595 int (*load)(unsigned, pgoff_t, struct page *);
61596 void (*invalidate_page)(unsigned, pgoff_t);
61597 void (*invalidate_area)(unsigned);
61598 -};
61599 +} __no_const;
61600
61601 extern bool frontswap_enabled;
61602 extern struct frontswap_ops
61603 diff --git a/include/linux/fs.h b/include/linux/fs.h
61604 index 17fd887..8eebca0 100644
61605 --- a/include/linux/fs.h
61606 +++ b/include/linux/fs.h
61607 @@ -1663,7 +1663,8 @@ struct file_operations {
61608 int (*setlease)(struct file *, long, struct file_lock **);
61609 long (*fallocate)(struct file *file, int mode, loff_t offset,
61610 loff_t len);
61611 -};
61612 +} __do_const;
61613 +typedef struct file_operations __no_const file_operations_no_const;
61614
61615 struct inode_operations {
61616 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61617 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61618 index 003dc0f..3c4ea97 100644
61619 --- a/include/linux/fs_struct.h
61620 +++ b/include/linux/fs_struct.h
61621 @@ -6,7 +6,7 @@
61622 #include <linux/seqlock.h>
61623
61624 struct fs_struct {
61625 - int users;
61626 + atomic_t users;
61627 spinlock_t lock;
61628 seqcount_t seq;
61629 int umask;
61630 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61631 index ce31408..b1ad003 100644
61632 --- a/include/linux/fscache-cache.h
61633 +++ b/include/linux/fscache-cache.h
61634 @@ -102,7 +102,7 @@ struct fscache_operation {
61635 fscache_operation_release_t release;
61636 };
61637
61638 -extern atomic_t fscache_op_debug_id;
61639 +extern atomic_unchecked_t fscache_op_debug_id;
61640 extern void fscache_op_work_func(struct work_struct *work);
61641
61642 extern void fscache_enqueue_operation(struct fscache_operation *);
61643 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61644 {
61645 INIT_WORK(&op->work, fscache_op_work_func);
61646 atomic_set(&op->usage, 1);
61647 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61648 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61649 op->processor = processor;
61650 op->release = release;
61651 INIT_LIST_HEAD(&op->pend_link);
61652 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61653 index a6dfe69..569586df 100644
61654 --- a/include/linux/fsnotify.h
61655 +++ b/include/linux/fsnotify.h
61656 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61657 */
61658 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61659 {
61660 - return kstrdup(name, GFP_KERNEL);
61661 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61662 }
61663
61664 /*
61665 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61666 index 63d966d..cdcb717 100644
61667 --- a/include/linux/fsnotify_backend.h
61668 +++ b/include/linux/fsnotify_backend.h
61669 @@ -105,6 +105,7 @@ struct fsnotify_ops {
61670 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61671 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61672 };
61673 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61674
61675 /*
61676 * A group is a "thing" that wants to receive notification about filesystem
61677 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61678 index 176a939..1462211 100644
61679 --- a/include/linux/ftrace_event.h
61680 +++ b/include/linux/ftrace_event.h
61681 @@ -97,7 +97,7 @@ struct trace_event_functions {
61682 trace_print_func raw;
61683 trace_print_func hex;
61684 trace_print_func binary;
61685 -};
61686 +} __no_const;
61687
61688 struct trace_event {
61689 struct hlist_node node;
61690 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61691 extern int trace_add_event_call(struct ftrace_event_call *call);
61692 extern void trace_remove_event_call(struct ftrace_event_call *call);
61693
61694 -#define is_signed_type(type) (((type)(-1)) < 0)
61695 +#define is_signed_type(type) (((type)(-1)) < (type)1)
61696
61697 int trace_set_clr_event(const char *system, const char *event, int set);
61698
61699 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61700 index 017a7fb..33a8507 100644
61701 --- a/include/linux/genhd.h
61702 +++ b/include/linux/genhd.h
61703 @@ -185,7 +185,7 @@ struct gendisk {
61704 struct kobject *slave_dir;
61705
61706 struct timer_rand_state *random;
61707 - atomic_t sync_io; /* RAID */
61708 + atomic_unchecked_t sync_io; /* RAID */
61709 struct disk_events *ev;
61710 #ifdef CONFIG_BLK_DEV_INTEGRITY
61711 struct blk_integrity *integrity;
61712 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
61713 index 1e49be4..b8a9305 100644
61714 --- a/include/linux/gfp.h
61715 +++ b/include/linux/gfp.h
61716 @@ -38,6 +38,12 @@ struct vm_area_struct;
61717 #define ___GFP_OTHER_NODE 0x800000u
61718 #define ___GFP_WRITE 0x1000000u
61719
61720 +#ifdef CONFIG_PAX_USERCOPY_SLABS
61721 +#define ___GFP_USERCOPY 0x2000000u
61722 +#else
61723 +#define ___GFP_USERCOPY 0
61724 +#endif
61725 +
61726 /*
61727 * GFP bitmasks..
61728 *
61729 @@ -87,6 +93,7 @@ struct vm_area_struct;
61730 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
61731 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
61732 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
61733 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
61734
61735 /*
61736 * This may seem redundant, but it's a way of annotating false positives vs.
61737 @@ -94,7 +101,7 @@ struct vm_area_struct;
61738 */
61739 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
61740
61741 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
61742 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
61743 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
61744
61745 /* This equals 0, but use constants in case they ever change */
61746 @@ -148,6 +155,8 @@ struct vm_area_struct;
61747 /* 4GB DMA on some platforms */
61748 #define GFP_DMA32 __GFP_DMA32
61749
61750 +#define GFP_USERCOPY __GFP_USERCOPY
61751 +
61752 /* Convert GFP flags to their corresponding migrate type */
61753 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
61754 {
61755 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61756 new file mode 100644
61757 index 0000000..c938b1f
61758 --- /dev/null
61759 +++ b/include/linux/gracl.h
61760 @@ -0,0 +1,319 @@
61761 +#ifndef GR_ACL_H
61762 +#define GR_ACL_H
61763 +
61764 +#include <linux/grdefs.h>
61765 +#include <linux/resource.h>
61766 +#include <linux/capability.h>
61767 +#include <linux/dcache.h>
61768 +#include <asm/resource.h>
61769 +
61770 +/* Major status information */
61771 +
61772 +#define GR_VERSION "grsecurity 2.9.1"
61773 +#define GRSECURITY_VERSION 0x2901
61774 +
61775 +enum {
61776 + GR_SHUTDOWN = 0,
61777 + GR_ENABLE = 1,
61778 + GR_SPROLE = 2,
61779 + GR_RELOAD = 3,
61780 + GR_SEGVMOD = 4,
61781 + GR_STATUS = 5,
61782 + GR_UNSPROLE = 6,
61783 + GR_PASSSET = 7,
61784 + GR_SPROLEPAM = 8,
61785 +};
61786 +
61787 +/* Password setup definitions
61788 + * kernel/grhash.c */
61789 +enum {
61790 + GR_PW_LEN = 128,
61791 + GR_SALT_LEN = 16,
61792 + GR_SHA_LEN = 32,
61793 +};
61794 +
61795 +enum {
61796 + GR_SPROLE_LEN = 64,
61797 +};
61798 +
61799 +enum {
61800 + GR_NO_GLOB = 0,
61801 + GR_REG_GLOB,
61802 + GR_CREATE_GLOB
61803 +};
61804 +
61805 +#define GR_NLIMITS 32
61806 +
61807 +/* Begin Data Structures */
61808 +
61809 +struct sprole_pw {
61810 + unsigned char *rolename;
61811 + unsigned char salt[GR_SALT_LEN];
61812 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61813 +};
61814 +
61815 +struct name_entry {
61816 + __u32 key;
61817 + ino_t inode;
61818 + dev_t device;
61819 + char *name;
61820 + __u16 len;
61821 + __u8 deleted;
61822 + struct name_entry *prev;
61823 + struct name_entry *next;
61824 +};
61825 +
61826 +struct inodev_entry {
61827 + struct name_entry *nentry;
61828 + struct inodev_entry *prev;
61829 + struct inodev_entry *next;
61830 +};
61831 +
61832 +struct acl_role_db {
61833 + struct acl_role_label **r_hash;
61834 + __u32 r_size;
61835 +};
61836 +
61837 +struct inodev_db {
61838 + struct inodev_entry **i_hash;
61839 + __u32 i_size;
61840 +};
61841 +
61842 +struct name_db {
61843 + struct name_entry **n_hash;
61844 + __u32 n_size;
61845 +};
61846 +
61847 +struct crash_uid {
61848 + uid_t uid;
61849 + unsigned long expires;
61850 +};
61851 +
61852 +struct gr_hash_struct {
61853 + void **table;
61854 + void **nametable;
61855 + void *first;
61856 + __u32 table_size;
61857 + __u32 used_size;
61858 + int type;
61859 +};
61860 +
61861 +/* Userspace Grsecurity ACL data structures */
61862 +
61863 +struct acl_subject_label {
61864 + char *filename;
61865 + ino_t inode;
61866 + dev_t device;
61867 + __u32 mode;
61868 + kernel_cap_t cap_mask;
61869 + kernel_cap_t cap_lower;
61870 + kernel_cap_t cap_invert_audit;
61871 +
61872 + struct rlimit res[GR_NLIMITS];
61873 + __u32 resmask;
61874 +
61875 + __u8 user_trans_type;
61876 + __u8 group_trans_type;
61877 + uid_t *user_transitions;
61878 + gid_t *group_transitions;
61879 + __u16 user_trans_num;
61880 + __u16 group_trans_num;
61881 +
61882 + __u32 sock_families[2];
61883 + __u32 ip_proto[8];
61884 + __u32 ip_type;
61885 + struct acl_ip_label **ips;
61886 + __u32 ip_num;
61887 + __u32 inaddr_any_override;
61888 +
61889 + __u32 crashes;
61890 + unsigned long expires;
61891 +
61892 + struct acl_subject_label *parent_subject;
61893 + struct gr_hash_struct *hash;
61894 + struct acl_subject_label *prev;
61895 + struct acl_subject_label *next;
61896 +
61897 + struct acl_object_label **obj_hash;
61898 + __u32 obj_hash_size;
61899 + __u16 pax_flags;
61900 +};
61901 +
61902 +struct role_allowed_ip {
61903 + __u32 addr;
61904 + __u32 netmask;
61905 +
61906 + struct role_allowed_ip *prev;
61907 + struct role_allowed_ip *next;
61908 +};
61909 +
61910 +struct role_transition {
61911 + char *rolename;
61912 +
61913 + struct role_transition *prev;
61914 + struct role_transition *next;
61915 +};
61916 +
61917 +struct acl_role_label {
61918 + char *rolename;
61919 + uid_t uidgid;
61920 + __u16 roletype;
61921 +
61922 + __u16 auth_attempts;
61923 + unsigned long expires;
61924 +
61925 + struct acl_subject_label *root_label;
61926 + struct gr_hash_struct *hash;
61927 +
61928 + struct acl_role_label *prev;
61929 + struct acl_role_label *next;
61930 +
61931 + struct role_transition *transitions;
61932 + struct role_allowed_ip *allowed_ips;
61933 + uid_t *domain_children;
61934 + __u16 domain_child_num;
61935 +
61936 + umode_t umask;
61937 +
61938 + struct acl_subject_label **subj_hash;
61939 + __u32 subj_hash_size;
61940 +};
61941 +
61942 +struct user_acl_role_db {
61943 + struct acl_role_label **r_table;
61944 + __u32 num_pointers; /* Number of allocations to track */
61945 + __u32 num_roles; /* Number of roles */
61946 + __u32 num_domain_children; /* Number of domain children */
61947 + __u32 num_subjects; /* Number of subjects */
61948 + __u32 num_objects; /* Number of objects */
61949 +};
61950 +
61951 +struct acl_object_label {
61952 + char *filename;
61953 + ino_t inode;
61954 + dev_t device;
61955 + __u32 mode;
61956 +
61957 + struct acl_subject_label *nested;
61958 + struct acl_object_label *globbed;
61959 +
61960 + /* next two structures not used */
61961 +
61962 + struct acl_object_label *prev;
61963 + struct acl_object_label *next;
61964 +};
61965 +
61966 +struct acl_ip_label {
61967 + char *iface;
61968 + __u32 addr;
61969 + __u32 netmask;
61970 + __u16 low, high;
61971 + __u8 mode;
61972 + __u32 type;
61973 + __u32 proto[8];
61974 +
61975 + /* next two structures not used */
61976 +
61977 + struct acl_ip_label *prev;
61978 + struct acl_ip_label *next;
61979 +};
61980 +
61981 +struct gr_arg {
61982 + struct user_acl_role_db role_db;
61983 + unsigned char pw[GR_PW_LEN];
61984 + unsigned char salt[GR_SALT_LEN];
61985 + unsigned char sum[GR_SHA_LEN];
61986 + unsigned char sp_role[GR_SPROLE_LEN];
61987 + struct sprole_pw *sprole_pws;
61988 + dev_t segv_device;
61989 + ino_t segv_inode;
61990 + uid_t segv_uid;
61991 + __u16 num_sprole_pws;
61992 + __u16 mode;
61993 +};
61994 +
61995 +struct gr_arg_wrapper {
61996 + struct gr_arg *arg;
61997 + __u32 version;
61998 + __u32 size;
61999 +};
62000 +
62001 +struct subject_map {
62002 + struct acl_subject_label *user;
62003 + struct acl_subject_label *kernel;
62004 + struct subject_map *prev;
62005 + struct subject_map *next;
62006 +};
62007 +
62008 +struct acl_subj_map_db {
62009 + struct subject_map **s_hash;
62010 + __u32 s_size;
62011 +};
62012 +
62013 +/* End Data Structures Section */
62014 +
62015 +/* Hash functions generated by empirical testing by Brad Spengler
62016 + Makes good use of the low bits of the inode. Generally 0-1 times
62017 + in loop for successful match. 0-3 for unsuccessful match.
62018 + Shift/add algorithm with modulus of table size and an XOR*/
62019 +
62020 +static __inline__ unsigned int
62021 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
62022 +{
62023 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
62024 +}
62025 +
62026 + static __inline__ unsigned int
62027 +shash(const struct acl_subject_label *userp, const unsigned int sz)
62028 +{
62029 + return ((const unsigned long)userp % sz);
62030 +}
62031 +
62032 +static __inline__ unsigned int
62033 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
62034 +{
62035 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
62036 +}
62037 +
62038 +static __inline__ unsigned int
62039 +nhash(const char *name, const __u16 len, const unsigned int sz)
62040 +{
62041 + return full_name_hash((const unsigned char *)name, len) % sz;
62042 +}
62043 +
62044 +#define FOR_EACH_ROLE_START(role) \
62045 + role = role_list; \
62046 + while (role) {
62047 +
62048 +#define FOR_EACH_ROLE_END(role) \
62049 + role = role->prev; \
62050 + }
62051 +
62052 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
62053 + subj = NULL; \
62054 + iter = 0; \
62055 + while (iter < role->subj_hash_size) { \
62056 + if (subj == NULL) \
62057 + subj = role->subj_hash[iter]; \
62058 + if (subj == NULL) { \
62059 + iter++; \
62060 + continue; \
62061 + }
62062 +
62063 +#define FOR_EACH_SUBJECT_END(subj,iter) \
62064 + subj = subj->next; \
62065 + if (subj == NULL) \
62066 + iter++; \
62067 + }
62068 +
62069 +
62070 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
62071 + subj = role->hash->first; \
62072 + while (subj != NULL) {
62073 +
62074 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
62075 + subj = subj->next; \
62076 + }
62077 +
62078 +#endif
62079 +
62080 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
62081 new file mode 100644
62082 index 0000000..323ecf2
62083 --- /dev/null
62084 +++ b/include/linux/gralloc.h
62085 @@ -0,0 +1,9 @@
62086 +#ifndef __GRALLOC_H
62087 +#define __GRALLOC_H
62088 +
62089 +void acl_free_all(void);
62090 +int acl_alloc_stack_init(unsigned long size);
62091 +void *acl_alloc(unsigned long len);
62092 +void *acl_alloc_num(unsigned long num, unsigned long len);
62093 +
62094 +#endif
62095 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
62096 new file mode 100644
62097 index 0000000..b30e9bc
62098 --- /dev/null
62099 +++ b/include/linux/grdefs.h
62100 @@ -0,0 +1,140 @@
62101 +#ifndef GRDEFS_H
62102 +#define GRDEFS_H
62103 +
62104 +/* Begin grsecurity status declarations */
62105 +
62106 +enum {
62107 + GR_READY = 0x01,
62108 + GR_STATUS_INIT = 0x00 // disabled state
62109 +};
62110 +
62111 +/* Begin ACL declarations */
62112 +
62113 +/* Role flags */
62114 +
62115 +enum {
62116 + GR_ROLE_USER = 0x0001,
62117 + GR_ROLE_GROUP = 0x0002,
62118 + GR_ROLE_DEFAULT = 0x0004,
62119 + GR_ROLE_SPECIAL = 0x0008,
62120 + GR_ROLE_AUTH = 0x0010,
62121 + GR_ROLE_NOPW = 0x0020,
62122 + GR_ROLE_GOD = 0x0040,
62123 + GR_ROLE_LEARN = 0x0080,
62124 + GR_ROLE_TPE = 0x0100,
62125 + GR_ROLE_DOMAIN = 0x0200,
62126 + GR_ROLE_PAM = 0x0400,
62127 + GR_ROLE_PERSIST = 0x0800
62128 +};
62129 +
62130 +/* ACL Subject and Object mode flags */
62131 +enum {
62132 + GR_DELETED = 0x80000000
62133 +};
62134 +
62135 +/* ACL Object-only mode flags */
62136 +enum {
62137 + GR_READ = 0x00000001,
62138 + GR_APPEND = 0x00000002,
62139 + GR_WRITE = 0x00000004,
62140 + GR_EXEC = 0x00000008,
62141 + GR_FIND = 0x00000010,
62142 + GR_INHERIT = 0x00000020,
62143 + GR_SETID = 0x00000040,
62144 + GR_CREATE = 0x00000080,
62145 + GR_DELETE = 0x00000100,
62146 + GR_LINK = 0x00000200,
62147 + GR_AUDIT_READ = 0x00000400,
62148 + GR_AUDIT_APPEND = 0x00000800,
62149 + GR_AUDIT_WRITE = 0x00001000,
62150 + GR_AUDIT_EXEC = 0x00002000,
62151 + GR_AUDIT_FIND = 0x00004000,
62152 + GR_AUDIT_INHERIT= 0x00008000,
62153 + GR_AUDIT_SETID = 0x00010000,
62154 + GR_AUDIT_CREATE = 0x00020000,
62155 + GR_AUDIT_DELETE = 0x00040000,
62156 + GR_AUDIT_LINK = 0x00080000,
62157 + GR_PTRACERD = 0x00100000,
62158 + GR_NOPTRACE = 0x00200000,
62159 + GR_SUPPRESS = 0x00400000,
62160 + GR_NOLEARN = 0x00800000,
62161 + GR_INIT_TRANSFER= 0x01000000
62162 +};
62163 +
62164 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
62165 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
62166 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
62167 +
62168 +/* ACL subject-only mode flags */
62169 +enum {
62170 + GR_KILL = 0x00000001,
62171 + GR_VIEW = 0x00000002,
62172 + GR_PROTECTED = 0x00000004,
62173 + GR_LEARN = 0x00000008,
62174 + GR_OVERRIDE = 0x00000010,
62175 + /* just a placeholder, this mode is only used in userspace */
62176 + GR_DUMMY = 0x00000020,
62177 + GR_PROTSHM = 0x00000040,
62178 + GR_KILLPROC = 0x00000080,
62179 + GR_KILLIPPROC = 0x00000100,
62180 + /* just a placeholder, this mode is only used in userspace */
62181 + GR_NOTROJAN = 0x00000200,
62182 + GR_PROTPROCFD = 0x00000400,
62183 + GR_PROCACCT = 0x00000800,
62184 + GR_RELAXPTRACE = 0x00001000,
62185 + GR_NESTED = 0x00002000,
62186 + GR_INHERITLEARN = 0x00004000,
62187 + GR_PROCFIND = 0x00008000,
62188 + GR_POVERRIDE = 0x00010000,
62189 + GR_KERNELAUTH = 0x00020000,
62190 + GR_ATSECURE = 0x00040000,
62191 + GR_SHMEXEC = 0x00080000
62192 +};
62193 +
62194 +enum {
62195 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
62196 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
62197 + GR_PAX_ENABLE_MPROTECT = 0x0004,
62198 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
62199 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
62200 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
62201 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
62202 + GR_PAX_DISABLE_MPROTECT = 0x0400,
62203 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
62204 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
62205 +};
62206 +
62207 +enum {
62208 + GR_ID_USER = 0x01,
62209 + GR_ID_GROUP = 0x02,
62210 +};
62211 +
62212 +enum {
62213 + GR_ID_ALLOW = 0x01,
62214 + GR_ID_DENY = 0x02,
62215 +};
62216 +
62217 +#define GR_CRASH_RES 31
62218 +#define GR_UIDTABLE_MAX 500
62219 +
62220 +/* begin resource learning section */
62221 +enum {
62222 + GR_RLIM_CPU_BUMP = 60,
62223 + GR_RLIM_FSIZE_BUMP = 50000,
62224 + GR_RLIM_DATA_BUMP = 10000,
62225 + GR_RLIM_STACK_BUMP = 1000,
62226 + GR_RLIM_CORE_BUMP = 10000,
62227 + GR_RLIM_RSS_BUMP = 500000,
62228 + GR_RLIM_NPROC_BUMP = 1,
62229 + GR_RLIM_NOFILE_BUMP = 5,
62230 + GR_RLIM_MEMLOCK_BUMP = 50000,
62231 + GR_RLIM_AS_BUMP = 500000,
62232 + GR_RLIM_LOCKS_BUMP = 2,
62233 + GR_RLIM_SIGPENDING_BUMP = 5,
62234 + GR_RLIM_MSGQUEUE_BUMP = 10000,
62235 + GR_RLIM_NICE_BUMP = 1,
62236 + GR_RLIM_RTPRIO_BUMP = 1,
62237 + GR_RLIM_RTTIME_BUMP = 1000000
62238 +};
62239 +
62240 +#endif
62241 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
62242 new file mode 100644
62243 index 0000000..c9292f7
62244 --- /dev/null
62245 +++ b/include/linux/grinternal.h
62246 @@ -0,0 +1,223 @@
62247 +#ifndef __GRINTERNAL_H
62248 +#define __GRINTERNAL_H
62249 +
62250 +#ifdef CONFIG_GRKERNSEC
62251 +
62252 +#include <linux/fs.h>
62253 +#include <linux/mnt_namespace.h>
62254 +#include <linux/nsproxy.h>
62255 +#include <linux/gracl.h>
62256 +#include <linux/grdefs.h>
62257 +#include <linux/grmsg.h>
62258 +
62259 +void gr_add_learn_entry(const char *fmt, ...)
62260 + __attribute__ ((format (printf, 1, 2)));
62261 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62262 + const struct vfsmount *mnt);
62263 +__u32 gr_check_create(const struct dentry *new_dentry,
62264 + const struct dentry *parent,
62265 + const struct vfsmount *mnt, const __u32 mode);
62266 +int gr_check_protected_task(const struct task_struct *task);
62267 +__u32 to_gr_audit(const __u32 reqmode);
62268 +int gr_set_acls(const int type);
62269 +int gr_apply_subject_to_task(struct task_struct *task);
62270 +int gr_acl_is_enabled(void);
62271 +char gr_roletype_to_char(void);
62272 +
62273 +void gr_handle_alertkill(struct task_struct *task);
62274 +char *gr_to_filename(const struct dentry *dentry,
62275 + const struct vfsmount *mnt);
62276 +char *gr_to_filename1(const struct dentry *dentry,
62277 + const struct vfsmount *mnt);
62278 +char *gr_to_filename2(const struct dentry *dentry,
62279 + const struct vfsmount *mnt);
62280 +char *gr_to_filename3(const struct dentry *dentry,
62281 + const struct vfsmount *mnt);
62282 +
62283 +extern int grsec_enable_ptrace_readexec;
62284 +extern int grsec_enable_harden_ptrace;
62285 +extern int grsec_enable_link;
62286 +extern int grsec_enable_fifo;
62287 +extern int grsec_enable_execve;
62288 +extern int grsec_enable_shm;
62289 +extern int grsec_enable_execlog;
62290 +extern int grsec_enable_signal;
62291 +extern int grsec_enable_audit_ptrace;
62292 +extern int grsec_enable_forkfail;
62293 +extern int grsec_enable_time;
62294 +extern int grsec_enable_rofs;
62295 +extern int grsec_enable_chroot_shmat;
62296 +extern int grsec_enable_chroot_mount;
62297 +extern int grsec_enable_chroot_double;
62298 +extern int grsec_enable_chroot_pivot;
62299 +extern int grsec_enable_chroot_chdir;
62300 +extern int grsec_enable_chroot_chmod;
62301 +extern int grsec_enable_chroot_mknod;
62302 +extern int grsec_enable_chroot_fchdir;
62303 +extern int grsec_enable_chroot_nice;
62304 +extern int grsec_enable_chroot_execlog;
62305 +extern int grsec_enable_chroot_caps;
62306 +extern int grsec_enable_chroot_sysctl;
62307 +extern int grsec_enable_chroot_unix;
62308 +extern int grsec_enable_symlinkown;
62309 +extern int grsec_symlinkown_gid;
62310 +extern int grsec_enable_tpe;
62311 +extern int grsec_tpe_gid;
62312 +extern int grsec_enable_tpe_all;
62313 +extern int grsec_enable_tpe_invert;
62314 +extern int grsec_enable_socket_all;
62315 +extern int grsec_socket_all_gid;
62316 +extern int grsec_enable_socket_client;
62317 +extern int grsec_socket_client_gid;
62318 +extern int grsec_enable_socket_server;
62319 +extern int grsec_socket_server_gid;
62320 +extern int grsec_audit_gid;
62321 +extern int grsec_enable_group;
62322 +extern int grsec_enable_audit_textrel;
62323 +extern int grsec_enable_log_rwxmaps;
62324 +extern int grsec_enable_mount;
62325 +extern int grsec_enable_chdir;
62326 +extern int grsec_resource_logging;
62327 +extern int grsec_enable_blackhole;
62328 +extern int grsec_lastack_retries;
62329 +extern int grsec_enable_brute;
62330 +extern int grsec_lock;
62331 +
62332 +extern spinlock_t grsec_alert_lock;
62333 +extern unsigned long grsec_alert_wtime;
62334 +extern unsigned long grsec_alert_fyet;
62335 +
62336 +extern spinlock_t grsec_audit_lock;
62337 +
62338 +extern rwlock_t grsec_exec_file_lock;
62339 +
62340 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62341 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62342 + (tsk)->exec_file->f_vfsmnt) : "/")
62343 +
62344 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62345 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62346 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62347 +
62348 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62349 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
62350 + (tsk)->exec_file->f_vfsmnt) : "/")
62351 +
62352 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62353 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62354 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62355 +
62356 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62357 +
62358 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62359 +
62360 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62361 + (task)->pid, (cred)->uid, \
62362 + (cred)->euid, (cred)->gid, (cred)->egid, \
62363 + gr_parent_task_fullpath(task), \
62364 + (task)->real_parent->comm, (task)->real_parent->pid, \
62365 + (pcred)->uid, (pcred)->euid, \
62366 + (pcred)->gid, (pcred)->egid
62367 +
62368 +#define GR_CHROOT_CAPS {{ \
62369 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62370 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62371 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62372 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62373 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62374 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62375 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
62376 +
62377 +#define security_learn(normal_msg,args...) \
62378 +({ \
62379 + read_lock(&grsec_exec_file_lock); \
62380 + gr_add_learn_entry(normal_msg "\n", ## args); \
62381 + read_unlock(&grsec_exec_file_lock); \
62382 +})
62383 +
62384 +enum {
62385 + GR_DO_AUDIT,
62386 + GR_DONT_AUDIT,
62387 + /* used for non-audit messages that we shouldn't kill the task on */
62388 + GR_DONT_AUDIT_GOOD
62389 +};
62390 +
62391 +enum {
62392 + GR_TTYSNIFF,
62393 + GR_RBAC,
62394 + GR_RBAC_STR,
62395 + GR_STR_RBAC,
62396 + GR_RBAC_MODE2,
62397 + GR_RBAC_MODE3,
62398 + GR_FILENAME,
62399 + GR_SYSCTL_HIDDEN,
62400 + GR_NOARGS,
62401 + GR_ONE_INT,
62402 + GR_ONE_INT_TWO_STR,
62403 + GR_ONE_STR,
62404 + GR_STR_INT,
62405 + GR_TWO_STR_INT,
62406 + GR_TWO_INT,
62407 + GR_TWO_U64,
62408 + GR_THREE_INT,
62409 + GR_FIVE_INT_TWO_STR,
62410 + GR_TWO_STR,
62411 + GR_THREE_STR,
62412 + GR_FOUR_STR,
62413 + GR_STR_FILENAME,
62414 + GR_FILENAME_STR,
62415 + GR_FILENAME_TWO_INT,
62416 + GR_FILENAME_TWO_INT_STR,
62417 + GR_TEXTREL,
62418 + GR_PTRACE,
62419 + GR_RESOURCE,
62420 + GR_CAP,
62421 + GR_SIG,
62422 + GR_SIG2,
62423 + GR_CRASH1,
62424 + GR_CRASH2,
62425 + GR_PSACCT,
62426 + GR_RWXMAP
62427 +};
62428 +
62429 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62430 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62431 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62432 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62433 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62434 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62435 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62436 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62437 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62438 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62439 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62440 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62441 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62442 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62443 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62444 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62445 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62446 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62447 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62448 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62449 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62450 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62451 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62452 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62453 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62454 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62455 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62456 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62457 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62458 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62459 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62460 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62461 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62462 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62463 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62464 +
62465 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62466 +
62467 +#endif
62468 +
62469 +#endif
62470 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62471 new file mode 100644
62472 index 0000000..54f4e85
62473 --- /dev/null
62474 +++ b/include/linux/grmsg.h
62475 @@ -0,0 +1,110 @@
62476 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62477 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62478 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62479 +#define GR_STOPMOD_MSG "denied modification of module state by "
62480 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62481 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62482 +#define GR_IOPERM_MSG "denied use of ioperm() by "
62483 +#define GR_IOPL_MSG "denied use of iopl() by "
62484 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62485 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62486 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62487 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62488 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62489 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62490 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62491 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62492 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62493 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62494 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62495 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62496 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62497 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62498 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62499 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62500 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62501 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62502 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62503 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62504 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62505 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62506 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62507 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62508 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62509 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62510 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
62511 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62512 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62513 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62514 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62515 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62516 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62517 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62518 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62519 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62520 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62521 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62522 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62523 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62524 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62525 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62526 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62527 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62528 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62529 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62530 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62531 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62532 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62533 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62534 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62535 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62536 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62537 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62538 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62539 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62540 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62541 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62542 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62543 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62544 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62545 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62546 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62547 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
62548 +#define GR_NICE_CHROOT_MSG "denied priority change by "
62549 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62550 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62551 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62552 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62553 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62554 +#define GR_TIME_MSG "time set by "
62555 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62556 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62557 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62558 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62559 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62560 +#define GR_BIND_MSG "denied bind() by "
62561 +#define GR_CONNECT_MSG "denied connect() by "
62562 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62563 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62564 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62565 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62566 +#define GR_CAP_ACL_MSG "use of %s denied for "
62567 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62568 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62569 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62570 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62571 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62572 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62573 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62574 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62575 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62576 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62577 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62578 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62579 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62580 +#define GR_VM86_MSG "denied use of vm86 by "
62581 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62582 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
62583 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62584 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
62585 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
62586 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62587 new file mode 100644
62588 index 0000000..38bfb04
62589 --- /dev/null
62590 +++ b/include/linux/grsecurity.h
62591 @@ -0,0 +1,233 @@
62592 +#ifndef GR_SECURITY_H
62593 +#define GR_SECURITY_H
62594 +#include <linux/fs.h>
62595 +#include <linux/fs_struct.h>
62596 +#include <linux/binfmts.h>
62597 +#include <linux/gracl.h>
62598 +
62599 +/* notify of brain-dead configs */
62600 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62601 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62602 +#endif
62603 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62604 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62605 +#endif
62606 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62607 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62608 +#endif
62609 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62610 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
62611 +#endif
62612 +
62613 +#include <linux/compat.h>
62614 +
62615 +struct user_arg_ptr {
62616 +#ifdef CONFIG_COMPAT
62617 + bool is_compat;
62618 +#endif
62619 + union {
62620 + const char __user *const __user *native;
62621 +#ifdef CONFIG_COMPAT
62622 + compat_uptr_t __user *compat;
62623 +#endif
62624 + } ptr;
62625 +};
62626 +
62627 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62628 +void gr_handle_brute_check(void);
62629 +void gr_handle_kernel_exploit(void);
62630 +int gr_process_user_ban(void);
62631 +
62632 +char gr_roletype_to_char(void);
62633 +
62634 +int gr_acl_enable_at_secure(void);
62635 +
62636 +int gr_check_user_change(int real, int effective, int fs);
62637 +int gr_check_group_change(int real, int effective, int fs);
62638 +
62639 +void gr_del_task_from_ip_table(struct task_struct *p);
62640 +
62641 +int gr_pid_is_chrooted(struct task_struct *p);
62642 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62643 +int gr_handle_chroot_nice(void);
62644 +int gr_handle_chroot_sysctl(const int op);
62645 +int gr_handle_chroot_setpriority(struct task_struct *p,
62646 + const int niceval);
62647 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62648 +int gr_handle_chroot_chroot(const struct dentry *dentry,
62649 + const struct vfsmount *mnt);
62650 +void gr_handle_chroot_chdir(struct path *path);
62651 +int gr_handle_chroot_chmod(const struct dentry *dentry,
62652 + const struct vfsmount *mnt, const int mode);
62653 +int gr_handle_chroot_mknod(const struct dentry *dentry,
62654 + const struct vfsmount *mnt, const int mode);
62655 +int gr_handle_chroot_mount(const struct dentry *dentry,
62656 + const struct vfsmount *mnt,
62657 + const char *dev_name);
62658 +int gr_handle_chroot_pivot(void);
62659 +int gr_handle_chroot_unix(const pid_t pid);
62660 +
62661 +int gr_handle_rawio(const struct inode *inode);
62662 +
62663 +void gr_handle_ioperm(void);
62664 +void gr_handle_iopl(void);
62665 +
62666 +umode_t gr_acl_umask(void);
62667 +
62668 +int gr_tpe_allow(const struct file *file);
62669 +
62670 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62671 +void gr_clear_chroot_entries(struct task_struct *task);
62672 +
62673 +void gr_log_forkfail(const int retval);
62674 +void gr_log_timechange(void);
62675 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62676 +void gr_log_chdir(const struct dentry *dentry,
62677 + const struct vfsmount *mnt);
62678 +void gr_log_chroot_exec(const struct dentry *dentry,
62679 + const struct vfsmount *mnt);
62680 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62681 +void gr_log_remount(const char *devname, const int retval);
62682 +void gr_log_unmount(const char *devname, const int retval);
62683 +void gr_log_mount(const char *from, const char *to, const int retval);
62684 +void gr_log_textrel(struct vm_area_struct *vma);
62685 +void gr_log_rwxmmap(struct file *file);
62686 +void gr_log_rwxmprotect(struct file *file);
62687 +
62688 +int gr_handle_follow_link(const struct inode *parent,
62689 + const struct inode *inode,
62690 + const struct dentry *dentry,
62691 + const struct vfsmount *mnt);
62692 +int gr_handle_fifo(const struct dentry *dentry,
62693 + const struct vfsmount *mnt,
62694 + const struct dentry *dir, const int flag,
62695 + const int acc_mode);
62696 +int gr_handle_hardlink(const struct dentry *dentry,
62697 + const struct vfsmount *mnt,
62698 + struct inode *inode,
62699 + const int mode, const char *to);
62700 +
62701 +int gr_is_capable(const int cap);
62702 +int gr_is_capable_nolog(const int cap);
62703 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62704 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62705 +
62706 +void gr_learn_resource(const struct task_struct *task, const int limit,
62707 + const unsigned long wanted, const int gt);
62708 +void gr_copy_label(struct task_struct *tsk);
62709 +void gr_handle_crash(struct task_struct *task, const int sig);
62710 +int gr_handle_signal(const struct task_struct *p, const int sig);
62711 +int gr_check_crash_uid(const uid_t uid);
62712 +int gr_check_protected_task(const struct task_struct *task);
62713 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62714 +int gr_acl_handle_mmap(const struct file *file,
62715 + const unsigned long prot);
62716 +int gr_acl_handle_mprotect(const struct file *file,
62717 + const unsigned long prot);
62718 +int gr_check_hidden_task(const struct task_struct *tsk);
62719 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62720 + const struct vfsmount *mnt);
62721 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
62722 + const struct vfsmount *mnt);
62723 +__u32 gr_acl_handle_access(const struct dentry *dentry,
62724 + const struct vfsmount *mnt, const int fmode);
62725 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62726 + const struct vfsmount *mnt, umode_t *mode);
62727 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
62728 + const struct vfsmount *mnt);
62729 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62730 + const struct vfsmount *mnt);
62731 +int gr_handle_ptrace(struct task_struct *task, const long request);
62732 +int gr_handle_proc_ptrace(struct task_struct *task);
62733 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
62734 + const struct vfsmount *mnt);
62735 +int gr_check_crash_exec(const struct file *filp);
62736 +int gr_acl_is_enabled(void);
62737 +void gr_set_kernel_label(struct task_struct *task);
62738 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
62739 + const gid_t gid);
62740 +int gr_set_proc_label(const struct dentry *dentry,
62741 + const struct vfsmount *mnt,
62742 + const int unsafe_flags);
62743 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62744 + const struct vfsmount *mnt);
62745 +__u32 gr_acl_handle_open(const struct dentry *dentry,
62746 + const struct vfsmount *mnt, int acc_mode);
62747 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
62748 + const struct dentry *p_dentry,
62749 + const struct vfsmount *p_mnt,
62750 + int open_flags, int acc_mode, const int imode);
62751 +void gr_handle_create(const struct dentry *dentry,
62752 + const struct vfsmount *mnt);
62753 +void gr_handle_proc_create(const struct dentry *dentry,
62754 + const struct inode *inode);
62755 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62756 + const struct dentry *parent_dentry,
62757 + const struct vfsmount *parent_mnt,
62758 + const int mode);
62759 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62760 + const struct dentry *parent_dentry,
62761 + const struct vfsmount *parent_mnt);
62762 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62763 + const struct vfsmount *mnt);
62764 +void gr_handle_delete(const ino_t ino, const dev_t dev);
62765 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62766 + const struct vfsmount *mnt);
62767 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62768 + const struct dentry *parent_dentry,
62769 + const struct vfsmount *parent_mnt,
62770 + const char *from);
62771 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62772 + const struct dentry *parent_dentry,
62773 + const struct vfsmount *parent_mnt,
62774 + const struct dentry *old_dentry,
62775 + const struct vfsmount *old_mnt, const char *to);
62776 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
62777 +int gr_acl_handle_rename(struct dentry *new_dentry,
62778 + struct dentry *parent_dentry,
62779 + const struct vfsmount *parent_mnt,
62780 + struct dentry *old_dentry,
62781 + struct inode *old_parent_inode,
62782 + struct vfsmount *old_mnt, const char *newname);
62783 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62784 + struct dentry *old_dentry,
62785 + struct dentry *new_dentry,
62786 + struct vfsmount *mnt, const __u8 replace);
62787 +__u32 gr_check_link(const struct dentry *new_dentry,
62788 + const struct dentry *parent_dentry,
62789 + const struct vfsmount *parent_mnt,
62790 + const struct dentry *old_dentry,
62791 + const struct vfsmount *old_mnt);
62792 +int gr_acl_handle_filldir(const struct file *file, const char *name,
62793 + const unsigned int namelen, const ino_t ino);
62794 +
62795 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
62796 + const struct vfsmount *mnt);
62797 +void gr_acl_handle_exit(void);
62798 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
62799 +int gr_acl_handle_procpidmem(const struct task_struct *task);
62800 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62801 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62802 +void gr_audit_ptrace(struct task_struct *task);
62803 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62804 +
62805 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62806 +
62807 +#ifdef CONFIG_GRKERNSEC
62808 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62809 +void gr_handle_vm86(void);
62810 +void gr_handle_mem_readwrite(u64 from, u64 to);
62811 +
62812 +void gr_log_badprocpid(const char *entry);
62813 +
62814 +extern int grsec_enable_dmesg;
62815 +extern int grsec_disable_privio;
62816 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62817 +extern int grsec_enable_chroot_findtask;
62818 +#endif
62819 +#ifdef CONFIG_GRKERNSEC_SETXID
62820 +extern int grsec_enable_setxid;
62821 +#endif
62822 +#endif
62823 +
62824 +#endif
62825 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62826 new file mode 100644
62827 index 0000000..e7ffaaf
62828 --- /dev/null
62829 +++ b/include/linux/grsock.h
62830 @@ -0,0 +1,19 @@
62831 +#ifndef __GRSOCK_H
62832 +#define __GRSOCK_H
62833 +
62834 +extern void gr_attach_curr_ip(const struct sock *sk);
62835 +extern int gr_handle_sock_all(const int family, const int type,
62836 + const int protocol);
62837 +extern int gr_handle_sock_server(const struct sockaddr *sck);
62838 +extern int gr_handle_sock_server_other(const struct sock *sck);
62839 +extern int gr_handle_sock_client(const struct sockaddr *sck);
62840 +extern int gr_search_connect(struct socket * sock,
62841 + struct sockaddr_in * addr);
62842 +extern int gr_search_bind(struct socket * sock,
62843 + struct sockaddr_in * addr);
62844 +extern int gr_search_listen(struct socket * sock);
62845 +extern int gr_search_accept(struct socket * sock);
62846 +extern int gr_search_socket(const int domain, const int type,
62847 + const int protocol);
62848 +
62849 +#endif
62850 diff --git a/include/linux/hid.h b/include/linux/hid.h
62851 index 449fa38..b37c8cc 100644
62852 --- a/include/linux/hid.h
62853 +++ b/include/linux/hid.h
62854 @@ -704,7 +704,7 @@ struct hid_ll_driver {
62855 unsigned int code, int value);
62856
62857 int (*parse)(struct hid_device *hdev);
62858 -};
62859 +} __no_const;
62860
62861 #define PM_HINT_FULLON 1<<5
62862 #define PM_HINT_NORMAL 1<<1
62863 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62864 index d3999b4..1304cb4 100644
62865 --- a/include/linux/highmem.h
62866 +++ b/include/linux/highmem.h
62867 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
62868 kunmap_atomic(kaddr);
62869 }
62870
62871 +static inline void sanitize_highpage(struct page *page)
62872 +{
62873 + void *kaddr;
62874 + unsigned long flags;
62875 +
62876 + local_irq_save(flags);
62877 + kaddr = kmap_atomic(page);
62878 + clear_page(kaddr);
62879 + kunmap_atomic(kaddr);
62880 + local_irq_restore(flags);
62881 +}
62882 +
62883 static inline void zero_user_segments(struct page *page,
62884 unsigned start1, unsigned end1,
62885 unsigned start2, unsigned end2)
62886 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62887 index ddfa041..a44cfff 100644
62888 --- a/include/linux/i2c.h
62889 +++ b/include/linux/i2c.h
62890 @@ -366,6 +366,7 @@ struct i2c_algorithm {
62891 /* To determine what the adapter supports */
62892 u32 (*functionality) (struct i2c_adapter *);
62893 };
62894 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62895
62896 /*
62897 * i2c_adapter is the structure used to identify a physical i2c bus along
62898 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62899 index d23c3c2..eb63c81 100644
62900 --- a/include/linux/i2o.h
62901 +++ b/include/linux/i2o.h
62902 @@ -565,7 +565,7 @@ struct i2o_controller {
62903 struct i2o_device *exec; /* Executive */
62904 #if BITS_PER_LONG == 64
62905 spinlock_t context_list_lock; /* lock for context_list */
62906 - atomic_t context_list_counter; /* needed for unique contexts */
62907 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62908 struct list_head context_list; /* list of context id's
62909 and pointers */
62910 #endif
62911 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62912 index 8185f57..7b2d222 100644
62913 --- a/include/linux/if_team.h
62914 +++ b/include/linux/if_team.h
62915 @@ -74,6 +74,7 @@ struct team_mode_ops {
62916 void (*port_leave)(struct team *team, struct team_port *port);
62917 void (*port_change_mac)(struct team *team, struct team_port *port);
62918 };
62919 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62920
62921 enum team_option_type {
62922 TEAM_OPTION_TYPE_U32,
62923 @@ -136,7 +137,7 @@ struct team {
62924 struct list_head option_inst_list; /* list of option instances */
62925
62926 const struct team_mode *mode;
62927 - struct team_mode_ops ops;
62928 + team_mode_ops_no_const ops;
62929 long mode_priv[TEAM_MODE_PRIV_LONGS];
62930 };
62931
62932 diff --git a/include/linux/init.h b/include/linux/init.h
62933 index 6b95109..7616d09 100644
62934 --- a/include/linux/init.h
62935 +++ b/include/linux/init.h
62936 @@ -39,9 +39,15 @@
62937 * Also note, that this data cannot be "const".
62938 */
62939
62940 +#ifdef MODULE
62941 +#define add_latent_entropy
62942 +#else
62943 +#define add_latent_entropy __latent_entropy
62944 +#endif
62945 +
62946 /* These are for everybody (although not all archs will actually
62947 discard it in modules) */
62948 -#define __init __section(.init.text) __cold notrace
62949 +#define __init __section(.init.text) __cold notrace add_latent_entropy
62950 #define __initdata __section(.init.data)
62951 #define __initconst __section(.init.rodata)
62952 #define __exitdata __section(.exit.data)
62953 @@ -83,7 +89,7 @@
62954 #define __exit __section(.exit.text) __exitused __cold notrace
62955
62956 /* Used for HOTPLUG */
62957 -#define __devinit __section(.devinit.text) __cold notrace
62958 +#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
62959 #define __devinitdata __section(.devinit.data)
62960 #define __devinitconst __section(.devinit.rodata)
62961 #define __devexit __section(.devexit.text) __exitused __cold notrace
62962 @@ -91,7 +97,7 @@
62963 #define __devexitconst __section(.devexit.rodata)
62964
62965 /* Used for HOTPLUG_CPU */
62966 -#define __cpuinit __section(.cpuinit.text) __cold notrace
62967 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
62968 #define __cpuinitdata __section(.cpuinit.data)
62969 #define __cpuinitconst __section(.cpuinit.rodata)
62970 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
62971 @@ -99,7 +105,7 @@
62972 #define __cpuexitconst __section(.cpuexit.rodata)
62973
62974 /* Used for MEMORY_HOTPLUG */
62975 -#define __meminit __section(.meminit.text) __cold notrace
62976 +#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
62977 #define __meminitdata __section(.meminit.data)
62978 #define __meminitconst __section(.meminit.rodata)
62979 #define __memexit __section(.memexit.text) __exitused __cold notrace
62980 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62981 index 9e65eff..b131e8b 100644
62982 --- a/include/linux/init_task.h
62983 +++ b/include/linux/init_task.h
62984 @@ -134,6 +134,12 @@ extern struct cred init_cred;
62985
62986 #define INIT_TASK_COMM "swapper"
62987
62988 +#ifdef CONFIG_X86
62989 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62990 +#else
62991 +#define INIT_TASK_THREAD_INFO
62992 +#endif
62993 +
62994 /*
62995 * INIT_TASK is used to set up the first task table, touch at
62996 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62997 @@ -172,6 +178,7 @@ extern struct cred init_cred;
62998 RCU_INIT_POINTER(.cred, &init_cred), \
62999 .comm = INIT_TASK_COMM, \
63000 .thread = INIT_THREAD, \
63001 + INIT_TASK_THREAD_INFO \
63002 .fs = &init_fs, \
63003 .files = &init_files, \
63004 .signal = &init_signals, \
63005 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
63006 index e6ca56d..8583707 100644
63007 --- a/include/linux/intel-iommu.h
63008 +++ b/include/linux/intel-iommu.h
63009 @@ -296,7 +296,7 @@ struct iommu_flush {
63010 u8 fm, u64 type);
63011 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
63012 unsigned int size_order, u64 type);
63013 -};
63014 +} __no_const;
63015
63016 enum {
63017 SR_DMAR_FECTL_REG,
63018 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
63019 index e68a8e5..811b9af 100644
63020 --- a/include/linux/interrupt.h
63021 +++ b/include/linux/interrupt.h
63022 @@ -435,7 +435,7 @@ enum
63023 /* map softirq index to softirq name. update 'softirq_to_name' in
63024 * kernel/softirq.c when adding a new softirq.
63025 */
63026 -extern char *softirq_to_name[NR_SOFTIRQS];
63027 +extern const char * const softirq_to_name[NR_SOFTIRQS];
63028
63029 /* softirq mask and active fields moved to irq_cpustat_t in
63030 * asm/hardirq.h to get better cache usage. KAO
63031 @@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
63032
63033 struct softirq_action
63034 {
63035 - void (*action)(struct softirq_action *);
63036 + void (*action)(void);
63037 };
63038
63039 asmlinkage void do_softirq(void);
63040 asmlinkage void __do_softirq(void);
63041 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
63042 +extern void open_softirq(int nr, void (*action)(void));
63043 extern void softirq_init(void);
63044 extern void __raise_softirq_irqoff(unsigned int nr);
63045
63046 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
63047 index 6883e19..06992b1 100644
63048 --- a/include/linux/kallsyms.h
63049 +++ b/include/linux/kallsyms.h
63050 @@ -15,7 +15,8 @@
63051
63052 struct module;
63053
63054 -#ifdef CONFIG_KALLSYMS
63055 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
63056 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63057 /* Lookup the address for a symbol. Returns 0 if not found. */
63058 unsigned long kallsyms_lookup_name(const char *name);
63059
63060 @@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
63061 /* Stupid that this does nothing, but I didn't create this mess. */
63062 #define __print_symbol(fmt, addr)
63063 #endif /*CONFIG_KALLSYMS*/
63064 +#else /* when included by kallsyms.c, vsnprintf.c, or
63065 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
63066 +extern void __print_symbol(const char *fmt, unsigned long address);
63067 +extern int sprint_backtrace(char *buffer, unsigned long address);
63068 +extern int sprint_symbol(char *buffer, unsigned long address);
63069 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
63070 +const char *kallsyms_lookup(unsigned long addr,
63071 + unsigned long *symbolsize,
63072 + unsigned long *offset,
63073 + char **modname, char *namebuf);
63074 +#endif
63075
63076 /* This macro allows us to keep printk typechecking */
63077 static __printf(1, 2)
63078 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
63079 index c4d2fc1..5df9c19 100644
63080 --- a/include/linux/kgdb.h
63081 +++ b/include/linux/kgdb.h
63082 @@ -53,7 +53,7 @@ extern int kgdb_connected;
63083 extern int kgdb_io_module_registered;
63084
63085 extern atomic_t kgdb_setting_breakpoint;
63086 -extern atomic_t kgdb_cpu_doing_single_step;
63087 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
63088
63089 extern struct task_struct *kgdb_usethread;
63090 extern struct task_struct *kgdb_contthread;
63091 @@ -252,7 +252,7 @@ struct kgdb_arch {
63092 void (*disable_hw_break)(struct pt_regs *regs);
63093 void (*remove_all_hw_break)(void);
63094 void (*correct_hw_break)(void);
63095 -};
63096 +} __do_const;
63097
63098 /**
63099 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
63100 @@ -277,7 +277,7 @@ struct kgdb_io {
63101 void (*pre_exception) (void);
63102 void (*post_exception) (void);
63103 int is_console;
63104 -};
63105 +} __do_const;
63106
63107 extern struct kgdb_arch arch_kgdb_ops;
63108
63109 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
63110 index 5398d58..5883a34 100644
63111 --- a/include/linux/kmod.h
63112 +++ b/include/linux/kmod.h
63113 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
63114 * usually useless though. */
63115 extern __printf(2, 3)
63116 int __request_module(bool wait, const char *name, ...);
63117 +extern __printf(3, 4)
63118 +int ___request_module(bool wait, char *param_name, const char *name, ...);
63119 #define request_module(mod...) __request_module(true, mod)
63120 #define request_module_nowait(mod...) __request_module(false, mod)
63121 #define try_then_request_module(x, mod...) \
63122 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
63123 index fc615a9..1e57449 100644
63124 --- a/include/linux/kobject.h
63125 +++ b/include/linux/kobject.h
63126 @@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
63127
63128 static inline __printf(2, 3)
63129 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
63130 -{ return 0; }
63131 +{ return -ENOMEM; }
63132
63133 static inline int kobject_action_type(const char *buf, size_t count,
63134 enum kobject_action *type)
63135 diff --git a/include/linux/kref.h b/include/linux/kref.h
63136 index 9c07dce..a92fa71 100644
63137 --- a/include/linux/kref.h
63138 +++ b/include/linux/kref.h
63139 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
63140 static inline int kref_sub(struct kref *kref, unsigned int count,
63141 void (*release)(struct kref *kref))
63142 {
63143 - WARN_ON(release == NULL);
63144 + BUG_ON(release == NULL);
63145
63146 if (atomic_sub_and_test((int) count, &kref->refcount)) {
63147 release(kref);
63148 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
63149 index 96c158a..1864db5 100644
63150 --- a/include/linux/kvm_host.h
63151 +++ b/include/linux/kvm_host.h
63152 @@ -345,7 +345,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
63153 void vcpu_load(struct kvm_vcpu *vcpu);
63154 void vcpu_put(struct kvm_vcpu *vcpu);
63155
63156 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
63157 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
63158 struct module *module);
63159 void kvm_exit(void);
63160
63161 @@ -511,7 +511,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
63162 struct kvm_guest_debug *dbg);
63163 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
63164
63165 -int kvm_arch_init(void *opaque);
63166 +int kvm_arch_init(const void *opaque);
63167 void kvm_arch_exit(void);
63168
63169 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
63170 diff --git a/include/linux/libata.h b/include/linux/libata.h
63171 index 6e887c7..4539601 100644
63172 --- a/include/linux/libata.h
63173 +++ b/include/linux/libata.h
63174 @@ -910,7 +910,7 @@ struct ata_port_operations {
63175 * fields must be pointers.
63176 */
63177 const struct ata_port_operations *inherits;
63178 -};
63179 +} __do_const;
63180
63181 struct ata_port_info {
63182 unsigned long flags;
63183 diff --git a/include/linux/memory.h b/include/linux/memory.h
63184 index 1ac7f6e..a5794d0 100644
63185 --- a/include/linux/memory.h
63186 +++ b/include/linux/memory.h
63187 @@ -143,7 +143,7 @@ struct memory_accessor {
63188 size_t count);
63189 ssize_t (*write)(struct memory_accessor *, const char *buf,
63190 off_t offset, size_t count);
63191 -};
63192 +} __no_const;
63193
63194 /*
63195 * Kernel text modification mutex, used for code patching. Users of this lock
63196 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
63197 index 1318ca6..7521340 100644
63198 --- a/include/linux/mfd/abx500.h
63199 +++ b/include/linux/mfd/abx500.h
63200 @@ -452,6 +452,7 @@ struct abx500_ops {
63201 int (*event_registers_startup_state_get) (struct device *, u8 *);
63202 int (*startup_irq_enabled) (struct device *, unsigned int);
63203 };
63204 +typedef struct abx500_ops __no_const abx500_ops_no_const;
63205
63206 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
63207 void abx500_remove_ops(struct device *dev);
63208 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
63209 index 9b07725..3d55001 100644
63210 --- a/include/linux/mfd/abx500/ux500_chargalg.h
63211 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
63212 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
63213 int (*enable) (struct ux500_charger *, int, int, int);
63214 int (*kick_wd) (struct ux500_charger *);
63215 int (*update_curr) (struct ux500_charger *, int);
63216 -};
63217 +} __no_const;
63218
63219 /**
63220 * struct ux500_charger - power supply ux500 charger sub class
63221 diff --git a/include/linux/mm.h b/include/linux/mm.h
63222 index f9f279c..198da78 100644
63223 --- a/include/linux/mm.h
63224 +++ b/include/linux/mm.h
63225 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
63226
63227 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
63228 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
63229 +
63230 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63231 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
63232 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
63233 +#else
63234 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
63235 +#endif
63236 +
63237 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
63238 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
63239
63240 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
63241 int set_page_dirty_lock(struct page *page);
63242 int clear_page_dirty_for_io(struct page *page);
63243
63244 -/* Is the vma a continuation of the stack vma above it? */
63245 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
63246 -{
63247 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
63248 -}
63249 -
63250 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
63251 - unsigned long addr)
63252 -{
63253 - return (vma->vm_flags & VM_GROWSDOWN) &&
63254 - (vma->vm_start == addr) &&
63255 - !vma_growsdown(vma->vm_prev, addr);
63256 -}
63257 -
63258 -/* Is the vma a continuation of the stack vma below it? */
63259 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
63260 -{
63261 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
63262 -}
63263 -
63264 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
63265 - unsigned long addr)
63266 -{
63267 - return (vma->vm_flags & VM_GROWSUP) &&
63268 - (vma->vm_end == addr) &&
63269 - !vma_growsup(vma->vm_next, addr);
63270 -}
63271 -
63272 extern pid_t
63273 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
63274
63275 @@ -1135,6 +1114,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
63276 }
63277 #endif
63278
63279 +#ifdef CONFIG_MMU
63280 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
63281 +#else
63282 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
63283 +{
63284 + return __pgprot(0);
63285 +}
63286 +#endif
63287 +
63288 int vma_wants_writenotify(struct vm_area_struct *vma);
63289
63290 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
63291 @@ -1153,8 +1141,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
63292 {
63293 return 0;
63294 }
63295 +
63296 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
63297 + unsigned long address)
63298 +{
63299 + return 0;
63300 +}
63301 #else
63302 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63303 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63304 #endif
63305
63306 #ifdef __PAGETABLE_PMD_FOLDED
63307 @@ -1163,8 +1158,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
63308 {
63309 return 0;
63310 }
63311 +
63312 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
63313 + unsigned long address)
63314 +{
63315 + return 0;
63316 +}
63317 #else
63318 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
63319 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
63320 #endif
63321
63322 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
63323 @@ -1182,11 +1184,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
63324 NULL: pud_offset(pgd, address);
63325 }
63326
63327 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
63328 +{
63329 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
63330 + NULL: pud_offset(pgd, address);
63331 +}
63332 +
63333 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
63334 {
63335 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
63336 NULL: pmd_offset(pud, address);
63337 }
63338 +
63339 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
63340 +{
63341 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
63342 + NULL: pmd_offset(pud, address);
63343 +}
63344 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
63345
63346 #if USE_SPLIT_PTLOCKS
63347 @@ -1396,6 +1410,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
63348 unsigned long, unsigned long,
63349 unsigned long, unsigned long);
63350 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63351 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63352
63353 /* These take the mm semaphore themselves */
63354 extern unsigned long vm_brk(unsigned long, unsigned long);
63355 @@ -1458,6 +1473,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
63356 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63357 struct vm_area_struct **pprev);
63358
63359 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
63360 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
63361 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63362 +
63363 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63364 NULL if none. Assume start_addr < end_addr. */
63365 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
63366 @@ -1486,15 +1505,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
63367 return vma;
63368 }
63369
63370 -#ifdef CONFIG_MMU
63371 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
63372 -#else
63373 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
63374 -{
63375 - return __pgprot(0);
63376 -}
63377 -#endif
63378 -
63379 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63380 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63381 unsigned long pfn, unsigned long size, pgprot_t);
63382 @@ -1599,7 +1609,7 @@ extern int unpoison_memory(unsigned long pfn);
63383 extern int sysctl_memory_failure_early_kill;
63384 extern int sysctl_memory_failure_recovery;
63385 extern void shake_page(struct page *p, int access);
63386 -extern atomic_long_t mce_bad_pages;
63387 +extern atomic_long_unchecked_t mce_bad_pages;
63388 extern int soft_offline_page(struct page *page, int flags);
63389
63390 extern void dump_page(struct page *page);
63391 @@ -1630,5 +1640,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
63392 static inline bool page_is_guard(struct page *page) { return false; }
63393 #endif /* CONFIG_DEBUG_PAGEALLOC */
63394
63395 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63396 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63397 +#else
63398 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63399 +#endif
63400 +
63401 #endif /* __KERNEL__ */
63402 #endif /* _LINUX_MM_H */
63403 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
63404 index 704a626..bb0705a 100644
63405 --- a/include/linux/mm_types.h
63406 +++ b/include/linux/mm_types.h
63407 @@ -263,6 +263,8 @@ struct vm_area_struct {
63408 #ifdef CONFIG_NUMA
63409 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63410 #endif
63411 +
63412 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63413 };
63414
63415 struct core_thread {
63416 @@ -337,7 +339,7 @@ struct mm_struct {
63417 unsigned long def_flags;
63418 unsigned long nr_ptes; /* Page table pages */
63419 unsigned long start_code, end_code, start_data, end_data;
63420 - unsigned long start_brk, brk, start_stack;
63421 + unsigned long brk_gap, start_brk, brk, start_stack;
63422 unsigned long arg_start, arg_end, env_start, env_end;
63423
63424 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
63425 @@ -389,6 +391,24 @@ struct mm_struct {
63426 struct cpumask cpumask_allocation;
63427 #endif
63428 struct uprobes_state uprobes_state;
63429 +
63430 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63431 + unsigned long pax_flags;
63432 +#endif
63433 +
63434 +#ifdef CONFIG_PAX_DLRESOLVE
63435 + unsigned long call_dl_resolve;
63436 +#endif
63437 +
63438 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63439 + unsigned long call_syscall;
63440 +#endif
63441 +
63442 +#ifdef CONFIG_PAX_ASLR
63443 + unsigned long delta_mmap; /* randomized offset */
63444 + unsigned long delta_stack; /* randomized offset */
63445 +#endif
63446 +
63447 };
63448
63449 static inline void mm_init_cpumask(struct mm_struct *mm)
63450 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
63451 index 1d1b1e1..2a13c78 100644
63452 --- a/include/linux/mmu_notifier.h
63453 +++ b/include/linux/mmu_notifier.h
63454 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
63455 */
63456 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63457 ({ \
63458 - pte_t __pte; \
63459 + pte_t ___pte; \
63460 struct vm_area_struct *___vma = __vma; \
63461 unsigned long ___address = __address; \
63462 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63463 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63464 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63465 - __pte; \
63466 + ___pte; \
63467 })
63468
63469 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
63470 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
63471 index 68c569f..5f43753 100644
63472 --- a/include/linux/mmzone.h
63473 +++ b/include/linux/mmzone.h
63474 @@ -411,7 +411,7 @@ struct zone {
63475 unsigned long flags; /* zone flags, see below */
63476
63477 /* Zone statistics */
63478 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63479 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63480
63481 /*
63482 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
63483 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
63484 index 5db9382..50e801d 100644
63485 --- a/include/linux/mod_devicetable.h
63486 +++ b/include/linux/mod_devicetable.h
63487 @@ -12,7 +12,7 @@
63488 typedef unsigned long kernel_ulong_t;
63489 #endif
63490
63491 -#define PCI_ANY_ID (~0)
63492 +#define PCI_ANY_ID ((__u16)~0)
63493
63494 struct pci_device_id {
63495 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63496 @@ -131,7 +131,7 @@ struct usb_device_id {
63497 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63498 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63499
63500 -#define HID_ANY_ID (~0)
63501 +#define HID_ANY_ID (~0U)
63502 #define HID_BUS_ANY 0xffff
63503 #define HID_GROUP_ANY 0x0000
63504
63505 diff --git a/include/linux/module.h b/include/linux/module.h
63506 index fbcafe2..e5d9587 100644
63507 --- a/include/linux/module.h
63508 +++ b/include/linux/module.h
63509 @@ -17,6 +17,7 @@
63510 #include <linux/moduleparam.h>
63511 #include <linux/tracepoint.h>
63512 #include <linux/export.h>
63513 +#include <linux/fs.h>
63514
63515 #include <linux/percpu.h>
63516 #include <asm/module.h>
63517 @@ -273,19 +274,16 @@ struct module
63518 int (*init)(void);
63519
63520 /* If this is non-NULL, vfree after init() returns */
63521 - void *module_init;
63522 + void *module_init_rx, *module_init_rw;
63523
63524 /* Here is the actual code + data, vfree'd on unload. */
63525 - void *module_core;
63526 + void *module_core_rx, *module_core_rw;
63527
63528 /* Here are the sizes of the init and core sections */
63529 - unsigned int init_size, core_size;
63530 + unsigned int init_size_rw, core_size_rw;
63531
63532 /* The size of the executable code in each section. */
63533 - unsigned int init_text_size, core_text_size;
63534 -
63535 - /* Size of RO sections of the module (text+rodata) */
63536 - unsigned int init_ro_size, core_ro_size;
63537 + unsigned int init_size_rx, core_size_rx;
63538
63539 /* Arch-specific module values */
63540 struct mod_arch_specific arch;
63541 @@ -341,6 +339,10 @@ struct module
63542 #ifdef CONFIG_EVENT_TRACING
63543 struct ftrace_event_call **trace_events;
63544 unsigned int num_trace_events;
63545 + struct file_operations trace_id;
63546 + struct file_operations trace_enable;
63547 + struct file_operations trace_format;
63548 + struct file_operations trace_filter;
63549 #endif
63550 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63551 unsigned int num_ftrace_callsites;
63552 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
63553 bool is_module_percpu_address(unsigned long addr);
63554 bool is_module_text_address(unsigned long addr);
63555
63556 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63557 +{
63558 +
63559 +#ifdef CONFIG_PAX_KERNEXEC
63560 + if (ktla_ktva(addr) >= (unsigned long)start &&
63561 + ktla_ktva(addr) < (unsigned long)start + size)
63562 + return 1;
63563 +#endif
63564 +
63565 + return ((void *)addr >= start && (void *)addr < start + size);
63566 +}
63567 +
63568 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63569 +{
63570 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63571 +}
63572 +
63573 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63574 +{
63575 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63576 +}
63577 +
63578 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63579 +{
63580 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63581 +}
63582 +
63583 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63584 +{
63585 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63586 +}
63587 +
63588 static inline int within_module_core(unsigned long addr, struct module *mod)
63589 {
63590 - return (unsigned long)mod->module_core <= addr &&
63591 - addr < (unsigned long)mod->module_core + mod->core_size;
63592 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63593 }
63594
63595 static inline int within_module_init(unsigned long addr, struct module *mod)
63596 {
63597 - return (unsigned long)mod->module_init <= addr &&
63598 - addr < (unsigned long)mod->module_init + mod->init_size;
63599 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63600 }
63601
63602 /* Search for module by name: must hold module_mutex. */
63603 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63604 index b2be02e..72d2f78 100644
63605 --- a/include/linux/moduleloader.h
63606 +++ b/include/linux/moduleloader.h
63607 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63608
63609 /* Allocator used for allocating struct module, core sections and init
63610 sections. Returns NULL on failure. */
63611 -void *module_alloc(unsigned long size);
63612 +void *module_alloc(unsigned long size) __size_overflow(1);
63613 +
63614 +#ifdef CONFIG_PAX_KERNEXEC
63615 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
63616 +#else
63617 +#define module_alloc_exec(x) module_alloc(x)
63618 +#endif
63619
63620 /* Free memory returned from module_alloc. */
63621 void module_free(struct module *mod, void *module_region);
63622
63623 +#ifdef CONFIG_PAX_KERNEXEC
63624 +void module_free_exec(struct module *mod, void *module_region);
63625 +#else
63626 +#define module_free_exec(x, y) module_free((x), (y))
63627 +#endif
63628 +
63629 /* Apply the given relocation to the (simplified) ELF. Return -error
63630 or 0. */
63631 int apply_relocate(Elf_Shdr *sechdrs,
63632 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63633 index d6a5806..7c13347 100644
63634 --- a/include/linux/moduleparam.h
63635 +++ b/include/linux/moduleparam.h
63636 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
63637 * @len is usually just sizeof(string).
63638 */
63639 #define module_param_string(name, string, len, perm) \
63640 - static const struct kparam_string __param_string_##name \
63641 + static const struct kparam_string __param_string_##name __used \
63642 = { len, string }; \
63643 __module_param_call(MODULE_PARAM_PREFIX, name, \
63644 &param_ops_string, \
63645 @@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63646 */
63647 #define module_param_array_named(name, array, type, nump, perm) \
63648 param_check_##type(name, &(array)[0]); \
63649 - static const struct kparam_array __param_arr_##name \
63650 + static const struct kparam_array __param_arr_##name __used \
63651 = { .max = ARRAY_SIZE(array), .num = nump, \
63652 .ops = &param_ops_##type, \
63653 .elemsize = sizeof(array[0]), .elem = array }; \
63654 diff --git a/include/linux/namei.h b/include/linux/namei.h
63655 index ffc0213..2c1f2cb 100644
63656 --- a/include/linux/namei.h
63657 +++ b/include/linux/namei.h
63658 @@ -24,7 +24,7 @@ struct nameidata {
63659 unsigned seq;
63660 int last_type;
63661 unsigned depth;
63662 - char *saved_names[MAX_NESTED_LINKS + 1];
63663 + const char *saved_names[MAX_NESTED_LINKS + 1];
63664
63665 /* Intent data */
63666 union {
63667 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63668 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63669 extern void unlock_rename(struct dentry *, struct dentry *);
63670
63671 -static inline void nd_set_link(struct nameidata *nd, char *path)
63672 +static inline void nd_set_link(struct nameidata *nd, const char *path)
63673 {
63674 nd->saved_names[nd->depth] = path;
63675 }
63676
63677 -static inline char *nd_get_link(struct nameidata *nd)
63678 +static inline const char *nd_get_link(const struct nameidata *nd)
63679 {
63680 return nd->saved_names[nd->depth];
63681 }
63682 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63683 index d94cb14..e64c951 100644
63684 --- a/include/linux/netdevice.h
63685 +++ b/include/linux/netdevice.h
63686 @@ -1026,6 +1026,7 @@ struct net_device_ops {
63687 struct net_device *dev,
63688 int idx);
63689 };
63690 +typedef struct net_device_ops __no_const net_device_ops_no_const;
63691
63692 /*
63693 * The DEVICE structure.
63694 @@ -1087,7 +1088,7 @@ struct net_device {
63695 int iflink;
63696
63697 struct net_device_stats stats;
63698 - atomic_long_t rx_dropped; /* dropped packets by core network
63699 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63700 * Do not use this in drivers.
63701 */
63702
63703 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63704 new file mode 100644
63705 index 0000000..33f4af8
63706 --- /dev/null
63707 +++ b/include/linux/netfilter/xt_gradm.h
63708 @@ -0,0 +1,9 @@
63709 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
63710 +#define _LINUX_NETFILTER_XT_GRADM_H 1
63711 +
63712 +struct xt_gradm_mtinfo {
63713 + __u16 flags;
63714 + __u16 invflags;
63715 +};
63716 +
63717 +#endif
63718 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63719 index c65a18a..0c05f3a 100644
63720 --- a/include/linux/of_pdt.h
63721 +++ b/include/linux/of_pdt.h
63722 @@ -32,7 +32,7 @@ struct of_pdt_ops {
63723
63724 /* return 0 on success; fill in 'len' with number of bytes in path */
63725 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63726 -};
63727 +} __no_const;
63728
63729 extern void *prom_early_alloc(unsigned long size);
63730
63731 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63732 index a4c5624..79d6d88 100644
63733 --- a/include/linux/oprofile.h
63734 +++ b/include/linux/oprofile.h
63735 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63736 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63737 char const * name, ulong * val);
63738
63739 -/** Create a file for read-only access to an atomic_t. */
63740 +/** Create a file for read-only access to an atomic_unchecked_t. */
63741 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63742 - char const * name, atomic_t * val);
63743 + char const * name, atomic_unchecked_t * val);
63744
63745 /** create a directory */
63746 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63747 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63748 index 45db49f..8795db3 100644
63749 --- a/include/linux/perf_event.h
63750 +++ b/include/linux/perf_event.h
63751 @@ -879,8 +879,8 @@ struct perf_event {
63752
63753 enum perf_event_active_state state;
63754 unsigned int attach_state;
63755 - local64_t count;
63756 - atomic64_t child_count;
63757 + local64_t count; /* PaX: fix it one day */
63758 + atomic64_unchecked_t child_count;
63759
63760 /*
63761 * These are the total time in nanoseconds that the event
63762 @@ -925,14 +925,14 @@ struct perf_event {
63763 struct hw_perf_event hw;
63764
63765 struct perf_event_context *ctx;
63766 - struct file *filp;
63767 + atomic_long_t refcount;
63768
63769 /*
63770 * These accumulate total time (in nanoseconds) that children
63771 * events have been enabled and running, respectively.
63772 */
63773 - atomic64_t child_total_time_enabled;
63774 - atomic64_t child_total_time_running;
63775 + atomic64_unchecked_t child_total_time_enabled;
63776 + atomic64_unchecked_t child_total_time_running;
63777
63778 /*
63779 * Protect attach/detach and child_list:
63780 diff --git a/include/linux/personality.h b/include/linux/personality.h
63781 index 8fc7dd1a..c19d89e 100644
63782 --- a/include/linux/personality.h
63783 +++ b/include/linux/personality.h
63784 @@ -44,6 +44,7 @@ enum {
63785 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63786 ADDR_NO_RANDOMIZE | \
63787 ADDR_COMPAT_LAYOUT | \
63788 + ADDR_LIMIT_3GB | \
63789 MMAP_PAGE_ZERO)
63790
63791 /*
63792 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63793 index e1ac1ce..0675fed 100644
63794 --- a/include/linux/pipe_fs_i.h
63795 +++ b/include/linux/pipe_fs_i.h
63796 @@ -45,9 +45,9 @@ struct pipe_buffer {
63797 struct pipe_inode_info {
63798 wait_queue_head_t wait;
63799 unsigned int nrbufs, curbuf, buffers;
63800 - unsigned int readers;
63801 - unsigned int writers;
63802 - unsigned int waiting_writers;
63803 + atomic_t readers;
63804 + atomic_t writers;
63805 + atomic_t waiting_writers;
63806 unsigned int r_counter;
63807 unsigned int w_counter;
63808 struct page *tmp_page;
63809 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63810 index f271860..6b3bec5 100644
63811 --- a/include/linux/pm_runtime.h
63812 +++ b/include/linux/pm_runtime.h
63813 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63814
63815 static inline void pm_runtime_mark_last_busy(struct device *dev)
63816 {
63817 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
63818 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63819 }
63820
63821 #else /* !CONFIG_PM_RUNTIME */
63822 diff --git a/include/linux/poison.h b/include/linux/poison.h
63823 index 2110a81..13a11bb 100644
63824 --- a/include/linux/poison.h
63825 +++ b/include/linux/poison.h
63826 @@ -19,8 +19,8 @@
63827 * under normal circumstances, used to verify that nobody uses
63828 * non-initialized list entries.
63829 */
63830 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63831 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63832 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63833 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63834
63835 /********** include/linux/timer.h **********/
63836 /*
63837 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63838 index 5a710b9..0b0dab9 100644
63839 --- a/include/linux/preempt.h
63840 +++ b/include/linux/preempt.h
63841 @@ -126,7 +126,7 @@ struct preempt_ops {
63842 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63843 void (*sched_out)(struct preempt_notifier *notifier,
63844 struct task_struct *next);
63845 -};
63846 +} __no_const;
63847
63848 /**
63849 * preempt_notifier - key for installing preemption notifiers
63850 diff --git a/include/linux/printk.h b/include/linux/printk.h
63851 index 1bec2f7..b66e833 100644
63852 --- a/include/linux/printk.h
63853 +++ b/include/linux/printk.h
63854 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
63855 extern int printk_needs_cpu(int cpu);
63856 extern void printk_tick(void);
63857
63858 +extern int kptr_restrict;
63859 +
63860 #ifdef CONFIG_PRINTK
63861 asmlinkage __printf(5, 0)
63862 int vprintk_emit(int facility, int level,
63863 @@ -128,7 +130,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
63864
63865 extern int printk_delay_msec;
63866 extern int dmesg_restrict;
63867 -extern int kptr_restrict;
63868
63869 void log_buf_kexec_setup(void);
63870 void __init setup_log_buf(int early);
63871 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63872 index 3fd2e87..d93a721 100644
63873 --- a/include/linux/proc_fs.h
63874 +++ b/include/linux/proc_fs.h
63875 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63876 return proc_create_data(name, mode, parent, proc_fops, NULL);
63877 }
63878
63879 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63880 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63881 +{
63882 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63883 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63884 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63885 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63886 +#else
63887 + return proc_create_data(name, mode, parent, proc_fops, NULL);
63888 +#endif
63889 +}
63890 +
63891 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63892 umode_t mode, struct proc_dir_entry *base,
63893 read_proc_t *read_proc, void * data)
63894 @@ -258,7 +270,7 @@ union proc_op {
63895 int (*proc_show)(struct seq_file *m,
63896 struct pid_namespace *ns, struct pid *pid,
63897 struct task_struct *task);
63898 -};
63899 +} __no_const;
63900
63901 struct ctl_table_header;
63902 struct ctl_table;
63903 diff --git a/include/linux/random.h b/include/linux/random.h
63904 index ac621ce..c1215f3 100644
63905 --- a/include/linux/random.h
63906 +++ b/include/linux/random.h
63907 @@ -53,6 +53,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
63908 unsigned int value);
63909 extern void add_interrupt_randomness(int irq, int irq_flags);
63910
63911 +#ifdef CONFIG_PAX_LATENT_ENTROPY
63912 +extern void transfer_latent_entropy(void);
63913 +#endif
63914 +
63915 extern void get_random_bytes(void *buf, int nbytes);
63916 extern void get_random_bytes_arch(void *buf, int nbytes);
63917 void generate_random_uuid(unsigned char uuid_out[16]);
63918 @@ -69,12 +73,17 @@ void srandom32(u32 seed);
63919
63920 u32 prandom32(struct rnd_state *);
63921
63922 +static inline unsigned long pax_get_random_long(void)
63923 +{
63924 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63925 +}
63926 +
63927 /*
63928 * Handle minimum values for seeds
63929 */
63930 static inline u32 __seed(u32 x, u32 m)
63931 {
63932 - return (x < m) ? x + m : x;
63933 + return (x <= m) ? x + m + 1 : x;
63934 }
63935
63936 /**
63937 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63938 index e0879a7..a12f962 100644
63939 --- a/include/linux/reboot.h
63940 +++ b/include/linux/reboot.h
63941 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
63942 * Architecture-specific implementations of sys_reboot commands.
63943 */
63944
63945 -extern void machine_restart(char *cmd);
63946 -extern void machine_halt(void);
63947 -extern void machine_power_off(void);
63948 +extern void machine_restart(char *cmd) __noreturn;
63949 +extern void machine_halt(void) __noreturn;
63950 +extern void machine_power_off(void) __noreturn;
63951
63952 extern void machine_shutdown(void);
63953 struct pt_regs;
63954 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63955 */
63956
63957 extern void kernel_restart_prepare(char *cmd);
63958 -extern void kernel_restart(char *cmd);
63959 -extern void kernel_halt(void);
63960 -extern void kernel_power_off(void);
63961 +extern void kernel_restart(char *cmd) __noreturn;
63962 +extern void kernel_halt(void) __noreturn;
63963 +extern void kernel_power_off(void) __noreturn;
63964
63965 extern int C_A_D; /* for sysctl */
63966 void ctrl_alt_del(void);
63967 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63968 * Emergency restart, callable from an interrupt handler.
63969 */
63970
63971 -extern void emergency_restart(void);
63972 +extern void emergency_restart(void) __noreturn;
63973 #include <asm/emergency-restart.h>
63974
63975 #endif
63976 diff --git a/include/linux/relay.h b/include/linux/relay.h
63977 index 91cacc3..b55ff74 100644
63978 --- a/include/linux/relay.h
63979 +++ b/include/linux/relay.h
63980 @@ -160,7 +160,7 @@ struct rchan_callbacks
63981 * The callback should return 0 if successful, negative if not.
63982 */
63983 int (*remove_buf_file)(struct dentry *dentry);
63984 -};
63985 +} __no_const;
63986
63987 /*
63988 * CONFIG_RELAY kernel API, kernel/relay.c
63989 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63990 index 6fdf027..ff72610 100644
63991 --- a/include/linux/rfkill.h
63992 +++ b/include/linux/rfkill.h
63993 @@ -147,6 +147,7 @@ struct rfkill_ops {
63994 void (*query)(struct rfkill *rfkill, void *data);
63995 int (*set_block)(void *data, bool blocked);
63996 };
63997 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63998
63999 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
64000 /**
64001 diff --git a/include/linux/rio.h b/include/linux/rio.h
64002 index a90ebad..fd87b5d 100644
64003 --- a/include/linux/rio.h
64004 +++ b/include/linux/rio.h
64005 @@ -321,7 +321,7 @@ struct rio_ops {
64006 int mbox, void *buffer, size_t len);
64007 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
64008 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
64009 -};
64010 +} __no_const;
64011
64012 #define RIO_RESOURCE_MEM 0x00000100
64013 #define RIO_RESOURCE_DOORBELL 0x00000200
64014 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
64015 index 3fce545..b4fed6e 100644
64016 --- a/include/linux/rmap.h
64017 +++ b/include/linux/rmap.h
64018 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
64019 void anon_vma_init(void); /* create anon_vma_cachep */
64020 int anon_vma_prepare(struct vm_area_struct *);
64021 void unlink_anon_vmas(struct vm_area_struct *);
64022 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
64023 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
64024 void anon_vma_moveto_tail(struct vm_area_struct *);
64025 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
64026 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
64027
64028 static inline void anon_vma_merge(struct vm_area_struct *vma,
64029 struct vm_area_struct *next)
64030 diff --git a/include/linux/sched.h b/include/linux/sched.h
64031 index 4a1f493..5812aeb 100644
64032 --- a/include/linux/sched.h
64033 +++ b/include/linux/sched.h
64034 @@ -101,6 +101,7 @@ struct bio_list;
64035 struct fs_struct;
64036 struct perf_event_context;
64037 struct blk_plug;
64038 +struct linux_binprm;
64039
64040 /*
64041 * List of flags we want to share for kernel threads,
64042 @@ -384,10 +385,13 @@ struct user_namespace;
64043 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
64044
64045 extern int sysctl_max_map_count;
64046 +extern unsigned long sysctl_heap_stack_gap;
64047
64048 #include <linux/aio.h>
64049
64050 #ifdef CONFIG_MMU
64051 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
64052 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
64053 extern void arch_pick_mmap_layout(struct mm_struct *mm);
64054 extern unsigned long
64055 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
64056 @@ -406,6 +410,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
64057 extern void set_dumpable(struct mm_struct *mm, int value);
64058 extern int get_dumpable(struct mm_struct *mm);
64059
64060 +/* get/set_dumpable() values */
64061 +#define SUID_DUMPABLE_DISABLED 0
64062 +#define SUID_DUMPABLE_ENABLED 1
64063 +#define SUID_DUMPABLE_SAFE 2
64064 +
64065 /* mm flags */
64066 /* dumpable bits */
64067 #define MMF_DUMPABLE 0 /* core dump is permitted */
64068 @@ -646,6 +655,17 @@ struct signal_struct {
64069 #ifdef CONFIG_TASKSTATS
64070 struct taskstats *stats;
64071 #endif
64072 +
64073 +#ifdef CONFIG_GRKERNSEC
64074 + u32 curr_ip;
64075 + u32 saved_ip;
64076 + u32 gr_saddr;
64077 + u32 gr_daddr;
64078 + u16 gr_sport;
64079 + u16 gr_dport;
64080 + u8 used_accept:1;
64081 +#endif
64082 +
64083 #ifdef CONFIG_AUDIT
64084 unsigned audit_tty;
64085 struct tty_audit_buf *tty_audit_buf;
64086 @@ -729,6 +749,11 @@ struct user_struct {
64087 struct key *session_keyring; /* UID's default session keyring */
64088 #endif
64089
64090 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64091 + unsigned int banned;
64092 + unsigned long ban_expires;
64093 +#endif
64094 +
64095 /* Hash table maintenance information */
64096 struct hlist_node uidhash_node;
64097 kuid_t uid;
64098 @@ -1348,8 +1373,8 @@ struct task_struct {
64099 struct list_head thread_group;
64100
64101 struct completion *vfork_done; /* for vfork() */
64102 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
64103 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64104 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
64105 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64106
64107 cputime_t utime, stime, utimescaled, stimescaled;
64108 cputime_t gtime;
64109 @@ -1365,11 +1390,6 @@ struct task_struct {
64110 struct task_cputime cputime_expires;
64111 struct list_head cpu_timers[3];
64112
64113 -/* process credentials */
64114 - const struct cred __rcu *real_cred; /* objective and real subjective task
64115 - * credentials (COW) */
64116 - const struct cred __rcu *cred; /* effective (overridable) subjective task
64117 - * credentials (COW) */
64118 char comm[TASK_COMM_LEN]; /* executable name excluding path
64119 - access with [gs]et_task_comm (which lock
64120 it with task_lock())
64121 @@ -1386,8 +1406,16 @@ struct task_struct {
64122 #endif
64123 /* CPU-specific state of this task */
64124 struct thread_struct thread;
64125 +/* thread_info moved to task_struct */
64126 +#ifdef CONFIG_X86
64127 + struct thread_info tinfo;
64128 +#endif
64129 /* filesystem information */
64130 struct fs_struct *fs;
64131 +
64132 + const struct cred __rcu *cred; /* effective (overridable) subjective task
64133 + * credentials (COW) */
64134 +
64135 /* open file information */
64136 struct files_struct *files;
64137 /* namespaces */
64138 @@ -1431,6 +1459,11 @@ struct task_struct {
64139 struct rt_mutex_waiter *pi_blocked_on;
64140 #endif
64141
64142 +/* process credentials */
64143 + const struct cred __rcu *real_cred; /* objective and real subjective task
64144 + * credentials (COW) */
64145 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64146 +
64147 #ifdef CONFIG_DEBUG_MUTEXES
64148 /* mutex deadlock detection */
64149 struct mutex_waiter *blocked_on;
64150 @@ -1547,6 +1580,27 @@ struct task_struct {
64151 unsigned long default_timer_slack_ns;
64152
64153 struct list_head *scm_work_list;
64154 +
64155 +#ifdef CONFIG_GRKERNSEC
64156 + /* grsecurity */
64157 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64158 + u64 exec_id;
64159 +#endif
64160 +#ifdef CONFIG_GRKERNSEC_SETXID
64161 + const struct cred *delayed_cred;
64162 +#endif
64163 + struct dentry *gr_chroot_dentry;
64164 + struct acl_subject_label *acl;
64165 + struct acl_role_label *role;
64166 + struct file *exec_file;
64167 + u16 acl_role_id;
64168 + /* is this the task that authenticated to the special role */
64169 + u8 acl_sp_role;
64170 + u8 is_writable;
64171 + u8 brute;
64172 + u8 gr_is_chrooted;
64173 +#endif
64174 +
64175 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
64176 /* Index of current stored address in ret_stack */
64177 int curr_ret_stack;
64178 @@ -1585,6 +1639,51 @@ struct task_struct {
64179 #endif
64180 };
64181
64182 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
64183 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
64184 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
64185 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
64186 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
64187 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
64188 +
64189 +#ifdef CONFIG_PAX_SOFTMODE
64190 +extern int pax_softmode;
64191 +#endif
64192 +
64193 +extern int pax_check_flags(unsigned long *);
64194 +
64195 +/* if tsk != current then task_lock must be held on it */
64196 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64197 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
64198 +{
64199 + if (likely(tsk->mm))
64200 + return tsk->mm->pax_flags;
64201 + else
64202 + return 0UL;
64203 +}
64204 +
64205 +/* if tsk != current then task_lock must be held on it */
64206 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
64207 +{
64208 + if (likely(tsk->mm)) {
64209 + tsk->mm->pax_flags = flags;
64210 + return 0;
64211 + }
64212 + return -EINVAL;
64213 +}
64214 +#endif
64215 +
64216 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64217 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
64218 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64219 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
64220 +#endif
64221 +
64222 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
64223 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
64224 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
64225 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
64226 +
64227 /* Future-safe accessor for struct task_struct's cpus_allowed. */
64228 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
64229
64230 @@ -2112,7 +2211,9 @@ void yield(void);
64231 extern struct exec_domain default_exec_domain;
64232
64233 union thread_union {
64234 +#ifndef CONFIG_X86
64235 struct thread_info thread_info;
64236 +#endif
64237 unsigned long stack[THREAD_SIZE/sizeof(long)];
64238 };
64239
64240 @@ -2145,6 +2246,7 @@ extern struct pid_namespace init_pid_ns;
64241 */
64242
64243 extern struct task_struct *find_task_by_vpid(pid_t nr);
64244 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
64245 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
64246 struct pid_namespace *ns);
64247
64248 @@ -2301,7 +2403,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
64249 extern void exit_itimers(struct signal_struct *);
64250 extern void flush_itimer_signals(void);
64251
64252 -extern void do_group_exit(int);
64253 +extern __noreturn void do_group_exit(int);
64254
64255 extern void daemonize(const char *, ...);
64256 extern int allow_signal(int);
64257 @@ -2502,9 +2604,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
64258
64259 #endif
64260
64261 -static inline int object_is_on_stack(void *obj)
64262 +static inline int object_starts_on_stack(void *obj)
64263 {
64264 - void *stack = task_stack_page(current);
64265 + const void *stack = task_stack_page(current);
64266
64267 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64268 }
64269 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
64270 index 899fbb4..1cb4138 100644
64271 --- a/include/linux/screen_info.h
64272 +++ b/include/linux/screen_info.h
64273 @@ -43,7 +43,8 @@ struct screen_info {
64274 __u16 pages; /* 0x32 */
64275 __u16 vesa_attributes; /* 0x34 */
64276 __u32 capabilities; /* 0x36 */
64277 - __u8 _reserved[6]; /* 0x3a */
64278 + __u16 vesapm_size; /* 0x3a */
64279 + __u8 _reserved[4]; /* 0x3c */
64280 } __attribute__((packed));
64281
64282 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
64283 diff --git a/include/linux/security.h b/include/linux/security.h
64284 index 3dea6a9..81fd81f 100644
64285 --- a/include/linux/security.h
64286 +++ b/include/linux/security.h
64287 @@ -26,6 +26,7 @@
64288 #include <linux/capability.h>
64289 #include <linux/slab.h>
64290 #include <linux/err.h>
64291 +#include <linux/grsecurity.h>
64292
64293 struct linux_binprm;
64294 struct cred;
64295 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
64296 index fc61854..d7c490b 100644
64297 --- a/include/linux/seq_file.h
64298 +++ b/include/linux/seq_file.h
64299 @@ -25,6 +25,9 @@ struct seq_file {
64300 struct mutex lock;
64301 const struct seq_operations *op;
64302 int poll_event;
64303 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64304 + u64 exec_id;
64305 +#endif
64306 void *private;
64307 };
64308
64309 @@ -34,6 +37,7 @@ struct seq_operations {
64310 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64311 int (*show) (struct seq_file *m, void *v);
64312 };
64313 +typedef struct seq_operations __no_const seq_operations_no_const;
64314
64315 #define SEQ_SKIP 1
64316
64317 diff --git a/include/linux/shm.h b/include/linux/shm.h
64318 index 92808b8..c28cac4 100644
64319 --- a/include/linux/shm.h
64320 +++ b/include/linux/shm.h
64321 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
64322
64323 /* The task created the shm object. NULL if the task is dead. */
64324 struct task_struct *shm_creator;
64325 +#ifdef CONFIG_GRKERNSEC
64326 + time_t shm_createtime;
64327 + pid_t shm_lapid;
64328 +#endif
64329 };
64330
64331 /* shm_mode upper byte flags */
64332 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
64333 index 642cb73..2efdb98 100644
64334 --- a/include/linux/skbuff.h
64335 +++ b/include/linux/skbuff.h
64336 @@ -567,7 +567,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
64337 extern struct sk_buff *__alloc_skb(unsigned int size,
64338 gfp_t priority, int fclone, int node);
64339 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
64340 -static inline struct sk_buff *alloc_skb(unsigned int size,
64341 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
64342 gfp_t priority)
64343 {
64344 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
64345 @@ -680,7 +680,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
64346 */
64347 static inline int skb_queue_empty(const struct sk_buff_head *list)
64348 {
64349 - return list->next == (struct sk_buff *)list;
64350 + return list->next == (const struct sk_buff *)list;
64351 }
64352
64353 /**
64354 @@ -693,7 +693,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
64355 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64356 const struct sk_buff *skb)
64357 {
64358 - return skb->next == (struct sk_buff *)list;
64359 + return skb->next == (const struct sk_buff *)list;
64360 }
64361
64362 /**
64363 @@ -706,7 +706,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64364 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64365 const struct sk_buff *skb)
64366 {
64367 - return skb->prev == (struct sk_buff *)list;
64368 + return skb->prev == (const struct sk_buff *)list;
64369 }
64370
64371 /**
64372 @@ -1605,7 +1605,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
64373 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
64374 */
64375 #ifndef NET_SKB_PAD
64376 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
64377 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
64378 #endif
64379
64380 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
64381 @@ -2112,7 +2112,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
64382 int noblock, int *err);
64383 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
64384 struct poll_table_struct *wait);
64385 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
64386 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
64387 int offset, struct iovec *to,
64388 int size);
64389 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
64390 diff --git a/include/linux/slab.h b/include/linux/slab.h
64391 index 67d5d94..bbd740b 100644
64392 --- a/include/linux/slab.h
64393 +++ b/include/linux/slab.h
64394 @@ -11,12 +11,20 @@
64395
64396 #include <linux/gfp.h>
64397 #include <linux/types.h>
64398 +#include <linux/err.h>
64399
64400 /*
64401 * Flags to pass to kmem_cache_create().
64402 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64403 */
64404 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64405 +
64406 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64407 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64408 +#else
64409 +#define SLAB_USERCOPY 0x00000000UL
64410 +#endif
64411 +
64412 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64413 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64414 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64415 @@ -87,10 +95,13 @@
64416 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64417 * Both make kfree a no-op.
64418 */
64419 -#define ZERO_SIZE_PTR ((void *)16)
64420 +#define ZERO_SIZE_PTR \
64421 +({ \
64422 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64423 + (void *)(-MAX_ERRNO-1L); \
64424 +})
64425
64426 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64427 - (unsigned long)ZERO_SIZE_PTR)
64428 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64429
64430 /*
64431 * struct kmem_cache related prototypes
64432 @@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
64433 void kfree(const void *);
64434 void kzfree(const void *);
64435 size_t ksize(const void *);
64436 +const char *check_heap_object(const void *ptr, unsigned long n, bool to);
64437 +bool is_usercopy_object(const void *ptr);
64438
64439 /*
64440 * Allocator specific definitions. These are mainly used to establish optimized
64441 @@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64442 */
64443 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64444 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64445 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64446 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
64447 #define kmalloc_track_caller(size, flags) \
64448 __kmalloc_track_caller(size, flags, _RET_IP_)
64449 #else
64450 @@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64451 */
64452 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64453 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64454 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
64455 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
64456 #define kmalloc_node_track_caller(size, flags, node) \
64457 __kmalloc_node_track_caller(size, flags, node, \
64458 _RET_IP_)
64459 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
64460 index fbd1117..0a3d314 100644
64461 --- a/include/linux/slab_def.h
64462 +++ b/include/linux/slab_def.h
64463 @@ -66,10 +66,10 @@ struct kmem_cache {
64464 unsigned long node_allocs;
64465 unsigned long node_frees;
64466 unsigned long node_overflow;
64467 - atomic_t allochit;
64468 - atomic_t allocmiss;
64469 - atomic_t freehit;
64470 - atomic_t freemiss;
64471 + atomic_unchecked_t allochit;
64472 + atomic_unchecked_t allocmiss;
64473 + atomic_unchecked_t freehit;
64474 + atomic_unchecked_t freemiss;
64475
64476 /*
64477 * If debugging is enabled, then the allocator can add additional
64478 @@ -103,11 +103,16 @@ struct cache_sizes {
64479 #ifdef CONFIG_ZONE_DMA
64480 struct kmem_cache *cs_dmacachep;
64481 #endif
64482 +
64483 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64484 + struct kmem_cache *cs_usercopycachep;
64485 +#endif
64486 +
64487 };
64488 extern struct cache_sizes malloc_sizes[];
64489
64490 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64491 -void *__kmalloc(size_t size, gfp_t flags);
64492 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64493
64494 #ifdef CONFIG_TRACING
64495 extern void *kmem_cache_alloc_trace(size_t size,
64496 @@ -150,6 +155,13 @@ found:
64497 cachep = malloc_sizes[i].cs_dmacachep;
64498 else
64499 #endif
64500 +
64501 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64502 + if (flags & GFP_USERCOPY)
64503 + cachep = malloc_sizes[i].cs_usercopycachep;
64504 + else
64505 +#endif
64506 +
64507 cachep = malloc_sizes[i].cs_cachep;
64508
64509 ret = kmem_cache_alloc_trace(size, cachep, flags);
64510 @@ -160,7 +172,7 @@ found:
64511 }
64512
64513 #ifdef CONFIG_NUMA
64514 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64515 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64516 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64517
64518 #ifdef CONFIG_TRACING
64519 @@ -203,6 +215,13 @@ found:
64520 cachep = malloc_sizes[i].cs_dmacachep;
64521 else
64522 #endif
64523 +
64524 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64525 + if (flags & GFP_USERCOPY)
64526 + cachep = malloc_sizes[i].cs_usercopycachep;
64527 + else
64528 +#endif
64529 +
64530 cachep = malloc_sizes[i].cs_cachep;
64531
64532 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
64533 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64534 index 0ec00b3..22b4715 100644
64535 --- a/include/linux/slob_def.h
64536 +++ b/include/linux/slob_def.h
64537 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64538 return kmem_cache_alloc_node(cachep, flags, -1);
64539 }
64540
64541 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64542 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64543
64544 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64545 {
64546 @@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64547 return __kmalloc_node(size, flags, -1);
64548 }
64549
64550 -static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64551 +static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
64552 {
64553 return kmalloc(size, flags);
64554 }
64555 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
64556 index c2f8c8b..d992a41 100644
64557 --- a/include/linux/slub_def.h
64558 +++ b/include/linux/slub_def.h
64559 @@ -92,7 +92,7 @@ struct kmem_cache {
64560 struct kmem_cache_order_objects max;
64561 struct kmem_cache_order_objects min;
64562 gfp_t allocflags; /* gfp flags to use on each alloc */
64563 - int refcount; /* Refcount for slab cache destroy */
64564 + atomic_t refcount; /* Refcount for slab cache destroy */
64565 void (*ctor)(void *);
64566 int inuse; /* Offset to metadata */
64567 int align; /* Alignment */
64568 @@ -153,7 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
64569 * Sorry that the following has to be that ugly but some versions of GCC
64570 * have trouble with constant propagation and loops.
64571 */
64572 -static __always_inline int kmalloc_index(size_t size)
64573 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
64574 {
64575 if (!size)
64576 return 0;
64577 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64578 }
64579
64580 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64581 -void *__kmalloc(size_t size, gfp_t flags);
64582 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64583
64584 static __always_inline void *
64585 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64586 @@ -259,7 +259,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64587 }
64588 #endif
64589
64590 -static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64591 +static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
64592 {
64593 unsigned int order = get_order(size);
64594 return kmalloc_order_trace(size, flags, order);
64595 @@ -284,7 +284,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64596 }
64597
64598 #ifdef CONFIG_NUMA
64599 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64600 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64601 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64602
64603 #ifdef CONFIG_TRACING
64604 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64605 index de8832d..0147b46 100644
64606 --- a/include/linux/sonet.h
64607 +++ b/include/linux/sonet.h
64608 @@ -61,7 +61,7 @@ struct sonet_stats {
64609 #include <linux/atomic.h>
64610
64611 struct k_sonet_stats {
64612 -#define __HANDLE_ITEM(i) atomic_t i
64613 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
64614 __SONET_ITEMS
64615 #undef __HANDLE_ITEM
64616 };
64617 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64618 index 523547e..2cb7140 100644
64619 --- a/include/linux/sunrpc/clnt.h
64620 +++ b/include/linux/sunrpc/clnt.h
64621 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64622 {
64623 switch (sap->sa_family) {
64624 case AF_INET:
64625 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
64626 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64627 case AF_INET6:
64628 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64629 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64630 }
64631 return 0;
64632 }
64633 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64634 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64635 const struct sockaddr *src)
64636 {
64637 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64638 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64639 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64640
64641 dsin->sin_family = ssin->sin_family;
64642 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64643 if (sa->sa_family != AF_INET6)
64644 return 0;
64645
64646 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64647 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64648 }
64649
64650 #endif /* __KERNEL__ */
64651 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64652 index dc0c3cc..8503fb6 100644
64653 --- a/include/linux/sunrpc/sched.h
64654 +++ b/include/linux/sunrpc/sched.h
64655 @@ -106,6 +106,7 @@ struct rpc_call_ops {
64656 void (*rpc_count_stats)(struct rpc_task *, void *);
64657 void (*rpc_release)(void *);
64658 };
64659 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64660
64661 struct rpc_task_setup {
64662 struct rpc_task *task;
64663 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64664 index 0b8e3e6..33e0a01 100644
64665 --- a/include/linux/sunrpc/svc_rdma.h
64666 +++ b/include/linux/sunrpc/svc_rdma.h
64667 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64668 extern unsigned int svcrdma_max_requests;
64669 extern unsigned int svcrdma_max_req_size;
64670
64671 -extern atomic_t rdma_stat_recv;
64672 -extern atomic_t rdma_stat_read;
64673 -extern atomic_t rdma_stat_write;
64674 -extern atomic_t rdma_stat_sq_starve;
64675 -extern atomic_t rdma_stat_rq_starve;
64676 -extern atomic_t rdma_stat_rq_poll;
64677 -extern atomic_t rdma_stat_rq_prod;
64678 -extern atomic_t rdma_stat_sq_poll;
64679 -extern atomic_t rdma_stat_sq_prod;
64680 +extern atomic_unchecked_t rdma_stat_recv;
64681 +extern atomic_unchecked_t rdma_stat_read;
64682 +extern atomic_unchecked_t rdma_stat_write;
64683 +extern atomic_unchecked_t rdma_stat_sq_starve;
64684 +extern atomic_unchecked_t rdma_stat_rq_starve;
64685 +extern atomic_unchecked_t rdma_stat_rq_poll;
64686 +extern atomic_unchecked_t rdma_stat_rq_prod;
64687 +extern atomic_unchecked_t rdma_stat_sq_poll;
64688 +extern atomic_unchecked_t rdma_stat_sq_prod;
64689
64690 #define RPCRDMA_VERSION 1
64691
64692 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64693 index c34b4c8..a65b67d 100644
64694 --- a/include/linux/sysctl.h
64695 +++ b/include/linux/sysctl.h
64696 @@ -155,7 +155,11 @@ enum
64697 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64698 };
64699
64700 -
64701 +#ifdef CONFIG_PAX_SOFTMODE
64702 +enum {
64703 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64704 +};
64705 +#endif
64706
64707 /* CTL_VM names: */
64708 enum
64709 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64710
64711 extern int proc_dostring(struct ctl_table *, int,
64712 void __user *, size_t *, loff_t *);
64713 +extern int proc_dostring_modpriv(struct ctl_table *, int,
64714 + void __user *, size_t *, loff_t *);
64715 extern int proc_dointvec(struct ctl_table *, int,
64716 void __user *, size_t *, loff_t *);
64717 extern int proc_dointvec_minmax(struct ctl_table *, int,
64718 diff --git a/include/linux/tty.h b/include/linux/tty.h
64719 index 9f47ab5..73da944 100644
64720 --- a/include/linux/tty.h
64721 +++ b/include/linux/tty.h
64722 @@ -225,7 +225,7 @@ struct tty_port {
64723 const struct tty_port_operations *ops; /* Port operations */
64724 spinlock_t lock; /* Lock protecting tty field */
64725 int blocked_open; /* Waiting to open */
64726 - int count; /* Usage count */
64727 + atomic_t count; /* Usage count */
64728 wait_queue_head_t open_wait; /* Open waiters */
64729 wait_queue_head_t close_wait; /* Close waiters */
64730 wait_queue_head_t delta_msr_wait; /* Modem status change */
64731 @@ -525,7 +525,7 @@ extern int tty_port_open(struct tty_port *port,
64732 struct tty_struct *tty, struct file *filp);
64733 static inline int tty_port_users(struct tty_port *port)
64734 {
64735 - return port->count + port->blocked_open;
64736 + return atomic_read(&port->count) + port->blocked_open;
64737 }
64738
64739 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
64740 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64741 index fb79dd8d..07d4773 100644
64742 --- a/include/linux/tty_ldisc.h
64743 +++ b/include/linux/tty_ldisc.h
64744 @@ -149,7 +149,7 @@ struct tty_ldisc_ops {
64745
64746 struct module *owner;
64747
64748 - int refcount;
64749 + atomic_t refcount;
64750 };
64751
64752 struct tty_ldisc {
64753 diff --git a/include/linux/types.h b/include/linux/types.h
64754 index 9c1bd53..c2370f6 100644
64755 --- a/include/linux/types.h
64756 +++ b/include/linux/types.h
64757 @@ -220,10 +220,26 @@ typedef struct {
64758 int counter;
64759 } atomic_t;
64760
64761 +#ifdef CONFIG_PAX_REFCOUNT
64762 +typedef struct {
64763 + int counter;
64764 +} atomic_unchecked_t;
64765 +#else
64766 +typedef atomic_t atomic_unchecked_t;
64767 +#endif
64768 +
64769 #ifdef CONFIG_64BIT
64770 typedef struct {
64771 long counter;
64772 } atomic64_t;
64773 +
64774 +#ifdef CONFIG_PAX_REFCOUNT
64775 +typedef struct {
64776 + long counter;
64777 +} atomic64_unchecked_t;
64778 +#else
64779 +typedef atomic64_t atomic64_unchecked_t;
64780 +#endif
64781 #endif
64782
64783 struct list_head {
64784 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64785 index 5ca0951..ab496a5 100644
64786 --- a/include/linux/uaccess.h
64787 +++ b/include/linux/uaccess.h
64788 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64789 long ret; \
64790 mm_segment_t old_fs = get_fs(); \
64791 \
64792 - set_fs(KERNEL_DS); \
64793 pagefault_disable(); \
64794 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64795 - pagefault_enable(); \
64796 + set_fs(KERNEL_DS); \
64797 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64798 set_fs(old_fs); \
64799 + pagefault_enable(); \
64800 ret; \
64801 })
64802
64803 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64804 index 99c1b4d..bb94261 100644
64805 --- a/include/linux/unaligned/access_ok.h
64806 +++ b/include/linux/unaligned/access_ok.h
64807 @@ -6,32 +6,32 @@
64808
64809 static inline u16 get_unaligned_le16(const void *p)
64810 {
64811 - return le16_to_cpup((__le16 *)p);
64812 + return le16_to_cpup((const __le16 *)p);
64813 }
64814
64815 static inline u32 get_unaligned_le32(const void *p)
64816 {
64817 - return le32_to_cpup((__le32 *)p);
64818 + return le32_to_cpup((const __le32 *)p);
64819 }
64820
64821 static inline u64 get_unaligned_le64(const void *p)
64822 {
64823 - return le64_to_cpup((__le64 *)p);
64824 + return le64_to_cpup((const __le64 *)p);
64825 }
64826
64827 static inline u16 get_unaligned_be16(const void *p)
64828 {
64829 - return be16_to_cpup((__be16 *)p);
64830 + return be16_to_cpup((const __be16 *)p);
64831 }
64832
64833 static inline u32 get_unaligned_be32(const void *p)
64834 {
64835 - return be32_to_cpup((__be32 *)p);
64836 + return be32_to_cpup((const __be32 *)p);
64837 }
64838
64839 static inline u64 get_unaligned_be64(const void *p)
64840 {
64841 - return be64_to_cpup((__be64 *)p);
64842 + return be64_to_cpup((const __be64 *)p);
64843 }
64844
64845 static inline void put_unaligned_le16(u16 val, void *p)
64846 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
64847 index 547e59c..db6ad19 100644
64848 --- a/include/linux/usb/renesas_usbhs.h
64849 +++ b/include/linux/usb/renesas_usbhs.h
64850 @@ -39,7 +39,7 @@ enum {
64851 */
64852 struct renesas_usbhs_driver_callback {
64853 int (*notify_hotplug)(struct platform_device *pdev);
64854 -};
64855 +} __no_const;
64856
64857 /*
64858 * callback functions for platform
64859 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
64860 * VBUS control is needed for Host
64861 */
64862 int (*set_vbus)(struct platform_device *pdev, int enable);
64863 -};
64864 +} __no_const;
64865
64866 /*
64867 * parameters for renesas usbhs
64868 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64869 index 6f8fbcf..8259001 100644
64870 --- a/include/linux/vermagic.h
64871 +++ b/include/linux/vermagic.h
64872 @@ -25,9 +25,35 @@
64873 #define MODULE_ARCH_VERMAGIC ""
64874 #endif
64875
64876 +#ifdef CONFIG_PAX_REFCOUNT
64877 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
64878 +#else
64879 +#define MODULE_PAX_REFCOUNT ""
64880 +#endif
64881 +
64882 +#ifdef CONSTIFY_PLUGIN
64883 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64884 +#else
64885 +#define MODULE_CONSTIFY_PLUGIN ""
64886 +#endif
64887 +
64888 +#ifdef STACKLEAK_PLUGIN
64889 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64890 +#else
64891 +#define MODULE_STACKLEAK_PLUGIN ""
64892 +#endif
64893 +
64894 +#ifdef CONFIG_GRKERNSEC
64895 +#define MODULE_GRSEC "GRSEC "
64896 +#else
64897 +#define MODULE_GRSEC ""
64898 +#endif
64899 +
64900 #define VERMAGIC_STRING \
64901 UTS_RELEASE " " \
64902 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64903 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64904 - MODULE_ARCH_VERMAGIC
64905 + MODULE_ARCH_VERMAGIC \
64906 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64907 + MODULE_GRSEC
64908
64909 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64910 index dcdfc2b..ec79ab5 100644
64911 --- a/include/linux/vmalloc.h
64912 +++ b/include/linux/vmalloc.h
64913 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64914 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64915 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64916 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64917 +
64918 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64919 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64920 +#endif
64921 +
64922 /* bits [20..32] reserved for arch specific ioremap internals */
64923
64924 /*
64925 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
64926 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64927 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64928 unsigned long start, unsigned long end, gfp_t gfp_mask,
64929 - pgprot_t prot, int node, void *caller);
64930 + pgprot_t prot, int node, void *caller) __size_overflow(1);
64931 extern void vfree(const void *addr);
64932
64933 extern void *vmap(struct page **pages, unsigned int count,
64934 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64935 extern void free_vm_area(struct vm_struct *area);
64936
64937 /* for /dev/kmem */
64938 -extern long vread(char *buf, char *addr, unsigned long count);
64939 -extern long vwrite(char *buf, char *addr, unsigned long count);
64940 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64941 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64942
64943 /*
64944 * Internals. Dont't use..
64945 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64946 index 65efb92..137adbb 100644
64947 --- a/include/linux/vmstat.h
64948 +++ b/include/linux/vmstat.h
64949 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64950 /*
64951 * Zone based page accounting with per cpu differentials.
64952 */
64953 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64954 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64955
64956 static inline void zone_page_state_add(long x, struct zone *zone,
64957 enum zone_stat_item item)
64958 {
64959 - atomic_long_add(x, &zone->vm_stat[item]);
64960 - atomic_long_add(x, &vm_stat[item]);
64961 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64962 + atomic_long_add_unchecked(x, &vm_stat[item]);
64963 }
64964
64965 static inline unsigned long global_page_state(enum zone_stat_item item)
64966 {
64967 - long x = atomic_long_read(&vm_stat[item]);
64968 + long x = atomic_long_read_unchecked(&vm_stat[item]);
64969 #ifdef CONFIG_SMP
64970 if (x < 0)
64971 x = 0;
64972 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64973 static inline unsigned long zone_page_state(struct zone *zone,
64974 enum zone_stat_item item)
64975 {
64976 - long x = atomic_long_read(&zone->vm_stat[item]);
64977 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64978 #ifdef CONFIG_SMP
64979 if (x < 0)
64980 x = 0;
64981 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64982 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64983 enum zone_stat_item item)
64984 {
64985 - long x = atomic_long_read(&zone->vm_stat[item]);
64986 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64987
64988 #ifdef CONFIG_SMP
64989 int cpu;
64990 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64991
64992 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64993 {
64994 - atomic_long_inc(&zone->vm_stat[item]);
64995 - atomic_long_inc(&vm_stat[item]);
64996 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
64997 + atomic_long_inc_unchecked(&vm_stat[item]);
64998 }
64999
65000 static inline void __inc_zone_page_state(struct page *page,
65001 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
65002
65003 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
65004 {
65005 - atomic_long_dec(&zone->vm_stat[item]);
65006 - atomic_long_dec(&vm_stat[item]);
65007 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
65008 + atomic_long_dec_unchecked(&vm_stat[item]);
65009 }
65010
65011 static inline void __dec_zone_page_state(struct page *page,
65012 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
65013 index e5d1220..ef6e406 100644
65014 --- a/include/linux/xattr.h
65015 +++ b/include/linux/xattr.h
65016 @@ -57,6 +57,11 @@
65017 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
65018 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
65019
65020 +/* User namespace */
65021 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
65022 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
65023 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
65024 +
65025 #ifdef __KERNEL__
65026
65027 #include <linux/types.h>
65028 diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
65029 index 22e61fd..28e493b 100644
65030 --- a/include/linux/xfrm.h
65031 +++ b/include/linux/xfrm.h
65032 @@ -84,6 +84,8 @@ struct xfrm_replay_state {
65033 __u32 bitmap;
65034 };
65035
65036 +#define XFRMA_REPLAY_ESN_MAX 4096
65037 +
65038 struct xfrm_replay_state_esn {
65039 unsigned int bmp_len;
65040 __u32 oseq;
65041 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
65042 index 944ecdf..a3994fc 100644
65043 --- a/include/media/saa7146_vv.h
65044 +++ b/include/media/saa7146_vv.h
65045 @@ -161,8 +161,8 @@ struct saa7146_ext_vv
65046 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
65047
65048 /* the extension can override this */
65049 - struct v4l2_ioctl_ops vid_ops;
65050 - struct v4l2_ioctl_ops vbi_ops;
65051 + v4l2_ioctl_ops_no_const vid_ops;
65052 + v4l2_ioctl_ops_no_const vbi_ops;
65053 /* pointer to the saa7146 core ops */
65054 const struct v4l2_ioctl_ops *core_ops;
65055
65056 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
65057 index a056e6e..31023a5 100644
65058 --- a/include/media/v4l2-dev.h
65059 +++ b/include/media/v4l2-dev.h
65060 @@ -73,7 +73,8 @@ struct v4l2_file_operations {
65061 int (*mmap) (struct file *, struct vm_area_struct *);
65062 int (*open) (struct file *);
65063 int (*release) (struct file *);
65064 -};
65065 +} __do_const;
65066 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
65067
65068 /*
65069 * Newer version of video_device, handled by videodev2.c
65070 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
65071 index d8b76f7..7d5aa18 100644
65072 --- a/include/media/v4l2-ioctl.h
65073 +++ b/include/media/v4l2-ioctl.h
65074 @@ -287,7 +287,7 @@ struct v4l2_ioctl_ops {
65075 long (*vidioc_default) (struct file *file, void *fh,
65076 bool valid_prio, int cmd, void *arg);
65077 };
65078 -
65079 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
65080
65081 /* v4l debugging and diagnostics */
65082
65083 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
65084 index 439dadc..1c67e3f 100644
65085 --- a/include/net/caif/caif_hsi.h
65086 +++ b/include/net/caif/caif_hsi.h
65087 @@ -98,7 +98,7 @@ struct cfhsi_drv {
65088 void (*rx_done_cb) (struct cfhsi_drv *drv);
65089 void (*wake_up_cb) (struct cfhsi_drv *drv);
65090 void (*wake_down_cb) (struct cfhsi_drv *drv);
65091 -};
65092 +} __no_const;
65093
65094 /* Structure implemented by HSI device. */
65095 struct cfhsi_dev {
65096 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
65097 index 9e5425b..8136ffc 100644
65098 --- a/include/net/caif/cfctrl.h
65099 +++ b/include/net/caif/cfctrl.h
65100 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
65101 void (*radioset_rsp)(void);
65102 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
65103 struct cflayer *client_layer);
65104 -};
65105 +} __no_const;
65106
65107 /* Link Setup Parameters for CAIF-Links. */
65108 struct cfctrl_link_param {
65109 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
65110 struct cfctrl {
65111 struct cfsrvl serv;
65112 struct cfctrl_rsp res;
65113 - atomic_t req_seq_no;
65114 - atomic_t rsp_seq_no;
65115 + atomic_unchecked_t req_seq_no;
65116 + atomic_unchecked_t rsp_seq_no;
65117 struct list_head list;
65118 /* Protects from simultaneous access to first_req list */
65119 spinlock_t info_list_lock;
65120 diff --git a/include/net/flow.h b/include/net/flow.h
65121 index 6c469db..7743b8e 100644
65122 --- a/include/net/flow.h
65123 +++ b/include/net/flow.h
65124 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
65125
65126 extern void flow_cache_flush(void);
65127 extern void flow_cache_flush_deferred(void);
65128 -extern atomic_t flow_cache_genid;
65129 +extern atomic_unchecked_t flow_cache_genid;
65130
65131 #endif
65132 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
65133 index 2040bff..f4c0733 100644
65134 --- a/include/net/inetpeer.h
65135 +++ b/include/net/inetpeer.h
65136 @@ -51,8 +51,8 @@ struct inet_peer {
65137 */
65138 union {
65139 struct {
65140 - atomic_t rid; /* Frag reception counter */
65141 - atomic_t ip_id_count; /* IP ID for the next packet */
65142 + atomic_unchecked_t rid; /* Frag reception counter */
65143 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
65144 __u32 tcp_ts;
65145 __u32 tcp_ts_stamp;
65146 };
65147 @@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
65148 more++;
65149 inet_peer_refcheck(p);
65150 do {
65151 - old = atomic_read(&p->ip_id_count);
65152 + old = atomic_read_unchecked(&p->ip_id_count);
65153 new = old + more;
65154 if (!new)
65155 new = 1;
65156 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
65157 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
65158 return new;
65159 }
65160
65161 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
65162 index 78df0866..00e5c9b 100644
65163 --- a/include/net/ip_fib.h
65164 +++ b/include/net/ip_fib.h
65165 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
65166
65167 #define FIB_RES_SADDR(net, res) \
65168 ((FIB_RES_NH(res).nh_saddr_genid == \
65169 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
65170 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
65171 FIB_RES_NH(res).nh_saddr : \
65172 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
65173 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
65174 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
65175 index 95374d1..2300e36 100644
65176 --- a/include/net/ip_vs.h
65177 +++ b/include/net/ip_vs.h
65178 @@ -510,7 +510,7 @@ struct ip_vs_conn {
65179 struct ip_vs_conn *control; /* Master control connection */
65180 atomic_t n_control; /* Number of controlled ones */
65181 struct ip_vs_dest *dest; /* real server */
65182 - atomic_t in_pkts; /* incoming packet counter */
65183 + atomic_unchecked_t in_pkts; /* incoming packet counter */
65184
65185 /* packet transmitter for different forwarding methods. If it
65186 mangles the packet, it must return NF_DROP or better NF_STOLEN,
65187 @@ -648,7 +648,7 @@ struct ip_vs_dest {
65188 __be16 port; /* port number of the server */
65189 union nf_inet_addr addr; /* IP address of the server */
65190 volatile unsigned int flags; /* dest status flags */
65191 - atomic_t conn_flags; /* flags to copy to conn */
65192 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
65193 atomic_t weight; /* server weight */
65194
65195 atomic_t refcnt; /* reference counter */
65196 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
65197 index 69b610a..fe3962c 100644
65198 --- a/include/net/irda/ircomm_core.h
65199 +++ b/include/net/irda/ircomm_core.h
65200 @@ -51,7 +51,7 @@ typedef struct {
65201 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
65202 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
65203 struct ircomm_info *);
65204 -} call_t;
65205 +} __no_const call_t;
65206
65207 struct ircomm_cb {
65208 irda_queue_t queue;
65209 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
65210 index 59ba38bc..d515662 100644
65211 --- a/include/net/irda/ircomm_tty.h
65212 +++ b/include/net/irda/ircomm_tty.h
65213 @@ -35,6 +35,7 @@
65214 #include <linux/termios.h>
65215 #include <linux/timer.h>
65216 #include <linux/tty.h> /* struct tty_struct */
65217 +#include <asm/local.h>
65218
65219 #include <net/irda/irias_object.h>
65220 #include <net/irda/ircomm_core.h>
65221 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
65222 unsigned short close_delay;
65223 unsigned short closing_wait; /* time to wait before closing */
65224
65225 - int open_count;
65226 - int blocked_open; /* # of blocked opens */
65227 + local_t open_count;
65228 + local_t blocked_open; /* # of blocked opens */
65229
65230 /* Protect concurent access to :
65231 * o self->open_count
65232 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
65233 index cc7c197..9f2da2a 100644
65234 --- a/include/net/iucv/af_iucv.h
65235 +++ b/include/net/iucv/af_iucv.h
65236 @@ -141,7 +141,7 @@ struct iucv_sock {
65237 struct iucv_sock_list {
65238 struct hlist_head head;
65239 rwlock_t lock;
65240 - atomic_t autobind_name;
65241 + atomic_unchecked_t autobind_name;
65242 };
65243
65244 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
65245 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
65246 index 6cdfeed..55a0256 100644
65247 --- a/include/net/neighbour.h
65248 +++ b/include/net/neighbour.h
65249 @@ -123,7 +123,7 @@ struct neigh_ops {
65250 void (*error_report)(struct neighbour *, struct sk_buff *);
65251 int (*output)(struct neighbour *, struct sk_buff *);
65252 int (*connected_output)(struct neighbour *, struct sk_buff *);
65253 -};
65254 +} __do_const;
65255
65256 struct pneigh_entry {
65257 struct pneigh_entry *next;
65258 diff --git a/include/net/netdma.h b/include/net/netdma.h
65259 index 8ba8ce2..99b7fff 100644
65260 --- a/include/net/netdma.h
65261 +++ b/include/net/netdma.h
65262 @@ -24,7 +24,7 @@
65263 #include <linux/dmaengine.h>
65264 #include <linux/skbuff.h>
65265
65266 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65267 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65268 struct sk_buff *skb, int offset, struct iovec *to,
65269 size_t len, struct dma_pinned_list *pinned_list);
65270
65271 diff --git a/include/net/netlink.h b/include/net/netlink.h
65272 index 785f37a..c81dc0c 100644
65273 --- a/include/net/netlink.h
65274 +++ b/include/net/netlink.h
65275 @@ -520,7 +520,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
65276 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65277 {
65278 if (mark)
65279 - skb_trim(skb, (unsigned char *) mark - skb->data);
65280 + skb_trim(skb, (const unsigned char *) mark - skb->data);
65281 }
65282
65283 /**
65284 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
65285 index bbd023a..97c6d0d 100644
65286 --- a/include/net/netns/ipv4.h
65287 +++ b/include/net/netns/ipv4.h
65288 @@ -57,8 +57,8 @@ struct netns_ipv4 {
65289 unsigned int sysctl_ping_group_range[2];
65290 long sysctl_tcp_mem[3];
65291
65292 - atomic_t rt_genid;
65293 - atomic_t dev_addr_genid;
65294 + atomic_unchecked_t rt_genid;
65295 + atomic_unchecked_t dev_addr_genid;
65296
65297 #ifdef CONFIG_IP_MROUTE
65298 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
65299 diff --git a/include/net/scm.h b/include/net/scm.h
65300 index d456f4c..0c0017c 100644
65301 --- a/include/net/scm.h
65302 +++ b/include/net/scm.h
65303 @@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
65304 }
65305
65306 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
65307 - struct scm_cookie *scm)
65308 + struct scm_cookie *scm, bool forcecreds)
65309 {
65310 memset(scm, 0, sizeof(*scm));
65311 + if (forcecreds)
65312 + scm_set_cred(scm, task_tgid(current), current_cred());
65313 unix_get_peersec_dgram(sock, scm);
65314 if (msg->msg_controllen <= 0)
65315 return 0;
65316 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
65317 index a2ef814..31a8e3f 100644
65318 --- a/include/net/sctp/sctp.h
65319 +++ b/include/net/sctp/sctp.h
65320 @@ -318,9 +318,9 @@ do { \
65321
65322 #else /* SCTP_DEBUG */
65323
65324 -#define SCTP_DEBUG_PRINTK(whatever...)
65325 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
65326 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65327 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
65328 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
65329 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65330 #define SCTP_ENABLE_DEBUG
65331 #define SCTP_DISABLE_DEBUG
65332 #define SCTP_ASSERT(expr, str, func)
65333 diff --git a/include/net/sock.h b/include/net/sock.h
65334 index 4a45216..2ae7cd8 100644
65335 --- a/include/net/sock.h
65336 +++ b/include/net/sock.h
65337 @@ -303,7 +303,7 @@ struct sock {
65338 #ifdef CONFIG_RPS
65339 __u32 sk_rxhash;
65340 #endif
65341 - atomic_t sk_drops;
65342 + atomic_unchecked_t sk_drops;
65343 int sk_rcvbuf;
65344
65345 struct sk_filter __rcu *sk_filter;
65346 @@ -1726,7 +1726,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
65347 }
65348
65349 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
65350 - char __user *from, char *to,
65351 + char __user *from, unsigned char *to,
65352 int copy, int offset)
65353 {
65354 if (skb->ip_summed == CHECKSUM_NONE) {
65355 @@ -1985,7 +1985,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
65356 }
65357 }
65358
65359 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65360 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65361
65362 static inline struct page *sk_stream_alloc_page(struct sock *sk)
65363 {
65364 diff --git a/include/net/tcp.h b/include/net/tcp.h
65365 index e79aa48..05e52de 100644
65366 --- a/include/net/tcp.h
65367 +++ b/include/net/tcp.h
65368 @@ -476,7 +476,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
65369 extern void tcp_xmit_retransmit_queue(struct sock *);
65370 extern void tcp_simple_retransmit(struct sock *);
65371 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
65372 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65373 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65374
65375 extern void tcp_send_probe0(struct sock *);
65376 extern void tcp_send_partial(struct sock *);
65377 @@ -643,8 +643,8 @@ struct tcp_skb_cb {
65378 struct inet6_skb_parm h6;
65379 #endif
65380 } header; /* For incoming frames */
65381 - __u32 seq; /* Starting sequence number */
65382 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
65383 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
65384 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
65385 __u32 when; /* used to compute rtt's */
65386 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
65387
65388 @@ -658,7 +658,7 @@ struct tcp_skb_cb {
65389
65390 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
65391 /* 1 byte hole */
65392 - __u32 ack_seq; /* Sequence number ACK'd */
65393 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
65394 };
65395
65396 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
65397 @@ -1459,7 +1459,7 @@ struct tcp_seq_afinfo {
65398 char *name;
65399 sa_family_t family;
65400 const struct file_operations *seq_fops;
65401 - struct seq_operations seq_ops;
65402 + seq_operations_no_const seq_ops;
65403 };
65404
65405 struct tcp_iter_state {
65406 diff --git a/include/net/udp.h b/include/net/udp.h
65407 index 065f379..b661b40 100644
65408 --- a/include/net/udp.h
65409 +++ b/include/net/udp.h
65410 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
65411 sa_family_t family;
65412 struct udp_table *udp_table;
65413 const struct file_operations *seq_fops;
65414 - struct seq_operations seq_ops;
65415 + seq_operations_no_const seq_ops;
65416 };
65417
65418 struct udp_iter_state {
65419 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
65420 index e0a55df..5890bca07 100644
65421 --- a/include/net/xfrm.h
65422 +++ b/include/net/xfrm.h
65423 @@ -505,7 +505,7 @@ struct xfrm_policy {
65424 struct timer_list timer;
65425
65426 struct flow_cache_object flo;
65427 - atomic_t genid;
65428 + atomic_unchecked_t genid;
65429 u32 priority;
65430 u32 index;
65431 struct xfrm_mark mark;
65432 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
65433 index 1a046b1..ee0bef0 100644
65434 --- a/include/rdma/iw_cm.h
65435 +++ b/include/rdma/iw_cm.h
65436 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
65437 int backlog);
65438
65439 int (*destroy_listen)(struct iw_cm_id *cm_id);
65440 -};
65441 +} __no_const;
65442
65443 /**
65444 * iw_create_cm_id - Create an IW CM identifier.
65445 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
65446 index 8f9dfba..610ab6c 100644
65447 --- a/include/scsi/libfc.h
65448 +++ b/include/scsi/libfc.h
65449 @@ -756,6 +756,7 @@ struct libfc_function_template {
65450 */
65451 void (*disc_stop_final) (struct fc_lport *);
65452 };
65453 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65454
65455 /**
65456 * struct fc_disc - Discovery context
65457 @@ -861,7 +862,7 @@ struct fc_lport {
65458 struct fc_vport *vport;
65459
65460 /* Operational Information */
65461 - struct libfc_function_template tt;
65462 + libfc_function_template_no_const tt;
65463 u8 link_up;
65464 u8 qfull;
65465 enum fc_lport_state state;
65466 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
65467 index ba96988..ecf2eb9 100644
65468 --- a/include/scsi/scsi_device.h
65469 +++ b/include/scsi/scsi_device.h
65470 @@ -163,9 +163,9 @@ struct scsi_device {
65471 unsigned int max_device_blocked; /* what device_blocked counts down from */
65472 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65473
65474 - atomic_t iorequest_cnt;
65475 - atomic_t iodone_cnt;
65476 - atomic_t ioerr_cnt;
65477 + atomic_unchecked_t iorequest_cnt;
65478 + atomic_unchecked_t iodone_cnt;
65479 + atomic_unchecked_t ioerr_cnt;
65480
65481 struct device sdev_gendev,
65482 sdev_dev;
65483 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
65484 index 719faf1..07b6728 100644
65485 --- a/include/scsi/scsi_transport_fc.h
65486 +++ b/include/scsi/scsi_transport_fc.h
65487 @@ -739,7 +739,8 @@ struct fc_function_template {
65488 unsigned long show_host_system_hostname:1;
65489
65490 unsigned long disable_target_scan:1;
65491 -};
65492 +} __do_const;
65493 +typedef struct fc_function_template __no_const fc_function_template_no_const;
65494
65495
65496 /**
65497 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65498 index 030b87c..98a6954 100644
65499 --- a/include/sound/ak4xxx-adda.h
65500 +++ b/include/sound/ak4xxx-adda.h
65501 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65502 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65503 unsigned char val);
65504 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65505 -};
65506 +} __no_const;
65507
65508 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65509
65510 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65511 index 8c05e47..2b5df97 100644
65512 --- a/include/sound/hwdep.h
65513 +++ b/include/sound/hwdep.h
65514 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65515 struct snd_hwdep_dsp_status *status);
65516 int (*dsp_load)(struct snd_hwdep *hw,
65517 struct snd_hwdep_dsp_image *image);
65518 -};
65519 +} __no_const;
65520
65521 struct snd_hwdep {
65522 struct snd_card *card;
65523 diff --git a/include/sound/info.h b/include/sound/info.h
65524 index 9ca1a49..aba1728 100644
65525 --- a/include/sound/info.h
65526 +++ b/include/sound/info.h
65527 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
65528 struct snd_info_buffer *buffer);
65529 void (*write)(struct snd_info_entry *entry,
65530 struct snd_info_buffer *buffer);
65531 -};
65532 +} __no_const;
65533
65534 struct snd_info_entry_ops {
65535 int (*open)(struct snd_info_entry *entry,
65536 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
65537 index 0d11128..814178e 100644
65538 --- a/include/sound/pcm.h
65539 +++ b/include/sound/pcm.h
65540 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
65541 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65542 int (*ack)(struct snd_pcm_substream *substream);
65543 };
65544 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65545
65546 /*
65547 *
65548 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65549 index af1b49e..a5d55a5 100644
65550 --- a/include/sound/sb16_csp.h
65551 +++ b/include/sound/sb16_csp.h
65552 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65553 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65554 int (*csp_stop) (struct snd_sb_csp * p);
65555 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65556 -};
65557 +} __no_const;
65558
65559 /*
65560 * CSP private data
65561 diff --git a/include/sound/soc.h b/include/sound/soc.h
65562 index c703871..f7fbbbd 100644
65563 --- a/include/sound/soc.h
65564 +++ b/include/sound/soc.h
65565 @@ -757,7 +757,7 @@ struct snd_soc_platform_driver {
65566 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65567 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
65568 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
65569 -};
65570 +} __do_const;
65571
65572 struct snd_soc_platform {
65573 const char *name;
65574 @@ -949,7 +949,7 @@ struct snd_soc_pcm_runtime {
65575 struct snd_soc_dai_link *dai_link;
65576 struct mutex pcm_mutex;
65577 enum snd_soc_pcm_subclass pcm_subclass;
65578 - struct snd_pcm_ops ops;
65579 + snd_pcm_ops_no_const ops;
65580
65581 unsigned int dev_registered:1;
65582
65583 diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
65584 index 0c3c2fb..d9d9990 100644
65585 --- a/include/sound/tea575x-tuner.h
65586 +++ b/include/sound/tea575x-tuner.h
65587 @@ -44,7 +44,7 @@ struct snd_tea575x_ops {
65588
65589 struct snd_tea575x {
65590 struct v4l2_device *v4l2_dev;
65591 - struct v4l2_file_operations fops;
65592 + v4l2_file_operations_no_const fops;
65593 struct video_device vd; /* video device */
65594 int radio_nr; /* radio_nr */
65595 bool tea5759; /* 5759 chip is present */
65596 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65597 index 4119966..1a4671c 100644
65598 --- a/include/sound/ymfpci.h
65599 +++ b/include/sound/ymfpci.h
65600 @@ -358,7 +358,7 @@ struct snd_ymfpci {
65601 spinlock_t reg_lock;
65602 spinlock_t voice_lock;
65603 wait_queue_head_t interrupt_sleep;
65604 - atomic_t interrupt_sleep_count;
65605 + atomic_unchecked_t interrupt_sleep_count;
65606 struct snd_info_entry *proc_entry;
65607 const struct firmware *dsp_microcode;
65608 const struct firmware *controller_microcode;
65609 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
65610 index 362e0d9..36b9a83 100644
65611 --- a/include/target/target_core_base.h
65612 +++ b/include/target/target_core_base.h
65613 @@ -441,7 +441,7 @@ struct t10_reservation_ops {
65614 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65615 int (*t10_pr_register)(struct se_cmd *);
65616 int (*t10_pr_clear)(struct se_cmd *);
65617 -};
65618 +} __no_const;
65619
65620 struct t10_reservation {
65621 /* Reservation effects all target ports */
65622 @@ -780,7 +780,7 @@ struct se_device {
65623 spinlock_t stats_lock;
65624 /* Active commands on this virtual SE device */
65625 atomic_t simple_cmds;
65626 - atomic_t dev_ordered_id;
65627 + atomic_unchecked_t dev_ordered_id;
65628 atomic_t execute_tasks;
65629 atomic_t dev_ordered_sync;
65630 atomic_t dev_qf_count;
65631 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
65632 new file mode 100644
65633 index 0000000..2efe49d
65634 --- /dev/null
65635 +++ b/include/trace/events/fs.h
65636 @@ -0,0 +1,53 @@
65637 +#undef TRACE_SYSTEM
65638 +#define TRACE_SYSTEM fs
65639 +
65640 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
65641 +#define _TRACE_FS_H
65642 +
65643 +#include <linux/fs.h>
65644 +#include <linux/tracepoint.h>
65645 +
65646 +TRACE_EVENT(do_sys_open,
65647 +
65648 + TP_PROTO(char *filename, int flags, int mode),
65649 +
65650 + TP_ARGS(filename, flags, mode),
65651 +
65652 + TP_STRUCT__entry(
65653 + __string( filename, filename )
65654 + __field( int, flags )
65655 + __field( int, mode )
65656 + ),
65657 +
65658 + TP_fast_assign(
65659 + __assign_str(filename, filename);
65660 + __entry->flags = flags;
65661 + __entry->mode = mode;
65662 + ),
65663 +
65664 + TP_printk("\"%s\" %x %o",
65665 + __get_str(filename), __entry->flags, __entry->mode)
65666 +);
65667 +
65668 +TRACE_EVENT(open_exec,
65669 +
65670 + TP_PROTO(const char *filename),
65671 +
65672 + TP_ARGS(filename),
65673 +
65674 + TP_STRUCT__entry(
65675 + __string( filename, filename )
65676 + ),
65677 +
65678 + TP_fast_assign(
65679 + __assign_str(filename, filename);
65680 + ),
65681 +
65682 + TP_printk("\"%s\"",
65683 + __get_str(filename))
65684 +);
65685 +
65686 +#endif /* _TRACE_FS_H */
65687 +
65688 +/* This part must be outside protection */
65689 +#include <trace/define_trace.h>
65690 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65691 index 1c09820..7f5ec79 100644
65692 --- a/include/trace/events/irq.h
65693 +++ b/include/trace/events/irq.h
65694 @@ -36,7 +36,7 @@ struct softirq_action;
65695 */
65696 TRACE_EVENT(irq_handler_entry,
65697
65698 - TP_PROTO(int irq, struct irqaction *action),
65699 + TP_PROTO(int irq, const struct irqaction *action),
65700
65701 TP_ARGS(irq, action),
65702
65703 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65704 */
65705 TRACE_EVENT(irq_handler_exit,
65706
65707 - TP_PROTO(int irq, struct irqaction *action, int ret),
65708 + TP_PROTO(int irq, const struct irqaction *action, int ret),
65709
65710 TP_ARGS(irq, action, ret),
65711
65712 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65713 index f9466fa..f4e2b81 100644
65714 --- a/include/video/udlfb.h
65715 +++ b/include/video/udlfb.h
65716 @@ -53,10 +53,10 @@ struct dlfb_data {
65717 u32 pseudo_palette[256];
65718 int blank_mode; /*one of FB_BLANK_ */
65719 /* blit-only rendering path metrics, exposed through sysfs */
65720 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65721 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65722 - atomic_t bytes_sent; /* to usb, after compression including overhead */
65723 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65724 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65725 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65726 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65727 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65728 };
65729
65730 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65731 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65732 index 0993a22..32ba2fe 100644
65733 --- a/include/video/uvesafb.h
65734 +++ b/include/video/uvesafb.h
65735 @@ -177,6 +177,7 @@ struct uvesafb_par {
65736 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65737 u8 pmi_setpal; /* PMI for palette changes */
65738 u16 *pmi_base; /* protected mode interface location */
65739 + u8 *pmi_code; /* protected mode code location */
65740 void *pmi_start;
65741 void *pmi_pal;
65742 u8 *vbe_state_orig; /*
65743 diff --git a/init/Kconfig b/init/Kconfig
65744 index d07dcf9..fa47d0e 100644
65745 --- a/init/Kconfig
65746 +++ b/init/Kconfig
65747 @@ -835,6 +835,7 @@ endif # CGROUPS
65748
65749 config CHECKPOINT_RESTORE
65750 bool "Checkpoint/restore support" if EXPERT
65751 + depends on !GRKERNSEC
65752 default n
65753 help
65754 Enables additional kernel features in a sake of checkpoint/restore.
65755 @@ -1014,6 +1015,7 @@ config UIDGID_CONVERTED
65756 # Security modules
65757 depends on SECURITY_TOMOYO = n
65758 depends on SECURITY_APPARMOR = n
65759 + depends on GRKERNSEC = n
65760
65761 config UIDGID_STRICT_TYPE_CHECKS
65762 bool "Require conversions between uid/gids and their internal representation"
65763 @@ -1401,7 +1403,7 @@ config SLUB_DEBUG
65764
65765 config COMPAT_BRK
65766 bool "Disable heap randomization"
65767 - default y
65768 + default n
65769 help
65770 Randomizing heap placement makes heap exploits harder, but it
65771 also breaks ancient binaries (including anything libc5 based).
65772 @@ -1584,7 +1586,7 @@ config INIT_ALL_POSSIBLE
65773 config STOP_MACHINE
65774 bool
65775 default y
65776 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
65777 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
65778 help
65779 Need stop_machine() primitive.
65780
65781 diff --git a/init/Makefile b/init/Makefile
65782 index 7bc47ee..6da2dc7 100644
65783 --- a/init/Makefile
65784 +++ b/init/Makefile
65785 @@ -2,6 +2,9 @@
65786 # Makefile for the linux kernel.
65787 #
65788
65789 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
65790 +asflags-y := $(GCC_PLUGINS_AFLAGS)
65791 +
65792 obj-y := main.o version.o mounts.o
65793 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
65794 obj-y += noinitramfs.o
65795 diff --git a/init/do_mounts.c b/init/do_mounts.c
65796 index d3f0aee..c9322f5 100644
65797 --- a/init/do_mounts.c
65798 +++ b/init/do_mounts.c
65799 @@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
65800 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65801 {
65802 struct super_block *s;
65803 - int err = sys_mount(name, "/root", fs, flags, data);
65804 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65805 if (err)
65806 return err;
65807
65808 - sys_chdir("/root");
65809 + sys_chdir((const char __force_user *)"/root");
65810 s = current->fs->pwd.dentry->d_sb;
65811 ROOT_DEV = s->s_dev;
65812 printk(KERN_INFO
65813 @@ -460,18 +460,18 @@ void __init change_floppy(char *fmt, ...)
65814 va_start(args, fmt);
65815 vsprintf(buf, fmt, args);
65816 va_end(args);
65817 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65818 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65819 if (fd >= 0) {
65820 sys_ioctl(fd, FDEJECT, 0);
65821 sys_close(fd);
65822 }
65823 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65824 - fd = sys_open("/dev/console", O_RDWR, 0);
65825 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65826 if (fd >= 0) {
65827 sys_ioctl(fd, TCGETS, (long)&termios);
65828 termios.c_lflag &= ~ICANON;
65829 sys_ioctl(fd, TCSETSF, (long)&termios);
65830 - sys_read(fd, &c, 1);
65831 + sys_read(fd, (char __user *)&c, 1);
65832 termios.c_lflag |= ICANON;
65833 sys_ioctl(fd, TCSETSF, (long)&termios);
65834 sys_close(fd);
65835 @@ -565,6 +565,6 @@ void __init prepare_namespace(void)
65836 mount_root();
65837 out:
65838 devtmpfs_mount("dev");
65839 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65840 - sys_chroot(".");
65841 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65842 + sys_chroot((const char __force_user *)".");
65843 }
65844 diff --git a/init/do_mounts.h b/init/do_mounts.h
65845 index f5b978a..69dbfe8 100644
65846 --- a/init/do_mounts.h
65847 +++ b/init/do_mounts.h
65848 @@ -15,15 +15,15 @@ extern int root_mountflags;
65849
65850 static inline int create_dev(char *name, dev_t dev)
65851 {
65852 - sys_unlink(name);
65853 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65854 + sys_unlink((char __force_user *)name);
65855 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65856 }
65857
65858 #if BITS_PER_LONG == 32
65859 static inline u32 bstat(char *name)
65860 {
65861 struct stat64 stat;
65862 - if (sys_stat64(name, &stat) != 0)
65863 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65864 return 0;
65865 if (!S_ISBLK(stat.st_mode))
65866 return 0;
65867 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65868 static inline u32 bstat(char *name)
65869 {
65870 struct stat stat;
65871 - if (sys_newstat(name, &stat) != 0)
65872 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65873 return 0;
65874 if (!S_ISBLK(stat.st_mode))
65875 return 0;
65876 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65877 index 135959a2..28a3f43 100644
65878 --- a/init/do_mounts_initrd.c
65879 +++ b/init/do_mounts_initrd.c
65880 @@ -53,13 +53,13 @@ static void __init handle_initrd(void)
65881 create_dev("/dev/root.old", Root_RAM0);
65882 /* mount initrd on rootfs' /root */
65883 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65884 - sys_mkdir("/old", 0700);
65885 - root_fd = sys_open("/", 0, 0);
65886 - old_fd = sys_open("/old", 0, 0);
65887 + sys_mkdir((const char __force_user *)"/old", 0700);
65888 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
65889 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65890 /* move initrd over / and chdir/chroot in initrd root */
65891 - sys_chdir("/root");
65892 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65893 - sys_chroot(".");
65894 + sys_chdir((const char __force_user *)"/root");
65895 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65896 + sys_chroot((const char __force_user *)".");
65897
65898 /*
65899 * In case that a resume from disk is carried out by linuxrc or one of
65900 @@ -76,15 +76,15 @@ static void __init handle_initrd(void)
65901
65902 /* move initrd to rootfs' /old */
65903 sys_fchdir(old_fd);
65904 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
65905 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65906 /* switch root and cwd back to / of rootfs */
65907 sys_fchdir(root_fd);
65908 - sys_chroot(".");
65909 + sys_chroot((const char __force_user *)".");
65910 sys_close(old_fd);
65911 sys_close(root_fd);
65912
65913 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65914 - sys_chdir("/old");
65915 + sys_chdir((const char __force_user *)"/old");
65916 return;
65917 }
65918
65919 @@ -92,17 +92,17 @@ static void __init handle_initrd(void)
65920 mount_root();
65921
65922 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65923 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65924 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65925 if (!error)
65926 printk("okay\n");
65927 else {
65928 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
65929 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65930 if (error == -ENOENT)
65931 printk("/initrd does not exist. Ignored.\n");
65932 else
65933 printk("failed\n");
65934 printk(KERN_NOTICE "Unmounting old root\n");
65935 - sys_umount("/old", MNT_DETACH);
65936 + sys_umount((char __force_user *)"/old", MNT_DETACH);
65937 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65938 if (fd < 0) {
65939 error = fd;
65940 @@ -125,11 +125,11 @@ int __init initrd_load(void)
65941 * mounted in the normal path.
65942 */
65943 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65944 - sys_unlink("/initrd.image");
65945 + sys_unlink((const char __force_user *)"/initrd.image");
65946 handle_initrd();
65947 return 1;
65948 }
65949 }
65950 - sys_unlink("/initrd.image");
65951 + sys_unlink((const char __force_user *)"/initrd.image");
65952 return 0;
65953 }
65954 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65955 index 8cb6db5..d729f50 100644
65956 --- a/init/do_mounts_md.c
65957 +++ b/init/do_mounts_md.c
65958 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
65959 partitioned ? "_d" : "", minor,
65960 md_setup_args[ent].device_names);
65961
65962 - fd = sys_open(name, 0, 0);
65963 + fd = sys_open((char __force_user *)name, 0, 0);
65964 if (fd < 0) {
65965 printk(KERN_ERR "md: open failed - cannot start "
65966 "array %s\n", name);
65967 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
65968 * array without it
65969 */
65970 sys_close(fd);
65971 - fd = sys_open(name, 0, 0);
65972 + fd = sys_open((char __force_user *)name, 0, 0);
65973 sys_ioctl(fd, BLKRRPART, 0);
65974 }
65975 sys_close(fd);
65976 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
65977
65978 wait_for_device_probe();
65979
65980 - fd = sys_open("/dev/md0", 0, 0);
65981 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65982 if (fd >= 0) {
65983 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65984 sys_close(fd);
65985 diff --git a/init/init_task.c b/init/init_task.c
65986 index 8b2f399..f0797c9 100644
65987 --- a/init/init_task.c
65988 +++ b/init/init_task.c
65989 @@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
65990 * Initial thread structure. Alignment of this is handled by a special
65991 * linker map entry.
65992 */
65993 +#ifdef CONFIG_X86
65994 +union thread_union init_thread_union __init_task_data;
65995 +#else
65996 union thread_union init_thread_union __init_task_data =
65997 { INIT_THREAD_INFO(init_task) };
65998 +#endif
65999 diff --git a/init/initramfs.c b/init/initramfs.c
66000 index 84c6bf1..8899338 100644
66001 --- a/init/initramfs.c
66002 +++ b/init/initramfs.c
66003 @@ -84,7 +84,7 @@ static void __init free_hash(void)
66004 }
66005 }
66006
66007 -static long __init do_utime(char *filename, time_t mtime)
66008 +static long __init do_utime(char __force_user *filename, time_t mtime)
66009 {
66010 struct timespec t[2];
66011
66012 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
66013 struct dir_entry *de, *tmp;
66014 list_for_each_entry_safe(de, tmp, &dir_list, list) {
66015 list_del(&de->list);
66016 - do_utime(de->name, de->mtime);
66017 + do_utime((char __force_user *)de->name, de->mtime);
66018 kfree(de->name);
66019 kfree(de);
66020 }
66021 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
66022 if (nlink >= 2) {
66023 char *old = find_link(major, minor, ino, mode, collected);
66024 if (old)
66025 - return (sys_link(old, collected) < 0) ? -1 : 1;
66026 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
66027 }
66028 return 0;
66029 }
66030 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
66031 {
66032 struct stat st;
66033
66034 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
66035 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
66036 if (S_ISDIR(st.st_mode))
66037 - sys_rmdir(path);
66038 + sys_rmdir((char __force_user *)path);
66039 else
66040 - sys_unlink(path);
66041 + sys_unlink((char __force_user *)path);
66042 }
66043 }
66044
66045 @@ -315,7 +315,7 @@ static int __init do_name(void)
66046 int openflags = O_WRONLY|O_CREAT;
66047 if (ml != 1)
66048 openflags |= O_TRUNC;
66049 - wfd = sys_open(collected, openflags, mode);
66050 + wfd = sys_open((char __force_user *)collected, openflags, mode);
66051
66052 if (wfd >= 0) {
66053 sys_fchown(wfd, uid, gid);
66054 @@ -327,17 +327,17 @@ static int __init do_name(void)
66055 }
66056 }
66057 } else if (S_ISDIR(mode)) {
66058 - sys_mkdir(collected, mode);
66059 - sys_chown(collected, uid, gid);
66060 - sys_chmod(collected, mode);
66061 + sys_mkdir((char __force_user *)collected, mode);
66062 + sys_chown((char __force_user *)collected, uid, gid);
66063 + sys_chmod((char __force_user *)collected, mode);
66064 dir_add(collected, mtime);
66065 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
66066 S_ISFIFO(mode) || S_ISSOCK(mode)) {
66067 if (maybe_link() == 0) {
66068 - sys_mknod(collected, mode, rdev);
66069 - sys_chown(collected, uid, gid);
66070 - sys_chmod(collected, mode);
66071 - do_utime(collected, mtime);
66072 + sys_mknod((char __force_user *)collected, mode, rdev);
66073 + sys_chown((char __force_user *)collected, uid, gid);
66074 + sys_chmod((char __force_user *)collected, mode);
66075 + do_utime((char __force_user *)collected, mtime);
66076 }
66077 }
66078 return 0;
66079 @@ -346,15 +346,15 @@ static int __init do_name(void)
66080 static int __init do_copy(void)
66081 {
66082 if (count >= body_len) {
66083 - sys_write(wfd, victim, body_len);
66084 + sys_write(wfd, (char __force_user *)victim, body_len);
66085 sys_close(wfd);
66086 - do_utime(vcollected, mtime);
66087 + do_utime((char __force_user *)vcollected, mtime);
66088 kfree(vcollected);
66089 eat(body_len);
66090 state = SkipIt;
66091 return 0;
66092 } else {
66093 - sys_write(wfd, victim, count);
66094 + sys_write(wfd, (char __force_user *)victim, count);
66095 body_len -= count;
66096 eat(count);
66097 return 1;
66098 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
66099 {
66100 collected[N_ALIGN(name_len) + body_len] = '\0';
66101 clean_path(collected, 0);
66102 - sys_symlink(collected + N_ALIGN(name_len), collected);
66103 - sys_lchown(collected, uid, gid);
66104 - do_utime(collected, mtime);
66105 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
66106 + sys_lchown((char __force_user *)collected, uid, gid);
66107 + do_utime((char __force_user *)collected, mtime);
66108 state = SkipIt;
66109 next_state = Reset;
66110 return 0;
66111 diff --git a/init/main.c b/init/main.c
66112 index b5cc0a7..8e67244 100644
66113 --- a/init/main.c
66114 +++ b/init/main.c
66115 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
66116 extern void tc_init(void);
66117 #endif
66118
66119 +extern void grsecurity_init(void);
66120 +
66121 /*
66122 * Debug helper: via this flag we know that we are in 'early bootup code'
66123 * where only the boot processor is running with IRQ disabled. This means
66124 @@ -148,6 +150,51 @@ static int __init set_reset_devices(char *str)
66125
66126 __setup("reset_devices", set_reset_devices);
66127
66128 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
66129 +extern char pax_enter_kernel_user[];
66130 +extern char pax_exit_kernel_user[];
66131 +extern pgdval_t clone_pgd_mask;
66132 +#endif
66133 +
66134 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
66135 +static int __init setup_pax_nouderef(char *str)
66136 +{
66137 +#ifdef CONFIG_X86_32
66138 + unsigned int cpu;
66139 + struct desc_struct *gdt;
66140 +
66141 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
66142 + gdt = get_cpu_gdt_table(cpu);
66143 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
66144 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
66145 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
66146 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
66147 + }
66148 + loadsegment(ds, __KERNEL_DS);
66149 + loadsegment(es, __KERNEL_DS);
66150 + loadsegment(ss, __KERNEL_DS);
66151 +#else
66152 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
66153 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
66154 + clone_pgd_mask = ~(pgdval_t)0UL;
66155 +#endif
66156 +
66157 + return 0;
66158 +}
66159 +early_param("pax_nouderef", setup_pax_nouderef);
66160 +#endif
66161 +
66162 +#ifdef CONFIG_PAX_SOFTMODE
66163 +int pax_softmode;
66164 +
66165 +static int __init setup_pax_softmode(char *str)
66166 +{
66167 + get_option(&str, &pax_softmode);
66168 + return 1;
66169 +}
66170 +__setup("pax_softmode=", setup_pax_softmode);
66171 +#endif
66172 +
66173 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
66174 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
66175 static const char *panic_later, *panic_param;
66176 @@ -674,6 +721,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
66177 {
66178 int count = preempt_count();
66179 int ret;
66180 + const char *msg1 = "", *msg2 = "";
66181
66182 if (initcall_debug)
66183 ret = do_one_initcall_debug(fn);
66184 @@ -686,15 +734,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
66185 sprintf(msgbuf, "error code %d ", ret);
66186
66187 if (preempt_count() != count) {
66188 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
66189 + msg1 = " preemption imbalance";
66190 preempt_count() = count;
66191 }
66192 if (irqs_disabled()) {
66193 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
66194 + msg2 = " disabled interrupts";
66195 local_irq_enable();
66196 }
66197 - if (msgbuf[0]) {
66198 - printk("initcall %pF returned with %s\n", fn, msgbuf);
66199 + if (msgbuf[0] || *msg1 || *msg2) {
66200 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
66201 }
66202
66203 return ret;
66204 @@ -747,8 +795,14 @@ static void __init do_initcall_level(int level)
66205 level, level,
66206 &repair_env_string);
66207
66208 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
66209 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
66210 do_one_initcall(*fn);
66211 +
66212 +#ifdef CONFIG_PAX_LATENT_ENTROPY
66213 + transfer_latent_entropy();
66214 +#endif
66215 +
66216 + }
66217 }
66218
66219 static void __init do_initcalls(void)
66220 @@ -782,8 +836,14 @@ static void __init do_pre_smp_initcalls(void)
66221 {
66222 initcall_t *fn;
66223
66224 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
66225 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
66226 do_one_initcall(*fn);
66227 +
66228 +#ifdef CONFIG_PAX_LATENT_ENTROPY
66229 + transfer_latent_entropy();
66230 +#endif
66231 +
66232 + }
66233 }
66234
66235 static void run_init_process(const char *init_filename)
66236 @@ -865,7 +925,7 @@ static int __init kernel_init(void * unused)
66237 do_basic_setup();
66238
66239 /* Open the /dev/console on the rootfs, this should never fail */
66240 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
66241 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
66242 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
66243
66244 (void) sys_dup(0);
66245 @@ -878,11 +938,13 @@ static int __init kernel_init(void * unused)
66246 if (!ramdisk_execute_command)
66247 ramdisk_execute_command = "/init";
66248
66249 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
66250 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
66251 ramdisk_execute_command = NULL;
66252 prepare_namespace();
66253 }
66254
66255 + grsecurity_init();
66256 +
66257 /*
66258 * Ok, we have completed the initial bootup, and
66259 * we're essentially up and running. Get rid of the
66260 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
66261 index 8ce5769..4666884 100644
66262 --- a/ipc/mqueue.c
66263 +++ b/ipc/mqueue.c
66264 @@ -279,6 +279,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
66265 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
66266 info->attr.mq_msgsize);
66267
66268 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66269 spin_lock(&mq_lock);
66270 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
66271 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
66272 diff --git a/ipc/msg.c b/ipc/msg.c
66273 index 7385de2..a8180e08 100644
66274 --- a/ipc/msg.c
66275 +++ b/ipc/msg.c
66276 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
66277 return security_msg_queue_associate(msq, msgflg);
66278 }
66279
66280 +static struct ipc_ops msg_ops = {
66281 + .getnew = newque,
66282 + .associate = msg_security,
66283 + .more_checks = NULL
66284 +};
66285 +
66286 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66287 {
66288 struct ipc_namespace *ns;
66289 - struct ipc_ops msg_ops;
66290 struct ipc_params msg_params;
66291
66292 ns = current->nsproxy->ipc_ns;
66293
66294 - msg_ops.getnew = newque;
66295 - msg_ops.associate = msg_security;
66296 - msg_ops.more_checks = NULL;
66297 -
66298 msg_params.key = key;
66299 msg_params.flg = msgflg;
66300
66301 diff --git a/ipc/sem.c b/ipc/sem.c
66302 index 5215a81..cfc0cac 100644
66303 --- a/ipc/sem.c
66304 +++ b/ipc/sem.c
66305 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
66306 return 0;
66307 }
66308
66309 +static struct ipc_ops sem_ops = {
66310 + .getnew = newary,
66311 + .associate = sem_security,
66312 + .more_checks = sem_more_checks
66313 +};
66314 +
66315 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66316 {
66317 struct ipc_namespace *ns;
66318 - struct ipc_ops sem_ops;
66319 struct ipc_params sem_params;
66320
66321 ns = current->nsproxy->ipc_ns;
66322 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66323 if (nsems < 0 || nsems > ns->sc_semmsl)
66324 return -EINVAL;
66325
66326 - sem_ops.getnew = newary;
66327 - sem_ops.associate = sem_security;
66328 - sem_ops.more_checks = sem_more_checks;
66329 -
66330 sem_params.key = key;
66331 sem_params.flg = semflg;
66332 sem_params.u.nsems = nsems;
66333 diff --git a/ipc/shm.c b/ipc/shm.c
66334 index 41c1285..cf6404c 100644
66335 --- a/ipc/shm.c
66336 +++ b/ipc/shm.c
66337 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
66338 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66339 #endif
66340
66341 +#ifdef CONFIG_GRKERNSEC
66342 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66343 + const time_t shm_createtime, const uid_t cuid,
66344 + const int shmid);
66345 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66346 + const time_t shm_createtime);
66347 +#endif
66348 +
66349 void shm_init_ns(struct ipc_namespace *ns)
66350 {
66351 ns->shm_ctlmax = SHMMAX;
66352 @@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
66353 shp->shm_lprid = 0;
66354 shp->shm_atim = shp->shm_dtim = 0;
66355 shp->shm_ctim = get_seconds();
66356 +#ifdef CONFIG_GRKERNSEC
66357 + {
66358 + struct timespec timeval;
66359 + do_posix_clock_monotonic_gettime(&timeval);
66360 +
66361 + shp->shm_createtime = timeval.tv_sec;
66362 + }
66363 +#endif
66364 shp->shm_segsz = size;
66365 shp->shm_nattch = 0;
66366 shp->shm_file = file;
66367 @@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
66368 return 0;
66369 }
66370
66371 +static struct ipc_ops shm_ops = {
66372 + .getnew = newseg,
66373 + .associate = shm_security,
66374 + .more_checks = shm_more_checks
66375 +};
66376 +
66377 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66378 {
66379 struct ipc_namespace *ns;
66380 - struct ipc_ops shm_ops;
66381 struct ipc_params shm_params;
66382
66383 ns = current->nsproxy->ipc_ns;
66384
66385 - shm_ops.getnew = newseg;
66386 - shm_ops.associate = shm_security;
66387 - shm_ops.more_checks = shm_more_checks;
66388 -
66389 shm_params.key = key;
66390 shm_params.flg = shmflg;
66391 shm_params.u.size = size;
66392 @@ -1000,6 +1017,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66393 f_mode = FMODE_READ | FMODE_WRITE;
66394 }
66395 if (shmflg & SHM_EXEC) {
66396 +
66397 +#ifdef CONFIG_PAX_MPROTECT
66398 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
66399 + goto out;
66400 +#endif
66401 +
66402 prot |= PROT_EXEC;
66403 acc_mode |= S_IXUGO;
66404 }
66405 @@ -1023,9 +1046,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66406 if (err)
66407 goto out_unlock;
66408
66409 +#ifdef CONFIG_GRKERNSEC
66410 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66411 + shp->shm_perm.cuid, shmid) ||
66412 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66413 + err = -EACCES;
66414 + goto out_unlock;
66415 + }
66416 +#endif
66417 +
66418 path = shp->shm_file->f_path;
66419 path_get(&path);
66420 shp->shm_nattch++;
66421 +#ifdef CONFIG_GRKERNSEC
66422 + shp->shm_lapid = current->pid;
66423 +#endif
66424 size = i_size_read(path.dentry->d_inode);
66425 shm_unlock(shp);
66426
66427 diff --git a/kernel/acct.c b/kernel/acct.c
66428 index 02e6167..54824f7 100644
66429 --- a/kernel/acct.c
66430 +++ b/kernel/acct.c
66431 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
66432 */
66433 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66434 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66435 - file->f_op->write(file, (char *)&ac,
66436 + file->f_op->write(file, (char __force_user *)&ac,
66437 sizeof(acct_t), &file->f_pos);
66438 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66439 set_fs(fs);
66440 diff --git a/kernel/audit.c b/kernel/audit.c
66441 index 1c7f2c6..9ba5359 100644
66442 --- a/kernel/audit.c
66443 +++ b/kernel/audit.c
66444 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
66445 3) suppressed due to audit_rate_limit
66446 4) suppressed due to audit_backlog_limit
66447 */
66448 -static atomic_t audit_lost = ATOMIC_INIT(0);
66449 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66450
66451 /* The netlink socket. */
66452 static struct sock *audit_sock;
66453 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
66454 unsigned long now;
66455 int print;
66456
66457 - atomic_inc(&audit_lost);
66458 + atomic_inc_unchecked(&audit_lost);
66459
66460 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66461
66462 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
66463 printk(KERN_WARNING
66464 "audit: audit_lost=%d audit_rate_limit=%d "
66465 "audit_backlog_limit=%d\n",
66466 - atomic_read(&audit_lost),
66467 + atomic_read_unchecked(&audit_lost),
66468 audit_rate_limit,
66469 audit_backlog_limit);
66470 audit_panic(message);
66471 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
66472 status_set.pid = audit_pid;
66473 status_set.rate_limit = audit_rate_limit;
66474 status_set.backlog_limit = audit_backlog_limit;
66475 - status_set.lost = atomic_read(&audit_lost);
66476 + status_set.lost = atomic_read_unchecked(&audit_lost);
66477 status_set.backlog = skb_queue_len(&audit_skb_queue);
66478 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66479 &status_set, sizeof(status_set));
66480 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
66481 index 4b96415..d8c16ee 100644
66482 --- a/kernel/auditsc.c
66483 +++ b/kernel/auditsc.c
66484 @@ -2289,7 +2289,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
66485 }
66486
66487 /* global counter which is incremented every time something logs in */
66488 -static atomic_t session_id = ATOMIC_INIT(0);
66489 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66490
66491 /**
66492 * audit_set_loginuid - set current task's audit_context loginuid
66493 @@ -2313,7 +2313,7 @@ int audit_set_loginuid(uid_t loginuid)
66494 return -EPERM;
66495 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
66496
66497 - sessionid = atomic_inc_return(&session_id);
66498 + sessionid = atomic_inc_return_unchecked(&session_id);
66499 if (context && context->in_syscall) {
66500 struct audit_buffer *ab;
66501
66502 diff --git a/kernel/capability.c b/kernel/capability.c
66503 index 493d972..ea17248 100644
66504 --- a/kernel/capability.c
66505 +++ b/kernel/capability.c
66506 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
66507 * before modification is attempted and the application
66508 * fails.
66509 */
66510 + if (tocopy > ARRAY_SIZE(kdata))
66511 + return -EFAULT;
66512 +
66513 if (copy_to_user(dataptr, kdata, tocopy
66514 * sizeof(struct __user_cap_data_struct))) {
66515 return -EFAULT;
66516 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
66517 int ret;
66518
66519 rcu_read_lock();
66520 - ret = security_capable(__task_cred(t), ns, cap);
66521 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
66522 + gr_task_is_capable(t, __task_cred(t), cap);
66523 rcu_read_unlock();
66524
66525 - return (ret == 0);
66526 + return ret;
66527 }
66528
66529 /**
66530 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
66531 int ret;
66532
66533 rcu_read_lock();
66534 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
66535 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
66536 rcu_read_unlock();
66537
66538 - return (ret == 0);
66539 + return ret;
66540 }
66541
66542 /**
66543 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
66544 BUG();
66545 }
66546
66547 - if (security_capable(current_cred(), ns, cap) == 0) {
66548 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
66549 current->flags |= PF_SUPERPRIV;
66550 return true;
66551 }
66552 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
66553 }
66554 EXPORT_SYMBOL(ns_capable);
66555
66556 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
66557 +{
66558 + if (unlikely(!cap_valid(cap))) {
66559 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66560 + BUG();
66561 + }
66562 +
66563 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
66564 + current->flags |= PF_SUPERPRIV;
66565 + return true;
66566 + }
66567 + return false;
66568 +}
66569 +EXPORT_SYMBOL(ns_capable_nolog);
66570 +
66571 /**
66572 * capable - Determine if the current task has a superior capability in effect
66573 * @cap: The capability to be tested for
66574 @@ -408,6 +427,12 @@ bool capable(int cap)
66575 }
66576 EXPORT_SYMBOL(capable);
66577
66578 +bool capable_nolog(int cap)
66579 +{
66580 + return ns_capable_nolog(&init_user_ns, cap);
66581 +}
66582 +EXPORT_SYMBOL(capable_nolog);
66583 +
66584 /**
66585 * nsown_capable - Check superior capability to one's own user_ns
66586 * @cap: The capability in question
66587 @@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
66588
66589 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66590 }
66591 +
66592 +bool inode_capable_nolog(const struct inode *inode, int cap)
66593 +{
66594 + struct user_namespace *ns = current_user_ns();
66595 +
66596 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66597 +}
66598 diff --git a/kernel/compat.c b/kernel/compat.c
66599 index c28a306..b4d0cf3 100644
66600 --- a/kernel/compat.c
66601 +++ b/kernel/compat.c
66602 @@ -13,6 +13,7 @@
66603
66604 #include <linux/linkage.h>
66605 #include <linux/compat.h>
66606 +#include <linux/module.h>
66607 #include <linux/errno.h>
66608 #include <linux/time.h>
66609 #include <linux/signal.h>
66610 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
66611 mm_segment_t oldfs;
66612 long ret;
66613
66614 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66615 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66616 oldfs = get_fs();
66617 set_fs(KERNEL_DS);
66618 ret = hrtimer_nanosleep_restart(restart);
66619 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
66620 oldfs = get_fs();
66621 set_fs(KERNEL_DS);
66622 ret = hrtimer_nanosleep(&tu,
66623 - rmtp ? (struct timespec __user *)&rmt : NULL,
66624 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
66625 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66626 set_fs(oldfs);
66627
66628 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
66629 mm_segment_t old_fs = get_fs();
66630
66631 set_fs(KERNEL_DS);
66632 - ret = sys_sigpending((old_sigset_t __user *) &s);
66633 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
66634 set_fs(old_fs);
66635 if (ret == 0)
66636 ret = put_user(s, set);
66637 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
66638 mm_segment_t old_fs = get_fs();
66639
66640 set_fs(KERNEL_DS);
66641 - ret = sys_old_getrlimit(resource, &r);
66642 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66643 set_fs(old_fs);
66644
66645 if (!ret) {
66646 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
66647 mm_segment_t old_fs = get_fs();
66648
66649 set_fs(KERNEL_DS);
66650 - ret = sys_getrusage(who, (struct rusage __user *) &r);
66651 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66652 set_fs(old_fs);
66653
66654 if (ret)
66655 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
66656 set_fs (KERNEL_DS);
66657 ret = sys_wait4(pid,
66658 (stat_addr ?
66659 - (unsigned int __user *) &status : NULL),
66660 - options, (struct rusage __user *) &r);
66661 + (unsigned int __force_user *) &status : NULL),
66662 + options, (struct rusage __force_user *) &r);
66663 set_fs (old_fs);
66664
66665 if (ret > 0) {
66666 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
66667 memset(&info, 0, sizeof(info));
66668
66669 set_fs(KERNEL_DS);
66670 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66671 - uru ? (struct rusage __user *)&ru : NULL);
66672 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66673 + uru ? (struct rusage __force_user *)&ru : NULL);
66674 set_fs(old_fs);
66675
66676 if ((ret < 0) || (info.si_signo == 0))
66677 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
66678 oldfs = get_fs();
66679 set_fs(KERNEL_DS);
66680 err = sys_timer_settime(timer_id, flags,
66681 - (struct itimerspec __user *) &newts,
66682 - (struct itimerspec __user *) &oldts);
66683 + (struct itimerspec __force_user *) &newts,
66684 + (struct itimerspec __force_user *) &oldts);
66685 set_fs(oldfs);
66686 if (!err && old && put_compat_itimerspec(old, &oldts))
66687 return -EFAULT;
66688 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
66689 oldfs = get_fs();
66690 set_fs(KERNEL_DS);
66691 err = sys_timer_gettime(timer_id,
66692 - (struct itimerspec __user *) &ts);
66693 + (struct itimerspec __force_user *) &ts);
66694 set_fs(oldfs);
66695 if (!err && put_compat_itimerspec(setting, &ts))
66696 return -EFAULT;
66697 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
66698 oldfs = get_fs();
66699 set_fs(KERNEL_DS);
66700 err = sys_clock_settime(which_clock,
66701 - (struct timespec __user *) &ts);
66702 + (struct timespec __force_user *) &ts);
66703 set_fs(oldfs);
66704 return err;
66705 }
66706 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
66707 oldfs = get_fs();
66708 set_fs(KERNEL_DS);
66709 err = sys_clock_gettime(which_clock,
66710 - (struct timespec __user *) &ts);
66711 + (struct timespec __force_user *) &ts);
66712 set_fs(oldfs);
66713 if (!err && put_compat_timespec(&ts, tp))
66714 return -EFAULT;
66715 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
66716
66717 oldfs = get_fs();
66718 set_fs(KERNEL_DS);
66719 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66720 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66721 set_fs(oldfs);
66722
66723 err = compat_put_timex(utp, &txc);
66724 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66725 oldfs = get_fs();
66726 set_fs(KERNEL_DS);
66727 err = sys_clock_getres(which_clock,
66728 - (struct timespec __user *) &ts);
66729 + (struct timespec __force_user *) &ts);
66730 set_fs(oldfs);
66731 if (!err && tp && put_compat_timespec(&ts, tp))
66732 return -EFAULT;
66733 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66734 long err;
66735 mm_segment_t oldfs;
66736 struct timespec tu;
66737 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66738 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66739
66740 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66741 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66742 oldfs = get_fs();
66743 set_fs(KERNEL_DS);
66744 err = clock_nanosleep_restart(restart);
66745 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66746 oldfs = get_fs();
66747 set_fs(KERNEL_DS);
66748 err = sys_clock_nanosleep(which_clock, flags,
66749 - (struct timespec __user *) &in,
66750 - (struct timespec __user *) &out);
66751 + (struct timespec __force_user *) &in,
66752 + (struct timespec __force_user *) &out);
66753 set_fs(oldfs);
66754
66755 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66756 diff --git a/kernel/configs.c b/kernel/configs.c
66757 index 42e8fa0..9e7406b 100644
66758 --- a/kernel/configs.c
66759 +++ b/kernel/configs.c
66760 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66761 struct proc_dir_entry *entry;
66762
66763 /* create the current config file */
66764 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66765 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66766 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66767 + &ikconfig_file_ops);
66768 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66769 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66770 + &ikconfig_file_ops);
66771 +#endif
66772 +#else
66773 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66774 &ikconfig_file_ops);
66775 +#endif
66776 +
66777 if (!entry)
66778 return -ENOMEM;
66779
66780 diff --git a/kernel/cred.c b/kernel/cred.c
66781 index de728ac..e3c267c 100644
66782 --- a/kernel/cred.c
66783 +++ b/kernel/cred.c
66784 @@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
66785 validate_creds(cred);
66786 alter_cred_subscribers(cred, -1);
66787 put_cred(cred);
66788 +
66789 +#ifdef CONFIG_GRKERNSEC_SETXID
66790 + cred = (struct cred *) tsk->delayed_cred;
66791 + if (cred != NULL) {
66792 + tsk->delayed_cred = NULL;
66793 + validate_creds(cred);
66794 + alter_cred_subscribers(cred, -1);
66795 + put_cred(cred);
66796 + }
66797 +#endif
66798 }
66799
66800 /**
66801 @@ -469,7 +479,7 @@ error_put:
66802 * Always returns 0 thus allowing this function to be tail-called at the end
66803 * of, say, sys_setgid().
66804 */
66805 -int commit_creds(struct cred *new)
66806 +static int __commit_creds(struct cred *new)
66807 {
66808 struct task_struct *task = current;
66809 const struct cred *old = task->real_cred;
66810 @@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
66811
66812 get_cred(new); /* we will require a ref for the subj creds too */
66813
66814 + gr_set_role_label(task, new->uid, new->gid);
66815 +
66816 /* dumpability changes */
66817 if (!uid_eq(old->euid, new->euid) ||
66818 !gid_eq(old->egid, new->egid) ||
66819 @@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
66820 put_cred(old);
66821 return 0;
66822 }
66823 +#ifdef CONFIG_GRKERNSEC_SETXID
66824 +extern int set_user(struct cred *new);
66825 +
66826 +void gr_delayed_cred_worker(void)
66827 +{
66828 + const struct cred *new = current->delayed_cred;
66829 + struct cred *ncred;
66830 +
66831 + current->delayed_cred = NULL;
66832 +
66833 + if (current_uid() && new != NULL) {
66834 + // from doing get_cred on it when queueing this
66835 + put_cred(new);
66836 + return;
66837 + } else if (new == NULL)
66838 + return;
66839 +
66840 + ncred = prepare_creds();
66841 + if (!ncred)
66842 + goto die;
66843 + // uids
66844 + ncred->uid = new->uid;
66845 + ncred->euid = new->euid;
66846 + ncred->suid = new->suid;
66847 + ncred->fsuid = new->fsuid;
66848 + // gids
66849 + ncred->gid = new->gid;
66850 + ncred->egid = new->egid;
66851 + ncred->sgid = new->sgid;
66852 + ncred->fsgid = new->fsgid;
66853 + // groups
66854 + if (set_groups(ncred, new->group_info) < 0) {
66855 + abort_creds(ncred);
66856 + goto die;
66857 + }
66858 + // caps
66859 + ncred->securebits = new->securebits;
66860 + ncred->cap_inheritable = new->cap_inheritable;
66861 + ncred->cap_permitted = new->cap_permitted;
66862 + ncred->cap_effective = new->cap_effective;
66863 + ncred->cap_bset = new->cap_bset;
66864 +
66865 + if (set_user(ncred)) {
66866 + abort_creds(ncred);
66867 + goto die;
66868 + }
66869 +
66870 + // from doing get_cred on it when queueing this
66871 + put_cred(new);
66872 +
66873 + __commit_creds(ncred);
66874 + return;
66875 +die:
66876 + // from doing get_cred on it when queueing this
66877 + put_cred(new);
66878 + do_group_exit(SIGKILL);
66879 +}
66880 +#endif
66881 +
66882 +int commit_creds(struct cred *new)
66883 +{
66884 +#ifdef CONFIG_GRKERNSEC_SETXID
66885 + int ret;
66886 + int schedule_it = 0;
66887 + struct task_struct *t;
66888 +
66889 + /* we won't get called with tasklist_lock held for writing
66890 + and interrupts disabled as the cred struct in that case is
66891 + init_cred
66892 + */
66893 + if (grsec_enable_setxid && !current_is_single_threaded() &&
66894 + !current_uid() && new->uid) {
66895 + schedule_it = 1;
66896 + }
66897 + ret = __commit_creds(new);
66898 + if (schedule_it) {
66899 + rcu_read_lock();
66900 + read_lock(&tasklist_lock);
66901 + for (t = next_thread(current); t != current;
66902 + t = next_thread(t)) {
66903 + if (t->delayed_cred == NULL) {
66904 + t->delayed_cred = get_cred(new);
66905 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
66906 + set_tsk_need_resched(t);
66907 + }
66908 + }
66909 + read_unlock(&tasklist_lock);
66910 + rcu_read_unlock();
66911 + }
66912 + return ret;
66913 +#else
66914 + return __commit_creds(new);
66915 +#endif
66916 +}
66917 +
66918 EXPORT_SYMBOL(commit_creds);
66919
66920 /**
66921 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66922 index 0557f24..1a00d9a 100644
66923 --- a/kernel/debug/debug_core.c
66924 +++ b/kernel/debug/debug_core.c
66925 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66926 */
66927 static atomic_t masters_in_kgdb;
66928 static atomic_t slaves_in_kgdb;
66929 -static atomic_t kgdb_break_tasklet_var;
66930 +static atomic_unchecked_t kgdb_break_tasklet_var;
66931 atomic_t kgdb_setting_breakpoint;
66932
66933 struct task_struct *kgdb_usethread;
66934 @@ -132,7 +132,7 @@ int kgdb_single_step;
66935 static pid_t kgdb_sstep_pid;
66936
66937 /* to keep track of the CPU which is doing the single stepping*/
66938 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66939 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66940
66941 /*
66942 * If you are debugging a problem where roundup (the collection of
66943 @@ -540,7 +540,7 @@ return_normal:
66944 * kernel will only try for the value of sstep_tries before
66945 * giving up and continuing on.
66946 */
66947 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66948 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66949 (kgdb_info[cpu].task &&
66950 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66951 atomic_set(&kgdb_active, -1);
66952 @@ -634,8 +634,8 @@ cpu_master_loop:
66953 }
66954
66955 kgdb_restore:
66956 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66957 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66958 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66959 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66960 if (kgdb_info[sstep_cpu].task)
66961 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66962 else
66963 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
66964 static void kgdb_tasklet_bpt(unsigned long ing)
66965 {
66966 kgdb_breakpoint();
66967 - atomic_set(&kgdb_break_tasklet_var, 0);
66968 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66969 }
66970
66971 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66972
66973 void kgdb_schedule_breakpoint(void)
66974 {
66975 - if (atomic_read(&kgdb_break_tasklet_var) ||
66976 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66977 atomic_read(&kgdb_active) != -1 ||
66978 atomic_read(&kgdb_setting_breakpoint))
66979 return;
66980 - atomic_inc(&kgdb_break_tasklet_var);
66981 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
66982 tasklet_schedule(&kgdb_tasklet_breakpoint);
66983 }
66984 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
66985 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
66986 index 1f91413..362a0a1 100644
66987 --- a/kernel/debug/kdb/kdb_main.c
66988 +++ b/kernel/debug/kdb/kdb_main.c
66989 @@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
66990 list_for_each_entry(mod, kdb_modules, list) {
66991
66992 kdb_printf("%-20s%8u 0x%p ", mod->name,
66993 - mod->core_size, (void *)mod);
66994 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
66995 #ifdef CONFIG_MODULE_UNLOAD
66996 kdb_printf("%4ld ", module_refcount(mod));
66997 #endif
66998 @@ -1994,7 +1994,7 @@ static int kdb_lsmod(int argc, const char **argv)
66999 kdb_printf(" (Loading)");
67000 else
67001 kdb_printf(" (Live)");
67002 - kdb_printf(" 0x%p", mod->module_core);
67003 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
67004
67005 #ifdef CONFIG_MODULE_UNLOAD
67006 {
67007 diff --git a/kernel/events/core.c b/kernel/events/core.c
67008 index d7d71d6..b6ec863 100644
67009 --- a/kernel/events/core.c
67010 +++ b/kernel/events/core.c
67011 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
67012 return 0;
67013 }
67014
67015 -static atomic64_t perf_event_id;
67016 +static atomic64_unchecked_t perf_event_id;
67017
67018 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
67019 enum event_type_t event_type);
67020 @@ -2663,7 +2663,7 @@ static void __perf_event_read(void *info)
67021
67022 static inline u64 perf_event_count(struct perf_event *event)
67023 {
67024 - return local64_read(&event->count) + atomic64_read(&event->child_count);
67025 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
67026 }
67027
67028 static u64 perf_event_read(struct perf_event *event)
67029 @@ -2933,12 +2933,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
67030 /*
67031 * Called when the last reference to the file is gone.
67032 */
67033 -static int perf_release(struct inode *inode, struct file *file)
67034 +static void put_event(struct perf_event *event)
67035 {
67036 - struct perf_event *event = file->private_data;
67037 struct task_struct *owner;
67038
67039 - file->private_data = NULL;
67040 + if (!atomic_long_dec_and_test(&event->refcount))
67041 + return;
67042
67043 rcu_read_lock();
67044 owner = ACCESS_ONCE(event->owner);
67045 @@ -2973,7 +2973,13 @@ static int perf_release(struct inode *inode, struct file *file)
67046 put_task_struct(owner);
67047 }
67048
67049 - return perf_event_release_kernel(event);
67050 + perf_event_release_kernel(event);
67051 +}
67052 +
67053 +static int perf_release(struct inode *inode, struct file *file)
67054 +{
67055 + put_event(file->private_data);
67056 + return 0;
67057 }
67058
67059 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
67060 @@ -2987,9 +2993,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
67061 mutex_lock(&event->child_mutex);
67062 total += perf_event_read(event);
67063 *enabled += event->total_time_enabled +
67064 - atomic64_read(&event->child_total_time_enabled);
67065 + atomic64_read_unchecked(&event->child_total_time_enabled);
67066 *running += event->total_time_running +
67067 - atomic64_read(&event->child_total_time_running);
67068 + atomic64_read_unchecked(&event->child_total_time_running);
67069
67070 list_for_each_entry(child, &event->child_list, child_list) {
67071 total += perf_event_read(child);
67072 @@ -3225,7 +3231,7 @@ unlock:
67073
67074 static const struct file_operations perf_fops;
67075
67076 -static struct perf_event *perf_fget_light(int fd, int *fput_needed)
67077 +static struct file *perf_fget_light(int fd, int *fput_needed)
67078 {
67079 struct file *file;
67080
67081 @@ -3239,7 +3245,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
67082 return ERR_PTR(-EBADF);
67083 }
67084
67085 - return file->private_data;
67086 + return file;
67087 }
67088
67089 static int perf_event_set_output(struct perf_event *event,
67090 @@ -3271,19 +3277,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
67091
67092 case PERF_EVENT_IOC_SET_OUTPUT:
67093 {
67094 + struct file *output_file = NULL;
67095 struct perf_event *output_event = NULL;
67096 int fput_needed = 0;
67097 int ret;
67098
67099 if (arg != -1) {
67100 - output_event = perf_fget_light(arg, &fput_needed);
67101 - if (IS_ERR(output_event))
67102 - return PTR_ERR(output_event);
67103 + output_file = perf_fget_light(arg, &fput_needed);
67104 + if (IS_ERR(output_file))
67105 + return PTR_ERR(output_file);
67106 + output_event = output_file->private_data;
67107 }
67108
67109 ret = perf_event_set_output(event, output_event);
67110 if (output_event)
67111 - fput_light(output_event->filp, fput_needed);
67112 + fput_light(output_file, fput_needed);
67113
67114 return ret;
67115 }
67116 @@ -3396,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event)
67117 userpg->offset -= local64_read(&event->hw.prev_count);
67118
67119 userpg->time_enabled = enabled +
67120 - atomic64_read(&event->child_total_time_enabled);
67121 + atomic64_read_unchecked(&event->child_total_time_enabled);
67122
67123 userpg->time_running = running +
67124 - atomic64_read(&event->child_total_time_running);
67125 + atomic64_read_unchecked(&event->child_total_time_running);
67126
67127 arch_perf_update_userpage(userpg, now);
67128
67129 @@ -3832,11 +3840,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
67130 values[n++] = perf_event_count(event);
67131 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
67132 values[n++] = enabled +
67133 - atomic64_read(&event->child_total_time_enabled);
67134 + atomic64_read_unchecked(&event->child_total_time_enabled);
67135 }
67136 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
67137 values[n++] = running +
67138 - atomic64_read(&event->child_total_time_running);
67139 + atomic64_read_unchecked(&event->child_total_time_running);
67140 }
67141 if (read_format & PERF_FORMAT_ID)
67142 values[n++] = primary_event_id(event);
67143 @@ -4514,12 +4522,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
67144 * need to add enough zero bytes after the string to handle
67145 * the 64bit alignment we do later.
67146 */
67147 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
67148 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
67149 if (!buf) {
67150 name = strncpy(tmp, "//enomem", sizeof(tmp));
67151 goto got_name;
67152 }
67153 - name = d_path(&file->f_path, buf, PATH_MAX);
67154 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
67155 if (IS_ERR(name)) {
67156 name = strncpy(tmp, "//toolong", sizeof(tmp));
67157 goto got_name;
67158 @@ -5922,6 +5930,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
67159
67160 mutex_init(&event->mmap_mutex);
67161
67162 + atomic_long_set(&event->refcount, 1);
67163 event->cpu = cpu;
67164 event->attr = *attr;
67165 event->group_leader = group_leader;
67166 @@ -5931,7 +5940,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
67167 event->parent = parent_event;
67168
67169 event->ns = get_pid_ns(current->nsproxy->pid_ns);
67170 - event->id = atomic64_inc_return(&perf_event_id);
67171 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
67172
67173 event->state = PERF_EVENT_STATE_INACTIVE;
67174
67175 @@ -6232,12 +6241,12 @@ SYSCALL_DEFINE5(perf_event_open,
67176 return event_fd;
67177
67178 if (group_fd != -1) {
67179 - group_leader = perf_fget_light(group_fd, &fput_needed);
67180 - if (IS_ERR(group_leader)) {
67181 - err = PTR_ERR(group_leader);
67182 + group_file = perf_fget_light(group_fd, &fput_needed);
67183 + if (IS_ERR(group_file)) {
67184 + err = PTR_ERR(group_file);
67185 goto err_fd;
67186 }
67187 - group_file = group_leader->filp;
67188 + group_leader = group_file->private_data;
67189 if (flags & PERF_FLAG_FD_OUTPUT)
67190 output_event = group_leader;
67191 if (flags & PERF_FLAG_FD_NO_GROUP)
67192 @@ -6372,7 +6381,6 @@ SYSCALL_DEFINE5(perf_event_open,
67193 put_ctx(gctx);
67194 }
67195
67196 - event->filp = event_file;
67197 WARN_ON_ONCE(ctx->parent_ctx);
67198 mutex_lock(&ctx->mutex);
67199
67200 @@ -6462,7 +6470,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
67201 goto err_free;
67202 }
67203
67204 - event->filp = NULL;
67205 WARN_ON_ONCE(ctx->parent_ctx);
67206 mutex_lock(&ctx->mutex);
67207 perf_install_in_context(ctx, event, cpu);
67208 @@ -6493,10 +6500,10 @@ static void sync_child_event(struct perf_event *child_event,
67209 /*
67210 * Add back the child's count to the parent's count:
67211 */
67212 - atomic64_add(child_val, &parent_event->child_count);
67213 - atomic64_add(child_event->total_time_enabled,
67214 + atomic64_add_unchecked(child_val, &parent_event->child_count);
67215 + atomic64_add_unchecked(child_event->total_time_enabled,
67216 &parent_event->child_total_time_enabled);
67217 - atomic64_add(child_event->total_time_running,
67218 + atomic64_add_unchecked(child_event->total_time_running,
67219 &parent_event->child_total_time_running);
67220
67221 /*
67222 @@ -6511,7 +6518,7 @@ static void sync_child_event(struct perf_event *child_event,
67223 * Release the parent event, if this was the last
67224 * reference to it.
67225 */
67226 - fput(parent_event->filp);
67227 + put_event(parent_event);
67228 }
67229
67230 static void
67231 @@ -6587,9 +6594,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
67232 *
67233 * __perf_event_exit_task()
67234 * sync_child_event()
67235 - * fput(parent_event->filp)
67236 - * perf_release()
67237 - * mutex_lock(&ctx->mutex)
67238 + * put_event()
67239 + * mutex_lock(&ctx->mutex)
67240 *
67241 * But since its the parent context it won't be the same instance.
67242 */
67243 @@ -6657,7 +6663,7 @@ static void perf_free_event(struct perf_event *event,
67244 list_del_init(&event->child_list);
67245 mutex_unlock(&parent->child_mutex);
67246
67247 - fput(parent->filp);
67248 + put_event(parent);
67249
67250 perf_group_detach(event);
67251 list_del_event(event, ctx);
67252 @@ -6737,6 +6743,12 @@ inherit_event(struct perf_event *parent_event,
67253 NULL, NULL);
67254 if (IS_ERR(child_event))
67255 return child_event;
67256 +
67257 + if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
67258 + free_event(child_event);
67259 + return NULL;
67260 + }
67261 +
67262 get_ctx(child_ctx);
67263
67264 /*
67265 @@ -6778,14 +6790,6 @@ inherit_event(struct perf_event *parent_event,
67266 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
67267
67268 /*
67269 - * Get a reference to the parent filp - we will fput it
67270 - * when the child event exits. This is safe to do because
67271 - * we are in the parent and we know that the filp still
67272 - * exists and has a nonzero count:
67273 - */
67274 - atomic_long_inc(&parent_event->filp->f_count);
67275 -
67276 - /*
67277 * Link this into the parent event's child list
67278 */
67279 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
67280 diff --git a/kernel/exit.c b/kernel/exit.c
67281 index 46ce8da..c648f3a 100644
67282 --- a/kernel/exit.c
67283 +++ b/kernel/exit.c
67284 @@ -59,6 +59,10 @@
67285 #include <asm/pgtable.h>
67286 #include <asm/mmu_context.h>
67287
67288 +#ifdef CONFIG_GRKERNSEC
67289 +extern rwlock_t grsec_exec_file_lock;
67290 +#endif
67291 +
67292 static void exit_mm(struct task_struct * tsk);
67293
67294 static void __unhash_process(struct task_struct *p, bool group_dead)
67295 @@ -182,6 +186,10 @@ void release_task(struct task_struct * p)
67296 struct task_struct *leader;
67297 int zap_leader;
67298 repeat:
67299 +#ifdef CONFIG_NET
67300 + gr_del_task_from_ip_table(p);
67301 +#endif
67302 +
67303 /* don't need to get the RCU readlock here - the process is dead and
67304 * can't be modifying its own credentials. But shut RCU-lockdep up */
67305 rcu_read_lock();
67306 @@ -394,7 +402,7 @@ int allow_signal(int sig)
67307 * know it'll be handled, so that they don't get converted to
67308 * SIGKILL or just silently dropped.
67309 */
67310 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
67311 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
67312 recalc_sigpending();
67313 spin_unlock_irq(&current->sighand->siglock);
67314 return 0;
67315 @@ -430,6 +438,17 @@ void daemonize(const char *name, ...)
67316 vsnprintf(current->comm, sizeof(current->comm), name, args);
67317 va_end(args);
67318
67319 +#ifdef CONFIG_GRKERNSEC
67320 + write_lock(&grsec_exec_file_lock);
67321 + if (current->exec_file) {
67322 + fput(current->exec_file);
67323 + current->exec_file = NULL;
67324 + }
67325 + write_unlock(&grsec_exec_file_lock);
67326 +#endif
67327 +
67328 + gr_set_kernel_label(current);
67329 +
67330 /*
67331 * If we were started as result of loading a module, close all of the
67332 * user space pages. We don't need them, and if we didn't close them
67333 @@ -907,6 +926,8 @@ void do_exit(long code)
67334 struct task_struct *tsk = current;
67335 int group_dead;
67336
67337 + set_fs(USER_DS);
67338 +
67339 profile_task_exit(tsk);
67340
67341 WARN_ON(blk_needs_flush_plug(tsk));
67342 @@ -923,7 +944,6 @@ void do_exit(long code)
67343 * mm_release()->clear_child_tid() from writing to a user-controlled
67344 * kernel address.
67345 */
67346 - set_fs(USER_DS);
67347
67348 ptrace_event(PTRACE_EVENT_EXIT, code);
67349
67350 @@ -985,6 +1005,9 @@ void do_exit(long code)
67351 tsk->exit_code = code;
67352 taskstats_exit(tsk, group_dead);
67353
67354 + gr_acl_handle_psacct(tsk, code);
67355 + gr_acl_handle_exit();
67356 +
67357 exit_mm(tsk);
67358
67359 if (group_dead)
67360 @@ -1101,7 +1124,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
67361 * Take down every thread in the group. This is called by fatal signals
67362 * as well as by sys_exit_group (below).
67363 */
67364 -void
67365 +__noreturn void
67366 do_group_exit(int exit_code)
67367 {
67368 struct signal_struct *sig = current->signal;
67369 diff --git a/kernel/fork.c b/kernel/fork.c
67370 index f9d0499..e4f8f44 100644
67371 --- a/kernel/fork.c
67372 +++ b/kernel/fork.c
67373 @@ -321,7 +321,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
67374 *stackend = STACK_END_MAGIC; /* for overflow detection */
67375
67376 #ifdef CONFIG_CC_STACKPROTECTOR
67377 - tsk->stack_canary = get_random_int();
67378 + tsk->stack_canary = pax_get_random_long();
67379 #endif
67380
67381 /*
67382 @@ -345,13 +345,78 @@ out:
67383 }
67384
67385 #ifdef CONFIG_MMU
67386 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
67387 +{
67388 + struct vm_area_struct *tmp;
67389 + unsigned long charge;
67390 + struct mempolicy *pol;
67391 + struct file *file;
67392 +
67393 + charge = 0;
67394 + if (mpnt->vm_flags & VM_ACCOUNT) {
67395 + unsigned long len;
67396 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67397 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67398 + goto fail_nomem;
67399 + charge = len;
67400 + }
67401 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67402 + if (!tmp)
67403 + goto fail_nomem;
67404 + *tmp = *mpnt;
67405 + tmp->vm_mm = mm;
67406 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
67407 + pol = mpol_dup(vma_policy(mpnt));
67408 + if (IS_ERR(pol))
67409 + goto fail_nomem_policy;
67410 + vma_set_policy(tmp, pol);
67411 + if (anon_vma_fork(tmp, mpnt))
67412 + goto fail_nomem_anon_vma_fork;
67413 + tmp->vm_flags &= ~VM_LOCKED;
67414 + tmp->vm_next = tmp->vm_prev = NULL;
67415 + tmp->vm_mirror = NULL;
67416 + file = tmp->vm_file;
67417 + if (file) {
67418 + struct inode *inode = file->f_path.dentry->d_inode;
67419 + struct address_space *mapping = file->f_mapping;
67420 +
67421 + get_file(file);
67422 + if (tmp->vm_flags & VM_DENYWRITE)
67423 + atomic_dec(&inode->i_writecount);
67424 + mutex_lock(&mapping->i_mmap_mutex);
67425 + if (tmp->vm_flags & VM_SHARED)
67426 + mapping->i_mmap_writable++;
67427 + flush_dcache_mmap_lock(mapping);
67428 + /* insert tmp into the share list, just after mpnt */
67429 + vma_prio_tree_add(tmp, mpnt);
67430 + flush_dcache_mmap_unlock(mapping);
67431 + mutex_unlock(&mapping->i_mmap_mutex);
67432 + }
67433 +
67434 + /*
67435 + * Clear hugetlb-related page reserves for children. This only
67436 + * affects MAP_PRIVATE mappings. Faults generated by the child
67437 + * are not guaranteed to succeed, even if read-only
67438 + */
67439 + if (is_vm_hugetlb_page(tmp))
67440 + reset_vma_resv_huge_pages(tmp);
67441 +
67442 + return tmp;
67443 +
67444 +fail_nomem_anon_vma_fork:
67445 + mpol_put(pol);
67446 +fail_nomem_policy:
67447 + kmem_cache_free(vm_area_cachep, tmp);
67448 +fail_nomem:
67449 + vm_unacct_memory(charge);
67450 + return NULL;
67451 +}
67452 +
67453 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67454 {
67455 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
67456 struct rb_node **rb_link, *rb_parent;
67457 int retval;
67458 - unsigned long charge;
67459 - struct mempolicy *pol;
67460
67461 down_write(&oldmm->mmap_sem);
67462 flush_cache_dup_mm(oldmm);
67463 @@ -363,8 +428,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67464 mm->locked_vm = 0;
67465 mm->mmap = NULL;
67466 mm->mmap_cache = NULL;
67467 - mm->free_area_cache = oldmm->mmap_base;
67468 - mm->cached_hole_size = ~0UL;
67469 + mm->free_area_cache = oldmm->free_area_cache;
67470 + mm->cached_hole_size = oldmm->cached_hole_size;
67471 mm->map_count = 0;
67472 cpumask_clear(mm_cpumask(mm));
67473 mm->mm_rb = RB_ROOT;
67474 @@ -380,8 +445,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67475
67476 prev = NULL;
67477 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
67478 - struct file *file;
67479 -
67480 if (mpnt->vm_flags & VM_DONTCOPY) {
67481 long pages = vma_pages(mpnt);
67482 mm->total_vm -= pages;
67483 @@ -389,54 +452,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67484 -pages);
67485 continue;
67486 }
67487 - charge = 0;
67488 - if (mpnt->vm_flags & VM_ACCOUNT) {
67489 - unsigned long len;
67490 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67491 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67492 - goto fail_nomem;
67493 - charge = len;
67494 + tmp = dup_vma(mm, oldmm, mpnt);
67495 + if (!tmp) {
67496 + retval = -ENOMEM;
67497 + goto out;
67498 }
67499 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67500 - if (!tmp)
67501 - goto fail_nomem;
67502 - *tmp = *mpnt;
67503 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
67504 - pol = mpol_dup(vma_policy(mpnt));
67505 - retval = PTR_ERR(pol);
67506 - if (IS_ERR(pol))
67507 - goto fail_nomem_policy;
67508 - vma_set_policy(tmp, pol);
67509 - tmp->vm_mm = mm;
67510 - if (anon_vma_fork(tmp, mpnt))
67511 - goto fail_nomem_anon_vma_fork;
67512 - tmp->vm_flags &= ~VM_LOCKED;
67513 - tmp->vm_next = tmp->vm_prev = NULL;
67514 - file = tmp->vm_file;
67515 - if (file) {
67516 - struct inode *inode = file->f_path.dentry->d_inode;
67517 - struct address_space *mapping = file->f_mapping;
67518 -
67519 - get_file(file);
67520 - if (tmp->vm_flags & VM_DENYWRITE)
67521 - atomic_dec(&inode->i_writecount);
67522 - mutex_lock(&mapping->i_mmap_mutex);
67523 - if (tmp->vm_flags & VM_SHARED)
67524 - mapping->i_mmap_writable++;
67525 - flush_dcache_mmap_lock(mapping);
67526 - /* insert tmp into the share list, just after mpnt */
67527 - vma_prio_tree_add(tmp, mpnt);
67528 - flush_dcache_mmap_unlock(mapping);
67529 - mutex_unlock(&mapping->i_mmap_mutex);
67530 - }
67531 -
67532 - /*
67533 - * Clear hugetlb-related page reserves for children. This only
67534 - * affects MAP_PRIVATE mappings. Faults generated by the child
67535 - * are not guaranteed to succeed, even if read-only
67536 - */
67537 - if (is_vm_hugetlb_page(tmp))
67538 - reset_vma_resv_huge_pages(tmp);
67539
67540 /*
67541 * Link in the new vma and copy the page table entries.
67542 @@ -459,9 +479,34 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67543 if (retval)
67544 goto out;
67545
67546 - if (file)
67547 + if (tmp->vm_file)
67548 uprobe_mmap(tmp);
67549 }
67550 +
67551 +#ifdef CONFIG_PAX_SEGMEXEC
67552 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
67553 + struct vm_area_struct *mpnt_m;
67554 +
67555 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
67556 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
67557 +
67558 + if (!mpnt->vm_mirror)
67559 + continue;
67560 +
67561 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
67562 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
67563 + mpnt->vm_mirror = mpnt_m;
67564 + } else {
67565 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
67566 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
67567 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
67568 + mpnt->vm_mirror->vm_mirror = mpnt;
67569 + }
67570 + }
67571 + BUG_ON(mpnt_m);
67572 + }
67573 +#endif
67574 +
67575 /* a new mm has just been created */
67576 arch_dup_mmap(oldmm, mm);
67577 retval = 0;
67578 @@ -470,14 +515,6 @@ out:
67579 flush_tlb_mm(oldmm);
67580 up_write(&oldmm->mmap_sem);
67581 return retval;
67582 -fail_nomem_anon_vma_fork:
67583 - mpol_put(pol);
67584 -fail_nomem_policy:
67585 - kmem_cache_free(vm_area_cachep, tmp);
67586 -fail_nomem:
67587 - retval = -ENOMEM;
67588 - vm_unacct_memory(charge);
67589 - goto out;
67590 }
67591
67592 static inline int mm_alloc_pgd(struct mm_struct *mm)
67593 @@ -714,8 +751,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
67594 return ERR_PTR(err);
67595
67596 mm = get_task_mm(task);
67597 - if (mm && mm != current->mm &&
67598 - !ptrace_may_access(task, mode)) {
67599 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
67600 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
67601 mmput(mm);
67602 mm = ERR_PTR(-EACCES);
67603 }
67604 @@ -936,13 +973,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
67605 spin_unlock(&fs->lock);
67606 return -EAGAIN;
67607 }
67608 - fs->users++;
67609 + atomic_inc(&fs->users);
67610 spin_unlock(&fs->lock);
67611 return 0;
67612 }
67613 tsk->fs = copy_fs_struct(fs);
67614 if (!tsk->fs)
67615 return -ENOMEM;
67616 + gr_set_chroot_entries(tsk, &tsk->fs->root);
67617 return 0;
67618 }
67619
67620 @@ -1209,6 +1247,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67621 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
67622 #endif
67623 retval = -EAGAIN;
67624 +
67625 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
67626 +
67627 if (atomic_read(&p->real_cred->user->processes) >=
67628 task_rlimit(p, RLIMIT_NPROC)) {
67629 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
67630 @@ -1431,6 +1472,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67631 /* Need tasklist lock for parent etc handling! */
67632 write_lock_irq(&tasklist_lock);
67633
67634 + /* synchronizes with gr_set_acls() */
67635 + gr_copy_label(p);
67636 +
67637 /* CLONE_PARENT re-uses the old parent */
67638 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
67639 p->real_parent = current->real_parent;
67640 @@ -1541,6 +1585,8 @@ bad_fork_cleanup_count:
67641 bad_fork_free:
67642 free_task(p);
67643 fork_out:
67644 + gr_log_forkfail(retval);
67645 +
67646 return ERR_PTR(retval);
67647 }
67648
67649 @@ -1641,6 +1687,8 @@ long do_fork(unsigned long clone_flags,
67650 if (clone_flags & CLONE_PARENT_SETTID)
67651 put_user(nr, parent_tidptr);
67652
67653 + gr_handle_brute_check();
67654 +
67655 if (clone_flags & CLONE_VFORK) {
67656 p->vfork_done = &vfork;
67657 init_completion(&vfork);
67658 @@ -1739,7 +1787,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
67659 return 0;
67660
67661 /* don't need lock here; in the worst case we'll do useless copy */
67662 - if (fs->users == 1)
67663 + if (atomic_read(&fs->users) == 1)
67664 return 0;
67665
67666 *new_fsp = copy_fs_struct(fs);
67667 @@ -1828,7 +1876,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
67668 fs = current->fs;
67669 spin_lock(&fs->lock);
67670 current->fs = new_fs;
67671 - if (--fs->users)
67672 + gr_set_chroot_entries(current, &current->fs->root);
67673 + if (atomic_dec_return(&fs->users))
67674 new_fs = NULL;
67675 else
67676 new_fs = fs;
67677 diff --git a/kernel/futex.c b/kernel/futex.c
67678 index 3717e7b..473c750 100644
67679 --- a/kernel/futex.c
67680 +++ b/kernel/futex.c
67681 @@ -54,6 +54,7 @@
67682 #include <linux/mount.h>
67683 #include <linux/pagemap.h>
67684 #include <linux/syscalls.h>
67685 +#include <linux/ptrace.h>
67686 #include <linux/signal.h>
67687 #include <linux/export.h>
67688 #include <linux/magic.h>
67689 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
67690 struct page *page, *page_head;
67691 int err, ro = 0;
67692
67693 +#ifdef CONFIG_PAX_SEGMEXEC
67694 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67695 + return -EFAULT;
67696 +#endif
67697 +
67698 /*
67699 * The futex address must be "naturally" aligned.
67700 */
67701 @@ -2714,6 +2720,7 @@ static int __init futex_init(void)
67702 {
67703 u32 curval;
67704 int i;
67705 + mm_segment_t oldfs;
67706
67707 /*
67708 * This will fail and we want it. Some arch implementations do
67709 @@ -2725,8 +2732,11 @@ static int __init futex_init(void)
67710 * implementation, the non-functional ones will return
67711 * -ENOSYS.
67712 */
67713 + oldfs = get_fs();
67714 + set_fs(USER_DS);
67715 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
67716 futex_cmpxchg_enabled = 1;
67717 + set_fs(oldfs);
67718
67719 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
67720 plist_head_init(&futex_queues[i].chain);
67721 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
67722 index 9b22d03..6295b62 100644
67723 --- a/kernel/gcov/base.c
67724 +++ b/kernel/gcov/base.c
67725 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
67726 }
67727
67728 #ifdef CONFIG_MODULES
67729 -static inline int within(void *addr, void *start, unsigned long size)
67730 -{
67731 - return ((addr >= start) && (addr < start + size));
67732 -}
67733 -
67734 /* Update list and generate events when modules are unloaded. */
67735 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67736 void *data)
67737 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67738 prev = NULL;
67739 /* Remove entries located in module from linked list. */
67740 for (info = gcov_info_head; info; info = info->next) {
67741 - if (within(info, mod->module_core, mod->core_size)) {
67742 + if (within_module_core_rw((unsigned long)info, mod)) {
67743 if (prev)
67744 prev->next = info->next;
67745 else
67746 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
67747 index 6db7a5e..25b6648 100644
67748 --- a/kernel/hrtimer.c
67749 +++ b/kernel/hrtimer.c
67750 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
67751 local_irq_restore(flags);
67752 }
67753
67754 -static void run_hrtimer_softirq(struct softirq_action *h)
67755 +static void run_hrtimer_softirq(void)
67756 {
67757 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
67758
67759 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
67760 index 4304919..408c4c0 100644
67761 --- a/kernel/jump_label.c
67762 +++ b/kernel/jump_label.c
67763 @@ -13,6 +13,7 @@
67764 #include <linux/sort.h>
67765 #include <linux/err.h>
67766 #include <linux/static_key.h>
67767 +#include <linux/mm.h>
67768
67769 #ifdef HAVE_JUMP_LABEL
67770
67771 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
67772
67773 size = (((unsigned long)stop - (unsigned long)start)
67774 / sizeof(struct jump_entry));
67775 + pax_open_kernel();
67776 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67777 + pax_close_kernel();
67778 }
67779
67780 static void jump_label_update(struct static_key *key, int enable);
67781 @@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
67782 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67783 struct jump_entry *iter;
67784
67785 + pax_open_kernel();
67786 for (iter = iter_start; iter < iter_stop; iter++) {
67787 if (within_module_init(iter->code, mod))
67788 iter->code = 0;
67789 }
67790 + pax_close_kernel();
67791 }
67792
67793 static int
67794 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67795 index 2169fee..45c017a 100644
67796 --- a/kernel/kallsyms.c
67797 +++ b/kernel/kallsyms.c
67798 @@ -11,6 +11,9 @@
67799 * Changed the compression method from stem compression to "table lookup"
67800 * compression (see scripts/kallsyms.c for a more complete description)
67801 */
67802 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67803 +#define __INCLUDED_BY_HIDESYM 1
67804 +#endif
67805 #include <linux/kallsyms.h>
67806 #include <linux/module.h>
67807 #include <linux/init.h>
67808 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
67809
67810 static inline int is_kernel_inittext(unsigned long addr)
67811 {
67812 + if (system_state != SYSTEM_BOOTING)
67813 + return 0;
67814 +
67815 if (addr >= (unsigned long)_sinittext
67816 && addr <= (unsigned long)_einittext)
67817 return 1;
67818 return 0;
67819 }
67820
67821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67822 +#ifdef CONFIG_MODULES
67823 +static inline int is_module_text(unsigned long addr)
67824 +{
67825 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67826 + return 1;
67827 +
67828 + addr = ktla_ktva(addr);
67829 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67830 +}
67831 +#else
67832 +static inline int is_module_text(unsigned long addr)
67833 +{
67834 + return 0;
67835 +}
67836 +#endif
67837 +#endif
67838 +
67839 static inline int is_kernel_text(unsigned long addr)
67840 {
67841 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67842 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67843
67844 static inline int is_kernel(unsigned long addr)
67845 {
67846 +
67847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67848 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
67849 + return 1;
67850 +
67851 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67852 +#else
67853 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67854 +#endif
67855 +
67856 return 1;
67857 return in_gate_area_no_mm(addr);
67858 }
67859
67860 static int is_ksym_addr(unsigned long addr)
67861 {
67862 +
67863 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67864 + if (is_module_text(addr))
67865 + return 0;
67866 +#endif
67867 +
67868 if (all_var)
67869 return is_kernel(addr);
67870
67871 @@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67872
67873 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67874 {
67875 - iter->name[0] = '\0';
67876 iter->nameoff = get_symbol_offset(new_pos);
67877 iter->pos = new_pos;
67878 }
67879 @@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
67880 {
67881 struct kallsym_iter *iter = m->private;
67882
67883 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67884 + if (current_uid())
67885 + return 0;
67886 +#endif
67887 +
67888 /* Some debugging symbols have no name. Ignore them. */
67889 if (!iter->name[0])
67890 return 0;
67891 @@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
67892 */
67893 type = iter->exported ? toupper(iter->type) :
67894 tolower(iter->type);
67895 +
67896 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
67897 type, iter->name, iter->module_name);
67898 } else
67899 @@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67900 struct kallsym_iter *iter;
67901 int ret;
67902
67903 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67904 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67905 if (!iter)
67906 return -ENOMEM;
67907 reset_iter(iter, 0);
67908 diff --git a/kernel/kexec.c b/kernel/kexec.c
67909 index 4e2e472..cd0c7ae 100644
67910 --- a/kernel/kexec.c
67911 +++ b/kernel/kexec.c
67912 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67913 unsigned long flags)
67914 {
67915 struct compat_kexec_segment in;
67916 - struct kexec_segment out, __user *ksegments;
67917 + struct kexec_segment out;
67918 + struct kexec_segment __user *ksegments;
67919 unsigned long i, result;
67920
67921 /* Don't allow clients that don't understand the native
67922 diff --git a/kernel/kmod.c b/kernel/kmod.c
67923 index ff2c7cb..085d7af 100644
67924 --- a/kernel/kmod.c
67925 +++ b/kernel/kmod.c
67926 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
67927 kfree(info->argv);
67928 }
67929
67930 -static int call_modprobe(char *module_name, int wait)
67931 +static int call_modprobe(char *module_name, char *module_param, int wait)
67932 {
67933 static char *envp[] = {
67934 "HOME=/",
67935 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
67936 NULL
67937 };
67938
67939 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
67940 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
67941 if (!argv)
67942 goto out;
67943
67944 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
67945 argv[1] = "-q";
67946 argv[2] = "--";
67947 argv[3] = module_name; /* check free_modprobe_argv() */
67948 - argv[4] = NULL;
67949 + argv[4] = module_param;
67950 + argv[5] = NULL;
67951
67952 return call_usermodehelper_fns(modprobe_path, argv, envp,
67953 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
67954 @@ -112,9 +113,8 @@ out:
67955 * If module auto-loading support is disabled then this function
67956 * becomes a no-operation.
67957 */
67958 -int __request_module(bool wait, const char *fmt, ...)
67959 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67960 {
67961 - va_list args;
67962 char module_name[MODULE_NAME_LEN];
67963 unsigned int max_modprobes;
67964 int ret;
67965 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
67966 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67967 static int kmod_loop_msg;
67968
67969 - va_start(args, fmt);
67970 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67971 - va_end(args);
67972 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67973 if (ret >= MODULE_NAME_LEN)
67974 return -ENAMETOOLONG;
67975
67976 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
67977 if (ret)
67978 return ret;
67979
67980 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67981 + if (!current_uid()) {
67982 + /* hack to workaround consolekit/udisks stupidity */
67983 + read_lock(&tasklist_lock);
67984 + if (!strcmp(current->comm, "mount") &&
67985 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67986 + read_unlock(&tasklist_lock);
67987 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67988 + return -EPERM;
67989 + }
67990 + read_unlock(&tasklist_lock);
67991 + }
67992 +#endif
67993 +
67994 /* If modprobe needs a service that is in a module, we get a recursive
67995 * loop. Limit the number of running kmod threads to max_threads/2 or
67996 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67997 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
67998
67999 trace_module_request(module_name, wait, _RET_IP_);
68000
68001 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
68002 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
68003
68004 atomic_dec(&kmod_concurrent);
68005 return ret;
68006 }
68007 +
68008 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
68009 +{
68010 + va_list args;
68011 + int ret;
68012 +
68013 + va_start(args, fmt);
68014 + ret = ____request_module(wait, module_param, fmt, args);
68015 + va_end(args);
68016 +
68017 + return ret;
68018 +}
68019 +
68020 +int __request_module(bool wait, const char *fmt, ...)
68021 +{
68022 + va_list args;
68023 + int ret;
68024 +
68025 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68026 + if (current_uid()) {
68027 + char module_param[MODULE_NAME_LEN];
68028 +
68029 + memset(module_param, 0, sizeof(module_param));
68030 +
68031 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
68032 +
68033 + va_start(args, fmt);
68034 + ret = ____request_module(wait, module_param, fmt, args);
68035 + va_end(args);
68036 +
68037 + return ret;
68038 + }
68039 +#endif
68040 +
68041 + va_start(args, fmt);
68042 + ret = ____request_module(wait, NULL, fmt, args);
68043 + va_end(args);
68044 +
68045 + return ret;
68046 +}
68047 +
68048 EXPORT_SYMBOL(__request_module);
68049 #endif /* CONFIG_MODULES */
68050
68051 @@ -266,7 +319,7 @@ static int wait_for_helper(void *data)
68052 *
68053 * Thus the __user pointer cast is valid here.
68054 */
68055 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
68056 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
68057
68058 /*
68059 * If ret is 0, either ____call_usermodehelper failed and the
68060 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
68061 index c62b854..cb67968 100644
68062 --- a/kernel/kprobes.c
68063 +++ b/kernel/kprobes.c
68064 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
68065 * kernel image and loaded module images reside. This is required
68066 * so x86_64 can correctly handle the %rip-relative fixups.
68067 */
68068 - kip->insns = module_alloc(PAGE_SIZE);
68069 + kip->insns = module_alloc_exec(PAGE_SIZE);
68070 if (!kip->insns) {
68071 kfree(kip);
68072 return NULL;
68073 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
68074 */
68075 if (!list_is_singular(&kip->list)) {
68076 list_del(&kip->list);
68077 - module_free(NULL, kip->insns);
68078 + module_free_exec(NULL, kip->insns);
68079 kfree(kip);
68080 }
68081 return 1;
68082 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
68083 {
68084 int i, err = 0;
68085 unsigned long offset = 0, size = 0;
68086 - char *modname, namebuf[128];
68087 + char *modname, namebuf[KSYM_NAME_LEN];
68088 const char *symbol_name;
68089 void *addr;
68090 struct kprobe_blackpoint *kb;
68091 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
68092 const char *sym = NULL;
68093 unsigned int i = *(loff_t *) v;
68094 unsigned long offset = 0;
68095 - char *modname, namebuf[128];
68096 + char *modname, namebuf[KSYM_NAME_LEN];
68097
68098 head = &kprobe_table[i];
68099 preempt_disable();
68100 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
68101 index 4e316e1..5501eef 100644
68102 --- a/kernel/ksysfs.c
68103 +++ b/kernel/ksysfs.c
68104 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
68105 {
68106 if (count+1 > UEVENT_HELPER_PATH_LEN)
68107 return -ENOENT;
68108 + if (!capable(CAP_SYS_ADMIN))
68109 + return -EPERM;
68110 memcpy(uevent_helper, buf, count);
68111 uevent_helper[count] = '\0';
68112 if (count && uevent_helper[count-1] == '\n')
68113 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
68114 index ea9ee45..67ebc8f 100644
68115 --- a/kernel/lockdep.c
68116 +++ b/kernel/lockdep.c
68117 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
68118 end = (unsigned long) &_end,
68119 addr = (unsigned long) obj;
68120
68121 +#ifdef CONFIG_PAX_KERNEXEC
68122 + start = ktla_ktva(start);
68123 +#endif
68124 +
68125 /*
68126 * static variable?
68127 */
68128 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
68129 if (!static_obj(lock->key)) {
68130 debug_locks_off();
68131 printk("INFO: trying to register non-static key.\n");
68132 + printk("lock:%pS key:%pS.\n", lock, lock->key);
68133 printk("the code is fine but needs lockdep annotation.\n");
68134 printk("turning off the locking correctness validator.\n");
68135 dump_stack();
68136 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
68137 if (!class)
68138 return 0;
68139 }
68140 - atomic_inc((atomic_t *)&class->ops);
68141 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
68142 if (very_verbose(class)) {
68143 printk("\nacquire class [%p] %s", class->key, class->name);
68144 if (class->name_version > 1)
68145 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
68146 index 91c32a0..b2c71c5 100644
68147 --- a/kernel/lockdep_proc.c
68148 +++ b/kernel/lockdep_proc.c
68149 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
68150
68151 static void print_name(struct seq_file *m, struct lock_class *class)
68152 {
68153 - char str[128];
68154 + char str[KSYM_NAME_LEN];
68155 const char *name = class->name;
68156
68157 if (!name) {
68158 diff --git a/kernel/module.c b/kernel/module.c
68159 index 4edbd9c..165e780 100644
68160 --- a/kernel/module.c
68161 +++ b/kernel/module.c
68162 @@ -58,6 +58,7 @@
68163 #include <linux/jump_label.h>
68164 #include <linux/pfn.h>
68165 #include <linux/bsearch.h>
68166 +#include <linux/grsecurity.h>
68167
68168 #define CREATE_TRACE_POINTS
68169 #include <trace/events/module.h>
68170 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
68171
68172 /* Bounds of module allocation, for speeding __module_address.
68173 * Protected by module_mutex. */
68174 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
68175 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
68176 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
68177
68178 int register_module_notifier(struct notifier_block * nb)
68179 {
68180 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
68181 return true;
68182
68183 list_for_each_entry_rcu(mod, &modules, list) {
68184 - struct symsearch arr[] = {
68185 + struct symsearch modarr[] = {
68186 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
68187 NOT_GPL_ONLY, false },
68188 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
68189 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
68190 #endif
68191 };
68192
68193 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
68194 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
68195 return true;
68196 }
68197 return false;
68198 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
68199 static int percpu_modalloc(struct module *mod,
68200 unsigned long size, unsigned long align)
68201 {
68202 - if (align > PAGE_SIZE) {
68203 + if (align-1 >= PAGE_SIZE) {
68204 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
68205 mod->name, align, PAGE_SIZE);
68206 align = PAGE_SIZE;
68207 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
68208 static ssize_t show_coresize(struct module_attribute *mattr,
68209 struct module_kobject *mk, char *buffer)
68210 {
68211 - return sprintf(buffer, "%u\n", mk->mod->core_size);
68212 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
68213 }
68214
68215 static struct module_attribute modinfo_coresize =
68216 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
68217 static ssize_t show_initsize(struct module_attribute *mattr,
68218 struct module_kobject *mk, char *buffer)
68219 {
68220 - return sprintf(buffer, "%u\n", mk->mod->init_size);
68221 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
68222 }
68223
68224 static struct module_attribute modinfo_initsize =
68225 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
68226 */
68227 #ifdef CONFIG_SYSFS
68228
68229 -#ifdef CONFIG_KALLSYMS
68230 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68231 static inline bool sect_empty(const Elf_Shdr *sect)
68232 {
68233 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
68234 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
68235
68236 static void unset_module_core_ro_nx(struct module *mod)
68237 {
68238 - set_page_attributes(mod->module_core + mod->core_text_size,
68239 - mod->module_core + mod->core_size,
68240 + set_page_attributes(mod->module_core_rw,
68241 + mod->module_core_rw + mod->core_size_rw,
68242 set_memory_x);
68243 - set_page_attributes(mod->module_core,
68244 - mod->module_core + mod->core_ro_size,
68245 + set_page_attributes(mod->module_core_rx,
68246 + mod->module_core_rx + mod->core_size_rx,
68247 set_memory_rw);
68248 }
68249
68250 static void unset_module_init_ro_nx(struct module *mod)
68251 {
68252 - set_page_attributes(mod->module_init + mod->init_text_size,
68253 - mod->module_init + mod->init_size,
68254 + set_page_attributes(mod->module_init_rw,
68255 + mod->module_init_rw + mod->init_size_rw,
68256 set_memory_x);
68257 - set_page_attributes(mod->module_init,
68258 - mod->module_init + mod->init_ro_size,
68259 + set_page_attributes(mod->module_init_rx,
68260 + mod->module_init_rx + mod->init_size_rx,
68261 set_memory_rw);
68262 }
68263
68264 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
68265
68266 mutex_lock(&module_mutex);
68267 list_for_each_entry_rcu(mod, &modules, list) {
68268 - if ((mod->module_core) && (mod->core_text_size)) {
68269 - set_page_attributes(mod->module_core,
68270 - mod->module_core + mod->core_text_size,
68271 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
68272 + set_page_attributes(mod->module_core_rx,
68273 + mod->module_core_rx + mod->core_size_rx,
68274 set_memory_rw);
68275 }
68276 - if ((mod->module_init) && (mod->init_text_size)) {
68277 - set_page_attributes(mod->module_init,
68278 - mod->module_init + mod->init_text_size,
68279 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
68280 + set_page_attributes(mod->module_init_rx,
68281 + mod->module_init_rx + mod->init_size_rx,
68282 set_memory_rw);
68283 }
68284 }
68285 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
68286
68287 mutex_lock(&module_mutex);
68288 list_for_each_entry_rcu(mod, &modules, list) {
68289 - if ((mod->module_core) && (mod->core_text_size)) {
68290 - set_page_attributes(mod->module_core,
68291 - mod->module_core + mod->core_text_size,
68292 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
68293 + set_page_attributes(mod->module_core_rx,
68294 + mod->module_core_rx + mod->core_size_rx,
68295 set_memory_ro);
68296 }
68297 - if ((mod->module_init) && (mod->init_text_size)) {
68298 - set_page_attributes(mod->module_init,
68299 - mod->module_init + mod->init_text_size,
68300 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
68301 + set_page_attributes(mod->module_init_rx,
68302 + mod->module_init_rx + mod->init_size_rx,
68303 set_memory_ro);
68304 }
68305 }
68306 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
68307
68308 /* This may be NULL, but that's OK */
68309 unset_module_init_ro_nx(mod);
68310 - module_free(mod, mod->module_init);
68311 + module_free(mod, mod->module_init_rw);
68312 + module_free_exec(mod, mod->module_init_rx);
68313 kfree(mod->args);
68314 percpu_modfree(mod);
68315
68316 /* Free lock-classes: */
68317 - lockdep_free_key_range(mod->module_core, mod->core_size);
68318 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
68319 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
68320
68321 /* Finally, free the core (containing the module structure) */
68322 unset_module_core_ro_nx(mod);
68323 - module_free(mod, mod->module_core);
68324 + module_free_exec(mod, mod->module_core_rx);
68325 + module_free(mod, mod->module_core_rw);
68326
68327 #ifdef CONFIG_MPU
68328 update_protections(current->mm);
68329 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68330 int ret = 0;
68331 const struct kernel_symbol *ksym;
68332
68333 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68334 + int is_fs_load = 0;
68335 + int register_filesystem_found = 0;
68336 + char *p;
68337 +
68338 + p = strstr(mod->args, "grsec_modharden_fs");
68339 + if (p) {
68340 + char *endptr = p + strlen("grsec_modharden_fs");
68341 + /* copy \0 as well */
68342 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
68343 + is_fs_load = 1;
68344 + }
68345 +#endif
68346 +
68347 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
68348 const char *name = info->strtab + sym[i].st_name;
68349
68350 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68351 + /* it's a real shame this will never get ripped and copied
68352 + upstream! ;(
68353 + */
68354 + if (is_fs_load && !strcmp(name, "register_filesystem"))
68355 + register_filesystem_found = 1;
68356 +#endif
68357 +
68358 switch (sym[i].st_shndx) {
68359 case SHN_COMMON:
68360 /* We compiled with -fno-common. These are not
68361 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68362 ksym = resolve_symbol_wait(mod, info, name);
68363 /* Ok if resolved. */
68364 if (ksym && !IS_ERR(ksym)) {
68365 + pax_open_kernel();
68366 sym[i].st_value = ksym->value;
68367 + pax_close_kernel();
68368 break;
68369 }
68370
68371 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68372 secbase = (unsigned long)mod_percpu(mod);
68373 else
68374 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
68375 + pax_open_kernel();
68376 sym[i].st_value += secbase;
68377 + pax_close_kernel();
68378 break;
68379 }
68380 }
68381
68382 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68383 + if (is_fs_load && !register_filesystem_found) {
68384 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
68385 + ret = -EPERM;
68386 + }
68387 +#endif
68388 +
68389 return ret;
68390 }
68391
68392 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
68393 || s->sh_entsize != ~0UL
68394 || strstarts(sname, ".init"))
68395 continue;
68396 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
68397 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68398 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
68399 + else
68400 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
68401 pr_debug("\t%s\n", sname);
68402 }
68403 - switch (m) {
68404 - case 0: /* executable */
68405 - mod->core_size = debug_align(mod->core_size);
68406 - mod->core_text_size = mod->core_size;
68407 - break;
68408 - case 1: /* RO: text and ro-data */
68409 - mod->core_size = debug_align(mod->core_size);
68410 - mod->core_ro_size = mod->core_size;
68411 - break;
68412 - case 3: /* whole core */
68413 - mod->core_size = debug_align(mod->core_size);
68414 - break;
68415 - }
68416 }
68417
68418 pr_debug("Init section allocation order:\n");
68419 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
68420 || s->sh_entsize != ~0UL
68421 || !strstarts(sname, ".init"))
68422 continue;
68423 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
68424 - | INIT_OFFSET_MASK);
68425 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68426 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
68427 + else
68428 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
68429 + s->sh_entsize |= INIT_OFFSET_MASK;
68430 pr_debug("\t%s\n", sname);
68431 }
68432 - switch (m) {
68433 - case 0: /* executable */
68434 - mod->init_size = debug_align(mod->init_size);
68435 - mod->init_text_size = mod->init_size;
68436 - break;
68437 - case 1: /* RO: text and ro-data */
68438 - mod->init_size = debug_align(mod->init_size);
68439 - mod->init_ro_size = mod->init_size;
68440 - break;
68441 - case 3: /* whole init */
68442 - mod->init_size = debug_align(mod->init_size);
68443 - break;
68444 - }
68445 }
68446 }
68447
68448 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68449
68450 /* Put symbol section at end of init part of module. */
68451 symsect->sh_flags |= SHF_ALLOC;
68452 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
68453 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
68454 info->index.sym) | INIT_OFFSET_MASK;
68455 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
68456
68457 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68458 }
68459
68460 /* Append room for core symbols at end of core part. */
68461 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
68462 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
68463 - mod->core_size += strtab_size;
68464 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
68465 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
68466 + mod->core_size_rx += strtab_size;
68467
68468 /* Put string table section at end of init part of module. */
68469 strsect->sh_flags |= SHF_ALLOC;
68470 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
68471 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
68472 info->index.str) | INIT_OFFSET_MASK;
68473 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
68474 }
68475 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68476 /* Make sure we get permanent strtab: don't use info->strtab. */
68477 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
68478
68479 + pax_open_kernel();
68480 +
68481 /* Set types up while we still have access to sections. */
68482 for (i = 0; i < mod->num_symtab; i++)
68483 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
68484
68485 - mod->core_symtab = dst = mod->module_core + info->symoffs;
68486 - mod->core_strtab = s = mod->module_core + info->stroffs;
68487 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
68488 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
68489 src = mod->symtab;
68490 *dst = *src;
68491 *s++ = 0;
68492 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68493 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
68494 }
68495 mod->core_num_syms = ndst;
68496 +
68497 + pax_close_kernel();
68498 }
68499 #else
68500 static inline void layout_symtab(struct module *mod, struct load_info *info)
68501 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
68502 return size == 0 ? NULL : vmalloc_exec(size);
68503 }
68504
68505 -static void *module_alloc_update_bounds(unsigned long size)
68506 +static void *module_alloc_update_bounds_rw(unsigned long size)
68507 {
68508 void *ret = module_alloc(size);
68509
68510 if (ret) {
68511 mutex_lock(&module_mutex);
68512 /* Update module bounds. */
68513 - if ((unsigned long)ret < module_addr_min)
68514 - module_addr_min = (unsigned long)ret;
68515 - if ((unsigned long)ret + size > module_addr_max)
68516 - module_addr_max = (unsigned long)ret + size;
68517 + if ((unsigned long)ret < module_addr_min_rw)
68518 + module_addr_min_rw = (unsigned long)ret;
68519 + if ((unsigned long)ret + size > module_addr_max_rw)
68520 + module_addr_max_rw = (unsigned long)ret + size;
68521 + mutex_unlock(&module_mutex);
68522 + }
68523 + return ret;
68524 +}
68525 +
68526 +static void *module_alloc_update_bounds_rx(unsigned long size)
68527 +{
68528 + void *ret = module_alloc_exec(size);
68529 +
68530 + if (ret) {
68531 + mutex_lock(&module_mutex);
68532 + /* Update module bounds. */
68533 + if ((unsigned long)ret < module_addr_min_rx)
68534 + module_addr_min_rx = (unsigned long)ret;
68535 + if ((unsigned long)ret + size > module_addr_max_rx)
68536 + module_addr_max_rx = (unsigned long)ret + size;
68537 mutex_unlock(&module_mutex);
68538 }
68539 return ret;
68540 @@ -2544,8 +2582,14 @@ static struct module *setup_load_info(struct load_info *info)
68541 static int check_modinfo(struct module *mod, struct load_info *info)
68542 {
68543 const char *modmagic = get_modinfo(info, "vermagic");
68544 + const char *license = get_modinfo(info, "license");
68545 int err;
68546
68547 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
68548 + if (!license || !license_is_gpl_compatible(license))
68549 + return -ENOEXEC;
68550 +#endif
68551 +
68552 /* This is allowed: modprobe --force will invalidate it. */
68553 if (!modmagic) {
68554 err = try_to_force_load(mod, "bad vermagic");
68555 @@ -2568,7 +2612,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
68556 }
68557
68558 /* Set up license info based on the info section */
68559 - set_license(mod, get_modinfo(info, "license"));
68560 + set_license(mod, license);
68561
68562 return 0;
68563 }
68564 @@ -2662,7 +2706,7 @@ static int move_module(struct module *mod, struct load_info *info)
68565 void *ptr;
68566
68567 /* Do the allocs. */
68568 - ptr = module_alloc_update_bounds(mod->core_size);
68569 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
68570 /*
68571 * The pointer to this block is stored in the module structure
68572 * which is inside the block. Just mark it as not being a
68573 @@ -2672,23 +2716,50 @@ static int move_module(struct module *mod, struct load_info *info)
68574 if (!ptr)
68575 return -ENOMEM;
68576
68577 - memset(ptr, 0, mod->core_size);
68578 - mod->module_core = ptr;
68579 + memset(ptr, 0, mod->core_size_rw);
68580 + mod->module_core_rw = ptr;
68581
68582 - ptr = module_alloc_update_bounds(mod->init_size);
68583 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68584 /*
68585 * The pointer to this block is stored in the module structure
68586 * which is inside the block. This block doesn't need to be
68587 * scanned as it contains data and code that will be freed
68588 * after the module is initialized.
68589 */
68590 - kmemleak_ignore(ptr);
68591 - if (!ptr && mod->init_size) {
68592 - module_free(mod, mod->module_core);
68593 + kmemleak_not_leak(ptr);
68594 + if (!ptr && mod->init_size_rw) {
68595 + module_free(mod, mod->module_core_rw);
68596 return -ENOMEM;
68597 }
68598 - memset(ptr, 0, mod->init_size);
68599 - mod->module_init = ptr;
68600 + memset(ptr, 0, mod->init_size_rw);
68601 + mod->module_init_rw = ptr;
68602 +
68603 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68604 + kmemleak_not_leak(ptr);
68605 + if (!ptr) {
68606 + module_free(mod, mod->module_init_rw);
68607 + module_free(mod, mod->module_core_rw);
68608 + return -ENOMEM;
68609 + }
68610 +
68611 + pax_open_kernel();
68612 + memset(ptr, 0, mod->core_size_rx);
68613 + pax_close_kernel();
68614 + mod->module_core_rx = ptr;
68615 +
68616 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68617 + kmemleak_not_leak(ptr);
68618 + if (!ptr && mod->init_size_rx) {
68619 + module_free_exec(mod, mod->module_core_rx);
68620 + module_free(mod, mod->module_init_rw);
68621 + module_free(mod, mod->module_core_rw);
68622 + return -ENOMEM;
68623 + }
68624 +
68625 + pax_open_kernel();
68626 + memset(ptr, 0, mod->init_size_rx);
68627 + pax_close_kernel();
68628 + mod->module_init_rx = ptr;
68629
68630 /* Transfer each section which specifies SHF_ALLOC */
68631 pr_debug("final section addresses:\n");
68632 @@ -2699,16 +2770,45 @@ static int move_module(struct module *mod, struct load_info *info)
68633 if (!(shdr->sh_flags & SHF_ALLOC))
68634 continue;
68635
68636 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
68637 - dest = mod->module_init
68638 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68639 - else
68640 - dest = mod->module_core + shdr->sh_entsize;
68641 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
68642 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68643 + dest = mod->module_init_rw
68644 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68645 + else
68646 + dest = mod->module_init_rx
68647 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68648 + } else {
68649 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68650 + dest = mod->module_core_rw + shdr->sh_entsize;
68651 + else
68652 + dest = mod->module_core_rx + shdr->sh_entsize;
68653 + }
68654 +
68655 + if (shdr->sh_type != SHT_NOBITS) {
68656 +
68657 +#ifdef CONFIG_PAX_KERNEXEC
68658 +#ifdef CONFIG_X86_64
68659 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
68660 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68661 +#endif
68662 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
68663 + pax_open_kernel();
68664 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68665 + pax_close_kernel();
68666 + } else
68667 +#endif
68668
68669 - if (shdr->sh_type != SHT_NOBITS)
68670 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68671 + }
68672 /* Update sh_addr to point to copy in image. */
68673 - shdr->sh_addr = (unsigned long)dest;
68674 +
68675 +#ifdef CONFIG_PAX_KERNEXEC
68676 + if (shdr->sh_flags & SHF_EXECINSTR)
68677 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
68678 + else
68679 +#endif
68680 +
68681 + shdr->sh_addr = (unsigned long)dest;
68682 pr_debug("\t0x%lx %s\n",
68683 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
68684 }
68685 @@ -2759,12 +2859,12 @@ static void flush_module_icache(const struct module *mod)
68686 * Do it before processing of module parameters, so the module
68687 * can provide parameter accessor functions of its own.
68688 */
68689 - if (mod->module_init)
68690 - flush_icache_range((unsigned long)mod->module_init,
68691 - (unsigned long)mod->module_init
68692 - + mod->init_size);
68693 - flush_icache_range((unsigned long)mod->module_core,
68694 - (unsigned long)mod->module_core + mod->core_size);
68695 + if (mod->module_init_rx)
68696 + flush_icache_range((unsigned long)mod->module_init_rx,
68697 + (unsigned long)mod->module_init_rx
68698 + + mod->init_size_rx);
68699 + flush_icache_range((unsigned long)mod->module_core_rx,
68700 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
68701
68702 set_fs(old_fs);
68703 }
68704 @@ -2834,8 +2934,10 @@ out:
68705 static void module_deallocate(struct module *mod, struct load_info *info)
68706 {
68707 percpu_modfree(mod);
68708 - module_free(mod, mod->module_init);
68709 - module_free(mod, mod->module_core);
68710 + module_free_exec(mod, mod->module_init_rx);
68711 + module_free_exec(mod, mod->module_core_rx);
68712 + module_free(mod, mod->module_init_rw);
68713 + module_free(mod, mod->module_core_rw);
68714 }
68715
68716 int __weak module_finalize(const Elf_Ehdr *hdr,
68717 @@ -2848,7 +2950,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
68718 static int post_relocation(struct module *mod, const struct load_info *info)
68719 {
68720 /* Sort exception table now relocations are done. */
68721 + pax_open_kernel();
68722 sort_extable(mod->extable, mod->extable + mod->num_exentries);
68723 + pax_close_kernel();
68724
68725 /* Copy relocated percpu area over. */
68726 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
68727 @@ -2899,9 +3003,38 @@ static struct module *load_module(void __user *umod,
68728 if (err)
68729 goto free_unload;
68730
68731 + /* Now copy in args */
68732 + mod->args = strndup_user(uargs, ~0UL >> 1);
68733 + if (IS_ERR(mod->args)) {
68734 + err = PTR_ERR(mod->args);
68735 + goto free_unload;
68736 + }
68737 +
68738 /* Set up MODINFO_ATTR fields */
68739 setup_modinfo(mod, &info);
68740
68741 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68742 + {
68743 + char *p, *p2;
68744 +
68745 + if (strstr(mod->args, "grsec_modharden_netdev")) {
68746 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68747 + err = -EPERM;
68748 + goto free_modinfo;
68749 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68750 + p += strlen("grsec_modharden_normal");
68751 + p2 = strstr(p, "_");
68752 + if (p2) {
68753 + *p2 = '\0';
68754 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68755 + *p2 = '_';
68756 + }
68757 + err = -EPERM;
68758 + goto free_modinfo;
68759 + }
68760 + }
68761 +#endif
68762 +
68763 /* Fix up syms, so that st_value is a pointer to location. */
68764 err = simplify_symbols(mod, &info);
68765 if (err < 0)
68766 @@ -2917,13 +3050,6 @@ static struct module *load_module(void __user *umod,
68767
68768 flush_module_icache(mod);
68769
68770 - /* Now copy in args */
68771 - mod->args = strndup_user(uargs, ~0UL >> 1);
68772 - if (IS_ERR(mod->args)) {
68773 - err = PTR_ERR(mod->args);
68774 - goto free_arch_cleanup;
68775 - }
68776 -
68777 /* Mark state as coming so strong_try_module_get() ignores us. */
68778 mod->state = MODULE_STATE_COMING;
68779
68780 @@ -2981,11 +3107,10 @@ static struct module *load_module(void __user *umod,
68781 unlock:
68782 mutex_unlock(&module_mutex);
68783 synchronize_sched();
68784 - kfree(mod->args);
68785 - free_arch_cleanup:
68786 module_arch_cleanup(mod);
68787 free_modinfo:
68788 free_modinfo(mod);
68789 + kfree(mod->args);
68790 free_unload:
68791 module_unload_free(mod);
68792 free_module:
68793 @@ -3026,16 +3151,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68794 MODULE_STATE_COMING, mod);
68795
68796 /* Set RO and NX regions for core */
68797 - set_section_ro_nx(mod->module_core,
68798 - mod->core_text_size,
68799 - mod->core_ro_size,
68800 - mod->core_size);
68801 + set_section_ro_nx(mod->module_core_rx,
68802 + mod->core_size_rx,
68803 + mod->core_size_rx,
68804 + mod->core_size_rx);
68805
68806 /* Set RO and NX regions for init */
68807 - set_section_ro_nx(mod->module_init,
68808 - mod->init_text_size,
68809 - mod->init_ro_size,
68810 - mod->init_size);
68811 + set_section_ro_nx(mod->module_init_rx,
68812 + mod->init_size_rx,
68813 + mod->init_size_rx,
68814 + mod->init_size_rx);
68815
68816 do_mod_ctors(mod);
68817 /* Start the module */
68818 @@ -3081,11 +3206,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68819 mod->strtab = mod->core_strtab;
68820 #endif
68821 unset_module_init_ro_nx(mod);
68822 - module_free(mod, mod->module_init);
68823 - mod->module_init = NULL;
68824 - mod->init_size = 0;
68825 - mod->init_ro_size = 0;
68826 - mod->init_text_size = 0;
68827 + module_free(mod, mod->module_init_rw);
68828 + module_free_exec(mod, mod->module_init_rx);
68829 + mod->module_init_rw = NULL;
68830 + mod->module_init_rx = NULL;
68831 + mod->init_size_rw = 0;
68832 + mod->init_size_rx = 0;
68833 mutex_unlock(&module_mutex);
68834
68835 return 0;
68836 @@ -3116,10 +3242,16 @@ static const char *get_ksymbol(struct module *mod,
68837 unsigned long nextval;
68838
68839 /* At worse, next value is at end of module */
68840 - if (within_module_init(addr, mod))
68841 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
68842 + if (within_module_init_rx(addr, mod))
68843 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68844 + else if (within_module_init_rw(addr, mod))
68845 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68846 + else if (within_module_core_rx(addr, mod))
68847 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68848 + else if (within_module_core_rw(addr, mod))
68849 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68850 else
68851 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
68852 + return NULL;
68853
68854 /* Scan for closest preceding symbol, and next symbol. (ELF
68855 starts real symbols at 1). */
68856 @@ -3354,7 +3486,7 @@ static int m_show(struct seq_file *m, void *p)
68857 char buf[8];
68858
68859 seq_printf(m, "%s %u",
68860 - mod->name, mod->init_size + mod->core_size);
68861 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68862 print_unload_info(m, mod);
68863
68864 /* Informative for users. */
68865 @@ -3363,7 +3495,7 @@ static int m_show(struct seq_file *m, void *p)
68866 mod->state == MODULE_STATE_COMING ? "Loading":
68867 "Live");
68868 /* Used by oprofile and other similar tools. */
68869 - seq_printf(m, " 0x%pK", mod->module_core);
68870 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68871
68872 /* Taints info */
68873 if (mod->taints)
68874 @@ -3399,7 +3531,17 @@ static const struct file_operations proc_modules_operations = {
68875
68876 static int __init proc_modules_init(void)
68877 {
68878 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68879 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68880 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68881 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68882 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68883 +#else
68884 proc_create("modules", 0, NULL, &proc_modules_operations);
68885 +#endif
68886 +#else
68887 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68888 +#endif
68889 return 0;
68890 }
68891 module_init(proc_modules_init);
68892 @@ -3458,12 +3600,12 @@ struct module *__module_address(unsigned long addr)
68893 {
68894 struct module *mod;
68895
68896 - if (addr < module_addr_min || addr > module_addr_max)
68897 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68898 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
68899 return NULL;
68900
68901 list_for_each_entry_rcu(mod, &modules, list)
68902 - if (within_module_core(addr, mod)
68903 - || within_module_init(addr, mod))
68904 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
68905 return mod;
68906 return NULL;
68907 }
68908 @@ -3497,11 +3639,20 @@ bool is_module_text_address(unsigned long addr)
68909 */
68910 struct module *__module_text_address(unsigned long addr)
68911 {
68912 - struct module *mod = __module_address(addr);
68913 + struct module *mod;
68914 +
68915 +#ifdef CONFIG_X86_32
68916 + addr = ktla_ktva(addr);
68917 +#endif
68918 +
68919 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68920 + return NULL;
68921 +
68922 + mod = __module_address(addr);
68923 +
68924 if (mod) {
68925 /* Make sure it's within the text section. */
68926 - if (!within(addr, mod->module_init, mod->init_text_size)
68927 - && !within(addr, mod->module_core, mod->core_text_size))
68928 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68929 mod = NULL;
68930 }
68931 return mod;
68932 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68933 index 7e3443f..b2a1e6b 100644
68934 --- a/kernel/mutex-debug.c
68935 +++ b/kernel/mutex-debug.c
68936 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68937 }
68938
68939 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68940 - struct thread_info *ti)
68941 + struct task_struct *task)
68942 {
68943 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68944
68945 /* Mark the current thread as blocked on the lock: */
68946 - ti->task->blocked_on = waiter;
68947 + task->blocked_on = waiter;
68948 }
68949
68950 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68951 - struct thread_info *ti)
68952 + struct task_struct *task)
68953 {
68954 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68955 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68956 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68957 - ti->task->blocked_on = NULL;
68958 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
68959 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68960 + task->blocked_on = NULL;
68961
68962 list_del_init(&waiter->list);
68963 waiter->task = NULL;
68964 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68965 index 0799fd3..d06ae3b 100644
68966 --- a/kernel/mutex-debug.h
68967 +++ b/kernel/mutex-debug.h
68968 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68969 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68970 extern void debug_mutex_add_waiter(struct mutex *lock,
68971 struct mutex_waiter *waiter,
68972 - struct thread_info *ti);
68973 + struct task_struct *task);
68974 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68975 - struct thread_info *ti);
68976 + struct task_struct *task);
68977 extern void debug_mutex_unlock(struct mutex *lock);
68978 extern void debug_mutex_init(struct mutex *lock, const char *name,
68979 struct lock_class_key *key);
68980 diff --git a/kernel/mutex.c b/kernel/mutex.c
68981 index a307cc9..27fd2e9 100644
68982 --- a/kernel/mutex.c
68983 +++ b/kernel/mutex.c
68984 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68985 spin_lock_mutex(&lock->wait_lock, flags);
68986
68987 debug_mutex_lock_common(lock, &waiter);
68988 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68989 + debug_mutex_add_waiter(lock, &waiter, task);
68990
68991 /* add waiting tasks to the end of the waitqueue (FIFO): */
68992 list_add_tail(&waiter.list, &lock->wait_list);
68993 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68994 * TASK_UNINTERRUPTIBLE case.)
68995 */
68996 if (unlikely(signal_pending_state(state, task))) {
68997 - mutex_remove_waiter(lock, &waiter,
68998 - task_thread_info(task));
68999 + mutex_remove_waiter(lock, &waiter, task);
69000 mutex_release(&lock->dep_map, 1, ip);
69001 spin_unlock_mutex(&lock->wait_lock, flags);
69002
69003 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
69004 done:
69005 lock_acquired(&lock->dep_map, ip);
69006 /* got the lock - rejoice! */
69007 - mutex_remove_waiter(lock, &waiter, current_thread_info());
69008 + mutex_remove_waiter(lock, &waiter, task);
69009 mutex_set_owner(lock);
69010
69011 /* set it to 0 if there are no waiters left: */
69012 diff --git a/kernel/panic.c b/kernel/panic.c
69013 index d2a5f4e..5edc1d9 100644
69014 --- a/kernel/panic.c
69015 +++ b/kernel/panic.c
69016 @@ -75,6 +75,14 @@ void panic(const char *fmt, ...)
69017 int state = 0;
69018
69019 /*
69020 + * Disable local interrupts. This will prevent panic_smp_self_stop
69021 + * from deadlocking the first cpu that invokes the panic, since
69022 + * there is nothing to prevent an interrupt handler (that runs
69023 + * after the panic_lock is acquired) from invoking panic again.
69024 + */
69025 + local_irq_disable();
69026 +
69027 + /*
69028 * It's possible to come here directly from a panic-assertion and
69029 * not have preempt disabled. Some functions called from here want
69030 * preempt to be disabled. No point enabling it later though...
69031 @@ -402,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
69032 const char *board;
69033
69034 printk(KERN_WARNING "------------[ cut here ]------------\n");
69035 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
69036 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
69037 board = dmi_get_system_info(DMI_PRODUCT_NAME);
69038 if (board)
69039 printk(KERN_WARNING "Hardware name: %s\n", board);
69040 @@ -457,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
69041 */
69042 void __stack_chk_fail(void)
69043 {
69044 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
69045 + dump_stack();
69046 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
69047 __builtin_return_address(0));
69048 }
69049 EXPORT_SYMBOL(__stack_chk_fail);
69050 diff --git a/kernel/pid.c b/kernel/pid.c
69051 index e86b291a..e8b0fb5 100644
69052 --- a/kernel/pid.c
69053 +++ b/kernel/pid.c
69054 @@ -33,6 +33,7 @@
69055 #include <linux/rculist.h>
69056 #include <linux/bootmem.h>
69057 #include <linux/hash.h>
69058 +#include <linux/security.h>
69059 #include <linux/pid_namespace.h>
69060 #include <linux/init_task.h>
69061 #include <linux/syscalls.h>
69062 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
69063
69064 int pid_max = PID_MAX_DEFAULT;
69065
69066 -#define RESERVED_PIDS 300
69067 +#define RESERVED_PIDS 500
69068
69069 int pid_max_min = RESERVED_PIDS + 1;
69070 int pid_max_max = PID_MAX_LIMIT;
69071 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
69072 */
69073 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
69074 {
69075 + struct task_struct *task;
69076 +
69077 rcu_lockdep_assert(rcu_read_lock_held(),
69078 "find_task_by_pid_ns() needs rcu_read_lock()"
69079 " protection");
69080 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
69081 +
69082 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
69083 +
69084 + if (gr_pid_is_chrooted(task))
69085 + return NULL;
69086 +
69087 + return task;
69088 }
69089
69090 struct task_struct *find_task_by_vpid(pid_t vnr)
69091 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
69092 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
69093 }
69094
69095 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
69096 +{
69097 + rcu_lockdep_assert(rcu_read_lock_held(),
69098 + "find_task_by_pid_ns() needs rcu_read_lock()"
69099 + " protection");
69100 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
69101 +}
69102 +
69103 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
69104 {
69105 struct pid *pid;
69106 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
69107 index b3c7fd5..6144bab 100644
69108 --- a/kernel/pid_namespace.c
69109 +++ b/kernel/pid_namespace.c
69110 @@ -232,15 +232,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
69111 */
69112
69113 tmp.data = &current->nsproxy->pid_ns->last_pid;
69114 - return proc_dointvec(&tmp, write, buffer, lenp, ppos);
69115 + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
69116 }
69117
69118 +extern int pid_max;
69119 +static int zero = 0;
69120 static struct ctl_table pid_ns_ctl_table[] = {
69121 {
69122 .procname = "ns_last_pid",
69123 .maxlen = sizeof(int),
69124 .mode = 0666, /* permissions are checked in the handler */
69125 .proc_handler = pid_ns_ctl_handler,
69126 + .extra1 = &zero,
69127 + .extra2 = &pid_max,
69128 },
69129 { }
69130 };
69131 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
69132 index 125cb67..a4d1c30 100644
69133 --- a/kernel/posix-cpu-timers.c
69134 +++ b/kernel/posix-cpu-timers.c
69135 @@ -6,6 +6,7 @@
69136 #include <linux/posix-timers.h>
69137 #include <linux/errno.h>
69138 #include <linux/math64.h>
69139 +#include <linux/security.h>
69140 #include <asm/uaccess.h>
69141 #include <linux/kernel_stat.h>
69142 #include <trace/events/timer.h>
69143 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
69144
69145 static __init int init_posix_cpu_timers(void)
69146 {
69147 - struct k_clock process = {
69148 + static struct k_clock process = {
69149 .clock_getres = process_cpu_clock_getres,
69150 .clock_get = process_cpu_clock_get,
69151 .timer_create = process_cpu_timer_create,
69152 .nsleep = process_cpu_nsleep,
69153 .nsleep_restart = process_cpu_nsleep_restart,
69154 };
69155 - struct k_clock thread = {
69156 + static struct k_clock thread = {
69157 .clock_getres = thread_cpu_clock_getres,
69158 .clock_get = thread_cpu_clock_get,
69159 .timer_create = thread_cpu_timer_create,
69160 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
69161 index 69185ae..cc2847a 100644
69162 --- a/kernel/posix-timers.c
69163 +++ b/kernel/posix-timers.c
69164 @@ -43,6 +43,7 @@
69165 #include <linux/idr.h>
69166 #include <linux/posix-clock.h>
69167 #include <linux/posix-timers.h>
69168 +#include <linux/grsecurity.h>
69169 #include <linux/syscalls.h>
69170 #include <linux/wait.h>
69171 #include <linux/workqueue.h>
69172 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
69173 * which we beg off on and pass to do_sys_settimeofday().
69174 */
69175
69176 -static struct k_clock posix_clocks[MAX_CLOCKS];
69177 +static struct k_clock *posix_clocks[MAX_CLOCKS];
69178
69179 /*
69180 * These ones are defined below.
69181 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
69182 */
69183 static __init int init_posix_timers(void)
69184 {
69185 - struct k_clock clock_realtime = {
69186 + static struct k_clock clock_realtime = {
69187 .clock_getres = hrtimer_get_res,
69188 .clock_get = posix_clock_realtime_get,
69189 .clock_set = posix_clock_realtime_set,
69190 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
69191 .timer_get = common_timer_get,
69192 .timer_del = common_timer_del,
69193 };
69194 - struct k_clock clock_monotonic = {
69195 + static struct k_clock clock_monotonic = {
69196 .clock_getres = hrtimer_get_res,
69197 .clock_get = posix_ktime_get_ts,
69198 .nsleep = common_nsleep,
69199 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
69200 .timer_get = common_timer_get,
69201 .timer_del = common_timer_del,
69202 };
69203 - struct k_clock clock_monotonic_raw = {
69204 + static struct k_clock clock_monotonic_raw = {
69205 .clock_getres = hrtimer_get_res,
69206 .clock_get = posix_get_monotonic_raw,
69207 };
69208 - struct k_clock clock_realtime_coarse = {
69209 + static struct k_clock clock_realtime_coarse = {
69210 .clock_getres = posix_get_coarse_res,
69211 .clock_get = posix_get_realtime_coarse,
69212 };
69213 - struct k_clock clock_monotonic_coarse = {
69214 + static struct k_clock clock_monotonic_coarse = {
69215 .clock_getres = posix_get_coarse_res,
69216 .clock_get = posix_get_monotonic_coarse,
69217 };
69218 - struct k_clock clock_boottime = {
69219 + static struct k_clock clock_boottime = {
69220 .clock_getres = hrtimer_get_res,
69221 .clock_get = posix_get_boottime,
69222 .nsleep = common_nsleep,
69223 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
69224 return;
69225 }
69226
69227 - posix_clocks[clock_id] = *new_clock;
69228 + posix_clocks[clock_id] = new_clock;
69229 }
69230 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
69231
69232 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
69233 return (id & CLOCKFD_MASK) == CLOCKFD ?
69234 &clock_posix_dynamic : &clock_posix_cpu;
69235
69236 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
69237 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
69238 return NULL;
69239 - return &posix_clocks[id];
69240 + return posix_clocks[id];
69241 }
69242
69243 static int common_timer_create(struct k_itimer *new_timer)
69244 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
69245 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
69246 return -EFAULT;
69247
69248 + /* only the CLOCK_REALTIME clock can be set, all other clocks
69249 + have their clock_set fptr set to a nosettime dummy function
69250 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
69251 + call common_clock_set, which calls do_sys_settimeofday, which
69252 + we hook
69253 + */
69254 +
69255 return kc->clock_set(which_clock, &new_tp);
69256 }
69257
69258 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
69259 index d523593..68197a4 100644
69260 --- a/kernel/power/poweroff.c
69261 +++ b/kernel/power/poweroff.c
69262 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
69263 .enable_mask = SYSRQ_ENABLE_BOOT,
69264 };
69265
69266 -static int pm_sysrq_init(void)
69267 +static int __init pm_sysrq_init(void)
69268 {
69269 register_sysrq_key('o', &sysrq_poweroff_op);
69270 return 0;
69271 diff --git a/kernel/power/process.c b/kernel/power/process.c
69272 index 19db29f..33b52b6 100644
69273 --- a/kernel/power/process.c
69274 +++ b/kernel/power/process.c
69275 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
69276 u64 elapsed_csecs64;
69277 unsigned int elapsed_csecs;
69278 bool wakeup = false;
69279 + bool timedout = false;
69280
69281 do_gettimeofday(&start);
69282
69283 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
69284
69285 while (true) {
69286 todo = 0;
69287 + if (time_after(jiffies, end_time))
69288 + timedout = true;
69289 read_lock(&tasklist_lock);
69290 do_each_thread(g, p) {
69291 if (p == current || !freeze_task(p))
69292 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
69293 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
69294 * transition can't race with task state testing here.
69295 */
69296 - if (!task_is_stopped_or_traced(p) &&
69297 - !freezer_should_skip(p))
69298 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
69299 todo++;
69300 + if (timedout) {
69301 + printk(KERN_ERR "Task refusing to freeze:\n");
69302 + sched_show_task(p);
69303 + }
69304 + }
69305 } while_each_thread(g, p);
69306 read_unlock(&tasklist_lock);
69307
69308 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
69309 todo += wq_busy;
69310 }
69311
69312 - if (!todo || time_after(jiffies, end_time))
69313 + if (!todo || timedout)
69314 break;
69315
69316 if (pm_wakeup_pending()) {
69317 diff --git a/kernel/printk.c b/kernel/printk.c
69318 index 146827f..a501fec 100644
69319 --- a/kernel/printk.c
69320 +++ b/kernel/printk.c
69321 @@ -782,6 +782,11 @@ static int check_syslog_permissions(int type, bool from_file)
69322 if (from_file && type != SYSLOG_ACTION_OPEN)
69323 return 0;
69324
69325 +#ifdef CONFIG_GRKERNSEC_DMESG
69326 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
69327 + return -EPERM;
69328 +#endif
69329 +
69330 if (syslog_action_restricted(type)) {
69331 if (capable(CAP_SYSLOG))
69332 return 0;
69333 diff --git a/kernel/profile.c b/kernel/profile.c
69334 index 76b8e77..a2930e8 100644
69335 --- a/kernel/profile.c
69336 +++ b/kernel/profile.c
69337 @@ -39,7 +39,7 @@ struct profile_hit {
69338 /* Oprofile timer tick hook */
69339 static int (*timer_hook)(struct pt_regs *) __read_mostly;
69340
69341 -static atomic_t *prof_buffer;
69342 +static atomic_unchecked_t *prof_buffer;
69343 static unsigned long prof_len, prof_shift;
69344
69345 int prof_on __read_mostly;
69346 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
69347 hits[i].pc = 0;
69348 continue;
69349 }
69350 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69351 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69352 hits[i].hits = hits[i].pc = 0;
69353 }
69354 }
69355 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69356 * Add the current hit(s) and flush the write-queue out
69357 * to the global buffer:
69358 */
69359 - atomic_add(nr_hits, &prof_buffer[pc]);
69360 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
69361 for (i = 0; i < NR_PROFILE_HIT; ++i) {
69362 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69363 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69364 hits[i].pc = hits[i].hits = 0;
69365 }
69366 out:
69367 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69368 {
69369 unsigned long pc;
69370 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
69371 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69372 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69373 }
69374 #endif /* !CONFIG_SMP */
69375
69376 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
69377 return -EFAULT;
69378 buf++; p++; count--; read++;
69379 }
69380 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
69381 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
69382 if (copy_to_user(buf, (void *)pnt, count))
69383 return -EFAULT;
69384 read += count;
69385 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
69386 }
69387 #endif
69388 profile_discard_flip_buffers();
69389 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
69390 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
69391 return count;
69392 }
69393
69394 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
69395 index a232bb5..2a65ef9 100644
69396 --- a/kernel/ptrace.c
69397 +++ b/kernel/ptrace.c
69398 @@ -279,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
69399
69400 if (seize)
69401 flags |= PT_SEIZED;
69402 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
69403 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
69404 flags |= PT_PTRACE_CAP;
69405 task->ptrace = flags;
69406
69407 @@ -486,7 +486,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
69408 break;
69409 return -EIO;
69410 }
69411 - if (copy_to_user(dst, buf, retval))
69412 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
69413 return -EFAULT;
69414 copied += retval;
69415 src += retval;
69416 @@ -671,7 +671,7 @@ int ptrace_request(struct task_struct *child, long request,
69417 bool seized = child->ptrace & PT_SEIZED;
69418 int ret = -EIO;
69419 siginfo_t siginfo, *si;
69420 - void __user *datavp = (void __user *) data;
69421 + void __user *datavp = (__force void __user *) data;
69422 unsigned long __user *datalp = datavp;
69423 unsigned long flags;
69424
69425 @@ -873,14 +873,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
69426 goto out;
69427 }
69428
69429 + if (gr_handle_ptrace(child, request)) {
69430 + ret = -EPERM;
69431 + goto out_put_task_struct;
69432 + }
69433 +
69434 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69435 ret = ptrace_attach(child, request, addr, data);
69436 /*
69437 * Some architectures need to do book-keeping after
69438 * a ptrace attach.
69439 */
69440 - if (!ret)
69441 + if (!ret) {
69442 arch_ptrace_attach(child);
69443 + gr_audit_ptrace(child);
69444 + }
69445 goto out_put_task_struct;
69446 }
69447
69448 @@ -906,7 +913,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
69449 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
69450 if (copied != sizeof(tmp))
69451 return -EIO;
69452 - return put_user(tmp, (unsigned long __user *)data);
69453 + return put_user(tmp, (__force unsigned long __user *)data);
69454 }
69455
69456 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
69457 @@ -1016,14 +1023,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
69458 goto out;
69459 }
69460
69461 + if (gr_handle_ptrace(child, request)) {
69462 + ret = -EPERM;
69463 + goto out_put_task_struct;
69464 + }
69465 +
69466 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69467 ret = ptrace_attach(child, request, addr, data);
69468 /*
69469 * Some architectures need to do book-keeping after
69470 * a ptrace attach.
69471 */
69472 - if (!ret)
69473 + if (!ret) {
69474 arch_ptrace_attach(child);
69475 + gr_audit_ptrace(child);
69476 + }
69477 goto out_put_task_struct;
69478 }
69479
69480 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
69481 index 37a5444..eec170a 100644
69482 --- a/kernel/rcutiny.c
69483 +++ b/kernel/rcutiny.c
69484 @@ -46,7 +46,7 @@
69485 struct rcu_ctrlblk;
69486 static void invoke_rcu_callbacks(void);
69487 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
69488 -static void rcu_process_callbacks(struct softirq_action *unused);
69489 +static void rcu_process_callbacks(void);
69490 static void __call_rcu(struct rcu_head *head,
69491 void (*func)(struct rcu_head *rcu),
69492 struct rcu_ctrlblk *rcp);
69493 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
69494 rcu_is_callbacks_kthread()));
69495 }
69496
69497 -static void rcu_process_callbacks(struct softirq_action *unused)
69498 +static void rcu_process_callbacks(void)
69499 {
69500 __rcu_process_callbacks(&rcu_sched_ctrlblk);
69501 __rcu_process_callbacks(&rcu_bh_ctrlblk);
69502 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
69503 index fc31a2d..be2ec04 100644
69504 --- a/kernel/rcutiny_plugin.h
69505 +++ b/kernel/rcutiny_plugin.h
69506 @@ -939,7 +939,7 @@ static int rcu_kthread(void *arg)
69507 have_rcu_kthread_work = morework;
69508 local_irq_restore(flags);
69509 if (work)
69510 - rcu_process_callbacks(NULL);
69511 + rcu_process_callbacks();
69512 schedule_timeout_interruptible(1); /* Leave CPU for others. */
69513 }
69514
69515 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
69516 index e66b34a..4b8b626 100644
69517 --- a/kernel/rcutorture.c
69518 +++ b/kernel/rcutorture.c
69519 @@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
69520 { 0 };
69521 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69522 { 0 };
69523 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69524 -static atomic_t n_rcu_torture_alloc;
69525 -static atomic_t n_rcu_torture_alloc_fail;
69526 -static atomic_t n_rcu_torture_free;
69527 -static atomic_t n_rcu_torture_mberror;
69528 -static atomic_t n_rcu_torture_error;
69529 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69530 +static atomic_unchecked_t n_rcu_torture_alloc;
69531 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
69532 +static atomic_unchecked_t n_rcu_torture_free;
69533 +static atomic_unchecked_t n_rcu_torture_mberror;
69534 +static atomic_unchecked_t n_rcu_torture_error;
69535 static long n_rcu_torture_barrier_error;
69536 static long n_rcu_torture_boost_ktrerror;
69537 static long n_rcu_torture_boost_rterror;
69538 @@ -265,11 +265,11 @@ rcu_torture_alloc(void)
69539
69540 spin_lock_bh(&rcu_torture_lock);
69541 if (list_empty(&rcu_torture_freelist)) {
69542 - atomic_inc(&n_rcu_torture_alloc_fail);
69543 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69544 spin_unlock_bh(&rcu_torture_lock);
69545 return NULL;
69546 }
69547 - atomic_inc(&n_rcu_torture_alloc);
69548 + atomic_inc_unchecked(&n_rcu_torture_alloc);
69549 p = rcu_torture_freelist.next;
69550 list_del_init(p);
69551 spin_unlock_bh(&rcu_torture_lock);
69552 @@ -282,7 +282,7 @@ rcu_torture_alloc(void)
69553 static void
69554 rcu_torture_free(struct rcu_torture *p)
69555 {
69556 - atomic_inc(&n_rcu_torture_free);
69557 + atomic_inc_unchecked(&n_rcu_torture_free);
69558 spin_lock_bh(&rcu_torture_lock);
69559 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69560 spin_unlock_bh(&rcu_torture_lock);
69561 @@ -403,7 +403,7 @@ rcu_torture_cb(struct rcu_head *p)
69562 i = rp->rtort_pipe_count;
69563 if (i > RCU_TORTURE_PIPE_LEN)
69564 i = RCU_TORTURE_PIPE_LEN;
69565 - atomic_inc(&rcu_torture_wcount[i]);
69566 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69567 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69568 rp->rtort_mbtest = 0;
69569 rcu_torture_free(rp);
69570 @@ -451,7 +451,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
69571 i = rp->rtort_pipe_count;
69572 if (i > RCU_TORTURE_PIPE_LEN)
69573 i = RCU_TORTURE_PIPE_LEN;
69574 - atomic_inc(&rcu_torture_wcount[i]);
69575 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69576 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69577 rp->rtort_mbtest = 0;
69578 list_del(&rp->rtort_free);
69579 @@ -983,7 +983,7 @@ rcu_torture_writer(void *arg)
69580 i = old_rp->rtort_pipe_count;
69581 if (i > RCU_TORTURE_PIPE_LEN)
69582 i = RCU_TORTURE_PIPE_LEN;
69583 - atomic_inc(&rcu_torture_wcount[i]);
69584 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69585 old_rp->rtort_pipe_count++;
69586 cur_ops->deferred_free(old_rp);
69587 }
69588 @@ -1064,7 +1064,7 @@ static void rcu_torture_timer(unsigned long unused)
69589 }
69590 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69591 if (p->rtort_mbtest == 0)
69592 - atomic_inc(&n_rcu_torture_mberror);
69593 + atomic_inc_unchecked(&n_rcu_torture_mberror);
69594 spin_lock(&rand_lock);
69595 cur_ops->read_delay(&rand);
69596 n_rcu_torture_timers++;
69597 @@ -1128,7 +1128,7 @@ rcu_torture_reader(void *arg)
69598 }
69599 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69600 if (p->rtort_mbtest == 0)
69601 - atomic_inc(&n_rcu_torture_mberror);
69602 + atomic_inc_unchecked(&n_rcu_torture_mberror);
69603 cur_ops->read_delay(&rand);
69604 preempt_disable();
69605 pipe_count = p->rtort_pipe_count;
69606 @@ -1191,10 +1191,10 @@ rcu_torture_printk(char *page)
69607 rcu_torture_current,
69608 rcu_torture_current_version,
69609 list_empty(&rcu_torture_freelist),
69610 - atomic_read(&n_rcu_torture_alloc),
69611 - atomic_read(&n_rcu_torture_alloc_fail),
69612 - atomic_read(&n_rcu_torture_free),
69613 - atomic_read(&n_rcu_torture_mberror),
69614 + atomic_read_unchecked(&n_rcu_torture_alloc),
69615 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69616 + atomic_read_unchecked(&n_rcu_torture_free),
69617 + atomic_read_unchecked(&n_rcu_torture_mberror),
69618 n_rcu_torture_boost_ktrerror,
69619 n_rcu_torture_boost_rterror,
69620 n_rcu_torture_boost_failure,
69621 @@ -1208,14 +1208,14 @@ rcu_torture_printk(char *page)
69622 n_barrier_attempts,
69623 n_rcu_torture_barrier_error);
69624 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69625 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
69626 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
69627 n_rcu_torture_barrier_error != 0 ||
69628 n_rcu_torture_boost_ktrerror != 0 ||
69629 n_rcu_torture_boost_rterror != 0 ||
69630 n_rcu_torture_boost_failure != 0 ||
69631 i > 1) {
69632 cnt += sprintf(&page[cnt], "!!! ");
69633 - atomic_inc(&n_rcu_torture_error);
69634 + atomic_inc_unchecked(&n_rcu_torture_error);
69635 WARN_ON_ONCE(1);
69636 }
69637 cnt += sprintf(&page[cnt], "Reader Pipe: ");
69638 @@ -1229,7 +1229,7 @@ rcu_torture_printk(char *page)
69639 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69640 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69641 cnt += sprintf(&page[cnt], " %d",
69642 - atomic_read(&rcu_torture_wcount[i]));
69643 + atomic_read_unchecked(&rcu_torture_wcount[i]));
69644 }
69645 cnt += sprintf(&page[cnt], "\n");
69646 if (cur_ops->stats)
69647 @@ -1888,7 +1888,7 @@ rcu_torture_cleanup(void)
69648
69649 if (cur_ops->cleanup)
69650 cur_ops->cleanup();
69651 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69652 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69653 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
69654 else if (n_online_successes != n_online_attempts ||
69655 n_offline_successes != n_offline_attempts)
69656 @@ -1958,18 +1958,18 @@ rcu_torture_init(void)
69657
69658 rcu_torture_current = NULL;
69659 rcu_torture_current_version = 0;
69660 - atomic_set(&n_rcu_torture_alloc, 0);
69661 - atomic_set(&n_rcu_torture_alloc_fail, 0);
69662 - atomic_set(&n_rcu_torture_free, 0);
69663 - atomic_set(&n_rcu_torture_mberror, 0);
69664 - atomic_set(&n_rcu_torture_error, 0);
69665 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69666 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69667 + atomic_set_unchecked(&n_rcu_torture_free, 0);
69668 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69669 + atomic_set_unchecked(&n_rcu_torture_error, 0);
69670 n_rcu_torture_barrier_error = 0;
69671 n_rcu_torture_boost_ktrerror = 0;
69672 n_rcu_torture_boost_rterror = 0;
69673 n_rcu_torture_boost_failure = 0;
69674 n_rcu_torture_boosts = 0;
69675 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69676 - atomic_set(&rcu_torture_wcount[i], 0);
69677 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69678 for_each_possible_cpu(cpu) {
69679 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69680 per_cpu(rcu_torture_count, cpu)[i] = 0;
69681 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
69682 index 4b97bba..b92c9d2 100644
69683 --- a/kernel/rcutree.c
69684 +++ b/kernel/rcutree.c
69685 @@ -366,9 +366,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
69686 rcu_prepare_for_idle(smp_processor_id());
69687 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69688 smp_mb__before_atomic_inc(); /* See above. */
69689 - atomic_inc(&rdtp->dynticks);
69690 + atomic_inc_unchecked(&rdtp->dynticks);
69691 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
69692 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69693 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69694
69695 /*
69696 * The idle task is not permitted to enter the idle loop while
69697 @@ -457,10 +457,10 @@ void rcu_irq_exit(void)
69698 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
69699 {
69700 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
69701 - atomic_inc(&rdtp->dynticks);
69702 + atomic_inc_unchecked(&rdtp->dynticks);
69703 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69704 smp_mb__after_atomic_inc(); /* See above. */
69705 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69706 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69707 rcu_cleanup_after_idle(smp_processor_id());
69708 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
69709 if (!is_idle_task(current)) {
69710 @@ -554,14 +554,14 @@ void rcu_nmi_enter(void)
69711 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
69712
69713 if (rdtp->dynticks_nmi_nesting == 0 &&
69714 - (atomic_read(&rdtp->dynticks) & 0x1))
69715 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
69716 return;
69717 rdtp->dynticks_nmi_nesting++;
69718 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
69719 - atomic_inc(&rdtp->dynticks);
69720 + atomic_inc_unchecked(&rdtp->dynticks);
69721 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69722 smp_mb__after_atomic_inc(); /* See above. */
69723 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69724 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69725 }
69726
69727 /**
69728 @@ -580,9 +580,9 @@ void rcu_nmi_exit(void)
69729 return;
69730 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69731 smp_mb__before_atomic_inc(); /* See above. */
69732 - atomic_inc(&rdtp->dynticks);
69733 + atomic_inc_unchecked(&rdtp->dynticks);
69734 smp_mb__after_atomic_inc(); /* Force delay to next write. */
69735 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69736 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69737 }
69738
69739 #ifdef CONFIG_PROVE_RCU
69740 @@ -598,7 +598,7 @@ int rcu_is_cpu_idle(void)
69741 int ret;
69742
69743 preempt_disable();
69744 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69745 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69746 preempt_enable();
69747 return ret;
69748 }
69749 @@ -668,7 +668,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
69750 */
69751 static int dyntick_save_progress_counter(struct rcu_data *rdp)
69752 {
69753 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
69754 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69755 return (rdp->dynticks_snap & 0x1) == 0;
69756 }
69757
69758 @@ -683,7 +683,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
69759 unsigned int curr;
69760 unsigned int snap;
69761
69762 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
69763 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69764 snap = (unsigned int)rdp->dynticks_snap;
69765
69766 /*
69767 @@ -713,10 +713,10 @@ static int jiffies_till_stall_check(void)
69768 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
69769 */
69770 if (till_stall_check < 3) {
69771 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
69772 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
69773 till_stall_check = 3;
69774 } else if (till_stall_check > 300) {
69775 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
69776 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
69777 till_stall_check = 300;
69778 }
69779 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
69780 @@ -1824,7 +1824,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
69781 /*
69782 * Do RCU core processing for the current CPU.
69783 */
69784 -static void rcu_process_callbacks(struct softirq_action *unused)
69785 +static void rcu_process_callbacks(void)
69786 {
69787 trace_rcu_utilization("Start RCU core");
69788 __rcu_process_callbacks(&rcu_sched_state,
69789 @@ -2042,8 +2042,8 @@ void synchronize_rcu_bh(void)
69790 }
69791 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
69792
69793 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69794 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69795 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69796 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69797
69798 static int synchronize_sched_expedited_cpu_stop(void *data)
69799 {
69800 @@ -2104,7 +2104,7 @@ void synchronize_sched_expedited(void)
69801 int firstsnap, s, snap, trycount = 0;
69802
69803 /* Note that atomic_inc_return() implies full memory barrier. */
69804 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69805 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69806 get_online_cpus();
69807 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
69808
69809 @@ -2126,7 +2126,7 @@ void synchronize_sched_expedited(void)
69810 }
69811
69812 /* Check to see if someone else did our work for us. */
69813 - s = atomic_read(&sync_sched_expedited_done);
69814 + s = atomic_read_unchecked(&sync_sched_expedited_done);
69815 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69816 smp_mb(); /* ensure test happens before caller kfree */
69817 return;
69818 @@ -2141,7 +2141,7 @@ void synchronize_sched_expedited(void)
69819 * grace period works for us.
69820 */
69821 get_online_cpus();
69822 - snap = atomic_read(&sync_sched_expedited_started);
69823 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
69824 smp_mb(); /* ensure read is before try_stop_cpus(). */
69825 }
69826
69827 @@ -2152,12 +2152,12 @@ void synchronize_sched_expedited(void)
69828 * than we did beat us to the punch.
69829 */
69830 do {
69831 - s = atomic_read(&sync_sched_expedited_done);
69832 + s = atomic_read_unchecked(&sync_sched_expedited_done);
69833 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69834 smp_mb(); /* ensure test happens before caller kfree */
69835 break;
69836 }
69837 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69838 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69839
69840 put_online_cpus();
69841 }
69842 @@ -2421,7 +2421,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
69843 rdp->qlen = 0;
69844 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
69845 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
69846 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
69847 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
69848 rdp->cpu = cpu;
69849 rdp->rsp = rsp;
69850 raw_spin_unlock_irqrestore(&rnp->lock, flags);
69851 @@ -2449,8 +2449,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
69852 rdp->n_force_qs_snap = rsp->n_force_qs;
69853 rdp->blimit = blimit;
69854 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
69855 - atomic_set(&rdp->dynticks->dynticks,
69856 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
69857 + atomic_set_unchecked(&rdp->dynticks->dynticks,
69858 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
69859 rcu_prepare_for_idle_init(cpu);
69860 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
69861
69862 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
69863 index 19b61ac..5c60a94 100644
69864 --- a/kernel/rcutree.h
69865 +++ b/kernel/rcutree.h
69866 @@ -83,7 +83,7 @@ struct rcu_dynticks {
69867 long long dynticks_nesting; /* Track irq/process nesting level. */
69868 /* Process level is worth LLONG_MAX/2. */
69869 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69870 - atomic_t dynticks; /* Even value for idle, else odd. */
69871 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
69872 #ifdef CONFIG_RCU_FAST_NO_HZ
69873 int dyntick_drain; /* Prepare-for-idle state variable. */
69874 unsigned long dyntick_holdoff;
69875 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
69876 index 3e48994..d94f03a 100644
69877 --- a/kernel/rcutree_plugin.h
69878 +++ b/kernel/rcutree_plugin.h
69879 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
69880
69881 /* Clean up and exit. */
69882 smp_mb(); /* ensure expedited GP seen before counter increment. */
69883 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69884 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69885 unlock_mb_ret:
69886 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69887 mb_ret:
69888 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69889 index d4bc16d..c234a5c 100644
69890 --- a/kernel/rcutree_trace.c
69891 +++ b/kernel/rcutree_trace.c
69892 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69893 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69894 rdp->qs_pending);
69895 seq_printf(m, " dt=%d/%llx/%d df=%lu",
69896 - atomic_read(&rdp->dynticks->dynticks),
69897 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69898 rdp->dynticks->dynticks_nesting,
69899 rdp->dynticks->dynticks_nmi_nesting,
69900 rdp->dynticks_fqs);
69901 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69902 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69903 rdp->qs_pending);
69904 seq_printf(m, ",%d,%llx,%d,%lu",
69905 - atomic_read(&rdp->dynticks->dynticks),
69906 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69907 rdp->dynticks->dynticks_nesting,
69908 rdp->dynticks->dynticks_nmi_nesting,
69909 rdp->dynticks_fqs);
69910 diff --git a/kernel/resource.c b/kernel/resource.c
69911 index e1d2b8e..24820bb 100644
69912 --- a/kernel/resource.c
69913 +++ b/kernel/resource.c
69914 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69915
69916 static int __init ioresources_init(void)
69917 {
69918 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69919 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69920 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69921 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69922 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69923 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69924 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69925 +#endif
69926 +#else
69927 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69928 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69929 +#endif
69930 return 0;
69931 }
69932 __initcall(ioresources_init);
69933 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69934 index 98ec494..4241d6d 100644
69935 --- a/kernel/rtmutex-tester.c
69936 +++ b/kernel/rtmutex-tester.c
69937 @@ -20,7 +20,7 @@
69938 #define MAX_RT_TEST_MUTEXES 8
69939
69940 static spinlock_t rttest_lock;
69941 -static atomic_t rttest_event;
69942 +static atomic_unchecked_t rttest_event;
69943
69944 struct test_thread_data {
69945 int opcode;
69946 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69947
69948 case RTTEST_LOCKCONT:
69949 td->mutexes[td->opdata] = 1;
69950 - td->event = atomic_add_return(1, &rttest_event);
69951 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69952 return 0;
69953
69954 case RTTEST_RESET:
69955 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69956 return 0;
69957
69958 case RTTEST_RESETEVENT:
69959 - atomic_set(&rttest_event, 0);
69960 + atomic_set_unchecked(&rttest_event, 0);
69961 return 0;
69962
69963 default:
69964 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69965 return ret;
69966
69967 td->mutexes[id] = 1;
69968 - td->event = atomic_add_return(1, &rttest_event);
69969 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69970 rt_mutex_lock(&mutexes[id]);
69971 - td->event = atomic_add_return(1, &rttest_event);
69972 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69973 td->mutexes[id] = 4;
69974 return 0;
69975
69976 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69977 return ret;
69978
69979 td->mutexes[id] = 1;
69980 - td->event = atomic_add_return(1, &rttest_event);
69981 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69982 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69983 - td->event = atomic_add_return(1, &rttest_event);
69984 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69985 td->mutexes[id] = ret ? 0 : 4;
69986 return ret ? -EINTR : 0;
69987
69988 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69989 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69990 return ret;
69991
69992 - td->event = atomic_add_return(1, &rttest_event);
69993 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69994 rt_mutex_unlock(&mutexes[id]);
69995 - td->event = atomic_add_return(1, &rttest_event);
69996 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69997 td->mutexes[id] = 0;
69998 return 0;
69999
70000 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
70001 break;
70002
70003 td->mutexes[dat] = 2;
70004 - td->event = atomic_add_return(1, &rttest_event);
70005 + td->event = atomic_add_return_unchecked(1, &rttest_event);
70006 break;
70007
70008 default:
70009 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
70010 return;
70011
70012 td->mutexes[dat] = 3;
70013 - td->event = atomic_add_return(1, &rttest_event);
70014 + td->event = atomic_add_return_unchecked(1, &rttest_event);
70015 break;
70016
70017 case RTTEST_LOCKNOWAIT:
70018 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
70019 return;
70020
70021 td->mutexes[dat] = 1;
70022 - td->event = atomic_add_return(1, &rttest_event);
70023 + td->event = atomic_add_return_unchecked(1, &rttest_event);
70024 return;
70025
70026 default:
70027 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
70028 index 0984a21..939f183 100644
70029 --- a/kernel/sched/auto_group.c
70030 +++ b/kernel/sched/auto_group.c
70031 @@ -11,7 +11,7 @@
70032
70033 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
70034 static struct autogroup autogroup_default;
70035 -static atomic_t autogroup_seq_nr;
70036 +static atomic_unchecked_t autogroup_seq_nr;
70037
70038 void __init autogroup_init(struct task_struct *init_task)
70039 {
70040 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
70041
70042 kref_init(&ag->kref);
70043 init_rwsem(&ag->lock);
70044 - ag->id = atomic_inc_return(&autogroup_seq_nr);
70045 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
70046 ag->tg = tg;
70047 #ifdef CONFIG_RT_GROUP_SCHED
70048 /*
70049 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
70050 index 39c44fa..70edb8b 100644
70051 --- a/kernel/sched/core.c
70052 +++ b/kernel/sched/core.c
70053 @@ -4103,6 +4103,8 @@ int can_nice(const struct task_struct *p, const int nice)
70054 /* convert nice value [19,-20] to rlimit style value [1,40] */
70055 int nice_rlim = 20 - nice;
70056
70057 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
70058 +
70059 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
70060 capable(CAP_SYS_NICE));
70061 }
70062 @@ -4136,7 +4138,8 @@ SYSCALL_DEFINE1(nice, int, increment)
70063 if (nice > 19)
70064 nice = 19;
70065
70066 - if (increment < 0 && !can_nice(current, nice))
70067 + if (increment < 0 && (!can_nice(current, nice) ||
70068 + gr_handle_chroot_nice()))
70069 return -EPERM;
70070
70071 retval = security_task_setnice(current, nice);
70072 @@ -4290,6 +4293,7 @@ recheck:
70073 unsigned long rlim_rtprio =
70074 task_rlimit(p, RLIMIT_RTPRIO);
70075
70076 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
70077 /* can't set/change the rt policy */
70078 if (policy != p->policy && !rlim_rtprio)
70079 return -EPERM;
70080 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
70081 index c099cc6..06aec4f 100644
70082 --- a/kernel/sched/fair.c
70083 +++ b/kernel/sched/fair.c
70084 @@ -4846,7 +4846,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
70085 * run_rebalance_domains is triggered when needed from the scheduler tick.
70086 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
70087 */
70088 -static void run_rebalance_domains(struct softirq_action *h)
70089 +static void run_rebalance_domains(void)
70090 {
70091 int this_cpu = smp_processor_id();
70092 struct rq *this_rq = cpu_rq(this_cpu);
70093 diff --git a/kernel/signal.c b/kernel/signal.c
70094 index 6771027..763e51e 100644
70095 --- a/kernel/signal.c
70096 +++ b/kernel/signal.c
70097 @@ -48,12 +48,12 @@ static struct kmem_cache *sigqueue_cachep;
70098
70099 int print_fatal_signals __read_mostly;
70100
70101 -static void __user *sig_handler(struct task_struct *t, int sig)
70102 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
70103 {
70104 return t->sighand->action[sig - 1].sa.sa_handler;
70105 }
70106
70107 -static int sig_handler_ignored(void __user *handler, int sig)
70108 +static int sig_handler_ignored(__sighandler_t handler, int sig)
70109 {
70110 /* Is it explicitly or implicitly ignored? */
70111 return handler == SIG_IGN ||
70112 @@ -62,7 +62,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
70113
70114 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
70115 {
70116 - void __user *handler;
70117 + __sighandler_t handler;
70118
70119 handler = sig_handler(t, sig);
70120
70121 @@ -366,6 +366,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
70122 atomic_inc(&user->sigpending);
70123 rcu_read_unlock();
70124
70125 + if (!override_rlimit)
70126 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
70127 +
70128 if (override_rlimit ||
70129 atomic_read(&user->sigpending) <=
70130 task_rlimit(t, RLIMIT_SIGPENDING)) {
70131 @@ -490,7 +493,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
70132
70133 int unhandled_signal(struct task_struct *tsk, int sig)
70134 {
70135 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
70136 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
70137 if (is_global_init(tsk))
70138 return 1;
70139 if (handler != SIG_IGN && handler != SIG_DFL)
70140 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
70141 }
70142 }
70143
70144 + /* allow glibc communication via tgkill to other threads in our
70145 + thread group */
70146 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
70147 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
70148 + && gr_handle_signal(t, sig))
70149 + return -EPERM;
70150 +
70151 return security_task_kill(t, info, sig, 0);
70152 }
70153
70154 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
70155 return send_signal(sig, info, p, 1);
70156 }
70157
70158 -static int
70159 +int
70160 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70161 {
70162 return send_signal(sig, info, t, 0);
70163 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70164 unsigned long int flags;
70165 int ret, blocked, ignored;
70166 struct k_sigaction *action;
70167 + int is_unhandled = 0;
70168
70169 spin_lock_irqsave(&t->sighand->siglock, flags);
70170 action = &t->sighand->action[sig-1];
70171 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70172 }
70173 if (action->sa.sa_handler == SIG_DFL)
70174 t->signal->flags &= ~SIGNAL_UNKILLABLE;
70175 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
70176 + is_unhandled = 1;
70177 ret = specific_send_sig_info(sig, info, t);
70178 spin_unlock_irqrestore(&t->sighand->siglock, flags);
70179
70180 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
70181 + normal operation */
70182 + if (is_unhandled) {
70183 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
70184 + gr_handle_crash(t, sig);
70185 + }
70186 +
70187 return ret;
70188 }
70189
70190 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
70191 ret = check_kill_permission(sig, info, p);
70192 rcu_read_unlock();
70193
70194 - if (!ret && sig)
70195 + if (!ret && sig) {
70196 ret = do_send_sig_info(sig, info, p, true);
70197 + if (!ret)
70198 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
70199 + }
70200
70201 return ret;
70202 }
70203 @@ -2858,7 +2881,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
70204 int error = -ESRCH;
70205
70206 rcu_read_lock();
70207 - p = find_task_by_vpid(pid);
70208 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70209 + /* allow glibc communication via tgkill to other threads in our
70210 + thread group */
70211 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
70212 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
70213 + p = find_task_by_vpid_unrestricted(pid);
70214 + else
70215 +#endif
70216 + p = find_task_by_vpid(pid);
70217 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
70218 error = check_kill_permission(sig, info, p);
70219 /*
70220 diff --git a/kernel/smp.c b/kernel/smp.c
70221 index d0ae5b2..b87c5a8 100644
70222 --- a/kernel/smp.c
70223 +++ b/kernel/smp.c
70224 @@ -582,22 +582,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
70225 }
70226 EXPORT_SYMBOL(smp_call_function);
70227
70228 -void ipi_call_lock(void)
70229 +void ipi_call_lock(void) __acquires(call_function.lock)
70230 {
70231 raw_spin_lock(&call_function.lock);
70232 }
70233
70234 -void ipi_call_unlock(void)
70235 +void ipi_call_unlock(void) __releases(call_function.lock)
70236 {
70237 raw_spin_unlock(&call_function.lock);
70238 }
70239
70240 -void ipi_call_lock_irq(void)
70241 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
70242 {
70243 raw_spin_lock_irq(&call_function.lock);
70244 }
70245
70246 -void ipi_call_unlock_irq(void)
70247 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
70248 {
70249 raw_spin_unlock_irq(&call_function.lock);
70250 }
70251 diff --git a/kernel/softirq.c b/kernel/softirq.c
70252 index 671f959..91c51cb 100644
70253 --- a/kernel/softirq.c
70254 +++ b/kernel/softirq.c
70255 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
70256
70257 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
70258
70259 -char *softirq_to_name[NR_SOFTIRQS] = {
70260 +const char * const softirq_to_name[NR_SOFTIRQS] = {
70261 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
70262 "TASKLET", "SCHED", "HRTIMER", "RCU"
70263 };
70264 @@ -235,7 +235,7 @@ restart:
70265 kstat_incr_softirqs_this_cpu(vec_nr);
70266
70267 trace_softirq_entry(vec_nr);
70268 - h->action(h);
70269 + h->action();
70270 trace_softirq_exit(vec_nr);
70271 if (unlikely(prev_count != preempt_count())) {
70272 printk(KERN_ERR "huh, entered softirq %u %s %p"
70273 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
70274 or_softirq_pending(1UL << nr);
70275 }
70276
70277 -void open_softirq(int nr, void (*action)(struct softirq_action *))
70278 +void open_softirq(int nr, void (*action)(void))
70279 {
70280 - softirq_vec[nr].action = action;
70281 + pax_open_kernel();
70282 + *(void **)&softirq_vec[nr].action = action;
70283 + pax_close_kernel();
70284 }
70285
70286 /*
70287 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
70288
70289 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
70290
70291 -static void tasklet_action(struct softirq_action *a)
70292 +static void tasklet_action(void)
70293 {
70294 struct tasklet_struct *list;
70295
70296 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
70297 }
70298 }
70299
70300 -static void tasklet_hi_action(struct softirq_action *a)
70301 +static void tasklet_hi_action(void)
70302 {
70303 struct tasklet_struct *list;
70304
70305 diff --git a/kernel/srcu.c b/kernel/srcu.c
70306 index 2095be3..9a5b89d 100644
70307 --- a/kernel/srcu.c
70308 +++ b/kernel/srcu.c
70309 @@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
70310 preempt_disable();
70311 idx = rcu_dereference_index_check(sp->completed,
70312 rcu_read_lock_sched_held()) & 0x1;
70313 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70314 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70315 smp_mb(); /* B */ /* Avoid leaking the critical section. */
70316 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70317 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70318 preempt_enable();
70319 return idx;
70320 }
70321 @@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
70322 {
70323 preempt_disable();
70324 smp_mb(); /* C */ /* Avoid leaking the critical section. */
70325 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70326 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70327 preempt_enable();
70328 }
70329 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
70330 diff --git a/kernel/sys.c b/kernel/sys.c
70331 index 2d39a84..1c3abe4 100644
70332 --- a/kernel/sys.c
70333 +++ b/kernel/sys.c
70334 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
70335 error = -EACCES;
70336 goto out;
70337 }
70338 +
70339 + if (gr_handle_chroot_setpriority(p, niceval)) {
70340 + error = -EACCES;
70341 + goto out;
70342 + }
70343 +
70344 no_nice = security_task_setnice(p, niceval);
70345 if (no_nice) {
70346 error = no_nice;
70347 @@ -594,6 +600,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
70348 goto error;
70349 }
70350
70351 + if (gr_check_group_change(new->gid, new->egid, -1))
70352 + goto error;
70353 +
70354 if (rgid != (gid_t) -1 ||
70355 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
70356 new->sgid = new->egid;
70357 @@ -629,6 +638,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
70358 old = current_cred();
70359
70360 retval = -EPERM;
70361 +
70362 + if (gr_check_group_change(kgid, kgid, kgid))
70363 + goto error;
70364 +
70365 if (nsown_capable(CAP_SETGID))
70366 new->gid = new->egid = new->sgid = new->fsgid = kgid;
70367 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
70368 @@ -646,7 +659,7 @@ error:
70369 /*
70370 * change the user struct in a credentials set to match the new UID
70371 */
70372 -static int set_user(struct cred *new)
70373 +int set_user(struct cred *new)
70374 {
70375 struct user_struct *new_user;
70376
70377 @@ -726,6 +739,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
70378 goto error;
70379 }
70380
70381 + if (gr_check_user_change(new->uid, new->euid, -1))
70382 + goto error;
70383 +
70384 if (!uid_eq(new->uid, old->uid)) {
70385 retval = set_user(new);
70386 if (retval < 0)
70387 @@ -776,6 +792,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
70388 old = current_cred();
70389
70390 retval = -EPERM;
70391 +
70392 + if (gr_check_crash_uid(kuid))
70393 + goto error;
70394 + if (gr_check_user_change(kuid, kuid, kuid))
70395 + goto error;
70396 +
70397 if (nsown_capable(CAP_SETUID)) {
70398 new->suid = new->uid = kuid;
70399 if (!uid_eq(kuid, old->uid)) {
70400 @@ -845,6 +867,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
70401 goto error;
70402 }
70403
70404 + if (gr_check_user_change(kruid, keuid, -1))
70405 + goto error;
70406 +
70407 if (ruid != (uid_t) -1) {
70408 new->uid = kruid;
70409 if (!uid_eq(kruid, old->uid)) {
70410 @@ -927,6 +952,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
70411 goto error;
70412 }
70413
70414 + if (gr_check_group_change(krgid, kegid, -1))
70415 + goto error;
70416 +
70417 if (rgid != (gid_t) -1)
70418 new->gid = krgid;
70419 if (egid != (gid_t) -1)
70420 @@ -980,6 +1008,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70421 if (!uid_valid(kuid))
70422 return old_fsuid;
70423
70424 + if (gr_check_user_change(-1, -1, kuid))
70425 + goto error;
70426 +
70427 new = prepare_creds();
70428 if (!new)
70429 return old_fsuid;
70430 @@ -994,6 +1025,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70431 }
70432 }
70433
70434 +error:
70435 abort_creds(new);
70436 return old_fsuid;
70437
70438 @@ -1026,12 +1058,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
70439 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
70440 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
70441 nsown_capable(CAP_SETGID)) {
70442 + if (gr_check_group_change(-1, -1, kgid))
70443 + goto error;
70444 +
70445 if (!gid_eq(kgid, old->fsgid)) {
70446 new->fsgid = kgid;
70447 goto change_okay;
70448 }
70449 }
70450
70451 +error:
70452 abort_creds(new);
70453 return old_fsgid;
70454
70455 @@ -1264,13 +1300,13 @@ DECLARE_RWSEM(uts_sem);
70456 * Work around broken programs that cannot handle "Linux 3.0".
70457 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
70458 */
70459 -static int override_release(char __user *release, int len)
70460 +static int override_release(char __user *release, size_t len)
70461 {
70462 int ret = 0;
70463 - char buf[65];
70464
70465 if (current->personality & UNAME26) {
70466 - char *rest = UTS_RELEASE;
70467 + char buf[65] = { 0 };
70468 + const char *rest = UTS_RELEASE;
70469 int ndots = 0;
70470 unsigned v;
70471
70472 @@ -1282,7 +1318,10 @@ static int override_release(char __user *release, int len)
70473 rest++;
70474 }
70475 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
70476 + if (sizeof buf < len)
70477 + len = sizeof buf;
70478 snprintf(buf, len, "2.6.%u%s", v, rest);
70479 + buf[len - 1] = 0;
70480 ret = copy_to_user(release, buf, len);
70481 }
70482 return ret;
70483 @@ -1337,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
70484 return -EFAULT;
70485
70486 down_read(&uts_sem);
70487 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
70488 + error = __copy_to_user(name->sysname, &utsname()->sysname,
70489 __OLD_UTS_LEN);
70490 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
70491 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
70492 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
70493 __OLD_UTS_LEN);
70494 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
70495 - error |= __copy_to_user(&name->release, &utsname()->release,
70496 + error |= __copy_to_user(name->release, &utsname()->release,
70497 __OLD_UTS_LEN);
70498 error |= __put_user(0, name->release + __OLD_UTS_LEN);
70499 - error |= __copy_to_user(&name->version, &utsname()->version,
70500 + error |= __copy_to_user(name->version, &utsname()->version,
70501 __OLD_UTS_LEN);
70502 error |= __put_user(0, name->version + __OLD_UTS_LEN);
70503 - error |= __copy_to_user(&name->machine, &utsname()->machine,
70504 + error |= __copy_to_user(name->machine, &utsname()->machine,
70505 __OLD_UTS_LEN);
70506 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
70507 up_read(&uts_sem);
70508 @@ -2024,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
70509 error = get_dumpable(me->mm);
70510 break;
70511 case PR_SET_DUMPABLE:
70512 - if (arg2 < 0 || arg2 > 1) {
70513 + if (arg2 > 1) {
70514 error = -EINVAL;
70515 break;
70516 }
70517 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
70518 index 4ab1187..33f4f2b 100644
70519 --- a/kernel/sysctl.c
70520 +++ b/kernel/sysctl.c
70521 @@ -91,7 +91,6 @@
70522
70523
70524 #if defined(CONFIG_SYSCTL)
70525 -
70526 /* External variables not in a header file. */
70527 extern int sysctl_overcommit_memory;
70528 extern int sysctl_overcommit_ratio;
70529 @@ -169,10 +168,13 @@ static int proc_taint(struct ctl_table *table, int write,
70530 void __user *buffer, size_t *lenp, loff_t *ppos);
70531 #endif
70532
70533 -#ifdef CONFIG_PRINTK
70534 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70535 void __user *buffer, size_t *lenp, loff_t *ppos);
70536 -#endif
70537 +
70538 +static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70539 + void __user *buffer, size_t *lenp, loff_t *ppos);
70540 +static int proc_dostring_coredump(struct ctl_table *table, int write,
70541 + void __user *buffer, size_t *lenp, loff_t *ppos);
70542
70543 #ifdef CONFIG_MAGIC_SYSRQ
70544 /* Note: sysrq code uses it's own private copy */
70545 @@ -196,6 +198,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
70546
70547 #endif
70548
70549 +extern struct ctl_table grsecurity_table[];
70550 +
70551 static struct ctl_table kern_table[];
70552 static struct ctl_table vm_table[];
70553 static struct ctl_table fs_table[];
70554 @@ -210,6 +214,20 @@ extern struct ctl_table epoll_table[];
70555 int sysctl_legacy_va_layout;
70556 #endif
70557
70558 +#ifdef CONFIG_PAX_SOFTMODE
70559 +static ctl_table pax_table[] = {
70560 + {
70561 + .procname = "softmode",
70562 + .data = &pax_softmode,
70563 + .maxlen = sizeof(unsigned int),
70564 + .mode = 0600,
70565 + .proc_handler = &proc_dointvec,
70566 + },
70567 +
70568 + { }
70569 +};
70570 +#endif
70571 +
70572 /* The default sysctl tables: */
70573
70574 static struct ctl_table sysctl_base_table[] = {
70575 @@ -256,6 +274,22 @@ static int max_extfrag_threshold = 1000;
70576 #endif
70577
70578 static struct ctl_table kern_table[] = {
70579 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70580 + {
70581 + .procname = "grsecurity",
70582 + .mode = 0500,
70583 + .child = grsecurity_table,
70584 + },
70585 +#endif
70586 +
70587 +#ifdef CONFIG_PAX_SOFTMODE
70588 + {
70589 + .procname = "pax",
70590 + .mode = 0500,
70591 + .child = pax_table,
70592 + },
70593 +#endif
70594 +
70595 {
70596 .procname = "sched_child_runs_first",
70597 .data = &sysctl_sched_child_runs_first,
70598 @@ -410,7 +444,7 @@ static struct ctl_table kern_table[] = {
70599 .data = core_pattern,
70600 .maxlen = CORENAME_MAX_SIZE,
70601 .mode = 0644,
70602 - .proc_handler = proc_dostring,
70603 + .proc_handler = proc_dostring_coredump,
70604 },
70605 {
70606 .procname = "core_pipe_limit",
70607 @@ -540,7 +574,7 @@ static struct ctl_table kern_table[] = {
70608 .data = &modprobe_path,
70609 .maxlen = KMOD_PATH_LEN,
70610 .mode = 0644,
70611 - .proc_handler = proc_dostring,
70612 + .proc_handler = proc_dostring_modpriv,
70613 },
70614 {
70615 .procname = "modules_disabled",
70616 @@ -707,16 +741,20 @@ static struct ctl_table kern_table[] = {
70617 .extra1 = &zero,
70618 .extra2 = &one,
70619 },
70620 +#endif
70621 {
70622 .procname = "kptr_restrict",
70623 .data = &kptr_restrict,
70624 .maxlen = sizeof(int),
70625 .mode = 0644,
70626 .proc_handler = proc_dointvec_minmax_sysadmin,
70627 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70628 + .extra1 = &two,
70629 +#else
70630 .extra1 = &zero,
70631 +#endif
70632 .extra2 = &two,
70633 },
70634 -#endif
70635 {
70636 .procname = "ngroups_max",
70637 .data = &ngroups_max,
70638 @@ -1215,6 +1253,13 @@ static struct ctl_table vm_table[] = {
70639 .proc_handler = proc_dointvec_minmax,
70640 .extra1 = &zero,
70641 },
70642 + {
70643 + .procname = "heap_stack_gap",
70644 + .data = &sysctl_heap_stack_gap,
70645 + .maxlen = sizeof(sysctl_heap_stack_gap),
70646 + .mode = 0644,
70647 + .proc_handler = proc_doulongvec_minmax,
70648 + },
70649 #else
70650 {
70651 .procname = "nr_trim_pages",
70652 @@ -1498,7 +1543,7 @@ static struct ctl_table fs_table[] = {
70653 .data = &suid_dumpable,
70654 .maxlen = sizeof(int),
70655 .mode = 0644,
70656 - .proc_handler = proc_dointvec_minmax,
70657 + .proc_handler = proc_dointvec_minmax_coredump,
70658 .extra1 = &zero,
70659 .extra2 = &two,
70660 },
70661 @@ -1645,6 +1690,16 @@ int proc_dostring(struct ctl_table *table, int write,
70662 buffer, lenp, ppos);
70663 }
70664
70665 +int proc_dostring_modpriv(struct ctl_table *table, int write,
70666 + void __user *buffer, size_t *lenp, loff_t *ppos)
70667 +{
70668 + if (write && !capable(CAP_SYS_MODULE))
70669 + return -EPERM;
70670 +
70671 + return _proc_do_string(table->data, table->maxlen, write,
70672 + buffer, lenp, ppos);
70673 +}
70674 +
70675 static size_t proc_skip_spaces(char **buf)
70676 {
70677 size_t ret;
70678 @@ -1750,6 +1805,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
70679 len = strlen(tmp);
70680 if (len > *size)
70681 len = *size;
70682 + if (len > sizeof(tmp))
70683 + len = sizeof(tmp);
70684 if (copy_to_user(*buf, tmp, len))
70685 return -EFAULT;
70686 *size -= len;
70687 @@ -1942,7 +1999,6 @@ static int proc_taint(struct ctl_table *table, int write,
70688 return err;
70689 }
70690
70691 -#ifdef CONFIG_PRINTK
70692 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70693 void __user *buffer, size_t *lenp, loff_t *ppos)
70694 {
70695 @@ -1951,7 +2007,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70696
70697 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70698 }
70699 -#endif
70700
70701 struct do_proc_dointvec_minmax_conv_param {
70702 int *min;
70703 @@ -2009,6 +2064,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
70704 do_proc_dointvec_minmax_conv, &param);
70705 }
70706
70707 +static void validate_coredump_safety(void)
70708 +{
70709 + if (suid_dumpable == SUID_DUMPABLE_SAFE &&
70710 + core_pattern[0] != '/' && core_pattern[0] != '|') {
70711 + printk(KERN_WARNING "Unsafe core_pattern used with "\
70712 + "suid_dumpable=2. Pipe handler or fully qualified "\
70713 + "core dump path required.\n");
70714 + }
70715 +}
70716 +
70717 +static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70718 + void __user *buffer, size_t *lenp, loff_t *ppos)
70719 +{
70720 + int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70721 + if (!error)
70722 + validate_coredump_safety();
70723 + return error;
70724 +}
70725 +
70726 +static int proc_dostring_coredump(struct ctl_table *table, int write,
70727 + void __user *buffer, size_t *lenp, loff_t *ppos)
70728 +{
70729 + int error = proc_dostring(table, write, buffer, lenp, ppos);
70730 + if (!error)
70731 + validate_coredump_safety();
70732 + return error;
70733 +}
70734 +
70735 static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
70736 void __user *buffer,
70737 size_t *lenp, loff_t *ppos,
70738 @@ -2066,8 +2149,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
70739 *i = val;
70740 } else {
70741 val = convdiv * (*i) / convmul;
70742 - if (!first)
70743 + if (!first) {
70744 err = proc_put_char(&buffer, &left, '\t');
70745 + if (err)
70746 + break;
70747 + }
70748 err = proc_put_long(&buffer, &left, val, false);
70749 if (err)
70750 break;
70751 @@ -2459,6 +2545,12 @@ int proc_dostring(struct ctl_table *table, int write,
70752 return -ENOSYS;
70753 }
70754
70755 +int proc_dostring_modpriv(struct ctl_table *table, int write,
70756 + void __user *buffer, size_t *lenp, loff_t *ppos)
70757 +{
70758 + return -ENOSYS;
70759 +}
70760 +
70761 int proc_dointvec(struct ctl_table *table, int write,
70762 void __user *buffer, size_t *lenp, loff_t *ppos)
70763 {
70764 @@ -2515,5 +2607,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
70765 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70766 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70767 EXPORT_SYMBOL(proc_dostring);
70768 +EXPORT_SYMBOL(proc_dostring_modpriv);
70769 EXPORT_SYMBOL(proc_doulongvec_minmax);
70770 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70771 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
70772 index a650694..aaeeb20 100644
70773 --- a/kernel/sysctl_binary.c
70774 +++ b/kernel/sysctl_binary.c
70775 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
70776 int i;
70777
70778 set_fs(KERNEL_DS);
70779 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70780 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70781 set_fs(old_fs);
70782 if (result < 0)
70783 goto out_kfree;
70784 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
70785 }
70786
70787 set_fs(KERNEL_DS);
70788 - result = vfs_write(file, buffer, str - buffer, &pos);
70789 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70790 set_fs(old_fs);
70791 if (result < 0)
70792 goto out_kfree;
70793 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
70794 int i;
70795
70796 set_fs(KERNEL_DS);
70797 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70798 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70799 set_fs(old_fs);
70800 if (result < 0)
70801 goto out_kfree;
70802 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
70803 }
70804
70805 set_fs(KERNEL_DS);
70806 - result = vfs_write(file, buffer, str - buffer, &pos);
70807 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70808 set_fs(old_fs);
70809 if (result < 0)
70810 goto out_kfree;
70811 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
70812 int i;
70813
70814 set_fs(KERNEL_DS);
70815 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70816 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70817 set_fs(old_fs);
70818 if (result < 0)
70819 goto out;
70820 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70821 __le16 dnaddr;
70822
70823 set_fs(KERNEL_DS);
70824 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70825 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70826 set_fs(old_fs);
70827 if (result < 0)
70828 goto out;
70829 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70830 le16_to_cpu(dnaddr) & 0x3ff);
70831
70832 set_fs(KERNEL_DS);
70833 - result = vfs_write(file, buf, len, &pos);
70834 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70835 set_fs(old_fs);
70836 if (result < 0)
70837 goto out;
70838 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70839 index e660464..c8b9e67 100644
70840 --- a/kernel/taskstats.c
70841 +++ b/kernel/taskstats.c
70842 @@ -27,9 +27,12 @@
70843 #include <linux/cgroup.h>
70844 #include <linux/fs.h>
70845 #include <linux/file.h>
70846 +#include <linux/grsecurity.h>
70847 #include <net/genetlink.h>
70848 #include <linux/atomic.h>
70849
70850 +extern int gr_is_taskstats_denied(int pid);
70851 +
70852 /*
70853 * Maximum length of a cpumask that can be specified in
70854 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70855 @@ -556,6 +559,9 @@ err:
70856
70857 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70858 {
70859 + if (gr_is_taskstats_denied(current->pid))
70860 + return -EACCES;
70861 +
70862 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70863 return cmd_attr_register_cpumask(info);
70864 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
70865 diff --git a/kernel/time.c b/kernel/time.c
70866 index ba744cf..267b7c5 100644
70867 --- a/kernel/time.c
70868 +++ b/kernel/time.c
70869 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70870 return error;
70871
70872 if (tz) {
70873 + /* we log in do_settimeofday called below, so don't log twice
70874 + */
70875 + if (!tv)
70876 + gr_log_timechange();
70877 +
70878 sys_tz = *tz;
70879 update_vsyscall_tz();
70880 if (firsttime) {
70881 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
70882 index aa27d39..34d221c 100644
70883 --- a/kernel/time/alarmtimer.c
70884 +++ b/kernel/time/alarmtimer.c
70885 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
70886 struct platform_device *pdev;
70887 int error = 0;
70888 int i;
70889 - struct k_clock alarm_clock = {
70890 + static struct k_clock alarm_clock = {
70891 .clock_getres = alarm_clock_getres,
70892 .clock_get = alarm_clock_get,
70893 .timer_create = alarm_timer_create,
70894 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
70895 index f113755..ec24223 100644
70896 --- a/kernel/time/tick-broadcast.c
70897 +++ b/kernel/time/tick-broadcast.c
70898 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
70899 * then clear the broadcast bit.
70900 */
70901 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70902 - int cpu = smp_processor_id();
70903 + cpu = smp_processor_id();
70904
70905 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70906 tick_broadcast_clear_oneshot(cpu);
70907 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
70908 index 3447cfa..291806b 100644
70909 --- a/kernel/time/timekeeping.c
70910 +++ b/kernel/time/timekeeping.c
70911 @@ -14,6 +14,7 @@
70912 #include <linux/init.h>
70913 #include <linux/mm.h>
70914 #include <linux/sched.h>
70915 +#include <linux/grsecurity.h>
70916 #include <linux/syscore_ops.h>
70917 #include <linux/clocksource.h>
70918 #include <linux/jiffies.h>
70919 @@ -387,6 +388,8 @@ int do_settimeofday(const struct timespec *tv)
70920 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70921 return -EINVAL;
70922
70923 + gr_log_timechange();
70924 +
70925 write_seqlock_irqsave(&timekeeper.lock, flags);
70926
70927 timekeeping_forward_now();
70928 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70929 index 3258455..f35227d 100644
70930 --- a/kernel/time/timer_list.c
70931 +++ b/kernel/time/timer_list.c
70932 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
70933
70934 static void print_name_offset(struct seq_file *m, void *sym)
70935 {
70936 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70937 + SEQ_printf(m, "<%p>", NULL);
70938 +#else
70939 char symname[KSYM_NAME_LEN];
70940
70941 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70942 SEQ_printf(m, "<%pK>", sym);
70943 else
70944 SEQ_printf(m, "%s", symname);
70945 +#endif
70946 }
70947
70948 static void
70949 @@ -112,7 +116,11 @@ next_one:
70950 static void
70951 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70952 {
70953 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70954 + SEQ_printf(m, " .base: %p\n", NULL);
70955 +#else
70956 SEQ_printf(m, " .base: %pK\n", base);
70957 +#endif
70958 SEQ_printf(m, " .index: %d\n",
70959 base->index);
70960 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70961 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
70962 {
70963 struct proc_dir_entry *pe;
70964
70965 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70966 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70967 +#else
70968 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70969 +#endif
70970 if (!pe)
70971 return -ENOMEM;
70972 return 0;
70973 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
70974 index 0b537f2..9e71eca 100644
70975 --- a/kernel/time/timer_stats.c
70976 +++ b/kernel/time/timer_stats.c
70977 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70978 static unsigned long nr_entries;
70979 static struct entry entries[MAX_ENTRIES];
70980
70981 -static atomic_t overflow_count;
70982 +static atomic_unchecked_t overflow_count;
70983
70984 /*
70985 * The entries are in a hash-table, for fast lookup:
70986 @@ -140,7 +140,7 @@ static void reset_entries(void)
70987 nr_entries = 0;
70988 memset(entries, 0, sizeof(entries));
70989 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70990 - atomic_set(&overflow_count, 0);
70991 + atomic_set_unchecked(&overflow_count, 0);
70992 }
70993
70994 static struct entry *alloc_entry(void)
70995 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70996 if (likely(entry))
70997 entry->count++;
70998 else
70999 - atomic_inc(&overflow_count);
71000 + atomic_inc_unchecked(&overflow_count);
71001
71002 out_unlock:
71003 raw_spin_unlock_irqrestore(lock, flags);
71004 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
71005
71006 static void print_name_offset(struct seq_file *m, unsigned long addr)
71007 {
71008 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71009 + seq_printf(m, "<%p>", NULL);
71010 +#else
71011 char symname[KSYM_NAME_LEN];
71012
71013 if (lookup_symbol_name(addr, symname) < 0)
71014 seq_printf(m, "<%p>", (void *)addr);
71015 else
71016 seq_printf(m, "%s", symname);
71017 +#endif
71018 }
71019
71020 static int tstats_show(struct seq_file *m, void *v)
71021 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
71022
71023 seq_puts(m, "Timer Stats Version: v0.2\n");
71024 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
71025 - if (atomic_read(&overflow_count))
71026 + if (atomic_read_unchecked(&overflow_count))
71027 seq_printf(m, "Overflow: %d entries\n",
71028 - atomic_read(&overflow_count));
71029 + atomic_read_unchecked(&overflow_count));
71030
71031 for (i = 0; i < nr_entries; i++) {
71032 entry = entries + i;
71033 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
71034 {
71035 struct proc_dir_entry *pe;
71036
71037 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71038 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
71039 +#else
71040 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
71041 +#endif
71042 if (!pe)
71043 return -ENOMEM;
71044 return 0;
71045 diff --git a/kernel/timer.c b/kernel/timer.c
71046 index 6ec7e7e..cbc448b 100644
71047 --- a/kernel/timer.c
71048 +++ b/kernel/timer.c
71049 @@ -1362,7 +1362,7 @@ void update_process_times(int user_tick)
71050 /*
71051 * This function runs timers and the timer-tq in bottom half context.
71052 */
71053 -static void run_timer_softirq(struct softirq_action *h)
71054 +static void run_timer_softirq(void)
71055 {
71056 struct tvec_base *base = __this_cpu_read(tvec_bases);
71057
71058 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
71059 index c0bd030..62a1927 100644
71060 --- a/kernel/trace/blktrace.c
71061 +++ b/kernel/trace/blktrace.c
71062 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
71063 struct blk_trace *bt = filp->private_data;
71064 char buf[16];
71065
71066 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
71067 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
71068
71069 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
71070 }
71071 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
71072 return 1;
71073
71074 bt = buf->chan->private_data;
71075 - atomic_inc(&bt->dropped);
71076 + atomic_inc_unchecked(&bt->dropped);
71077 return 0;
71078 }
71079
71080 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
71081
71082 bt->dir = dir;
71083 bt->dev = dev;
71084 - atomic_set(&bt->dropped, 0);
71085 + atomic_set_unchecked(&bt->dropped, 0);
71086
71087 ret = -EIO;
71088 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
71089 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
71090 index a008663..30d7429 100644
71091 --- a/kernel/trace/ftrace.c
71092 +++ b/kernel/trace/ftrace.c
71093 @@ -1785,12 +1785,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
71094 if (unlikely(ftrace_disabled))
71095 return 0;
71096
71097 + ret = ftrace_arch_code_modify_prepare();
71098 + FTRACE_WARN_ON(ret);
71099 + if (ret)
71100 + return 0;
71101 +
71102 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
71103 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
71104 if (ret) {
71105 ftrace_bug(ret, ip);
71106 - return 0;
71107 }
71108 - return 1;
71109 + return ret ? 0 : 1;
71110 }
71111
71112 /*
71113 @@ -2885,7 +2890,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
71114
71115 int
71116 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
71117 - void *data)
71118 + void *data)
71119 {
71120 struct ftrace_func_probe *entry;
71121 struct ftrace_page *pg;
71122 @@ -3697,8 +3702,10 @@ static int ftrace_process_locs(struct module *mod,
71123 if (!count)
71124 return 0;
71125
71126 + pax_open_kernel();
71127 sort(start, count, sizeof(*start),
71128 ftrace_cmp_ips, ftrace_swap_ips);
71129 + pax_close_kernel();
71130
71131 start_pg = ftrace_allocate_pages(count);
71132 if (!start_pg)
71133 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
71134 index a7fa070..403bc8d 100644
71135 --- a/kernel/trace/trace.c
71136 +++ b/kernel/trace/trace.c
71137 @@ -4421,10 +4421,9 @@ static const struct file_operations tracing_dyn_info_fops = {
71138 };
71139 #endif
71140
71141 -static struct dentry *d_tracer;
71142 -
71143 struct dentry *tracing_init_dentry(void)
71144 {
71145 + static struct dentry *d_tracer;
71146 static int once;
71147
71148 if (d_tracer)
71149 @@ -4444,10 +4443,9 @@ struct dentry *tracing_init_dentry(void)
71150 return d_tracer;
71151 }
71152
71153 -static struct dentry *d_percpu;
71154 -
71155 struct dentry *tracing_dentry_percpu(void)
71156 {
71157 + static struct dentry *d_percpu;
71158 static int once;
71159 struct dentry *d_tracer;
71160
71161 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
71162 index 29111da..d190fe2 100644
71163 --- a/kernel/trace/trace_events.c
71164 +++ b/kernel/trace/trace_events.c
71165 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
71166 struct ftrace_module_file_ops {
71167 struct list_head list;
71168 struct module *mod;
71169 - struct file_operations id;
71170 - struct file_operations enable;
71171 - struct file_operations format;
71172 - struct file_operations filter;
71173 };
71174
71175 static struct ftrace_module_file_ops *
71176 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
71177
71178 file_ops->mod = mod;
71179
71180 - file_ops->id = ftrace_event_id_fops;
71181 - file_ops->id.owner = mod;
71182 -
71183 - file_ops->enable = ftrace_enable_fops;
71184 - file_ops->enable.owner = mod;
71185 -
71186 - file_ops->filter = ftrace_event_filter_fops;
71187 - file_ops->filter.owner = mod;
71188 -
71189 - file_ops->format = ftrace_event_format_fops;
71190 - file_ops->format.owner = mod;
71191 + pax_open_kernel();
71192 + *(void **)&mod->trace_id.owner = mod;
71193 + *(void **)&mod->trace_enable.owner = mod;
71194 + *(void **)&mod->trace_filter.owner = mod;
71195 + *(void **)&mod->trace_format.owner = mod;
71196 + pax_close_kernel();
71197
71198 list_add(&file_ops->list, &ftrace_module_file_list);
71199
71200 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
71201
71202 for_each_event(call, start, end) {
71203 __trace_add_event_call(*call, mod,
71204 - &file_ops->id, &file_ops->enable,
71205 - &file_ops->filter, &file_ops->format);
71206 + &mod->trace_id, &mod->trace_enable,
71207 + &mod->trace_filter, &mod->trace_format);
71208 }
71209 }
71210
71211 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
71212 index fd3c8aa..5f324a6 100644
71213 --- a/kernel/trace/trace_mmiotrace.c
71214 +++ b/kernel/trace/trace_mmiotrace.c
71215 @@ -24,7 +24,7 @@ struct header_iter {
71216 static struct trace_array *mmio_trace_array;
71217 static bool overrun_detected;
71218 static unsigned long prev_overruns;
71219 -static atomic_t dropped_count;
71220 +static atomic_unchecked_t dropped_count;
71221
71222 static void mmio_reset_data(struct trace_array *tr)
71223 {
71224 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
71225
71226 static unsigned long count_overruns(struct trace_iterator *iter)
71227 {
71228 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
71229 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
71230 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
71231
71232 if (over > prev_overruns)
71233 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
71234 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
71235 sizeof(*entry), 0, pc);
71236 if (!event) {
71237 - atomic_inc(&dropped_count);
71238 + atomic_inc_unchecked(&dropped_count);
71239 return;
71240 }
71241 entry = ring_buffer_event_data(event);
71242 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
71243 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
71244 sizeof(*entry), 0, pc);
71245 if (!event) {
71246 - atomic_inc(&dropped_count);
71247 + atomic_inc_unchecked(&dropped_count);
71248 return;
71249 }
71250 entry = ring_buffer_event_data(event);
71251 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
71252 index df611a0..10d8b32 100644
71253 --- a/kernel/trace/trace_output.c
71254 +++ b/kernel/trace/trace_output.c
71255 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
71256
71257 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
71258 if (!IS_ERR(p)) {
71259 - p = mangle_path(s->buffer + s->len, p, "\n");
71260 + p = mangle_path(s->buffer + s->len, p, "\n\\");
71261 if (p) {
71262 s->len = p - s->buffer;
71263 return 1;
71264 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
71265 index d4545f4..a9010a1 100644
71266 --- a/kernel/trace/trace_stack.c
71267 +++ b/kernel/trace/trace_stack.c
71268 @@ -53,7 +53,7 @@ static inline void check_stack(void)
71269 return;
71270
71271 /* we do not handle interrupt stacks yet */
71272 - if (!object_is_on_stack(&this_size))
71273 + if (!object_starts_on_stack(&this_size))
71274 return;
71275
71276 local_irq_save(flags);
71277 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
71278 index ff5bdee..3eaeba6 100644
71279 --- a/lib/Kconfig.debug
71280 +++ b/lib/Kconfig.debug
71281 @@ -1165,6 +1165,7 @@ config LATENCYTOP
71282 depends on DEBUG_KERNEL
71283 depends on STACKTRACE_SUPPORT
71284 depends on PROC_FS
71285 + depends on !GRKERNSEC_HIDESYM
71286 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
71287 select KALLSYMS
71288 select KALLSYMS_ALL
71289 diff --git a/lib/bitmap.c b/lib/bitmap.c
71290 index 06fdfa1..97c5c7d 100644
71291 --- a/lib/bitmap.c
71292 +++ b/lib/bitmap.c
71293 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
71294 {
71295 int c, old_c, totaldigits, ndigits, nchunks, nbits;
71296 u32 chunk;
71297 - const char __user __force *ubuf = (const char __user __force *)buf;
71298 + const char __user *ubuf = (const char __force_user *)buf;
71299
71300 bitmap_zero(maskp, nmaskbits);
71301
71302 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
71303 {
71304 if (!access_ok(VERIFY_READ, ubuf, ulen))
71305 return -EFAULT;
71306 - return __bitmap_parse((const char __force *)ubuf,
71307 + return __bitmap_parse((const char __force_kernel *)ubuf,
71308 ulen, 1, maskp, nmaskbits);
71309
71310 }
71311 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
71312 {
71313 unsigned a, b;
71314 int c, old_c, totaldigits;
71315 - const char __user __force *ubuf = (const char __user __force *)buf;
71316 + const char __user *ubuf = (const char __force_user *)buf;
71317 int exp_digit, in_range;
71318
71319 totaldigits = c = 0;
71320 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
71321 {
71322 if (!access_ok(VERIFY_READ, ubuf, ulen))
71323 return -EFAULT;
71324 - return __bitmap_parselist((const char __force *)ubuf,
71325 + return __bitmap_parselist((const char __force_kernel *)ubuf,
71326 ulen, 1, maskp, nmaskbits);
71327 }
71328 EXPORT_SYMBOL(bitmap_parselist_user);
71329 diff --git a/lib/bug.c b/lib/bug.c
71330 index a28c141..2bd3d95 100644
71331 --- a/lib/bug.c
71332 +++ b/lib/bug.c
71333 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
71334 return BUG_TRAP_TYPE_NONE;
71335
71336 bug = find_bug(bugaddr);
71337 + if (!bug)
71338 + return BUG_TRAP_TYPE_NONE;
71339
71340 file = NULL;
71341 line = 0;
71342 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
71343 index d11808c..dc2d6f8 100644
71344 --- a/lib/debugobjects.c
71345 +++ b/lib/debugobjects.c
71346 @@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
71347 if (limit > 4)
71348 return;
71349
71350 - is_on_stack = object_is_on_stack(addr);
71351 + is_on_stack = object_starts_on_stack(addr);
71352 if (is_on_stack == onstack)
71353 return;
71354
71355 diff --git a/lib/devres.c b/lib/devres.c
71356 index 80b9c76..9e32279 100644
71357 --- a/lib/devres.c
71358 +++ b/lib/devres.c
71359 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
71360 void devm_iounmap(struct device *dev, void __iomem *addr)
71361 {
71362 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
71363 - (void *)addr));
71364 + (void __force *)addr));
71365 iounmap(addr);
71366 }
71367 EXPORT_SYMBOL(devm_iounmap);
71368 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
71369 {
71370 ioport_unmap(addr);
71371 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
71372 - devm_ioport_map_match, (void *)addr));
71373 + devm_ioport_map_match, (void __force *)addr));
71374 }
71375 EXPORT_SYMBOL(devm_ioport_unmap);
71376
71377 diff --git a/lib/digsig.c b/lib/digsig.c
71378 index 286d558..8c0e629 100644
71379 --- a/lib/digsig.c
71380 +++ b/lib/digsig.c
71381 @@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
71382 memcpy(out1 + head, p, l);
71383
71384 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
71385 + if (err)
71386 + goto err;
71387
71388 - if (!err && len == hlen)
71389 - err = memcmp(out2, h, hlen);
71390 + if (len != hlen || memcmp(out2, h, hlen))
71391 + err = -EINVAL;
71392
71393 err:
71394 mpi_free(in);
71395 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
71396 index 66ce414..6f0a0dd 100644
71397 --- a/lib/dma-debug.c
71398 +++ b/lib/dma-debug.c
71399 @@ -924,7 +924,7 @@ out:
71400
71401 static void check_for_stack(struct device *dev, void *addr)
71402 {
71403 - if (object_is_on_stack(addr))
71404 + if (object_starts_on_stack(addr))
71405 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
71406 "stack [addr=%p]\n", addr);
71407 }
71408 diff --git a/lib/inflate.c b/lib/inflate.c
71409 index 013a761..c28f3fc 100644
71410 --- a/lib/inflate.c
71411 +++ b/lib/inflate.c
71412 @@ -269,7 +269,7 @@ static void free(void *where)
71413 malloc_ptr = free_mem_ptr;
71414 }
71415 #else
71416 -#define malloc(a) kmalloc(a, GFP_KERNEL)
71417 +#define malloc(a) kmalloc((a), GFP_KERNEL)
71418 #define free(a) kfree(a)
71419 #endif
71420
71421 diff --git a/lib/ioremap.c b/lib/ioremap.c
71422 index 0c9216c..863bd89 100644
71423 --- a/lib/ioremap.c
71424 +++ b/lib/ioremap.c
71425 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
71426 unsigned long next;
71427
71428 phys_addr -= addr;
71429 - pmd = pmd_alloc(&init_mm, pud, addr);
71430 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
71431 if (!pmd)
71432 return -ENOMEM;
71433 do {
71434 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
71435 unsigned long next;
71436
71437 phys_addr -= addr;
71438 - pud = pud_alloc(&init_mm, pgd, addr);
71439 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
71440 if (!pud)
71441 return -ENOMEM;
71442 do {
71443 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
71444 index bd2bea9..6b3c95e 100644
71445 --- a/lib/is_single_threaded.c
71446 +++ b/lib/is_single_threaded.c
71447 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
71448 struct task_struct *p, *t;
71449 bool ret;
71450
71451 + if (!mm)
71452 + return true;
71453 +
71454 if (atomic_read(&task->signal->live) != 1)
71455 return false;
71456
71457 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
71458 index e796429..6e38f9f 100644
71459 --- a/lib/radix-tree.c
71460 +++ b/lib/radix-tree.c
71461 @@ -92,7 +92,7 @@ struct radix_tree_preload {
71462 int nr;
71463 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
71464 };
71465 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
71466 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
71467
71468 static inline void *ptr_to_indirect(void *ptr)
71469 {
71470 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
71471 index 598a73e..5c5aeb5 100644
71472 --- a/lib/vsprintf.c
71473 +++ b/lib/vsprintf.c
71474 @@ -16,6 +16,9 @@
71475 * - scnprintf and vscnprintf
71476 */
71477
71478 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71479 +#define __INCLUDED_BY_HIDESYM 1
71480 +#endif
71481 #include <stdarg.h>
71482 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
71483 #include <linux/types.h>
71484 @@ -536,7 +539,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
71485 char sym[KSYM_SYMBOL_LEN];
71486 if (ext == 'B')
71487 sprint_backtrace(sym, value);
71488 - else if (ext != 'f' && ext != 's')
71489 + else if (ext != 'f' && ext != 's' && ext != 'a')
71490 sprint_symbol(sym, value);
71491 else
71492 sprint_symbol_no_offset(sym, value);
71493 @@ -912,7 +915,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
71494 return number(buf, end, *(const netdev_features_t *)addr, spec);
71495 }
71496
71497 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71498 +int kptr_restrict __read_mostly = 2;
71499 +#else
71500 int kptr_restrict __read_mostly;
71501 +#endif
71502
71503 /*
71504 * Show a '%p' thing. A kernel extension is that the '%p' is followed
71505 @@ -926,6 +933,8 @@ int kptr_restrict __read_mostly;
71506 * - 'S' For symbolic direct pointers with offset
71507 * - 's' For symbolic direct pointers without offset
71508 * - 'B' For backtraced symbolic direct pointers with offset
71509 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
71510 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
71511 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
71512 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
71513 * - 'M' For a 6-byte MAC address, it prints the address in the
71514 @@ -973,12 +982,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71515
71516 if (!ptr && *fmt != 'K') {
71517 /*
71518 - * Print (null) with the same width as a pointer so it makes
71519 + * Print (nil) with the same width as a pointer so it makes
71520 * tabular output look nice.
71521 */
71522 if (spec.field_width == -1)
71523 spec.field_width = default_width;
71524 - return string(buf, end, "(null)", spec);
71525 + return string(buf, end, "(nil)", spec);
71526 }
71527
71528 switch (*fmt) {
71529 @@ -988,6 +997,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71530 /* Fallthrough */
71531 case 'S':
71532 case 's':
71533 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71534 + break;
71535 +#else
71536 + return symbol_string(buf, end, ptr, spec, *fmt);
71537 +#endif
71538 + case 'A':
71539 + case 'a':
71540 case 'B':
71541 return symbol_string(buf, end, ptr, spec, *fmt);
71542 case 'R':
71543 @@ -1025,6 +1041,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71544 va_end(va);
71545 return buf;
71546 }
71547 + case 'P':
71548 + break;
71549 case 'K':
71550 /*
71551 * %pK cannot be used in IRQ context because its test
71552 @@ -1048,6 +1066,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71553 }
71554 break;
71555 }
71556 +
71557 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71558 + /* 'P' = approved pointers to copy to userland,
71559 + as in the /proc/kallsyms case, as we make it display nothing
71560 + for non-root users, and the real contents for root users
71561 + Also ignore 'K' pointers, since we force their NULLing for non-root users
71562 + above
71563 + */
71564 + if (ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
71565 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
71566 + dump_stack();
71567 + ptr = NULL;
71568 + }
71569 +#endif
71570 +
71571 spec.flags |= SMALL;
71572 if (spec.field_width == -1) {
71573 spec.field_width = default_width;
71574 @@ -1759,11 +1792,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71575 typeof(type) value; \
71576 if (sizeof(type) == 8) { \
71577 args = PTR_ALIGN(args, sizeof(u32)); \
71578 - *(u32 *)&value = *(u32 *)args; \
71579 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
71580 + *(u32 *)&value = *(const u32 *)args; \
71581 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
71582 } else { \
71583 args = PTR_ALIGN(args, sizeof(type)); \
71584 - value = *(typeof(type) *)args; \
71585 + value = *(const typeof(type) *)args; \
71586 } \
71587 args += sizeof(type); \
71588 value; \
71589 @@ -1826,7 +1859,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71590 case FORMAT_TYPE_STR: {
71591 const char *str_arg = args;
71592 args += strlen(str_arg) + 1;
71593 - str = string(str, end, (char *)str_arg, spec);
71594 + str = string(str, end, str_arg, spec);
71595 break;
71596 }
71597
71598 diff --git a/localversion-grsec b/localversion-grsec
71599 new file mode 100644
71600 index 0000000..7cd6065
71601 --- /dev/null
71602 +++ b/localversion-grsec
71603 @@ -0,0 +1 @@
71604 +-grsec
71605 diff --git a/mm/Kconfig b/mm/Kconfig
71606 index 82fed4e..979e814 100644
71607 --- a/mm/Kconfig
71608 +++ b/mm/Kconfig
71609 @@ -247,10 +247,10 @@ config KSM
71610 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
71611
71612 config DEFAULT_MMAP_MIN_ADDR
71613 - int "Low address space to protect from user allocation"
71614 + int "Low address space to protect from user allocation"
71615 depends on MMU
71616 - default 4096
71617 - help
71618 + default 65536
71619 + help
71620 This is the portion of low virtual memory which should be protected
71621 from userspace allocation. Keeping a user from writing to low pages
71622 can help reduce the impact of kernel NULL pointer bugs.
71623 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
71624
71625 config HWPOISON_INJECT
71626 tristate "HWPoison pages injector"
71627 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
71628 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
71629 select PROC_PAGE_MONITOR
71630
71631 config NOMMU_INITIAL_TRIM_EXCESS
71632 diff --git a/mm/filemap.c b/mm/filemap.c
71633 index a4a5260..6151dc5 100644
71634 --- a/mm/filemap.c
71635 +++ b/mm/filemap.c
71636 @@ -1723,7 +1723,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
71637 struct address_space *mapping = file->f_mapping;
71638
71639 if (!mapping->a_ops->readpage)
71640 - return -ENOEXEC;
71641 + return -ENODEV;
71642 file_accessed(file);
71643 vma->vm_ops = &generic_file_vm_ops;
71644 vma->vm_flags |= VM_CAN_NONLINEAR;
71645 @@ -2064,6 +2064,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
71646 *pos = i_size_read(inode);
71647
71648 if (limit != RLIM_INFINITY) {
71649 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71650 if (*pos >= limit) {
71651 send_sig(SIGXFSZ, current, 0);
71652 return -EFBIG;
71653 diff --git a/mm/fremap.c b/mm/fremap.c
71654 index 9ed4fd4..c42648d 100644
71655 --- a/mm/fremap.c
71656 +++ b/mm/fremap.c
71657 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
71658 retry:
71659 vma = find_vma(mm, start);
71660
71661 +#ifdef CONFIG_PAX_SEGMEXEC
71662 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71663 + goto out;
71664 +#endif
71665 +
71666 /*
71667 * Make sure the vma is shared, that it supports prefaulting,
71668 * and that the remapped range is valid and fully within
71669 diff --git a/mm/highmem.c b/mm/highmem.c
71670 index 57d82c6..e9e0552 100644
71671 --- a/mm/highmem.c
71672 +++ b/mm/highmem.c
71673 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
71674 * So no dangers, even with speculative execution.
71675 */
71676 page = pte_page(pkmap_page_table[i]);
71677 + pax_open_kernel();
71678 pte_clear(&init_mm, (unsigned long)page_address(page),
71679 &pkmap_page_table[i]);
71680 -
71681 + pax_close_kernel();
71682 set_page_address(page, NULL);
71683 need_flush = 1;
71684 }
71685 @@ -186,9 +187,11 @@ start:
71686 }
71687 }
71688 vaddr = PKMAP_ADDR(last_pkmap_nr);
71689 +
71690 + pax_open_kernel();
71691 set_pte_at(&init_mm, vaddr,
71692 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
71693 -
71694 + pax_close_kernel();
71695 pkmap_count[last_pkmap_nr] = 1;
71696 set_page_address(page, (void *)vaddr);
71697
71698 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
71699 index 57c4b93..24b8f59 100644
71700 --- a/mm/huge_memory.c
71701 +++ b/mm/huge_memory.c
71702 @@ -735,7 +735,7 @@ out:
71703 * run pte_offset_map on the pmd, if an huge pmd could
71704 * materialize from under us from a different thread.
71705 */
71706 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
71707 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71708 return VM_FAULT_OOM;
71709 /* if an huge pmd materialized from under us just retry later */
71710 if (unlikely(pmd_trans_huge(*pmd)))
71711 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
71712 index 19558df..f7743b3 100644
71713 --- a/mm/hugetlb.c
71714 +++ b/mm/hugetlb.c
71715 @@ -2463,6 +2463,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
71716 return 1;
71717 }
71718
71719 +#ifdef CONFIG_PAX_SEGMEXEC
71720 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71721 +{
71722 + struct mm_struct *mm = vma->vm_mm;
71723 + struct vm_area_struct *vma_m;
71724 + unsigned long address_m;
71725 + pte_t *ptep_m;
71726 +
71727 + vma_m = pax_find_mirror_vma(vma);
71728 + if (!vma_m)
71729 + return;
71730 +
71731 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71732 + address_m = address + SEGMEXEC_TASK_SIZE;
71733 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71734 + get_page(page_m);
71735 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
71736 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71737 +}
71738 +#endif
71739 +
71740 /*
71741 * Hugetlb_cow() should be called with page lock of the original hugepage held.
71742 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
71743 @@ -2575,6 +2596,11 @@ retry_avoidcopy:
71744 make_huge_pte(vma, new_page, 1));
71745 page_remove_rmap(old_page);
71746 hugepage_add_new_anon_rmap(new_page, vma, address);
71747 +
71748 +#ifdef CONFIG_PAX_SEGMEXEC
71749 + pax_mirror_huge_pte(vma, address, new_page);
71750 +#endif
71751 +
71752 /* Make the old page be freed below */
71753 new_page = old_page;
71754 mmu_notifier_invalidate_range_end(mm,
71755 @@ -2729,6 +2755,10 @@ retry:
71756 && (vma->vm_flags & VM_SHARED)));
71757 set_huge_pte_at(mm, address, ptep, new_pte);
71758
71759 +#ifdef CONFIG_PAX_SEGMEXEC
71760 + pax_mirror_huge_pte(vma, address, page);
71761 +#endif
71762 +
71763 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71764 /* Optimization, do the COW without a second fault */
71765 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
71766 @@ -2758,6 +2788,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71767 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71768 struct hstate *h = hstate_vma(vma);
71769
71770 +#ifdef CONFIG_PAX_SEGMEXEC
71771 + struct vm_area_struct *vma_m;
71772 +#endif
71773 +
71774 address &= huge_page_mask(h);
71775
71776 ptep = huge_pte_offset(mm, address);
71777 @@ -2771,6 +2805,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71778 VM_FAULT_SET_HINDEX(h - hstates);
71779 }
71780
71781 +#ifdef CONFIG_PAX_SEGMEXEC
71782 + vma_m = pax_find_mirror_vma(vma);
71783 + if (vma_m) {
71784 + unsigned long address_m;
71785 +
71786 + if (vma->vm_start > vma_m->vm_start) {
71787 + address_m = address;
71788 + address -= SEGMEXEC_TASK_SIZE;
71789 + vma = vma_m;
71790 + h = hstate_vma(vma);
71791 + } else
71792 + address_m = address + SEGMEXEC_TASK_SIZE;
71793 +
71794 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71795 + return VM_FAULT_OOM;
71796 + address_m &= HPAGE_MASK;
71797 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71798 + }
71799 +#endif
71800 +
71801 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71802 if (!ptep)
71803 return VM_FAULT_OOM;
71804 diff --git a/mm/internal.h b/mm/internal.h
71805 index 8052379..47029d1 100644
71806 --- a/mm/internal.h
71807 +++ b/mm/internal.h
71808 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
71809 * in mm/page_alloc.c
71810 */
71811 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71812 +extern void free_compound_page(struct page *page);
71813 extern void prep_compound_page(struct page *page, unsigned long order);
71814 #ifdef CONFIG_MEMORY_FAILURE
71815 extern bool is_free_buddy_page(struct page *page);
71816 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
71817 index 45eb621..6ccd8ea 100644
71818 --- a/mm/kmemleak.c
71819 +++ b/mm/kmemleak.c
71820 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
71821
71822 for (i = 0; i < object->trace_len; i++) {
71823 void *ptr = (void *)object->trace[i];
71824 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71825 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71826 }
71827 }
71828
71829 diff --git a/mm/maccess.c b/mm/maccess.c
71830 index d53adf9..03a24bf 100644
71831 --- a/mm/maccess.c
71832 +++ b/mm/maccess.c
71833 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
71834 set_fs(KERNEL_DS);
71835 pagefault_disable();
71836 ret = __copy_from_user_inatomic(dst,
71837 - (__force const void __user *)src, size);
71838 + (const void __force_user *)src, size);
71839 pagefault_enable();
71840 set_fs(old_fs);
71841
71842 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
71843
71844 set_fs(KERNEL_DS);
71845 pagefault_disable();
71846 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71847 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71848 pagefault_enable();
71849 set_fs(old_fs);
71850
71851 diff --git a/mm/madvise.c b/mm/madvise.c
71852 index 14d260f..b2a80fd 100644
71853 --- a/mm/madvise.c
71854 +++ b/mm/madvise.c
71855 @@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
71856 pgoff_t pgoff;
71857 unsigned long new_flags = vma->vm_flags;
71858
71859 +#ifdef CONFIG_PAX_SEGMEXEC
71860 + struct vm_area_struct *vma_m;
71861 +#endif
71862 +
71863 switch (behavior) {
71864 case MADV_NORMAL:
71865 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71866 @@ -119,6 +123,13 @@ success:
71867 /*
71868 * vm_flags is protected by the mmap_sem held in write mode.
71869 */
71870 +
71871 +#ifdef CONFIG_PAX_SEGMEXEC
71872 + vma_m = pax_find_mirror_vma(vma);
71873 + if (vma_m)
71874 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71875 +#endif
71876 +
71877 vma->vm_flags = new_flags;
71878
71879 out:
71880 @@ -177,6 +188,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71881 struct vm_area_struct ** prev,
71882 unsigned long start, unsigned long end)
71883 {
71884 +
71885 +#ifdef CONFIG_PAX_SEGMEXEC
71886 + struct vm_area_struct *vma_m;
71887 +#endif
71888 +
71889 *prev = vma;
71890 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71891 return -EINVAL;
71892 @@ -189,6 +205,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71893 zap_page_range(vma, start, end - start, &details);
71894 } else
71895 zap_page_range(vma, start, end - start, NULL);
71896 +
71897 +#ifdef CONFIG_PAX_SEGMEXEC
71898 + vma_m = pax_find_mirror_vma(vma);
71899 + if (vma_m) {
71900 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71901 + struct zap_details details = {
71902 + .nonlinear_vma = vma_m,
71903 + .last_index = ULONG_MAX,
71904 + };
71905 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71906 + } else
71907 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71908 + }
71909 +#endif
71910 +
71911 return 0;
71912 }
71913
71914 @@ -393,6 +424,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
71915 if (end < start)
71916 goto out;
71917
71918 +#ifdef CONFIG_PAX_SEGMEXEC
71919 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71920 + if (end > SEGMEXEC_TASK_SIZE)
71921 + goto out;
71922 + } else
71923 +#endif
71924 +
71925 + if (end > TASK_SIZE)
71926 + goto out;
71927 +
71928 error = 0;
71929 if (end == start)
71930 goto out;
71931 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
71932 index 6de0d61..da836cf 100644
71933 --- a/mm/memory-failure.c
71934 +++ b/mm/memory-failure.c
71935 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
71936
71937 int sysctl_memory_failure_recovery __read_mostly = 1;
71938
71939 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71940 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71941
71942 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71943
71944 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
71945 pfn, t->comm, t->pid);
71946 si.si_signo = SIGBUS;
71947 si.si_errno = 0;
71948 - si.si_addr = (void *)addr;
71949 + si.si_addr = (void __user *)addr;
71950 #ifdef __ARCH_SI_TRAPNO
71951 si.si_trapno = trapno;
71952 #endif
71953 @@ -1038,7 +1038,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71954 }
71955
71956 nr_pages = 1 << compound_trans_order(hpage);
71957 - atomic_long_add(nr_pages, &mce_bad_pages);
71958 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71959
71960 /*
71961 * We need/can do nothing about count=0 pages.
71962 @@ -1068,7 +1068,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71963 if (!PageHWPoison(hpage)
71964 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71965 || (p != hpage && TestSetPageHWPoison(hpage))) {
71966 - atomic_long_sub(nr_pages, &mce_bad_pages);
71967 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71968 return 0;
71969 }
71970 set_page_hwpoison_huge_page(hpage);
71971 @@ -1126,7 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71972 }
71973 if (hwpoison_filter(p)) {
71974 if (TestClearPageHWPoison(p))
71975 - atomic_long_sub(nr_pages, &mce_bad_pages);
71976 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71977 unlock_page(hpage);
71978 put_page(hpage);
71979 return 0;
71980 @@ -1321,7 +1321,7 @@ int unpoison_memory(unsigned long pfn)
71981 return 0;
71982 }
71983 if (TestClearPageHWPoison(p))
71984 - atomic_long_sub(nr_pages, &mce_bad_pages);
71985 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71986 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71987 return 0;
71988 }
71989 @@ -1335,7 +1335,7 @@ int unpoison_memory(unsigned long pfn)
71990 */
71991 if (TestClearPageHWPoison(page)) {
71992 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71993 - atomic_long_sub(nr_pages, &mce_bad_pages);
71994 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71995 freeit = 1;
71996 if (PageHuge(page))
71997 clear_page_hwpoison_huge_page(page);
71998 @@ -1448,7 +1448,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71999 }
72000 done:
72001 if (!PageHWPoison(hpage))
72002 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
72003 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
72004 set_page_hwpoison_huge_page(hpage);
72005 dequeue_hwpoisoned_huge_page(hpage);
72006 /* keep elevated page count for bad page */
72007 @@ -1579,7 +1579,7 @@ int soft_offline_page(struct page *page, int flags)
72008 return ret;
72009
72010 done:
72011 - atomic_long_add(1, &mce_bad_pages);
72012 + atomic_long_add_unchecked(1, &mce_bad_pages);
72013 SetPageHWPoison(page);
72014 /* keep elevated page count for bad page */
72015 return ret;
72016 diff --git a/mm/memory.c b/mm/memory.c
72017 index 2466d12..595ed79 100644
72018 --- a/mm/memory.c
72019 +++ b/mm/memory.c
72020 @@ -422,6 +422,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
72021 free_pte_range(tlb, pmd, addr);
72022 } while (pmd++, addr = next, addr != end);
72023
72024 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
72025 start &= PUD_MASK;
72026 if (start < floor)
72027 return;
72028 @@ -436,6 +437,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
72029 pmd = pmd_offset(pud, start);
72030 pud_clear(pud);
72031 pmd_free_tlb(tlb, pmd, start);
72032 +#endif
72033 +
72034 }
72035
72036 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
72037 @@ -455,6 +458,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
72038 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
72039 } while (pud++, addr = next, addr != end);
72040
72041 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
72042 start &= PGDIR_MASK;
72043 if (start < floor)
72044 return;
72045 @@ -469,6 +473,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
72046 pud = pud_offset(pgd, start);
72047 pgd_clear(pgd);
72048 pud_free_tlb(tlb, pud, start);
72049 +#endif
72050 +
72051 }
72052
72053 /*
72054 @@ -1602,12 +1608,6 @@ no_page_table:
72055 return page;
72056 }
72057
72058 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
72059 -{
72060 - return stack_guard_page_start(vma, addr) ||
72061 - stack_guard_page_end(vma, addr+PAGE_SIZE);
72062 -}
72063 -
72064 /**
72065 * __get_user_pages() - pin user pages in memory
72066 * @tsk: task_struct of target task
72067 @@ -1680,10 +1680,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
72068 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
72069 i = 0;
72070
72071 - do {
72072 + while (nr_pages) {
72073 struct vm_area_struct *vma;
72074
72075 - vma = find_extend_vma(mm, start);
72076 + vma = find_vma(mm, start);
72077 if (!vma && in_gate_area(mm, start)) {
72078 unsigned long pg = start & PAGE_MASK;
72079 pgd_t *pgd;
72080 @@ -1731,7 +1731,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
72081 goto next_page;
72082 }
72083
72084 - if (!vma ||
72085 + if (!vma || start < vma->vm_start ||
72086 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
72087 !(vm_flags & vma->vm_flags))
72088 return i ? : -EFAULT;
72089 @@ -1758,11 +1758,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
72090 int ret;
72091 unsigned int fault_flags = 0;
72092
72093 - /* For mlock, just skip the stack guard page. */
72094 - if (foll_flags & FOLL_MLOCK) {
72095 - if (stack_guard_page(vma, start))
72096 - goto next_page;
72097 - }
72098 if (foll_flags & FOLL_WRITE)
72099 fault_flags |= FAULT_FLAG_WRITE;
72100 if (nonblocking)
72101 @@ -1836,7 +1831,7 @@ next_page:
72102 start += PAGE_SIZE;
72103 nr_pages--;
72104 } while (nr_pages && start < vma->vm_end);
72105 - } while (nr_pages);
72106 + }
72107 return i;
72108 }
72109 EXPORT_SYMBOL(__get_user_pages);
72110 @@ -2043,6 +2038,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
72111 page_add_file_rmap(page);
72112 set_pte_at(mm, addr, pte, mk_pte(page, prot));
72113
72114 +#ifdef CONFIG_PAX_SEGMEXEC
72115 + pax_mirror_file_pte(vma, addr, page, ptl);
72116 +#endif
72117 +
72118 retval = 0;
72119 pte_unmap_unlock(pte, ptl);
72120 return retval;
72121 @@ -2077,10 +2076,22 @@ out:
72122 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
72123 struct page *page)
72124 {
72125 +
72126 +#ifdef CONFIG_PAX_SEGMEXEC
72127 + struct vm_area_struct *vma_m;
72128 +#endif
72129 +
72130 if (addr < vma->vm_start || addr >= vma->vm_end)
72131 return -EFAULT;
72132 if (!page_count(page))
72133 return -EINVAL;
72134 +
72135 +#ifdef CONFIG_PAX_SEGMEXEC
72136 + vma_m = pax_find_mirror_vma(vma);
72137 + if (vma_m)
72138 + vma_m->vm_flags |= VM_INSERTPAGE;
72139 +#endif
72140 +
72141 vma->vm_flags |= VM_INSERTPAGE;
72142 return insert_page(vma, addr, page, vma->vm_page_prot);
72143 }
72144 @@ -2166,6 +2177,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
72145 unsigned long pfn)
72146 {
72147 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
72148 + BUG_ON(vma->vm_mirror);
72149
72150 if (addr < vma->vm_start || addr >= vma->vm_end)
72151 return -EFAULT;
72152 @@ -2373,7 +2385,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
72153
72154 BUG_ON(pud_huge(*pud));
72155
72156 - pmd = pmd_alloc(mm, pud, addr);
72157 + pmd = (mm == &init_mm) ?
72158 + pmd_alloc_kernel(mm, pud, addr) :
72159 + pmd_alloc(mm, pud, addr);
72160 if (!pmd)
72161 return -ENOMEM;
72162 do {
72163 @@ -2393,7 +2407,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
72164 unsigned long next;
72165 int err;
72166
72167 - pud = pud_alloc(mm, pgd, addr);
72168 + pud = (mm == &init_mm) ?
72169 + pud_alloc_kernel(mm, pgd, addr) :
72170 + pud_alloc(mm, pgd, addr);
72171 if (!pud)
72172 return -ENOMEM;
72173 do {
72174 @@ -2481,6 +2497,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
72175 copy_user_highpage(dst, src, va, vma);
72176 }
72177
72178 +#ifdef CONFIG_PAX_SEGMEXEC
72179 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
72180 +{
72181 + struct mm_struct *mm = vma->vm_mm;
72182 + spinlock_t *ptl;
72183 + pte_t *pte, entry;
72184 +
72185 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
72186 + entry = *pte;
72187 + if (!pte_present(entry)) {
72188 + if (!pte_none(entry)) {
72189 + BUG_ON(pte_file(entry));
72190 + free_swap_and_cache(pte_to_swp_entry(entry));
72191 + pte_clear_not_present_full(mm, address, pte, 0);
72192 + }
72193 + } else {
72194 + struct page *page;
72195 +
72196 + flush_cache_page(vma, address, pte_pfn(entry));
72197 + entry = ptep_clear_flush(vma, address, pte);
72198 + BUG_ON(pte_dirty(entry));
72199 + page = vm_normal_page(vma, address, entry);
72200 + if (page) {
72201 + update_hiwater_rss(mm);
72202 + if (PageAnon(page))
72203 + dec_mm_counter_fast(mm, MM_ANONPAGES);
72204 + else
72205 + dec_mm_counter_fast(mm, MM_FILEPAGES);
72206 + page_remove_rmap(page);
72207 + page_cache_release(page);
72208 + }
72209 + }
72210 + pte_unmap_unlock(pte, ptl);
72211 +}
72212 +
72213 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
72214 + *
72215 + * the ptl of the lower mapped page is held on entry and is not released on exit
72216 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
72217 + */
72218 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
72219 +{
72220 + struct mm_struct *mm = vma->vm_mm;
72221 + unsigned long address_m;
72222 + spinlock_t *ptl_m;
72223 + struct vm_area_struct *vma_m;
72224 + pmd_t *pmd_m;
72225 + pte_t *pte_m, entry_m;
72226 +
72227 + BUG_ON(!page_m || !PageAnon(page_m));
72228 +
72229 + vma_m = pax_find_mirror_vma(vma);
72230 + if (!vma_m)
72231 + return;
72232 +
72233 + BUG_ON(!PageLocked(page_m));
72234 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72235 + address_m = address + SEGMEXEC_TASK_SIZE;
72236 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72237 + pte_m = pte_offset_map(pmd_m, address_m);
72238 + ptl_m = pte_lockptr(mm, pmd_m);
72239 + if (ptl != ptl_m) {
72240 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72241 + if (!pte_none(*pte_m))
72242 + goto out;
72243 + }
72244 +
72245 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
72246 + page_cache_get(page_m);
72247 + page_add_anon_rmap(page_m, vma_m, address_m);
72248 + inc_mm_counter_fast(mm, MM_ANONPAGES);
72249 + set_pte_at(mm, address_m, pte_m, entry_m);
72250 + update_mmu_cache(vma_m, address_m, entry_m);
72251 +out:
72252 + if (ptl != ptl_m)
72253 + spin_unlock(ptl_m);
72254 + pte_unmap(pte_m);
72255 + unlock_page(page_m);
72256 +}
72257 +
72258 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
72259 +{
72260 + struct mm_struct *mm = vma->vm_mm;
72261 + unsigned long address_m;
72262 + spinlock_t *ptl_m;
72263 + struct vm_area_struct *vma_m;
72264 + pmd_t *pmd_m;
72265 + pte_t *pte_m, entry_m;
72266 +
72267 + BUG_ON(!page_m || PageAnon(page_m));
72268 +
72269 + vma_m = pax_find_mirror_vma(vma);
72270 + if (!vma_m)
72271 + return;
72272 +
72273 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72274 + address_m = address + SEGMEXEC_TASK_SIZE;
72275 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72276 + pte_m = pte_offset_map(pmd_m, address_m);
72277 + ptl_m = pte_lockptr(mm, pmd_m);
72278 + if (ptl != ptl_m) {
72279 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72280 + if (!pte_none(*pte_m))
72281 + goto out;
72282 + }
72283 +
72284 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
72285 + page_cache_get(page_m);
72286 + page_add_file_rmap(page_m);
72287 + inc_mm_counter_fast(mm, MM_FILEPAGES);
72288 + set_pte_at(mm, address_m, pte_m, entry_m);
72289 + update_mmu_cache(vma_m, address_m, entry_m);
72290 +out:
72291 + if (ptl != ptl_m)
72292 + spin_unlock(ptl_m);
72293 + pte_unmap(pte_m);
72294 +}
72295 +
72296 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
72297 +{
72298 + struct mm_struct *mm = vma->vm_mm;
72299 + unsigned long address_m;
72300 + spinlock_t *ptl_m;
72301 + struct vm_area_struct *vma_m;
72302 + pmd_t *pmd_m;
72303 + pte_t *pte_m, entry_m;
72304 +
72305 + vma_m = pax_find_mirror_vma(vma);
72306 + if (!vma_m)
72307 + return;
72308 +
72309 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72310 + address_m = address + SEGMEXEC_TASK_SIZE;
72311 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72312 + pte_m = pte_offset_map(pmd_m, address_m);
72313 + ptl_m = pte_lockptr(mm, pmd_m);
72314 + if (ptl != ptl_m) {
72315 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72316 + if (!pte_none(*pte_m))
72317 + goto out;
72318 + }
72319 +
72320 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
72321 + set_pte_at(mm, address_m, pte_m, entry_m);
72322 +out:
72323 + if (ptl != ptl_m)
72324 + spin_unlock(ptl_m);
72325 + pte_unmap(pte_m);
72326 +}
72327 +
72328 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
72329 +{
72330 + struct page *page_m;
72331 + pte_t entry;
72332 +
72333 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
72334 + goto out;
72335 +
72336 + entry = *pte;
72337 + page_m = vm_normal_page(vma, address, entry);
72338 + if (!page_m)
72339 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
72340 + else if (PageAnon(page_m)) {
72341 + if (pax_find_mirror_vma(vma)) {
72342 + pte_unmap_unlock(pte, ptl);
72343 + lock_page(page_m);
72344 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
72345 + if (pte_same(entry, *pte))
72346 + pax_mirror_anon_pte(vma, address, page_m, ptl);
72347 + else
72348 + unlock_page(page_m);
72349 + }
72350 + } else
72351 + pax_mirror_file_pte(vma, address, page_m, ptl);
72352 +
72353 +out:
72354 + pte_unmap_unlock(pte, ptl);
72355 +}
72356 +#endif
72357 +
72358 /*
72359 * This routine handles present pages, when users try to write
72360 * to a shared page. It is done by copying the page to a new address
72361 @@ -2692,6 +2888,12 @@ gotten:
72362 */
72363 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72364 if (likely(pte_same(*page_table, orig_pte))) {
72365 +
72366 +#ifdef CONFIG_PAX_SEGMEXEC
72367 + if (pax_find_mirror_vma(vma))
72368 + BUG_ON(!trylock_page(new_page));
72369 +#endif
72370 +
72371 if (old_page) {
72372 if (!PageAnon(old_page)) {
72373 dec_mm_counter_fast(mm, MM_FILEPAGES);
72374 @@ -2743,6 +2945,10 @@ gotten:
72375 page_remove_rmap(old_page);
72376 }
72377
72378 +#ifdef CONFIG_PAX_SEGMEXEC
72379 + pax_mirror_anon_pte(vma, address, new_page, ptl);
72380 +#endif
72381 +
72382 /* Free the old page.. */
72383 new_page = old_page;
72384 ret |= VM_FAULT_WRITE;
72385 @@ -3022,6 +3228,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72386 swap_free(entry);
72387 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
72388 try_to_free_swap(page);
72389 +
72390 +#ifdef CONFIG_PAX_SEGMEXEC
72391 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
72392 +#endif
72393 +
72394 unlock_page(page);
72395 if (swapcache) {
72396 /*
72397 @@ -3045,6 +3256,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72398
72399 /* No need to invalidate - it was non-present before */
72400 update_mmu_cache(vma, address, page_table);
72401 +
72402 +#ifdef CONFIG_PAX_SEGMEXEC
72403 + pax_mirror_anon_pte(vma, address, page, ptl);
72404 +#endif
72405 +
72406 unlock:
72407 pte_unmap_unlock(page_table, ptl);
72408 out:
72409 @@ -3064,40 +3280,6 @@ out_release:
72410 }
72411
72412 /*
72413 - * This is like a special single-page "expand_{down|up}wards()",
72414 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
72415 - * doesn't hit another vma.
72416 - */
72417 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
72418 -{
72419 - address &= PAGE_MASK;
72420 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
72421 - struct vm_area_struct *prev = vma->vm_prev;
72422 -
72423 - /*
72424 - * Is there a mapping abutting this one below?
72425 - *
72426 - * That's only ok if it's the same stack mapping
72427 - * that has gotten split..
72428 - */
72429 - if (prev && prev->vm_end == address)
72430 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
72431 -
72432 - expand_downwards(vma, address - PAGE_SIZE);
72433 - }
72434 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
72435 - struct vm_area_struct *next = vma->vm_next;
72436 -
72437 - /* As VM_GROWSDOWN but s/below/above/ */
72438 - if (next && next->vm_start == address + PAGE_SIZE)
72439 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
72440 -
72441 - expand_upwards(vma, address + PAGE_SIZE);
72442 - }
72443 - return 0;
72444 -}
72445 -
72446 -/*
72447 * We enter with non-exclusive mmap_sem (to exclude vma changes,
72448 * but allow concurrent faults), and pte mapped but not yet locked.
72449 * We return with mmap_sem still held, but pte unmapped and unlocked.
72450 @@ -3106,27 +3288,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72451 unsigned long address, pte_t *page_table, pmd_t *pmd,
72452 unsigned int flags)
72453 {
72454 - struct page *page;
72455 + struct page *page = NULL;
72456 spinlock_t *ptl;
72457 pte_t entry;
72458
72459 - pte_unmap(page_table);
72460 -
72461 - /* Check if we need to add a guard page to the stack */
72462 - if (check_stack_guard_page(vma, address) < 0)
72463 - return VM_FAULT_SIGBUS;
72464 -
72465 - /* Use the zero-page for reads */
72466 if (!(flags & FAULT_FLAG_WRITE)) {
72467 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
72468 vma->vm_page_prot));
72469 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72470 + ptl = pte_lockptr(mm, pmd);
72471 + spin_lock(ptl);
72472 if (!pte_none(*page_table))
72473 goto unlock;
72474 goto setpte;
72475 }
72476
72477 /* Allocate our own private page. */
72478 + pte_unmap(page_table);
72479 +
72480 if (unlikely(anon_vma_prepare(vma)))
72481 goto oom;
72482 page = alloc_zeroed_user_highpage_movable(vma, address);
72483 @@ -3145,6 +3323,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72484 if (!pte_none(*page_table))
72485 goto release;
72486
72487 +#ifdef CONFIG_PAX_SEGMEXEC
72488 + if (pax_find_mirror_vma(vma))
72489 + BUG_ON(!trylock_page(page));
72490 +#endif
72491 +
72492 inc_mm_counter_fast(mm, MM_ANONPAGES);
72493 page_add_new_anon_rmap(page, vma, address);
72494 setpte:
72495 @@ -3152,6 +3335,12 @@ setpte:
72496
72497 /* No need to invalidate - it was non-present before */
72498 update_mmu_cache(vma, address, page_table);
72499 +
72500 +#ifdef CONFIG_PAX_SEGMEXEC
72501 + if (page)
72502 + pax_mirror_anon_pte(vma, address, page, ptl);
72503 +#endif
72504 +
72505 unlock:
72506 pte_unmap_unlock(page_table, ptl);
72507 return 0;
72508 @@ -3295,6 +3484,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72509 */
72510 /* Only go through if we didn't race with anybody else... */
72511 if (likely(pte_same(*page_table, orig_pte))) {
72512 +
72513 +#ifdef CONFIG_PAX_SEGMEXEC
72514 + if (anon && pax_find_mirror_vma(vma))
72515 + BUG_ON(!trylock_page(page));
72516 +#endif
72517 +
72518 flush_icache_page(vma, page);
72519 entry = mk_pte(page, vma->vm_page_prot);
72520 if (flags & FAULT_FLAG_WRITE)
72521 @@ -3314,6 +3509,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72522
72523 /* no need to invalidate: a not-present page won't be cached */
72524 update_mmu_cache(vma, address, page_table);
72525 +
72526 +#ifdef CONFIG_PAX_SEGMEXEC
72527 + if (anon)
72528 + pax_mirror_anon_pte(vma, address, page, ptl);
72529 + else
72530 + pax_mirror_file_pte(vma, address, page, ptl);
72531 +#endif
72532 +
72533 } else {
72534 if (cow_page)
72535 mem_cgroup_uncharge_page(cow_page);
72536 @@ -3467,6 +3670,12 @@ int handle_pte_fault(struct mm_struct *mm,
72537 if (flags & FAULT_FLAG_WRITE)
72538 flush_tlb_fix_spurious_fault(vma, address);
72539 }
72540 +
72541 +#ifdef CONFIG_PAX_SEGMEXEC
72542 + pax_mirror_pte(vma, address, pte, pmd, ptl);
72543 + return 0;
72544 +#endif
72545 +
72546 unlock:
72547 pte_unmap_unlock(pte, ptl);
72548 return 0;
72549 @@ -3483,6 +3692,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72550 pmd_t *pmd;
72551 pte_t *pte;
72552
72553 +#ifdef CONFIG_PAX_SEGMEXEC
72554 + struct vm_area_struct *vma_m;
72555 +#endif
72556 +
72557 __set_current_state(TASK_RUNNING);
72558
72559 count_vm_event(PGFAULT);
72560 @@ -3494,6 +3707,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72561 if (unlikely(is_vm_hugetlb_page(vma)))
72562 return hugetlb_fault(mm, vma, address, flags);
72563
72564 +#ifdef CONFIG_PAX_SEGMEXEC
72565 + vma_m = pax_find_mirror_vma(vma);
72566 + if (vma_m) {
72567 + unsigned long address_m;
72568 + pgd_t *pgd_m;
72569 + pud_t *pud_m;
72570 + pmd_t *pmd_m;
72571 +
72572 + if (vma->vm_start > vma_m->vm_start) {
72573 + address_m = address;
72574 + address -= SEGMEXEC_TASK_SIZE;
72575 + vma = vma_m;
72576 + } else
72577 + address_m = address + SEGMEXEC_TASK_SIZE;
72578 +
72579 + pgd_m = pgd_offset(mm, address_m);
72580 + pud_m = pud_alloc(mm, pgd_m, address_m);
72581 + if (!pud_m)
72582 + return VM_FAULT_OOM;
72583 + pmd_m = pmd_alloc(mm, pud_m, address_m);
72584 + if (!pmd_m)
72585 + return VM_FAULT_OOM;
72586 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
72587 + return VM_FAULT_OOM;
72588 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
72589 + }
72590 +#endif
72591 +
72592 retry:
72593 pgd = pgd_offset(mm, address);
72594 pud = pud_alloc(mm, pgd, address);
72595 @@ -3535,7 +3776,7 @@ retry:
72596 * run pte_offset_map on the pmd, if an huge pmd could
72597 * materialize from under us from a different thread.
72598 */
72599 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
72600 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
72601 return VM_FAULT_OOM;
72602 /* if an huge pmd materialized from under us just retry later */
72603 if (unlikely(pmd_trans_huge(*pmd)))
72604 @@ -3572,6 +3813,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72605 spin_unlock(&mm->page_table_lock);
72606 return 0;
72607 }
72608 +
72609 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72610 +{
72611 + pud_t *new = pud_alloc_one(mm, address);
72612 + if (!new)
72613 + return -ENOMEM;
72614 +
72615 + smp_wmb(); /* See comment in __pte_alloc */
72616 +
72617 + spin_lock(&mm->page_table_lock);
72618 + if (pgd_present(*pgd)) /* Another has populated it */
72619 + pud_free(mm, new);
72620 + else
72621 + pgd_populate_kernel(mm, pgd, new);
72622 + spin_unlock(&mm->page_table_lock);
72623 + return 0;
72624 +}
72625 #endif /* __PAGETABLE_PUD_FOLDED */
72626
72627 #ifndef __PAGETABLE_PMD_FOLDED
72628 @@ -3602,6 +3860,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
72629 spin_unlock(&mm->page_table_lock);
72630 return 0;
72631 }
72632 +
72633 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
72634 +{
72635 + pmd_t *new = pmd_alloc_one(mm, address);
72636 + if (!new)
72637 + return -ENOMEM;
72638 +
72639 + smp_wmb(); /* See comment in __pte_alloc */
72640 +
72641 + spin_lock(&mm->page_table_lock);
72642 +#ifndef __ARCH_HAS_4LEVEL_HACK
72643 + if (pud_present(*pud)) /* Another has populated it */
72644 + pmd_free(mm, new);
72645 + else
72646 + pud_populate_kernel(mm, pud, new);
72647 +#else
72648 + if (pgd_present(*pud)) /* Another has populated it */
72649 + pmd_free(mm, new);
72650 + else
72651 + pgd_populate_kernel(mm, pud, new);
72652 +#endif /* __ARCH_HAS_4LEVEL_HACK */
72653 + spin_unlock(&mm->page_table_lock);
72654 + return 0;
72655 +}
72656 #endif /* __PAGETABLE_PMD_FOLDED */
72657
72658 int make_pages_present(unsigned long addr, unsigned long end)
72659 @@ -3639,7 +3921,7 @@ static int __init gate_vma_init(void)
72660 gate_vma.vm_start = FIXADDR_USER_START;
72661 gate_vma.vm_end = FIXADDR_USER_END;
72662 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72663 - gate_vma.vm_page_prot = __P101;
72664 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72665
72666 return 0;
72667 }
72668 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
72669 index b12b28a..64b57d0 100644
72670 --- a/mm/mempolicy.c
72671 +++ b/mm/mempolicy.c
72672 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72673 unsigned long vmstart;
72674 unsigned long vmend;
72675
72676 +#ifdef CONFIG_PAX_SEGMEXEC
72677 + struct vm_area_struct *vma_m;
72678 +#endif
72679 +
72680 vma = find_vma(mm, start);
72681 if (!vma || vma->vm_start > start)
72682 return -EFAULT;
72683 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72684 if (err)
72685 goto out;
72686 }
72687 +
72688 +#ifdef CONFIG_PAX_SEGMEXEC
72689 + vma_m = pax_find_mirror_vma(vma);
72690 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
72691 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
72692 + if (err)
72693 + goto out;
72694 + }
72695 +#endif
72696 +
72697 }
72698
72699 out:
72700 @@ -1125,6 +1139,17 @@ static long do_mbind(unsigned long start, unsigned long len,
72701
72702 if (end < start)
72703 return -EINVAL;
72704 +
72705 +#ifdef CONFIG_PAX_SEGMEXEC
72706 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72707 + if (end > SEGMEXEC_TASK_SIZE)
72708 + return -EINVAL;
72709 + } else
72710 +#endif
72711 +
72712 + if (end > TASK_SIZE)
72713 + return -EINVAL;
72714 +
72715 if (end == start)
72716 return 0;
72717
72718 @@ -1348,8 +1373,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72719 */
72720 tcred = __task_cred(task);
72721 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
72722 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
72723 - !capable(CAP_SYS_NICE)) {
72724 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
72725 rcu_read_unlock();
72726 err = -EPERM;
72727 goto out_put;
72728 @@ -1380,6 +1404,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72729 goto out;
72730 }
72731
72732 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72733 + if (mm != current->mm &&
72734 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72735 + mmput(mm);
72736 + err = -EPERM;
72737 + goto out;
72738 + }
72739 +#endif
72740 +
72741 err = do_migrate_pages(mm, old, new,
72742 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
72743
72744 diff --git a/mm/mlock.c b/mm/mlock.c
72745 index ef726e8..cd7f1ec 100644
72746 --- a/mm/mlock.c
72747 +++ b/mm/mlock.c
72748 @@ -13,6 +13,7 @@
72749 #include <linux/pagemap.h>
72750 #include <linux/mempolicy.h>
72751 #include <linux/syscalls.h>
72752 +#include <linux/security.h>
72753 #include <linux/sched.h>
72754 #include <linux/export.h>
72755 #include <linux/rmap.h>
72756 @@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
72757 {
72758 unsigned long nstart, end, tmp;
72759 struct vm_area_struct * vma, * prev;
72760 - int error;
72761 + int error = 0;
72762
72763 VM_BUG_ON(start & ~PAGE_MASK);
72764 VM_BUG_ON(len != PAGE_ALIGN(len));
72765 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
72766 return -EINVAL;
72767 if (end == start)
72768 return 0;
72769 + if (end > TASK_SIZE)
72770 + return -EINVAL;
72771 +
72772 vma = find_vma(current->mm, start);
72773 if (!vma || vma->vm_start > start)
72774 return -ENOMEM;
72775 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
72776 for (nstart = start ; ; ) {
72777 vm_flags_t newflags;
72778
72779 +#ifdef CONFIG_PAX_SEGMEXEC
72780 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72781 + break;
72782 +#endif
72783 +
72784 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72785
72786 newflags = vma->vm_flags | VM_LOCKED;
72787 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
72788 lock_limit >>= PAGE_SHIFT;
72789
72790 /* check against resource limits */
72791 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72792 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72793 error = do_mlock(start, len, 1);
72794 up_write(&current->mm->mmap_sem);
72795 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
72796 static int do_mlockall(int flags)
72797 {
72798 struct vm_area_struct * vma, * prev = NULL;
72799 - unsigned int def_flags = 0;
72800
72801 if (flags & MCL_FUTURE)
72802 - def_flags = VM_LOCKED;
72803 - current->mm->def_flags = def_flags;
72804 + current->mm->def_flags |= VM_LOCKED;
72805 + else
72806 + current->mm->def_flags &= ~VM_LOCKED;
72807 if (flags == MCL_FUTURE)
72808 goto out;
72809
72810 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72811 vm_flags_t newflags;
72812
72813 +#ifdef CONFIG_PAX_SEGMEXEC
72814 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72815 + break;
72816 +#endif
72817 +
72818 + BUG_ON(vma->vm_end > TASK_SIZE);
72819 newflags = vma->vm_flags | VM_LOCKED;
72820 if (!(flags & MCL_CURRENT))
72821 newflags &= ~VM_LOCKED;
72822 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72823 lock_limit >>= PAGE_SHIFT;
72824
72825 ret = -ENOMEM;
72826 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72827 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72828 capable(CAP_IPC_LOCK))
72829 ret = do_mlockall(flags);
72830 diff --git a/mm/mmap.c b/mm/mmap.c
72831 index fa1f274..86de476 100644
72832 --- a/mm/mmap.c
72833 +++ b/mm/mmap.c
72834 @@ -47,6 +47,16 @@
72835 #define arch_rebalance_pgtables(addr, len) (addr)
72836 #endif
72837
72838 +static inline void verify_mm_writelocked(struct mm_struct *mm)
72839 +{
72840 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72841 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72842 + up_read(&mm->mmap_sem);
72843 + BUG();
72844 + }
72845 +#endif
72846 +}
72847 +
72848 static void unmap_region(struct mm_struct *mm,
72849 struct vm_area_struct *vma, struct vm_area_struct *prev,
72850 unsigned long start, unsigned long end);
72851 @@ -72,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
72852 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72853 *
72854 */
72855 -pgprot_t protection_map[16] = {
72856 +pgprot_t protection_map[16] __read_only = {
72857 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72858 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72859 };
72860
72861 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
72862 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72863 {
72864 - return __pgprot(pgprot_val(protection_map[vm_flags &
72865 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72866 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72867 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72868 +
72869 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72870 + if (!(__supported_pte_mask & _PAGE_NX) &&
72871 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72872 + (vm_flags & (VM_READ | VM_WRITE)))
72873 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72874 +#endif
72875 +
72876 + return prot;
72877 }
72878 EXPORT_SYMBOL(vm_get_page_prot);
72879
72880 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72881 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72882 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72883 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72884 /*
72885 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72886 * other variables. It can be updated by several CPUs frequently.
72887 @@ -229,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72888 struct vm_area_struct *next = vma->vm_next;
72889
72890 might_sleep();
72891 + BUG_ON(vma->vm_mirror);
72892 if (vma->vm_ops && vma->vm_ops->close)
72893 vma->vm_ops->close(vma);
72894 if (vma->vm_file) {
72895 @@ -275,6 +296,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72896 * not page aligned -Ram Gupta
72897 */
72898 rlim = rlimit(RLIMIT_DATA);
72899 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72900 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72901 (mm->end_data - mm->start_data) > rlim)
72902 goto out;
72903 @@ -708,6 +730,12 @@ static int
72904 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72905 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72906 {
72907 +
72908 +#ifdef CONFIG_PAX_SEGMEXEC
72909 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72910 + return 0;
72911 +#endif
72912 +
72913 if (is_mergeable_vma(vma, file, vm_flags) &&
72914 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72915 if (vma->vm_pgoff == vm_pgoff)
72916 @@ -727,6 +755,12 @@ static int
72917 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72918 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72919 {
72920 +
72921 +#ifdef CONFIG_PAX_SEGMEXEC
72922 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72923 + return 0;
72924 +#endif
72925 +
72926 if (is_mergeable_vma(vma, file, vm_flags) &&
72927 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72928 pgoff_t vm_pglen;
72929 @@ -769,13 +803,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72930 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72931 struct vm_area_struct *prev, unsigned long addr,
72932 unsigned long end, unsigned long vm_flags,
72933 - struct anon_vma *anon_vma, struct file *file,
72934 + struct anon_vma *anon_vma, struct file *file,
72935 pgoff_t pgoff, struct mempolicy *policy)
72936 {
72937 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72938 struct vm_area_struct *area, *next;
72939 int err;
72940
72941 +#ifdef CONFIG_PAX_SEGMEXEC
72942 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72943 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72944 +
72945 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72946 +#endif
72947 +
72948 /*
72949 * We later require that vma->vm_flags == vm_flags,
72950 * so this tests vma->vm_flags & VM_SPECIAL, too.
72951 @@ -791,6 +832,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72952 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72953 next = next->vm_next;
72954
72955 +#ifdef CONFIG_PAX_SEGMEXEC
72956 + if (prev)
72957 + prev_m = pax_find_mirror_vma(prev);
72958 + if (area)
72959 + area_m = pax_find_mirror_vma(area);
72960 + if (next)
72961 + next_m = pax_find_mirror_vma(next);
72962 +#endif
72963 +
72964 /*
72965 * Can it merge with the predecessor?
72966 */
72967 @@ -810,9 +860,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72968 /* cases 1, 6 */
72969 err = vma_adjust(prev, prev->vm_start,
72970 next->vm_end, prev->vm_pgoff, NULL);
72971 - } else /* cases 2, 5, 7 */
72972 +
72973 +#ifdef CONFIG_PAX_SEGMEXEC
72974 + if (!err && prev_m)
72975 + err = vma_adjust(prev_m, prev_m->vm_start,
72976 + next_m->vm_end, prev_m->vm_pgoff, NULL);
72977 +#endif
72978 +
72979 + } else { /* cases 2, 5, 7 */
72980 err = vma_adjust(prev, prev->vm_start,
72981 end, prev->vm_pgoff, NULL);
72982 +
72983 +#ifdef CONFIG_PAX_SEGMEXEC
72984 + if (!err && prev_m)
72985 + err = vma_adjust(prev_m, prev_m->vm_start,
72986 + end_m, prev_m->vm_pgoff, NULL);
72987 +#endif
72988 +
72989 + }
72990 if (err)
72991 return NULL;
72992 khugepaged_enter_vma_merge(prev);
72993 @@ -826,12 +891,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72994 mpol_equal(policy, vma_policy(next)) &&
72995 can_vma_merge_before(next, vm_flags,
72996 anon_vma, file, pgoff+pglen)) {
72997 - if (prev && addr < prev->vm_end) /* case 4 */
72998 + if (prev && addr < prev->vm_end) { /* case 4 */
72999 err = vma_adjust(prev, prev->vm_start,
73000 addr, prev->vm_pgoff, NULL);
73001 - else /* cases 3, 8 */
73002 +
73003 +#ifdef CONFIG_PAX_SEGMEXEC
73004 + if (!err && prev_m)
73005 + err = vma_adjust(prev_m, prev_m->vm_start,
73006 + addr_m, prev_m->vm_pgoff, NULL);
73007 +#endif
73008 +
73009 + } else { /* cases 3, 8 */
73010 err = vma_adjust(area, addr, next->vm_end,
73011 next->vm_pgoff - pglen, NULL);
73012 +
73013 +#ifdef CONFIG_PAX_SEGMEXEC
73014 + if (!err && area_m)
73015 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
73016 + next_m->vm_pgoff - pglen, NULL);
73017 +#endif
73018 +
73019 + }
73020 if (err)
73021 return NULL;
73022 khugepaged_enter_vma_merge(area);
73023 @@ -940,14 +1020,11 @@ none:
73024 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
73025 struct file *file, long pages)
73026 {
73027 - const unsigned long stack_flags
73028 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
73029 -
73030 if (file) {
73031 mm->shared_vm += pages;
73032 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
73033 mm->exec_vm += pages;
73034 - } else if (flags & stack_flags)
73035 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
73036 mm->stack_vm += pages;
73037 if (flags & (VM_RESERVED|VM_IO))
73038 mm->reserved_vm += pages;
73039 @@ -985,7 +1062,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73040 * (the exception is when the underlying filesystem is noexec
73041 * mounted, in which case we dont add PROT_EXEC.)
73042 */
73043 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73044 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73045 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
73046 prot |= PROT_EXEC;
73047
73048 @@ -1011,7 +1088,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73049 /* Obtain the address to map to. we verify (or select) it and ensure
73050 * that it represents a valid section of the address space.
73051 */
73052 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
73053 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
73054 if (addr & ~PAGE_MASK)
73055 return addr;
73056
73057 @@ -1022,6 +1099,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73058 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
73059 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
73060
73061 +#ifdef CONFIG_PAX_MPROTECT
73062 + if (mm->pax_flags & MF_PAX_MPROTECT) {
73063 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
73064 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
73065 + gr_log_rwxmmap(file);
73066 +
73067 +#ifdef CONFIG_PAX_EMUPLT
73068 + vm_flags &= ~VM_EXEC;
73069 +#else
73070 + return -EPERM;
73071 +#endif
73072 +
73073 + }
73074 +
73075 + if (!(vm_flags & VM_EXEC))
73076 + vm_flags &= ~VM_MAYEXEC;
73077 +#else
73078 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73079 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73080 +#endif
73081 + else
73082 + vm_flags &= ~VM_MAYWRITE;
73083 + }
73084 +#endif
73085 +
73086 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
73087 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
73088 + vm_flags &= ~VM_PAGEEXEC;
73089 +#endif
73090 +
73091 if (flags & MAP_LOCKED)
73092 if (!can_do_mlock())
73093 return -EPERM;
73094 @@ -1033,6 +1140,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73095 locked += mm->locked_vm;
73096 lock_limit = rlimit(RLIMIT_MEMLOCK);
73097 lock_limit >>= PAGE_SHIFT;
73098 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
73099 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
73100 return -EAGAIN;
73101 }
73102 @@ -1099,6 +1207,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73103 }
73104 }
73105
73106 + if (!gr_acl_handle_mmap(file, prot))
73107 + return -EACCES;
73108 +
73109 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
73110 }
73111
73112 @@ -1175,7 +1286,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
73113 vm_flags_t vm_flags = vma->vm_flags;
73114
73115 /* If it was private or non-writable, the write bit is already clear */
73116 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
73117 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
73118 return 0;
73119
73120 /* The backer wishes to know when pages are first written to? */
73121 @@ -1224,14 +1335,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
73122 unsigned long charged = 0;
73123 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
73124
73125 +#ifdef CONFIG_PAX_SEGMEXEC
73126 + struct vm_area_struct *vma_m = NULL;
73127 +#endif
73128 +
73129 + /*
73130 + * mm->mmap_sem is required to protect against another thread
73131 + * changing the mappings in case we sleep.
73132 + */
73133 + verify_mm_writelocked(mm);
73134 +
73135 /* Clear old maps */
73136 error = -ENOMEM;
73137 -munmap_back:
73138 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73139 if (vma && vma->vm_start < addr + len) {
73140 if (do_munmap(mm, addr, len))
73141 return -ENOMEM;
73142 - goto munmap_back;
73143 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73144 + BUG_ON(vma && vma->vm_start < addr + len);
73145 }
73146
73147 /* Check against address space limit. */
73148 @@ -1280,6 +1401,16 @@ munmap_back:
73149 goto unacct_error;
73150 }
73151
73152 +#ifdef CONFIG_PAX_SEGMEXEC
73153 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
73154 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73155 + if (!vma_m) {
73156 + error = -ENOMEM;
73157 + goto free_vma;
73158 + }
73159 + }
73160 +#endif
73161 +
73162 vma->vm_mm = mm;
73163 vma->vm_start = addr;
73164 vma->vm_end = addr + len;
73165 @@ -1304,6 +1435,19 @@ munmap_back:
73166 error = file->f_op->mmap(file, vma);
73167 if (error)
73168 goto unmap_and_free_vma;
73169 +
73170 +#ifdef CONFIG_PAX_SEGMEXEC
73171 + if (vma_m && (vm_flags & VM_EXECUTABLE))
73172 + added_exe_file_vma(mm);
73173 +#endif
73174 +
73175 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
73176 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
73177 + vma->vm_flags |= VM_PAGEEXEC;
73178 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73179 + }
73180 +#endif
73181 +
73182 if (vm_flags & VM_EXECUTABLE)
73183 added_exe_file_vma(mm);
73184
73185 @@ -1341,6 +1485,11 @@ munmap_back:
73186 vma_link(mm, vma, prev, rb_link, rb_parent);
73187 file = vma->vm_file;
73188
73189 +#ifdef CONFIG_PAX_SEGMEXEC
73190 + if (vma_m)
73191 + BUG_ON(pax_mirror_vma(vma_m, vma));
73192 +#endif
73193 +
73194 /* Once vma denies write, undo our temporary denial count */
73195 if (correct_wcount)
73196 atomic_inc(&inode->i_writecount);
73197 @@ -1349,6 +1498,7 @@ out:
73198
73199 mm->total_vm += len >> PAGE_SHIFT;
73200 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
73201 + track_exec_limit(mm, addr, addr + len, vm_flags);
73202 if (vm_flags & VM_LOCKED) {
73203 if (!mlock_vma_pages_range(vma, addr, addr + len))
73204 mm->locked_vm += (len >> PAGE_SHIFT);
73205 @@ -1370,6 +1520,12 @@ unmap_and_free_vma:
73206 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
73207 charged = 0;
73208 free_vma:
73209 +
73210 +#ifdef CONFIG_PAX_SEGMEXEC
73211 + if (vma_m)
73212 + kmem_cache_free(vm_area_cachep, vma_m);
73213 +#endif
73214 +
73215 kmem_cache_free(vm_area_cachep, vma);
73216 unacct_error:
73217 if (charged)
73218 @@ -1377,6 +1533,44 @@ unacct_error:
73219 return error;
73220 }
73221
73222 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
73223 +{
73224 + if (!vma) {
73225 +#ifdef CONFIG_STACK_GROWSUP
73226 + if (addr > sysctl_heap_stack_gap)
73227 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
73228 + else
73229 + vma = find_vma(current->mm, 0);
73230 + if (vma && (vma->vm_flags & VM_GROWSUP))
73231 + return false;
73232 +#endif
73233 + return true;
73234 + }
73235 +
73236 + if (addr + len > vma->vm_start)
73237 + return false;
73238 +
73239 + if (vma->vm_flags & VM_GROWSDOWN)
73240 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
73241 +#ifdef CONFIG_STACK_GROWSUP
73242 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
73243 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
73244 +#endif
73245 +
73246 + return true;
73247 +}
73248 +
73249 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
73250 +{
73251 + if (vma->vm_start < len)
73252 + return -ENOMEM;
73253 + if (!(vma->vm_flags & VM_GROWSDOWN))
73254 + return vma->vm_start - len;
73255 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
73256 + return vma->vm_start - len - sysctl_heap_stack_gap;
73257 + return -ENOMEM;
73258 +}
73259 +
73260 /* Get an address range which is currently unmapped.
73261 * For shmat() with addr=0.
73262 *
73263 @@ -1403,18 +1597,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
73264 if (flags & MAP_FIXED)
73265 return addr;
73266
73267 +#ifdef CONFIG_PAX_RANDMMAP
73268 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
73269 +#endif
73270 +
73271 if (addr) {
73272 addr = PAGE_ALIGN(addr);
73273 - vma = find_vma(mm, addr);
73274 - if (TASK_SIZE - len >= addr &&
73275 - (!vma || addr + len <= vma->vm_start))
73276 - return addr;
73277 + if (TASK_SIZE - len >= addr) {
73278 + vma = find_vma(mm, addr);
73279 + if (check_heap_stack_gap(vma, addr, len))
73280 + return addr;
73281 + }
73282 }
73283 if (len > mm->cached_hole_size) {
73284 - start_addr = addr = mm->free_area_cache;
73285 + start_addr = addr = mm->free_area_cache;
73286 } else {
73287 - start_addr = addr = TASK_UNMAPPED_BASE;
73288 - mm->cached_hole_size = 0;
73289 + start_addr = addr = mm->mmap_base;
73290 + mm->cached_hole_size = 0;
73291 }
73292
73293 full_search:
73294 @@ -1425,34 +1624,40 @@ full_search:
73295 * Start a new search - just in case we missed
73296 * some holes.
73297 */
73298 - if (start_addr != TASK_UNMAPPED_BASE) {
73299 - addr = TASK_UNMAPPED_BASE;
73300 - start_addr = addr;
73301 + if (start_addr != mm->mmap_base) {
73302 + start_addr = addr = mm->mmap_base;
73303 mm->cached_hole_size = 0;
73304 goto full_search;
73305 }
73306 return -ENOMEM;
73307 }
73308 - if (!vma || addr + len <= vma->vm_start) {
73309 - /*
73310 - * Remember the place where we stopped the search:
73311 - */
73312 - mm->free_area_cache = addr + len;
73313 - return addr;
73314 - }
73315 + if (check_heap_stack_gap(vma, addr, len))
73316 + break;
73317 if (addr + mm->cached_hole_size < vma->vm_start)
73318 mm->cached_hole_size = vma->vm_start - addr;
73319 addr = vma->vm_end;
73320 }
73321 +
73322 + /*
73323 + * Remember the place where we stopped the search:
73324 + */
73325 + mm->free_area_cache = addr + len;
73326 + return addr;
73327 }
73328 #endif
73329
73330 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
73331 {
73332 +
73333 +#ifdef CONFIG_PAX_SEGMEXEC
73334 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73335 + return;
73336 +#endif
73337 +
73338 /*
73339 * Is this a new hole at the lowest possible address?
73340 */
73341 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
73342 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
73343 mm->free_area_cache = addr;
73344 }
73345
73346 @@ -1468,7 +1673,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73347 {
73348 struct vm_area_struct *vma;
73349 struct mm_struct *mm = current->mm;
73350 - unsigned long addr = addr0, start_addr;
73351 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
73352
73353 /* requested length too big for entire address space */
73354 if (len > TASK_SIZE)
73355 @@ -1477,13 +1682,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73356 if (flags & MAP_FIXED)
73357 return addr;
73358
73359 +#ifdef CONFIG_PAX_RANDMMAP
73360 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
73361 +#endif
73362 +
73363 /* requesting a specific address */
73364 if (addr) {
73365 addr = PAGE_ALIGN(addr);
73366 - vma = find_vma(mm, addr);
73367 - if (TASK_SIZE - len >= addr &&
73368 - (!vma || addr + len <= vma->vm_start))
73369 - return addr;
73370 + if (TASK_SIZE - len >= addr) {
73371 + vma = find_vma(mm, addr);
73372 + if (check_heap_stack_gap(vma, addr, len))
73373 + return addr;
73374 + }
73375 }
73376
73377 /* check if free_area_cache is useful for us */
73378 @@ -1507,7 +1717,7 @@ try_again:
73379 * return with success:
73380 */
73381 vma = find_vma(mm, addr);
73382 - if (!vma || addr+len <= vma->vm_start)
73383 + if (check_heap_stack_gap(vma, addr, len))
73384 /* remember the address as a hint for next time */
73385 return (mm->free_area_cache = addr);
73386
73387 @@ -1516,8 +1726,8 @@ try_again:
73388 mm->cached_hole_size = vma->vm_start - addr;
73389
73390 /* try just below the current vma->vm_start */
73391 - addr = vma->vm_start-len;
73392 - } while (len < vma->vm_start);
73393 + addr = skip_heap_stack_gap(vma, len);
73394 + } while (!IS_ERR_VALUE(addr));
73395
73396 fail:
73397 /*
73398 @@ -1540,13 +1750,21 @@ fail:
73399 * can happen with large stack limits and large mmap()
73400 * allocations.
73401 */
73402 + mm->mmap_base = TASK_UNMAPPED_BASE;
73403 +
73404 +#ifdef CONFIG_PAX_RANDMMAP
73405 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73406 + mm->mmap_base += mm->delta_mmap;
73407 +#endif
73408 +
73409 + mm->free_area_cache = mm->mmap_base;
73410 mm->cached_hole_size = ~0UL;
73411 - mm->free_area_cache = TASK_UNMAPPED_BASE;
73412 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
73413 /*
73414 * Restore the topdown base:
73415 */
73416 - mm->free_area_cache = mm->mmap_base;
73417 + mm->mmap_base = base;
73418 + mm->free_area_cache = base;
73419 mm->cached_hole_size = ~0UL;
73420
73421 return addr;
73422 @@ -1555,6 +1773,12 @@ fail:
73423
73424 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73425 {
73426 +
73427 +#ifdef CONFIG_PAX_SEGMEXEC
73428 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73429 + return;
73430 +#endif
73431 +
73432 /*
73433 * Is this a new hole at the highest possible address?
73434 */
73435 @@ -1562,8 +1786,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73436 mm->free_area_cache = addr;
73437
73438 /* dont allow allocations above current base */
73439 - if (mm->free_area_cache > mm->mmap_base)
73440 + if (mm->free_area_cache > mm->mmap_base) {
73441 mm->free_area_cache = mm->mmap_base;
73442 + mm->cached_hole_size = ~0UL;
73443 + }
73444 }
73445
73446 unsigned long
73447 @@ -1662,6 +1888,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
73448 return vma;
73449 }
73450
73451 +#ifdef CONFIG_PAX_SEGMEXEC
73452 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
73453 +{
73454 + struct vm_area_struct *vma_m;
73455 +
73456 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
73457 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
73458 + BUG_ON(vma->vm_mirror);
73459 + return NULL;
73460 + }
73461 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
73462 + vma_m = vma->vm_mirror;
73463 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
73464 + BUG_ON(vma->vm_file != vma_m->vm_file);
73465 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
73466 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
73467 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
73468 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
73469 + return vma_m;
73470 +}
73471 +#endif
73472 +
73473 /*
73474 * Verify that the stack growth is acceptable and
73475 * update accounting. This is shared with both the
73476 @@ -1678,6 +1926,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73477 return -ENOMEM;
73478
73479 /* Stack limit test */
73480 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
73481 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
73482 return -ENOMEM;
73483
73484 @@ -1688,6 +1937,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73485 locked = mm->locked_vm + grow;
73486 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
73487 limit >>= PAGE_SHIFT;
73488 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
73489 if (locked > limit && !capable(CAP_IPC_LOCK))
73490 return -ENOMEM;
73491 }
73492 @@ -1718,37 +1968,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73493 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
73494 * vma is the last one with address > vma->vm_end. Have to extend vma.
73495 */
73496 +#ifndef CONFIG_IA64
73497 +static
73498 +#endif
73499 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73500 {
73501 int error;
73502 + bool locknext;
73503
73504 if (!(vma->vm_flags & VM_GROWSUP))
73505 return -EFAULT;
73506
73507 + /* Also guard against wrapping around to address 0. */
73508 + if (address < PAGE_ALIGN(address+1))
73509 + address = PAGE_ALIGN(address+1);
73510 + else
73511 + return -ENOMEM;
73512 +
73513 /*
73514 * We must make sure the anon_vma is allocated
73515 * so that the anon_vma locking is not a noop.
73516 */
73517 if (unlikely(anon_vma_prepare(vma)))
73518 return -ENOMEM;
73519 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
73520 + if (locknext && anon_vma_prepare(vma->vm_next))
73521 + return -ENOMEM;
73522 vma_lock_anon_vma(vma);
73523 + if (locknext)
73524 + vma_lock_anon_vma(vma->vm_next);
73525
73526 /*
73527 * vma->vm_start/vm_end cannot change under us because the caller
73528 * is required to hold the mmap_sem in read mode. We need the
73529 - * anon_vma lock to serialize against concurrent expand_stacks.
73530 - * Also guard against wrapping around to address 0.
73531 + * anon_vma locks to serialize against concurrent expand_stacks
73532 + * and expand_upwards.
73533 */
73534 - if (address < PAGE_ALIGN(address+4))
73535 - address = PAGE_ALIGN(address+4);
73536 - else {
73537 - vma_unlock_anon_vma(vma);
73538 - return -ENOMEM;
73539 - }
73540 error = 0;
73541
73542 /* Somebody else might have raced and expanded it already */
73543 - if (address > vma->vm_end) {
73544 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
73545 + error = -ENOMEM;
73546 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
73547 unsigned long size, grow;
73548
73549 size = address - vma->vm_start;
73550 @@ -1763,6 +2024,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73551 }
73552 }
73553 }
73554 + if (locknext)
73555 + vma_unlock_anon_vma(vma->vm_next);
73556 vma_unlock_anon_vma(vma);
73557 khugepaged_enter_vma_merge(vma);
73558 return error;
73559 @@ -1776,6 +2039,8 @@ int expand_downwards(struct vm_area_struct *vma,
73560 unsigned long address)
73561 {
73562 int error;
73563 + bool lockprev = false;
73564 + struct vm_area_struct *prev;
73565
73566 /*
73567 * We must make sure the anon_vma is allocated
73568 @@ -1789,6 +2054,15 @@ int expand_downwards(struct vm_area_struct *vma,
73569 if (error)
73570 return error;
73571
73572 + prev = vma->vm_prev;
73573 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
73574 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73575 +#endif
73576 + if (lockprev && anon_vma_prepare(prev))
73577 + return -ENOMEM;
73578 + if (lockprev)
73579 + vma_lock_anon_vma(prev);
73580 +
73581 vma_lock_anon_vma(vma);
73582
73583 /*
73584 @@ -1798,9 +2072,17 @@ int expand_downwards(struct vm_area_struct *vma,
73585 */
73586
73587 /* Somebody else might have raced and expanded it already */
73588 - if (address < vma->vm_start) {
73589 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73590 + error = -ENOMEM;
73591 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73592 unsigned long size, grow;
73593
73594 +#ifdef CONFIG_PAX_SEGMEXEC
73595 + struct vm_area_struct *vma_m;
73596 +
73597 + vma_m = pax_find_mirror_vma(vma);
73598 +#endif
73599 +
73600 size = vma->vm_end - address;
73601 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73602
73603 @@ -1810,11 +2092,22 @@ int expand_downwards(struct vm_area_struct *vma,
73604 if (!error) {
73605 vma->vm_start = address;
73606 vma->vm_pgoff -= grow;
73607 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73608 +
73609 +#ifdef CONFIG_PAX_SEGMEXEC
73610 + if (vma_m) {
73611 + vma_m->vm_start -= grow << PAGE_SHIFT;
73612 + vma_m->vm_pgoff -= grow;
73613 + }
73614 +#endif
73615 +
73616 perf_event_mmap(vma);
73617 }
73618 }
73619 }
73620 vma_unlock_anon_vma(vma);
73621 + if (lockprev)
73622 + vma_unlock_anon_vma(prev);
73623 khugepaged_enter_vma_merge(vma);
73624 return error;
73625 }
73626 @@ -1886,6 +2179,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
73627 do {
73628 long nrpages = vma_pages(vma);
73629
73630 +#ifdef CONFIG_PAX_SEGMEXEC
73631 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73632 + vma = remove_vma(vma);
73633 + continue;
73634 + }
73635 +#endif
73636 +
73637 if (vma->vm_flags & VM_ACCOUNT)
73638 nr_accounted += nrpages;
73639 mm->total_vm -= nrpages;
73640 @@ -1932,6 +2232,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
73641 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
73642 vma->vm_prev = NULL;
73643 do {
73644 +
73645 +#ifdef CONFIG_PAX_SEGMEXEC
73646 + if (vma->vm_mirror) {
73647 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73648 + vma->vm_mirror->vm_mirror = NULL;
73649 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
73650 + vma->vm_mirror = NULL;
73651 + }
73652 +#endif
73653 +
73654 rb_erase(&vma->vm_rb, &mm->mm_rb);
73655 mm->map_count--;
73656 tail_vma = vma;
73657 @@ -1960,14 +2270,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73658 struct vm_area_struct *new;
73659 int err = -ENOMEM;
73660
73661 +#ifdef CONFIG_PAX_SEGMEXEC
73662 + struct vm_area_struct *vma_m, *new_m = NULL;
73663 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
73664 +#endif
73665 +
73666 if (is_vm_hugetlb_page(vma) && (addr &
73667 ~(huge_page_mask(hstate_vma(vma)))))
73668 return -EINVAL;
73669
73670 +#ifdef CONFIG_PAX_SEGMEXEC
73671 + vma_m = pax_find_mirror_vma(vma);
73672 +#endif
73673 +
73674 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73675 if (!new)
73676 goto out_err;
73677
73678 +#ifdef CONFIG_PAX_SEGMEXEC
73679 + if (vma_m) {
73680 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73681 + if (!new_m) {
73682 + kmem_cache_free(vm_area_cachep, new);
73683 + goto out_err;
73684 + }
73685 + }
73686 +#endif
73687 +
73688 /* most fields are the same, copy all, and then fixup */
73689 *new = *vma;
73690
73691 @@ -1980,6 +2309,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73692 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73693 }
73694
73695 +#ifdef CONFIG_PAX_SEGMEXEC
73696 + if (vma_m) {
73697 + *new_m = *vma_m;
73698 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
73699 + new_m->vm_mirror = new;
73700 + new->vm_mirror = new_m;
73701 +
73702 + if (new_below)
73703 + new_m->vm_end = addr_m;
73704 + else {
73705 + new_m->vm_start = addr_m;
73706 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73707 + }
73708 + }
73709 +#endif
73710 +
73711 pol = mpol_dup(vma_policy(vma));
73712 if (IS_ERR(pol)) {
73713 err = PTR_ERR(pol);
73714 @@ -2005,6 +2350,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73715 else
73716 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73717
73718 +#ifdef CONFIG_PAX_SEGMEXEC
73719 + if (!err && vma_m) {
73720 + if (anon_vma_clone(new_m, vma_m))
73721 + goto out_free_mpol;
73722 +
73723 + mpol_get(pol);
73724 + vma_set_policy(new_m, pol);
73725 +
73726 + if (new_m->vm_file) {
73727 + get_file(new_m->vm_file);
73728 + if (vma_m->vm_flags & VM_EXECUTABLE)
73729 + added_exe_file_vma(mm);
73730 + }
73731 +
73732 + if (new_m->vm_ops && new_m->vm_ops->open)
73733 + new_m->vm_ops->open(new_m);
73734 +
73735 + if (new_below)
73736 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73737 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73738 + else
73739 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73740 +
73741 + if (err) {
73742 + if (new_m->vm_ops && new_m->vm_ops->close)
73743 + new_m->vm_ops->close(new_m);
73744 + if (new_m->vm_file) {
73745 + if (vma_m->vm_flags & VM_EXECUTABLE)
73746 + removed_exe_file_vma(mm);
73747 + fput(new_m->vm_file);
73748 + }
73749 + mpol_put(pol);
73750 + }
73751 + }
73752 +#endif
73753 +
73754 /* Success. */
73755 if (!err)
73756 return 0;
73757 @@ -2017,10 +2398,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73758 removed_exe_file_vma(mm);
73759 fput(new->vm_file);
73760 }
73761 - unlink_anon_vmas(new);
73762 out_free_mpol:
73763 mpol_put(pol);
73764 out_free_vma:
73765 +
73766 +#ifdef CONFIG_PAX_SEGMEXEC
73767 + if (new_m) {
73768 + unlink_anon_vmas(new_m);
73769 + kmem_cache_free(vm_area_cachep, new_m);
73770 + }
73771 +#endif
73772 +
73773 + unlink_anon_vmas(new);
73774 kmem_cache_free(vm_area_cachep, new);
73775 out_err:
73776 return err;
73777 @@ -2033,6 +2422,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73778 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73779 unsigned long addr, int new_below)
73780 {
73781 +
73782 +#ifdef CONFIG_PAX_SEGMEXEC
73783 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73784 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73785 + if (mm->map_count >= sysctl_max_map_count-1)
73786 + return -ENOMEM;
73787 + } else
73788 +#endif
73789 +
73790 if (mm->map_count >= sysctl_max_map_count)
73791 return -ENOMEM;
73792
73793 @@ -2044,11 +2442,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73794 * work. This now handles partial unmappings.
73795 * Jeremy Fitzhardinge <jeremy@goop.org>
73796 */
73797 +#ifdef CONFIG_PAX_SEGMEXEC
73798 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73799 {
73800 + int ret = __do_munmap(mm, start, len);
73801 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73802 + return ret;
73803 +
73804 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73805 +}
73806 +
73807 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73808 +#else
73809 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73810 +#endif
73811 +{
73812 unsigned long end;
73813 struct vm_area_struct *vma, *prev, *last;
73814
73815 + /*
73816 + * mm->mmap_sem is required to protect against another thread
73817 + * changing the mappings in case we sleep.
73818 + */
73819 + verify_mm_writelocked(mm);
73820 +
73821 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73822 return -EINVAL;
73823
73824 @@ -2123,6 +2540,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73825 /* Fix up all other VM information */
73826 remove_vma_list(mm, vma);
73827
73828 + track_exec_limit(mm, start, end, 0UL);
73829 +
73830 return 0;
73831 }
73832
73833 @@ -2131,6 +2550,13 @@ int vm_munmap(unsigned long start, size_t len)
73834 int ret;
73835 struct mm_struct *mm = current->mm;
73836
73837 +
73838 +#ifdef CONFIG_PAX_SEGMEXEC
73839 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73840 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
73841 + return -EINVAL;
73842 +#endif
73843 +
73844 down_write(&mm->mmap_sem);
73845 ret = do_munmap(mm, start, len);
73846 up_write(&mm->mmap_sem);
73847 @@ -2144,16 +2570,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73848 return vm_munmap(addr, len);
73849 }
73850
73851 -static inline void verify_mm_writelocked(struct mm_struct *mm)
73852 -{
73853 -#ifdef CONFIG_DEBUG_VM
73854 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73855 - WARN_ON(1);
73856 - up_read(&mm->mmap_sem);
73857 - }
73858 -#endif
73859 -}
73860 -
73861 /*
73862 * this is really a simplified "do_mmap". it only handles
73863 * anonymous maps. eventually we may be able to do some
73864 @@ -2167,6 +2583,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73865 struct rb_node ** rb_link, * rb_parent;
73866 pgoff_t pgoff = addr >> PAGE_SHIFT;
73867 int error;
73868 + unsigned long charged;
73869
73870 len = PAGE_ALIGN(len);
73871 if (!len)
73872 @@ -2174,16 +2591,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73873
73874 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73875
73876 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73877 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73878 + flags &= ~VM_EXEC;
73879 +
73880 +#ifdef CONFIG_PAX_MPROTECT
73881 + if (mm->pax_flags & MF_PAX_MPROTECT)
73882 + flags &= ~VM_MAYEXEC;
73883 +#endif
73884 +
73885 + }
73886 +#endif
73887 +
73888 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73889 if (error & ~PAGE_MASK)
73890 return error;
73891
73892 + charged = len >> PAGE_SHIFT;
73893 +
73894 /*
73895 * mlock MCL_FUTURE?
73896 */
73897 if (mm->def_flags & VM_LOCKED) {
73898 unsigned long locked, lock_limit;
73899 - locked = len >> PAGE_SHIFT;
73900 + locked = charged;
73901 locked += mm->locked_vm;
73902 lock_limit = rlimit(RLIMIT_MEMLOCK);
73903 lock_limit >>= PAGE_SHIFT;
73904 @@ -2200,22 +2631,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73905 /*
73906 * Clear old maps. this also does some error checking for us
73907 */
73908 - munmap_back:
73909 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73910 if (vma && vma->vm_start < addr + len) {
73911 if (do_munmap(mm, addr, len))
73912 return -ENOMEM;
73913 - goto munmap_back;
73914 - }
73915 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73916 + BUG_ON(vma && vma->vm_start < addr + len);
73917 + }
73918
73919 /* Check against address space limits *after* clearing old maps... */
73920 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73921 + if (!may_expand_vm(mm, charged))
73922 return -ENOMEM;
73923
73924 if (mm->map_count > sysctl_max_map_count)
73925 return -ENOMEM;
73926
73927 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
73928 + if (security_vm_enough_memory_mm(mm, charged))
73929 return -ENOMEM;
73930
73931 /* Can we just expand an old private anonymous mapping? */
73932 @@ -2229,7 +2660,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73933 */
73934 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73935 if (!vma) {
73936 - vm_unacct_memory(len >> PAGE_SHIFT);
73937 + vm_unacct_memory(charged);
73938 return -ENOMEM;
73939 }
73940
73941 @@ -2243,11 +2674,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73942 vma_link(mm, vma, prev, rb_link, rb_parent);
73943 out:
73944 perf_event_mmap(vma);
73945 - mm->total_vm += len >> PAGE_SHIFT;
73946 + mm->total_vm += charged;
73947 if (flags & VM_LOCKED) {
73948 if (!mlock_vma_pages_range(vma, addr, addr + len))
73949 - mm->locked_vm += (len >> PAGE_SHIFT);
73950 + mm->locked_vm += charged;
73951 }
73952 + track_exec_limit(mm, addr, addr + len, flags);
73953 return addr;
73954 }
73955
73956 @@ -2305,6 +2737,7 @@ void exit_mmap(struct mm_struct *mm)
73957 while (vma) {
73958 if (vma->vm_flags & VM_ACCOUNT)
73959 nr_accounted += vma_pages(vma);
73960 + vma->vm_mirror = NULL;
73961 vma = remove_vma(vma);
73962 }
73963 vm_unacct_memory(nr_accounted);
73964 @@ -2321,6 +2754,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73965 struct vm_area_struct * __vma, * prev;
73966 struct rb_node ** rb_link, * rb_parent;
73967
73968 +#ifdef CONFIG_PAX_SEGMEXEC
73969 + struct vm_area_struct *vma_m = NULL;
73970 +#endif
73971 +
73972 + if (security_mmap_addr(vma->vm_start))
73973 + return -EPERM;
73974 +
73975 /*
73976 * The vm_pgoff of a purely anonymous vma should be irrelevant
73977 * until its first write fault, when page's anon_vma and index
73978 @@ -2347,7 +2787,21 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73979 if (vma->vm_file && uprobe_mmap(vma))
73980 return -EINVAL;
73981
73982 +#ifdef CONFIG_PAX_SEGMEXEC
73983 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73984 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73985 + if (!vma_m)
73986 + return -ENOMEM;
73987 + }
73988 +#endif
73989 +
73990 vma_link(mm, vma, prev, rb_link, rb_parent);
73991 +
73992 +#ifdef CONFIG_PAX_SEGMEXEC
73993 + if (vma_m)
73994 + BUG_ON(pax_mirror_vma(vma_m, vma));
73995 +#endif
73996 +
73997 return 0;
73998 }
73999
74000 @@ -2366,6 +2820,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
74001 struct mempolicy *pol;
74002 bool faulted_in_anon_vma = true;
74003
74004 + BUG_ON(vma->vm_mirror);
74005 +
74006 /*
74007 * If anonymous vma has not yet been faulted, update new pgoff
74008 * to match new location, to increase its chance of merging.
74009 @@ -2437,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
74010 return NULL;
74011 }
74012
74013 +#ifdef CONFIG_PAX_SEGMEXEC
74014 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
74015 +{
74016 + struct vm_area_struct *prev_m;
74017 + struct rb_node **rb_link_m, *rb_parent_m;
74018 + struct mempolicy *pol_m;
74019 +
74020 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
74021 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
74022 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
74023 + *vma_m = *vma;
74024 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
74025 + if (anon_vma_clone(vma_m, vma))
74026 + return -ENOMEM;
74027 + pol_m = vma_policy(vma_m);
74028 + mpol_get(pol_m);
74029 + vma_set_policy(vma_m, pol_m);
74030 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
74031 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
74032 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
74033 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
74034 + if (vma_m->vm_file)
74035 + get_file(vma_m->vm_file);
74036 + if (vma_m->vm_ops && vma_m->vm_ops->open)
74037 + vma_m->vm_ops->open(vma_m);
74038 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
74039 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
74040 + vma_m->vm_mirror = vma;
74041 + vma->vm_mirror = vma_m;
74042 + return 0;
74043 +}
74044 +#endif
74045 +
74046 /*
74047 * Return true if the calling process may expand its vm space by the passed
74048 * number of pages
74049 @@ -2448,6 +2937,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
74050
74051 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
74052
74053 +#ifdef CONFIG_PAX_RANDMMAP
74054 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74055 + cur -= mm->brk_gap;
74056 +#endif
74057 +
74058 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
74059 if (cur + npages > lim)
74060 return 0;
74061 return 1;
74062 @@ -2518,6 +3013,22 @@ int install_special_mapping(struct mm_struct *mm,
74063 vma->vm_start = addr;
74064 vma->vm_end = addr + len;
74065
74066 +#ifdef CONFIG_PAX_MPROTECT
74067 + if (mm->pax_flags & MF_PAX_MPROTECT) {
74068 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
74069 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
74070 + return -EPERM;
74071 + if (!(vm_flags & VM_EXEC))
74072 + vm_flags &= ~VM_MAYEXEC;
74073 +#else
74074 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
74075 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
74076 +#endif
74077 + else
74078 + vm_flags &= ~VM_MAYWRITE;
74079 + }
74080 +#endif
74081 +
74082 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
74083 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
74084
74085 diff --git a/mm/mprotect.c b/mm/mprotect.c
74086 index a409926..8b32e6d 100644
74087 --- a/mm/mprotect.c
74088 +++ b/mm/mprotect.c
74089 @@ -23,10 +23,17 @@
74090 #include <linux/mmu_notifier.h>
74091 #include <linux/migrate.h>
74092 #include <linux/perf_event.h>
74093 +
74094 +#ifdef CONFIG_PAX_MPROTECT
74095 +#include <linux/elf.h>
74096 +#include <linux/binfmts.h>
74097 +#endif
74098 +
74099 #include <asm/uaccess.h>
74100 #include <asm/pgtable.h>
74101 #include <asm/cacheflush.h>
74102 #include <asm/tlbflush.h>
74103 +#include <asm/mmu_context.h>
74104
74105 #ifndef pgprot_modify
74106 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
74107 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
74108 flush_tlb_range(vma, start, end);
74109 }
74110
74111 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74112 +/* called while holding the mmap semaphor for writing except stack expansion */
74113 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
74114 +{
74115 + unsigned long oldlimit, newlimit = 0UL;
74116 +
74117 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
74118 + return;
74119 +
74120 + spin_lock(&mm->page_table_lock);
74121 + oldlimit = mm->context.user_cs_limit;
74122 + if ((prot & VM_EXEC) && oldlimit < end)
74123 + /* USER_CS limit moved up */
74124 + newlimit = end;
74125 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
74126 + /* USER_CS limit moved down */
74127 + newlimit = start;
74128 +
74129 + if (newlimit) {
74130 + mm->context.user_cs_limit = newlimit;
74131 +
74132 +#ifdef CONFIG_SMP
74133 + wmb();
74134 + cpus_clear(mm->context.cpu_user_cs_mask);
74135 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
74136 +#endif
74137 +
74138 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
74139 + }
74140 + spin_unlock(&mm->page_table_lock);
74141 + if (newlimit == end) {
74142 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
74143 +
74144 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
74145 + if (is_vm_hugetlb_page(vma))
74146 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
74147 + else
74148 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
74149 + }
74150 +}
74151 +#endif
74152 +
74153 int
74154 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74155 unsigned long start, unsigned long end, unsigned long newflags)
74156 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74157 int error;
74158 int dirty_accountable = 0;
74159
74160 +#ifdef CONFIG_PAX_SEGMEXEC
74161 + struct vm_area_struct *vma_m = NULL;
74162 + unsigned long start_m, end_m;
74163 +
74164 + start_m = start + SEGMEXEC_TASK_SIZE;
74165 + end_m = end + SEGMEXEC_TASK_SIZE;
74166 +#endif
74167 +
74168 if (newflags == oldflags) {
74169 *pprev = vma;
74170 return 0;
74171 }
74172
74173 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
74174 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
74175 +
74176 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
74177 + return -ENOMEM;
74178 +
74179 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
74180 + return -ENOMEM;
74181 + }
74182 +
74183 /*
74184 * If we make a private mapping writable we increase our commit;
74185 * but (without finer accounting) cannot reduce our commit if we
74186 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74187 }
74188 }
74189
74190 +#ifdef CONFIG_PAX_SEGMEXEC
74191 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
74192 + if (start != vma->vm_start) {
74193 + error = split_vma(mm, vma, start, 1);
74194 + if (error)
74195 + goto fail;
74196 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
74197 + *pprev = (*pprev)->vm_next;
74198 + }
74199 +
74200 + if (end != vma->vm_end) {
74201 + error = split_vma(mm, vma, end, 0);
74202 + if (error)
74203 + goto fail;
74204 + }
74205 +
74206 + if (pax_find_mirror_vma(vma)) {
74207 + error = __do_munmap(mm, start_m, end_m - start_m);
74208 + if (error)
74209 + goto fail;
74210 + } else {
74211 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
74212 + if (!vma_m) {
74213 + error = -ENOMEM;
74214 + goto fail;
74215 + }
74216 + vma->vm_flags = newflags;
74217 + error = pax_mirror_vma(vma_m, vma);
74218 + if (error) {
74219 + vma->vm_flags = oldflags;
74220 + goto fail;
74221 + }
74222 + }
74223 + }
74224 +#endif
74225 +
74226 /*
74227 * First try to merge with previous and/or next vma.
74228 */
74229 @@ -204,9 +307,21 @@ success:
74230 * vm_flags and vm_page_prot are protected by the mmap_sem
74231 * held in write mode.
74232 */
74233 +
74234 +#ifdef CONFIG_PAX_SEGMEXEC
74235 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
74236 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
74237 +#endif
74238 +
74239 vma->vm_flags = newflags;
74240 +
74241 +#ifdef CONFIG_PAX_MPROTECT
74242 + if (mm->binfmt && mm->binfmt->handle_mprotect)
74243 + mm->binfmt->handle_mprotect(vma, newflags);
74244 +#endif
74245 +
74246 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
74247 - vm_get_page_prot(newflags));
74248 + vm_get_page_prot(vma->vm_flags));
74249
74250 if (vma_wants_writenotify(vma)) {
74251 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
74252 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74253 end = start + len;
74254 if (end <= start)
74255 return -ENOMEM;
74256 +
74257 +#ifdef CONFIG_PAX_SEGMEXEC
74258 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
74259 + if (end > SEGMEXEC_TASK_SIZE)
74260 + return -EINVAL;
74261 + } else
74262 +#endif
74263 +
74264 + if (end > TASK_SIZE)
74265 + return -EINVAL;
74266 +
74267 if (!arch_validate_prot(prot))
74268 return -EINVAL;
74269
74270 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74271 /*
74272 * Does the application expect PROT_READ to imply PROT_EXEC:
74273 */
74274 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
74275 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
74276 prot |= PROT_EXEC;
74277
74278 vm_flags = calc_vm_prot_bits(prot);
74279 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74280 if (start > vma->vm_start)
74281 prev = vma;
74282
74283 +#ifdef CONFIG_PAX_MPROTECT
74284 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
74285 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
74286 +#endif
74287 +
74288 for (nstart = start ; ; ) {
74289 unsigned long newflags;
74290
74291 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74292
74293 /* newflags >> 4 shift VM_MAY% in place of VM_% */
74294 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
74295 + if (prot & (PROT_WRITE | PROT_EXEC))
74296 + gr_log_rwxmprotect(vma->vm_file);
74297 +
74298 + error = -EACCES;
74299 + goto out;
74300 + }
74301 +
74302 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
74303 error = -EACCES;
74304 goto out;
74305 }
74306 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74307 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
74308 if (error)
74309 goto out;
74310 +
74311 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
74312 +
74313 nstart = tmp;
74314
74315 if (nstart < prev->vm_end)
74316 diff --git a/mm/mremap.c b/mm/mremap.c
74317 index 21fed20..6822658 100644
74318 --- a/mm/mremap.c
74319 +++ b/mm/mremap.c
74320 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
74321 continue;
74322 pte = ptep_get_and_clear(mm, old_addr, old_pte);
74323 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
74324 +
74325 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74326 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
74327 + pte = pte_exprotect(pte);
74328 +#endif
74329 +
74330 set_pte_at(mm, new_addr, new_pte, pte);
74331 }
74332
74333 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
74334 if (is_vm_hugetlb_page(vma))
74335 goto Einval;
74336
74337 +#ifdef CONFIG_PAX_SEGMEXEC
74338 + if (pax_find_mirror_vma(vma))
74339 + goto Einval;
74340 +#endif
74341 +
74342 /* We can't remap across vm area boundaries */
74343 if (old_len > vma->vm_end - addr)
74344 goto Efault;
74345 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
74346 unsigned long ret = -EINVAL;
74347 unsigned long charged = 0;
74348 unsigned long map_flags;
74349 + unsigned long pax_task_size = TASK_SIZE;
74350
74351 if (new_addr & ~PAGE_MASK)
74352 goto out;
74353
74354 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
74355 +#ifdef CONFIG_PAX_SEGMEXEC
74356 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
74357 + pax_task_size = SEGMEXEC_TASK_SIZE;
74358 +#endif
74359 +
74360 + pax_task_size -= PAGE_SIZE;
74361 +
74362 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
74363 goto out;
74364
74365 /* Check if the location we're moving into overlaps the
74366 * old location at all, and fail if it does.
74367 */
74368 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
74369 - goto out;
74370 -
74371 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
74372 + if (addr + old_len > new_addr && new_addr + new_len > addr)
74373 goto out;
74374
74375 ret = do_munmap(mm, new_addr, new_len);
74376 @@ -436,6 +452,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74377 struct vm_area_struct *vma;
74378 unsigned long ret = -EINVAL;
74379 unsigned long charged = 0;
74380 + unsigned long pax_task_size = TASK_SIZE;
74381
74382 down_write(&current->mm->mmap_sem);
74383
74384 @@ -456,6 +473,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74385 if (!new_len)
74386 goto out;
74387
74388 +#ifdef CONFIG_PAX_SEGMEXEC
74389 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
74390 + pax_task_size = SEGMEXEC_TASK_SIZE;
74391 +#endif
74392 +
74393 + pax_task_size -= PAGE_SIZE;
74394 +
74395 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
74396 + old_len > pax_task_size || addr > pax_task_size-old_len)
74397 + goto out;
74398 +
74399 if (flags & MREMAP_FIXED) {
74400 if (flags & MREMAP_MAYMOVE)
74401 ret = mremap_to(addr, old_len, new_addr, new_len);
74402 @@ -505,6 +533,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74403 addr + new_len);
74404 }
74405 ret = addr;
74406 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
74407 goto out;
74408 }
74409 }
74410 @@ -528,7 +557,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74411 goto out;
74412 }
74413
74414 + map_flags = vma->vm_flags;
74415 ret = move_vma(vma, addr, old_len, new_len, new_addr);
74416 + if (!(ret & ~PAGE_MASK)) {
74417 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
74418 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
74419 + }
74420 }
74421 out:
74422 if (ret & ~PAGE_MASK)
74423 diff --git a/mm/nommu.c b/mm/nommu.c
74424 index d4b0c10..ed421b5 100644
74425 --- a/mm/nommu.c
74426 +++ b/mm/nommu.c
74427 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
74428 int sysctl_overcommit_ratio = 50; /* default is 50% */
74429 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
74430 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
74431 -int heap_stack_gap = 0;
74432
74433 atomic_long_t mmap_pages_allocated;
74434
74435 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
74436 EXPORT_SYMBOL(find_vma);
74437
74438 /*
74439 - * find a VMA
74440 - * - we don't extend stack VMAs under NOMMU conditions
74441 - */
74442 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
74443 -{
74444 - return find_vma(mm, addr);
74445 -}
74446 -
74447 -/*
74448 * expand a stack to a given address
74449 * - not supported under NOMMU conditions
74450 */
74451 @@ -1551,6 +1541,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
74452
74453 /* most fields are the same, copy all, and then fixup */
74454 *new = *vma;
74455 + INIT_LIST_HEAD(&new->anon_vma_chain);
74456 *region = *vma->vm_region;
74457 new->vm_region = region;
74458
74459 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
74460 index 201b508..1fb51ca 100644
74461 --- a/mm/page_alloc.c
74462 +++ b/mm/page_alloc.c
74463 @@ -336,7 +336,7 @@ out:
74464 * This usage means that zero-order pages may not be compound.
74465 */
74466
74467 -static void free_compound_page(struct page *page)
74468 +void free_compound_page(struct page *page)
74469 {
74470 __free_pages_ok(page, compound_order(page));
74471 }
74472 @@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74473 int i;
74474 int bad = 0;
74475
74476 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
74477 + unsigned long index = 1UL << order;
74478 +#endif
74479 +
74480 trace_mm_page_free(page, order);
74481 kmemcheck_free_shadow(page, order);
74482
74483 @@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74484 debug_check_no_obj_freed(page_address(page),
74485 PAGE_SIZE << order);
74486 }
74487 +
74488 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
74489 + for (; index; --index)
74490 + sanitize_highpage(page + index - 1);
74491 +#endif
74492 +
74493 arch_free_page(page, order);
74494 kernel_map_pages(page, 1 << order, 0);
74495
74496 @@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
74497 arch_alloc_page(page, order);
74498 kernel_map_pages(page, 1 << order, 1);
74499
74500 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
74501 if (gfp_flags & __GFP_ZERO)
74502 prep_zero_page(page, order, gfp_flags);
74503 +#endif
74504
74505 if (order && (gfp_flags & __GFP_COMP))
74506 prep_compound_page(page, order);
74507 @@ -3579,7 +3591,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
74508 unsigned long pfn;
74509
74510 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
74511 +#ifdef CONFIG_X86_32
74512 + /* boot failures in VMware 8 on 32bit vanilla since
74513 + this change */
74514 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
74515 +#else
74516 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
74517 +#endif
74518 return 1;
74519 }
74520 return 0;
74521 diff --git a/mm/percpu.c b/mm/percpu.c
74522 index bb4be74..a43ea85 100644
74523 --- a/mm/percpu.c
74524 +++ b/mm/percpu.c
74525 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
74526 static unsigned int pcpu_high_unit_cpu __read_mostly;
74527
74528 /* the address of the first chunk which starts with the kernel static area */
74529 -void *pcpu_base_addr __read_mostly;
74530 +void *pcpu_base_addr __read_only;
74531 EXPORT_SYMBOL_GPL(pcpu_base_addr);
74532
74533 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
74534 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
74535 index 926b466..b23df53 100644
74536 --- a/mm/process_vm_access.c
74537 +++ b/mm/process_vm_access.c
74538 @@ -13,6 +13,7 @@
74539 #include <linux/uio.h>
74540 #include <linux/sched.h>
74541 #include <linux/highmem.h>
74542 +#include <linux/security.h>
74543 #include <linux/ptrace.h>
74544 #include <linux/slab.h>
74545 #include <linux/syscalls.h>
74546 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74547 size_t iov_l_curr_offset = 0;
74548 ssize_t iov_len;
74549
74550 + return -ENOSYS; // PaX: until properly audited
74551 +
74552 /*
74553 * Work out how many pages of struct pages we're going to need
74554 * when eventually calling get_user_pages
74555 */
74556 for (i = 0; i < riovcnt; i++) {
74557 iov_len = rvec[i].iov_len;
74558 - if (iov_len > 0) {
74559 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
74560 - + iov_len)
74561 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
74562 - / PAGE_SIZE + 1;
74563 - nr_pages = max(nr_pages, nr_pages_iov);
74564 - }
74565 + if (iov_len <= 0)
74566 + continue;
74567 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
74568 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
74569 + nr_pages = max(nr_pages, nr_pages_iov);
74570 }
74571
74572 if (nr_pages == 0)
74573 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74574 goto free_proc_pages;
74575 }
74576
74577 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
74578 + rc = -EPERM;
74579 + goto put_task_struct;
74580 + }
74581 +
74582 mm = mm_access(task, PTRACE_MODE_ATTACH);
74583 if (!mm || IS_ERR(mm)) {
74584 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
74585 diff --git a/mm/rmap.c b/mm/rmap.c
74586 index 0f3b7cd..c5652b6 100644
74587 --- a/mm/rmap.c
74588 +++ b/mm/rmap.c
74589 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74590 struct anon_vma *anon_vma = vma->anon_vma;
74591 struct anon_vma_chain *avc;
74592
74593 +#ifdef CONFIG_PAX_SEGMEXEC
74594 + struct anon_vma_chain *avc_m = NULL;
74595 +#endif
74596 +
74597 might_sleep();
74598 if (unlikely(!anon_vma)) {
74599 struct mm_struct *mm = vma->vm_mm;
74600 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74601 if (!avc)
74602 goto out_enomem;
74603
74604 +#ifdef CONFIG_PAX_SEGMEXEC
74605 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
74606 + if (!avc_m)
74607 + goto out_enomem_free_avc;
74608 +#endif
74609 +
74610 anon_vma = find_mergeable_anon_vma(vma);
74611 allocated = NULL;
74612 if (!anon_vma) {
74613 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74614 /* page_table_lock to protect against threads */
74615 spin_lock(&mm->page_table_lock);
74616 if (likely(!vma->anon_vma)) {
74617 +
74618 +#ifdef CONFIG_PAX_SEGMEXEC
74619 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74620 +
74621 + if (vma_m) {
74622 + BUG_ON(vma_m->anon_vma);
74623 + vma_m->anon_vma = anon_vma;
74624 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
74625 + avc_m = NULL;
74626 + }
74627 +#endif
74628 +
74629 vma->anon_vma = anon_vma;
74630 anon_vma_chain_link(vma, avc, anon_vma);
74631 allocated = NULL;
74632 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74633
74634 if (unlikely(allocated))
74635 put_anon_vma(allocated);
74636 +
74637 +#ifdef CONFIG_PAX_SEGMEXEC
74638 + if (unlikely(avc_m))
74639 + anon_vma_chain_free(avc_m);
74640 +#endif
74641 +
74642 if (unlikely(avc))
74643 anon_vma_chain_free(avc);
74644 }
74645 return 0;
74646
74647 out_enomem_free_avc:
74648 +
74649 +#ifdef CONFIG_PAX_SEGMEXEC
74650 + if (avc_m)
74651 + anon_vma_chain_free(avc_m);
74652 +#endif
74653 +
74654 anon_vma_chain_free(avc);
74655 out_enomem:
74656 return -ENOMEM;
74657 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
74658 * Attach the anon_vmas from src to dst.
74659 * Returns 0 on success, -ENOMEM on failure.
74660 */
74661 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
74662 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
74663 {
74664 struct anon_vma_chain *avc, *pavc;
74665 struct anon_vma *root = NULL;
74666 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
74667 * the corresponding VMA in the parent process is attached to.
74668 * Returns 0 on success, non-zero on failure.
74669 */
74670 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
74671 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
74672 {
74673 struct anon_vma_chain *avc;
74674 struct anon_vma *anon_vma;
74675 diff --git a/mm/shmem.c b/mm/shmem.c
74676 index bd10636..5c16d49 100644
74677 --- a/mm/shmem.c
74678 +++ b/mm/shmem.c
74679 @@ -31,7 +31,7 @@
74680 #include <linux/export.h>
74681 #include <linux/swap.h>
74682
74683 -static struct vfsmount *shm_mnt;
74684 +struct vfsmount *shm_mnt;
74685
74686 #ifdef CONFIG_SHMEM
74687 /*
74688 @@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
74689 #define BOGO_DIRENT_SIZE 20
74690
74691 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
74692 -#define SHORT_SYMLINK_LEN 128
74693 +#define SHORT_SYMLINK_LEN 64
74694
74695 struct shmem_xattr {
74696 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
74697 @@ -2590,8 +2590,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
74698 int err = -ENOMEM;
74699
74700 /* Round up to L1_CACHE_BYTES to resist false sharing */
74701 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74702 - L1_CACHE_BYTES), GFP_KERNEL);
74703 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74704 if (!sbinfo)
74705 return -ENOMEM;
74706
74707 diff --git a/mm/slab.c b/mm/slab.c
74708 index e901a36..9ff3f90 100644
74709 --- a/mm/slab.c
74710 +++ b/mm/slab.c
74711 @@ -153,7 +153,7 @@
74712
74713 /* Legal flag mask for kmem_cache_create(). */
74714 #if DEBUG
74715 -# define CREATE_MASK (SLAB_RED_ZONE | \
74716 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74717 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74718 SLAB_CACHE_DMA | \
74719 SLAB_STORE_USER | \
74720 @@ -161,7 +161,7 @@
74721 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74722 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74723 #else
74724 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74725 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74726 SLAB_CACHE_DMA | \
74727 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74728 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74729 @@ -290,7 +290,7 @@ struct kmem_list3 {
74730 * Need this for bootstrapping a per node allocator.
74731 */
74732 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
74733 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74734 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
74735 #define CACHE_CACHE 0
74736 #define SIZE_AC MAX_NUMNODES
74737 #define SIZE_L3 (2 * MAX_NUMNODES)
74738 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
74739 if ((x)->max_freeable < i) \
74740 (x)->max_freeable = i; \
74741 } while (0)
74742 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74743 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74744 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74745 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74746 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74747 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74748 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74749 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74750 #else
74751 #define STATS_INC_ACTIVE(x) do { } while (0)
74752 #define STATS_DEC_ACTIVE(x) do { } while (0)
74753 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
74754 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74755 */
74756 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74757 - const struct slab *slab, void *obj)
74758 + const struct slab *slab, const void *obj)
74759 {
74760 u32 offset = (obj - slab->s_mem);
74761 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
74762 @@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
74763 struct cache_names {
74764 char *name;
74765 char *name_dma;
74766 + char *name_usercopy;
74767 };
74768
74769 static struct cache_names __initdata cache_names[] = {
74770 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
74771 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
74772 #include <linux/kmalloc_sizes.h>
74773 - {NULL,}
74774 + {NULL}
74775 #undef CACHE
74776 };
74777
74778 @@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
74779 if (unlikely(gfpflags & GFP_DMA))
74780 return csizep->cs_dmacachep;
74781 #endif
74782 +
74783 +#ifdef CONFIG_PAX_USERCOPY_SLABS
74784 + if (unlikely(gfpflags & GFP_USERCOPY))
74785 + return csizep->cs_usercopycachep;
74786 +#endif
74787 +
74788 return csizep->cs_cachep;
74789 }
74790
74791 @@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
74792 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74793 sizes[INDEX_AC].cs_size,
74794 ARCH_KMALLOC_MINALIGN,
74795 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74796 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74797 NULL);
74798
74799 if (INDEX_AC != INDEX_L3) {
74800 @@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
74801 kmem_cache_create(names[INDEX_L3].name,
74802 sizes[INDEX_L3].cs_size,
74803 ARCH_KMALLOC_MINALIGN,
74804 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74805 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74806 NULL);
74807 }
74808
74809 @@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
74810 sizes->cs_cachep = kmem_cache_create(names->name,
74811 sizes->cs_size,
74812 ARCH_KMALLOC_MINALIGN,
74813 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74814 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74815 NULL);
74816 }
74817 #ifdef CONFIG_ZONE_DMA
74818 @@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
74819 SLAB_PANIC,
74820 NULL);
74821 #endif
74822 +
74823 +#ifdef CONFIG_PAX_USERCOPY_SLABS
74824 + sizes->cs_usercopycachep = kmem_cache_create(
74825 + names->name_usercopy,
74826 + sizes->cs_size,
74827 + ARCH_KMALLOC_MINALIGN,
74828 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74829 + NULL);
74830 +#endif
74831 +
74832 sizes++;
74833 names++;
74834 }
74835 @@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
74836 }
74837 /* cpu stats */
74838 {
74839 - unsigned long allochit = atomic_read(&cachep->allochit);
74840 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74841 - unsigned long freehit = atomic_read(&cachep->freehit);
74842 - unsigned long freemiss = atomic_read(&cachep->freemiss);
74843 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74844 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74845 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74846 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74847
74848 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74849 allochit, allocmiss, freehit, freemiss);
74850 @@ -4652,13 +4669,71 @@ static int __init slab_proc_init(void)
74851 {
74852 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
74853 #ifdef CONFIG_DEBUG_SLAB_LEAK
74854 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74855 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
74856 #endif
74857 return 0;
74858 }
74859 module_init(slab_proc_init);
74860 #endif
74861
74862 +bool is_usercopy_object(const void *ptr)
74863 +{
74864 + struct page *page;
74865 + struct kmem_cache *cachep;
74866 +
74867 + if (ZERO_OR_NULL_PTR(ptr))
74868 + return false;
74869 +
74870 + if (!slab_is_available())
74871 + return false;
74872 +
74873 + if (!virt_addr_valid(ptr))
74874 + return false;
74875 +
74876 + page = virt_to_head_page(ptr);
74877 +
74878 + if (!PageSlab(page))
74879 + return false;
74880 +
74881 + cachep = page_get_cache(page);
74882 + return cachep->flags & SLAB_USERCOPY;
74883 +}
74884 +
74885 +#ifdef CONFIG_PAX_USERCOPY
74886 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
74887 +{
74888 + struct page *page;
74889 + struct kmem_cache *cachep;
74890 + struct slab *slabp;
74891 + unsigned int objnr;
74892 + unsigned long offset;
74893 +
74894 + if (ZERO_OR_NULL_PTR(ptr))
74895 + return "<null>";
74896 +
74897 + if (!virt_addr_valid(ptr))
74898 + return NULL;
74899 +
74900 + page = virt_to_head_page(ptr);
74901 +
74902 + if (!PageSlab(page))
74903 + return NULL;
74904 +
74905 + cachep = page_get_cache(page);
74906 + if (!(cachep->flags & SLAB_USERCOPY))
74907 + return cachep->name;
74908 +
74909 + slabp = page_get_slab(page);
74910 + objnr = obj_to_index(cachep, slabp, ptr);
74911 + BUG_ON(objnr >= cachep->num);
74912 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74913 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74914 + return NULL;
74915 +
74916 + return cachep->name;
74917 +}
74918 +#endif
74919 +
74920 /**
74921 * ksize - get the actual amount of memory allocated for a given object
74922 * @objp: Pointer to the object
74923 diff --git a/mm/slob.c b/mm/slob.c
74924 index 8105be4..33e52d7 100644
74925 --- a/mm/slob.c
74926 +++ b/mm/slob.c
74927 @@ -29,7 +29,7 @@
74928 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74929 * alloc_pages() directly, allocating compound pages so the page order
74930 * does not have to be separately tracked, and also stores the exact
74931 - * allocation size in page->private so that it can be used to accurately
74932 + * allocation size in slob_page->size so that it can be used to accurately
74933 * provide ksize(). These objects are detected in kfree() because slob_page()
74934 * is false for them.
74935 *
74936 @@ -58,6 +58,7 @@
74937 */
74938
74939 #include <linux/kernel.h>
74940 +#include <linux/sched.h>
74941 #include <linux/slab.h>
74942 #include <linux/mm.h>
74943 #include <linux/swap.h> /* struct reclaim_state */
74944 @@ -100,9 +101,8 @@ struct slob_page {
74945 union {
74946 struct {
74947 unsigned long flags; /* mandatory */
74948 - atomic_t _count; /* mandatory */
74949 slobidx_t units; /* free units left in page */
74950 - unsigned long pad[2];
74951 + unsigned long size; /* size when >=PAGE_SIZE */
74952 slob_t *free; /* first free slob_t in page */
74953 struct list_head list; /* linked list of free pages */
74954 };
74955 @@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
74956 */
74957 static inline int is_slob_page(struct slob_page *sp)
74958 {
74959 - return PageSlab((struct page *)sp);
74960 + return PageSlab((struct page *)sp) && !sp->size;
74961 }
74962
74963 static inline void set_slob_page(struct slob_page *sp)
74964 @@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
74965
74966 static inline struct slob_page *slob_page(const void *addr)
74967 {
74968 - return (struct slob_page *)virt_to_page(addr);
74969 + return (struct slob_page *)virt_to_head_page(addr);
74970 }
74971
74972 /*
74973 @@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
74974 /*
74975 * Return the size of a slob block.
74976 */
74977 -static slobidx_t slob_units(slob_t *s)
74978 +static slobidx_t slob_units(const slob_t *s)
74979 {
74980 if (s->units > 0)
74981 return s->units;
74982 @@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
74983 /*
74984 * Return the next free slob block pointer after this one.
74985 */
74986 -static slob_t *slob_next(slob_t *s)
74987 +static slob_t *slob_next(const slob_t *s)
74988 {
74989 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74990 slobidx_t next;
74991 @@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
74992 /*
74993 * Returns true if s is the last free block in its page.
74994 */
74995 -static int slob_last(slob_t *s)
74996 +static int slob_last(const slob_t *s)
74997 {
74998 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74999 }
75000 @@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
75001 if (!page)
75002 return NULL;
75003
75004 + set_slob_page(page);
75005 return page_address(page);
75006 }
75007
75008 @@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
75009 if (!b)
75010 return NULL;
75011 sp = slob_page(b);
75012 - set_slob_page(sp);
75013
75014 spin_lock_irqsave(&slob_lock, flags);
75015 sp->units = SLOB_UNITS(PAGE_SIZE);
75016 sp->free = b;
75017 + sp->size = 0;
75018 INIT_LIST_HEAD(&sp->list);
75019 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
75020 set_slob_page_free(sp, slob_list);
75021 @@ -476,10 +477,9 @@ out:
75022 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
75023 */
75024
75025 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
75026 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
75027 {
75028 - unsigned int *m;
75029 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
75030 + slob_t *m;
75031 void *ret;
75032
75033 gfp &= gfp_allowed_mask;
75034 @@ -494,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
75035
75036 if (!m)
75037 return NULL;
75038 - *m = size;
75039 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
75040 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
75041 + m[0].units = size;
75042 + m[1].units = align;
75043 ret = (void *)m + align;
75044
75045 trace_kmalloc_node(_RET_IP_, ret,
75046 @@ -506,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
75047 gfp |= __GFP_COMP;
75048 ret = slob_new_pages(gfp, order, node);
75049 if (ret) {
75050 - struct page *page;
75051 - page = virt_to_page(ret);
75052 - page->private = size;
75053 + struct slob_page *sp;
75054 + sp = slob_page(ret);
75055 + sp->size = size;
75056 }
75057
75058 trace_kmalloc_node(_RET_IP_, ret,
75059 size, PAGE_SIZE << order, gfp, node);
75060 }
75061
75062 - kmemleak_alloc(ret, size, 1, gfp);
75063 + return ret;
75064 +}
75065 +
75066 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
75067 +{
75068 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
75069 + void *ret = __kmalloc_node_align(size, gfp, node, align);
75070 +
75071 + if (!ZERO_OR_NULL_PTR(ret))
75072 + kmemleak_alloc(ret, size, 1, gfp);
75073 return ret;
75074 }
75075 EXPORT_SYMBOL(__kmalloc_node);
75076 @@ -533,13 +545,88 @@ void kfree(const void *block)
75077 sp = slob_page(block);
75078 if (is_slob_page(sp)) {
75079 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
75080 - unsigned int *m = (unsigned int *)(block - align);
75081 - slob_free(m, *m + align);
75082 - } else
75083 + slob_t *m = (slob_t *)(block - align);
75084 + slob_free(m, m[0].units + align);
75085 + } else {
75086 + clear_slob_page(sp);
75087 + free_slob_page(sp);
75088 + sp->size = 0;
75089 put_page(&sp->page);
75090 + }
75091 }
75092 EXPORT_SYMBOL(kfree);
75093
75094 +bool is_usercopy_object(const void *ptr)
75095 +{
75096 + if (!slab_is_available())
75097 + return false;
75098 +
75099 + // PAX: TODO
75100 +
75101 + return false;
75102 +}
75103 +
75104 +#ifdef CONFIG_PAX_USERCOPY
75105 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
75106 +{
75107 + struct slob_page *sp;
75108 + const slob_t *free;
75109 + const void *base;
75110 + unsigned long flags;
75111 +
75112 + if (ZERO_OR_NULL_PTR(ptr))
75113 + return "<null>";
75114 +
75115 + if (!virt_addr_valid(ptr))
75116 + return NULL;
75117 +
75118 + sp = slob_page(ptr);
75119 + if (!PageSlab((struct page *)sp))
75120 + return NULL;
75121 +
75122 + if (sp->size) {
75123 + base = page_address(&sp->page);
75124 + if (base <= ptr && n <= sp->size - (ptr - base))
75125 + return NULL;
75126 + return "<slob>";
75127 + }
75128 +
75129 + /* some tricky double walking to find the chunk */
75130 + spin_lock_irqsave(&slob_lock, flags);
75131 + base = (void *)((unsigned long)ptr & PAGE_MASK);
75132 + free = sp->free;
75133 +
75134 + while ((void *)free <= ptr) {
75135 + base = free + slob_units(free);
75136 + free = slob_next(free);
75137 + }
75138 +
75139 + while (base < (void *)free) {
75140 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
75141 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
75142 + int offset;
75143 +
75144 + if (ptr < base + align)
75145 + break;
75146 +
75147 + offset = ptr - base - align;
75148 + if (offset >= m) {
75149 + base += size;
75150 + continue;
75151 + }
75152 +
75153 + if (n > m - offset)
75154 + break;
75155 +
75156 + spin_unlock_irqrestore(&slob_lock, flags);
75157 + return NULL;
75158 + }
75159 +
75160 + spin_unlock_irqrestore(&slob_lock, flags);
75161 + return "<slob>";
75162 +}
75163 +#endif
75164 +
75165 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
75166 size_t ksize(const void *block)
75167 {
75168 @@ -552,10 +639,10 @@ size_t ksize(const void *block)
75169 sp = slob_page(block);
75170 if (is_slob_page(sp)) {
75171 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
75172 - unsigned int *m = (unsigned int *)(block - align);
75173 - return SLOB_UNITS(*m) * SLOB_UNIT;
75174 + slob_t *m = (slob_t *)(block - align);
75175 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
75176 } else
75177 - return sp->page.private;
75178 + return sp->size;
75179 }
75180 EXPORT_SYMBOL(ksize);
75181
75182 @@ -571,8 +658,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75183 {
75184 struct kmem_cache *c;
75185
75186 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75187 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
75188 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
75189 +#else
75190 c = slob_alloc(sizeof(struct kmem_cache),
75191 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
75192 +#endif
75193
75194 if (c) {
75195 c->name = name;
75196 @@ -614,17 +706,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
75197
75198 lockdep_trace_alloc(flags);
75199
75200 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75201 + b = __kmalloc_node_align(c->size, flags, node, c->align);
75202 +#else
75203 if (c->size < PAGE_SIZE) {
75204 b = slob_alloc(c->size, flags, c->align, node);
75205 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
75206 SLOB_UNITS(c->size) * SLOB_UNIT,
75207 flags, node);
75208 } else {
75209 + struct slob_page *sp;
75210 +
75211 b = slob_new_pages(flags, get_order(c->size), node);
75212 + sp = slob_page(b);
75213 + sp->size = c->size;
75214 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
75215 PAGE_SIZE << get_order(c->size),
75216 flags, node);
75217 }
75218 +#endif
75219
75220 if (c->ctor)
75221 c->ctor(b);
75222 @@ -636,10 +736,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
75223
75224 static void __kmem_cache_free(void *b, int size)
75225 {
75226 - if (size < PAGE_SIZE)
75227 + struct slob_page *sp = slob_page(b);
75228 +
75229 + if (is_slob_page(sp))
75230 slob_free(b, size);
75231 - else
75232 + else {
75233 + clear_slob_page(sp);
75234 + free_slob_page(sp);
75235 + sp->size = 0;
75236 slob_free_pages(b, get_order(size));
75237 + }
75238 }
75239
75240 static void kmem_rcu_free(struct rcu_head *head)
75241 @@ -652,17 +758,31 @@ static void kmem_rcu_free(struct rcu_head *head)
75242
75243 void kmem_cache_free(struct kmem_cache *c, void *b)
75244 {
75245 + int size = c->size;
75246 +
75247 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75248 + if (size + c->align < PAGE_SIZE) {
75249 + size += c->align;
75250 + b -= c->align;
75251 + }
75252 +#endif
75253 +
75254 kmemleak_free_recursive(b, c->flags);
75255 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
75256 struct slob_rcu *slob_rcu;
75257 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
75258 - slob_rcu->size = c->size;
75259 + slob_rcu = b + (size - sizeof(struct slob_rcu));
75260 + slob_rcu->size = size;
75261 call_rcu(&slob_rcu->head, kmem_rcu_free);
75262 } else {
75263 - __kmem_cache_free(b, c->size);
75264 + __kmem_cache_free(b, size);
75265 }
75266
75267 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75268 + trace_kfree(_RET_IP_, b);
75269 +#else
75270 trace_kmem_cache_free(_RET_IP_, b);
75271 +#endif
75272 +
75273 }
75274 EXPORT_SYMBOL(kmem_cache_free);
75275
75276 diff --git a/mm/slub.c b/mm/slub.c
75277 index 8c691fa..2993c2b 100644
75278 --- a/mm/slub.c
75279 +++ b/mm/slub.c
75280 @@ -209,7 +209,7 @@ struct track {
75281
75282 enum track_item { TRACK_ALLOC, TRACK_FREE };
75283
75284 -#ifdef CONFIG_SYSFS
75285 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75286 static int sysfs_slab_add(struct kmem_cache *);
75287 static int sysfs_slab_alias(struct kmem_cache *, const char *);
75288 static void sysfs_slab_remove(struct kmem_cache *);
75289 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
75290 if (!t->addr)
75291 return;
75292
75293 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
75294 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
75295 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
75296 #ifdef CONFIG_STACKTRACE
75297 {
75298 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
75299
75300 page = virt_to_head_page(x);
75301
75302 + BUG_ON(!PageSlab(page));
75303 +
75304 slab_free(s, page, x, _RET_IP_);
75305
75306 trace_kmem_cache_free(_RET_IP_, x);
75307 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
75308 * Merge control. If this is set then no merging of slab caches will occur.
75309 * (Could be removed. This was introduced to pacify the merge skeptics.)
75310 */
75311 -static int slub_nomerge;
75312 +static int slub_nomerge = 1;
75313
75314 /*
75315 * Calculate the order of allocation given an slab object size.
75316 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
75317 else
75318 s->cpu_partial = 30;
75319
75320 - s->refcount = 1;
75321 + atomic_set(&s->refcount, 1);
75322 #ifdef CONFIG_NUMA
75323 s->remote_node_defrag_ratio = 1000;
75324 #endif
75325 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
75326 void kmem_cache_destroy(struct kmem_cache *s)
75327 {
75328 down_write(&slub_lock);
75329 - s->refcount--;
75330 - if (!s->refcount) {
75331 + if (atomic_dec_and_test(&s->refcount)) {
75332 list_del(&s->list);
75333 up_write(&slub_lock);
75334 if (kmem_cache_close(s)) {
75335 @@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
75336 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
75337 #endif
75338
75339 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75340 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
75341 +#endif
75342 +
75343 static int __init setup_slub_min_order(char *str)
75344 {
75345 get_option(&str, &slub_min_order);
75346 @@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
75347 return kmalloc_dma_caches[index];
75348
75349 #endif
75350 +
75351 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75352 + if (flags & SLAB_USERCOPY)
75353 + return kmalloc_usercopy_caches[index];
75354 +
75355 +#endif
75356 +
75357 return kmalloc_caches[index];
75358 }
75359
75360 @@ -3405,6 +3417,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
75361 EXPORT_SYMBOL(__kmalloc_node);
75362 #endif
75363
75364 +bool is_usercopy_object(const void *ptr)
75365 +{
75366 + struct page *page;
75367 + struct kmem_cache *s;
75368 +
75369 + if (ZERO_OR_NULL_PTR(ptr))
75370 + return false;
75371 +
75372 + if (!slab_is_available())
75373 + return false;
75374 +
75375 + if (!virt_addr_valid(ptr))
75376 + return false;
75377 +
75378 + page = virt_to_head_page(ptr);
75379 +
75380 + if (!PageSlab(page))
75381 + return false;
75382 +
75383 + s = page->slab;
75384 + return s->flags & SLAB_USERCOPY;
75385 +}
75386 +
75387 +#ifdef CONFIG_PAX_USERCOPY
75388 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
75389 +{
75390 + struct page *page;
75391 + struct kmem_cache *s;
75392 + unsigned long offset;
75393 +
75394 + if (ZERO_OR_NULL_PTR(ptr))
75395 + return "<null>";
75396 +
75397 + if (!virt_addr_valid(ptr))
75398 + return NULL;
75399 +
75400 + page = virt_to_head_page(ptr);
75401 +
75402 + if (!PageSlab(page))
75403 + return NULL;
75404 +
75405 + s = page->slab;
75406 + if (!(s->flags & SLAB_USERCOPY))
75407 + return s->name;
75408 +
75409 + offset = (ptr - page_address(page)) % s->size;
75410 + if (offset <= s->objsize && n <= s->objsize - offset)
75411 + return NULL;
75412 +
75413 + return s->name;
75414 +}
75415 +#endif
75416 +
75417 size_t ksize(const void *object)
75418 {
75419 struct page *page;
75420 @@ -3679,7 +3744,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
75421 int node;
75422
75423 list_add(&s->list, &slab_caches);
75424 - s->refcount = -1;
75425 + atomic_set(&s->refcount, -1);
75426
75427 for_each_node_state(node, N_NORMAL_MEMORY) {
75428 struct kmem_cache_node *n = get_node(s, node);
75429 @@ -3799,17 +3864,17 @@ void __init kmem_cache_init(void)
75430
75431 /* Caches that are not of the two-to-the-power-of size */
75432 if (KMALLOC_MIN_SIZE <= 32) {
75433 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
75434 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
75435 caches++;
75436 }
75437
75438 if (KMALLOC_MIN_SIZE <= 64) {
75439 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
75440 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
75441 caches++;
75442 }
75443
75444 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
75445 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
75446 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
75447 caches++;
75448 }
75449
75450 @@ -3851,6 +3916,22 @@ void __init kmem_cache_init(void)
75451 }
75452 }
75453 #endif
75454 +
75455 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75456 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
75457 + struct kmem_cache *s = kmalloc_caches[i];
75458 +
75459 + if (s && s->size) {
75460 + char *name = kasprintf(GFP_NOWAIT,
75461 + "usercopy-kmalloc-%d", s->objsize);
75462 +
75463 + BUG_ON(!name);
75464 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
75465 + s->objsize, SLAB_USERCOPY);
75466 + }
75467 + }
75468 +#endif
75469 +
75470 printk(KERN_INFO
75471 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
75472 " CPUs=%d, Nodes=%d\n",
75473 @@ -3877,7 +3958,7 @@ static int slab_unmergeable(struct kmem_cache *s)
75474 /*
75475 * We may have set a slab to be unmergeable during bootstrap.
75476 */
75477 - if (s->refcount < 0)
75478 + if (atomic_read(&s->refcount) < 0)
75479 return 1;
75480
75481 return 0;
75482 @@ -3936,7 +4017,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75483 down_write(&slub_lock);
75484 s = find_mergeable(size, align, flags, name, ctor);
75485 if (s) {
75486 - s->refcount++;
75487 + atomic_inc(&s->refcount);
75488 /*
75489 * Adjust the object sizes so that we clear
75490 * the complete object on kzalloc.
75491 @@ -3945,7 +4026,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75492 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
75493
75494 if (sysfs_slab_alias(s, name)) {
75495 - s->refcount--;
75496 + atomic_dec(&s->refcount);
75497 goto err;
75498 }
75499 up_write(&slub_lock);
75500 @@ -4074,7 +4155,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
75501 }
75502 #endif
75503
75504 -#ifdef CONFIG_SYSFS
75505 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75506 static int count_inuse(struct page *page)
75507 {
75508 return page->inuse;
75509 @@ -4461,12 +4542,12 @@ static void resiliency_test(void)
75510 validate_slab_cache(kmalloc_caches[9]);
75511 }
75512 #else
75513 -#ifdef CONFIG_SYSFS
75514 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75515 static void resiliency_test(void) {};
75516 #endif
75517 #endif
75518
75519 -#ifdef CONFIG_SYSFS
75520 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75521 enum slab_stat_type {
75522 SL_ALL, /* All slabs */
75523 SL_PARTIAL, /* Only partially allocated slabs */
75524 @@ -4709,7 +4790,7 @@ SLAB_ATTR_RO(ctor);
75525
75526 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
75527 {
75528 - return sprintf(buf, "%d\n", s->refcount - 1);
75529 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
75530 }
75531 SLAB_ATTR_RO(aliases);
75532
75533 @@ -5280,6 +5361,7 @@ static char *create_unique_id(struct kmem_cache *s)
75534 return name;
75535 }
75536
75537 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75538 static int sysfs_slab_add(struct kmem_cache *s)
75539 {
75540 int err;
75541 @@ -5342,6 +5424,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
75542 kobject_del(&s->kobj);
75543 kobject_put(&s->kobj);
75544 }
75545 +#endif
75546
75547 /*
75548 * Need to buffer aliases during bootup until sysfs becomes
75549 @@ -5355,6 +5438,7 @@ struct saved_alias {
75550
75551 static struct saved_alias *alias_list;
75552
75553 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75554 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75555 {
75556 struct saved_alias *al;
75557 @@ -5377,6 +5461,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75558 alias_list = al;
75559 return 0;
75560 }
75561 +#endif
75562
75563 static int __init slab_sysfs_init(void)
75564 {
75565 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
75566 index 1b7e22a..3fcd4f3 100644
75567 --- a/mm/sparse-vmemmap.c
75568 +++ b/mm/sparse-vmemmap.c
75569 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
75570 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75571 if (!p)
75572 return NULL;
75573 - pud_populate(&init_mm, pud, p);
75574 + pud_populate_kernel(&init_mm, pud, p);
75575 }
75576 return pud;
75577 }
75578 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
75579 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75580 if (!p)
75581 return NULL;
75582 - pgd_populate(&init_mm, pgd, p);
75583 + pgd_populate_kernel(&init_mm, pgd, p);
75584 }
75585 return pgd;
75586 }
75587 diff --git a/mm/swap.c b/mm/swap.c
75588 index 4e7e2ec..0c57830 100644
75589 --- a/mm/swap.c
75590 +++ b/mm/swap.c
75591 @@ -30,6 +30,7 @@
75592 #include <linux/backing-dev.h>
75593 #include <linux/memcontrol.h>
75594 #include <linux/gfp.h>
75595 +#include <linux/hugetlb.h>
75596
75597 #include "internal.h"
75598
75599 @@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
75600
75601 __page_cache_release(page);
75602 dtor = get_compound_page_dtor(page);
75603 + if (!PageHuge(page))
75604 + BUG_ON(dtor != free_compound_page);
75605 (*dtor)(page);
75606 }
75607
75608 diff --git a/mm/swapfile.c b/mm/swapfile.c
75609 index 71373d0..11fa7d9 100644
75610 --- a/mm/swapfile.c
75611 +++ b/mm/swapfile.c
75612 @@ -63,7 +63,7 @@ static DEFINE_MUTEX(swapon_mutex);
75613
75614 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
75615 /* Activity counter to indicate that a swapon or swapoff has occurred */
75616 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
75617 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
75618
75619 static inline unsigned char swap_count(unsigned char ent)
75620 {
75621 @@ -1663,7 +1663,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
75622 }
75623 filp_close(swap_file, NULL);
75624 err = 0;
75625 - atomic_inc(&proc_poll_event);
75626 + atomic_inc_unchecked(&proc_poll_event);
75627 wake_up_interruptible(&proc_poll_wait);
75628
75629 out_dput:
75630 @@ -1679,8 +1679,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
75631
75632 poll_wait(file, &proc_poll_wait, wait);
75633
75634 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
75635 - seq->poll_event = atomic_read(&proc_poll_event);
75636 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
75637 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75638 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
75639 }
75640
75641 @@ -1778,7 +1778,7 @@ static int swaps_open(struct inode *inode, struct file *file)
75642 return ret;
75643
75644 seq = file->private_data;
75645 - seq->poll_event = atomic_read(&proc_poll_event);
75646 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75647 return 0;
75648 }
75649
75650 @@ -2120,7 +2120,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
75651 (frontswap_map) ? "FS" : "");
75652
75653 mutex_unlock(&swapon_mutex);
75654 - atomic_inc(&proc_poll_event);
75655 + atomic_inc_unchecked(&proc_poll_event);
75656 wake_up_interruptible(&proc_poll_wait);
75657
75658 if (S_ISREG(inode->i_mode))
75659 diff --git a/mm/util.c b/mm/util.c
75660 index 8c7265a..c96d884 100644
75661 --- a/mm/util.c
75662 +++ b/mm/util.c
75663 @@ -285,6 +285,12 @@ done:
75664 void arch_pick_mmap_layout(struct mm_struct *mm)
75665 {
75666 mm->mmap_base = TASK_UNMAPPED_BASE;
75667 +
75668 +#ifdef CONFIG_PAX_RANDMMAP
75669 + if (mm->pax_flags & MF_PAX_RANDMMAP)
75670 + mm->mmap_base += mm->delta_mmap;
75671 +#endif
75672 +
75673 mm->get_unmapped_area = arch_get_unmapped_area;
75674 mm->unmap_area = arch_unmap_area;
75675 }
75676 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
75677 index 2aad499..8aad8b1 100644
75678 --- a/mm/vmalloc.c
75679 +++ b/mm/vmalloc.c
75680 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
75681
75682 pte = pte_offset_kernel(pmd, addr);
75683 do {
75684 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75685 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75686 +
75687 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75688 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75689 + BUG_ON(!pte_exec(*pte));
75690 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75691 + continue;
75692 + }
75693 +#endif
75694 +
75695 + {
75696 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75697 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75698 + }
75699 } while (pte++, addr += PAGE_SIZE, addr != end);
75700 }
75701
75702 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75703 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75704 {
75705 pte_t *pte;
75706 + int ret = -ENOMEM;
75707
75708 /*
75709 * nr is a running index into the array which helps higher level
75710 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75711 pte = pte_alloc_kernel(pmd, addr);
75712 if (!pte)
75713 return -ENOMEM;
75714 +
75715 + pax_open_kernel();
75716 do {
75717 struct page *page = pages[*nr];
75718
75719 - if (WARN_ON(!pte_none(*pte)))
75720 - return -EBUSY;
75721 - if (WARN_ON(!page))
75722 - return -ENOMEM;
75723 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75724 + if (pgprot_val(prot) & _PAGE_NX)
75725 +#endif
75726 +
75727 + if (WARN_ON(!pte_none(*pte))) {
75728 + ret = -EBUSY;
75729 + goto out;
75730 + }
75731 + if (WARN_ON(!page)) {
75732 + ret = -ENOMEM;
75733 + goto out;
75734 + }
75735 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75736 (*nr)++;
75737 } while (pte++, addr += PAGE_SIZE, addr != end);
75738 - return 0;
75739 + ret = 0;
75740 +out:
75741 + pax_close_kernel();
75742 + return ret;
75743 }
75744
75745 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75746 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75747 pmd_t *pmd;
75748 unsigned long next;
75749
75750 - pmd = pmd_alloc(&init_mm, pud, addr);
75751 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
75752 if (!pmd)
75753 return -ENOMEM;
75754 do {
75755 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
75756 pud_t *pud;
75757 unsigned long next;
75758
75759 - pud = pud_alloc(&init_mm, pgd, addr);
75760 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
75761 if (!pud)
75762 return -ENOMEM;
75763 do {
75764 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
75765 * and fall back on vmalloc() if that fails. Others
75766 * just put it in the vmalloc space.
75767 */
75768 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75769 +#ifdef CONFIG_MODULES
75770 +#ifdef MODULES_VADDR
75771 unsigned long addr = (unsigned long)x;
75772 if (addr >= MODULES_VADDR && addr < MODULES_END)
75773 return 1;
75774 #endif
75775 +
75776 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75777 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75778 + return 1;
75779 +#endif
75780 +
75781 +#endif
75782 +
75783 return is_vmalloc_addr(x);
75784 }
75785
75786 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
75787
75788 if (!pgd_none(*pgd)) {
75789 pud_t *pud = pud_offset(pgd, addr);
75790 +#ifdef CONFIG_X86
75791 + if (!pud_large(*pud))
75792 +#endif
75793 if (!pud_none(*pud)) {
75794 pmd_t *pmd = pmd_offset(pud, addr);
75795 +#ifdef CONFIG_X86
75796 + if (!pmd_large(*pmd))
75797 +#endif
75798 if (!pmd_none(*pmd)) {
75799 pte_t *ptep, pte;
75800
75801 @@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
75802 * Allocate a region of KVA of the specified size and alignment, within the
75803 * vstart and vend.
75804 */
75805 -static struct vmap_area *alloc_vmap_area(unsigned long size,
75806 +static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
75807 unsigned long align,
75808 unsigned long vstart, unsigned long vend,
75809 int node, gfp_t gfp_mask)
75810 @@ -1320,6 +1360,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
75811 struct vm_struct *area;
75812
75813 BUG_ON(in_interrupt());
75814 +
75815 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75816 + if (flags & VM_KERNEXEC) {
75817 + if (start != VMALLOC_START || end != VMALLOC_END)
75818 + return NULL;
75819 + start = (unsigned long)MODULES_EXEC_VADDR;
75820 + end = (unsigned long)MODULES_EXEC_END;
75821 + }
75822 +#endif
75823 +
75824 if (flags & VM_IOREMAP) {
75825 int bit = fls(size);
75826
75827 @@ -1552,6 +1602,11 @@ void *vmap(struct page **pages, unsigned int count,
75828 if (count > totalram_pages)
75829 return NULL;
75830
75831 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75832 + if (!(pgprot_val(prot) & _PAGE_NX))
75833 + flags |= VM_KERNEXEC;
75834 +#endif
75835 +
75836 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75837 __builtin_return_address(0));
75838 if (!area)
75839 @@ -1653,6 +1708,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
75840 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
75841 goto fail;
75842
75843 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75844 + if (!(pgprot_val(prot) & _PAGE_NX))
75845 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
75846 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
75847 + else
75848 +#endif
75849 +
75850 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
75851 start, end, node, gfp_mask, caller);
75852 if (!area)
75853 @@ -1826,10 +1888,9 @@ EXPORT_SYMBOL(vzalloc_node);
75854 * For tight control over page level allocator and protection flags
75855 * use __vmalloc() instead.
75856 */
75857 -
75858 void *vmalloc_exec(unsigned long size)
75859 {
75860 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75861 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
75862 -1, __builtin_return_address(0));
75863 }
75864
75865 @@ -2124,6 +2185,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
75866 unsigned long uaddr = vma->vm_start;
75867 unsigned long usize = vma->vm_end - vma->vm_start;
75868
75869 + BUG_ON(vma->vm_mirror);
75870 +
75871 if ((PAGE_SIZE-1) & (unsigned long)addr)
75872 return -EINVAL;
75873
75874 @@ -2560,7 +2623,7 @@ static int s_show(struct seq_file *m, void *p)
75875 {
75876 struct vm_struct *v = p;
75877
75878 - seq_printf(m, "0x%p-0x%p %7ld",
75879 + seq_printf(m, "0x%pK-0x%pK %7ld",
75880 v->addr, v->addr + v->size, v->size);
75881
75882 if (v->caller)
75883 diff --git a/mm/vmstat.c b/mm/vmstat.c
75884 index 1bbbbd9..ff35669 100644
75885 --- a/mm/vmstat.c
75886 +++ b/mm/vmstat.c
75887 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
75888 *
75889 * vm_stat contains the global counters
75890 */
75891 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75892 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75893 EXPORT_SYMBOL(vm_stat);
75894
75895 #ifdef CONFIG_SMP
75896 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
75897 v = p->vm_stat_diff[i];
75898 p->vm_stat_diff[i] = 0;
75899 local_irq_restore(flags);
75900 - atomic_long_add(v, &zone->vm_stat[i]);
75901 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75902 global_diff[i] += v;
75903 #ifdef CONFIG_NUMA
75904 /* 3 seconds idle till flush */
75905 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
75906
75907 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75908 if (global_diff[i])
75909 - atomic_long_add(global_diff[i], &vm_stat[i]);
75910 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75911 }
75912
75913 #endif
75914 @@ -1211,10 +1211,20 @@ static int __init setup_vmstat(void)
75915 start_cpu_timer(cpu);
75916 #endif
75917 #ifdef CONFIG_PROC_FS
75918 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75919 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75920 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75921 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75922 + {
75923 + mode_t gr_mode = S_IRUGO;
75924 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75925 + gr_mode = S_IRUSR;
75926 +#endif
75927 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75928 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75929 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75930 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75931 +#else
75932 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75933 +#endif
75934 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75935 + }
75936 #endif
75937 return 0;
75938 }
75939 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
75940 index 9096bcb..43ed7bb 100644
75941 --- a/net/8021q/vlan.c
75942 +++ b/net/8021q/vlan.c
75943 @@ -557,8 +557,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
75944 err = -EPERM;
75945 if (!capable(CAP_NET_ADMIN))
75946 break;
75947 - if ((args.u.name_type >= 0) &&
75948 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75949 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75950 struct vlan_net *vn;
75951
75952 vn = net_generic(net, vlan_net_id);
75953 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
75954 index 6449bae..8c1f454 100644
75955 --- a/net/9p/trans_fd.c
75956 +++ b/net/9p/trans_fd.c
75957 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
75958 oldfs = get_fs();
75959 set_fs(get_ds());
75960 /* The cast to a user pointer is valid due to the set_fs() */
75961 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75962 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75963 set_fs(oldfs);
75964
75965 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75966 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
75967 index 876fbe8..8bbea9f 100644
75968 --- a/net/atm/atm_misc.c
75969 +++ b/net/atm/atm_misc.c
75970 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
75971 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75972 return 1;
75973 atm_return(vcc, truesize);
75974 - atomic_inc(&vcc->stats->rx_drop);
75975 + atomic_inc_unchecked(&vcc->stats->rx_drop);
75976 return 0;
75977 }
75978 EXPORT_SYMBOL(atm_charge);
75979 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
75980 }
75981 }
75982 atm_return(vcc, guess);
75983 - atomic_inc(&vcc->stats->rx_drop);
75984 + atomic_inc_unchecked(&vcc->stats->rx_drop);
75985 return NULL;
75986 }
75987 EXPORT_SYMBOL(atm_alloc_charge);
75988 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
75989
75990 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75991 {
75992 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75993 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75994 __SONET_ITEMS
75995 #undef __HANDLE_ITEM
75996 }
75997 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
75998
75999 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
76000 {
76001 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
76002 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
76003 __SONET_ITEMS
76004 #undef __HANDLE_ITEM
76005 }
76006 diff --git a/net/atm/common.c b/net/atm/common.c
76007 index b4b44db..0c0ad93 100644
76008 --- a/net/atm/common.c
76009 +++ b/net/atm/common.c
76010 @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
76011
76012 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
76013 return -ENOTCONN;
76014 + memset(&pvc, 0, sizeof(pvc));
76015 pvc.sap_family = AF_ATMPVC;
76016 pvc.sap_addr.itf = vcc->dev->number;
76017 pvc.sap_addr.vpi = vcc->vpi;
76018 diff --git a/net/atm/lec.h b/net/atm/lec.h
76019 index a86aff9..3a0d6f6 100644
76020 --- a/net/atm/lec.h
76021 +++ b/net/atm/lec.h
76022 @@ -48,7 +48,7 @@ struct lane2_ops {
76023 const u8 *tlvs, u32 sizeoftlvs);
76024 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
76025 const u8 *tlvs, u32 sizeoftlvs);
76026 -};
76027 +} __no_const;
76028
76029 /*
76030 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
76031 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
76032 index 0919a88..a23d54e 100644
76033 --- a/net/atm/mpc.h
76034 +++ b/net/atm/mpc.h
76035 @@ -33,7 +33,7 @@ struct mpoa_client {
76036 struct mpc_parameters parameters; /* parameters for this client */
76037
76038 const struct net_device_ops *old_ops;
76039 - struct net_device_ops new_ops;
76040 + net_device_ops_no_const new_ops;
76041 };
76042
76043
76044 diff --git a/net/atm/proc.c b/net/atm/proc.c
76045 index 0d020de..011c7bb 100644
76046 --- a/net/atm/proc.c
76047 +++ b/net/atm/proc.c
76048 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
76049 const struct k_atm_aal_stats *stats)
76050 {
76051 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
76052 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
76053 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
76054 - atomic_read(&stats->rx_drop));
76055 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
76056 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
76057 + atomic_read_unchecked(&stats->rx_drop));
76058 }
76059
76060 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
76061 diff --git a/net/atm/pvc.c b/net/atm/pvc.c
76062 index 3a73491..ae03240 100644
76063 --- a/net/atm/pvc.c
76064 +++ b/net/atm/pvc.c
76065 @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
76066 return -ENOTCONN;
76067 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
76068 addr = (struct sockaddr_atmpvc *)sockaddr;
76069 + memset(addr, 0, sizeof(*addr));
76070 addr->sap_family = AF_ATMPVC;
76071 addr->sap_addr.itf = vcc->dev->number;
76072 addr->sap_addr.vpi = vcc->vpi;
76073 diff --git a/net/atm/resources.c b/net/atm/resources.c
76074 index 23f45ce..c748f1a 100644
76075 --- a/net/atm/resources.c
76076 +++ b/net/atm/resources.c
76077 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
76078 static void copy_aal_stats(struct k_atm_aal_stats *from,
76079 struct atm_aal_stats *to)
76080 {
76081 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
76082 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
76083 __AAL_STAT_ITEMS
76084 #undef __HANDLE_ITEM
76085 }
76086 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
76087 static void subtract_aal_stats(struct k_atm_aal_stats *from,
76088 struct atm_aal_stats *to)
76089 {
76090 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
76091 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
76092 __AAL_STAT_ITEMS
76093 #undef __HANDLE_ITEM
76094 }
76095 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
76096 index dc53798..dc66333 100644
76097 --- a/net/batman-adv/bat_iv_ogm.c
76098 +++ b/net/batman-adv/bat_iv_ogm.c
76099 @@ -63,7 +63,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
76100
76101 /* randomize initial seqno to avoid collision */
76102 get_random_bytes(&random_seqno, sizeof(random_seqno));
76103 - atomic_set(&hard_iface->seqno, random_seqno);
76104 + atomic_set_unchecked(&hard_iface->seqno, random_seqno);
76105
76106 hard_iface->packet_len = BATMAN_OGM_HLEN;
76107 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
76108 @@ -572,7 +572,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
76109
76110 /* change sequence number to network order */
76111 batman_ogm_packet->seqno =
76112 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
76113 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
76114
76115 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
76116 batman_ogm_packet->tt_crc = htons((uint16_t)
76117 @@ -592,7 +592,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
76118 else
76119 batman_ogm_packet->gw_flags = NO_FLAGS;
76120
76121 - atomic_inc(&hard_iface->seqno);
76122 + atomic_inc_unchecked(&hard_iface->seqno);
76123
76124 slide_own_bcast_window(hard_iface);
76125 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
76126 @@ -956,7 +956,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
76127 return;
76128
76129 /* could be changed by schedule_own_packet() */
76130 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
76131 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
76132
76133 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
76134
76135 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
76136 index dc334fa..766a01a 100644
76137 --- a/net/batman-adv/hard-interface.c
76138 +++ b/net/batman-adv/hard-interface.c
76139 @@ -321,7 +321,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
76140 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
76141 dev_add_pack(&hard_iface->batman_adv_ptype);
76142
76143 - atomic_set(&hard_iface->frag_seqno, 1);
76144 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
76145 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
76146 hard_iface->net_dev->name);
76147
76148 @@ -444,7 +444,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
76149 * This can't be called via a bat_priv callback because
76150 * we have no bat_priv yet.
76151 */
76152 - atomic_set(&hard_iface->seqno, 1);
76153 + atomic_set_unchecked(&hard_iface->seqno, 1);
76154 hard_iface->packet_buff = NULL;
76155
76156 return hard_iface;
76157 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
76158 index a0ec0e4..7beb587 100644
76159 --- a/net/batman-adv/soft-interface.c
76160 +++ b/net/batman-adv/soft-interface.c
76161 @@ -214,7 +214,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
76162
76163 /* set broadcast sequence number */
76164 bcast_packet->seqno =
76165 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
76166 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
76167
76168 add_bcast_packet_to_list(bat_priv, skb, 1);
76169
76170 @@ -390,7 +390,7 @@ struct net_device *softif_create(const char *name)
76171 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
76172
76173 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
76174 - atomic_set(&bat_priv->bcast_seqno, 1);
76175 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
76176 atomic_set(&bat_priv->ttvn, 0);
76177 atomic_set(&bat_priv->tt_local_changes, 0);
76178 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
76179 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
76180 index 61308e8..2e142b2 100644
76181 --- a/net/batman-adv/types.h
76182 +++ b/net/batman-adv/types.h
76183 @@ -38,8 +38,8 @@ struct hard_iface {
76184 int16_t if_num;
76185 char if_status;
76186 struct net_device *net_dev;
76187 - atomic_t seqno;
76188 - atomic_t frag_seqno;
76189 + atomic_unchecked_t seqno;
76190 + atomic_unchecked_t frag_seqno;
76191 unsigned char *packet_buff;
76192 int packet_len;
76193 struct kobject *hardif_obj;
76194 @@ -163,7 +163,7 @@ struct bat_priv {
76195 atomic_t orig_interval; /* uint */
76196 atomic_t hop_penalty; /* uint */
76197 atomic_t log_level; /* uint */
76198 - atomic_t bcast_seqno;
76199 + atomic_unchecked_t bcast_seqno;
76200 atomic_t bcast_queue_left;
76201 atomic_t batman_queue_left;
76202 atomic_t ttvn; /* translation table version number */
76203 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
76204 index 74175c2..32f8901 100644
76205 --- a/net/batman-adv/unicast.c
76206 +++ b/net/batman-adv/unicast.c
76207 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
76208 frag1->flags = UNI_FRAG_HEAD | large_tail;
76209 frag2->flags = large_tail;
76210
76211 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
76212 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
76213 frag1->seqno = htons(seqno - 1);
76214 frag2->seqno = htons(seqno);
76215
76216 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
76217 index 5914623..a4a2b19 100644
76218 --- a/net/bluetooth/hci_sock.c
76219 +++ b/net/bluetooth/hci_sock.c
76220 @@ -706,6 +706,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
76221 *addr_len = sizeof(*haddr);
76222 haddr->hci_family = AF_BLUETOOTH;
76223 haddr->hci_dev = hdev->id;
76224 + haddr->hci_channel= 0;
76225
76226 release_sock(sk);
76227 return 0;
76228 @@ -941,7 +942,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
76229 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
76230 }
76231
76232 - len = min_t(unsigned int, len, sizeof(uf));
76233 + len = min((size_t)len, sizeof(uf));
76234 if (copy_from_user(&uf, optval, len)) {
76235 err = -EFAULT;
76236 break;
76237 @@ -1016,6 +1017,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
76238 {
76239 struct hci_filter *f = &hci_pi(sk)->filter;
76240
76241 + memset(&uf, 0, sizeof(uf));
76242 uf.type_mask = f->type_mask;
76243 uf.opcode = f->opcode;
76244 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
76245 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
76246 index 7568a6f..ea3097b 100644
76247 --- a/net/bluetooth/l2cap_core.c
76248 +++ b/net/bluetooth/l2cap_core.c
76249 @@ -2799,8 +2799,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
76250 break;
76251
76252 case L2CAP_CONF_RFC:
76253 - if (olen == sizeof(rfc))
76254 - memcpy(&rfc, (void *)val, olen);
76255 + if (olen != sizeof(rfc))
76256 + break;
76257 +
76258 + memcpy(&rfc, (void *)val, olen);
76259
76260 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
76261 rfc.mode != chan->mode)
76262 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
76263 index 3bb1611..d2f89c1 100644
76264 --- a/net/bluetooth/l2cap_sock.c
76265 +++ b/net/bluetooth/l2cap_sock.c
76266 @@ -246,6 +246,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
76267
76268 BT_DBG("sock %p, sk %p", sock, sk);
76269
76270 + memset(la, 0, sizeof(struct sockaddr_l2));
76271 addr->sa_family = AF_BLUETOOTH;
76272 *len = sizeof(struct sockaddr_l2);
76273
76274 @@ -451,7 +452,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
76275 struct sock *sk = sock->sk;
76276 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
76277 struct l2cap_options opts;
76278 - int len, err = 0;
76279 + int err = 0;
76280 + size_t len = optlen;
76281 u32 opt;
76282
76283 BT_DBG("sk %p", sk);
76284 @@ -473,7 +475,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
76285 opts.max_tx = chan->max_tx;
76286 opts.txwin_size = chan->tx_win;
76287
76288 - len = min_t(unsigned int, sizeof(opts), optlen);
76289 + len = min(sizeof(opts), len);
76290 if (copy_from_user((char *) &opts, optval, len)) {
76291 err = -EFAULT;
76292 break;
76293 @@ -546,7 +548,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76294 struct bt_security sec;
76295 struct bt_power pwr;
76296 struct l2cap_conn *conn;
76297 - int len, err = 0;
76298 + int err = 0;
76299 + size_t len = optlen;
76300 u32 opt;
76301
76302 BT_DBG("sk %p", sk);
76303 @@ -569,7 +572,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76304
76305 sec.level = BT_SECURITY_LOW;
76306
76307 - len = min_t(unsigned int, sizeof(sec), optlen);
76308 + len = min(sizeof(sec), len);
76309 if (copy_from_user((char *) &sec, optval, len)) {
76310 err = -EFAULT;
76311 break;
76312 @@ -666,7 +669,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76313
76314 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
76315
76316 - len = min_t(unsigned int, sizeof(pwr), optlen);
76317 + len = min(sizeof(pwr), len);
76318 if (copy_from_user((char *) &pwr, optval, len)) {
76319 err = -EFAULT;
76320 break;
76321 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
76322 index e8707de..4b24eef 100644
76323 --- a/net/bluetooth/rfcomm/sock.c
76324 +++ b/net/bluetooth/rfcomm/sock.c
76325 @@ -547,6 +547,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
76326
76327 BT_DBG("sock %p, sk %p", sock, sk);
76328
76329 + memset(sa, 0, sizeof(*sa));
76330 sa->rc_family = AF_BLUETOOTH;
76331 sa->rc_channel = rfcomm_pi(sk)->channel;
76332 if (peer)
76333 @@ -685,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
76334 struct sock *sk = sock->sk;
76335 struct bt_security sec;
76336 int err = 0;
76337 - size_t len;
76338 + size_t len = optlen;
76339 u32 opt;
76340
76341 BT_DBG("sk %p", sk);
76342 @@ -707,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
76343
76344 sec.level = BT_SECURITY_LOW;
76345
76346 - len = min_t(unsigned int, sizeof(sec), optlen);
76347 + len = min(sizeof(sec), len);
76348 if (copy_from_user((char *) &sec, optval, len)) {
76349 err = -EFAULT;
76350 break;
76351 @@ -841,6 +842,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
76352 }
76353
76354 sec.level = rfcomm_pi(sk)->sec_level;
76355 + sec.key_size = 0;
76356
76357 len = min_t(unsigned int, len, sizeof(sec));
76358 if (copy_to_user(optval, (char *) &sec, len))
76359 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
76360 index d1820ff..4f8c8f6 100644
76361 --- a/net/bluetooth/rfcomm/tty.c
76362 +++ b/net/bluetooth/rfcomm/tty.c
76363 @@ -314,7 +314,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
76364 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
76365
76366 spin_lock_irqsave(&dev->port.lock, flags);
76367 - if (dev->port.count > 0) {
76368 + if (atomic_read(&dev->port.count) > 0) {
76369 spin_unlock_irqrestore(&dev->port.lock, flags);
76370 return;
76371 }
76372 @@ -461,7 +461,7 @@ static int rfcomm_get_dev_list(void __user *arg)
76373
76374 size = sizeof(*dl) + dev_num * sizeof(*di);
76375
76376 - dl = kmalloc(size, GFP_KERNEL);
76377 + dl = kzalloc(size, GFP_KERNEL);
76378 if (!dl)
76379 return -ENOMEM;
76380
76381 @@ -669,10 +669,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
76382 return -ENODEV;
76383
76384 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
76385 - dev->channel, dev->port.count);
76386 + dev->channel, atomic_read(&dev->port.count));
76387
76388 spin_lock_irqsave(&dev->port.lock, flags);
76389 - if (++dev->port.count > 1) {
76390 + if (atomic_inc_return(&dev->port.count) > 1) {
76391 spin_unlock_irqrestore(&dev->port.lock, flags);
76392 return 0;
76393 }
76394 @@ -737,10 +737,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
76395 return;
76396
76397 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
76398 - dev->port.count);
76399 + atomic_read(&dev->port.count));
76400
76401 spin_lock_irqsave(&dev->port.lock, flags);
76402 - if (!--dev->port.count) {
76403 + if (!atomic_dec_return(&dev->port.count)) {
76404 spin_unlock_irqrestore(&dev->port.lock, flags);
76405 if (dev->tty_dev->parent)
76406 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
76407 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
76408 index 5fe2ff3..121d696 100644
76409 --- a/net/bridge/netfilter/ebtables.c
76410 +++ b/net/bridge/netfilter/ebtables.c
76411 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
76412 tmp.valid_hooks = t->table->valid_hooks;
76413 }
76414 mutex_unlock(&ebt_mutex);
76415 - if (copy_to_user(user, &tmp, *len) != 0){
76416 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
76417 BUGPRINT("c2u Didn't work\n");
76418 ret = -EFAULT;
76419 break;
76420 @@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
76421 goto out;
76422 tmp.valid_hooks = t->valid_hooks;
76423
76424 - if (copy_to_user(user, &tmp, *len) != 0) {
76425 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
76426 ret = -EFAULT;
76427 break;
76428 }
76429 @@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
76430 tmp.entries_size = t->table->entries_size;
76431 tmp.valid_hooks = t->table->valid_hooks;
76432
76433 - if (copy_to_user(user, &tmp, *len) != 0) {
76434 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
76435 ret = -EFAULT;
76436 break;
76437 }
76438 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
76439 index 047cd0e..461fd28 100644
76440 --- a/net/caif/cfctrl.c
76441 +++ b/net/caif/cfctrl.c
76442 @@ -10,6 +10,7 @@
76443 #include <linux/spinlock.h>
76444 #include <linux/slab.h>
76445 #include <linux/pkt_sched.h>
76446 +#include <linux/sched.h>
76447 #include <net/caif/caif_layer.h>
76448 #include <net/caif/cfpkt.h>
76449 #include <net/caif/cfctrl.h>
76450 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
76451 memset(&dev_info, 0, sizeof(dev_info));
76452 dev_info.id = 0xff;
76453 cfsrvl_init(&this->serv, 0, &dev_info, false);
76454 - atomic_set(&this->req_seq_no, 1);
76455 - atomic_set(&this->rsp_seq_no, 1);
76456 + atomic_set_unchecked(&this->req_seq_no, 1);
76457 + atomic_set_unchecked(&this->rsp_seq_no, 1);
76458 this->serv.layer.receive = cfctrl_recv;
76459 sprintf(this->serv.layer.name, "ctrl");
76460 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
76461 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
76462 struct cfctrl_request_info *req)
76463 {
76464 spin_lock_bh(&ctrl->info_list_lock);
76465 - atomic_inc(&ctrl->req_seq_no);
76466 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
76467 + atomic_inc_unchecked(&ctrl->req_seq_no);
76468 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
76469 list_add_tail(&req->list, &ctrl->list);
76470 spin_unlock_bh(&ctrl->info_list_lock);
76471 }
76472 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
76473 if (p != first)
76474 pr_warn("Requests are not received in order\n");
76475
76476 - atomic_set(&ctrl->rsp_seq_no,
76477 + atomic_set_unchecked(&ctrl->rsp_seq_no,
76478 p->sequence_no);
76479 list_del(&p->list);
76480 goto out;
76481 diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
76482 index 69771c0..e597733 100644
76483 --- a/net/caif/chnl_net.c
76484 +++ b/net/caif/chnl_net.c
76485 @@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
76486
76487 /* check the version of IP */
76488 ip_version = skb_header_pointer(skb, 0, 1, &buf);
76489 + if (!ip_version) {
76490 + kfree_skb(skb);
76491 + return -EINVAL;
76492 + }
76493
76494 switch (*ip_version >> 4) {
76495 case 4:
76496 diff --git a/net/can/gw.c b/net/can/gw.c
76497 index b41acf2..3affb3a 100644
76498 --- a/net/can/gw.c
76499 +++ b/net/can/gw.c
76500 @@ -96,7 +96,7 @@ struct cf_mod {
76501 struct {
76502 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
76503 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
76504 - } csumfunc;
76505 + } __no_const csumfunc;
76506 };
76507
76508
76509 diff --git a/net/compat.c b/net/compat.c
76510 index 74ed1d7..3695bd9 100644
76511 --- a/net/compat.c
76512 +++ b/net/compat.c
76513 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
76514 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
76515 __get_user(kmsg->msg_flags, &umsg->msg_flags))
76516 return -EFAULT;
76517 - kmsg->msg_name = compat_ptr(tmp1);
76518 - kmsg->msg_iov = compat_ptr(tmp2);
76519 - kmsg->msg_control = compat_ptr(tmp3);
76520 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
76521 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
76522 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
76523 return 0;
76524 }
76525
76526 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76527
76528 if (kern_msg->msg_namelen) {
76529 if (mode == VERIFY_READ) {
76530 - int err = move_addr_to_kernel(kern_msg->msg_name,
76531 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
76532 kern_msg->msg_namelen,
76533 kern_address);
76534 if (err < 0)
76535 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76536 kern_msg->msg_name = NULL;
76537
76538 tot_len = iov_from_user_compat_to_kern(kern_iov,
76539 - (struct compat_iovec __user *)kern_msg->msg_iov,
76540 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
76541 kern_msg->msg_iovlen);
76542 if (tot_len >= 0)
76543 kern_msg->msg_iov = kern_iov;
76544 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76545
76546 #define CMSG_COMPAT_FIRSTHDR(msg) \
76547 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
76548 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
76549 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
76550 (struct compat_cmsghdr __user *)NULL)
76551
76552 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
76553 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
76554 (ucmlen) <= (unsigned long) \
76555 ((mhdr)->msg_controllen - \
76556 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
76557 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
76558
76559 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
76560 struct compat_cmsghdr __user *cmsg, int cmsg_len)
76561 {
76562 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
76563 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
76564 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
76565 msg->msg_controllen)
76566 return NULL;
76567 return (struct compat_cmsghdr __user *)ptr;
76568 @@ -219,7 +219,7 @@ Efault:
76569
76570 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
76571 {
76572 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76573 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76574 struct compat_cmsghdr cmhdr;
76575 struct compat_timeval ctv;
76576 struct compat_timespec cts[3];
76577 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
76578
76579 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
76580 {
76581 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76582 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76583 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
76584 int fdnum = scm->fp->count;
76585 struct file **fp = scm->fp->fp;
76586 @@ -364,7 +364,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
76587 return -EFAULT;
76588 old_fs = get_fs();
76589 set_fs(KERNEL_DS);
76590 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
76591 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
76592 set_fs(old_fs);
76593
76594 return err;
76595 @@ -425,7 +425,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
76596 len = sizeof(ktime);
76597 old_fs = get_fs();
76598 set_fs(KERNEL_DS);
76599 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
76600 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
76601 set_fs(old_fs);
76602
76603 if (!err) {
76604 @@ -568,7 +568,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76605 case MCAST_JOIN_GROUP:
76606 case MCAST_LEAVE_GROUP:
76607 {
76608 - struct compat_group_req __user *gr32 = (void *)optval;
76609 + struct compat_group_req __user *gr32 = (void __user *)optval;
76610 struct group_req __user *kgr =
76611 compat_alloc_user_space(sizeof(struct group_req));
76612 u32 interface;
76613 @@ -589,7 +589,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76614 case MCAST_BLOCK_SOURCE:
76615 case MCAST_UNBLOCK_SOURCE:
76616 {
76617 - struct compat_group_source_req __user *gsr32 = (void *)optval;
76618 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
76619 struct group_source_req __user *kgsr = compat_alloc_user_space(
76620 sizeof(struct group_source_req));
76621 u32 interface;
76622 @@ -610,7 +610,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76623 }
76624 case MCAST_MSFILTER:
76625 {
76626 - struct compat_group_filter __user *gf32 = (void *)optval;
76627 + struct compat_group_filter __user *gf32 = (void __user *)optval;
76628 struct group_filter __user *kgf;
76629 u32 interface, fmode, numsrc;
76630
76631 @@ -648,7 +648,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
76632 char __user *optval, int __user *optlen,
76633 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
76634 {
76635 - struct compat_group_filter __user *gf32 = (void *)optval;
76636 + struct compat_group_filter __user *gf32 = (void __user *)optval;
76637 struct group_filter __user *kgf;
76638 int __user *koptlen;
76639 u32 interface, fmode, numsrc;
76640 @@ -797,7 +797,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
76641
76642 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
76643 return -EINVAL;
76644 - if (copy_from_user(a, args, nas[call]))
76645 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
76646 return -EFAULT;
76647 a0 = a[0];
76648 a1 = a[1];
76649 diff --git a/net/core/datagram.c b/net/core/datagram.c
76650 index ae6acf6..d5c8f66 100644
76651 --- a/net/core/datagram.c
76652 +++ b/net/core/datagram.c
76653 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
76654 }
76655
76656 kfree_skb(skb);
76657 - atomic_inc(&sk->sk_drops);
76658 + atomic_inc_unchecked(&sk->sk_drops);
76659 sk_mem_reclaim_partial(sk);
76660
76661 return err;
76662 diff --git a/net/core/dev.c b/net/core/dev.c
76663 index a000840..566cee1 100644
76664 --- a/net/core/dev.c
76665 +++ b/net/core/dev.c
76666 @@ -1135,9 +1135,13 @@ void dev_load(struct net *net, const char *name)
76667 if (no_module && capable(CAP_NET_ADMIN))
76668 no_module = request_module("netdev-%s", name);
76669 if (no_module && capable(CAP_SYS_MODULE)) {
76670 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
76671 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
76672 +#else
76673 if (!request_module("%s", name))
76674 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
76675 name);
76676 +#endif
76677 }
76678 }
76679 EXPORT_SYMBOL(dev_load);
76680 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76681 {
76682 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
76683 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
76684 - atomic_long_inc(&dev->rx_dropped);
76685 + atomic_long_inc_unchecked(&dev->rx_dropped);
76686 kfree_skb(skb);
76687 return NET_RX_DROP;
76688 }
76689 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76690 nf_reset(skb);
76691
76692 if (unlikely(!is_skb_forwardable(dev, skb))) {
76693 - atomic_long_inc(&dev->rx_dropped);
76694 + atomic_long_inc_unchecked(&dev->rx_dropped);
76695 kfree_skb(skb);
76696 return NET_RX_DROP;
76697 }
76698 @@ -2041,7 +2045,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
76699
76700 struct dev_gso_cb {
76701 void (*destructor)(struct sk_buff *skb);
76702 -};
76703 +} __no_const;
76704
76705 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
76706
76707 @@ -2876,7 +2880,7 @@ enqueue:
76708
76709 local_irq_restore(flags);
76710
76711 - atomic_long_inc(&skb->dev->rx_dropped);
76712 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76713 kfree_skb(skb);
76714 return NET_RX_DROP;
76715 }
76716 @@ -2948,7 +2952,7 @@ int netif_rx_ni(struct sk_buff *skb)
76717 }
76718 EXPORT_SYMBOL(netif_rx_ni);
76719
76720 -static void net_tx_action(struct softirq_action *h)
76721 +static void net_tx_action(void)
76722 {
76723 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76724
76725 @@ -3235,7 +3239,7 @@ ncls:
76726 if (pt_prev) {
76727 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
76728 } else {
76729 - atomic_long_inc(&skb->dev->rx_dropped);
76730 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76731 kfree_skb(skb);
76732 /* Jamal, now you will not able to escape explaining
76733 * me how you were going to use this. :-)
76734 @@ -3800,7 +3804,7 @@ void netif_napi_del(struct napi_struct *napi)
76735 }
76736 EXPORT_SYMBOL(netif_napi_del);
76737
76738 -static void net_rx_action(struct softirq_action *h)
76739 +static void net_rx_action(void)
76740 {
76741 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76742 unsigned long time_limit = jiffies + 2;
76743 @@ -4270,8 +4274,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
76744 else
76745 seq_printf(seq, "%04x", ntohs(pt->type));
76746
76747 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76748 + seq_printf(seq, " %-8s %p\n",
76749 + pt->dev ? pt->dev->name : "", NULL);
76750 +#else
76751 seq_printf(seq, " %-8s %pF\n",
76752 pt->dev ? pt->dev->name : "", pt->func);
76753 +#endif
76754 }
76755
76756 return 0;
76757 @@ -5823,7 +5832,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
76758 } else {
76759 netdev_stats_to_stats64(storage, &dev->stats);
76760 }
76761 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
76762 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
76763 return storage;
76764 }
76765 EXPORT_SYMBOL(dev_get_stats);
76766 diff --git a/net/core/flow.c b/net/core/flow.c
76767 index e318c7e..168b1d0 100644
76768 --- a/net/core/flow.c
76769 +++ b/net/core/flow.c
76770 @@ -61,7 +61,7 @@ struct flow_cache {
76771 struct timer_list rnd_timer;
76772 };
76773
76774 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
76775 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
76776 EXPORT_SYMBOL(flow_cache_genid);
76777 static struct flow_cache flow_cache_global;
76778 static struct kmem_cache *flow_cachep __read_mostly;
76779 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
76780
76781 static int flow_entry_valid(struct flow_cache_entry *fle)
76782 {
76783 - if (atomic_read(&flow_cache_genid) != fle->genid)
76784 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
76785 return 0;
76786 if (fle->object && !fle->object->ops->check(fle->object))
76787 return 0;
76788 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
76789 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
76790 fcp->hash_count++;
76791 }
76792 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
76793 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
76794 flo = fle->object;
76795 if (!flo)
76796 goto ret_object;
76797 @@ -280,7 +280,7 @@ nocache:
76798 }
76799 flo = resolver(net, key, family, dir, flo, ctx);
76800 if (fle) {
76801 - fle->genid = atomic_read(&flow_cache_genid);
76802 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
76803 if (!IS_ERR(flo))
76804 fle->object = flo;
76805 else
76806 diff --git a/net/core/iovec.c b/net/core/iovec.c
76807 index 7e7aeb0..2a998cb 100644
76808 --- a/net/core/iovec.c
76809 +++ b/net/core/iovec.c
76810 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76811 if (m->msg_namelen) {
76812 if (mode == VERIFY_READ) {
76813 void __user *namep;
76814 - namep = (void __user __force *) m->msg_name;
76815 + namep = (void __force_user *) m->msg_name;
76816 err = move_addr_to_kernel(namep, m->msg_namelen,
76817 address);
76818 if (err < 0)
76819 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76820 }
76821
76822 size = m->msg_iovlen * sizeof(struct iovec);
76823 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
76824 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
76825 return -EFAULT;
76826
76827 m->msg_iov = iov;
76828 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
76829 index 6c50ac0..6b4c038 100644
76830 --- a/net/core/rtnetlink.c
76831 +++ b/net/core/rtnetlink.c
76832 @@ -58,7 +58,7 @@ struct rtnl_link {
76833 rtnl_doit_func doit;
76834 rtnl_dumpit_func dumpit;
76835 rtnl_calcit_func calcit;
76836 -};
76837 +} __no_const;
76838
76839 static DEFINE_MUTEX(rtnl_mutex);
76840
76841 diff --git a/net/core/scm.c b/net/core/scm.c
76842 index 611c5ef..88f6d6d 100644
76843 --- a/net/core/scm.c
76844 +++ b/net/core/scm.c
76845 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
76846 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76847 {
76848 struct cmsghdr __user *cm
76849 - = (__force struct cmsghdr __user *)msg->msg_control;
76850 + = (struct cmsghdr __force_user *)msg->msg_control;
76851 struct cmsghdr cmhdr;
76852 int cmlen = CMSG_LEN(len);
76853 int err;
76854 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76855 err = -EFAULT;
76856 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
76857 goto out;
76858 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
76859 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
76860 goto out;
76861 cmlen = CMSG_SPACE(len);
76862 if (msg->msg_controllen < cmlen)
76863 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
76864 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76865 {
76866 struct cmsghdr __user *cm
76867 - = (__force struct cmsghdr __user*)msg->msg_control;
76868 + = (struct cmsghdr __force_user *)msg->msg_control;
76869
76870 int fdmax = 0;
76871 int fdnum = scm->fp->count;
76872 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76873 if (fdnum < fdmax)
76874 fdmax = fdnum;
76875
76876 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
76877 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
76878 i++, cmfptr++)
76879 {
76880 int new_fd;
76881 diff --git a/net/core/sock.c b/net/core/sock.c
76882 index 9e5b71f..ee7aab4 100644
76883 --- a/net/core/sock.c
76884 +++ b/net/core/sock.c
76885 @@ -344,7 +344,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76886 struct sk_buff_head *list = &sk->sk_receive_queue;
76887
76888 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
76889 - atomic_inc(&sk->sk_drops);
76890 + atomic_inc_unchecked(&sk->sk_drops);
76891 trace_sock_rcvqueue_full(sk, skb);
76892 return -ENOMEM;
76893 }
76894 @@ -354,7 +354,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76895 return err;
76896
76897 if (!sk_rmem_schedule(sk, skb->truesize)) {
76898 - atomic_inc(&sk->sk_drops);
76899 + atomic_inc_unchecked(&sk->sk_drops);
76900 return -ENOBUFS;
76901 }
76902
76903 @@ -374,7 +374,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76904 skb_dst_force(skb);
76905
76906 spin_lock_irqsave(&list->lock, flags);
76907 - skb->dropcount = atomic_read(&sk->sk_drops);
76908 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76909 __skb_queue_tail(list, skb);
76910 spin_unlock_irqrestore(&list->lock, flags);
76911
76912 @@ -394,7 +394,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76913 skb->dev = NULL;
76914
76915 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
76916 - atomic_inc(&sk->sk_drops);
76917 + atomic_inc_unchecked(&sk->sk_drops);
76918 goto discard_and_relse;
76919 }
76920 if (nested)
76921 @@ -412,7 +412,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76922 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76923 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
76924 bh_unlock_sock(sk);
76925 - atomic_inc(&sk->sk_drops);
76926 + atomic_inc_unchecked(&sk->sk_drops);
76927 goto discard_and_relse;
76928 }
76929
76930 @@ -830,12 +830,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76931 struct timeval tm;
76932 } v;
76933
76934 - int lv = sizeof(int);
76935 - int len;
76936 + unsigned int lv = sizeof(int);
76937 + unsigned int len;
76938
76939 if (get_user(len, optlen))
76940 return -EFAULT;
76941 - if (len < 0)
76942 + if (len > INT_MAX)
76943 return -EINVAL;
76944
76945 memset(&v, 0, sizeof(v));
76946 @@ -976,18 +976,18 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76947 if (len > sizeof(peercred))
76948 len = sizeof(peercred);
76949 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76950 - if (copy_to_user(optval, &peercred, len))
76951 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76952 return -EFAULT;
76953 goto lenout;
76954 }
76955
76956 case SO_PEERNAME:
76957 {
76958 - char address[128];
76959 + char address[_K_SS_MAXSIZE];
76960
76961 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
76962 return -ENOTCONN;
76963 - if (lv < len)
76964 + if (lv < len || sizeof address < len)
76965 return -EINVAL;
76966 if (copy_to_user(optval, address, len))
76967 return -EFAULT;
76968 @@ -1035,7 +1035,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76969
76970 if (len > lv)
76971 len = lv;
76972 - if (copy_to_user(optval, &v, len))
76973 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
76974 return -EFAULT;
76975 lenout:
76976 if (put_user(len, optlen))
76977 @@ -2124,7 +2124,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
76978 */
76979 smp_wmb();
76980 atomic_set(&sk->sk_refcnt, 1);
76981 - atomic_set(&sk->sk_drops, 0);
76982 + atomic_set_unchecked(&sk->sk_drops, 0);
76983 }
76984 EXPORT_SYMBOL(sock_init_data);
76985
76986 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
76987 index 5fd1467..8b70900 100644
76988 --- a/net/core/sock_diag.c
76989 +++ b/net/core/sock_diag.c
76990 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
76991
76992 int sock_diag_check_cookie(void *sk, __u32 *cookie)
76993 {
76994 +#ifndef CONFIG_GRKERNSEC_HIDESYM
76995 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
76996 cookie[1] != INET_DIAG_NOCOOKIE) &&
76997 ((u32)(unsigned long)sk != cookie[0] ||
76998 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
76999 return -ESTALE;
77000 else
77001 +#endif
77002 return 0;
77003 }
77004 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
77005
77006 void sock_diag_save_cookie(void *sk, __u32 *cookie)
77007 {
77008 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77009 + cookie[0] = 0;
77010 + cookie[1] = 0;
77011 +#else
77012 cookie[0] = (u32)(unsigned long)sk;
77013 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
77014 +#endif
77015 }
77016 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
77017
77018 diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
77019 index 8c67bed..ce0d140 100644
77020 --- a/net/dccp/ccids/ccid3.c
77021 +++ b/net/dccp/ccids/ccid3.c
77022 @@ -531,6 +531,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
77023 case DCCP_SOCKOPT_CCID_TX_INFO:
77024 if (len < sizeof(tfrc))
77025 return -EINVAL;
77026 + memset(&tfrc, 0, sizeof(tfrc));
77027 tfrc.tfrctx_x = hc->tx_x;
77028 tfrc.tfrctx_x_recv = hc->tx_x_recv;
77029 tfrc.tfrctx_x_calc = hc->tx_x_calc;
77030 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
77031 index a55eecc..dd8428c 100644
77032 --- a/net/decnet/sysctl_net_decnet.c
77033 +++ b/net/decnet/sysctl_net_decnet.c
77034 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
77035
77036 if (len > *lenp) len = *lenp;
77037
77038 - if (copy_to_user(buffer, addr, len))
77039 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
77040 return -EFAULT;
77041
77042 *lenp = len;
77043 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
77044
77045 if (len > *lenp) len = *lenp;
77046
77047 - if (copy_to_user(buffer, devname, len))
77048 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
77049 return -EFAULT;
77050
77051 *lenp = len;
77052 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
77053 index 3854411..2201a94 100644
77054 --- a/net/ipv4/fib_frontend.c
77055 +++ b/net/ipv4/fib_frontend.c
77056 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
77057 #ifdef CONFIG_IP_ROUTE_MULTIPATH
77058 fib_sync_up(dev);
77059 #endif
77060 - atomic_inc(&net->ipv4.dev_addr_genid);
77061 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
77062 rt_cache_flush(dev_net(dev), -1);
77063 break;
77064 case NETDEV_DOWN:
77065 fib_del_ifaddr(ifa, NULL);
77066 - atomic_inc(&net->ipv4.dev_addr_genid);
77067 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
77068 if (ifa->ifa_dev->ifa_list == NULL) {
77069 /* Last address was deleted from this interface.
77070 * Disable IP.
77071 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
77072 #ifdef CONFIG_IP_ROUTE_MULTIPATH
77073 fib_sync_up(dev);
77074 #endif
77075 - atomic_inc(&net->ipv4.dev_addr_genid);
77076 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
77077 rt_cache_flush(dev_net(dev), -1);
77078 break;
77079 case NETDEV_DOWN:
77080 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
77081 index e5b7182..570a90e 100644
77082 --- a/net/ipv4/fib_semantics.c
77083 +++ b/net/ipv4/fib_semantics.c
77084 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
77085 nh->nh_saddr = inet_select_addr(nh->nh_dev,
77086 nh->nh_gw,
77087 nh->nh_parent->fib_scope);
77088 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
77089 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
77090
77091 return nh->nh_saddr;
77092 }
77093 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
77094 index 7880af9..70f92a3 100644
77095 --- a/net/ipv4/inet_hashtables.c
77096 +++ b/net/ipv4/inet_hashtables.c
77097 @@ -18,12 +18,15 @@
77098 #include <linux/sched.h>
77099 #include <linux/slab.h>
77100 #include <linux/wait.h>
77101 +#include <linux/security.h>
77102
77103 #include <net/inet_connection_sock.h>
77104 #include <net/inet_hashtables.h>
77105 #include <net/secure_seq.h>
77106 #include <net/ip.h>
77107
77108 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
77109 +
77110 /*
77111 * Allocate and initialize a new local port bind bucket.
77112 * The bindhash mutex for snum's hash chain must be held here.
77113 @@ -530,6 +533,8 @@ ok:
77114 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
77115 spin_unlock(&head->lock);
77116
77117 + gr_update_task_in_ip_table(current, inet_sk(sk));
77118 +
77119 if (tw) {
77120 inet_twsk_deschedule(tw, death_row);
77121 while (twrefcnt) {
77122 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
77123 index dfba343..c827d50 100644
77124 --- a/net/ipv4/inetpeer.c
77125 +++ b/net/ipv4/inetpeer.c
77126 @@ -487,8 +487,8 @@ relookup:
77127 if (p) {
77128 p->daddr = *daddr;
77129 atomic_set(&p->refcnt, 1);
77130 - atomic_set(&p->rid, 0);
77131 - atomic_set(&p->ip_id_count,
77132 + atomic_set_unchecked(&p->rid, 0);
77133 + atomic_set_unchecked(&p->ip_id_count,
77134 (daddr->family == AF_INET) ?
77135 secure_ip_id(daddr->addr.a4) :
77136 secure_ipv6_id(daddr->addr.a6));
77137 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
77138 index 9dbd3dd..0c59fb2 100644
77139 --- a/net/ipv4/ip_fragment.c
77140 +++ b/net/ipv4/ip_fragment.c
77141 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
77142 return 0;
77143
77144 start = qp->rid;
77145 - end = atomic_inc_return(&peer->rid);
77146 + end = atomic_inc_return_unchecked(&peer->rid);
77147 qp->rid = end;
77148
77149 rc = qp->q.fragments && (end - start) > max;
77150 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
77151 index 0d11f23..2bb3f64 100644
77152 --- a/net/ipv4/ip_sockglue.c
77153 +++ b/net/ipv4/ip_sockglue.c
77154 @@ -1142,7 +1142,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
77155 len = min_t(unsigned int, len, opt->optlen);
77156 if (put_user(len, optlen))
77157 return -EFAULT;
77158 - if (copy_to_user(optval, opt->__data, len))
77159 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
77160 + copy_to_user(optval, opt->__data, len))
77161 return -EFAULT;
77162 return 0;
77163 }
77164 @@ -1273,7 +1274,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
77165 if (sk->sk_type != SOCK_STREAM)
77166 return -ENOPROTOOPT;
77167
77168 - msg.msg_control = optval;
77169 + msg.msg_control = (void __force_kernel *)optval;
77170 msg.msg_controllen = len;
77171 msg.msg_flags = flags;
77172
77173 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
77174 index 67e8a6b..386764d 100644
77175 --- a/net/ipv4/ipconfig.c
77176 +++ b/net/ipv4/ipconfig.c
77177 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
77178
77179 mm_segment_t oldfs = get_fs();
77180 set_fs(get_ds());
77181 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
77182 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
77183 set_fs(oldfs);
77184 return res;
77185 }
77186 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
77187
77188 mm_segment_t oldfs = get_fs();
77189 set_fs(get_ds());
77190 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
77191 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
77192 set_fs(oldfs);
77193 return res;
77194 }
77195 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
77196
77197 mm_segment_t oldfs = get_fs();
77198 set_fs(get_ds());
77199 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
77200 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
77201 set_fs(oldfs);
77202 return res;
77203 }
77204 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
77205 index 97e61ea..cac1bbb 100644
77206 --- a/net/ipv4/netfilter/arp_tables.c
77207 +++ b/net/ipv4/netfilter/arp_tables.c
77208 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
77209 #endif
77210
77211 static int get_info(struct net *net, void __user *user,
77212 - const int *len, int compat)
77213 + int len, int compat)
77214 {
77215 char name[XT_TABLE_MAXNAMELEN];
77216 struct xt_table *t;
77217 int ret;
77218
77219 - if (*len != sizeof(struct arpt_getinfo)) {
77220 - duprintf("length %u != %Zu\n", *len,
77221 + if (len != sizeof(struct arpt_getinfo)) {
77222 + duprintf("length %u != %Zu\n", len,
77223 sizeof(struct arpt_getinfo));
77224 return -EINVAL;
77225 }
77226 @@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
77227 info.size = private->size;
77228 strcpy(info.name, name);
77229
77230 - if (copy_to_user(user, &info, *len) != 0)
77231 + if (copy_to_user(user, &info, len) != 0)
77232 ret = -EFAULT;
77233 else
77234 ret = 0;
77235 @@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
77236
77237 switch (cmd) {
77238 case ARPT_SO_GET_INFO:
77239 - ret = get_info(sock_net(sk), user, len, 1);
77240 + ret = get_info(sock_net(sk), user, *len, 1);
77241 break;
77242 case ARPT_SO_GET_ENTRIES:
77243 ret = compat_get_entries(sock_net(sk), user, len);
77244 @@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
77245
77246 switch (cmd) {
77247 case ARPT_SO_GET_INFO:
77248 - ret = get_info(sock_net(sk), user, len, 0);
77249 + ret = get_info(sock_net(sk), user, *len, 0);
77250 break;
77251
77252 case ARPT_SO_GET_ENTRIES:
77253 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
77254 index 170b1fd..6105b91 100644
77255 --- a/net/ipv4/netfilter/ip_tables.c
77256 +++ b/net/ipv4/netfilter/ip_tables.c
77257 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
77258 #endif
77259
77260 static int get_info(struct net *net, void __user *user,
77261 - const int *len, int compat)
77262 + int len, int compat)
77263 {
77264 char name[XT_TABLE_MAXNAMELEN];
77265 struct xt_table *t;
77266 int ret;
77267
77268 - if (*len != sizeof(struct ipt_getinfo)) {
77269 - duprintf("length %u != %zu\n", *len,
77270 + if (len != sizeof(struct ipt_getinfo)) {
77271 + duprintf("length %u != %zu\n", len,
77272 sizeof(struct ipt_getinfo));
77273 return -EINVAL;
77274 }
77275 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
77276 info.size = private->size;
77277 strcpy(info.name, name);
77278
77279 - if (copy_to_user(user, &info, *len) != 0)
77280 + if (copy_to_user(user, &info, len) != 0)
77281 ret = -EFAULT;
77282 else
77283 ret = 0;
77284 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77285
77286 switch (cmd) {
77287 case IPT_SO_GET_INFO:
77288 - ret = get_info(sock_net(sk), user, len, 1);
77289 + ret = get_info(sock_net(sk), user, *len, 1);
77290 break;
77291 case IPT_SO_GET_ENTRIES:
77292 ret = compat_get_entries(sock_net(sk), user, len);
77293 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77294
77295 switch (cmd) {
77296 case IPT_SO_GET_INFO:
77297 - ret = get_info(sock_net(sk), user, len, 0);
77298 + ret = get_info(sock_net(sk), user, *len, 0);
77299 break;
77300
77301 case IPT_SO_GET_ENTRIES:
77302 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
77303 index 2c00e8b..45b3bdd 100644
77304 --- a/net/ipv4/ping.c
77305 +++ b/net/ipv4/ping.c
77306 @@ -845,7 +845,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
77307 sk_rmem_alloc_get(sp),
77308 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77309 atomic_read(&sp->sk_refcnt), sp,
77310 - atomic_read(&sp->sk_drops), len);
77311 + atomic_read_unchecked(&sp->sk_drops), len);
77312 }
77313
77314 static int ping_seq_show(struct seq_file *seq, void *v)
77315 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
77316 index 4032b81..625143c 100644
77317 --- a/net/ipv4/raw.c
77318 +++ b/net/ipv4/raw.c
77319 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
77320 int raw_rcv(struct sock *sk, struct sk_buff *skb)
77321 {
77322 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
77323 - atomic_inc(&sk->sk_drops);
77324 + atomic_inc_unchecked(&sk->sk_drops);
77325 kfree_skb(skb);
77326 return NET_RX_DROP;
77327 }
77328 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
77329
77330 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
77331 {
77332 + struct icmp_filter filter;
77333 +
77334 if (optlen > sizeof(struct icmp_filter))
77335 optlen = sizeof(struct icmp_filter);
77336 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
77337 + if (copy_from_user(&filter, optval, optlen))
77338 return -EFAULT;
77339 + raw_sk(sk)->filter = filter;
77340 return 0;
77341 }
77342
77343 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
77344 {
77345 int len, ret = -EFAULT;
77346 + struct icmp_filter filter;
77347
77348 if (get_user(len, optlen))
77349 goto out;
77350 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
77351 if (len > sizeof(struct icmp_filter))
77352 len = sizeof(struct icmp_filter);
77353 ret = -EFAULT;
77354 - if (put_user(len, optlen) ||
77355 - copy_to_user(optval, &raw_sk(sk)->filter, len))
77356 + filter = raw_sk(sk)->filter;
77357 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
77358 goto out;
77359 ret = 0;
77360 out: return ret;
77361 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77362 sk_wmem_alloc_get(sp),
77363 sk_rmem_alloc_get(sp),
77364 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77365 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77366 + atomic_read(&sp->sk_refcnt),
77367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77368 + NULL,
77369 +#else
77370 + sp,
77371 +#endif
77372 + atomic_read_unchecked(&sp->sk_drops));
77373 }
77374
77375 static int raw_seq_show(struct seq_file *seq, void *v)
77376 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
77377 index 98b30d0..cfa3cf7 100644
77378 --- a/net/ipv4/route.c
77379 +++ b/net/ipv4/route.c
77380 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
77381
77382 static inline int rt_genid(struct net *net)
77383 {
77384 - return atomic_read(&net->ipv4.rt_genid);
77385 + return atomic_read_unchecked(&net->ipv4.rt_genid);
77386 }
77387
77388 #ifdef CONFIG_PROC_FS
77389 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
77390 unsigned char shuffle;
77391
77392 get_random_bytes(&shuffle, sizeof(shuffle));
77393 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
77394 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
77395 inetpeer_invalidate_tree(AF_INET);
77396 }
77397
77398 @@ -3011,7 +3011,7 @@ static int rt_fill_info(struct net *net,
77399 error = rt->dst.error;
77400 if (peer) {
77401 inet_peer_refcheck(rt->peer);
77402 - id = atomic_read(&peer->ip_id_count) & 0xffff;
77403 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
77404 if (peer->tcp_ts_stamp) {
77405 ts = peer->tcp_ts;
77406 tsage = get_seconds() - peer->tcp_ts_stamp;
77407 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
77408 index 05fe1f4..cc273dd 100644
77409 --- a/net/ipv4/tcp_input.c
77410 +++ b/net/ipv4/tcp_input.c
77411 @@ -4886,7 +4886,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
77412 * simplifies code)
77413 */
77414 static void
77415 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
77416 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
77417 struct sk_buff *head, struct sk_buff *tail,
77418 u32 start, u32 end)
77419 {
77420 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
77421 index c8d28c4..e40f75a 100644
77422 --- a/net/ipv4/tcp_ipv4.c
77423 +++ b/net/ipv4/tcp_ipv4.c
77424 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
77425 EXPORT_SYMBOL(sysctl_tcp_low_latency);
77426
77427
77428 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77429 +extern int grsec_enable_blackhole;
77430 +#endif
77431 +
77432 #ifdef CONFIG_TCP_MD5SIG
77433 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
77434 __be32 daddr, __be32 saddr, const struct tcphdr *th);
77435 @@ -1656,6 +1660,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
77436 return 0;
77437
77438 reset:
77439 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77440 + if (!grsec_enable_blackhole)
77441 +#endif
77442 tcp_v4_send_reset(rsk, skb);
77443 discard:
77444 kfree_skb(skb);
77445 @@ -1718,12 +1725,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
77446 TCP_SKB_CB(skb)->sacked = 0;
77447
77448 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77449 - if (!sk)
77450 + if (!sk) {
77451 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77452 + ret = 1;
77453 +#endif
77454 goto no_tcp_socket;
77455 -
77456 + }
77457 process:
77458 - if (sk->sk_state == TCP_TIME_WAIT)
77459 + if (sk->sk_state == TCP_TIME_WAIT) {
77460 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77461 + ret = 2;
77462 +#endif
77463 goto do_time_wait;
77464 + }
77465
77466 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
77467 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77468 @@ -1774,6 +1788,10 @@ no_tcp_socket:
77469 bad_packet:
77470 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77471 } else {
77472 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77473 + if (!grsec_enable_blackhole || (ret == 1 &&
77474 + (skb->dev->flags & IFF_LOOPBACK)))
77475 +#endif
77476 tcp_v4_send_reset(NULL, skb);
77477 }
77478
77479 @@ -2386,7 +2404,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
77480 0, /* non standard timer */
77481 0, /* open_requests have no inode */
77482 atomic_read(&sk->sk_refcnt),
77483 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77484 + NULL,
77485 +#else
77486 req,
77487 +#endif
77488 len);
77489 }
77490
77491 @@ -2436,7 +2458,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
77492 sock_i_uid(sk),
77493 icsk->icsk_probes_out,
77494 sock_i_ino(sk),
77495 - atomic_read(&sk->sk_refcnt), sk,
77496 + atomic_read(&sk->sk_refcnt),
77497 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77498 + NULL,
77499 +#else
77500 + sk,
77501 +#endif
77502 jiffies_to_clock_t(icsk->icsk_rto),
77503 jiffies_to_clock_t(icsk->icsk_ack.ato),
77504 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
77505 @@ -2464,7 +2491,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
77506 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
77507 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
77508 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77509 - atomic_read(&tw->tw_refcnt), tw, len);
77510 + atomic_read(&tw->tw_refcnt),
77511 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77512 + NULL,
77513 +#else
77514 + tw,
77515 +#endif
77516 + len);
77517 }
77518
77519 #define TMPSZ 150
77520 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
77521 index b85d9fe..4b0eed9 100644
77522 --- a/net/ipv4/tcp_minisocks.c
77523 +++ b/net/ipv4/tcp_minisocks.c
77524 @@ -27,6 +27,10 @@
77525 #include <net/inet_common.h>
77526 #include <net/xfrm.h>
77527
77528 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77529 +extern int grsec_enable_blackhole;
77530 +#endif
77531 +
77532 int sysctl_tcp_syncookies __read_mostly = 1;
77533 EXPORT_SYMBOL(sysctl_tcp_syncookies);
77534
77535 @@ -754,6 +758,10 @@ listen_overflow:
77536
77537 embryonic_reset:
77538 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
77539 +
77540 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77541 + if (!grsec_enable_blackhole)
77542 +#endif
77543 if (!(flg & TCP_FLAG_RST))
77544 req->rsk_ops->send_reset(sk, skb);
77545
77546 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
77547 index 4526fe6..1a34e43 100644
77548 --- a/net/ipv4/tcp_probe.c
77549 +++ b/net/ipv4/tcp_probe.c
77550 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
77551 if (cnt + width >= len)
77552 break;
77553
77554 - if (copy_to_user(buf + cnt, tbuf, width))
77555 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
77556 return -EFAULT;
77557 cnt += width;
77558 }
77559 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
77560 index e911e6c..d0a9356 100644
77561 --- a/net/ipv4/tcp_timer.c
77562 +++ b/net/ipv4/tcp_timer.c
77563 @@ -22,6 +22,10 @@
77564 #include <linux/gfp.h>
77565 #include <net/tcp.h>
77566
77567 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77568 +extern int grsec_lastack_retries;
77569 +#endif
77570 +
77571 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
77572 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
77573 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
77574 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
77575 }
77576 }
77577
77578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77579 + if ((sk->sk_state == TCP_LAST_ACK) &&
77580 + (grsec_lastack_retries > 0) &&
77581 + (grsec_lastack_retries < retry_until))
77582 + retry_until = grsec_lastack_retries;
77583 +#endif
77584 +
77585 if (retransmits_timed_out(sk, retry_until,
77586 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
77587 /* Has it gone just too far? */
77588 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
77589 index eaca736..60488ae 100644
77590 --- a/net/ipv4/udp.c
77591 +++ b/net/ipv4/udp.c
77592 @@ -87,6 +87,7 @@
77593 #include <linux/types.h>
77594 #include <linux/fcntl.h>
77595 #include <linux/module.h>
77596 +#include <linux/security.h>
77597 #include <linux/socket.h>
77598 #include <linux/sockios.h>
77599 #include <linux/igmp.h>
77600 @@ -110,6 +111,10 @@
77601 #include <linux/static_key.h>
77602 #include "udp_impl.h"
77603
77604 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77605 +extern int grsec_enable_blackhole;
77606 +#endif
77607 +
77608 struct udp_table udp_table __read_mostly;
77609 EXPORT_SYMBOL(udp_table);
77610
77611 @@ -568,6 +573,9 @@ found:
77612 return s;
77613 }
77614
77615 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
77616 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
77617 +
77618 /*
77619 * This routine is called by the ICMP module when it gets some
77620 * sort of error condition. If err < 0 then the socket should
77621 @@ -859,9 +867,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
77622 dport = usin->sin_port;
77623 if (dport == 0)
77624 return -EINVAL;
77625 +
77626 + err = gr_search_udp_sendmsg(sk, usin);
77627 + if (err)
77628 + return err;
77629 } else {
77630 if (sk->sk_state != TCP_ESTABLISHED)
77631 return -EDESTADDRREQ;
77632 +
77633 + err = gr_search_udp_sendmsg(sk, NULL);
77634 + if (err)
77635 + return err;
77636 +
77637 daddr = inet->inet_daddr;
77638 dport = inet->inet_dport;
77639 /* Open fast path for connected socket.
77640 @@ -1103,7 +1120,7 @@ static unsigned int first_packet_length(struct sock *sk)
77641 udp_lib_checksum_complete(skb)) {
77642 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77643 IS_UDPLITE(sk));
77644 - atomic_inc(&sk->sk_drops);
77645 + atomic_inc_unchecked(&sk->sk_drops);
77646 __skb_unlink(skb, rcvq);
77647 __skb_queue_tail(&list_kill, skb);
77648 }
77649 @@ -1189,6 +1206,10 @@ try_again:
77650 if (!skb)
77651 goto out;
77652
77653 + err = gr_search_udp_recvmsg(sk, skb);
77654 + if (err)
77655 + goto out_free;
77656 +
77657 ulen = skb->len - sizeof(struct udphdr);
77658 copied = len;
77659 if (copied > ulen)
77660 @@ -1498,7 +1519,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
77661
77662 drop:
77663 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77664 - atomic_inc(&sk->sk_drops);
77665 + atomic_inc_unchecked(&sk->sk_drops);
77666 kfree_skb(skb);
77667 return -1;
77668 }
77669 @@ -1517,7 +1538,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77670 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
77671
77672 if (!skb1) {
77673 - atomic_inc(&sk->sk_drops);
77674 + atomic_inc_unchecked(&sk->sk_drops);
77675 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
77676 IS_UDPLITE(sk));
77677 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77678 @@ -1686,6 +1707,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77679 goto csum_error;
77680
77681 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
77682 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77683 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77684 +#endif
77685 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
77686
77687 /*
77688 @@ -2104,8 +2128,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
77689 sk_wmem_alloc_get(sp),
77690 sk_rmem_alloc_get(sp),
77691 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77692 - atomic_read(&sp->sk_refcnt), sp,
77693 - atomic_read(&sp->sk_drops), len);
77694 + atomic_read(&sp->sk_refcnt),
77695 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77696 + NULL,
77697 +#else
77698 + sp,
77699 +#endif
77700 + atomic_read_unchecked(&sp->sk_drops), len);
77701 }
77702
77703 int udp4_seq_show(struct seq_file *seq, void *v)
77704 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
77705 index 8f6411c..5767579 100644
77706 --- a/net/ipv6/addrconf.c
77707 +++ b/net/ipv6/addrconf.c
77708 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
77709 p.iph.ihl = 5;
77710 p.iph.protocol = IPPROTO_IPV6;
77711 p.iph.ttl = 64;
77712 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
77713 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
77714
77715 if (ops->ndo_do_ioctl) {
77716 mm_segment_t oldfs = get_fs();
77717 diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
77718 index db1521f..ebb3314 100644
77719 --- a/net/ipv6/esp6.c
77720 +++ b/net/ipv6/esp6.c
77721 @@ -166,8 +166,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77722 struct esp_data *esp = x->data;
77723
77724 /* skb is pure payload to encrypt */
77725 - err = -ENOMEM;
77726 -
77727 aead = esp->aead;
77728 alen = crypto_aead_authsize(aead);
77729
77730 @@ -202,8 +200,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77731 }
77732
77733 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
77734 - if (!tmp)
77735 + if (!tmp) {
77736 + err = -ENOMEM;
77737 goto error;
77738 + }
77739
77740 seqhi = esp_tmp_seqhi(tmp);
77741 iv = esp_tmp_iv(aead, tmp, seqhilen);
77742 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
77743 index e6cee52..cf47476 100644
77744 --- a/net/ipv6/inet6_connection_sock.c
77745 +++ b/net/ipv6/inet6_connection_sock.c
77746 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
77747 #ifdef CONFIG_XFRM
77748 {
77749 struct rt6_info *rt = (struct rt6_info *)dst;
77750 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
77751 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
77752 }
77753 #endif
77754 }
77755 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
77756 #ifdef CONFIG_XFRM
77757 if (dst) {
77758 struct rt6_info *rt = (struct rt6_info *)dst;
77759 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
77760 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
77761 __sk_dst_reset(sk);
77762 dst = NULL;
77763 }
77764 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
77765 index ba6d13d..6899122 100644
77766 --- a/net/ipv6/ipv6_sockglue.c
77767 +++ b/net/ipv6/ipv6_sockglue.c
77768 @@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
77769 if (sk->sk_type != SOCK_STREAM)
77770 return -ENOPROTOOPT;
77771
77772 - msg.msg_control = optval;
77773 + msg.msg_control = (void __force_kernel *)optval;
77774 msg.msg_controllen = len;
77775 msg.msg_flags = flags;
77776
77777 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
77778 index d7cb045..8c0ded6 100644
77779 --- a/net/ipv6/netfilter/ip6_tables.c
77780 +++ b/net/ipv6/netfilter/ip6_tables.c
77781 @@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
77782 #endif
77783
77784 static int get_info(struct net *net, void __user *user,
77785 - const int *len, int compat)
77786 + int len, int compat)
77787 {
77788 char name[XT_TABLE_MAXNAMELEN];
77789 struct xt_table *t;
77790 int ret;
77791
77792 - if (*len != sizeof(struct ip6t_getinfo)) {
77793 - duprintf("length %u != %zu\n", *len,
77794 + if (len != sizeof(struct ip6t_getinfo)) {
77795 + duprintf("length %u != %zu\n", len,
77796 sizeof(struct ip6t_getinfo));
77797 return -EINVAL;
77798 }
77799 @@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
77800 info.size = private->size;
77801 strcpy(info.name, name);
77802
77803 - if (copy_to_user(user, &info, *len) != 0)
77804 + if (copy_to_user(user, &info, len) != 0)
77805 ret = -EFAULT;
77806 else
77807 ret = 0;
77808 @@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77809
77810 switch (cmd) {
77811 case IP6T_SO_GET_INFO:
77812 - ret = get_info(sock_net(sk), user, len, 1);
77813 + ret = get_info(sock_net(sk), user, *len, 1);
77814 break;
77815 case IP6T_SO_GET_ENTRIES:
77816 ret = compat_get_entries(sock_net(sk), user, len);
77817 @@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77818
77819 switch (cmd) {
77820 case IP6T_SO_GET_INFO:
77821 - ret = get_info(sock_net(sk), user, len, 0);
77822 + ret = get_info(sock_net(sk), user, *len, 0);
77823 break;
77824
77825 case IP6T_SO_GET_ENTRIES:
77826 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
77827 index 93d6983..8e54c4d 100644
77828 --- a/net/ipv6/raw.c
77829 +++ b/net/ipv6/raw.c
77830 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
77831 {
77832 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
77833 skb_checksum_complete(skb)) {
77834 - atomic_inc(&sk->sk_drops);
77835 + atomic_inc_unchecked(&sk->sk_drops);
77836 kfree_skb(skb);
77837 return NET_RX_DROP;
77838 }
77839 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77840 struct raw6_sock *rp = raw6_sk(sk);
77841
77842 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
77843 - atomic_inc(&sk->sk_drops);
77844 + atomic_inc_unchecked(&sk->sk_drops);
77845 kfree_skb(skb);
77846 return NET_RX_DROP;
77847 }
77848 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77849
77850 if (inet->hdrincl) {
77851 if (skb_checksum_complete(skb)) {
77852 - atomic_inc(&sk->sk_drops);
77853 + atomic_inc_unchecked(&sk->sk_drops);
77854 kfree_skb(skb);
77855 return NET_RX_DROP;
77856 }
77857 @@ -602,7 +602,7 @@ out:
77858 return err;
77859 }
77860
77861 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
77862 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
77863 struct flowi6 *fl6, struct dst_entry **dstp,
77864 unsigned int flags)
77865 {
77866 @@ -914,12 +914,15 @@ do_confirm:
77867 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
77868 char __user *optval, int optlen)
77869 {
77870 + struct icmp6_filter filter;
77871 +
77872 switch (optname) {
77873 case ICMPV6_FILTER:
77874 if (optlen > sizeof(struct icmp6_filter))
77875 optlen = sizeof(struct icmp6_filter);
77876 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
77877 + if (copy_from_user(&filter, optval, optlen))
77878 return -EFAULT;
77879 + raw6_sk(sk)->filter = filter;
77880 return 0;
77881 default:
77882 return -ENOPROTOOPT;
77883 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77884 char __user *optval, int __user *optlen)
77885 {
77886 int len;
77887 + struct icmp6_filter filter;
77888
77889 switch (optname) {
77890 case ICMPV6_FILTER:
77891 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77892 len = sizeof(struct icmp6_filter);
77893 if (put_user(len, optlen))
77894 return -EFAULT;
77895 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
77896 + filter = raw6_sk(sk)->filter;
77897 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
77898 return -EFAULT;
77899 return 0;
77900 default:
77901 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77902 0, 0L, 0,
77903 sock_i_uid(sp), 0,
77904 sock_i_ino(sp),
77905 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77906 + atomic_read(&sp->sk_refcnt),
77907 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77908 + NULL,
77909 +#else
77910 + sp,
77911 +#endif
77912 + atomic_read_unchecked(&sp->sk_drops));
77913 }
77914
77915 static int raw6_seq_show(struct seq_file *seq, void *v)
77916 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
77917 index 9df64a5..39875da 100644
77918 --- a/net/ipv6/tcp_ipv6.c
77919 +++ b/net/ipv6/tcp_ipv6.c
77920 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
77921 }
77922 #endif
77923
77924 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77925 +extern int grsec_enable_blackhole;
77926 +#endif
77927 +
77928 static void tcp_v6_hash(struct sock *sk)
77929 {
77930 if (sk->sk_state != TCP_CLOSE) {
77931 @@ -1544,6 +1548,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
77932 return 0;
77933
77934 reset:
77935 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77936 + if (!grsec_enable_blackhole)
77937 +#endif
77938 tcp_v6_send_reset(sk, skb);
77939 discard:
77940 if (opt_skb)
77941 @@ -1625,12 +1632,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
77942 TCP_SKB_CB(skb)->sacked = 0;
77943
77944 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77945 - if (!sk)
77946 + if (!sk) {
77947 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77948 + ret = 1;
77949 +#endif
77950 goto no_tcp_socket;
77951 + }
77952
77953 process:
77954 - if (sk->sk_state == TCP_TIME_WAIT)
77955 + if (sk->sk_state == TCP_TIME_WAIT) {
77956 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77957 + ret = 2;
77958 +#endif
77959 goto do_time_wait;
77960 + }
77961
77962 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
77963 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77964 @@ -1679,6 +1694,10 @@ no_tcp_socket:
77965 bad_packet:
77966 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77967 } else {
77968 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77969 + if (!grsec_enable_blackhole || (ret == 1 &&
77970 + (skb->dev->flags & IFF_LOOPBACK)))
77971 +#endif
77972 tcp_v6_send_reset(NULL, skb);
77973 }
77974
77975 @@ -1885,7 +1904,13 @@ static void get_openreq6(struct seq_file *seq,
77976 uid,
77977 0, /* non standard timer */
77978 0, /* open_requests have no inode */
77979 - 0, req);
77980 + 0,
77981 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77982 + NULL
77983 +#else
77984 + req
77985 +#endif
77986 + );
77987 }
77988
77989 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77990 @@ -1935,7 +1960,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77991 sock_i_uid(sp),
77992 icsk->icsk_probes_out,
77993 sock_i_ino(sp),
77994 - atomic_read(&sp->sk_refcnt), sp,
77995 + atomic_read(&sp->sk_refcnt),
77996 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77997 + NULL,
77998 +#else
77999 + sp,
78000 +#endif
78001 jiffies_to_clock_t(icsk->icsk_rto),
78002 jiffies_to_clock_t(icsk->icsk_ack.ato),
78003 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
78004 @@ -1970,7 +2000,13 @@ static void get_timewait6_sock(struct seq_file *seq,
78005 dest->s6_addr32[2], dest->s6_addr32[3], destp,
78006 tw->tw_substate, 0, 0,
78007 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
78008 - atomic_read(&tw->tw_refcnt), tw);
78009 + atomic_read(&tw->tw_refcnt),
78010 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78011 + NULL
78012 +#else
78013 + tw
78014 +#endif
78015 + );
78016 }
78017
78018 static int tcp6_seq_show(struct seq_file *seq, void *v)
78019 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
78020 index f05099f..ea613f0 100644
78021 --- a/net/ipv6/udp.c
78022 +++ b/net/ipv6/udp.c
78023 @@ -50,6 +50,10 @@
78024 #include <linux/seq_file.h>
78025 #include "udp_impl.h"
78026
78027 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78028 +extern int grsec_enable_blackhole;
78029 +#endif
78030 +
78031 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
78032 {
78033 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
78034 @@ -615,7 +619,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
78035 return rc;
78036 drop:
78037 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
78038 - atomic_inc(&sk->sk_drops);
78039 + atomic_inc_unchecked(&sk->sk_drops);
78040 kfree_skb(skb);
78041 return -1;
78042 }
78043 @@ -673,7 +677,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
78044 if (likely(skb1 == NULL))
78045 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
78046 if (!skb1) {
78047 - atomic_inc(&sk->sk_drops);
78048 + atomic_inc_unchecked(&sk->sk_drops);
78049 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
78050 IS_UDPLITE(sk));
78051 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
78052 @@ -844,6 +848,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
78053 goto discard;
78054
78055 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
78056 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78057 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
78058 +#endif
78059 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
78060
78061 kfree_skb(skb);
78062 @@ -1453,8 +1460,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
78063 0, 0L, 0,
78064 sock_i_uid(sp), 0,
78065 sock_i_ino(sp),
78066 - atomic_read(&sp->sk_refcnt), sp,
78067 - atomic_read(&sp->sk_drops));
78068 + atomic_read(&sp->sk_refcnt),
78069 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78070 + NULL,
78071 +#else
78072 + sp,
78073 +#endif
78074 + atomic_read_unchecked(&sp->sk_drops));
78075 }
78076
78077 int udp6_seq_show(struct seq_file *seq, void *v)
78078 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
78079 index 6b9d5a0..4dffaf1 100644
78080 --- a/net/irda/ircomm/ircomm_tty.c
78081 +++ b/net/irda/ircomm/ircomm_tty.c
78082 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
78083 add_wait_queue(&self->open_wait, &wait);
78084
78085 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
78086 - __FILE__,__LINE__, tty->driver->name, self->open_count );
78087 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
78088
78089 /* As far as I can see, we protect open_count - Jean II */
78090 spin_lock_irqsave(&self->spinlock, flags);
78091 if (!tty_hung_up_p(filp)) {
78092 extra_count = 1;
78093 - self->open_count--;
78094 + local_dec(&self->open_count);
78095 }
78096 spin_unlock_irqrestore(&self->spinlock, flags);
78097 - self->blocked_open++;
78098 + local_inc(&self->blocked_open);
78099
78100 while (1) {
78101 if (tty->termios->c_cflag & CBAUD) {
78102 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
78103 }
78104
78105 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
78106 - __FILE__,__LINE__, tty->driver->name, self->open_count );
78107 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
78108
78109 schedule();
78110 }
78111 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
78112 if (extra_count) {
78113 /* ++ is not atomic, so this should be protected - Jean II */
78114 spin_lock_irqsave(&self->spinlock, flags);
78115 - self->open_count++;
78116 + local_inc(&self->open_count);
78117 spin_unlock_irqrestore(&self->spinlock, flags);
78118 }
78119 - self->blocked_open--;
78120 + local_dec(&self->blocked_open);
78121
78122 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
78123 - __FILE__,__LINE__, tty->driver->name, self->open_count);
78124 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
78125
78126 if (!retval)
78127 self->flags |= ASYNC_NORMAL_ACTIVE;
78128 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
78129 }
78130 /* ++ is not atomic, so this should be protected - Jean II */
78131 spin_lock_irqsave(&self->spinlock, flags);
78132 - self->open_count++;
78133 + local_inc(&self->open_count);
78134
78135 tty->driver_data = self;
78136 self->tty = tty;
78137 spin_unlock_irqrestore(&self->spinlock, flags);
78138
78139 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
78140 - self->line, self->open_count);
78141 + self->line, local_read(&self->open_count));
78142
78143 /* Not really used by us, but lets do it anyway */
78144 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
78145 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
78146 return;
78147 }
78148
78149 - if ((tty->count == 1) && (self->open_count != 1)) {
78150 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
78151 /*
78152 * Uh, oh. tty->count is 1, which means that the tty
78153 * structure will be freed. state->count should always
78154 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
78155 */
78156 IRDA_DEBUG(0, "%s(), bad serial port count; "
78157 "tty->count is 1, state->count is %d\n", __func__ ,
78158 - self->open_count);
78159 - self->open_count = 1;
78160 + local_read(&self->open_count));
78161 + local_set(&self->open_count, 1);
78162 }
78163
78164 - if (--self->open_count < 0) {
78165 + if (local_dec_return(&self->open_count) < 0) {
78166 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
78167 - __func__, self->line, self->open_count);
78168 - self->open_count = 0;
78169 + __func__, self->line, local_read(&self->open_count));
78170 + local_set(&self->open_count, 0);
78171 }
78172 - if (self->open_count) {
78173 + if (local_read(&self->open_count)) {
78174 spin_unlock_irqrestore(&self->spinlock, flags);
78175
78176 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
78177 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
78178 tty->closing = 0;
78179 self->tty = NULL;
78180
78181 - if (self->blocked_open) {
78182 + if (local_read(&self->blocked_open)) {
78183 if (self->close_delay)
78184 schedule_timeout_interruptible(self->close_delay);
78185 wake_up_interruptible(&self->open_wait);
78186 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
78187 spin_lock_irqsave(&self->spinlock, flags);
78188 self->flags &= ~ASYNC_NORMAL_ACTIVE;
78189 self->tty = NULL;
78190 - self->open_count = 0;
78191 + local_set(&self->open_count, 0);
78192 spin_unlock_irqrestore(&self->spinlock, flags);
78193
78194 wake_up_interruptible(&self->open_wait);
78195 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
78196 seq_putc(m, '\n');
78197
78198 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
78199 - seq_printf(m, "Open count: %d\n", self->open_count);
78200 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
78201 seq_printf(m, "Max data size: %d\n", self->max_data_size);
78202 seq_printf(m, "Max header size: %d\n", self->max_header_size);
78203
78204 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
78205 index cd6f7a9..e63fe89 100644
78206 --- a/net/iucv/af_iucv.c
78207 +++ b/net/iucv/af_iucv.c
78208 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
78209
78210 write_lock_bh(&iucv_sk_list.lock);
78211
78212 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
78213 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
78214 while (__iucv_get_sock_by_name(name)) {
78215 sprintf(name, "%08x",
78216 - atomic_inc_return(&iucv_sk_list.autobind_name));
78217 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
78218 }
78219
78220 write_unlock_bh(&iucv_sk_list.lock);
78221 diff --git a/net/key/af_key.c b/net/key/af_key.c
78222 index 34e4185..8823368 100644
78223 --- a/net/key/af_key.c
78224 +++ b/net/key/af_key.c
78225 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
78226 static u32 get_acqseq(void)
78227 {
78228 u32 res;
78229 - static atomic_t acqseq;
78230 + static atomic_unchecked_t acqseq;
78231
78232 do {
78233 - res = atomic_inc_return(&acqseq);
78234 + res = atomic_inc_return_unchecked(&acqseq);
78235 } while (!res);
78236 return res;
78237 }
78238 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
78239 index 35e1e4b..9275471 100644
78240 --- a/net/l2tp/l2tp_ip6.c
78241 +++ b/net/l2tp/l2tp_ip6.c
78242 @@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
78243 lsa->l2tp_family = AF_INET6;
78244 lsa->l2tp_flowinfo = 0;
78245 lsa->l2tp_scope_id = 0;
78246 + lsa->l2tp_unused = 0;
78247 if (peer) {
78248 if (!lsk->peer_conn_id)
78249 return -ENOTCONN;
78250 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
78251 index fe5453c..a13c3e23 100644
78252 --- a/net/llc/af_llc.c
78253 +++ b/net/llc/af_llc.c
78254 @@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
78255 struct sockaddr_llc sllc;
78256 struct sock *sk = sock->sk;
78257 struct llc_sock *llc = llc_sk(sk);
78258 - int rc = 0;
78259 + int rc = -EBADF;
78260
78261 memset(&sllc, 0, sizeof(sllc));
78262 lock_sock(sk);
78263 if (sock_flag(sk, SOCK_ZAPPED))
78264 goto out;
78265 *uaddrlen = sizeof(sllc);
78266 - memset(uaddr, 0, *uaddrlen);
78267 if (peer) {
78268 rc = -ENOTCONN;
78269 if (sk->sk_state != TCP_ESTABLISHED)
78270 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
78271 index 3f3cd50..d2cf249 100644
78272 --- a/net/mac80211/ieee80211_i.h
78273 +++ b/net/mac80211/ieee80211_i.h
78274 @@ -28,6 +28,7 @@
78275 #include <net/ieee80211_radiotap.h>
78276 #include <net/cfg80211.h>
78277 #include <net/mac80211.h>
78278 +#include <asm/local.h>
78279 #include "key.h"
78280 #include "sta_info.h"
78281
78282 @@ -863,7 +864,7 @@ struct ieee80211_local {
78283 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
78284 spinlock_t queue_stop_reason_lock;
78285
78286 - int open_count;
78287 + local_t open_count;
78288 int monitors, cooked_mntrs;
78289 /* number of interfaces with corresponding FIF_ flags */
78290 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
78291 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
78292 index 8664111..1d6a065 100644
78293 --- a/net/mac80211/iface.c
78294 +++ b/net/mac80211/iface.c
78295 @@ -328,7 +328,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78296 break;
78297 }
78298
78299 - if (local->open_count == 0) {
78300 + if (local_read(&local->open_count) == 0) {
78301 res = drv_start(local);
78302 if (res)
78303 goto err_del_bss;
78304 @@ -371,7 +371,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78305 break;
78306 }
78307
78308 - if (local->monitors == 0 && local->open_count == 0) {
78309 + if (local->monitors == 0 && local_read(&local->open_count) == 0) {
78310 res = ieee80211_add_virtual_monitor(local);
78311 if (res)
78312 goto err_stop;
78313 @@ -468,7 +468,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78314 mutex_unlock(&local->mtx);
78315
78316 if (coming_up)
78317 - local->open_count++;
78318 + local_inc(&local->open_count);
78319
78320 if (hw_reconf_flags)
78321 ieee80211_hw_config(local, hw_reconf_flags);
78322 @@ -481,7 +481,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78323 err_del_interface:
78324 drv_remove_interface(local, sdata);
78325 err_stop:
78326 - if (!local->open_count)
78327 + if (!local_read(&local->open_count))
78328 drv_stop(local);
78329 err_del_bss:
78330 sdata->bss = NULL;
78331 @@ -613,7 +613,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78332 }
78333
78334 if (going_down)
78335 - local->open_count--;
78336 + local_dec(&local->open_count);
78337
78338 switch (sdata->vif.type) {
78339 case NL80211_IFTYPE_AP_VLAN:
78340 @@ -685,7 +685,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78341
78342 ieee80211_recalc_ps(local, -1);
78343
78344 - if (local->open_count == 0) {
78345 + if (local_read(&local->open_count) == 0) {
78346 if (local->ops->napi_poll)
78347 napi_disable(&local->napi);
78348 ieee80211_clear_tx_pending(local);
78349 @@ -717,7 +717,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78350 }
78351 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
78352
78353 - if (local->monitors == local->open_count && local->monitors > 0)
78354 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
78355 ieee80211_add_virtual_monitor(local);
78356 }
78357
78358 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
78359 index f5548e9..474a15f 100644
78360 --- a/net/mac80211/main.c
78361 +++ b/net/mac80211/main.c
78362 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
78363 local->hw.conf.power_level = power;
78364 }
78365
78366 - if (changed && local->open_count) {
78367 + if (changed && local_read(&local->open_count)) {
78368 ret = drv_config(local, changed);
78369 /*
78370 * Goal:
78371 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
78372 index af1c4e2..24dbbe3 100644
78373 --- a/net/mac80211/pm.c
78374 +++ b/net/mac80211/pm.c
78375 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78376 struct ieee80211_sub_if_data *sdata;
78377 struct sta_info *sta;
78378
78379 - if (!local->open_count)
78380 + if (!local_read(&local->open_count))
78381 goto suspend;
78382
78383 ieee80211_scan_cancel(local);
78384 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78385 cancel_work_sync(&local->dynamic_ps_enable_work);
78386 del_timer_sync(&local->dynamic_ps_timer);
78387
78388 - local->wowlan = wowlan && local->open_count;
78389 + local->wowlan = wowlan && local_read(&local->open_count);
78390 if (local->wowlan) {
78391 int err = drv_suspend(local, wowlan);
78392 if (err < 0) {
78393 @@ -132,7 +132,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78394 drv_remove_interface(local, sdata);
78395
78396 /* stop hardware - this must stop RX */
78397 - if (local->open_count)
78398 + if (local_read(&local->open_count))
78399 ieee80211_stop_device(local);
78400
78401 suspend:
78402 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
78403 index 3313c11..bec9f17 100644
78404 --- a/net/mac80211/rate.c
78405 +++ b/net/mac80211/rate.c
78406 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
78407
78408 ASSERT_RTNL();
78409
78410 - if (local->open_count)
78411 + if (local_read(&local->open_count))
78412 return -EBUSY;
78413
78414 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
78415 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
78416 index c97a065..ff61928 100644
78417 --- a/net/mac80211/rc80211_pid_debugfs.c
78418 +++ b/net/mac80211/rc80211_pid_debugfs.c
78419 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
78420
78421 spin_unlock_irqrestore(&events->lock, status);
78422
78423 - if (copy_to_user(buf, pb, p))
78424 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
78425 return -EFAULT;
78426
78427 return p;
78428 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
78429 index f564b5e..22fee47 100644
78430 --- a/net/mac80211/util.c
78431 +++ b/net/mac80211/util.c
78432 @@ -1224,7 +1224,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
78433 }
78434 #endif
78435 /* everything else happens only if HW was up & running */
78436 - if (!local->open_count)
78437 + if (!local_read(&local->open_count))
78438 goto wake_up;
78439
78440 /*
78441 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
78442 index 209c1ed..39484dc 100644
78443 --- a/net/netfilter/Kconfig
78444 +++ b/net/netfilter/Kconfig
78445 @@ -851,6 +851,16 @@ config NETFILTER_XT_MATCH_ESP
78446
78447 To compile it as a module, choose M here. If unsure, say N.
78448
78449 +config NETFILTER_XT_MATCH_GRADM
78450 + tristate '"gradm" match support'
78451 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
78452 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
78453 + ---help---
78454 + The gradm match allows to match on grsecurity RBAC being enabled.
78455 + It is useful when iptables rules are applied early on bootup to
78456 + prevent connections to the machine (except from a trusted host)
78457 + while the RBAC system is disabled.
78458 +
78459 config NETFILTER_XT_MATCH_HASHLIMIT
78460 tristate '"hashlimit" match support'
78461 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
78462 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
78463 index 4e7960c..89e48d4 100644
78464 --- a/net/netfilter/Makefile
78465 +++ b/net/netfilter/Makefile
78466 @@ -87,6 +87,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
78467 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
78468 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
78469 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
78470 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
78471 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
78472 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
78473 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
78474 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
78475 index 1548df9..98ad9b4 100644
78476 --- a/net/netfilter/ipvs/ip_vs_conn.c
78477 +++ b/net/netfilter/ipvs/ip_vs_conn.c
78478 @@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
78479 /* Increase the refcnt counter of the dest */
78480 atomic_inc(&dest->refcnt);
78481
78482 - conn_flags = atomic_read(&dest->conn_flags);
78483 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
78484 if (cp->protocol != IPPROTO_UDP)
78485 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
78486 flags = cp->flags;
78487 @@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
78488 atomic_set(&cp->refcnt, 1);
78489
78490 atomic_set(&cp->n_control, 0);
78491 - atomic_set(&cp->in_pkts, 0);
78492 + atomic_set_unchecked(&cp->in_pkts, 0);
78493
78494 atomic_inc(&ipvs->conn_count);
78495 if (flags & IP_VS_CONN_F_NO_CPORT)
78496 @@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
78497
78498 /* Don't drop the entry if its number of incoming packets is not
78499 located in [0, 8] */
78500 - i = atomic_read(&cp->in_pkts);
78501 + i = atomic_read_unchecked(&cp->in_pkts);
78502 if (i > 8 || i < 0) return 0;
78503
78504 if (!todrop_rate[i]) return 0;
78505 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
78506 index a54b018c..07e0120 100644
78507 --- a/net/netfilter/ipvs/ip_vs_core.c
78508 +++ b/net/netfilter/ipvs/ip_vs_core.c
78509 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
78510 ret = cp->packet_xmit(skb, cp, pd->pp);
78511 /* do not touch skb anymore */
78512
78513 - atomic_inc(&cp->in_pkts);
78514 + atomic_inc_unchecked(&cp->in_pkts);
78515 ip_vs_conn_put(cp);
78516 return ret;
78517 }
78518 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
78519 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
78520 pkts = sysctl_sync_threshold(ipvs);
78521 else
78522 - pkts = atomic_add_return(1, &cp->in_pkts);
78523 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78524
78525 if (ipvs->sync_state & IP_VS_STATE_MASTER)
78526 ip_vs_sync_conn(net, cp, pkts);
78527 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
78528 index 84444dd..f91c066 100644
78529 --- a/net/netfilter/ipvs/ip_vs_ctl.c
78530 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
78531 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
78532 ip_vs_rs_hash(ipvs, dest);
78533 write_unlock_bh(&ipvs->rs_lock);
78534 }
78535 - atomic_set(&dest->conn_flags, conn_flags);
78536 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
78537
78538 /* bind the service */
78539 if (!dest->svc) {
78540 @@ -2074,7 +2074,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
78541 " %-7s %-6d %-10d %-10d\n",
78542 &dest->addr.in6,
78543 ntohs(dest->port),
78544 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
78545 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
78546 atomic_read(&dest->weight),
78547 atomic_read(&dest->activeconns),
78548 atomic_read(&dest->inactconns));
78549 @@ -2085,7 +2085,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
78550 "%-7s %-6d %-10d %-10d\n",
78551 ntohl(dest->addr.ip),
78552 ntohs(dest->port),
78553 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
78554 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
78555 atomic_read(&dest->weight),
78556 atomic_read(&dest->activeconns),
78557 atomic_read(&dest->inactconns));
78558 @@ -2555,7 +2555,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
78559
78560 entry.addr = dest->addr.ip;
78561 entry.port = dest->port;
78562 - entry.conn_flags = atomic_read(&dest->conn_flags);
78563 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
78564 entry.weight = atomic_read(&dest->weight);
78565 entry.u_threshold = dest->u_threshold;
78566 entry.l_threshold = dest->l_threshold;
78567 @@ -2759,6 +2759,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
78568 {
78569 struct ip_vs_timeout_user t;
78570
78571 + memset(&t, 0, sizeof(t));
78572 __ip_vs_get_timeouts(net, &t);
78573 if (copy_to_user(user, &t, sizeof(t)) != 0)
78574 ret = -EFAULT;
78575 @@ -3089,7 +3090,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
78576 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
78577 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
78578 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
78579 - (atomic_read(&dest->conn_flags) &
78580 + (atomic_read_unchecked(&dest->conn_flags) &
78581 IP_VS_CONN_F_FWD_MASK)) ||
78582 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
78583 atomic_read(&dest->weight)) ||
78584 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
78585 index effa10c..9058928 100644
78586 --- a/net/netfilter/ipvs/ip_vs_sync.c
78587 +++ b/net/netfilter/ipvs/ip_vs_sync.c
78588 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
78589 cp = cp->control;
78590 if (cp) {
78591 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78592 - pkts = atomic_add_return(1, &cp->in_pkts);
78593 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78594 else
78595 pkts = sysctl_sync_threshold(ipvs);
78596 ip_vs_sync_conn(net, cp->control, pkts);
78597 @@ -758,7 +758,7 @@ control:
78598 if (!cp)
78599 return;
78600 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78601 - pkts = atomic_add_return(1, &cp->in_pkts);
78602 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78603 else
78604 pkts = sysctl_sync_threshold(ipvs);
78605 goto sloop;
78606 @@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
78607
78608 if (opt)
78609 memcpy(&cp->in_seq, opt, sizeof(*opt));
78610 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78611 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78612 cp->state = state;
78613 cp->old_state = cp->state;
78614 /*
78615 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
78616 index 7fd66de..e6fb361 100644
78617 --- a/net/netfilter/ipvs/ip_vs_xmit.c
78618 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
78619 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
78620 else
78621 rc = NF_ACCEPT;
78622 /* do not touch skb anymore */
78623 - atomic_inc(&cp->in_pkts);
78624 + atomic_inc_unchecked(&cp->in_pkts);
78625 goto out;
78626 }
78627
78628 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
78629 else
78630 rc = NF_ACCEPT;
78631 /* do not touch skb anymore */
78632 - atomic_inc(&cp->in_pkts);
78633 + atomic_inc_unchecked(&cp->in_pkts);
78634 goto out;
78635 }
78636
78637 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
78638 index ac3af97..c134c21 100644
78639 --- a/net/netfilter/nf_conntrack_core.c
78640 +++ b/net/netfilter/nf_conntrack_core.c
78641 @@ -1530,6 +1530,10 @@ err_proto:
78642 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
78643 #define DYING_NULLS_VAL ((1<<30)+1)
78644
78645 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78646 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
78647 +#endif
78648 +
78649 static int nf_conntrack_init_net(struct net *net)
78650 {
78651 int ret;
78652 @@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
78653 goto err_stat;
78654 }
78655
78656 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78657 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
78658 +#else
78659 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
78660 +#endif
78661 if (!net->ct.slabname) {
78662 ret = -ENOMEM;
78663 goto err_slabname;
78664 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
78665 index 3c3cfc0..7a6ea1a 100644
78666 --- a/net/netfilter/nfnetlink_log.c
78667 +++ b/net/netfilter/nfnetlink_log.c
78668 @@ -70,7 +70,7 @@ struct nfulnl_instance {
78669 };
78670
78671 static DEFINE_SPINLOCK(instances_lock);
78672 -static atomic_t global_seq;
78673 +static atomic_unchecked_t global_seq;
78674
78675 #define INSTANCE_BUCKETS 16
78676 static struct hlist_head instance_table[INSTANCE_BUCKETS];
78677 @@ -517,7 +517,7 @@ __build_packet_message(struct nfulnl_instance *inst,
78678 /* global sequence number */
78679 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
78680 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
78681 - htonl(atomic_inc_return(&global_seq))))
78682 + htonl(atomic_inc_return_unchecked(&global_seq))))
78683 goto nla_put_failure;
78684
78685 if (data_len) {
78686 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
78687 new file mode 100644
78688 index 0000000..6905327
78689 --- /dev/null
78690 +++ b/net/netfilter/xt_gradm.c
78691 @@ -0,0 +1,51 @@
78692 +/*
78693 + * gradm match for netfilter
78694 + * Copyright © Zbigniew Krzystolik, 2010
78695 + *
78696 + * This program is free software; you can redistribute it and/or modify
78697 + * it under the terms of the GNU General Public License; either version
78698 + * 2 or 3 as published by the Free Software Foundation.
78699 + */
78700 +#include <linux/module.h>
78701 +#include <linux/moduleparam.h>
78702 +#include <linux/skbuff.h>
78703 +#include <linux/netfilter/x_tables.h>
78704 +#include <linux/grsecurity.h>
78705 +#include <linux/netfilter/xt_gradm.h>
78706 +
78707 +static bool
78708 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
78709 +{
78710 + const struct xt_gradm_mtinfo *info = par->matchinfo;
78711 + bool retval = false;
78712 + if (gr_acl_is_enabled())
78713 + retval = true;
78714 + return retval ^ info->invflags;
78715 +}
78716 +
78717 +static struct xt_match gradm_mt_reg __read_mostly = {
78718 + .name = "gradm",
78719 + .revision = 0,
78720 + .family = NFPROTO_UNSPEC,
78721 + .match = gradm_mt,
78722 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
78723 + .me = THIS_MODULE,
78724 +};
78725 +
78726 +static int __init gradm_mt_init(void)
78727 +{
78728 + return xt_register_match(&gradm_mt_reg);
78729 +}
78730 +
78731 +static void __exit gradm_mt_exit(void)
78732 +{
78733 + xt_unregister_match(&gradm_mt_reg);
78734 +}
78735 +
78736 +module_init(gradm_mt_init);
78737 +module_exit(gradm_mt_exit);
78738 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
78739 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
78740 +MODULE_LICENSE("GPL");
78741 +MODULE_ALIAS("ipt_gradm");
78742 +MODULE_ALIAS("ip6t_gradm");
78743 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
78744 index 4fe4fb4..87a89e5 100644
78745 --- a/net/netfilter/xt_statistic.c
78746 +++ b/net/netfilter/xt_statistic.c
78747 @@ -19,7 +19,7 @@
78748 #include <linux/module.h>
78749
78750 struct xt_statistic_priv {
78751 - atomic_t count;
78752 + atomic_unchecked_t count;
78753 } ____cacheline_aligned_in_smp;
78754
78755 MODULE_LICENSE("GPL");
78756 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
78757 break;
78758 case XT_STATISTIC_MODE_NTH:
78759 do {
78760 - oval = atomic_read(&info->master->count);
78761 + oval = atomic_read_unchecked(&info->master->count);
78762 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
78763 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
78764 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
78765 if (nval == 0)
78766 ret = !ret;
78767 break;
78768 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
78769 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
78770 if (info->master == NULL)
78771 return -ENOMEM;
78772 - atomic_set(&info->master->count, info->u.nth.count);
78773 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
78774
78775 return 0;
78776 }
78777 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
78778 index b3025a6..e717db9 100644
78779 --- a/net/netlink/af_netlink.c
78780 +++ b/net/netlink/af_netlink.c
78781 @@ -753,7 +753,7 @@ static void netlink_overrun(struct sock *sk)
78782 sk->sk_error_report(sk);
78783 }
78784 }
78785 - atomic_inc(&sk->sk_drops);
78786 + atomic_inc_unchecked(&sk->sk_drops);
78787 }
78788
78789 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
78790 @@ -1344,7 +1344,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
78791 if (NULL == siocb->scm)
78792 siocb->scm = &scm;
78793
78794 - err = scm_send(sock, msg, siocb->scm);
78795 + err = scm_send(sock, msg, siocb->scm, true);
78796 if (err < 0)
78797 return err;
78798
78799 @@ -1355,7 +1355,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
78800 dst_pid = addr->nl_pid;
78801 dst_group = ffs(addr->nl_groups);
78802 err = -EPERM;
78803 - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
78804 + if ((dst_group || dst_pid) &&
78805 + !netlink_capable(sock, NL_NONROOT_SEND))
78806 goto out;
78807 } else {
78808 dst_pid = nlk->dst_pid;
78809 @@ -2022,7 +2023,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
78810 sk_wmem_alloc_get(s),
78811 nlk->cb,
78812 atomic_read(&s->sk_refcnt),
78813 - atomic_read(&s->sk_drops),
78814 + atomic_read_unchecked(&s->sk_drops),
78815 sock_i_ino(s)
78816 );
78817
78818 @@ -2124,6 +2125,7 @@ static void __init netlink_add_usersock_entry(void)
78819 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
78820 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
78821 nl_table[NETLINK_USERSOCK].registered = 1;
78822 + nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
78823
78824 netlink_table_ungrab();
78825 }
78826 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
78827 index 06592d8..64860f6 100644
78828 --- a/net/netrom/af_netrom.c
78829 +++ b/net/netrom/af_netrom.c
78830 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
78831 struct sock *sk = sock->sk;
78832 struct nr_sock *nr = nr_sk(sk);
78833
78834 + memset(sax, 0, sizeof(*sax));
78835 lock_sock(sk);
78836 if (peer != 0) {
78837 if (sk->sk_state != TCP_ESTABLISHED) {
78838 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
78839 *uaddr_len = sizeof(struct full_sockaddr_ax25);
78840 } else {
78841 sax->fsa_ax25.sax25_family = AF_NETROM;
78842 - sax->fsa_ax25.sax25_ndigis = 0;
78843 sax->fsa_ax25.sax25_call = nr->source_addr;
78844 *uaddr_len = sizeof(struct sockaddr_ax25);
78845 }
78846 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
78847 index 0f66174..e7cb04c 100644
78848 --- a/net/packet/af_packet.c
78849 +++ b/net/packet/af_packet.c
78850 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
78851
78852 spin_lock(&sk->sk_receive_queue.lock);
78853 po->stats.tp_packets++;
78854 - skb->dropcount = atomic_read(&sk->sk_drops);
78855 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
78856 __skb_queue_tail(&sk->sk_receive_queue, skb);
78857 spin_unlock(&sk->sk_receive_queue.lock);
78858 sk->sk_data_ready(sk, skb->len);
78859 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
78860 drop_n_acct:
78861 spin_lock(&sk->sk_receive_queue.lock);
78862 po->stats.tp_drops++;
78863 - atomic_inc(&sk->sk_drops);
78864 + atomic_inc_unchecked(&sk->sk_drops);
78865 spin_unlock(&sk->sk_receive_queue.lock);
78866
78867 drop_n_restore:
78868 @@ -2641,6 +2641,7 @@ out:
78869
78870 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
78871 {
78872 + struct sock_extended_err ee;
78873 struct sock_exterr_skb *serr;
78874 struct sk_buff *skb, *skb2;
78875 int copied, err;
78876 @@ -2662,8 +2663,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
78877 sock_recv_timestamp(msg, sk, skb);
78878
78879 serr = SKB_EXT_ERR(skb);
78880 + ee = serr->ee;
78881 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
78882 - sizeof(serr->ee), &serr->ee);
78883 + sizeof ee, &ee);
78884
78885 msg->msg_flags |= MSG_ERRQUEUE;
78886 err = copied;
78887 @@ -3275,7 +3277,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
78888 case PACKET_HDRLEN:
78889 if (len > sizeof(int))
78890 len = sizeof(int);
78891 - if (copy_from_user(&val, optval, len))
78892 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
78893 return -EFAULT;
78894 switch (val) {
78895 case TPACKET_V1:
78896 @@ -3314,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
78897 len = lv;
78898 if (put_user(len, optlen))
78899 return -EFAULT;
78900 - if (copy_to_user(optval, data, len))
78901 + if (len > sizeof(st) || copy_to_user(optval, data, len))
78902 return -EFAULT;
78903 return 0;
78904 }
78905 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
78906 index 5a940db..f0b9c12 100644
78907 --- a/net/phonet/af_phonet.c
78908 +++ b/net/phonet/af_phonet.c
78909 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
78910 {
78911 struct phonet_protocol *pp;
78912
78913 - if (protocol >= PHONET_NPROTO)
78914 + if (protocol < 0 || protocol >= PHONET_NPROTO)
78915 return NULL;
78916
78917 rcu_read_lock();
78918 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
78919 {
78920 int err = 0;
78921
78922 - if (protocol >= PHONET_NPROTO)
78923 + if (protocol < 0 || protocol >= PHONET_NPROTO)
78924 return -EINVAL;
78925
78926 err = proto_register(pp->prot, 1);
78927 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
78928 index 576f22c..bc7a71b 100644
78929 --- a/net/phonet/pep.c
78930 +++ b/net/phonet/pep.c
78931 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
78932
78933 case PNS_PEP_CTRL_REQ:
78934 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
78935 - atomic_inc(&sk->sk_drops);
78936 + atomic_inc_unchecked(&sk->sk_drops);
78937 break;
78938 }
78939 __skb_pull(skb, 4);
78940 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
78941 }
78942
78943 if (pn->rx_credits == 0) {
78944 - atomic_inc(&sk->sk_drops);
78945 + atomic_inc_unchecked(&sk->sk_drops);
78946 err = -ENOBUFS;
78947 break;
78948 }
78949 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
78950 }
78951
78952 if (pn->rx_credits == 0) {
78953 - atomic_inc(&sk->sk_drops);
78954 + atomic_inc_unchecked(&sk->sk_drops);
78955 err = NET_RX_DROP;
78956 break;
78957 }
78958 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
78959 index 0acc943..c727611 100644
78960 --- a/net/phonet/socket.c
78961 +++ b/net/phonet/socket.c
78962 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
78963 pn->resource, sk->sk_state,
78964 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
78965 sock_i_uid(sk), sock_i_ino(sk),
78966 - atomic_read(&sk->sk_refcnt), sk,
78967 - atomic_read(&sk->sk_drops), &len);
78968 + atomic_read(&sk->sk_refcnt),
78969 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78970 + NULL,
78971 +#else
78972 + sk,
78973 +#endif
78974 + atomic_read_unchecked(&sk->sk_drops), &len);
78975 }
78976 seq_printf(seq, "%*s\n", 127 - len, "");
78977 return 0;
78978 diff --git a/net/rds/cong.c b/net/rds/cong.c
78979 index e5b65ac..f3b6fb7 100644
78980 --- a/net/rds/cong.c
78981 +++ b/net/rds/cong.c
78982 @@ -78,7 +78,7 @@
78983 * finds that the saved generation number is smaller than the global generation
78984 * number, it wakes up the process.
78985 */
78986 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
78987 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
78988
78989 /*
78990 * Congestion monitoring
78991 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
78992 rdsdebug("waking map %p for %pI4\n",
78993 map, &map->m_addr);
78994 rds_stats_inc(s_cong_update_received);
78995 - atomic_inc(&rds_cong_generation);
78996 + atomic_inc_unchecked(&rds_cong_generation);
78997 if (waitqueue_active(&map->m_waitq))
78998 wake_up(&map->m_waitq);
78999 if (waitqueue_active(&rds_poll_waitq))
79000 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
79001
79002 int rds_cong_updated_since(unsigned long *recent)
79003 {
79004 - unsigned long gen = atomic_read(&rds_cong_generation);
79005 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
79006
79007 if (likely(*recent == gen))
79008 return 0;
79009 diff --git a/net/rds/ib.h b/net/rds/ib.h
79010 index 8d2b3d5..227ec5b 100644
79011 --- a/net/rds/ib.h
79012 +++ b/net/rds/ib.h
79013 @@ -128,7 +128,7 @@ struct rds_ib_connection {
79014 /* sending acks */
79015 unsigned long i_ack_flags;
79016 #ifdef KERNEL_HAS_ATOMIC64
79017 - atomic64_t i_ack_next; /* next ACK to send */
79018 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
79019 #else
79020 spinlock_t i_ack_lock; /* protect i_ack_next */
79021 u64 i_ack_next; /* next ACK to send */
79022 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
79023 index a1e1162..265e129 100644
79024 --- a/net/rds/ib_cm.c
79025 +++ b/net/rds/ib_cm.c
79026 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
79027 /* Clear the ACK state */
79028 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
79029 #ifdef KERNEL_HAS_ATOMIC64
79030 - atomic64_set(&ic->i_ack_next, 0);
79031 + atomic64_set_unchecked(&ic->i_ack_next, 0);
79032 #else
79033 ic->i_ack_next = 0;
79034 #endif
79035 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
79036 index 8d19491..05a3e65 100644
79037 --- a/net/rds/ib_recv.c
79038 +++ b/net/rds/ib_recv.c
79039 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
79040 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
79041 int ack_required)
79042 {
79043 - atomic64_set(&ic->i_ack_next, seq);
79044 + atomic64_set_unchecked(&ic->i_ack_next, seq);
79045 if (ack_required) {
79046 smp_mb__before_clear_bit();
79047 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
79048 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
79049 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
79050 smp_mb__after_clear_bit();
79051
79052 - return atomic64_read(&ic->i_ack_next);
79053 + return atomic64_read_unchecked(&ic->i_ack_next);
79054 }
79055 #endif
79056
79057 diff --git a/net/rds/iw.h b/net/rds/iw.h
79058 index 04ce3b1..48119a6 100644
79059 --- a/net/rds/iw.h
79060 +++ b/net/rds/iw.h
79061 @@ -134,7 +134,7 @@ struct rds_iw_connection {
79062 /* sending acks */
79063 unsigned long i_ack_flags;
79064 #ifdef KERNEL_HAS_ATOMIC64
79065 - atomic64_t i_ack_next; /* next ACK to send */
79066 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
79067 #else
79068 spinlock_t i_ack_lock; /* protect i_ack_next */
79069 u64 i_ack_next; /* next ACK to send */
79070 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
79071 index a91e1db..cf3053f 100644
79072 --- a/net/rds/iw_cm.c
79073 +++ b/net/rds/iw_cm.c
79074 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
79075 /* Clear the ACK state */
79076 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
79077 #ifdef KERNEL_HAS_ATOMIC64
79078 - atomic64_set(&ic->i_ack_next, 0);
79079 + atomic64_set_unchecked(&ic->i_ack_next, 0);
79080 #else
79081 ic->i_ack_next = 0;
79082 #endif
79083 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
79084 index 4503335..db566b4 100644
79085 --- a/net/rds/iw_recv.c
79086 +++ b/net/rds/iw_recv.c
79087 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
79088 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
79089 int ack_required)
79090 {
79091 - atomic64_set(&ic->i_ack_next, seq);
79092 + atomic64_set_unchecked(&ic->i_ack_next, seq);
79093 if (ack_required) {
79094 smp_mb__before_clear_bit();
79095 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
79096 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
79097 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
79098 smp_mb__after_clear_bit();
79099
79100 - return atomic64_read(&ic->i_ack_next);
79101 + return atomic64_read_unchecked(&ic->i_ack_next);
79102 }
79103 #endif
79104
79105 diff --git a/net/rds/recv.c b/net/rds/recv.c
79106 index 5c6e9f1..9f0f17c 100644
79107 --- a/net/rds/recv.c
79108 +++ b/net/rds/recv.c
79109 @@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
79110
79111 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
79112
79113 + msg->msg_namelen = 0;
79114 +
79115 if (msg_flags & MSG_OOB)
79116 goto out;
79117
79118 @@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
79119 sin->sin_port = inc->i_hdr.h_sport;
79120 sin->sin_addr.s_addr = inc->i_saddr;
79121 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
79122 + msg->msg_namelen = sizeof(*sin);
79123 }
79124 break;
79125 }
79126 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
79127 index edac9ef..16bcb98 100644
79128 --- a/net/rds/tcp.c
79129 +++ b/net/rds/tcp.c
79130 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
79131 int val = 1;
79132
79133 set_fs(KERNEL_DS);
79134 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
79135 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
79136 sizeof(val));
79137 set_fs(oldfs);
79138 }
79139 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
79140 index 1b4fd68..2234175 100644
79141 --- a/net/rds/tcp_send.c
79142 +++ b/net/rds/tcp_send.c
79143 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
79144
79145 oldfs = get_fs();
79146 set_fs(KERNEL_DS);
79147 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
79148 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
79149 sizeof(val));
79150 set_fs(oldfs);
79151 }
79152 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
79153 index 05996d0..5a1dfe0 100644
79154 --- a/net/rxrpc/af_rxrpc.c
79155 +++ b/net/rxrpc/af_rxrpc.c
79156 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
79157 __be32 rxrpc_epoch;
79158
79159 /* current debugging ID */
79160 -atomic_t rxrpc_debug_id;
79161 +atomic_unchecked_t rxrpc_debug_id;
79162
79163 /* count of skbs currently in use */
79164 atomic_t rxrpc_n_skbs;
79165 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
79166 index e4d9cbc..b229649 100644
79167 --- a/net/rxrpc/ar-ack.c
79168 +++ b/net/rxrpc/ar-ack.c
79169 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
79170
79171 _enter("{%d,%d,%d,%d},",
79172 call->acks_hard, call->acks_unacked,
79173 - atomic_read(&call->sequence),
79174 + atomic_read_unchecked(&call->sequence),
79175 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
79176
79177 stop = 0;
79178 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
79179
79180 /* each Tx packet has a new serial number */
79181 sp->hdr.serial =
79182 - htonl(atomic_inc_return(&call->conn->serial));
79183 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
79184
79185 hdr = (struct rxrpc_header *) txb->head;
79186 hdr->serial = sp->hdr.serial;
79187 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
79188 */
79189 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
79190 {
79191 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
79192 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
79193 }
79194
79195 /*
79196 @@ -629,7 +629,7 @@ process_further:
79197
79198 latest = ntohl(sp->hdr.serial);
79199 hard = ntohl(ack.firstPacket);
79200 - tx = atomic_read(&call->sequence);
79201 + tx = atomic_read_unchecked(&call->sequence);
79202
79203 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
79204 latest,
79205 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
79206 goto maybe_reschedule;
79207
79208 send_ACK_with_skew:
79209 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
79210 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
79211 ntohl(ack.serial));
79212 send_ACK:
79213 mtu = call->conn->trans->peer->if_mtu;
79214 @@ -1173,7 +1173,7 @@ send_ACK:
79215 ackinfo.rxMTU = htonl(5692);
79216 ackinfo.jumbo_max = htonl(4);
79217
79218 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
79219 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
79220 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
79221 ntohl(hdr.serial),
79222 ntohs(ack.maxSkew),
79223 @@ -1191,7 +1191,7 @@ send_ACK:
79224 send_message:
79225 _debug("send message");
79226
79227 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
79228 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
79229 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
79230 send_message_2:
79231
79232 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
79233 index a3bbb36..3341fb9 100644
79234 --- a/net/rxrpc/ar-call.c
79235 +++ b/net/rxrpc/ar-call.c
79236 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
79237 spin_lock_init(&call->lock);
79238 rwlock_init(&call->state_lock);
79239 atomic_set(&call->usage, 1);
79240 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
79241 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79242 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
79243
79244 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
79245 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
79246 index 4106ca9..a338d7a 100644
79247 --- a/net/rxrpc/ar-connection.c
79248 +++ b/net/rxrpc/ar-connection.c
79249 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
79250 rwlock_init(&conn->lock);
79251 spin_lock_init(&conn->state_lock);
79252 atomic_set(&conn->usage, 1);
79253 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
79254 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79255 conn->avail_calls = RXRPC_MAXCALLS;
79256 conn->size_align = 4;
79257 conn->header_size = sizeof(struct rxrpc_header);
79258 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
79259 index e7ed43a..6afa140 100644
79260 --- a/net/rxrpc/ar-connevent.c
79261 +++ b/net/rxrpc/ar-connevent.c
79262 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
79263
79264 len = iov[0].iov_len + iov[1].iov_len;
79265
79266 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
79267 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79268 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
79269
79270 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
79271 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
79272 index 529572f..c758ca7 100644
79273 --- a/net/rxrpc/ar-input.c
79274 +++ b/net/rxrpc/ar-input.c
79275 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
79276 /* track the latest serial number on this connection for ACK packet
79277 * information */
79278 serial = ntohl(sp->hdr.serial);
79279 - hi_serial = atomic_read(&call->conn->hi_serial);
79280 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
79281 while (serial > hi_serial)
79282 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
79283 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
79284 serial);
79285
79286 /* request ACK generation for any ACK or DATA packet that requests
79287 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
79288 index a693aca..81e7293 100644
79289 --- a/net/rxrpc/ar-internal.h
79290 +++ b/net/rxrpc/ar-internal.h
79291 @@ -272,8 +272,8 @@ struct rxrpc_connection {
79292 int error; /* error code for local abort */
79293 int debug_id; /* debug ID for printks */
79294 unsigned int call_counter; /* call ID counter */
79295 - atomic_t serial; /* packet serial number counter */
79296 - atomic_t hi_serial; /* highest serial number received */
79297 + atomic_unchecked_t serial; /* packet serial number counter */
79298 + atomic_unchecked_t hi_serial; /* highest serial number received */
79299 u8 avail_calls; /* number of calls available */
79300 u8 size_align; /* data size alignment (for security) */
79301 u8 header_size; /* rxrpc + security header size */
79302 @@ -346,7 +346,7 @@ struct rxrpc_call {
79303 spinlock_t lock;
79304 rwlock_t state_lock; /* lock for state transition */
79305 atomic_t usage;
79306 - atomic_t sequence; /* Tx data packet sequence counter */
79307 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
79308 u32 abort_code; /* local/remote abort code */
79309 enum { /* current state of call */
79310 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
79311 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
79312 */
79313 extern atomic_t rxrpc_n_skbs;
79314 extern __be32 rxrpc_epoch;
79315 -extern atomic_t rxrpc_debug_id;
79316 +extern atomic_unchecked_t rxrpc_debug_id;
79317 extern struct workqueue_struct *rxrpc_workqueue;
79318
79319 /*
79320 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
79321 index 87f7135..74d3703 100644
79322 --- a/net/rxrpc/ar-local.c
79323 +++ b/net/rxrpc/ar-local.c
79324 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
79325 spin_lock_init(&local->lock);
79326 rwlock_init(&local->services_lock);
79327 atomic_set(&local->usage, 1);
79328 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
79329 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79330 memcpy(&local->srx, srx, sizeof(*srx));
79331 }
79332
79333 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
79334 index 16ae887..d24f12b 100644
79335 --- a/net/rxrpc/ar-output.c
79336 +++ b/net/rxrpc/ar-output.c
79337 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
79338 sp->hdr.cid = call->cid;
79339 sp->hdr.callNumber = call->call_id;
79340 sp->hdr.seq =
79341 - htonl(atomic_inc_return(&call->sequence));
79342 + htonl(atomic_inc_return_unchecked(&call->sequence));
79343 sp->hdr.serial =
79344 - htonl(atomic_inc_return(&conn->serial));
79345 + htonl(atomic_inc_return_unchecked(&conn->serial));
79346 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
79347 sp->hdr.userStatus = 0;
79348 sp->hdr.securityIndex = conn->security_ix;
79349 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
79350 index bebaa43..2644591 100644
79351 --- a/net/rxrpc/ar-peer.c
79352 +++ b/net/rxrpc/ar-peer.c
79353 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
79354 INIT_LIST_HEAD(&peer->error_targets);
79355 spin_lock_init(&peer->lock);
79356 atomic_set(&peer->usage, 1);
79357 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
79358 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79359 memcpy(&peer->srx, srx, sizeof(*srx));
79360
79361 rxrpc_assess_MTU_size(peer);
79362 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
79363 index 38047f7..9f48511 100644
79364 --- a/net/rxrpc/ar-proc.c
79365 +++ b/net/rxrpc/ar-proc.c
79366 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
79367 atomic_read(&conn->usage),
79368 rxrpc_conn_states[conn->state],
79369 key_serial(conn->key),
79370 - atomic_read(&conn->serial),
79371 - atomic_read(&conn->hi_serial));
79372 + atomic_read_unchecked(&conn->serial),
79373 + atomic_read_unchecked(&conn->hi_serial));
79374
79375 return 0;
79376 }
79377 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
79378 index 92df566..87ec1bf 100644
79379 --- a/net/rxrpc/ar-transport.c
79380 +++ b/net/rxrpc/ar-transport.c
79381 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
79382 spin_lock_init(&trans->client_lock);
79383 rwlock_init(&trans->conn_lock);
79384 atomic_set(&trans->usage, 1);
79385 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
79386 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79387
79388 if (peer->srx.transport.family == AF_INET) {
79389 switch (peer->srx.transport_type) {
79390 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
79391 index f226709..0e735a8 100644
79392 --- a/net/rxrpc/rxkad.c
79393 +++ b/net/rxrpc/rxkad.c
79394 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
79395
79396 len = iov[0].iov_len + iov[1].iov_len;
79397
79398 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
79399 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79400 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
79401
79402 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
79403 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
79404
79405 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
79406
79407 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
79408 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79409 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
79410
79411 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
79412 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
79413 index 1e2eee8..ce3967e 100644
79414 --- a/net/sctp/proc.c
79415 +++ b/net/sctp/proc.c
79416 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
79417 seq_printf(seq,
79418 "%8pK %8pK %-3d %-3d %-2d %-4d "
79419 "%4d %8d %8d %7d %5lu %-5d %5d ",
79420 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
79421 + assoc, sk,
79422 + sctp_sk(sk)->type, sk->sk_state,
79423 assoc->state, hash,
79424 assoc->assoc_id,
79425 assoc->sndbuf_used,
79426 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
79427 index 31c7bfc..af7bfdc 100644
79428 --- a/net/sctp/socket.c
79429 +++ b/net/sctp/socket.c
79430 @@ -4577,6 +4577,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
79431 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
79432 if (space_left < addrlen)
79433 return -ENOMEM;
79434 + if (addrlen > sizeof(temp) || addrlen < 0)
79435 + return -EFAULT;
79436 if (copy_to_user(to, &temp, addrlen))
79437 return -EFAULT;
79438 to += addrlen;
79439 diff --git a/net/socket.c b/net/socket.c
79440 index a990aa9..5af9802 100644
79441 --- a/net/socket.c
79442 +++ b/net/socket.c
79443 @@ -88,6 +88,7 @@
79444 #include <linux/nsproxy.h>
79445 #include <linux/magic.h>
79446 #include <linux/slab.h>
79447 +#include <linux/in.h>
79448
79449 #include <asm/uaccess.h>
79450 #include <asm/unistd.h>
79451 @@ -105,6 +106,8 @@
79452 #include <linux/sockios.h>
79453 #include <linux/atalk.h>
79454
79455 +#include <linux/grsock.h>
79456 +
79457 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
79458 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
79459 unsigned long nr_segs, loff_t pos);
79460 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
79461 &sockfs_dentry_operations, SOCKFS_MAGIC);
79462 }
79463
79464 -static struct vfsmount *sock_mnt __read_mostly;
79465 +struct vfsmount *sock_mnt __read_mostly;
79466
79467 static struct file_system_type sock_fs_type = {
79468 .name = "sockfs",
79469 @@ -1210,6 +1213,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
79470 return -EAFNOSUPPORT;
79471 if (type < 0 || type >= SOCK_MAX)
79472 return -EINVAL;
79473 + if (protocol < 0)
79474 + return -EINVAL;
79475
79476 /* Compatibility.
79477
79478 @@ -1341,6 +1346,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
79479 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
79480 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
79481
79482 + if(!gr_search_socket(family, type, protocol)) {
79483 + retval = -EACCES;
79484 + goto out;
79485 + }
79486 +
79487 + if (gr_handle_sock_all(family, type, protocol)) {
79488 + retval = -EACCES;
79489 + goto out;
79490 + }
79491 +
79492 retval = sock_create(family, type, protocol, &sock);
79493 if (retval < 0)
79494 goto out;
79495 @@ -1453,6 +1468,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
79496 if (sock) {
79497 err = move_addr_to_kernel(umyaddr, addrlen, &address);
79498 if (err >= 0) {
79499 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
79500 + err = -EACCES;
79501 + goto error;
79502 + }
79503 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
79504 + if (err)
79505 + goto error;
79506 +
79507 err = security_socket_bind(sock,
79508 (struct sockaddr *)&address,
79509 addrlen);
79510 @@ -1461,6 +1484,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
79511 (struct sockaddr *)
79512 &address, addrlen);
79513 }
79514 +error:
79515 fput_light(sock->file, fput_needed);
79516 }
79517 return err;
79518 @@ -1484,10 +1508,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
79519 if ((unsigned int)backlog > somaxconn)
79520 backlog = somaxconn;
79521
79522 + if (gr_handle_sock_server_other(sock->sk)) {
79523 + err = -EPERM;
79524 + goto error;
79525 + }
79526 +
79527 + err = gr_search_listen(sock);
79528 + if (err)
79529 + goto error;
79530 +
79531 err = security_socket_listen(sock, backlog);
79532 if (!err)
79533 err = sock->ops->listen(sock, backlog);
79534
79535 +error:
79536 fput_light(sock->file, fput_needed);
79537 }
79538 return err;
79539 @@ -1531,6 +1565,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
79540 newsock->type = sock->type;
79541 newsock->ops = sock->ops;
79542
79543 + if (gr_handle_sock_server_other(sock->sk)) {
79544 + err = -EPERM;
79545 + sock_release(newsock);
79546 + goto out_put;
79547 + }
79548 +
79549 + err = gr_search_accept(sock);
79550 + if (err) {
79551 + sock_release(newsock);
79552 + goto out_put;
79553 + }
79554 +
79555 /*
79556 * We don't need try_module_get here, as the listening socket (sock)
79557 * has the protocol module (sock->ops->owner) held.
79558 @@ -1569,6 +1615,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
79559 fd_install(newfd, newfile);
79560 err = newfd;
79561
79562 + gr_attach_curr_ip(newsock->sk);
79563 +
79564 out_put:
79565 fput_light(sock->file, fput_needed);
79566 out:
79567 @@ -1601,6 +1649,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
79568 int, addrlen)
79569 {
79570 struct socket *sock;
79571 + struct sockaddr *sck;
79572 struct sockaddr_storage address;
79573 int err, fput_needed;
79574
79575 @@ -1611,6 +1660,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
79576 if (err < 0)
79577 goto out_put;
79578
79579 + sck = (struct sockaddr *)&address;
79580 +
79581 + if (gr_handle_sock_client(sck)) {
79582 + err = -EACCES;
79583 + goto out_put;
79584 + }
79585 +
79586 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
79587 + if (err)
79588 + goto out_put;
79589 +
79590 err =
79591 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
79592 if (err)
79593 @@ -1965,7 +2025,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
79594 * checking falls down on this.
79595 */
79596 if (copy_from_user(ctl_buf,
79597 - (void __user __force *)msg_sys->msg_control,
79598 + (void __force_user *)msg_sys->msg_control,
79599 ctl_len))
79600 goto out_freectl;
79601 msg_sys->msg_control = ctl_buf;
79602 @@ -2133,7 +2193,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
79603 * kernel msghdr to use the kernel address space)
79604 */
79605
79606 - uaddr = (__force void __user *)msg_sys->msg_name;
79607 + uaddr = (void __force_user *)msg_sys->msg_name;
79608 uaddr_len = COMPAT_NAMELEN(msg);
79609 if (MSG_CMSG_COMPAT & flags) {
79610 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
79611 @@ -2658,6 +2718,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
79612 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
79613 return -EFAULT;
79614
79615 + memset(&ifc, 0, sizeof(ifc));
79616 if (ifc32.ifcbuf == 0) {
79617 ifc32.ifc_len = 0;
79618 ifc.ifc_len = 0;
79619 @@ -2761,7 +2822,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79620 }
79621
79622 ifr = compat_alloc_user_space(buf_size);
79623 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
79624 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
79625
79626 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
79627 return -EFAULT;
79628 @@ -2785,12 +2846,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79629 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
79630
79631 if (copy_in_user(rxnfc, compat_rxnfc,
79632 - (void *)(&rxnfc->fs.m_ext + 1) -
79633 - (void *)rxnfc) ||
79634 + (void __user *)(&rxnfc->fs.m_ext + 1) -
79635 + (void __user *)rxnfc) ||
79636 copy_in_user(&rxnfc->fs.ring_cookie,
79637 &compat_rxnfc->fs.ring_cookie,
79638 - (void *)(&rxnfc->fs.location + 1) -
79639 - (void *)&rxnfc->fs.ring_cookie) ||
79640 + (void __user *)(&rxnfc->fs.location + 1) -
79641 + (void __user *)&rxnfc->fs.ring_cookie) ||
79642 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
79643 sizeof(rxnfc->rule_cnt)))
79644 return -EFAULT;
79645 @@ -2802,12 +2863,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79646
79647 if (convert_out) {
79648 if (copy_in_user(compat_rxnfc, rxnfc,
79649 - (const void *)(&rxnfc->fs.m_ext + 1) -
79650 - (const void *)rxnfc) ||
79651 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
79652 + (const void __user *)rxnfc) ||
79653 copy_in_user(&compat_rxnfc->fs.ring_cookie,
79654 &rxnfc->fs.ring_cookie,
79655 - (const void *)(&rxnfc->fs.location + 1) -
79656 - (const void *)&rxnfc->fs.ring_cookie) ||
79657 + (const void __user *)(&rxnfc->fs.location + 1) -
79658 + (const void __user *)&rxnfc->fs.ring_cookie) ||
79659 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
79660 sizeof(rxnfc->rule_cnt)))
79661 return -EFAULT;
79662 @@ -2877,7 +2938,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
79663 old_fs = get_fs();
79664 set_fs(KERNEL_DS);
79665 err = dev_ioctl(net, cmd,
79666 - (struct ifreq __user __force *) &kifr);
79667 + (struct ifreq __force_user *) &kifr);
79668 set_fs(old_fs);
79669
79670 return err;
79671 @@ -2986,7 +3047,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
79672
79673 old_fs = get_fs();
79674 set_fs(KERNEL_DS);
79675 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
79676 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
79677 set_fs(old_fs);
79678
79679 if (cmd == SIOCGIFMAP && !err) {
79680 @@ -3091,7 +3152,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
79681 ret |= __get_user(rtdev, &(ur4->rt_dev));
79682 if (rtdev) {
79683 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
79684 - r4.rt_dev = (char __user __force *)devname;
79685 + r4.rt_dev = (char __force_user *)devname;
79686 devname[15] = 0;
79687 } else
79688 r4.rt_dev = NULL;
79689 @@ -3317,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
79690 int __user *uoptlen;
79691 int err;
79692
79693 - uoptval = (char __user __force *) optval;
79694 - uoptlen = (int __user __force *) optlen;
79695 + uoptval = (char __force_user *) optval;
79696 + uoptlen = (int __force_user *) optlen;
79697
79698 set_fs(KERNEL_DS);
79699 if (level == SOL_SOCKET)
79700 @@ -3338,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
79701 char __user *uoptval;
79702 int err;
79703
79704 - uoptval = (char __user __force *) optval;
79705 + uoptval = (char __force_user *) optval;
79706
79707 set_fs(KERNEL_DS);
79708 if (level == SOL_SOCKET)
79709 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
79710 index eda32ae..1c9fa7c 100644
79711 --- a/net/sunrpc/sched.c
79712 +++ b/net/sunrpc/sched.c
79713 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
79714 #ifdef RPC_DEBUG
79715 static void rpc_task_set_debuginfo(struct rpc_task *task)
79716 {
79717 - static atomic_t rpc_pid;
79718 + static atomic_unchecked_t rpc_pid;
79719
79720 - task->tk_pid = atomic_inc_return(&rpc_pid);
79721 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
79722 }
79723 #else
79724 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
79725 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
79726 index 8343737..677025e 100644
79727 --- a/net/sunrpc/xprtrdma/svc_rdma.c
79728 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
79729 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
79730 static unsigned int min_max_inline = 4096;
79731 static unsigned int max_max_inline = 65536;
79732
79733 -atomic_t rdma_stat_recv;
79734 -atomic_t rdma_stat_read;
79735 -atomic_t rdma_stat_write;
79736 -atomic_t rdma_stat_sq_starve;
79737 -atomic_t rdma_stat_rq_starve;
79738 -atomic_t rdma_stat_rq_poll;
79739 -atomic_t rdma_stat_rq_prod;
79740 -atomic_t rdma_stat_sq_poll;
79741 -atomic_t rdma_stat_sq_prod;
79742 +atomic_unchecked_t rdma_stat_recv;
79743 +atomic_unchecked_t rdma_stat_read;
79744 +atomic_unchecked_t rdma_stat_write;
79745 +atomic_unchecked_t rdma_stat_sq_starve;
79746 +atomic_unchecked_t rdma_stat_rq_starve;
79747 +atomic_unchecked_t rdma_stat_rq_poll;
79748 +atomic_unchecked_t rdma_stat_rq_prod;
79749 +atomic_unchecked_t rdma_stat_sq_poll;
79750 +atomic_unchecked_t rdma_stat_sq_prod;
79751
79752 /* Temporary NFS request map and context caches */
79753 struct kmem_cache *svc_rdma_map_cachep;
79754 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
79755 len -= *ppos;
79756 if (len > *lenp)
79757 len = *lenp;
79758 - if (len && copy_to_user(buffer, str_buf, len))
79759 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
79760 return -EFAULT;
79761 *lenp = len;
79762 *ppos += len;
79763 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
79764 {
79765 .procname = "rdma_stat_read",
79766 .data = &rdma_stat_read,
79767 - .maxlen = sizeof(atomic_t),
79768 + .maxlen = sizeof(atomic_unchecked_t),
79769 .mode = 0644,
79770 .proc_handler = read_reset_stat,
79771 },
79772 {
79773 .procname = "rdma_stat_recv",
79774 .data = &rdma_stat_recv,
79775 - .maxlen = sizeof(atomic_t),
79776 + .maxlen = sizeof(atomic_unchecked_t),
79777 .mode = 0644,
79778 .proc_handler = read_reset_stat,
79779 },
79780 {
79781 .procname = "rdma_stat_write",
79782 .data = &rdma_stat_write,
79783 - .maxlen = sizeof(atomic_t),
79784 + .maxlen = sizeof(atomic_unchecked_t),
79785 .mode = 0644,
79786 .proc_handler = read_reset_stat,
79787 },
79788 {
79789 .procname = "rdma_stat_sq_starve",
79790 .data = &rdma_stat_sq_starve,
79791 - .maxlen = sizeof(atomic_t),
79792 + .maxlen = sizeof(atomic_unchecked_t),
79793 .mode = 0644,
79794 .proc_handler = read_reset_stat,
79795 },
79796 {
79797 .procname = "rdma_stat_rq_starve",
79798 .data = &rdma_stat_rq_starve,
79799 - .maxlen = sizeof(atomic_t),
79800 + .maxlen = sizeof(atomic_unchecked_t),
79801 .mode = 0644,
79802 .proc_handler = read_reset_stat,
79803 },
79804 {
79805 .procname = "rdma_stat_rq_poll",
79806 .data = &rdma_stat_rq_poll,
79807 - .maxlen = sizeof(atomic_t),
79808 + .maxlen = sizeof(atomic_unchecked_t),
79809 .mode = 0644,
79810 .proc_handler = read_reset_stat,
79811 },
79812 {
79813 .procname = "rdma_stat_rq_prod",
79814 .data = &rdma_stat_rq_prod,
79815 - .maxlen = sizeof(atomic_t),
79816 + .maxlen = sizeof(atomic_unchecked_t),
79817 .mode = 0644,
79818 .proc_handler = read_reset_stat,
79819 },
79820 {
79821 .procname = "rdma_stat_sq_poll",
79822 .data = &rdma_stat_sq_poll,
79823 - .maxlen = sizeof(atomic_t),
79824 + .maxlen = sizeof(atomic_unchecked_t),
79825 .mode = 0644,
79826 .proc_handler = read_reset_stat,
79827 },
79828 {
79829 .procname = "rdma_stat_sq_prod",
79830 .data = &rdma_stat_sq_prod,
79831 - .maxlen = sizeof(atomic_t),
79832 + .maxlen = sizeof(atomic_unchecked_t),
79833 .mode = 0644,
79834 .proc_handler = read_reset_stat,
79835 },
79836 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79837 index 41cb63b..c4a1489 100644
79838 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79839 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79840 @@ -501,7 +501,7 @@ next_sge:
79841 svc_rdma_put_context(ctxt, 0);
79842 goto out;
79843 }
79844 - atomic_inc(&rdma_stat_read);
79845 + atomic_inc_unchecked(&rdma_stat_read);
79846
79847 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
79848 chl_map->ch[ch_no].count -= read_wr.num_sge;
79849 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
79850 dto_q);
79851 list_del_init(&ctxt->dto_q);
79852 } else {
79853 - atomic_inc(&rdma_stat_rq_starve);
79854 + atomic_inc_unchecked(&rdma_stat_rq_starve);
79855 clear_bit(XPT_DATA, &xprt->xpt_flags);
79856 ctxt = NULL;
79857 }
79858 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
79859 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
79860 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
79861 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
79862 - atomic_inc(&rdma_stat_recv);
79863 + atomic_inc_unchecked(&rdma_stat_recv);
79864
79865 /* Build up the XDR from the receive buffers. */
79866 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
79867 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79868 index 42eb7ba..c887c45 100644
79869 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79870 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79871 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
79872 write_wr.wr.rdma.remote_addr = to;
79873
79874 /* Post It */
79875 - atomic_inc(&rdma_stat_write);
79876 + atomic_inc_unchecked(&rdma_stat_write);
79877 if (svc_rdma_send(xprt, &write_wr))
79878 goto err;
79879 return 0;
79880 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
79881 index 73b428b..5f3f8f3 100644
79882 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
79883 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
79884 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
79885 return;
79886
79887 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
79888 - atomic_inc(&rdma_stat_rq_poll);
79889 + atomic_inc_unchecked(&rdma_stat_rq_poll);
79890
79891 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
79892 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
79893 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
79894 }
79895
79896 if (ctxt)
79897 - atomic_inc(&rdma_stat_rq_prod);
79898 + atomic_inc_unchecked(&rdma_stat_rq_prod);
79899
79900 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
79901 /*
79902 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
79903 return;
79904
79905 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
79906 - atomic_inc(&rdma_stat_sq_poll);
79907 + atomic_inc_unchecked(&rdma_stat_sq_poll);
79908 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
79909 if (wc.status != IB_WC_SUCCESS)
79910 /* Close the transport */
79911 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
79912 }
79913
79914 if (ctxt)
79915 - atomic_inc(&rdma_stat_sq_prod);
79916 + atomic_inc_unchecked(&rdma_stat_sq_prod);
79917 }
79918
79919 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
79920 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
79921 spin_lock_bh(&xprt->sc_lock);
79922 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
79923 spin_unlock_bh(&xprt->sc_lock);
79924 - atomic_inc(&rdma_stat_sq_starve);
79925 + atomic_inc_unchecked(&rdma_stat_sq_starve);
79926
79927 /* See if we can opportunistically reap SQ WR to make room */
79928 sq_cq_reap(xprt);
79929 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
79930 index e3a6e37..be2ea77 100644
79931 --- a/net/sysctl_net.c
79932 +++ b/net/sysctl_net.c
79933 @@ -43,7 +43,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
79934 struct ctl_table *table)
79935 {
79936 /* Allow network administrator to have same access as root. */
79937 - if (capable(CAP_NET_ADMIN)) {
79938 + if (capable_nolog(CAP_NET_ADMIN)) {
79939 int mode = (table->mode >> 6) & 7;
79940 return (mode << 6) | (mode << 3) | mode;
79941 }
79942 diff --git a/net/tipc/link.c b/net/tipc/link.c
79943 index 7a614f4..b14dbd2 100644
79944 --- a/net/tipc/link.c
79945 +++ b/net/tipc/link.c
79946 @@ -1164,7 +1164,7 @@ static int link_send_sections_long(struct tipc_port *sender,
79947 struct tipc_msg fragm_hdr;
79948 struct sk_buff *buf, *buf_chain, *prev;
79949 u32 fragm_crs, fragm_rest, hsz, sect_rest;
79950 - const unchar *sect_crs;
79951 + const unchar __user *sect_crs;
79952 int curr_sect;
79953 u32 fragm_no;
79954
79955 @@ -1205,7 +1205,7 @@ again:
79956
79957 if (!sect_rest) {
79958 sect_rest = msg_sect[++curr_sect].iov_len;
79959 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
79960 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
79961 }
79962
79963 if (sect_rest < fragm_rest)
79964 @@ -1224,7 +1224,7 @@ error:
79965 }
79966 } else
79967 skb_copy_to_linear_data_offset(buf, fragm_crs,
79968 - sect_crs, sz);
79969 + (const void __force_kernel *)sect_crs, sz);
79970 sect_crs += sz;
79971 sect_rest -= sz;
79972 fragm_crs += sz;
79973 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
79974 index deea0d2..fa13bd7 100644
79975 --- a/net/tipc/msg.c
79976 +++ b/net/tipc/msg.c
79977 @@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
79978 msg_sect[cnt].iov_len);
79979 else
79980 skb_copy_to_linear_data_offset(*buf, pos,
79981 - msg_sect[cnt].iov_base,
79982 + (const void __force_kernel *)msg_sect[cnt].iov_base,
79983 msg_sect[cnt].iov_len);
79984 pos += msg_sect[cnt].iov_len;
79985 }
79986 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
79987 index f976e9cd..560d055 100644
79988 --- a/net/tipc/subscr.c
79989 +++ b/net/tipc/subscr.c
79990 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
79991 {
79992 struct iovec msg_sect;
79993
79994 - msg_sect.iov_base = (void *)&sub->evt;
79995 + msg_sect.iov_base = (void __force_user *)&sub->evt;
79996 msg_sect.iov_len = sizeof(struct tipc_event);
79997
79998 sub->evt.event = htohl(event, sub->swap);
79999 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
80000 index 641f2e4..590bb48 100644
80001 --- a/net/unix/af_unix.c
80002 +++ b/net/unix/af_unix.c
80003 @@ -780,6 +780,12 @@ static struct sock *unix_find_other(struct net *net,
80004 err = -ECONNREFUSED;
80005 if (!S_ISSOCK(inode->i_mode))
80006 goto put_fail;
80007 +
80008 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
80009 + err = -EACCES;
80010 + goto put_fail;
80011 + }
80012 +
80013 u = unix_find_socket_byinode(inode);
80014 if (!u)
80015 goto put_fail;
80016 @@ -800,6 +806,13 @@ static struct sock *unix_find_other(struct net *net,
80017 if (u) {
80018 struct dentry *dentry;
80019 dentry = unix_sk(u)->path.dentry;
80020 +
80021 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
80022 + err = -EPERM;
80023 + sock_put(u);
80024 + goto fail;
80025 + }
80026 +
80027 if (dentry)
80028 touch_atime(&unix_sk(u)->path);
80029 } else
80030 @@ -882,11 +895,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
80031 err = security_path_mknod(&path, dentry, mode, 0);
80032 if (err)
80033 goto out_mknod_drop_write;
80034 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
80035 + err = -EACCES;
80036 + goto out_mknod_drop_write;
80037 + }
80038 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
80039 out_mknod_drop_write:
80040 mnt_drop_write(path.mnt);
80041 if (err)
80042 goto out_mknod_dput;
80043 +
80044 + gr_handle_create(dentry, path.mnt);
80045 +
80046 mutex_unlock(&path.dentry->d_inode->i_mutex);
80047 dput(path.dentry);
80048 path.dentry = dentry;
80049 @@ -1448,7 +1468,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
80050 if (NULL == siocb->scm)
80051 siocb->scm = &tmp_scm;
80052 wait_for_unix_gc();
80053 - err = scm_send(sock, msg, siocb->scm);
80054 + err = scm_send(sock, msg, siocb->scm, false);
80055 if (err < 0)
80056 return err;
80057
80058 @@ -1617,7 +1637,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
80059 if (NULL == siocb->scm)
80060 siocb->scm = &tmp_scm;
80061 wait_for_unix_gc();
80062 - err = scm_send(sock, msg, siocb->scm);
80063 + err = scm_send(sock, msg, siocb->scm, false);
80064 if (err < 0)
80065 return err;
80066
80067 diff --git a/net/wireless/core.h b/net/wireless/core.h
80068 index bc686ef..27845e6 100644
80069 --- a/net/wireless/core.h
80070 +++ b/net/wireless/core.h
80071 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
80072 struct mutex mtx;
80073
80074 /* rfkill support */
80075 - struct rfkill_ops rfkill_ops;
80076 + rfkill_ops_no_const rfkill_ops;
80077 struct rfkill *rfkill;
80078 struct work_struct rfkill_sync;
80079
80080 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
80081 index b0eb7aa..7d73e82 100644
80082 --- a/net/wireless/wext-core.c
80083 +++ b/net/wireless/wext-core.c
80084 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
80085 */
80086
80087 /* Support for very large requests */
80088 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
80089 - (user_length > descr->max_tokens)) {
80090 + if (user_length > descr->max_tokens) {
80091 /* Allow userspace to GET more than max so
80092 * we can support any size GET requests.
80093 * There is still a limit : -ENOMEM.
80094 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
80095 }
80096 }
80097
80098 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
80099 - /*
80100 - * If this is a GET, but not NOMAX, it means that the extra
80101 - * data is not bounded by userspace, but by max_tokens. Thus
80102 - * set the length to max_tokens. This matches the extra data
80103 - * allocation.
80104 - * The driver should fill it with the number of tokens it
80105 - * provided, and it may check iwp->length rather than having
80106 - * knowledge of max_tokens. If the driver doesn't change the
80107 - * iwp->length, this ioctl just copies back max_token tokens
80108 - * filled with zeroes. Hopefully the driver isn't claiming
80109 - * them to be valid data.
80110 - */
80111 - iwp->length = descr->max_tokens;
80112 - }
80113 -
80114 err = handler(dev, info, (union iwreq_data *) iwp, extra);
80115
80116 iwp->length += essid_compat;
80117 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
80118 index ccfbd32..9b61cf9f 100644
80119 --- a/net/xfrm/xfrm_policy.c
80120 +++ b/net/xfrm/xfrm_policy.c
80121 @@ -300,7 +300,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
80122 {
80123 policy->walk.dead = 1;
80124
80125 - atomic_inc(&policy->genid);
80126 + atomic_inc_unchecked(&policy->genid);
80127
80128 if (del_timer(&policy->timer))
80129 xfrm_pol_put(policy);
80130 @@ -584,7 +584,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
80131 hlist_add_head(&policy->bydst, chain);
80132 xfrm_pol_hold(policy);
80133 net->xfrm.policy_count[dir]++;
80134 - atomic_inc(&flow_cache_genid);
80135 + atomic_inc_unchecked(&flow_cache_genid);
80136 if (delpol)
80137 __xfrm_policy_unlink(delpol, dir);
80138 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
80139 @@ -1532,7 +1532,7 @@ free_dst:
80140 goto out;
80141 }
80142
80143 -static int inline
80144 +static inline int
80145 xfrm_dst_alloc_copy(void **target, const void *src, int size)
80146 {
80147 if (!*target) {
80148 @@ -1544,7 +1544,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
80149 return 0;
80150 }
80151
80152 -static int inline
80153 +static inline int
80154 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
80155 {
80156 #ifdef CONFIG_XFRM_SUB_POLICY
80157 @@ -1556,7 +1556,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
80158 #endif
80159 }
80160
80161 -static int inline
80162 +static inline int
80163 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
80164 {
80165 #ifdef CONFIG_XFRM_SUB_POLICY
80166 @@ -1650,7 +1650,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
80167
80168 xdst->num_pols = num_pols;
80169 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
80170 - xdst->policy_genid = atomic_read(&pols[0]->genid);
80171 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
80172
80173 return xdst;
80174 }
80175 @@ -2350,7 +2350,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
80176 if (xdst->xfrm_genid != dst->xfrm->genid)
80177 return 0;
80178 if (xdst->num_pols > 0 &&
80179 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
80180 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
80181 return 0;
80182
80183 mtu = dst_mtu(dst->child);
80184 @@ -2887,7 +2887,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
80185 sizeof(pol->xfrm_vec[i].saddr));
80186 pol->xfrm_vec[i].encap_family = mp->new_family;
80187 /* flush bundles */
80188 - atomic_inc(&pol->genid);
80189 + atomic_inc_unchecked(&pol->genid);
80190 }
80191 }
80192
80193 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
80194 index 5b228f9..6aca4e3 100644
80195 --- a/net/xfrm/xfrm_state.c
80196 +++ b/net/xfrm/xfrm_state.c
80197 @@ -1981,8 +1981,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
80198 goto error;
80199
80200 x->outer_mode = xfrm_get_mode(x->props.mode, family);
80201 - if (x->outer_mode == NULL)
80202 + if (x->outer_mode == NULL) {
80203 + err = -EPROTONOSUPPORT;
80204 goto error;
80205 + }
80206
80207 if (init_replay) {
80208 err = xfrm_init_replay(x);
80209 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
80210 index 44293b3..1870c62 100644
80211 --- a/net/xfrm/xfrm_user.c
80212 +++ b/net/xfrm/xfrm_user.c
80213 @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
80214 struct nlattr **attrs)
80215 {
80216 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
80217 + struct xfrm_replay_state_esn *rs;
80218
80219 - if ((p->flags & XFRM_STATE_ESN) && !rt)
80220 - return -EINVAL;
80221 + if (p->flags & XFRM_STATE_ESN) {
80222 + if (!rt)
80223 + return -EINVAL;
80224 +
80225 + rs = nla_data(rt);
80226 +
80227 + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
80228 + return -EINVAL;
80229 +
80230 + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
80231 + nla_len(rt) != sizeof(*rs))
80232 + return -EINVAL;
80233 + }
80234
80235 if (!rt)
80236 return 0;
80237 @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
80238 struct nlattr *rp)
80239 {
80240 struct xfrm_replay_state_esn *up;
80241 + int ulen;
80242
80243 if (!replay_esn || !rp)
80244 return 0;
80245
80246 up = nla_data(rp);
80247 + ulen = xfrm_replay_state_esn_len(up);
80248
80249 - if (xfrm_replay_state_esn_len(replay_esn) !=
80250 - xfrm_replay_state_esn_len(up))
80251 + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
80252 return -EINVAL;
80253
80254 return 0;
80255 @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
80256 struct nlattr *rta)
80257 {
80258 struct xfrm_replay_state_esn *p, *pp, *up;
80259 + int klen, ulen;
80260
80261 if (!rta)
80262 return 0;
80263
80264 up = nla_data(rta);
80265 + klen = xfrm_replay_state_esn_len(up);
80266 + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
80267
80268 - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
80269 + p = kzalloc(klen, GFP_KERNEL);
80270 if (!p)
80271 return -ENOMEM;
80272
80273 - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
80274 + pp = kzalloc(klen, GFP_KERNEL);
80275 if (!pp) {
80276 kfree(p);
80277 return -ENOMEM;
80278 }
80279
80280 + memcpy(p, up, ulen);
80281 + memcpy(pp, up, ulen);
80282 +
80283 *replay_esn = p;
80284 *preplay_esn = pp;
80285
80286 @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
80287 * somehow made shareable and move it to xfrm_state.c - JHS
80288 *
80289 */
80290 -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
80291 +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
80292 + int update_esn)
80293 {
80294 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
80295 - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
80296 + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
80297 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
80298 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
80299 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
80300 @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
80301 goto error;
80302
80303 /* override default values from above */
80304 - xfrm_update_ae_params(x, attrs);
80305 + xfrm_update_ae_params(x, attrs, 0);
80306
80307 return x;
80308
80309 @@ -689,6 +709,7 @@ out:
80310
80311 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
80312 {
80313 + memset(p, 0, sizeof(*p));
80314 memcpy(&p->id, &x->id, sizeof(p->id));
80315 memcpy(&p->sel, &x->sel, sizeof(p->sel));
80316 memcpy(&p->lft, &x->lft, sizeof(p->lft));
80317 @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
80318 return -EMSGSIZE;
80319
80320 algo = nla_data(nla);
80321 - strcpy(algo->alg_name, auth->alg_name);
80322 + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
80323 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
80324 algo->alg_key_len = auth->alg_key_len;
80325
80326 @@ -872,6 +893,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
80327 {
80328 struct xfrm_dump_info info;
80329 struct sk_buff *skb;
80330 + int err;
80331
80332 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
80333 if (!skb)
80334 @@ -882,9 +904,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
80335 info.nlmsg_seq = seq;
80336 info.nlmsg_flags = 0;
80337
80338 - if (dump_one_state(x, 0, &info)) {
80339 + err = dump_one_state(x, 0, &info);
80340 + if (err) {
80341 kfree_skb(skb);
80342 - return NULL;
80343 + return ERR_PTR(err);
80344 }
80345
80346 return skb;
80347 @@ -1309,6 +1332,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
80348
80349 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
80350 {
80351 + memset(p, 0, sizeof(*p));
80352 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
80353 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
80354 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
80355 @@ -1413,6 +1437,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
80356 struct xfrm_user_tmpl *up = &vec[i];
80357 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
80358
80359 + memset(up, 0, sizeof(*up));
80360 memcpy(&up->id, &kp->id, sizeof(up->id));
80361 up->family = kp->encap_family;
80362 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
80363 @@ -1812,7 +1837,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
80364 goto out;
80365
80366 spin_lock_bh(&x->lock);
80367 - xfrm_update_ae_params(x, attrs);
80368 + xfrm_update_ae_params(x, attrs, 1);
80369 spin_unlock_bh(&x->lock);
80370
80371 c.event = nlh->nlmsg_type;
80372 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
80373 index ff1720d..ed8475e 100644
80374 --- a/scripts/Makefile.build
80375 +++ b/scripts/Makefile.build
80376 @@ -111,7 +111,7 @@ endif
80377 endif
80378
80379 # Do not include host rules unless needed
80380 -ifneq ($(hostprogs-y)$(hostprogs-m),)
80381 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
80382 include scripts/Makefile.host
80383 endif
80384
80385 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
80386 index 686cb0d..9d653bf 100644
80387 --- a/scripts/Makefile.clean
80388 +++ b/scripts/Makefile.clean
80389 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
80390 __clean-files := $(extra-y) $(always) \
80391 $(targets) $(clean-files) \
80392 $(host-progs) \
80393 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
80394 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
80395 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
80396
80397 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
80398
80399 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
80400 index 1ac414f..38575f7 100644
80401 --- a/scripts/Makefile.host
80402 +++ b/scripts/Makefile.host
80403 @@ -31,6 +31,8 @@
80404 # Note: Shared libraries consisting of C++ files are not supported
80405
80406 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
80407 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
80408 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
80409
80410 # C code
80411 # Executables compiled from a single .c file
80412 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
80413 # Shared libaries (only .c supported)
80414 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
80415 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
80416 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
80417 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
80418 # Remove .so files from "xxx-objs"
80419 host-cobjs := $(filter-out %.so,$(host-cobjs))
80420 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
80421
80422 -#Object (.o) files used by the shared libaries
80423 +# Object (.o) files used by the shared libaries
80424 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
80425 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
80426
80427 # output directory for programs/.o files
80428 # hostprogs-y := tools/build may have been specified. Retrieve directory
80429 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
80430 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
80431 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
80432 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
80433 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
80434 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
80435 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
80436 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
80437
80438 obj-dirs += $(host-objdirs)
80439 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
80440 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
80441 $(call if_changed_dep,host-cshobjs)
80442
80443 +# Compile .c file, create position independent .o file
80444 +# host-cxxshobjs -> .o
80445 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
80446 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
80447 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
80448 + $(call if_changed_dep,host-cxxshobjs)
80449 +
80450 # Link a shared library, based on position independent .o files
80451 # *.o -> .so shared library (host-cshlib)
80452 quiet_cmd_host-cshlib = HOSTLLD -shared $@
80453 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
80454 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
80455 $(call if_changed,host-cshlib)
80456
80457 +# Link a shared library, based on position independent .o files
80458 +# *.o -> .so shared library (host-cxxshlib)
80459 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
80460 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
80461 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
80462 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
80463 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
80464 + $(call if_changed,host-cxxshlib)
80465 +
80466 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
80467 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
80468 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
80469
80470 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
80471 index cb1f50c..cef2a7c 100644
80472 --- a/scripts/basic/fixdep.c
80473 +++ b/scripts/basic/fixdep.c
80474 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
80475 /*
80476 * Lookup a value in the configuration string.
80477 */
80478 -static int is_defined_config(const char *name, int len, unsigned int hash)
80479 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
80480 {
80481 struct item *aux;
80482
80483 @@ -211,10 +211,10 @@ static void clear_config(void)
80484 /*
80485 * Record the use of a CONFIG_* word.
80486 */
80487 -static void use_config(const char *m, int slen)
80488 +static void use_config(const char *m, unsigned int slen)
80489 {
80490 unsigned int hash = strhash(m, slen);
80491 - int c, i;
80492 + unsigned int c, i;
80493
80494 if (is_defined_config(m, slen, hash))
80495 return;
80496 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
80497
80498 static void parse_config_file(const char *map, size_t len)
80499 {
80500 - const int *end = (const int *) (map + len);
80501 + const unsigned int *end = (const unsigned int *) (map + len);
80502 /* start at +1, so that p can never be < map */
80503 - const int *m = (const int *) map + 1;
80504 + const unsigned int *m = (const unsigned int *) map + 1;
80505 const char *p, *q;
80506
80507 for (; m < end; m++) {
80508 @@ -406,7 +406,7 @@ static void print_deps(void)
80509 static void traps(void)
80510 {
80511 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
80512 - int *p = (int *)test;
80513 + unsigned int *p = (unsigned int *)test;
80514
80515 if (*p != INT_CONF) {
80516 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
80517 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
80518 new file mode 100644
80519 index 0000000..008ac1a
80520 --- /dev/null
80521 +++ b/scripts/gcc-plugin.sh
80522 @@ -0,0 +1,17 @@
80523 +#!/bin/bash
80524 +plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
80525 +#include "gcc-plugin.h"
80526 +#include "tree.h"
80527 +#include "tm.h"
80528 +#include "rtl.h"
80529 +#ifdef ENABLE_BUILD_WITH_CXX
80530 +#warning $2
80531 +#else
80532 +#warning $1
80533 +#endif
80534 +EOF`
80535 +if [ $? -eq 0 ]
80536 +then
80537 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
80538 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
80539 +fi
80540 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
80541 index cd9c6c6..0c8f0fa 100644
80542 --- a/scripts/link-vmlinux.sh
80543 +++ b/scripts/link-vmlinux.sh
80544 @@ -147,7 +147,7 @@ else
80545 fi;
80546
80547 # final build of init/
80548 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
80549 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
80550
80551 kallsymso=""
80552 kallsyms_vmlinux=""
80553 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
80554 index 5759751..b170367 100644
80555 --- a/scripts/mod/file2alias.c
80556 +++ b/scripts/mod/file2alias.c
80557 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
80558 unsigned long size, unsigned long id_size,
80559 void *symval)
80560 {
80561 - int i;
80562 + unsigned int i;
80563
80564 if (size % id_size || size < id_size) {
80565 if (cross_build != 0)
80566 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
80567 /* USB is special because the bcdDevice can be matched against a numeric range */
80568 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
80569 static void do_usb_entry(struct usb_device_id *id,
80570 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
80571 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
80572 unsigned char range_lo, unsigned char range_hi,
80573 unsigned char max, struct module *mod)
80574 {
80575 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
80576 {
80577 unsigned int devlo, devhi;
80578 unsigned char chi, clo, max;
80579 - int ndigits;
80580 + unsigned int ndigits;
80581
80582 id->match_flags = TO_NATIVE(id->match_flags);
80583 id->idVendor = TO_NATIVE(id->idVendor);
80584 @@ -504,7 +504,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
80585 for (i = 0; i < count; i++) {
80586 const char *id = (char *)devs[i].id;
80587 char acpi_id[sizeof(devs[0].id)];
80588 - int j;
80589 + unsigned int j;
80590
80591 buf_printf(&mod->dev_table_buf,
80592 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
80593 @@ -534,7 +534,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
80594
80595 for (j = 0; j < PNP_MAX_DEVICES; j++) {
80596 const char *id = (char *)card->devs[j].id;
80597 - int i2, j2;
80598 + unsigned int i2, j2;
80599 int dup = 0;
80600
80601 if (!id[0])
80602 @@ -560,7 +560,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
80603 /* add an individual alias for every device entry */
80604 if (!dup) {
80605 char acpi_id[sizeof(card->devs[0].id)];
80606 - int k;
80607 + unsigned int k;
80608
80609 buf_printf(&mod->dev_table_buf,
80610 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
80611 @@ -885,7 +885,7 @@ static void dmi_ascii_filter(char *d, const char *s)
80612 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
80613 char *alias)
80614 {
80615 - int i, j;
80616 + unsigned int i, j;
80617
80618 sprintf(alias, "dmi*");
80619
80620 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
80621 index 0f84bb3..2d42035 100644
80622 --- a/scripts/mod/modpost.c
80623 +++ b/scripts/mod/modpost.c
80624 @@ -925,6 +925,7 @@ enum mismatch {
80625 ANY_INIT_TO_ANY_EXIT,
80626 ANY_EXIT_TO_ANY_INIT,
80627 EXPORT_TO_INIT_EXIT,
80628 + DATA_TO_TEXT
80629 };
80630
80631 struct sectioncheck {
80632 @@ -1033,6 +1034,12 @@ const struct sectioncheck sectioncheck[] = {
80633 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
80634 .mismatch = EXPORT_TO_INIT_EXIT,
80635 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
80636 +},
80637 +/* Do not reference code from writable data */
80638 +{
80639 + .fromsec = { DATA_SECTIONS, NULL },
80640 + .tosec = { TEXT_SECTIONS, NULL },
80641 + .mismatch = DATA_TO_TEXT
80642 }
80643 };
80644
80645 @@ -1155,10 +1162,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
80646 continue;
80647 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
80648 continue;
80649 - if (sym->st_value == addr)
80650 - return sym;
80651 /* Find a symbol nearby - addr are maybe negative */
80652 d = sym->st_value - addr;
80653 + if (d == 0)
80654 + return sym;
80655 if (d < 0)
80656 d = addr - sym->st_value;
80657 if (d < distance) {
80658 @@ -1437,6 +1444,14 @@ static void report_sec_mismatch(const char *modname,
80659 tosym, prl_to, prl_to, tosym);
80660 free(prl_to);
80661 break;
80662 + case DATA_TO_TEXT:
80663 +#if 0
80664 + fprintf(stderr,
80665 + "The %s %s:%s references\n"
80666 + "the %s %s:%s%s\n",
80667 + from, fromsec, fromsym, to, tosec, tosym, to_p);
80668 +#endif
80669 + break;
80670 }
80671 fprintf(stderr, "\n");
80672 }
80673 @@ -1671,7 +1686,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
80674 static void check_sec_ref(struct module *mod, const char *modname,
80675 struct elf_info *elf)
80676 {
80677 - int i;
80678 + unsigned int i;
80679 Elf_Shdr *sechdrs = elf->sechdrs;
80680
80681 /* Walk through all sections */
80682 @@ -1769,7 +1784,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
80683 va_end(ap);
80684 }
80685
80686 -void buf_write(struct buffer *buf, const char *s, int len)
80687 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
80688 {
80689 if (buf->size - buf->pos < len) {
80690 buf->size += len + SZ;
80691 @@ -1987,7 +2002,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
80692 if (fstat(fileno(file), &st) < 0)
80693 goto close_write;
80694
80695 - if (st.st_size != b->pos)
80696 + if (st.st_size != (off_t)b->pos)
80697 goto close_write;
80698
80699 tmp = NOFAIL(malloc(b->pos));
80700 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
80701 index 51207e4..f7d603d 100644
80702 --- a/scripts/mod/modpost.h
80703 +++ b/scripts/mod/modpost.h
80704 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
80705
80706 struct buffer {
80707 char *p;
80708 - int pos;
80709 - int size;
80710 + unsigned int pos;
80711 + unsigned int size;
80712 };
80713
80714 void __attribute__((format(printf, 2, 3)))
80715 buf_printf(struct buffer *buf, const char *fmt, ...);
80716
80717 void
80718 -buf_write(struct buffer *buf, const char *s, int len);
80719 +buf_write(struct buffer *buf, const char *s, unsigned int len);
80720
80721 struct module {
80722 struct module *next;
80723 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
80724 index 9dfcd6d..099068e 100644
80725 --- a/scripts/mod/sumversion.c
80726 +++ b/scripts/mod/sumversion.c
80727 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
80728 goto out;
80729 }
80730
80731 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
80732 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
80733 warn("writing sum in %s failed: %s\n",
80734 filename, strerror(errno));
80735 goto out;
80736 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
80737 index 5c11312..72742b5 100644
80738 --- a/scripts/pnmtologo.c
80739 +++ b/scripts/pnmtologo.c
80740 @@ -237,14 +237,14 @@ static void write_header(void)
80741 fprintf(out, " * Linux logo %s\n", logoname);
80742 fputs(" */\n\n", out);
80743 fputs("#include <linux/linux_logo.h>\n\n", out);
80744 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
80745 + fprintf(out, "static unsigned char %s_data[] = {\n",
80746 logoname);
80747 }
80748
80749 static void write_footer(void)
80750 {
80751 fputs("\n};\n\n", out);
80752 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
80753 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
80754 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
80755 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
80756 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
80757 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
80758 fputs("\n};\n\n", out);
80759
80760 /* write logo clut */
80761 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
80762 + fprintf(out, "static unsigned char %s_clut[] = {\n",
80763 logoname);
80764 write_hex_cnt = 0;
80765 for (i = 0; i < logo_clutsize; i++) {
80766 diff --git a/security/Kconfig b/security/Kconfig
80767 index e9c6ac7..5ff1ad1 100644
80768 --- a/security/Kconfig
80769 +++ b/security/Kconfig
80770 @@ -4,6 +4,885 @@
80771
80772 menu "Security options"
80773
80774 +menu "Grsecurity"
80775 +
80776 + config ARCH_TRACK_EXEC_LIMIT
80777 + bool
80778 +
80779 + config PAX_KERNEXEC_PLUGIN
80780 + bool
80781 +
80782 + config PAX_PER_CPU_PGD
80783 + bool
80784 +
80785 + config TASK_SIZE_MAX_SHIFT
80786 + int
80787 + depends on X86_64
80788 + default 47 if !PAX_PER_CPU_PGD
80789 + default 42 if PAX_PER_CPU_PGD
80790 +
80791 + config PAX_ENABLE_PAE
80792 + bool
80793 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
80794 +
80795 + config PAX_USERCOPY_SLABS
80796 + bool
80797 +
80798 +config GRKERNSEC
80799 + bool "Grsecurity"
80800 + select CRYPTO
80801 + select CRYPTO_SHA256
80802 + select PROC_FS
80803 + select STOP_MACHINE
80804 + help
80805 + If you say Y here, you will be able to configure many features
80806 + that will enhance the security of your system. It is highly
80807 + recommended that you say Y here and read through the help
80808 + for each option so that you fully understand the features and
80809 + can evaluate their usefulness for your machine.
80810 +
80811 +choice
80812 + prompt "Configuration Method"
80813 + depends on GRKERNSEC
80814 + default GRKERNSEC_CONFIG_CUSTOM
80815 + help
80816 +
80817 +config GRKERNSEC_CONFIG_AUTO
80818 + bool "Automatic"
80819 + help
80820 + If you choose this configuration method, you'll be able to answer a small
80821 + number of simple questions about how you plan to use this kernel.
80822 + The settings of grsecurity and PaX will be automatically configured for
80823 + the highest commonly-used settings within the provided constraints.
80824 +
80825 + If you require additional configuration, custom changes can still be made
80826 + from the "custom configuration" menu.
80827 +
80828 +config GRKERNSEC_CONFIG_CUSTOM
80829 + bool "Custom"
80830 + help
80831 + If you choose this configuration method, you'll be able to configure all
80832 + grsecurity and PaX settings manually. Via this method, no options are
80833 + automatically enabled.
80834 +
80835 +endchoice
80836 +
80837 +choice
80838 + prompt "Usage Type"
80839 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80840 + default GRKERNSEC_CONFIG_SERVER
80841 + help
80842 +
80843 +config GRKERNSEC_CONFIG_SERVER
80844 + bool "Server"
80845 + help
80846 + Choose this option if you plan to use this kernel on a server.
80847 +
80848 +config GRKERNSEC_CONFIG_DESKTOP
80849 + bool "Desktop"
80850 + help
80851 + Choose this option if you plan to use this kernel on a desktop.
80852 +
80853 +endchoice
80854 +
80855 +choice
80856 + prompt "Virtualization Type"
80857 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
80858 + default GRKERNSEC_CONFIG_VIRT_NONE
80859 + help
80860 +
80861 +config GRKERNSEC_CONFIG_VIRT_NONE
80862 + bool "None"
80863 + help
80864 + Choose this option if this kernel will be run on bare metal.
80865 +
80866 +config GRKERNSEC_CONFIG_VIRT_GUEST
80867 + bool "Guest"
80868 + help
80869 + Choose this option if this kernel will be run as a VM guest.
80870 +
80871 +config GRKERNSEC_CONFIG_VIRT_HOST
80872 + bool "Host"
80873 + help
80874 + Choose this option if this kernel will be run as a VM host.
80875 +
80876 +endchoice
80877 +
80878 +choice
80879 + prompt "Virtualization Hardware"
80880 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
80881 + help
80882 +
80883 +config GRKERNSEC_CONFIG_VIRT_EPT
80884 + bool "EPT/RVI Processor Support"
80885 + depends on X86
80886 + help
80887 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
80888 + hardware virtualization. This allows for additional kernel hardening protections
80889 + to operate without additional performance impact.
80890 +
80891 + To see if your Intel processor supports EPT, see:
80892 + http://ark.intel.com/Products/VirtualizationTechnology
80893 + (Most Core i3/5/7 support EPT)
80894 +
80895 + To see if your AMD processor supports RVI, see:
80896 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
80897 +
80898 +config GRKERNSEC_CONFIG_VIRT_SOFT
80899 + bool "First-gen/No Hardware Virtualization"
80900 + help
80901 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
80902 + support hardware virtualization or doesn't support the EPT/RVI extensions.
80903 +
80904 +endchoice
80905 +
80906 +choice
80907 + prompt "Virtualization Software"
80908 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
80909 + help
80910 +
80911 +config GRKERNSEC_CONFIG_VIRT_XEN
80912 + bool "Xen"
80913 + help
80914 + Choose this option if this kernel is running as a Xen guest or host.
80915 +
80916 +config GRKERNSEC_CONFIG_VIRT_VMWARE
80917 + bool "VMWare"
80918 + help
80919 + Choose this option if this kernel is running as a VMWare guest or host.
80920 +
80921 +config GRKERNSEC_CONFIG_VIRT_KVM
80922 + bool "KVM"
80923 + help
80924 + Choose this option if this kernel is running as a KVM guest or host.
80925 +
80926 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
80927 + bool "VirtualBox"
80928 + help
80929 + Choose this option if this kernel is running as a VirtualBox guest or host.
80930 +
80931 +endchoice
80932 +
80933 +choice
80934 + prompt "Required Priorities"
80935 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80936 + default GRKERNSEC_CONFIG_PRIORITY_PERF
80937 + help
80938 +
80939 +config GRKERNSEC_CONFIG_PRIORITY_PERF
80940 + bool "Performance"
80941 + help
80942 + Choose this option if performance is of highest priority for this deployment
80943 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
80944 + and freed memory sanitizing will be disabled.
80945 +
80946 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
80947 + bool "Security"
80948 + help
80949 + Choose this option if security is of highest priority for this deployment of
80950 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
80951 + be enabled for this kernel. In a worst-case scenario, these features can
80952 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
80953 +
80954 +endchoice
80955 +
80956 +menu "Default Special Groups"
80957 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80958 +
80959 +config GRKERNSEC_PROC_GID
80960 + int "GID exempted from /proc restrictions"
80961 + default 1001
80962 + help
80963 + Setting this GID determines which group will be exempted from
80964 + grsecurity's /proc restrictions, allowing users of the specified
80965 + group to view network statistics and the existence of other users'
80966 + processes on the system.
80967 +
80968 +config GRKERNSEC_TPE_GID
80969 + int "GID for untrusted users"
80970 + depends on GRKERNSEC_CONFIG_SERVER
80971 + default 1005
80972 + help
80973 + Setting this GID determines which group untrusted users should
80974 + be added to. These users will be placed under grsecurity's Trusted Path
80975 + Execution mechanism, preventing them from executing their own binaries.
80976 + The users will only be able to execute binaries in directories owned and
80977 + writable only by the root user.
80978 +
80979 +config GRKERNSEC_SYMLINKOWN_GID
80980 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
80981 + depends on GRKERNSEC_CONFIG_SERVER
80982 + default 1006
80983 + help
80984 + Setting this GID determines what group kernel-enforced
80985 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
80986 + is enabled, a sysctl option with name "symlinkown_gid" is created.
80987 +
80988 +
80989 +endmenu
80990 +
80991 +menu "Customize Configuration"
80992 +depends on GRKERNSEC
80993 +
80994 +menu "PaX"
80995 +
80996 +config PAX
80997 + bool "Enable various PaX features"
80998 + default y if GRKERNSEC_CONFIG_AUTO
80999 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
81000 + help
81001 + This allows you to enable various PaX features. PaX adds
81002 + intrusion prevention mechanisms to the kernel that reduce
81003 + the risks posed by exploitable memory corruption bugs.
81004 +
81005 +menu "PaX Control"
81006 + depends on PAX
81007 +
81008 +config PAX_SOFTMODE
81009 + bool 'Support soft mode'
81010 + help
81011 + Enabling this option will allow you to run PaX in soft mode, that
81012 + is, PaX features will not be enforced by default, only on executables
81013 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
81014 + support as they are the only way to mark executables for soft mode use.
81015 +
81016 + Soft mode can be activated by using the "pax_softmode=1" kernel command
81017 + line option on boot. Furthermore you can control various PaX features
81018 + at runtime via the entries in /proc/sys/kernel/pax.
81019 +
81020 +config PAX_EI_PAX
81021 + bool 'Use legacy ELF header marking'
81022 + default y if GRKERNSEC_CONFIG_AUTO
81023 + help
81024 + Enabling this option will allow you to control PaX features on
81025 + a per executable basis via the 'chpax' utility available at
81026 + http://pax.grsecurity.net/. The control flags will be read from
81027 + an otherwise reserved part of the ELF header. This marking has
81028 + numerous drawbacks (no support for soft-mode, toolchain does not
81029 + know about the non-standard use of the ELF header) therefore it
81030 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
81031 + support.
81032 +
81033 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
81034 + support as well, they will override the legacy EI_PAX marks.
81035 +
81036 + If you enable none of the marking options then all applications
81037 + will run with PaX enabled on them by default.
81038 +
81039 +config PAX_PT_PAX_FLAGS
81040 + bool 'Use ELF program header marking'
81041 + default y if GRKERNSEC_CONFIG_AUTO
81042 + help
81043 + Enabling this option will allow you to control PaX features on
81044 + a per executable basis via the 'paxctl' utility available at
81045 + http://pax.grsecurity.net/. The control flags will be read from
81046 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
81047 + has the benefits of supporting both soft mode and being fully
81048 + integrated into the toolchain (the binutils patch is available
81049 + from http://pax.grsecurity.net).
81050 +
81051 + Note that if you enable the legacy EI_PAX marking support as well,
81052 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
81053 +
81054 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
81055 + must make sure that the marks are the same if a binary has both marks.
81056 +
81057 + If you enable none of the marking options then all applications
81058 + will run with PaX enabled on them by default.
81059 +
81060 +config PAX_XATTR_PAX_FLAGS
81061 + bool 'Use filesystem extended attributes marking'
81062 + default y if GRKERNSEC_CONFIG_AUTO
81063 + select CIFS_XATTR if CIFS
81064 + select EXT2_FS_XATTR if EXT2_FS
81065 + select EXT3_FS_XATTR if EXT3_FS
81066 + select EXT4_FS_XATTR if EXT4_FS
81067 + select JFFS2_FS_XATTR if JFFS2_FS
81068 + select REISERFS_FS_XATTR if REISERFS_FS
81069 + select SQUASHFS_XATTR if SQUASHFS
81070 + select TMPFS_XATTR if TMPFS
81071 + select UBIFS_FS_XATTR if UBIFS_FS
81072 + help
81073 + Enabling this option will allow you to control PaX features on
81074 + a per executable basis via the 'setfattr' utility. The control
81075 + flags will be read from the user.pax.flags extended attribute of
81076 + the file. This marking has the benefit of supporting binary-only
81077 + applications that self-check themselves (e.g., skype) and would
81078 + not tolerate chpax/paxctl changes. The main drawback is that
81079 + extended attributes are not supported by some filesystems (e.g.,
81080 + isofs, udf, vfat) so copying files through such filesystems will
81081 + lose the extended attributes and these PaX markings.
81082 +
81083 + Note that if you enable the legacy EI_PAX marking support as well,
81084 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
81085 +
81086 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
81087 + must make sure that the marks are the same if a binary has both marks.
81088 +
81089 + If you enable none of the marking options then all applications
81090 + will run with PaX enabled on them by default.
81091 +
81092 +choice
81093 + prompt 'MAC system integration'
81094 + default PAX_HAVE_ACL_FLAGS
81095 + help
81096 + Mandatory Access Control systems have the option of controlling
81097 + PaX flags on a per executable basis, choose the method supported
81098 + by your particular system.
81099 +
81100 + - "none": if your MAC system does not interact with PaX,
81101 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
81102 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
81103 +
81104 + NOTE: this option is for developers/integrators only.
81105 +
81106 + config PAX_NO_ACL_FLAGS
81107 + bool 'none'
81108 +
81109 + config PAX_HAVE_ACL_FLAGS
81110 + bool 'direct'
81111 +
81112 + config PAX_HOOK_ACL_FLAGS
81113 + bool 'hook'
81114 +endchoice
81115 +
81116 +endmenu
81117 +
81118 +menu "Non-executable pages"
81119 + depends on PAX
81120 +
81121 +config PAX_NOEXEC
81122 + bool "Enforce non-executable pages"
81123 + default y if GRKERNSEC_CONFIG_AUTO
81124 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
81125 + help
81126 + By design some architectures do not allow for protecting memory
81127 + pages against execution or even if they do, Linux does not make
81128 + use of this feature. In practice this means that if a page is
81129 + readable (such as the stack or heap) it is also executable.
81130 +
81131 + There is a well known exploit technique that makes use of this
81132 + fact and a common programming mistake where an attacker can
81133 + introduce code of his choice somewhere in the attacked program's
81134 + memory (typically the stack or the heap) and then execute it.
81135 +
81136 + If the attacked program was running with different (typically
81137 + higher) privileges than that of the attacker, then he can elevate
81138 + his own privilege level (e.g. get a root shell, write to files for
81139 + which he does not have write access to, etc).
81140 +
81141 + Enabling this option will let you choose from various features
81142 + that prevent the injection and execution of 'foreign' code in
81143 + a program.
81144 +
81145 + This will also break programs that rely on the old behaviour and
81146 + expect that dynamically allocated memory via the malloc() family
81147 + of functions is executable (which it is not). Notable examples
81148 + are the XFree86 4.x server, the java runtime and wine.
81149 +
81150 +config PAX_PAGEEXEC
81151 + bool "Paging based non-executable pages"
81152 + default y if GRKERNSEC_CONFIG_AUTO
81153 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
81154 + select S390_SWITCH_AMODE if S390
81155 + select S390_EXEC_PROTECT if S390
81156 + select ARCH_TRACK_EXEC_LIMIT if X86_32
81157 + help
81158 + This implementation is based on the paging feature of the CPU.
81159 + On i386 without hardware non-executable bit support there is a
81160 + variable but usually low performance impact, however on Intel's
81161 + P4 core based CPUs it is very high so you should not enable this
81162 + for kernels meant to be used on such CPUs.
81163 +
81164 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
81165 + with hardware non-executable bit support there is no performance
81166 + impact, on ppc the impact is negligible.
81167 +
81168 + Note that several architectures require various emulations due to
81169 + badly designed userland ABIs, this will cause a performance impact
81170 + but will disappear as soon as userland is fixed. For example, ppc
81171 + userland MUST have been built with secure-plt by a recent toolchain.
81172 +
81173 +config PAX_SEGMEXEC
81174 + bool "Segmentation based non-executable pages"
81175 + default y if GRKERNSEC_CONFIG_AUTO
81176 + depends on PAX_NOEXEC && X86_32
81177 + help
81178 + This implementation is based on the segmentation feature of the
81179 + CPU and has a very small performance impact, however applications
81180 + will be limited to a 1.5 GB address space instead of the normal
81181 + 3 GB.
81182 +
81183 +config PAX_EMUTRAMP
81184 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
81185 + default y if PARISC
81186 + help
81187 + There are some programs and libraries that for one reason or
81188 + another attempt to execute special small code snippets from
81189 + non-executable memory pages. Most notable examples are the
81190 + signal handler return code generated by the kernel itself and
81191 + the GCC trampolines.
81192 +
81193 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
81194 + such programs will no longer work under your kernel.
81195 +
81196 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
81197 + utilities to enable trampoline emulation for the affected programs
81198 + yet still have the protection provided by the non-executable pages.
81199 +
81200 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
81201 + your system will not even boot.
81202 +
81203 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
81204 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
81205 + for the affected files.
81206 +
81207 + NOTE: enabling this feature *may* open up a loophole in the
81208 + protection provided by non-executable pages that an attacker
81209 + could abuse. Therefore the best solution is to not have any
81210 + files on your system that would require this option. This can
81211 + be achieved by not using libc5 (which relies on the kernel
81212 + signal handler return code) and not using or rewriting programs
81213 + that make use of the nested function implementation of GCC.
81214 + Skilled users can just fix GCC itself so that it implements
81215 + nested function calls in a way that does not interfere with PaX.
81216 +
81217 +config PAX_EMUSIGRT
81218 + bool "Automatically emulate sigreturn trampolines"
81219 + depends on PAX_EMUTRAMP && PARISC
81220 + default y
81221 + help
81222 + Enabling this option will have the kernel automatically detect
81223 + and emulate signal return trampolines executing on the stack
81224 + that would otherwise lead to task termination.
81225 +
81226 + This solution is intended as a temporary one for users with
81227 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
81228 + Modula-3 runtime, etc) or executables linked to such, basically
81229 + everything that does not specify its own SA_RESTORER function in
81230 + normal executable memory like glibc 2.1+ does.
81231 +
81232 + On parisc you MUST enable this option, otherwise your system will
81233 + not even boot.
81234 +
81235 + NOTE: this feature cannot be disabled on a per executable basis
81236 + and since it *does* open up a loophole in the protection provided
81237 + by non-executable pages, the best solution is to not have any
81238 + files on your system that would require this option.
81239 +
81240 +config PAX_MPROTECT
81241 + bool "Restrict mprotect()"
81242 + default y if GRKERNSEC_CONFIG_AUTO
81243 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
81244 + help
81245 + Enabling this option will prevent programs from
81246 + - changing the executable status of memory pages that were
81247 + not originally created as executable,
81248 + - making read-only executable pages writable again,
81249 + - creating executable pages from anonymous memory,
81250 + - making read-only-after-relocations (RELRO) data pages writable again.
81251 +
81252 + You should say Y here to complete the protection provided by
81253 + the enforcement of non-executable pages.
81254 +
81255 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
81256 + this feature on a per file basis.
81257 +
81258 +config PAX_MPROTECT_COMPAT
81259 + bool "Use legacy/compat protection demoting (read help)"
81260 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
81261 + depends on PAX_MPROTECT
81262 + help
81263 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
81264 + by sending the proper error code to the application. For some broken
81265 + userland, this can cause problems with Python or other applications. The
81266 + current implementation however allows for applications like clamav to
81267 + detect if JIT compilation/execution is allowed and to fall back gracefully
81268 + to an interpreter-based mode if it does not. While we encourage everyone
81269 + to use the current implementation as-is and push upstream to fix broken
81270 + userland (note that the RWX logging option can assist with this), in some
81271 + environments this may not be possible. Having to disable MPROTECT
81272 + completely on certain binaries reduces the security benefit of PaX,
81273 + so this option is provided for those environments to revert to the old
81274 + behavior.
81275 +
81276 +config PAX_ELFRELOCS
81277 + bool "Allow ELF text relocations (read help)"
81278 + depends on PAX_MPROTECT
81279 + default n
81280 + help
81281 + Non-executable pages and mprotect() restrictions are effective
81282 + in preventing the introduction of new executable code into an
81283 + attacked task's address space. There remain only two venues
81284 + for this kind of attack: if the attacker can execute already
81285 + existing code in the attacked task then he can either have it
81286 + create and mmap() a file containing his code or have it mmap()
81287 + an already existing ELF library that does not have position
81288 + independent code in it and use mprotect() on it to make it
81289 + writable and copy his code there. While protecting against
81290 + the former approach is beyond PaX, the latter can be prevented
81291 + by having only PIC ELF libraries on one's system (which do not
81292 + need to relocate their code). If you are sure this is your case,
81293 + as is the case with all modern Linux distributions, then leave
81294 + this option disabled. You should say 'n' here.
81295 +
81296 +config PAX_ETEXECRELOCS
81297 + bool "Allow ELF ET_EXEC text relocations"
81298 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
81299 + select PAX_ELFRELOCS
81300 + default y
81301 + help
81302 + On some architectures there are incorrectly created applications
81303 + that require text relocations and would not work without enabling
81304 + this option. If you are an alpha, ia64 or parisc user, you should
81305 + enable this option and disable it once you have made sure that
81306 + none of your applications need it.
81307 +
81308 +config PAX_EMUPLT
81309 + bool "Automatically emulate ELF PLT"
81310 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
81311 + default y
81312 + help
81313 + Enabling this option will have the kernel automatically detect
81314 + and emulate the Procedure Linkage Table entries in ELF files.
81315 + On some architectures such entries are in writable memory, and
81316 + become non-executable leading to task termination. Therefore
81317 + it is mandatory that you enable this option on alpha, parisc,
81318 + sparc and sparc64, otherwise your system would not even boot.
81319 +
81320 + NOTE: this feature *does* open up a loophole in the protection
81321 + provided by the non-executable pages, therefore the proper
81322 + solution is to modify the toolchain to produce a PLT that does
81323 + not need to be writable.
81324 +
81325 +config PAX_DLRESOLVE
81326 + bool 'Emulate old glibc resolver stub'
81327 + depends on PAX_EMUPLT && SPARC
81328 + default n
81329 + help
81330 + This option is needed if userland has an old glibc (before 2.4)
81331 + that puts a 'save' instruction into the runtime generated resolver
81332 + stub that needs special emulation.
81333 +
81334 +config PAX_KERNEXEC
81335 + bool "Enforce non-executable kernel pages"
81336 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
81337 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
81338 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
81339 + select PAX_KERNEXEC_PLUGIN if X86_64
81340 + help
81341 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
81342 + that is, enabling this option will make it harder to inject
81343 + and execute 'foreign' code in kernel memory itself.
81344 +
81345 +choice
81346 + prompt "Return Address Instrumentation Method"
81347 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
81348 + depends on PAX_KERNEXEC_PLUGIN
81349 + help
81350 + Select the method used to instrument function pointer dereferences.
81351 + Note that binary modules cannot be instrumented by this approach.
81352 +
81353 + Note that the implementation requires a gcc with plugin support,
81354 + i.e., gcc 4.5 or newer. You may need to install the supporting
81355 + headers explicitly in addition to the normal gcc package.
81356 +
81357 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
81358 + bool "bts"
81359 + help
81360 + This method is compatible with binary only modules but has
81361 + a higher runtime overhead.
81362 +
81363 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
81364 + bool "or"
81365 + depends on !PARAVIRT
81366 + help
81367 + This method is incompatible with binary only modules but has
81368 + a lower runtime overhead.
81369 +endchoice
81370 +
81371 +config PAX_KERNEXEC_PLUGIN_METHOD
81372 + string
81373 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
81374 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
81375 + default ""
81376 +
81377 +config PAX_KERNEXEC_MODULE_TEXT
81378 + int "Minimum amount of memory reserved for module code"
81379 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
81380 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
81381 + depends on PAX_KERNEXEC && X86_32 && MODULES
81382 + help
81383 + Due to implementation details the kernel must reserve a fixed
81384 + amount of memory for module code at compile time that cannot be
81385 + changed at runtime. Here you can specify the minimum amount
81386 + in MB that will be reserved. Due to the same implementation
81387 + details this size will always be rounded up to the next 2/4 MB
81388 + boundary (depends on PAE) so the actually available memory for
81389 + module code will usually be more than this minimum.
81390 +
81391 + The default 4 MB should be enough for most users but if you have
81392 + an excessive number of modules (e.g., most distribution configs
81393 + compile many drivers as modules) or use huge modules such as
81394 + nvidia's kernel driver, you will need to adjust this amount.
81395 + A good rule of thumb is to look at your currently loaded kernel
81396 + modules and add up their sizes.
81397 +
81398 +endmenu
81399 +
81400 +menu "Address Space Layout Randomization"
81401 + depends on PAX
81402 +
81403 +config PAX_ASLR
81404 + bool "Address Space Layout Randomization"
81405 + default y if GRKERNSEC_CONFIG_AUTO
81406 + help
81407 + Many if not most exploit techniques rely on the knowledge of
81408 + certain addresses in the attacked program. The following options
81409 + will allow the kernel to apply a certain amount of randomization
81410 + to specific parts of the program thereby forcing an attacker to
81411 + guess them in most cases. Any failed guess will most likely crash
81412 + the attacked program which allows the kernel to detect such attempts
81413 + and react on them. PaX itself provides no reaction mechanisms,
81414 + instead it is strongly encouraged that you make use of Nergal's
81415 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
81416 + (http://www.grsecurity.net/) built-in crash detection features or
81417 + develop one yourself.
81418 +
81419 + By saying Y here you can choose to randomize the following areas:
81420 + - top of the task's kernel stack
81421 + - top of the task's userland stack
81422 + - base address for mmap() requests that do not specify one
81423 + (this includes all libraries)
81424 + - base address of the main executable
81425 +
81426 + It is strongly recommended to say Y here as address space layout
81427 + randomization has negligible impact on performance yet it provides
81428 + a very effective protection.
81429 +
81430 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
81431 + this feature on a per file basis.
81432 +
81433 +config PAX_RANDKSTACK
81434 + bool "Randomize kernel stack base"
81435 + default y if GRKERNSEC_CONFIG_AUTO
81436 + depends on X86_TSC && X86
81437 + help
81438 + By saying Y here the kernel will randomize every task's kernel
81439 + stack on every system call. This will not only force an attacker
81440 + to guess it but also prevent him from making use of possible
81441 + leaked information about it.
81442 +
81443 + Since the kernel stack is a rather scarce resource, randomization
81444 + may cause unexpected stack overflows, therefore you should very
81445 + carefully test your system. Note that once enabled in the kernel
81446 + configuration, this feature cannot be disabled on a per file basis.
81447 +
81448 +config PAX_RANDUSTACK
81449 + bool "Randomize user stack base"
81450 + default y if GRKERNSEC_CONFIG_AUTO
81451 + depends on PAX_ASLR
81452 + help
81453 + By saying Y here the kernel will randomize every task's userland
81454 + stack. The randomization is done in two steps where the second
81455 + one may apply a big amount of shift to the top of the stack and
81456 + cause problems for programs that want to use lots of memory (more
81457 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
81458 + For this reason the second step can be controlled by 'chpax' or
81459 + 'paxctl' on a per file basis.
81460 +
81461 +config PAX_RANDMMAP
81462 + bool "Randomize mmap() base"
81463 + default y if GRKERNSEC_CONFIG_AUTO
81464 + depends on PAX_ASLR
81465 + help
81466 + By saying Y here the kernel will use a randomized base address for
81467 + mmap() requests that do not specify one themselves. As a result
81468 + all dynamically loaded libraries will appear at random addresses
81469 + and therefore be harder to exploit by a technique where an attacker
81470 + attempts to execute library code for his purposes (e.g. spawn a
81471 + shell from an exploited program that is running at an elevated
81472 + privilege level).
81473 +
81474 + Furthermore, if a program is relinked as a dynamic ELF file, its
81475 + base address will be randomized as well, completing the full
81476 + randomization of the address space layout. Attacking such programs
81477 + becomes a guess game. You can find an example of doing this at
81478 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
81479 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
81480 +
81481 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
81482 + feature on a per file basis.
81483 +
81484 +endmenu
81485 +
81486 +menu "Miscellaneous hardening features"
81487 +
81488 +config PAX_MEMORY_SANITIZE
81489 + bool "Sanitize all freed memory"
81490 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
81491 + depends on !HIBERNATION
81492 + help
81493 + By saying Y here the kernel will erase memory pages as soon as they
81494 + are freed. This in turn reduces the lifetime of data stored in the
81495 + pages, making it less likely that sensitive information such as
81496 + passwords, cryptographic secrets, etc stay in memory for too long.
81497 +
81498 + This is especially useful for programs whose runtime is short, long
81499 + lived processes and the kernel itself benefit from this as long as
81500 + they operate on whole memory pages and ensure timely freeing of pages
81501 + that may hold sensitive information.
81502 +
81503 + The tradeoff is performance impact, on a single CPU system kernel
81504 + compilation sees a 3% slowdown, other systems and workloads may vary
81505 + and you are advised to test this feature on your expected workload
81506 + before deploying it.
81507 +
81508 + Note that this feature does not protect data stored in live pages,
81509 + e.g., process memory swapped to disk may stay there for a long time.
81510 +
81511 +config PAX_MEMORY_STACKLEAK
81512 + bool "Sanitize kernel stack"
81513 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
81514 + depends on X86
81515 + help
81516 + By saying Y here the kernel will erase the kernel stack before it
81517 + returns from a system call. This in turn reduces the information
81518 + that a kernel stack leak bug can reveal.
81519 +
81520 + Note that such a bug can still leak information that was put on
81521 + the stack by the current system call (the one eventually triggering
81522 + the bug) but traces of earlier system calls on the kernel stack
81523 + cannot leak anymore.
81524 +
81525 + The tradeoff is performance impact: on a single CPU system kernel
81526 + compilation sees a 1% slowdown, other systems and workloads may vary
81527 + and you are advised to test this feature on your expected workload
81528 + before deploying it.
81529 +
81530 + Note that the full feature requires a gcc with plugin support,
81531 + i.e., gcc 4.5 or newer. You may need to install the supporting
81532 + headers explicitly in addition to the normal gcc package. Using
81533 + older gcc versions means that functions with large enough stack
81534 + frames may leave uninitialized memory behind that may be exposed
81535 + to a later syscall leaking the stack.
81536 +
81537 +config PAX_MEMORY_UDEREF
81538 + bool "Prevent invalid userland pointer dereference"
81539 + default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
81540 + depends on X86 && !UML_X86 && !XEN
81541 + select PAX_PER_CPU_PGD if X86_64
81542 + help
81543 + By saying Y here the kernel will be prevented from dereferencing
81544 + userland pointers in contexts where the kernel expects only kernel
81545 + pointers. This is both a useful runtime debugging feature and a
81546 + security measure that prevents exploiting a class of kernel bugs.
81547 +
81548 + The tradeoff is that some virtualization solutions may experience
81549 + a huge slowdown and therefore you should not enable this feature
81550 + for kernels meant to run in such environments. Whether a given VM
81551 + solution is affected or not is best determined by simply trying it
81552 + out, the performance impact will be obvious right on boot as this
81553 + mechanism engages from very early on. A good rule of thumb is that
81554 + VMs running on CPUs without hardware virtualization support (i.e.,
81555 + the majority of IA-32 CPUs) will likely experience the slowdown.
81556 +
81557 +config PAX_REFCOUNT
81558 + bool "Prevent various kernel object reference counter overflows"
81559 + default y if GRKERNSEC_CONFIG_AUTO
81560 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
81561 + help
81562 + By saying Y here the kernel will detect and prevent overflowing
81563 + various (but not all) kinds of object reference counters. Such
81564 + overflows can normally occur due to bugs only and are often, if
81565 + not always, exploitable.
81566 +
81567 + The tradeoff is that data structures protected by an overflowed
81568 + refcount will never be freed and therefore will leak memory. Note
81569 + that this leak also happens even without this protection but in
81570 + that case the overflow can eventually trigger the freeing of the
81571 + data structure while it is still being used elsewhere, resulting
81572 + in the exploitable situation that this feature prevents.
81573 +
81574 + Since this has a negligible performance impact, you should enable
81575 + this feature.
81576 +
81577 +config PAX_USERCOPY
81578 + bool "Harden heap object copies between kernel and userland"
81579 + default y if GRKERNSEC_CONFIG_AUTO
81580 + depends on X86 || PPC || SPARC || ARM
81581 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
81582 + select PAX_USERCOPY_SLABS
81583 + help
81584 + By saying Y here the kernel will enforce the size of heap objects
81585 + when they are copied in either direction between the kernel and
81586 + userland, even if only a part of the heap object is copied.
81587 +
81588 + Specifically, this checking prevents information leaking from the
81589 + kernel heap during kernel to userland copies (if the kernel heap
81590 + object is otherwise fully initialized) and prevents kernel heap
81591 + overflows during userland to kernel copies.
81592 +
81593 + Note that the current implementation provides the strictest bounds
81594 + checks for the SLUB allocator.
81595 +
81596 + Enabling this option also enables per-slab cache protection against
81597 + data in a given cache being copied into/out of via userland
81598 + accessors. Though the whitelist of regions will be reduced over
81599 + time, it notably protects important data structures like task structs.
81600 +
81601 + If frame pointers are enabled on x86, this option will also restrict
81602 + copies into and out of the kernel stack to local variables within a
81603 + single frame.
81604 +
81605 + Since this has a negligible performance impact, you should enable
81606 + this feature.
81607 +
81608 +config PAX_SIZE_OVERFLOW
81609 + bool "Prevent various integer overflows in function size parameters"
81610 + default y if GRKERNSEC_CONFIG_AUTO
81611 + depends on X86
81612 + help
81613 + By saying Y here the kernel recomputes expressions of function
81614 + arguments marked by a size_overflow attribute with double integer
81615 + precision (DImode/TImode for 32/64 bit integer types).
81616 +
81617 + The recomputed argument is checked against TYPE_MAX and an event
81618 + is logged on overflow and the triggering process is killed.
81619 +
81620 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
81621 +
81622 + Note that the implementation requires a gcc with plugin support,
81623 + i.e., gcc 4.5 or newer. You may need to install the supporting
81624 + headers explicitly in addition to the normal gcc package.
81625 +
81626 +config PAX_LATENT_ENTROPY
81627 + bool "Generate some entropy during boot"
81628 + default y if GRKERNSEC_CONFIG_AUTO
81629 + help
81630 + By saying Y here the kernel will instrument early boot code to
81631 + extract some entropy from both original and artificially created
81632 + program state. This will help especially embedded systems where
81633 + there is little 'natural' source of entropy normally. The cost
81634 + is some slowdown of the boot process.
81635 +
81636 + Note that the implementation requires a gcc with plugin support,
81637 + i.e., gcc 4.5 or newer. You may need to install the supporting
81638 + headers explicitly in addition to the normal gcc package.
81639 +
81640 + Note that entropy extracted this way is not cryptographically
81641 + secure!
81642 +
81643 +endmenu
81644 +
81645 +endmenu
81646 +
81647 +source grsecurity/Kconfig
81648 +
81649 +endmenu
81650 +
81651 +endmenu
81652 +
81653 source security/keys/Kconfig
81654
81655 config SECURITY_DMESG_RESTRICT
81656 @@ -103,7 +982,7 @@ config INTEL_TXT
81657 config LSM_MMAP_MIN_ADDR
81658 int "Low address space for LSM to protect from user allocation"
81659 depends on SECURITY && SECURITY_SELINUX
81660 - default 32768 if ARM
81661 + default 32768 if ALPHA || ARM || PARISC || SPARC32
81662 default 65536
81663 help
81664 This is the portion of low virtual memory which should be protected
81665 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
81666 index 8ea39aa..8569ac5 100644
81667 --- a/security/apparmor/lsm.c
81668 +++ b/security/apparmor/lsm.c
81669 @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
81670 return error;
81671 }
81672
81673 -static struct security_operations apparmor_ops = {
81674 +static struct security_operations apparmor_ops __read_only = {
81675 .name = "apparmor",
81676
81677 .ptrace_access_check = apparmor_ptrace_access_check,
81678 diff --git a/security/commoncap.c b/security/commoncap.c
81679 index 6dbae46..d5611fd 100644
81680 --- a/security/commoncap.c
81681 +++ b/security/commoncap.c
81682 @@ -415,6 +415,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
81683 return 0;
81684 }
81685
81686 +/* returns:
81687 + 1 for suid privilege
81688 + 2 for sgid privilege
81689 + 3 for fscap privilege
81690 +*/
81691 +int is_privileged_binary(const struct dentry *dentry)
81692 +{
81693 + struct cpu_vfs_cap_data capdata;
81694 + struct inode *inode = dentry->d_inode;
81695 +
81696 + if (!inode || S_ISDIR(inode->i_mode))
81697 + return 0;
81698 +
81699 + if (inode->i_mode & S_ISUID)
81700 + return 1;
81701 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
81702 + return 2;
81703 +
81704 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
81705 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
81706 + return 3;
81707 + }
81708 +
81709 + return 0;
81710 +}
81711 +
81712 /*
81713 * Attempt to get the on-exec apply capability sets for an executable file from
81714 * its xattrs and, if present, apply them to the proposed credentials being
81715 @@ -583,6 +609,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
81716 const struct cred *cred = current_cred();
81717 kuid_t root_uid = make_kuid(cred->user_ns, 0);
81718
81719 + if (gr_acl_enable_at_secure())
81720 + return 1;
81721 +
81722 if (!uid_eq(cred->uid, root_uid)) {
81723 if (bprm->cap_effective)
81724 return 1;
81725 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
81726 index 3ccf7ac..d73ad64 100644
81727 --- a/security/integrity/ima/ima.h
81728 +++ b/security/integrity/ima/ima.h
81729 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
81730 extern spinlock_t ima_queue_lock;
81731
81732 struct ima_h_table {
81733 - atomic_long_t len; /* number of stored measurements in the list */
81734 - atomic_long_t violations;
81735 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
81736 + atomic_long_unchecked_t violations;
81737 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
81738 };
81739 extern struct ima_h_table ima_htable;
81740 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
81741 index 88a2788..581ab92 100644
81742 --- a/security/integrity/ima/ima_api.c
81743 +++ b/security/integrity/ima/ima_api.c
81744 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
81745 int result;
81746
81747 /* can overflow, only indicator */
81748 - atomic_long_inc(&ima_htable.violations);
81749 + atomic_long_inc_unchecked(&ima_htable.violations);
81750
81751 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
81752 if (!entry) {
81753 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
81754 index e1aa2b4..52027bf 100644
81755 --- a/security/integrity/ima/ima_fs.c
81756 +++ b/security/integrity/ima/ima_fs.c
81757 @@ -28,12 +28,12 @@
81758 static int valid_policy = 1;
81759 #define TMPBUFLEN 12
81760 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
81761 - loff_t *ppos, atomic_long_t *val)
81762 + loff_t *ppos, atomic_long_unchecked_t *val)
81763 {
81764 char tmpbuf[TMPBUFLEN];
81765 ssize_t len;
81766
81767 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
81768 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
81769 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
81770 }
81771
81772 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
81773 index 55a6271..ad829c3 100644
81774 --- a/security/integrity/ima/ima_queue.c
81775 +++ b/security/integrity/ima/ima_queue.c
81776 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
81777 INIT_LIST_HEAD(&qe->later);
81778 list_add_tail_rcu(&qe->later, &ima_measurements);
81779
81780 - atomic_long_inc(&ima_htable.len);
81781 + atomic_long_inc_unchecked(&ima_htable.len);
81782 key = ima_hash_key(entry->digest);
81783 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
81784 return 0;
81785 diff --git a/security/keys/compat.c b/security/keys/compat.c
81786 index c92d42b..341e7ea 100644
81787 --- a/security/keys/compat.c
81788 +++ b/security/keys/compat.c
81789 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
81790 if (ret == 0)
81791 goto no_payload_free;
81792
81793 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
81794 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
81795
81796 if (iov != iovstack)
81797 kfree(iov);
81798 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
81799 index 0f5b3f0..b8d47c1 100644
81800 --- a/security/keys/keyctl.c
81801 +++ b/security/keys/keyctl.c
81802 @@ -966,7 +966,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
81803 /*
81804 * Copy the iovec data from userspace
81805 */
81806 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
81807 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
81808 unsigned ioc)
81809 {
81810 for (; ioc > 0; ioc--) {
81811 @@ -988,7 +988,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
81812 * If successful, 0 will be returned.
81813 */
81814 long keyctl_instantiate_key_common(key_serial_t id,
81815 - const struct iovec *payload_iov,
81816 + const struct iovec __user *payload_iov,
81817 unsigned ioc,
81818 size_t plen,
81819 key_serial_t ringid)
81820 @@ -1083,7 +1083,7 @@ long keyctl_instantiate_key(key_serial_t id,
81821 [0].iov_len = plen
81822 };
81823
81824 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
81825 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
81826 }
81827
81828 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
81829 @@ -1116,7 +1116,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
81830 if (ret == 0)
81831 goto no_payload_free;
81832
81833 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
81834 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
81835
81836 if (iov != iovstack)
81837 kfree(iov);
81838 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
81839 index 7445875..262834f 100644
81840 --- a/security/keys/keyring.c
81841 +++ b/security/keys/keyring.c
81842 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
81843 ret = -EFAULT;
81844
81845 for (loop = 0; loop < klist->nkeys; loop++) {
81846 + key_serial_t serial;
81847 key = rcu_deref_link_locked(klist, loop,
81848 keyring);
81849 + serial = key->serial;
81850
81851 tmp = sizeof(key_serial_t);
81852 if (tmp > buflen)
81853 tmp = buflen;
81854
81855 - if (copy_to_user(buffer,
81856 - &key->serial,
81857 - tmp) != 0)
81858 + if (copy_to_user(buffer, &serial, tmp))
81859 goto error;
81860
81861 buflen -= tmp;
81862 diff --git a/security/min_addr.c b/security/min_addr.c
81863 index f728728..6457a0c 100644
81864 --- a/security/min_addr.c
81865 +++ b/security/min_addr.c
81866 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
81867 */
81868 static void update_mmap_min_addr(void)
81869 {
81870 +#ifndef SPARC
81871 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
81872 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
81873 mmap_min_addr = dac_mmap_min_addr;
81874 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
81875 #else
81876 mmap_min_addr = dac_mmap_min_addr;
81877 #endif
81878 +#endif
81879 }
81880
81881 /*
81882 diff --git a/security/security.c b/security/security.c
81883 index 860aeb3..45765c0 100644
81884 --- a/security/security.c
81885 +++ b/security/security.c
81886 @@ -20,6 +20,7 @@
81887 #include <linux/ima.h>
81888 #include <linux/evm.h>
81889 #include <linux/fsnotify.h>
81890 +#include <linux/mm.h>
81891 #include <linux/mman.h>
81892 #include <linux/mount.h>
81893 #include <linux/personality.h>
81894 @@ -32,8 +33,8 @@
81895 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
81896 CONFIG_DEFAULT_SECURITY;
81897
81898 -static struct security_operations *security_ops;
81899 -static struct security_operations default_security_ops = {
81900 +static struct security_operations *security_ops __read_only;
81901 +static struct security_operations default_security_ops __read_only = {
81902 .name = "default",
81903 };
81904
81905 @@ -74,7 +75,9 @@ int __init security_init(void)
81906
81907 void reset_security_ops(void)
81908 {
81909 + pax_open_kernel();
81910 security_ops = &default_security_ops;
81911 + pax_close_kernel();
81912 }
81913
81914 /* Save user chosen LSM */
81915 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
81916 index db10db2..99a640f 100644
81917 --- a/security/selinux/hooks.c
81918 +++ b/security/selinux/hooks.c
81919 @@ -95,8 +95,6 @@
81920
81921 #define NUM_SEL_MNT_OPTS 5
81922
81923 -extern struct security_operations *security_ops;
81924 -
81925 /* SECMARK reference count */
81926 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
81927
81928 @@ -5511,7 +5509,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
81929
81930 #endif
81931
81932 -static struct security_operations selinux_ops = {
81933 +static struct security_operations selinux_ops __read_only = {
81934 .name = "selinux",
81935
81936 .ptrace_access_check = selinux_ptrace_access_check,
81937 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
81938 index c220f31..89fab3f 100644
81939 --- a/security/selinux/include/xfrm.h
81940 +++ b/security/selinux/include/xfrm.h
81941 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
81942
81943 static inline void selinux_xfrm_notify_policyload(void)
81944 {
81945 - atomic_inc(&flow_cache_genid);
81946 + atomic_inc_unchecked(&flow_cache_genid);
81947 }
81948 #else
81949 static inline int selinux_xfrm_enabled(void)
81950 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
81951 index ee0bb57..57fcd43 100644
81952 --- a/security/smack/smack_lsm.c
81953 +++ b/security/smack/smack_lsm.c
81954 @@ -3432,7 +3432,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
81955 return 0;
81956 }
81957
81958 -struct security_operations smack_ops = {
81959 +struct security_operations smack_ops __read_only = {
81960 .name = "smack",
81961
81962 .ptrace_access_check = smack_ptrace_access_check,
81963 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
81964 index c2d04a5..e6a1aeb 100644
81965 --- a/security/tomoyo/tomoyo.c
81966 +++ b/security/tomoyo/tomoyo.c
81967 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
81968 * tomoyo_security_ops is a "struct security_operations" which is used for
81969 * registering TOMOYO.
81970 */
81971 -static struct security_operations tomoyo_security_ops = {
81972 +static struct security_operations tomoyo_security_ops __read_only = {
81973 .name = "tomoyo",
81974 .cred_alloc_blank = tomoyo_cred_alloc_blank,
81975 .cred_prepare = tomoyo_cred_prepare,
81976 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
81977 index 51d6709..1f3dbe2 100644
81978 --- a/security/yama/Kconfig
81979 +++ b/security/yama/Kconfig
81980 @@ -1,6 +1,6 @@
81981 config SECURITY_YAMA
81982 bool "Yama support"
81983 - depends on SECURITY
81984 + depends on SECURITY && !GRKERNSEC
81985 select SECURITYFS
81986 select SECURITY_PATH
81987 default n
81988 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
81989 index 270790d..c67dfcb 100644
81990 --- a/sound/aoa/codecs/onyx.c
81991 +++ b/sound/aoa/codecs/onyx.c
81992 @@ -54,7 +54,7 @@ struct onyx {
81993 spdif_locked:1,
81994 analog_locked:1,
81995 original_mute:2;
81996 - int open_count;
81997 + local_t open_count;
81998 struct codec_info *codec_info;
81999
82000 /* mutex serializes concurrent access to the device
82001 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
82002 struct onyx *onyx = cii->codec_data;
82003
82004 mutex_lock(&onyx->mutex);
82005 - onyx->open_count++;
82006 + local_inc(&onyx->open_count);
82007 mutex_unlock(&onyx->mutex);
82008
82009 return 0;
82010 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
82011 struct onyx *onyx = cii->codec_data;
82012
82013 mutex_lock(&onyx->mutex);
82014 - onyx->open_count--;
82015 - if (!onyx->open_count)
82016 + if (local_dec_and_test(&onyx->open_count))
82017 onyx->spdif_locked = onyx->analog_locked = 0;
82018 mutex_unlock(&onyx->mutex);
82019
82020 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
82021 index ffd2025..df062c9 100644
82022 --- a/sound/aoa/codecs/onyx.h
82023 +++ b/sound/aoa/codecs/onyx.h
82024 @@ -11,6 +11,7 @@
82025 #include <linux/i2c.h>
82026 #include <asm/pmac_low_i2c.h>
82027 #include <asm/prom.h>
82028 +#include <asm/local.h>
82029
82030 /* PCM3052 register definitions */
82031
82032 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
82033 index 08fde00..0bf641a 100644
82034 --- a/sound/core/oss/pcm_oss.c
82035 +++ b/sound/core/oss/pcm_oss.c
82036 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
82037 if (in_kernel) {
82038 mm_segment_t fs;
82039 fs = snd_enter_user();
82040 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
82041 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
82042 snd_leave_user(fs);
82043 } else {
82044 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
82045 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
82046 }
82047 if (ret != -EPIPE && ret != -ESTRPIPE)
82048 break;
82049 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
82050 if (in_kernel) {
82051 mm_segment_t fs;
82052 fs = snd_enter_user();
82053 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
82054 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
82055 snd_leave_user(fs);
82056 } else {
82057 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
82058 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
82059 }
82060 if (ret == -EPIPE) {
82061 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
82062 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
82063 struct snd_pcm_plugin_channel *channels;
82064 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
82065 if (!in_kernel) {
82066 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
82067 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
82068 return -EFAULT;
82069 buf = runtime->oss.buffer;
82070 }
82071 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
82072 }
82073 } else {
82074 tmp = snd_pcm_oss_write2(substream,
82075 - (const char __force *)buf,
82076 + (const char __force_kernel *)buf,
82077 runtime->oss.period_bytes, 0);
82078 if (tmp <= 0)
82079 goto err;
82080 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
82081 struct snd_pcm_runtime *runtime = substream->runtime;
82082 snd_pcm_sframes_t frames, frames1;
82083 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
82084 - char __user *final_dst = (char __force __user *)buf;
82085 + char __user *final_dst = (char __force_user *)buf;
82086 if (runtime->oss.plugin_first) {
82087 struct snd_pcm_plugin_channel *channels;
82088 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
82089 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
82090 xfer += tmp;
82091 runtime->oss.buffer_used -= tmp;
82092 } else {
82093 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
82094 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
82095 runtime->oss.period_bytes, 0);
82096 if (tmp <= 0)
82097 goto err;
82098 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
82099 size1);
82100 size1 /= runtime->channels; /* frames */
82101 fs = snd_enter_user();
82102 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
82103 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
82104 snd_leave_user(fs);
82105 }
82106 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
82107 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
82108 index 91cdf94..4085161 100644
82109 --- a/sound/core/pcm_compat.c
82110 +++ b/sound/core/pcm_compat.c
82111 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
82112 int err;
82113
82114 fs = snd_enter_user();
82115 - err = snd_pcm_delay(substream, &delay);
82116 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
82117 snd_leave_user(fs);
82118 if (err < 0)
82119 return err;
82120 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
82121 index 53b5ada..2db94c8 100644
82122 --- a/sound/core/pcm_native.c
82123 +++ b/sound/core/pcm_native.c
82124 @@ -2780,11 +2780,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
82125 switch (substream->stream) {
82126 case SNDRV_PCM_STREAM_PLAYBACK:
82127 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
82128 - (void __user *)arg);
82129 + (void __force_user *)arg);
82130 break;
82131 case SNDRV_PCM_STREAM_CAPTURE:
82132 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
82133 - (void __user *)arg);
82134 + (void __force_user *)arg);
82135 break;
82136 default:
82137 result = -EINVAL;
82138 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
82139 index 5cf8d65..912a79c 100644
82140 --- a/sound/core/seq/seq_device.c
82141 +++ b/sound/core/seq/seq_device.c
82142 @@ -64,7 +64,7 @@ struct ops_list {
82143 int argsize; /* argument size */
82144
82145 /* operators */
82146 - struct snd_seq_dev_ops ops;
82147 + struct snd_seq_dev_ops *ops;
82148
82149 /* registred devices */
82150 struct list_head dev_list; /* list of devices */
82151 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
82152
82153 mutex_lock(&ops->reg_mutex);
82154 /* copy driver operators */
82155 - ops->ops = *entry;
82156 + ops->ops = entry;
82157 ops->driver |= DRIVER_LOADED;
82158 ops->argsize = argsize;
82159
82160 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
82161 dev->name, ops->id, ops->argsize, dev->argsize);
82162 return -EINVAL;
82163 }
82164 - if (ops->ops.init_device(dev) >= 0) {
82165 + if (ops->ops->init_device(dev) >= 0) {
82166 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
82167 ops->num_init_devices++;
82168 } else {
82169 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
82170 dev->name, ops->id, ops->argsize, dev->argsize);
82171 return -EINVAL;
82172 }
82173 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
82174 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
82175 dev->status = SNDRV_SEQ_DEVICE_FREE;
82176 dev->driver_data = NULL;
82177 ops->num_init_devices--;
82178 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
82179 index 621e60e..f4543f5 100644
82180 --- a/sound/drivers/mts64.c
82181 +++ b/sound/drivers/mts64.c
82182 @@ -29,6 +29,7 @@
82183 #include <sound/initval.h>
82184 #include <sound/rawmidi.h>
82185 #include <sound/control.h>
82186 +#include <asm/local.h>
82187
82188 #define CARD_NAME "Miditerminal 4140"
82189 #define DRIVER_NAME "MTS64"
82190 @@ -67,7 +68,7 @@ struct mts64 {
82191 struct pardevice *pardev;
82192 int pardev_claimed;
82193
82194 - int open_count;
82195 + local_t open_count;
82196 int current_midi_output_port;
82197 int current_midi_input_port;
82198 u8 mode[MTS64_NUM_INPUT_PORTS];
82199 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
82200 {
82201 struct mts64 *mts = substream->rmidi->private_data;
82202
82203 - if (mts->open_count == 0) {
82204 + if (local_read(&mts->open_count) == 0) {
82205 /* We don't need a spinlock here, because this is just called
82206 if the device has not been opened before.
82207 So there aren't any IRQs from the device */
82208 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
82209
82210 msleep(50);
82211 }
82212 - ++(mts->open_count);
82213 + local_inc(&mts->open_count);
82214
82215 return 0;
82216 }
82217 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
82218 struct mts64 *mts = substream->rmidi->private_data;
82219 unsigned long flags;
82220
82221 - --(mts->open_count);
82222 - if (mts->open_count == 0) {
82223 + if (local_dec_return(&mts->open_count) == 0) {
82224 /* We need the spinlock_irqsave here because we can still
82225 have IRQs at this point */
82226 spin_lock_irqsave(&mts->lock, flags);
82227 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
82228
82229 msleep(500);
82230
82231 - } else if (mts->open_count < 0)
82232 - mts->open_count = 0;
82233 + } else if (local_read(&mts->open_count) < 0)
82234 + local_set(&mts->open_count, 0);
82235
82236 return 0;
82237 }
82238 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
82239 index b953fb4..1999c01 100644
82240 --- a/sound/drivers/opl4/opl4_lib.c
82241 +++ b/sound/drivers/opl4/opl4_lib.c
82242 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
82243 MODULE_DESCRIPTION("OPL4 driver");
82244 MODULE_LICENSE("GPL");
82245
82246 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
82247 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
82248 {
82249 int timeout = 10;
82250 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
82251 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
82252 index 3e32bd3..46fc152 100644
82253 --- a/sound/drivers/portman2x4.c
82254 +++ b/sound/drivers/portman2x4.c
82255 @@ -48,6 +48,7 @@
82256 #include <sound/initval.h>
82257 #include <sound/rawmidi.h>
82258 #include <sound/control.h>
82259 +#include <asm/local.h>
82260
82261 #define CARD_NAME "Portman 2x4"
82262 #define DRIVER_NAME "portman"
82263 @@ -85,7 +86,7 @@ struct portman {
82264 struct pardevice *pardev;
82265 int pardev_claimed;
82266
82267 - int open_count;
82268 + local_t open_count;
82269 int mode[PORTMAN_NUM_INPUT_PORTS];
82270 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
82271 };
82272 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
82273 index ea995af..f1bfa37 100644
82274 --- a/sound/firewire/amdtp.c
82275 +++ b/sound/firewire/amdtp.c
82276 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
82277 ptr = s->pcm_buffer_pointer + data_blocks;
82278 if (ptr >= pcm->runtime->buffer_size)
82279 ptr -= pcm->runtime->buffer_size;
82280 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
82281 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
82282
82283 s->pcm_period_pointer += data_blocks;
82284 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
82285 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
82286 */
82287 void amdtp_out_stream_update(struct amdtp_out_stream *s)
82288 {
82289 - ACCESS_ONCE(s->source_node_id_field) =
82290 + ACCESS_ONCE_RW(s->source_node_id_field) =
82291 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
82292 }
82293 EXPORT_SYMBOL(amdtp_out_stream_update);
82294 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
82295 index b680c5e..061b7a0 100644
82296 --- a/sound/firewire/amdtp.h
82297 +++ b/sound/firewire/amdtp.h
82298 @@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
82299 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
82300 struct snd_pcm_substream *pcm)
82301 {
82302 - ACCESS_ONCE(s->pcm) = pcm;
82303 + ACCESS_ONCE_RW(s->pcm) = pcm;
82304 }
82305
82306 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
82307 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
82308 index d428ffe..751ef78 100644
82309 --- a/sound/firewire/isight.c
82310 +++ b/sound/firewire/isight.c
82311 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
82312 ptr += count;
82313 if (ptr >= runtime->buffer_size)
82314 ptr -= runtime->buffer_size;
82315 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
82316 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
82317
82318 isight->period_counter += count;
82319 if (isight->period_counter >= runtime->period_size) {
82320 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
82321 if (err < 0)
82322 return err;
82323
82324 - ACCESS_ONCE(isight->pcm_active) = true;
82325 + ACCESS_ONCE_RW(isight->pcm_active) = true;
82326
82327 return 0;
82328 }
82329 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
82330 {
82331 struct isight *isight = substream->private_data;
82332
82333 - ACCESS_ONCE(isight->pcm_active) = false;
82334 + ACCESS_ONCE_RW(isight->pcm_active) = false;
82335
82336 mutex_lock(&isight->mutex);
82337 isight_stop_streaming(isight);
82338 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
82339
82340 switch (cmd) {
82341 case SNDRV_PCM_TRIGGER_START:
82342 - ACCESS_ONCE(isight->pcm_running) = true;
82343 + ACCESS_ONCE_RW(isight->pcm_running) = true;
82344 break;
82345 case SNDRV_PCM_TRIGGER_STOP:
82346 - ACCESS_ONCE(isight->pcm_running) = false;
82347 + ACCESS_ONCE_RW(isight->pcm_running) = false;
82348 break;
82349 default:
82350 return -EINVAL;
82351 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
82352 index 7bd5e33..1fcab12 100644
82353 --- a/sound/isa/cmi8330.c
82354 +++ b/sound/isa/cmi8330.c
82355 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
82356
82357 struct snd_pcm *pcm;
82358 struct snd_cmi8330_stream {
82359 - struct snd_pcm_ops ops;
82360 + snd_pcm_ops_no_const ops;
82361 snd_pcm_open_callback_t open;
82362 void *private_data; /* sb or wss */
82363 } streams[2];
82364 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
82365 index 733b014..56ce96f 100644
82366 --- a/sound/oss/sb_audio.c
82367 +++ b/sound/oss/sb_audio.c
82368 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
82369 buf16 = (signed short *)(localbuf + localoffs);
82370 while (c)
82371 {
82372 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
82373 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
82374 if (copy_from_user(lbuf8,
82375 userbuf+useroffs + p,
82376 locallen))
82377 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
82378 index 09d4648..cf234c7 100644
82379 --- a/sound/oss/swarm_cs4297a.c
82380 +++ b/sound/oss/swarm_cs4297a.c
82381 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
82382 {
82383 struct cs4297a_state *s;
82384 u32 pwr, id;
82385 - mm_segment_t fs;
82386 int rval;
82387 #ifndef CONFIG_BCM_CS4297A_CSWARM
82388 u64 cfg;
82389 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
82390 if (!rval) {
82391 char *sb1250_duart_present;
82392
82393 +#if 0
82394 + mm_segment_t fs;
82395 fs = get_fs();
82396 set_fs(KERNEL_DS);
82397 -#if 0
82398 val = SOUND_MASK_LINE;
82399 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
82400 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
82401 val = initvol[i].vol;
82402 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
82403 }
82404 + set_fs(fs);
82405 // cs4297a_write_ac97(s, 0x18, 0x0808);
82406 #else
82407 // cs4297a_write_ac97(s, 0x5e, 0x180);
82408 cs4297a_write_ac97(s, 0x02, 0x0808);
82409 cs4297a_write_ac97(s, 0x18, 0x0808);
82410 #endif
82411 - set_fs(fs);
82412
82413 list_add(&s->list, &cs4297a_devs);
82414
82415 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
82416 index 2fdaadb..7df8fc6 100644
82417 --- a/sound/pci/hda/hda_codec.h
82418 +++ b/sound/pci/hda/hda_codec.h
82419 @@ -611,7 +611,7 @@ struct hda_bus_ops {
82420 /* notify power-up/down from codec to controller */
82421 void (*pm_notify)(struct hda_bus *bus);
82422 #endif
82423 -};
82424 +} __no_const;
82425
82426 /* template to pass to the bus constructor */
82427 struct hda_bus_template {
82428 @@ -711,6 +711,7 @@ struct hda_codec_ops {
82429 #endif
82430 void (*reboot_notify)(struct hda_codec *codec);
82431 };
82432 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
82433
82434 /* record for amp information cache */
82435 struct hda_cache_head {
82436 @@ -741,7 +742,7 @@ struct hda_pcm_ops {
82437 struct snd_pcm_substream *substream);
82438 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
82439 struct snd_pcm_substream *substream);
82440 -};
82441 +} __no_const;
82442
82443 /* PCM information for each substream */
82444 struct hda_pcm_stream {
82445 @@ -799,7 +800,7 @@ struct hda_codec {
82446 const char *modelname; /* model name for preset */
82447
82448 /* set by patch */
82449 - struct hda_codec_ops patch_ops;
82450 + hda_codec_ops_no_const patch_ops;
82451
82452 /* PCM to create, set by patch_ops.build_pcms callback */
82453 unsigned int num_pcms;
82454 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
82455 index 0da778a..bc38b84 100644
82456 --- a/sound/pci/ice1712/ice1712.h
82457 +++ b/sound/pci/ice1712/ice1712.h
82458 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
82459 unsigned int mask_flags; /* total mask bits */
82460 struct snd_akm4xxx_ops {
82461 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
82462 - } ops;
82463 + } __no_const ops;
82464 };
82465
82466 struct snd_ice1712_spdif {
82467 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
82468 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82469 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82470 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82471 - } ops;
82472 + } __no_const ops;
82473 };
82474
82475
82476 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
82477 index a8159b81..5f006a5 100644
82478 --- a/sound/pci/ymfpci/ymfpci_main.c
82479 +++ b/sound/pci/ymfpci/ymfpci_main.c
82480 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
82481 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
82482 break;
82483 }
82484 - if (atomic_read(&chip->interrupt_sleep_count)) {
82485 - atomic_set(&chip->interrupt_sleep_count, 0);
82486 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
82487 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82488 wake_up(&chip->interrupt_sleep);
82489 }
82490 __end:
82491 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
82492 continue;
82493 init_waitqueue_entry(&wait, current);
82494 add_wait_queue(&chip->interrupt_sleep, &wait);
82495 - atomic_inc(&chip->interrupt_sleep_count);
82496 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
82497 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
82498 remove_wait_queue(&chip->interrupt_sleep, &wait);
82499 }
82500 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
82501 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
82502 spin_unlock(&chip->reg_lock);
82503
82504 - if (atomic_read(&chip->interrupt_sleep_count)) {
82505 - atomic_set(&chip->interrupt_sleep_count, 0);
82506 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
82507 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82508 wake_up(&chip->interrupt_sleep);
82509 }
82510 }
82511 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
82512 spin_lock_init(&chip->reg_lock);
82513 spin_lock_init(&chip->voice_lock);
82514 init_waitqueue_head(&chip->interrupt_sleep);
82515 - atomic_set(&chip->interrupt_sleep_count, 0);
82516 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82517 chip->card = card;
82518 chip->pci = pci;
82519 chip->irq = -1;
82520 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
82521 new file mode 100644
82522 index 0000000..50f2f2f
82523 --- /dev/null
82524 +++ b/tools/gcc/.gitignore
82525 @@ -0,0 +1 @@
82526 +size_overflow_hash.h
82527 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
82528 new file mode 100644
82529 index 0000000..1d09b7e
82530 --- /dev/null
82531 +++ b/tools/gcc/Makefile
82532 @@ -0,0 +1,43 @@
82533 +#CC := gcc
82534 +#PLUGIN_SOURCE_FILES := pax_plugin.c
82535 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
82536 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
82537 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
82538 +
82539 +ifeq ($(PLUGINCC),$(HOSTCC))
82540 +HOSTLIBS := hostlibs
82541 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
82542 +else
82543 +HOSTLIBS := hostcxxlibs
82544 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
82545 +endif
82546 +
82547 +$(HOSTLIBS)-y := constify_plugin.so
82548 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
82549 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
82550 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
82551 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
82552 +$(HOSTLIBS)-y += colorize_plugin.so
82553 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
82554 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
82555 +
82556 +always := $($(HOSTLIBS)-y)
82557 +
82558 +constify_plugin-objs := constify_plugin.o
82559 +stackleak_plugin-objs := stackleak_plugin.o
82560 +kallocstat_plugin-objs := kallocstat_plugin.o
82561 +kernexec_plugin-objs := kernexec_plugin.o
82562 +checker_plugin-objs := checker_plugin.o
82563 +colorize_plugin-objs := colorize_plugin.o
82564 +size_overflow_plugin-objs := size_overflow_plugin.o
82565 +latent_entropy_plugin-objs := latent_entropy_plugin.o
82566 +
82567 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
82568 +
82569 +quiet_cmd_build_size_overflow_hash = GENHASH $@
82570 + cmd_build_size_overflow_hash = \
82571 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
82572 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
82573 + $(call if_changed,build_size_overflow_hash)
82574 +
82575 +targets += size_overflow_hash.h
82576 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
82577 new file mode 100644
82578 index 0000000..d41b5af
82579 --- /dev/null
82580 +++ b/tools/gcc/checker_plugin.c
82581 @@ -0,0 +1,171 @@
82582 +/*
82583 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
82584 + * Licensed under the GPL v2
82585 + *
82586 + * Note: the choice of the license means that the compilation process is
82587 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
82588 + * but for the kernel it doesn't matter since it doesn't link against
82589 + * any of the gcc libraries
82590 + *
82591 + * gcc plugin to implement various sparse (source code checker) features
82592 + *
82593 + * TODO:
82594 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
82595 + *
82596 + * BUGS:
82597 + * - none known
82598 + */
82599 +#include "gcc-plugin.h"
82600 +#include "config.h"
82601 +#include "system.h"
82602 +#include "coretypes.h"
82603 +#include "tree.h"
82604 +#include "tree-pass.h"
82605 +#include "flags.h"
82606 +#include "intl.h"
82607 +#include "toplev.h"
82608 +#include "plugin.h"
82609 +//#include "expr.h" where are you...
82610 +#include "diagnostic.h"
82611 +#include "plugin-version.h"
82612 +#include "tm.h"
82613 +#include "function.h"
82614 +#include "basic-block.h"
82615 +#include "gimple.h"
82616 +#include "rtl.h"
82617 +#include "emit-rtl.h"
82618 +#include "tree-flow.h"
82619 +#include "target.h"
82620 +
82621 +extern void c_register_addr_space (const char *str, addr_space_t as);
82622 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
82623 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
82624 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
82625 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
82626 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
82627 +
82628 +extern void print_gimple_stmt(FILE *, gimple, int, int);
82629 +extern rtx emit_move_insn(rtx x, rtx y);
82630 +
82631 +int plugin_is_GPL_compatible;
82632 +
82633 +static struct plugin_info checker_plugin_info = {
82634 + .version = "201111150100",
82635 +};
82636 +
82637 +#define ADDR_SPACE_KERNEL 0
82638 +#define ADDR_SPACE_FORCE_KERNEL 1
82639 +#define ADDR_SPACE_USER 2
82640 +#define ADDR_SPACE_FORCE_USER 3
82641 +#define ADDR_SPACE_IOMEM 0
82642 +#define ADDR_SPACE_FORCE_IOMEM 0
82643 +#define ADDR_SPACE_PERCPU 0
82644 +#define ADDR_SPACE_FORCE_PERCPU 0
82645 +#define ADDR_SPACE_RCU 0
82646 +#define ADDR_SPACE_FORCE_RCU 0
82647 +
82648 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
82649 +{
82650 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
82651 +}
82652 +
82653 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
82654 +{
82655 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
82656 +}
82657 +
82658 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
82659 +{
82660 + return default_addr_space_valid_pointer_mode(mode, as);
82661 +}
82662 +
82663 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
82664 +{
82665 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
82666 +}
82667 +
82668 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
82669 +{
82670 + return default_addr_space_legitimize_address(x, oldx, mode, as);
82671 +}
82672 +
82673 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
82674 +{
82675 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
82676 + return true;
82677 +
82678 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
82679 + return true;
82680 +
82681 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
82682 + return true;
82683 +
82684 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
82685 + return true;
82686 +
82687 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
82688 + return true;
82689 +
82690 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
82691 + return true;
82692 +
82693 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
82694 + return true;
82695 +
82696 + return subset == superset;
82697 +}
82698 +
82699 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
82700 +{
82701 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
82702 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
82703 +
82704 + return op;
82705 +}
82706 +
82707 +static void register_checker_address_spaces(void *event_data, void *data)
82708 +{
82709 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
82710 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
82711 + c_register_addr_space("__user", ADDR_SPACE_USER);
82712 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
82713 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
82714 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
82715 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
82716 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
82717 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
82718 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
82719 +
82720 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
82721 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
82722 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
82723 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
82724 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
82725 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
82726 + targetm.addr_space.convert = checker_addr_space_convert;
82727 +}
82728 +
82729 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82730 +{
82731 + const char * const plugin_name = plugin_info->base_name;
82732 + const int argc = plugin_info->argc;
82733 + const struct plugin_argument * const argv = plugin_info->argv;
82734 + int i;
82735 +
82736 + if (!plugin_default_version_check(version, &gcc_version)) {
82737 + error(G_("incompatible gcc/plugin versions"));
82738 + return 1;
82739 + }
82740 +
82741 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
82742 +
82743 + for (i = 0; i < argc; ++i)
82744 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
82745 +
82746 + if (TARGET_64BIT == 0)
82747 + return 0;
82748 +
82749 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
82750 +
82751 + return 0;
82752 +}
82753 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
82754 new file mode 100644
82755 index 0000000..846aeb0
82756 --- /dev/null
82757 +++ b/tools/gcc/colorize_plugin.c
82758 @@ -0,0 +1,148 @@
82759 +/*
82760 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
82761 + * Licensed under the GPL v2
82762 + *
82763 + * Note: the choice of the license means that the compilation process is
82764 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
82765 + * but for the kernel it doesn't matter since it doesn't link against
82766 + * any of the gcc libraries
82767 + *
82768 + * gcc plugin to colorize diagnostic output
82769 + *
82770 + */
82771 +
82772 +#include "gcc-plugin.h"
82773 +#include "config.h"
82774 +#include "system.h"
82775 +#include "coretypes.h"
82776 +#include "tree.h"
82777 +#include "tree-pass.h"
82778 +#include "flags.h"
82779 +#include "intl.h"
82780 +#include "toplev.h"
82781 +#include "plugin.h"
82782 +#include "diagnostic.h"
82783 +#include "plugin-version.h"
82784 +#include "tm.h"
82785 +
82786 +int plugin_is_GPL_compatible;
82787 +
82788 +static struct plugin_info colorize_plugin_info = {
82789 + .version = "201203092200",
82790 + .help = NULL,
82791 +};
82792 +
82793 +#define GREEN "\033[32m\033[2m"
82794 +#define LIGHTGREEN "\033[32m\033[1m"
82795 +#define YELLOW "\033[33m\033[2m"
82796 +#define LIGHTYELLOW "\033[33m\033[1m"
82797 +#define RED "\033[31m\033[2m"
82798 +#define LIGHTRED "\033[31m\033[1m"
82799 +#define BLUE "\033[34m\033[2m"
82800 +#define LIGHTBLUE "\033[34m\033[1m"
82801 +#define BRIGHT "\033[m\033[1m"
82802 +#define NORMAL "\033[m"
82803 +
82804 +static diagnostic_starter_fn old_starter;
82805 +static diagnostic_finalizer_fn old_finalizer;
82806 +
82807 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
82808 +{
82809 + const char *color;
82810 + char *newprefix;
82811 +
82812 + switch (diagnostic->kind) {
82813 + case DK_NOTE:
82814 + color = LIGHTBLUE;
82815 + break;
82816 +
82817 + case DK_PEDWARN:
82818 + case DK_WARNING:
82819 + color = LIGHTYELLOW;
82820 + break;
82821 +
82822 + case DK_ERROR:
82823 + case DK_FATAL:
82824 + case DK_ICE:
82825 + case DK_PERMERROR:
82826 + case DK_SORRY:
82827 + color = LIGHTRED;
82828 + break;
82829 +
82830 + default:
82831 + color = NORMAL;
82832 + }
82833 +
82834 + old_starter(context, diagnostic);
82835 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
82836 + return;
82837 + pp_destroy_prefix(context->printer);
82838 + pp_set_prefix(context->printer, newprefix);
82839 +}
82840 +
82841 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
82842 +{
82843 + old_finalizer(context, diagnostic);
82844 +}
82845 +
82846 +static void colorize_arm(void)
82847 +{
82848 + old_starter = diagnostic_starter(global_dc);
82849 + old_finalizer = diagnostic_finalizer(global_dc);
82850 +
82851 + diagnostic_starter(global_dc) = start_colorize;
82852 + diagnostic_finalizer(global_dc) = finalize_colorize;
82853 +}
82854 +
82855 +static unsigned int execute_colorize_rearm(void)
82856 +{
82857 + if (diagnostic_starter(global_dc) == start_colorize)
82858 + return 0;
82859 +
82860 + colorize_arm();
82861 + return 0;
82862 +}
82863 +
82864 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
82865 + .pass = {
82866 + .type = SIMPLE_IPA_PASS,
82867 + .name = "colorize_rearm",
82868 + .gate = NULL,
82869 + .execute = execute_colorize_rearm,
82870 + .sub = NULL,
82871 + .next = NULL,
82872 + .static_pass_number = 0,
82873 + .tv_id = TV_NONE,
82874 + .properties_required = 0,
82875 + .properties_provided = 0,
82876 + .properties_destroyed = 0,
82877 + .todo_flags_start = 0,
82878 + .todo_flags_finish = 0
82879 + }
82880 +};
82881 +
82882 +static void colorize_start_unit(void *gcc_data, void *user_data)
82883 +{
82884 + colorize_arm();
82885 +}
82886 +
82887 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82888 +{
82889 + const char * const plugin_name = plugin_info->base_name;
82890 + struct register_pass_info colorize_rearm_pass_info = {
82891 + .pass = &pass_ipa_colorize_rearm.pass,
82892 + .reference_pass_name = "*free_lang_data",
82893 + .ref_pass_instance_number = 1,
82894 + .pos_op = PASS_POS_INSERT_AFTER
82895 + };
82896 +
82897 + if (!plugin_default_version_check(version, &gcc_version)) {
82898 + error(G_("incompatible gcc/plugin versions"));
82899 + return 1;
82900 + }
82901 +
82902 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
82903 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
82904 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
82905 + return 0;
82906 +}
82907 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
82908 new file mode 100644
82909 index 0000000..92ed719
82910 --- /dev/null
82911 +++ b/tools/gcc/constify_plugin.c
82912 @@ -0,0 +1,331 @@
82913 +/*
82914 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
82915 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
82916 + * Licensed under the GPL v2, or (at your option) v3
82917 + *
82918 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
82919 + *
82920 + * Homepage:
82921 + * http://www.grsecurity.net/~ephox/const_plugin/
82922 + *
82923 + * Usage:
82924 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
82925 + * $ gcc -fplugin=constify_plugin.so test.c -O2
82926 + */
82927 +
82928 +#include "gcc-plugin.h"
82929 +#include "config.h"
82930 +#include "system.h"
82931 +#include "coretypes.h"
82932 +#include "tree.h"
82933 +#include "tree-pass.h"
82934 +#include "flags.h"
82935 +#include "intl.h"
82936 +#include "toplev.h"
82937 +#include "plugin.h"
82938 +#include "diagnostic.h"
82939 +#include "plugin-version.h"
82940 +#include "tm.h"
82941 +#include "function.h"
82942 +#include "basic-block.h"
82943 +#include "gimple.h"
82944 +#include "rtl.h"
82945 +#include "emit-rtl.h"
82946 +#include "tree-flow.h"
82947 +
82948 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
82949 +
82950 +int plugin_is_GPL_compatible;
82951 +
82952 +static struct plugin_info const_plugin_info = {
82953 + .version = "201205300030",
82954 + .help = "no-constify\tturn off constification\n",
82955 +};
82956 +
82957 +static void deconstify_tree(tree node);
82958 +
82959 +static void deconstify_type(tree type)
82960 +{
82961 + tree field;
82962 +
82963 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
82964 + tree type = TREE_TYPE(field);
82965 +
82966 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
82967 + continue;
82968 + if (!TYPE_READONLY(type))
82969 + continue;
82970 +
82971 + deconstify_tree(field);
82972 + }
82973 + TYPE_READONLY(type) = 0;
82974 + C_TYPE_FIELDS_READONLY(type) = 0;
82975 +}
82976 +
82977 +static void deconstify_tree(tree node)
82978 +{
82979 + tree old_type, new_type, field;
82980 +
82981 + old_type = TREE_TYPE(node);
82982 +
82983 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
82984 +
82985 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
82986 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
82987 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
82988 + DECL_FIELD_CONTEXT(field) = new_type;
82989 +
82990 + deconstify_type(new_type);
82991 +
82992 + TREE_READONLY(node) = 0;
82993 + TREE_TYPE(node) = new_type;
82994 +}
82995 +
82996 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
82997 +{
82998 + tree type;
82999 +
83000 + *no_add_attrs = true;
83001 + if (TREE_CODE(*node) == FUNCTION_DECL) {
83002 + error("%qE attribute does not apply to functions", name);
83003 + return NULL_TREE;
83004 + }
83005 +
83006 + if (TREE_CODE(*node) == VAR_DECL) {
83007 + error("%qE attribute does not apply to variables", name);
83008 + return NULL_TREE;
83009 + }
83010 +
83011 + if (TYPE_P(*node)) {
83012 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
83013 + *no_add_attrs = false;
83014 + else
83015 + error("%qE attribute applies to struct and union types only", name);
83016 + return NULL_TREE;
83017 + }
83018 +
83019 + type = TREE_TYPE(*node);
83020 +
83021 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
83022 + error("%qE attribute applies to struct and union types only", name);
83023 + return NULL_TREE;
83024 + }
83025 +
83026 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
83027 + error("%qE attribute is already applied to the type", name);
83028 + return NULL_TREE;
83029 + }
83030 +
83031 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
83032 + error("%qE attribute used on type that is not constified", name);
83033 + return NULL_TREE;
83034 + }
83035 +
83036 + if (TREE_CODE(*node) == TYPE_DECL) {
83037 + deconstify_tree(*node);
83038 + return NULL_TREE;
83039 + }
83040 +
83041 + return NULL_TREE;
83042 +}
83043 +
83044 +static void constify_type(tree type)
83045 +{
83046 + TYPE_READONLY(type) = 1;
83047 + C_TYPE_FIELDS_READONLY(type) = 1;
83048 +}
83049 +
83050 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
83051 +{
83052 + *no_add_attrs = true;
83053 + if (!TYPE_P(*node)) {
83054 + error("%qE attribute applies to types only", name);
83055 + return NULL_TREE;
83056 + }
83057 +
83058 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
83059 + error("%qE attribute applies to struct and union types only", name);
83060 + return NULL_TREE;
83061 + }
83062 +
83063 + *no_add_attrs = false;
83064 + constify_type(*node);
83065 + return NULL_TREE;
83066 +}
83067 +
83068 +static struct attribute_spec no_const_attr = {
83069 + .name = "no_const",
83070 + .min_length = 0,
83071 + .max_length = 0,
83072 + .decl_required = false,
83073 + .type_required = false,
83074 + .function_type_required = false,
83075 + .handler = handle_no_const_attribute,
83076 +#if BUILDING_GCC_VERSION >= 4007
83077 + .affects_type_identity = true
83078 +#endif
83079 +};
83080 +
83081 +static struct attribute_spec do_const_attr = {
83082 + .name = "do_const",
83083 + .min_length = 0,
83084 + .max_length = 0,
83085 + .decl_required = false,
83086 + .type_required = false,
83087 + .function_type_required = false,
83088 + .handler = handle_do_const_attribute,
83089 +#if BUILDING_GCC_VERSION >= 4007
83090 + .affects_type_identity = true
83091 +#endif
83092 +};
83093 +
83094 +static void register_attributes(void *event_data, void *data)
83095 +{
83096 + register_attribute(&no_const_attr);
83097 + register_attribute(&do_const_attr);
83098 +}
83099 +
83100 +static bool is_fptr(tree field)
83101 +{
83102 + tree ptr = TREE_TYPE(field);
83103 +
83104 + if (TREE_CODE(ptr) != POINTER_TYPE)
83105 + return false;
83106 +
83107 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
83108 +}
83109 +
83110 +static bool walk_struct(tree node)
83111 +{
83112 + tree field;
83113 +
83114 + if (TYPE_FIELDS(node) == NULL_TREE)
83115 + return false;
83116 +
83117 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
83118 + gcc_assert(!TYPE_READONLY(node));
83119 + deconstify_type(node);
83120 + return false;
83121 + }
83122 +
83123 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
83124 + tree type = TREE_TYPE(field);
83125 + enum tree_code code = TREE_CODE(type);
83126 +
83127 + if (node == type)
83128 + return false;
83129 + if (code == RECORD_TYPE || code == UNION_TYPE) {
83130 + if (!(walk_struct(type)))
83131 + return false;
83132 + } else if (!is_fptr(field) && !TREE_READONLY(field))
83133 + return false;
83134 + }
83135 + return true;
83136 +}
83137 +
83138 +static void finish_type(void *event_data, void *data)
83139 +{
83140 + tree type = (tree)event_data;
83141 +
83142 + if (type == NULL_TREE || type == error_mark_node)
83143 + return;
83144 +
83145 + if (TYPE_READONLY(type))
83146 + return;
83147 +
83148 + if (walk_struct(type))
83149 + constify_type(type);
83150 +}
83151 +
83152 +static unsigned int check_local_variables(void);
83153 +
83154 +struct gimple_opt_pass pass_local_variable = {
83155 + {
83156 + .type = GIMPLE_PASS,
83157 + .name = "check_local_variables",
83158 + .gate = NULL,
83159 + .execute = check_local_variables,
83160 + .sub = NULL,
83161 + .next = NULL,
83162 + .static_pass_number = 0,
83163 + .tv_id = TV_NONE,
83164 + .properties_required = 0,
83165 + .properties_provided = 0,
83166 + .properties_destroyed = 0,
83167 + .todo_flags_start = 0,
83168 + .todo_flags_finish = 0
83169 + }
83170 +};
83171 +
83172 +static unsigned int check_local_variables(void)
83173 +{
83174 + tree var;
83175 + referenced_var_iterator rvi;
83176 +
83177 +#if BUILDING_GCC_VERSION == 4005
83178 + FOR_EACH_REFERENCED_VAR(var, rvi) {
83179 +#else
83180 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
83181 +#endif
83182 + tree type = TREE_TYPE(var);
83183 +
83184 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
83185 + continue;
83186 +
83187 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
83188 + continue;
83189 +
83190 + if (!TYPE_READONLY(type))
83191 + continue;
83192 +
83193 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
83194 +// continue;
83195 +
83196 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
83197 +// continue;
83198 +
83199 + if (walk_struct(type)) {
83200 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
83201 + return 1;
83202 + }
83203 + }
83204 + return 0;
83205 +}
83206 +
83207 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83208 +{
83209 + const char * const plugin_name = plugin_info->base_name;
83210 + const int argc = plugin_info->argc;
83211 + const struct plugin_argument * const argv = plugin_info->argv;
83212 + int i;
83213 + bool constify = true;
83214 +
83215 + struct register_pass_info local_variable_pass_info = {
83216 + .pass = &pass_local_variable.pass,
83217 + .reference_pass_name = "*referenced_vars",
83218 + .ref_pass_instance_number = 1,
83219 + .pos_op = PASS_POS_INSERT_AFTER
83220 + };
83221 +
83222 + if (!plugin_default_version_check(version, &gcc_version)) {
83223 + error(G_("incompatible gcc/plugin versions"));
83224 + return 1;
83225 + }
83226 +
83227 + for (i = 0; i < argc; ++i) {
83228 + if (!(strcmp(argv[i].key, "no-constify"))) {
83229 + constify = false;
83230 + continue;
83231 + }
83232 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
83233 + }
83234 +
83235 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
83236 + if (constify) {
83237 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
83238 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
83239 + }
83240 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
83241 +
83242 + return 0;
83243 +}
83244 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
83245 new file mode 100644
83246 index 0000000..d272d4b
83247 --- /dev/null
83248 +++ b/tools/gcc/generate_size_overflow_hash.sh
83249 @@ -0,0 +1,96 @@
83250 +#!/bin/bash
83251 +
83252 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
83253 +
83254 +header1="size_overflow_hash.h"
83255 +database="size_overflow_hash.data"
83256 +n=65536
83257 +
83258 +usage() {
83259 +cat <<EOF
83260 +usage: $0 options
83261 +OPTIONS:
83262 + -h|--help help
83263 + -o header file
83264 + -d database file
83265 + -n hash array size
83266 +EOF
83267 + return 0
83268 +}
83269 +
83270 +while true
83271 +do
83272 + case "$1" in
83273 + -h|--help) usage && exit 0;;
83274 + -n) n=$2; shift 2;;
83275 + -o) header1="$2"; shift 2;;
83276 + -d) database="$2"; shift 2;;
83277 + --) shift 1; break ;;
83278 + *) break ;;
83279 + esac
83280 +done
83281 +
83282 +create_defines() {
83283 + for i in `seq 1 10`
83284 + do
83285 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
83286 + done
83287 + echo >> "$header1"
83288 +}
83289 +
83290 +create_structs () {
83291 + rm -f "$header1"
83292 +
83293 + create_defines
83294 +
83295 + cat "$database" | while read data
83296 + do
83297 + data_array=(${data// /?})
83298 + data_array=(${data_array[@]//+/ })
83299 + struct_hash_name="${data_array[0]}"
83300 + funcn="${data_array[1]//\?/ }"
83301 + params="${data_array[2]}"
83302 + next="${data_array[5]}"
83303 +
83304 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
83305 +
83306 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
83307 + echo -en "\t.param\t= " >> "$header1"
83308 + line=
83309 + for param_num in ${params//-/ };
83310 + do
83311 + line="${line}PARAM"$param_num"|"
83312 + done
83313 +
83314 + echo -e "${line%?},\n};\n" >> "$header1"
83315 + done
83316 +}
83317 +
83318 +create_headers () {
83319 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
83320 +}
83321 +
83322 +create_array_elements () {
83323 + index=0
83324 + grep -v "nohasharray" $database | sort -n -t '+' -k 4 | while read data
83325 + do
83326 + data_array=(${data// /?})
83327 + data_array=(${data_array//+/ })
83328 + i="${data_array[3]}"
83329 + hash="${data_array[4]}"
83330 + while [[ $index -lt $i ]]
83331 + do
83332 + echo -e "\t["$index"]\t= NULL," >> "$header1"
83333 + index=$(($index + 1))
83334 + done
83335 + index=$(($index + 1))
83336 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
83337 + done
83338 + echo '};' >> $header1
83339 +}
83340 +
83341 +create_structs
83342 +create_headers
83343 +create_array_elements
83344 +
83345 +exit 0
83346 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
83347 new file mode 100644
83348 index 0000000..a86e422
83349 --- /dev/null
83350 +++ b/tools/gcc/kallocstat_plugin.c
83351 @@ -0,0 +1,167 @@
83352 +/*
83353 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
83354 + * Licensed under the GPL v2
83355 + *
83356 + * Note: the choice of the license means that the compilation process is
83357 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83358 + * but for the kernel it doesn't matter since it doesn't link against
83359 + * any of the gcc libraries
83360 + *
83361 + * gcc plugin to find the distribution of k*alloc sizes
83362 + *
83363 + * TODO:
83364 + *
83365 + * BUGS:
83366 + * - none known
83367 + */
83368 +#include "gcc-plugin.h"
83369 +#include "config.h"
83370 +#include "system.h"
83371 +#include "coretypes.h"
83372 +#include "tree.h"
83373 +#include "tree-pass.h"
83374 +#include "flags.h"
83375 +#include "intl.h"
83376 +#include "toplev.h"
83377 +#include "plugin.h"
83378 +//#include "expr.h" where are you...
83379 +#include "diagnostic.h"
83380 +#include "plugin-version.h"
83381 +#include "tm.h"
83382 +#include "function.h"
83383 +#include "basic-block.h"
83384 +#include "gimple.h"
83385 +#include "rtl.h"
83386 +#include "emit-rtl.h"
83387 +
83388 +extern void print_gimple_stmt(FILE *, gimple, int, int);
83389 +
83390 +int plugin_is_GPL_compatible;
83391 +
83392 +static const char * const kalloc_functions[] = {
83393 + "__kmalloc",
83394 + "kmalloc",
83395 + "kmalloc_large",
83396 + "kmalloc_node",
83397 + "kmalloc_order",
83398 + "kmalloc_order_trace",
83399 + "kmalloc_slab",
83400 + "kzalloc",
83401 + "kzalloc_node",
83402 +};
83403 +
83404 +static struct plugin_info kallocstat_plugin_info = {
83405 + .version = "201111150100",
83406 +};
83407 +
83408 +static unsigned int execute_kallocstat(void);
83409 +
83410 +static struct gimple_opt_pass kallocstat_pass = {
83411 + .pass = {
83412 + .type = GIMPLE_PASS,
83413 + .name = "kallocstat",
83414 + .gate = NULL,
83415 + .execute = execute_kallocstat,
83416 + .sub = NULL,
83417 + .next = NULL,
83418 + .static_pass_number = 0,
83419 + .tv_id = TV_NONE,
83420 + .properties_required = 0,
83421 + .properties_provided = 0,
83422 + .properties_destroyed = 0,
83423 + .todo_flags_start = 0,
83424 + .todo_flags_finish = 0
83425 + }
83426 +};
83427 +
83428 +static bool is_kalloc(const char *fnname)
83429 +{
83430 + size_t i;
83431 +
83432 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
83433 + if (!strcmp(fnname, kalloc_functions[i]))
83434 + return true;
83435 + return false;
83436 +}
83437 +
83438 +static unsigned int execute_kallocstat(void)
83439 +{
83440 + basic_block bb;
83441 +
83442 + // 1. loop through BBs and GIMPLE statements
83443 + FOR_EACH_BB(bb) {
83444 + gimple_stmt_iterator gsi;
83445 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83446 + // gimple match:
83447 + tree fndecl, size;
83448 + gimple call_stmt;
83449 + const char *fnname;
83450 +
83451 + // is it a call
83452 + call_stmt = gsi_stmt(gsi);
83453 + if (!is_gimple_call(call_stmt))
83454 + continue;
83455 + fndecl = gimple_call_fndecl(call_stmt);
83456 + if (fndecl == NULL_TREE)
83457 + continue;
83458 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
83459 + continue;
83460 +
83461 + // is it a call to k*alloc
83462 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
83463 + if (!is_kalloc(fnname))
83464 + continue;
83465 +
83466 + // is the size arg the result of a simple const assignment
83467 + size = gimple_call_arg(call_stmt, 0);
83468 + while (true) {
83469 + gimple def_stmt;
83470 + expanded_location xloc;
83471 + size_t size_val;
83472 +
83473 + if (TREE_CODE(size) != SSA_NAME)
83474 + break;
83475 + def_stmt = SSA_NAME_DEF_STMT(size);
83476 + if (!def_stmt || !is_gimple_assign(def_stmt))
83477 + break;
83478 + if (gimple_num_ops(def_stmt) != 2)
83479 + break;
83480 + size = gimple_assign_rhs1(def_stmt);
83481 + if (!TREE_CONSTANT(size))
83482 + continue;
83483 + xloc = expand_location(gimple_location(def_stmt));
83484 + if (!xloc.file)
83485 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
83486 + size_val = TREE_INT_CST_LOW(size);
83487 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
83488 + break;
83489 + }
83490 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
83491 +//debug_tree(gimple_call_fn(call_stmt));
83492 +//print_node(stderr, "pax", fndecl, 4);
83493 + }
83494 + }
83495 +
83496 + return 0;
83497 +}
83498 +
83499 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83500 +{
83501 + const char * const plugin_name = plugin_info->base_name;
83502 + struct register_pass_info kallocstat_pass_info = {
83503 + .pass = &kallocstat_pass.pass,
83504 + .reference_pass_name = "ssa",
83505 + .ref_pass_instance_number = 1,
83506 + .pos_op = PASS_POS_INSERT_AFTER
83507 + };
83508 +
83509 + if (!plugin_default_version_check(version, &gcc_version)) {
83510 + error(G_("incompatible gcc/plugin versions"));
83511 + return 1;
83512 + }
83513 +
83514 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
83515 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
83516 +
83517 + return 0;
83518 +}
83519 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
83520 new file mode 100644
83521 index 0000000..98011fa
83522 --- /dev/null
83523 +++ b/tools/gcc/kernexec_plugin.c
83524 @@ -0,0 +1,427 @@
83525 +/*
83526 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
83527 + * Licensed under the GPL v2
83528 + *
83529 + * Note: the choice of the license means that the compilation process is
83530 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83531 + * but for the kernel it doesn't matter since it doesn't link against
83532 + * any of the gcc libraries
83533 + *
83534 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
83535 + *
83536 + * TODO:
83537 + *
83538 + * BUGS:
83539 + * - none known
83540 + */
83541 +#include "gcc-plugin.h"
83542 +#include "config.h"
83543 +#include "system.h"
83544 +#include "coretypes.h"
83545 +#include "tree.h"
83546 +#include "tree-pass.h"
83547 +#include "flags.h"
83548 +#include "intl.h"
83549 +#include "toplev.h"
83550 +#include "plugin.h"
83551 +//#include "expr.h" where are you...
83552 +#include "diagnostic.h"
83553 +#include "plugin-version.h"
83554 +#include "tm.h"
83555 +#include "function.h"
83556 +#include "basic-block.h"
83557 +#include "gimple.h"
83558 +#include "rtl.h"
83559 +#include "emit-rtl.h"
83560 +#include "tree-flow.h"
83561 +
83562 +extern void print_gimple_stmt(FILE *, gimple, int, int);
83563 +extern rtx emit_move_insn(rtx x, rtx y);
83564 +
83565 +int plugin_is_GPL_compatible;
83566 +
83567 +static struct plugin_info kernexec_plugin_info = {
83568 + .version = "201111291120",
83569 + .help = "method=[bts|or]\tinstrumentation method\n"
83570 +};
83571 +
83572 +static unsigned int execute_kernexec_reload(void);
83573 +static unsigned int execute_kernexec_fptr(void);
83574 +static unsigned int execute_kernexec_retaddr(void);
83575 +static bool kernexec_cmodel_check(void);
83576 +
83577 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
83578 +static void (*kernexec_instrument_retaddr)(rtx);
83579 +
83580 +static struct gimple_opt_pass kernexec_reload_pass = {
83581 + .pass = {
83582 + .type = GIMPLE_PASS,
83583 + .name = "kernexec_reload",
83584 + .gate = kernexec_cmodel_check,
83585 + .execute = execute_kernexec_reload,
83586 + .sub = NULL,
83587 + .next = NULL,
83588 + .static_pass_number = 0,
83589 + .tv_id = TV_NONE,
83590 + .properties_required = 0,
83591 + .properties_provided = 0,
83592 + .properties_destroyed = 0,
83593 + .todo_flags_start = 0,
83594 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
83595 + }
83596 +};
83597 +
83598 +static struct gimple_opt_pass kernexec_fptr_pass = {
83599 + .pass = {
83600 + .type = GIMPLE_PASS,
83601 + .name = "kernexec_fptr",
83602 + .gate = kernexec_cmodel_check,
83603 + .execute = execute_kernexec_fptr,
83604 + .sub = NULL,
83605 + .next = NULL,
83606 + .static_pass_number = 0,
83607 + .tv_id = TV_NONE,
83608 + .properties_required = 0,
83609 + .properties_provided = 0,
83610 + .properties_destroyed = 0,
83611 + .todo_flags_start = 0,
83612 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
83613 + }
83614 +};
83615 +
83616 +static struct rtl_opt_pass kernexec_retaddr_pass = {
83617 + .pass = {
83618 + .type = RTL_PASS,
83619 + .name = "kernexec_retaddr",
83620 + .gate = kernexec_cmodel_check,
83621 + .execute = execute_kernexec_retaddr,
83622 + .sub = NULL,
83623 + .next = NULL,
83624 + .static_pass_number = 0,
83625 + .tv_id = TV_NONE,
83626 + .properties_required = 0,
83627 + .properties_provided = 0,
83628 + .properties_destroyed = 0,
83629 + .todo_flags_start = 0,
83630 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
83631 + }
83632 +};
83633 +
83634 +static bool kernexec_cmodel_check(void)
83635 +{
83636 + tree section;
83637 +
83638 + if (ix86_cmodel != CM_KERNEL)
83639 + return false;
83640 +
83641 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
83642 + if (!section || !TREE_VALUE(section))
83643 + return true;
83644 +
83645 + section = TREE_VALUE(TREE_VALUE(section));
83646 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
83647 + return true;
83648 +
83649 + return false;
83650 +}
83651 +
83652 +/*
83653 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
83654 + */
83655 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
83656 +{
83657 + gimple asm_movabs_stmt;
83658 +
83659 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
83660 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
83661 + gimple_asm_set_volatile(asm_movabs_stmt, true);
83662 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
83663 + update_stmt(asm_movabs_stmt);
83664 +}
83665 +
83666 +/*
83667 + * find all asm() stmts that clobber r10 and add a reload of r10
83668 + */
83669 +static unsigned int execute_kernexec_reload(void)
83670 +{
83671 + basic_block bb;
83672 +
83673 + // 1. loop through BBs and GIMPLE statements
83674 + FOR_EACH_BB(bb) {
83675 + gimple_stmt_iterator gsi;
83676 +
83677 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83678 + // gimple match: __asm__ ("" : : : "r10");
83679 + gimple asm_stmt;
83680 + size_t nclobbers;
83681 +
83682 + // is it an asm ...
83683 + asm_stmt = gsi_stmt(gsi);
83684 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
83685 + continue;
83686 +
83687 + // ... clobbering r10
83688 + nclobbers = gimple_asm_nclobbers(asm_stmt);
83689 + while (nclobbers--) {
83690 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
83691 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
83692 + continue;
83693 + kernexec_reload_fptr_mask(&gsi);
83694 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
83695 + break;
83696 + }
83697 + }
83698 + }
83699 +
83700 + return 0;
83701 +}
83702 +
83703 +/*
83704 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
83705 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
83706 + */
83707 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
83708 +{
83709 + gimple assign_intptr, assign_new_fptr, call_stmt;
83710 + tree intptr, old_fptr, new_fptr, kernexec_mask;
83711 +
83712 + call_stmt = gsi_stmt(*gsi);
83713 + old_fptr = gimple_call_fn(call_stmt);
83714 +
83715 + // create temporary unsigned long variable used for bitops and cast fptr to it
83716 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
83717 + add_referenced_var(intptr);
83718 + mark_sym_for_renaming(intptr);
83719 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
83720 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
83721 + update_stmt(assign_intptr);
83722 +
83723 + // apply logical or to temporary unsigned long and bitmask
83724 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
83725 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
83726 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
83727 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
83728 + update_stmt(assign_intptr);
83729 +
83730 + // cast temporary unsigned long back to a temporary fptr variable
83731 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
83732 + add_referenced_var(new_fptr);
83733 + mark_sym_for_renaming(new_fptr);
83734 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
83735 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
83736 + update_stmt(assign_new_fptr);
83737 +
83738 + // replace call stmt fn with the new fptr
83739 + gimple_call_set_fn(call_stmt, new_fptr);
83740 + update_stmt(call_stmt);
83741 +}
83742 +
83743 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
83744 +{
83745 + gimple asm_or_stmt, call_stmt;
83746 + tree old_fptr, new_fptr, input, output;
83747 + VEC(tree, gc) *inputs = NULL;
83748 + VEC(tree, gc) *outputs = NULL;
83749 +
83750 + call_stmt = gsi_stmt(*gsi);
83751 + old_fptr = gimple_call_fn(call_stmt);
83752 +
83753 + // create temporary fptr variable
83754 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
83755 + add_referenced_var(new_fptr);
83756 + mark_sym_for_renaming(new_fptr);
83757 +
83758 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
83759 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
83760 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
83761 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
83762 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
83763 + VEC_safe_push(tree, gc, inputs, input);
83764 + VEC_safe_push(tree, gc, outputs, output);
83765 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
83766 + gimple_asm_set_volatile(asm_or_stmt, true);
83767 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
83768 + update_stmt(asm_or_stmt);
83769 +
83770 + // replace call stmt fn with the new fptr
83771 + gimple_call_set_fn(call_stmt, new_fptr);
83772 + update_stmt(call_stmt);
83773 +}
83774 +
83775 +/*
83776 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
83777 + */
83778 +static unsigned int execute_kernexec_fptr(void)
83779 +{
83780 + basic_block bb;
83781 +
83782 + // 1. loop through BBs and GIMPLE statements
83783 + FOR_EACH_BB(bb) {
83784 + gimple_stmt_iterator gsi;
83785 +
83786 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83787 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
83788 + tree fn;
83789 + gimple call_stmt;
83790 +
83791 + // is it a call ...
83792 + call_stmt = gsi_stmt(gsi);
83793 + if (!is_gimple_call(call_stmt))
83794 + continue;
83795 + fn = gimple_call_fn(call_stmt);
83796 + if (TREE_CODE(fn) == ADDR_EXPR)
83797 + continue;
83798 + if (TREE_CODE(fn) != SSA_NAME)
83799 + gcc_unreachable();
83800 +
83801 + // ... through a function pointer
83802 + fn = SSA_NAME_VAR(fn);
83803 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
83804 + continue;
83805 + fn = TREE_TYPE(fn);
83806 + if (TREE_CODE(fn) != POINTER_TYPE)
83807 + continue;
83808 + fn = TREE_TYPE(fn);
83809 + if (TREE_CODE(fn) != FUNCTION_TYPE)
83810 + continue;
83811 +
83812 + kernexec_instrument_fptr(&gsi);
83813 +
83814 +//debug_tree(gimple_call_fn(call_stmt));
83815 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
83816 + }
83817 + }
83818 +
83819 + return 0;
83820 +}
83821 +
83822 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
83823 +static void kernexec_instrument_retaddr_bts(rtx insn)
83824 +{
83825 + rtx btsq;
83826 + rtvec argvec, constraintvec, labelvec;
83827 + int line;
83828 +
83829 + // create asm volatile("btsq $63,(%%rsp)":::)
83830 + argvec = rtvec_alloc(0);
83831 + constraintvec = rtvec_alloc(0);
83832 + labelvec = rtvec_alloc(0);
83833 + line = expand_location(RTL_LOCATION(insn)).line;
83834 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
83835 + MEM_VOLATILE_P(btsq) = 1;
83836 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
83837 + emit_insn_before(btsq, insn);
83838 +}
83839 +
83840 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
83841 +static void kernexec_instrument_retaddr_or(rtx insn)
83842 +{
83843 + rtx orq;
83844 + rtvec argvec, constraintvec, labelvec;
83845 + int line;
83846 +
83847 + // create asm volatile("orq %%r10,(%%rsp)":::)
83848 + argvec = rtvec_alloc(0);
83849 + constraintvec = rtvec_alloc(0);
83850 + labelvec = rtvec_alloc(0);
83851 + line = expand_location(RTL_LOCATION(insn)).line;
83852 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
83853 + MEM_VOLATILE_P(orq) = 1;
83854 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
83855 + emit_insn_before(orq, insn);
83856 +}
83857 +
83858 +/*
83859 + * find all asm level function returns and forcibly set the highest bit of the return address
83860 + */
83861 +static unsigned int execute_kernexec_retaddr(void)
83862 +{
83863 + rtx insn;
83864 +
83865 + // 1. find function returns
83866 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
83867 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
83868 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
83869 + rtx body;
83870 +
83871 + // is it a retn
83872 + if (!JUMP_P(insn))
83873 + continue;
83874 + body = PATTERN(insn);
83875 + if (GET_CODE(body) == PARALLEL)
83876 + body = XVECEXP(body, 0, 0);
83877 + if (GET_CODE(body) != RETURN)
83878 + continue;
83879 + kernexec_instrument_retaddr(insn);
83880 + }
83881 +
83882 +// print_simple_rtl(stderr, get_insns());
83883 +// print_rtl(stderr, get_insns());
83884 +
83885 + return 0;
83886 +}
83887 +
83888 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83889 +{
83890 + const char * const plugin_name = plugin_info->base_name;
83891 + const int argc = plugin_info->argc;
83892 + const struct plugin_argument * const argv = plugin_info->argv;
83893 + int i;
83894 + struct register_pass_info kernexec_reload_pass_info = {
83895 + .pass = &kernexec_reload_pass.pass,
83896 + .reference_pass_name = "ssa",
83897 + .ref_pass_instance_number = 1,
83898 + .pos_op = PASS_POS_INSERT_AFTER
83899 + };
83900 + struct register_pass_info kernexec_fptr_pass_info = {
83901 + .pass = &kernexec_fptr_pass.pass,
83902 + .reference_pass_name = "ssa",
83903 + .ref_pass_instance_number = 1,
83904 + .pos_op = PASS_POS_INSERT_AFTER
83905 + };
83906 + struct register_pass_info kernexec_retaddr_pass_info = {
83907 + .pass = &kernexec_retaddr_pass.pass,
83908 + .reference_pass_name = "pro_and_epilogue",
83909 + .ref_pass_instance_number = 1,
83910 + .pos_op = PASS_POS_INSERT_AFTER
83911 + };
83912 +
83913 + if (!plugin_default_version_check(version, &gcc_version)) {
83914 + error(G_("incompatible gcc/plugin versions"));
83915 + return 1;
83916 + }
83917 +
83918 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
83919 +
83920 + if (TARGET_64BIT == 0)
83921 + return 0;
83922 +
83923 + for (i = 0; i < argc; ++i) {
83924 + if (!strcmp(argv[i].key, "method")) {
83925 + if (!argv[i].value) {
83926 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
83927 + continue;
83928 + }
83929 + if (!strcmp(argv[i].value, "bts")) {
83930 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
83931 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
83932 + } else if (!strcmp(argv[i].value, "or")) {
83933 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
83934 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
83935 + fix_register("r10", 1, 1);
83936 + } else
83937 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
83938 + continue;
83939 + }
83940 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
83941 + }
83942 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
83943 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
83944 +
83945 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
83946 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
83947 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
83948 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
83949 +
83950 + return 0;
83951 +}
83952 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
83953 new file mode 100644
83954 index 0000000..b8008f7
83955 --- /dev/null
83956 +++ b/tools/gcc/latent_entropy_plugin.c
83957 @@ -0,0 +1,295 @@
83958 +/*
83959 + * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
83960 + * Licensed under the GPL v2
83961 + *
83962 + * Note: the choice of the license means that the compilation process is
83963 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83964 + * but for the kernel it doesn't matter since it doesn't link against
83965 + * any of the gcc libraries
83966 + *
83967 + * gcc plugin to help generate a little bit of entropy from program state,
83968 + * used during boot in the kernel
83969 + *
83970 + * TODO:
83971 + * - add ipa pass to identify not explicitly marked candidate functions
83972 + * - mix in more program state (function arguments/return values, loop variables, etc)
83973 + * - more instrumentation control via attribute parameters
83974 + *
83975 + * BUGS:
83976 + * - LTO needs -flto-partition=none for now
83977 + */
83978 +#include "gcc-plugin.h"
83979 +#include "config.h"
83980 +#include "system.h"
83981 +#include "coretypes.h"
83982 +#include "tree.h"
83983 +#include "tree-pass.h"
83984 +#include "flags.h"
83985 +#include "intl.h"
83986 +#include "toplev.h"
83987 +#include "plugin.h"
83988 +//#include "expr.h" where are you...
83989 +#include "diagnostic.h"
83990 +#include "plugin-version.h"
83991 +#include "tm.h"
83992 +#include "function.h"
83993 +#include "basic-block.h"
83994 +#include "gimple.h"
83995 +#include "rtl.h"
83996 +#include "emit-rtl.h"
83997 +#include "tree-flow.h"
83998 +
83999 +int plugin_is_GPL_compatible;
84000 +
84001 +static tree latent_entropy_decl;
84002 +
84003 +static struct plugin_info latent_entropy_plugin_info = {
84004 + .version = "201207271820",
84005 + .help = NULL
84006 +};
84007 +
84008 +static unsigned int execute_latent_entropy(void);
84009 +static bool gate_latent_entropy(void);
84010 +
84011 +static struct gimple_opt_pass latent_entropy_pass = {
84012 + .pass = {
84013 + .type = GIMPLE_PASS,
84014 + .name = "latent_entropy",
84015 + .gate = gate_latent_entropy,
84016 + .execute = execute_latent_entropy,
84017 + .sub = NULL,
84018 + .next = NULL,
84019 + .static_pass_number = 0,
84020 + .tv_id = TV_NONE,
84021 + .properties_required = PROP_gimple_leh | PROP_cfg,
84022 + .properties_provided = 0,
84023 + .properties_destroyed = 0,
84024 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
84025 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
84026 + }
84027 +};
84028 +
84029 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
84030 +{
84031 + if (TREE_CODE(*node) != FUNCTION_DECL) {
84032 + *no_add_attrs = true;
84033 + error("%qE attribute only applies to functions", name);
84034 + }
84035 + return NULL_TREE;
84036 +}
84037 +
84038 +static struct attribute_spec latent_entropy_attr = {
84039 + .name = "latent_entropy",
84040 + .min_length = 0,
84041 + .max_length = 0,
84042 + .decl_required = true,
84043 + .type_required = false,
84044 + .function_type_required = false,
84045 + .handler = handle_latent_entropy_attribute,
84046 +#if BUILDING_GCC_VERSION >= 4007
84047 + .affects_type_identity = false
84048 +#endif
84049 +};
84050 +
84051 +static void register_attributes(void *event_data, void *data)
84052 +{
84053 + register_attribute(&latent_entropy_attr);
84054 +}
84055 +
84056 +static bool gate_latent_entropy(void)
84057 +{
84058 + tree latent_entropy_attr;
84059 +
84060 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
84061 + return latent_entropy_attr != NULL_TREE;
84062 +}
84063 +
84064 +static unsigned HOST_WIDE_INT seed;
84065 +static unsigned HOST_WIDE_INT get_random_const(void)
84066 +{
84067 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
84068 + return seed;
84069 +}
84070 +
84071 +static enum tree_code get_op(tree *rhs)
84072 +{
84073 + static enum tree_code op;
84074 + unsigned HOST_WIDE_INT random_const;
84075 +
84076 + random_const = get_random_const();
84077 +
84078 + switch (op) {
84079 + case BIT_XOR_EXPR:
84080 + op = PLUS_EXPR;
84081 + break;
84082 +
84083 + case PLUS_EXPR:
84084 + if (rhs) {
84085 + op = LROTATE_EXPR;
84086 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
84087 + break;
84088 + }
84089 +
84090 + case LROTATE_EXPR:
84091 + default:
84092 + op = BIT_XOR_EXPR;
84093 + break;
84094 + }
84095 + if (rhs)
84096 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
84097 + return op;
84098 +}
84099 +
84100 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
84101 +{
84102 + gimple_stmt_iterator gsi;
84103 + gimple assign;
84104 + tree addxorrol, rhs;
84105 + enum tree_code op;
84106 +
84107 + op = get_op(&rhs);
84108 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
84109 + assign = gimple_build_assign(local_entropy, addxorrol);
84110 + find_referenced_vars_in(assign);
84111 +//debug_bb(bb);
84112 + gsi = gsi_after_labels(bb);
84113 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
84114 + update_stmt(assign);
84115 +}
84116 +
84117 +static void perturb_latent_entropy(basic_block bb, tree rhs)
84118 +{
84119 + gimple_stmt_iterator gsi;
84120 + gimple assign;
84121 + tree addxorrol, temp;
84122 +
84123 + // 1. create temporary copy of latent_entropy
84124 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
84125 + add_referenced_var(temp);
84126 + mark_sym_for_renaming(temp);
84127 +
84128 + // 2. read...
84129 + assign = gimple_build_assign(temp, latent_entropy_decl);
84130 + find_referenced_vars_in(assign);
84131 + gsi = gsi_after_labels(bb);
84132 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
84133 + update_stmt(assign);
84134 +
84135 + // 3. ...modify...
84136 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
84137 + assign = gimple_build_assign(temp, addxorrol);
84138 + find_referenced_vars_in(assign);
84139 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
84140 + update_stmt(assign);
84141 +
84142 + // 4. ...write latent_entropy
84143 + assign = gimple_build_assign(latent_entropy_decl, temp);
84144 + find_referenced_vars_in(assign);
84145 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
84146 + update_stmt(assign);
84147 +}
84148 +
84149 +static unsigned int execute_latent_entropy(void)
84150 +{
84151 + basic_block bb;
84152 + gimple assign;
84153 + gimple_stmt_iterator gsi;
84154 + tree local_entropy;
84155 +
84156 + if (!latent_entropy_decl) {
84157 + struct varpool_node *node;
84158 +
84159 + for (node = varpool_nodes; node; node = node->next) {
84160 + tree var = node->decl;
84161 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
84162 + continue;
84163 + latent_entropy_decl = var;
84164 +// debug_tree(var);
84165 + break;
84166 + }
84167 + if (!latent_entropy_decl) {
84168 +// debug_tree(current_function_decl);
84169 + return 0;
84170 + }
84171 + }
84172 +
84173 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
84174 +
84175 + // 1. create local entropy variable
84176 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
84177 + add_referenced_var(local_entropy);
84178 + mark_sym_for_renaming(local_entropy);
84179 +
84180 + // 2. initialize local entropy variable
84181 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
84182 + if (dom_info_available_p(CDI_DOMINATORS))
84183 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
84184 + gsi = gsi_start_bb(bb);
84185 +
84186 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
84187 +// gimple_set_location(assign, loc);
84188 + find_referenced_vars_in(assign);
84189 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
84190 + update_stmt(assign);
84191 + bb = bb->next_bb;
84192 +
84193 + // 3. instrument each BB with an operation on the local entropy variable
84194 + while (bb != EXIT_BLOCK_PTR) {
84195 + perturb_local_entropy(bb, local_entropy);
84196 + bb = bb->next_bb;
84197 + };
84198 +
84199 + // 4. mix local entropy into the global entropy variable
84200 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
84201 + return 0;
84202 +}
84203 +
84204 +static void start_unit_callback(void *gcc_data, void *user_data)
84205 +{
84206 +#if BUILDING_GCC_VERSION >= 4007
84207 + seed = get_random_seed(false);
84208 +#else
84209 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
84210 + seed *= seed;
84211 +#endif
84212 +
84213 + if (in_lto_p)
84214 + return;
84215 +
84216 + // extern u64 latent_entropy
84217 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
84218 +
84219 + TREE_STATIC(latent_entropy_decl) = 1;
84220 + TREE_PUBLIC(latent_entropy_decl) = 1;
84221 + TREE_USED(latent_entropy_decl) = 1;
84222 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
84223 + DECL_EXTERNAL(latent_entropy_decl) = 1;
84224 + DECL_ARTIFICIAL(latent_entropy_decl) = 0;
84225 + DECL_INITIAL(latent_entropy_decl) = NULL;
84226 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
84227 +// varpool_finalize_decl(latent_entropy_decl);
84228 +// varpool_mark_needed_node(latent_entropy_decl);
84229 +}
84230 +
84231 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
84232 +{
84233 + const char * const plugin_name = plugin_info->base_name;
84234 + struct register_pass_info latent_entropy_pass_info = {
84235 + .pass = &latent_entropy_pass.pass,
84236 + .reference_pass_name = "optimized",
84237 + .ref_pass_instance_number = 1,
84238 + .pos_op = PASS_POS_INSERT_BEFORE
84239 + };
84240 +
84241 + if (!plugin_default_version_check(version, &gcc_version)) {
84242 + error(G_("incompatible gcc/plugin versions"));
84243 + return 1;
84244 + }
84245 +
84246 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
84247 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
84248 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
84249 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
84250 +
84251 + return 0;
84252 +}
84253 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
84254 new file mode 100644
84255 index 0000000..9a72d97
84256 --- /dev/null
84257 +++ b/tools/gcc/size_overflow_hash.data
84258 @@ -0,0 +1,3171 @@
84259 +_000001_hash+alloc_dr+2+65495+_000001_hash+NULL
84260 +_000002_hash+__copy_from_user+3+10918+_000002_hash+NULL
84261 +_000003_hash+copy_from_user+3+17559+_000003_hash+NULL
84262 +_000004_hash+__copy_from_user_inatomic+3+4365+_000004_hash+NULL
84263 +_000005_hash+__copy_from_user_nocache+3+39351+_000005_hash+NULL
84264 +_000006_hash+__copy_to_user_inatomic+3+19214+_000006_hash+NULL
84265 +_000007_hash+do_xip_mapping_read+5+60297+_000007_hash+NULL
84266 +_000008_hash+hugetlbfs_read+3+11268+_000008_hash+NULL
84267 +_000009_hash+kmalloc+1+60432+_002597_hash+NULL+nohasharray
84268 +_000010_hash+kmalloc_array+1-2+9444+_000010_hash+NULL
84269 +_000012_hash+kmalloc_slab+1+11917+_000012_hash+NULL
84270 +_000013_hash+kmemdup+2+64015+_000013_hash+NULL
84271 +_000014_hash+__krealloc+2+14857+_000331_hash+NULL+nohasharray
84272 +_000015_hash+memdup_user+2+59590+_000015_hash+NULL
84273 +_000016_hash+module_alloc+1+63630+_000016_hash+NULL
84274 +_000017_hash+read_default_ldt+2+14302+_000017_hash+NULL
84275 +_000018_hash+read_kcore+3+63488+_000018_hash+NULL
84276 +_000019_hash+read_ldt+2+47570+_000019_hash+NULL
84277 +_000020_hash+read_zero+3+19366+_000020_hash+NULL
84278 +_000021_hash+__vmalloc_node+1+39308+_000021_hash+NULL
84279 +_000022_hash+vm_map_ram+2+23078+_001054_hash+NULL+nohasharray
84280 +_000023_hash+aa_simple_write_to_buffer+4-3+49683+_000023_hash+NULL
84281 +_000024_hash+ablkcipher_copy_iv+3+64140+_000024_hash+NULL
84282 +_000025_hash+ablkcipher_next_slow+4+47274+_000025_hash+NULL
84283 +_000026_hash+acpi_battery_write_alarm+3+1240+_000026_hash+NULL
84284 +_000027_hash+acpi_os_allocate+1+14892+_000027_hash+NULL
84285 +_000028_hash+acpi_system_write_wakeup_device+3+34853+_000028_hash+NULL
84286 +_000029_hash+adu_write+3+30487+_000029_hash+NULL
84287 +_000030_hash+aer_inject_write+3+52399+_000030_hash+NULL
84288 +_000031_hash+afs_alloc_flat_call+2-3+36399+_000031_hash+NULL
84289 +_000033_hash+afs_proc_cells_write+3+61139+_000033_hash+NULL
84290 +_000034_hash+afs_proc_rootcell_write+3+15822+_000034_hash+NULL
84291 +_000035_hash+agp_3_5_isochronous_node_enable+3+49465+_000035_hash+NULL
84292 +_000036_hash+agp_alloc_page_array+1+22554+_000036_hash+NULL
84293 +_000037_hash+ah_alloc_tmp+2+54378+_000037_hash+NULL
84294 +_000038_hash+ahash_setkey_unaligned+3+33521+_000038_hash+NULL
84295 +_000039_hash+alg_setkey+3+31485+_000039_hash+NULL
84296 +_000040_hash+aligned_kmalloc+1+3628+_000040_hash+NULL
84297 +_000041_hash+alloc_context+1+3194+_000041_hash+NULL
84298 +_000042_hash+alloc_ep_req+2+54860+_000042_hash+NULL
84299 +_000043_hash+alloc_fdmem+1+27083+_000043_hash+NULL
84300 +_000044_hash+alloc_flex_gd+1+57259+_000044_hash+NULL
84301 +_000045_hash+alloc_sglist+1-3-2+22960+_000045_hash+NULL
84302 +_000046_hash+aoedev_flush+2+44398+_000046_hash+NULL
84303 +_000047_hash+append_to_buffer+3+63550+_000047_hash+NULL
84304 +_000048_hash+asix_read_cmd+5+13245+_000048_hash+NULL
84305 +_000049_hash+asix_write_cmd+5+58192+_000049_hash+NULL
84306 +_000050_hash+asn1_octets_decode+2+9991+_000050_hash+NULL
84307 +_000051_hash+asn1_oid_decode+2+4999+_000051_hash+NULL
84308 +_000052_hash+at76_set_card_command+4+4471+_000052_hash+NULL
84309 +_000053_hash+ath6kl_add_bss_if_needed+6+24317+_000053_hash+NULL
84310 +_000054_hash+ath6kl_debug_roam_tbl_event+3+5224+_000054_hash+NULL
84311 +_000055_hash+ath6kl_mgmt_powersave_ap+6+13791+_000055_hash+NULL
84312 +_000056_hash+ath6kl_send_go_probe_resp+3+21113+_000056_hash+NULL
84313 +_000057_hash+ath6kl_set_ap_probe_resp_ies+3+50539+_000057_hash+NULL
84314 +_000058_hash+ath6kl_set_assoc_req_ies+3+43185+_000058_hash+NULL
84315 +_000059_hash+ath6kl_wmi_bssinfo_event_rx+3+2275+_000059_hash+NULL
84316 +_000060_hash+ath6kl_wmi_send_action_cmd+7+58860+_000060_hash+NULL
84317 +_000061_hash+__ath6kl_wmi_send_mgmt_cmd+7+38971+_000061_hash+NULL
84318 +_000062_hash+attach_hdlc_protocol+3+19986+_000062_hash+NULL
84319 +_000063_hash+audio_write+4+54261+_001597_hash+NULL+nohasharray
84320 +_000064_hash+audit_unpack_string+3+13748+_000064_hash+NULL
84321 +_000065_hash+av7110_vbi_write+3+34384+_000065_hash+NULL
84322 +_000066_hash+ax25_setsockopt+5+42740+_000066_hash+NULL
84323 +_000067_hash+b43_debugfs_write+3+34838+_000067_hash+NULL
84324 +_000068_hash+b43legacy_debugfs_write+3+28556+_000068_hash+NULL
84325 +_000069_hash+bch_alloc+1+4593+_000069_hash+NULL
84326 +_000070_hash+befs_nls2utf+3+17163+_000070_hash+NULL
84327 +_000071_hash+befs_utf2nls+3+25628+_000071_hash+NULL
84328 +_000072_hash+bfad_debugfs_write_regrd+3+15218+_000072_hash+NULL
84329 +_000073_hash+bfad_debugfs_write_regwr+3+61841+_000073_hash+NULL
84330 +_000074_hash+bio_alloc_map_data+1-2+50782+_000074_hash+NULL
84331 +_000076_hash+bio_kmalloc+2+54672+_000076_hash+NULL
84332 +_000077_hash+blkcipher_copy_iv+3+24075+_000077_hash+NULL
84333 +_000078_hash+blkcipher_next_slow+4+52733+_000078_hash+NULL
84334 +_000079_hash+bl_pipe_downcall+3+34264+_000079_hash+NULL
84335 +_000080_hash+bnad_debugfs_write_regrd+3+6706+_000080_hash+NULL
84336 +_000081_hash+bnad_debugfs_write_regwr+3+57500+_000081_hash+NULL
84337 +_000082_hash+bnx2fc_cmd_mgr_alloc+2-3+24873+_000082_hash+NULL
84338 +_000084_hash+bnx2_nvram_write+4+7790+_000084_hash+NULL
84339 +_000085_hash+brcmf_sdbrcm_downloadvars+3+42064+_000085_hash+NULL
84340 +_000086_hash+btmrvl_gpiogap_write+3+35053+_000086_hash+NULL
84341 +_000087_hash+btmrvl_hscfgcmd_write+3+27143+_000087_hash+NULL
84342 +_000088_hash+btmrvl_hscmd_write+3+27089+_000088_hash+NULL
84343 +_000089_hash+btmrvl_hsmode_write+3+42252+_000089_hash+NULL
84344 +_000090_hash+btmrvl_pscmd_write+3+29504+_000090_hash+NULL
84345 +_000091_hash+btmrvl_psmode_write+3+3703+_000091_hash+NULL
84346 +_000092_hash+btrfs_alloc_delayed_item+1+11678+_000092_hash+NULL
84347 +_000093_hash+cache_do_downcall+3+6926+_000093_hash+NULL
84348 +_000094_hash+cachefiles_cook_key+2+33274+_000094_hash+NULL
84349 +_000095_hash+cachefiles_daemon_write+3+43535+_000095_hash+NULL
84350 +_000096_hash+capi_write+3+35104+_000096_hash+NULL
84351 +_000097_hash+carl9170_debugfs_write+3+50857+_000097_hash+NULL
84352 +_000098_hash+cciss_allocate_sg_chain_blocks+2-3+5368+_000098_hash+NULL
84353 +_000100_hash+cciss_proc_write+3+10259+_000100_hash+NULL
84354 +_000101_hash+cdrom_read_cdda_old+4+27664+_000101_hash+NULL
84355 +_000102_hash+ceph_alloc_page_vector+1+18710+_000102_hash+NULL
84356 +_000103_hash+ceph_buffer_new+1+35974+_000103_hash+NULL
84357 +_000104_hash+ceph_copy_user_to_page_vector+4+656+_000104_hash+NULL
84358 +_000105_hash+ceph_get_direct_page_vector+2+41917+_000105_hash+NULL
84359 +_000106_hash+ceph_msg_new+2+5846+_000106_hash+NULL
84360 +_000107_hash+ceph_setxattr+4+18913+_000107_hash+NULL
84361 +_000108_hash+cfi_read_pri+3+24366+_000108_hash+NULL
84362 +_000109_hash+cgroup_write_string+5+10900+_000109_hash+NULL
84363 +_000110_hash+cgroup_write_X64+5+54514+_000110_hash+NULL
84364 +_000111_hash+change_xattr+5+61390+_000111_hash+NULL
84365 +_000112_hash+check_load_and_stores+2+2143+_000112_hash+NULL
84366 +_000113_hash+cifs_idmap_key_instantiate+3+54503+_000113_hash+NULL
84367 +_000114_hash+cifs_security_flags_proc_write+3+5484+_000114_hash+NULL
84368 +_000115_hash+cifs_setxattr+4+23957+_000115_hash+NULL
84369 +_000116_hash+cifs_spnego_key_instantiate+3+23588+_000116_hash+NULL
84370 +_000117_hash+ci_ll_write+4+3740+_000117_hash+NULL
84371 +_000118_hash+cld_pipe_downcall+3+15058+_000118_hash+NULL
84372 +_000119_hash+clear_refs_write+3+61904+_000119_hash+NULL
84373 +_000120_hash+clusterip_proc_write+3+44729+_000120_hash+NULL
84374 +_000121_hash+cm4040_write+3+58079+_000121_hash+NULL
84375 +_000122_hash+cm_copy_private_data+2+3649+_000122_hash+NULL
84376 +_000123_hash+cmm_write+3+2896+_000123_hash+NULL
84377 +_000124_hash+cm_write+3+36858+_000124_hash+NULL
84378 +_000125_hash+coda_psdev_write+3+1711+_000125_hash+NULL
84379 +_000126_hash+codec_reg_read_file+3+36280+_000126_hash+NULL
84380 +_000127_hash+command_file_write+3+31318+_000127_hash+NULL
84381 +_000128_hash+command_write+3+58841+_000128_hash+NULL
84382 +_000129_hash+comm_write+3+44537+_001532_hash+NULL+nohasharray
84383 +_000130_hash+concat_writev+3+21451+_000130_hash+NULL
84384 +_000131_hash+copy_and_check+3+19089+_000131_hash+NULL
84385 +_000132_hash+copy_from_user_toio+3+31966+_000132_hash+NULL
84386 +_000133_hash+copy_items+6+50140+_000133_hash+NULL
84387 +_000134_hash+copy_macs+4+45534+_000134_hash+NULL
84388 +_000135_hash+__copy_to_user+3+17551+_000135_hash+NULL
84389 +_000136_hash+copy_vm86_regs_from_user+3+45340+_000136_hash+NULL
84390 +_000137_hash+cosa_write+3+1774+_000137_hash+NULL
84391 +_000138_hash+create_entry+2+33479+_000138_hash+NULL
84392 +_000139_hash+create_queues+2-3+9088+_000139_hash+NULL
84393 +_000141_hash+create_xattr+5+54106+_000141_hash+NULL
84394 +_000142_hash+create_xattr_datum+5+33356+_000142_hash+NULL
84395 +_000143_hash+csum_partial_copy_fromiovecend+3-4+9957+_000143_hash+NULL
84396 +_000145_hash+ctrl_out+3-5+8712+_000145_hash+NULL
84397 +_000147_hash+cx24116_writeregN+4+41975+_000147_hash+NULL
84398 +_000148_hash+cxacru_cm_get_array+4+4412+_000148_hash+NULL
84399 +_000149_hash+cxgbi_alloc_big_mem+1+4707+_000149_hash+NULL
84400 +_000150_hash+dac960_user_command_proc_write+3+3071+_000150_hash+NULL
84401 +_000151_hash+datablob_format+2+39571+_002156_hash+NULL+nohasharray
84402 +_000152_hash+dccp_feat_clone_sp_val+3+11942+_000152_hash+NULL
84403 +_000153_hash+dccp_setsockopt_ccid+4+30701+_000153_hash+NULL
84404 +_000154_hash+dccp_setsockopt_cscov+2+37766+_000154_hash+NULL
84405 +_000155_hash+dccp_setsockopt_service+4+65336+_000155_hash+NULL
84406 +_000156_hash+ddb_output_write+3+31902+_000156_hash+NULL
84407 +_000157_hash+ddebug_proc_write+3+18055+_000157_hash+NULL
84408 +_000158_hash+dev_config+3+8506+_000158_hash+NULL
84409 +_000159_hash+device_write+3+45156+_000159_hash+NULL
84410 +_000160_hash+devm_kzalloc+2+4966+_000160_hash+NULL
84411 +_000161_hash+devres_alloc+2+551+_000161_hash+NULL
84412 +_000162_hash+dfs_file_write+3+41196+_000162_hash+NULL
84413 +_000163_hash+direct_entry+3+38836+_000163_hash+NULL
84414 +_000164_hash+dispatch_proc_write+3+44320+_000164_hash+NULL
84415 +_000165_hash+diva_os_copy_from_user+4+7792+_000165_hash+NULL
84416 +_000166_hash+dlm_alloc_pagevec+1+54296+_000166_hash+NULL
84417 +_000167_hash+dlmfs_file_read+3+28385+_000167_hash+NULL
84418 +_000168_hash+dlmfs_file_write+3+6892+_000168_hash+NULL
84419 +_000169_hash+dm_read+3+15674+_000169_hash+NULL
84420 +_000170_hash+dm_write+3+2513+_000170_hash+NULL
84421 +_000171_hash+__dn_setsockopt+5+13060+_000171_hash+NULL
84422 +_000172_hash+dns_query+3+9676+_000172_hash+NULL
84423 +_000173_hash+dns_resolver_instantiate+3+63314+_000173_hash+NULL
84424 +_000174_hash+do_add_counters+3+3992+_000174_hash+NULL
84425 +_000175_hash+__do_config_autodelink+3+58763+_000175_hash+NULL
84426 +_000176_hash+do_ip_setsockopt+5+41852+_000176_hash+NULL
84427 +_000177_hash+do_ipv6_setsockopt+5+18215+_000177_hash+NULL
84428 +_000178_hash+do_ip_vs_set_ctl+4+48641+_000178_hash+NULL
84429 +_000179_hash+do_kimage_alloc+3+64827+_000179_hash+NULL
84430 +_000180_hash+do_register_entry+4+29478+_000180_hash+NULL
84431 +_000181_hash+do_tty_write+5+44896+_000181_hash+NULL
84432 +_000182_hash+do_update_counters+4+2259+_000182_hash+NULL
84433 +_000183_hash+dsp_write+2+46218+_000183_hash+NULL
84434 +_000184_hash+dup_to_netobj+3+26363+_000184_hash+NULL
84435 +_000185_hash+dvb_aplay+3+56296+_000185_hash+NULL
84436 +_000186_hash+dvb_ca_en50221_io_write+3+43533+_000186_hash+NULL
84437 +_000187_hash+dvbdmx_write+3+19423+_000187_hash+NULL
84438 +_000188_hash+dvb_play+3+50814+_000188_hash+NULL
84439 +_000189_hash+dw210x_op_rw+6+39915+_000189_hash+NULL
84440 +_000190_hash+dwc3_link_state_write+3+12641+_000190_hash+NULL
84441 +_000191_hash+dwc3_mode_write+3+51997+_000191_hash+NULL
84442 +_000192_hash+dwc3_testmode_write+3+30516+_000192_hash+NULL
84443 +_000193_hash+ecryptfs_copy_filename+4+11868+_000193_hash+NULL
84444 +_000194_hash+ecryptfs_miscdev_write+3+26847+_000194_hash+NULL
84445 +_000195_hash+ecryptfs_send_miscdev+2+64816+_000195_hash+NULL
84446 +_000196_hash+efx_tsoh_heap_alloc+2+58545+_000196_hash+NULL
84447 +_000197_hash+emi26_writememory+4+57908+_000197_hash+NULL
84448 +_000198_hash+emi62_writememory+4+29731+_000198_hash+NULL
84449 +_000199_hash+encrypted_instantiate+3+3168+_000199_hash+NULL
84450 +_000200_hash+encrypted_update+3+13414+_000200_hash+NULL
84451 +_000201_hash+ep0_write+3+14536+_001328_hash+NULL+nohasharray
84452 +_000202_hash+ep_read+3+58813+_000202_hash+NULL
84453 +_000203_hash+ep_write+3+59008+_000203_hash+NULL
84454 +_000204_hash+erst_dbg_write+3+46715+_000204_hash+NULL
84455 +_000205_hash+esp_alloc_tmp+2+40558+_000205_hash+NULL
84456 +_000206_hash+exofs_read_lookup_dev_table+3+17733+_000206_hash+NULL
84457 +_000207_hash+ext4_kvmalloc+1+14796+_000207_hash+NULL
84458 +_000208_hash+ezusb_writememory+4+45976+_000208_hash+NULL
84459 +_000209_hash+fanotify_write+3+64623+_000209_hash+NULL
84460 +_000210_hash+fd_copyin+3+56247+_000210_hash+NULL
84461 +_000211_hash+ffs_epfile_io+3+64886+_000211_hash+NULL
84462 +_000212_hash+ffs_prepare_buffer+2+59892+_000212_hash+NULL
84463 +_000213_hash+f_hidg_write+3+7932+_000213_hash+NULL
84464 +_000214_hash+file_read_actor+4+1401+_000214_hash+NULL
84465 +_000215_hash+fill_write_buffer+3+3142+_000215_hash+NULL
84466 +_000216_hash+fl_create+5+56435+_000216_hash+NULL
84467 +_000217_hash+ftdi_elan_write+3+57309+_000217_hash+NULL
84468 +_000218_hash+fuse_conn_limit_write+3+30777+_000218_hash+NULL
84469 +_000219_hash+fw_iso_buffer_init+3+54582+_000219_hash+NULL
84470 +_000220_hash+garmin_write_bulk+3+58191+_000220_hash+NULL
84471 +_000221_hash+garp_attr_create+3+3883+_000221_hash+NULL
84472 +_000222_hash+get_arg+3+5694+_000222_hash+NULL
84473 +_000223_hash+getdqbuf+1+62908+_000223_hash+NULL
84474 +_000224_hash+get_fdb_entries+3+41916+_000224_hash+NULL
84475 +_000225_hash+get_indirect_ea+4+51869+_000225_hash+NULL
84476 +_000226_hash+get_registers+3+26187+_000226_hash+NULL
84477 +_000227_hash+get_scq+2+10897+_000227_hash+NULL
84478 +_000228_hash+get_server_iovec+2+16804+_000228_hash+NULL
84479 +_000229_hash+get_ucode_user+3+38202+_000229_hash+NULL
84480 +_000230_hash+get_user_cpu_mask+2+14861+_000230_hash+NULL
84481 +_000231_hash+gfs2_alloc_sort_buffer+1+18275+_000231_hash+NULL
84482 +_000232_hash+gfs2_glock_nq_m+1+20347+_000232_hash+NULL
84483 +_000233_hash+gigaset_initcs+2+43753+_000233_hash+NULL
84484 +_000234_hash+gigaset_initdriver+2+1060+_000234_hash+NULL
84485 +_000235_hash+gs_alloc_req+2+58883+_000235_hash+NULL
84486 +_000236_hash+gs_buf_alloc+2+25067+_000236_hash+NULL
84487 +_000237_hash+gsm_data_alloc+3+42437+_000237_hash+NULL
84488 +_000238_hash+gss_pipe_downcall+3+23182+_000238_hash+NULL
84489 +_000239_hash+handle_request+9+10024+_000239_hash+NULL
84490 +_000240_hash+hash_new+1+62224+_000240_hash+NULL
84491 +_000241_hash+hashtab_create+3+33769+_000241_hash+NULL
84492 +_000242_hash+hcd_buffer_alloc+2+27495+_000242_hash+NULL
84493 +_000243_hash+hci_sock_setsockopt+5+28993+_000243_hash+NULL
84494 +_000244_hash+heap_init+2+49617+_000244_hash+NULL
84495 +_000245_hash+hest_ghes_dev_register+1+46766+_000245_hash+NULL
84496 +_000246_hash+hidraw_get_report+3+45609+_000246_hash+NULL
84497 +_000247_hash+hidraw_report_event+3+49578+_000509_hash+NULL+nohasharray
84498 +_000248_hash+hidraw_send_report+3+23449+_000248_hash+NULL
84499 +_000249_hash+hpfs_translate_name+3+41497+_000249_hash+NULL
84500 +_000250_hash+hysdn_conf_write+3+52145+_000250_hash+NULL
84501 +_000251_hash+hysdn_log_write+3+48694+_000251_hash+NULL
84502 +_000252_hash+__i2400mu_send_barker+3+23652+_000252_hash+NULL
84503 +_000253_hash+i2cdev_read+3+1206+_000253_hash+NULL
84504 +_000254_hash+i2cdev_write+3+23310+_000254_hash+NULL
84505 +_000255_hash+i2o_parm_field_get+5+34477+_000255_hash+NULL
84506 +_000256_hash+i2o_parm_table_get+6+61635+_000256_hash+NULL
84507 +_000257_hash+ib_copy_from_udata+3+59502+_000257_hash+NULL
84508 +_000258_hash+ib_ucm_alloc_data+3+36885+_000258_hash+NULL
84509 +_000259_hash+ib_umad_write+3+47993+_000259_hash+NULL
84510 +_000260_hash+ib_uverbs_unmarshall_recv+5+12251+_000260_hash+NULL
84511 +_000261_hash+icn_writecmd+2+38629+_000261_hash+NULL
84512 +_000262_hash+ide_driver_proc_write+3+32493+_000262_hash+NULL
84513 +_000263_hash+ide_settings_proc_write+3+35110+_000263_hash+NULL
84514 +_000264_hash+idetape_chrdev_write+3+53976+_000264_hash+NULL
84515 +_000265_hash+idmap_pipe_downcall+3+14591+_000265_hash+NULL
84516 +_000266_hash+ieee80211_build_probe_req+7-5+27660+_000266_hash+NULL
84517 +_000267_hash+ieee80211_if_write+3+34894+_000267_hash+NULL
84518 +_000268_hash+if_write+3+51756+_000268_hash+NULL
84519 +_000269_hash+ilo_write+3+64378+_000269_hash+NULL
84520 +_000270_hash+ima_write_policy+3+40548+_000270_hash+NULL
84521 +_000271_hash+init_data_container+1+60709+_000271_hash+NULL
84522 +_000272_hash+init_send_hfcd+1+34586+_000272_hash+NULL
84523 +_000273_hash+insert_dent+7+65034+_000273_hash+NULL
84524 +_000274_hash+interpret_user_input+2+19393+_000274_hash+NULL
84525 +_000275_hash+int_proc_write+3+39542+_000275_hash+NULL
84526 +_000276_hash+ioctl_private_iw_point+7+1273+_000276_hash+NULL
84527 +_000277_hash+iov_iter_copy_from_user+4+31942+_000277_hash+NULL
84528 +_000278_hash+iov_iter_copy_from_user_atomic+4+56368+_000278_hash+NULL
84529 +_000279_hash+iowarrior_write+3+18604+_000279_hash+NULL
84530 +_000280_hash+ipc_alloc+1+1192+_000280_hash+NULL
84531 +_000281_hash+ipc_rcu_alloc+1+21208+_000281_hash+NULL
84532 +_000282_hash+ip_options_get_from_user+4+64958+_000282_hash+NULL
84533 +_000283_hash+ipv6_renew_option+3+38813+_000283_hash+NULL
84534 +_000284_hash+ip_vs_conn_fill_param_sync+6+29771+_002404_hash+NULL+nohasharray
84535 +_000285_hash+ip_vs_create_timeout_table+2+64478+_000285_hash+NULL
84536 +_000286_hash+ipw_queue_tx_init+3+49161+_000286_hash+NULL
84537 +_000287_hash+irda_setsockopt+5+19824+_000287_hash+NULL
84538 +_000288_hash+irias_new_octseq_value+2+13596+_003296_hash+NULL+nohasharray
84539 +_000289_hash+ir_lirc_transmit_ir+3+64403+_000289_hash+NULL
84540 +_000290_hash+irnet_ctrl_write+3+24139+_000290_hash+NULL
84541 +_000291_hash+isdn_add_channels+3+40905+_000291_hash+NULL
84542 +_000292_hash+isdn_ppp_fill_rq+2+41428+_000292_hash+NULL
84543 +_000293_hash+isdn_ppp_write+4+29109+_000293_hash+NULL
84544 +_000294_hash+isdn_read+3+50021+_000294_hash+NULL
84545 +_000295_hash+isdn_v110_open+3+2418+_000295_hash+NULL
84546 +_000296_hash+isdn_writebuf_stub+4+52383+_000296_hash+NULL
84547 +_000297_hash+islpci_mgt_transmit+5+34133+_000297_hash+NULL
84548 +_000298_hash+iso_callback+3+43208+_000298_hash+NULL
84549 +_000299_hash+iso_packets_buffer_init+3+29061+_000299_hash+NULL
84550 +_000300_hash+it821x_firmware_command+3+8628+_000300_hash+NULL
84551 +_000301_hash+ivtv_buf_copy_from_user+4+25502+_000301_hash+NULL
84552 +_000302_hash+iwch_alloc_fastreg_pbl+2+40153+_000302_hash+NULL
84553 +_000303_hash+iwl_calib_set+3+34400+_002188_hash+NULL+nohasharray
84554 +_000304_hash+jbd2_journal_init_revoke_table+1+36336+_000304_hash+NULL
84555 +_000305_hash+jffs2_alloc_full_dirent+1+60179+_001111_hash+NULL+nohasharray
84556 +_000306_hash+journal_init_revoke_table+1+56331+_000306_hash+NULL
84557 +_000307_hash+kcalloc+1-2+27770+_000307_hash+NULL
84558 +_000309_hash+keyctl_instantiate_key_common+4+47889+_000309_hash+NULL
84559 +_000310_hash+keyctl_update_key+3+26061+_000310_hash+NULL
84560 +_000311_hash+__kfifo_alloc+2-3+22173+_000311_hash+NULL
84561 +_000313_hash+kfifo_copy_from_user+3+5091+_000313_hash+NULL
84562 +_000314_hash+kmalloc_node+1+50163+_003293_hash+NULL+nohasharray
84563 +_000315_hash+kmalloc_parameter+1+65279+_000315_hash+NULL
84564 +_000316_hash+kmem_alloc+1+31920+_000316_hash+NULL
84565 +_000317_hash+kobj_map+2-3+9566+_000317_hash+NULL
84566 +_000319_hash+kone_receive+4+4690+_000319_hash+NULL
84567 +_000320_hash+kone_send+4+63435+_000320_hash+NULL
84568 +_000321_hash+krealloc+2+14908+_000321_hash+NULL
84569 +_000322_hash+kvmalloc+1+32646+_000322_hash+NULL
84570 +_000323_hash+kvm_read_guest_atomic+4+10765+_000323_hash+NULL
84571 +_000324_hash+kvm_read_guest_cached+4+39666+_000324_hash+NULL
84572 +_000325_hash+kvm_read_guest_page+5+18074+_000325_hash+NULL
84573 +_000326_hash+kzalloc+1+54740+_000326_hash+NULL
84574 +_000327_hash+l2cap_sock_setsockopt+5+50207+_000327_hash+NULL
84575 +_000328_hash+l2cap_sock_setsockopt_old+4+29346+_000328_hash+NULL
84576 +_000329_hash+lane2_associate_req+4+45398+_000329_hash+NULL
84577 +_000330_hash+lbs_debugfs_write+3+48413+_000330_hash+NULL
84578 +_000331_hash+lcd_write+3+14857+_000331_hash+&_000014_hash
84579 +_000332_hash+ldm_frag_add+2+5611+_000332_hash+NULL
84580 +_000333_hash+__lgread+4+31668+_000333_hash+NULL
84581 +_000334_hash+libipw_alloc_txb+1-3-2+27579+_000334_hash+NULL
84582 +_000335_hash+link_send_sections_long+4+46556+_000335_hash+NULL
84583 +_000336_hash+listxattr+3+12769+_000336_hash+NULL
84584 +_000337_hash+LoadBitmap+2+19658+_000337_hash+NULL
84585 +_000338_hash+load_msg+2+95+_000338_hash+NULL
84586 +_000339_hash+lpfc_debugfs_dif_err_write+3+17424+_000339_hash+NULL
84587 +_000340_hash+lp_write+3+9511+_000340_hash+NULL
84588 +_000341_hash+mb_cache_create+2+17307+_000341_hash+NULL
84589 +_000342_hash+mce_write+3+26201+_000342_hash+NULL
84590 +_000343_hash+mcs7830_get_reg+3+33308+_000343_hash+NULL
84591 +_000344_hash+mcs7830_set_reg+3+31413+_000344_hash+NULL
84592 +_000345_hash+memcpy_fromiovec+3+55247+_000345_hash+NULL
84593 +_000346_hash+memcpy_fromiovecend+3-4+2707+_000346_hash+NULL
84594 +_000348_hash+mempool_kmalloc+2+53831+_000348_hash+NULL
84595 +_000349_hash+mempool_resize+2+47983+_001821_hash+NULL+nohasharray
84596 +_000350_hash+mem_rw+3+22085+_000350_hash+NULL
84597 +_000351_hash+mgmt_control+3+7349+_000351_hash+NULL
84598 +_000352_hash+mgmt_pending_add+5+46976+_000352_hash+NULL
84599 +_000353_hash+mlx4_ib_alloc_fast_reg_page_list+2+46119+_000353_hash+NULL
84600 +_000354_hash+mmc_alloc_sg+1+21504+_000354_hash+NULL
84601 +_000355_hash+mmc_send_bus_test+4+18285+_000355_hash+NULL
84602 +_000356_hash+mmc_send_cxd_data+5+38655+_000356_hash+NULL
84603 +_000357_hash+module_alloc_update_bounds+1+47205+_000357_hash+NULL
84604 +_000358_hash+move_addr_to_kernel+2+32673+_000358_hash+NULL
84605 +_000359_hash+mpi_alloc_limb_space+1+23190+_000359_hash+NULL
84606 +_000360_hash+mpi_resize+2+44674+_000360_hash+NULL
84607 +_000361_hash+mptctl_getiocinfo+2+28545+_000361_hash+NULL
84608 +_000362_hash+mtdchar_readoob+4+31200+_000362_hash+NULL
84609 +_000363_hash+mtdchar_write+3+56831+_002688_hash+NULL+nohasharray
84610 +_000364_hash+mtdchar_writeoob+4+3393+_000364_hash+NULL
84611 +_000365_hash+mtd_device_parse_register+5+5024+_000365_hash+NULL
84612 +_000366_hash+mtf_test_write+3+18844+_000366_hash+NULL
84613 +_000367_hash+mtrr_write+3+59622+_000367_hash+NULL
84614 +_000368_hash+musb_test_mode_write+3+33518+_000368_hash+NULL
84615 +_000369_hash+mwifiex_get_common_rates+3+17131+_000369_hash+NULL
84616 +_000370_hash+mwifiex_update_curr_bss_params+5+16908+_000370_hash+NULL
84617 +_000371_hash+nand_bch_init+2-3+16280+_001341_hash+NULL+nohasharray
84618 +_000373_hash+ncp_file_write+3+3813+_000373_hash+NULL
84619 +_000374_hash+ncp__vol2io+5+4804+_000374_hash+NULL
84620 +_000375_hash+nes_alloc_fast_reg_page_list+2+33523+_000375_hash+NULL
84621 +_000376_hash+nfc_targets_found+3+29886+_000376_hash+NULL
84622 +_000377_hash+nfs4_acl_new+1+49806+_000377_hash+NULL
84623 +_000378_hash+nfs4_write_cached_acl+4+15070+_000378_hash+NULL
84624 +_000379_hash+nfsd_cache_update+3+59574+_000379_hash+NULL
84625 +_000380_hash+nfsd_symlink+6+63442+_000380_hash+NULL
84626 +_000381_hash+nfs_idmap_get_desc+2-4+42990+_000381_hash+NULL
84627 +_000383_hash+nfs_readdir_make_qstr+3+12509+_000383_hash+NULL
84628 +_000384_hash+note_last_dentry+3+12285+_000384_hash+NULL
84629 +_000385_hash+ntfs_copy_from_user+3-5+15072+_000385_hash+NULL
84630 +_000387_hash+__ntfs_copy_from_user_iovec_inatomic+3-4+38153+_000387_hash+NULL
84631 +_000389_hash+ntfs_ucstonls+3+23097+_000389_hash+NULL
84632 +_000390_hash+nvme_alloc_iod+1+56027+_000390_hash+NULL
84633 +_000391_hash+nvram_write+3+3894+_000391_hash+NULL
84634 +_000392_hash+o2hb_debug_create+4+18744+_000392_hash+NULL
84635 +_000393_hash+o2net_send_message_vec+4+879+_001792_hash+NULL+nohasharray
84636 +_000394_hash+ocfs2_control_cfu+2+37750+_000394_hash+NULL
84637 +_000395_hash+oom_adjust_write+3+41116+_000395_hash+NULL
84638 +_000396_hash+oom_score_adj_write+3+42594+_000396_hash+NULL
84639 +_000397_hash+opera1_xilinx_rw+5+31453+_000397_hash+NULL
84640 +_000398_hash+oprofilefs_ulong_from_user+3+57251+_000398_hash+NULL
84641 +_000399_hash+opticon_write+4+60775+_000399_hash+NULL
84642 +_000400_hash+orig_node_add_if+2+32833+_000400_hash+NULL
84643 +_000401_hash+orig_node_del_if+2+28371+_000401_hash+NULL
84644 +_000402_hash+p9_check_zc_errors+4+15534+_000402_hash+NULL
84645 +_000403_hash+packet_buffer_init+2+1607+_000403_hash+NULL
84646 +_000404_hash+packet_setsockopt+5+17662+_000404_hash+NULL
84647 +_000405_hash+parse_command+2+37079+_000405_hash+NULL
84648 +_000406_hash+pcbit_writecmd+2+12332+_000406_hash+NULL
84649 +_000407_hash+pcmcia_replace_cis+3+57066+_000407_hash+NULL
84650 +_000408_hash+pgctrl_write+3+50453+_000408_hash+NULL
84651 +_000409_hash+pg_write+3+40766+_000409_hash+NULL
84652 +_000410_hash+pidlist_allocate+1+64404+_000410_hash+NULL
84653 +_000411_hash+pipe_iov_copy_from_user+3+23102+_000411_hash+NULL
84654 +_000412_hash+pipe_iov_copy_to_user+3+3447+_000412_hash+NULL
84655 +_000413_hash+pkt_add+3+39897+_000413_hash+NULL
84656 +_000414_hash+pktgen_if_write+3+55628+_000414_hash+NULL
84657 +_000415_hash+platform_device_add_data+3+310+_000415_hash+NULL
84658 +_000416_hash+platform_device_add_resources+3+13289+_000416_hash+NULL
84659 +_000417_hash+pm_qos_power_write+3+52513+_000417_hash+NULL
84660 +_000418_hash+pnpbios_proc_write+3+19758+_000418_hash+NULL
84661 +_000419_hash+pool_allocate+3+42012+_000419_hash+NULL
84662 +_000420_hash+posix_acl_alloc+1+48063+_000420_hash+NULL
84663 +_000421_hash+ppp_cp_parse_cr+4+5214+_000421_hash+NULL
84664 +_000422_hash+ppp_write+3+34034+_000422_hash+NULL
84665 +_000423_hash+pp_read+3+33210+_000423_hash+NULL
84666 +_000424_hash+pp_write+3+39554+_000424_hash+NULL
84667 +_000425_hash+printer_req_alloc+2+62687+_001807_hash+NULL+nohasharray
84668 +_000426_hash+printer_write+3+60276+_000426_hash+NULL
84669 +_000427_hash+prism2_set_genericelement+3+29277+_000427_hash+NULL
84670 +_000428_hash+__probe_kernel_read+3+61119+_000428_hash+NULL
84671 +_000429_hash+__probe_kernel_write+3+29842+_000429_hash+NULL
84672 +_000430_hash+proc_coredump_filter_write+3+25625+_000430_hash+NULL
84673 +_000431_hash+_proc_do_string+2+6376+_000431_hash+NULL
84674 +_000432_hash+process_vm_rw_pages+5-6+15954+_000432_hash+NULL
84675 +_000434_hash+proc_loginuid_write+3+63648+_000434_hash+NULL
84676 +_000435_hash+proc_pid_attr_write+3+63845+_000435_hash+NULL
84677 +_000436_hash+proc_scsi_devinfo_write+3+32064+_000436_hash+NULL
84678 +_000437_hash+proc_scsi_write+3+29142+_000437_hash+NULL
84679 +_000438_hash+proc_scsi_write_proc+3+267+_000438_hash+NULL
84680 +_000439_hash+pstore_mkfile+5+50830+_000439_hash+NULL
84681 +_000440_hash+pti_char_write+3+60960+_000440_hash+NULL
84682 +_000441_hash+ptrace_writedata+4+45021+_000441_hash+NULL
84683 +_000442_hash+pt_write+3+40159+_000442_hash+NULL
84684 +_000443_hash+pvr2_ioread_set_sync_key+3+59882+_000443_hash+NULL
84685 +_000444_hash+pvr2_stream_buffer_count+2+33719+_000444_hash+NULL
84686 +_000445_hash+qdisc_class_hash_alloc+1+18262+_000445_hash+NULL
84687 +_000446_hash+r3964_write+4+57662+_000446_hash+NULL
84688 +_000447_hash+raw_seticmpfilter+3+6888+_000447_hash+NULL
84689 +_000448_hash+raw_setsockopt+5+45800+_000448_hash+NULL
84690 +_000449_hash+rawv6_seticmpfilter+5+12137+_000449_hash+NULL
84691 +_000450_hash+ray_cs_essid_proc_write+3+17875+_000450_hash+NULL
84692 +_000451_hash+rbd_add+3+16366+_000451_hash+NULL
84693 +_000452_hash+rbd_snap_add+4+19678+_000452_hash+NULL
84694 +_000453_hash+rdma_set_ib_paths+3+45592+_000453_hash+NULL
84695 +_000454_hash+rds_page_copy_user+4+35691+_000454_hash+NULL
84696 +_000455_hash+read+3+9397+_000455_hash+NULL
84697 +_000456_hash+read_buf+2+20469+_000456_hash+NULL
84698 +_000457_hash+read_cis_cache+4+29735+_000457_hash+NULL
84699 +_000458_hash+realloc_buffer+2+25816+_000458_hash+NULL
84700 +_000459_hash+realloc_packet_buffer+2+25569+_000459_hash+NULL
84701 +_000460_hash+receive_DataRequest+3+9904+_000460_hash+NULL
84702 +_000461_hash+recent_mt_proc_write+3+8206+_000461_hash+NULL
84703 +_000462_hash+regmap_access_read_file+3+37223+_000462_hash+NULL
84704 +_000463_hash+regmap_bulk_write+4+59049+_000463_hash+NULL
84705 +_000464_hash+regmap_map_read_file+3+37685+_000464_hash+NULL
84706 +_000465_hash+regset_tls_set+4+18459+_000465_hash+NULL
84707 +_000466_hash+reg_w_buf+3+27724+_000466_hash+NULL
84708 +_000467_hash+reg_w_ixbuf+4+34736+_000467_hash+NULL
84709 +_000468_hash+remote_settings_file_write+3+22987+_000468_hash+NULL
84710 +_000469_hash+request_key_auth_new+3+38092+_000469_hash+NULL
84711 +_000470_hash+restore_i387_fxsave+2+17528+_000470_hash+NULL
84712 +_000471_hash+revalidate+2+19043+_000471_hash+NULL
84713 +_000472_hash+rfcomm_sock_setsockopt+5+18254+_000472_hash+NULL
84714 +_000473_hash+rndis_add_response+2+58544+_000473_hash+NULL
84715 +_000474_hash+rndis_set_oid+4+6547+_000474_hash+NULL
84716 +_000475_hash+rngapi_reset+3+34366+_002911_hash+NULL+nohasharray
84717 +_000476_hash+roccat_common_receive+4+53407+_000476_hash+NULL
84718 +_000477_hash+roccat_common_send+4+12284+_000477_hash+NULL
84719 +_000478_hash+rpc_malloc+2+43573+_000478_hash+NULL
84720 +_000479_hash+rt2x00debug_write_bbp+3+8212+_000479_hash+NULL
84721 +_000480_hash+rt2x00debug_write_csr+3+64753+_000480_hash+NULL
84722 +_000481_hash+rt2x00debug_write_eeprom+3+23091+_000481_hash+NULL
84723 +_000482_hash+rt2x00debug_write_rf+3+38195+_000482_hash+NULL
84724 +_000483_hash+rts51x_read_mem+4+26577+_000483_hash+NULL
84725 +_000484_hash+rts51x_read_status+4+11830+_000484_hash+NULL
84726 +_000485_hash+rts51x_write_mem+4+17598+_000485_hash+NULL
84727 +_000486_hash+rw_copy_check_uvector+3+34271+_000486_hash+NULL
84728 +_000487_hash+rxrpc_request_key+3+27235+_000487_hash+NULL
84729 +_000488_hash+rxrpc_server_keyring+3+16431+_000488_hash+NULL
84730 +_000489_hash+savemem+3+58129+_000489_hash+NULL
84731 +_000490_hash+sb16_copy_from_user+10-7-6+55836+_000490_hash+NULL
84732 +_000493_hash+sched_autogroup_write+3+10984+_000493_hash+NULL
84733 +_000494_hash+scsi_mode_select+6+37330+_000494_hash+NULL
84734 +_000495_hash+scsi_tgt_copy_sense+3+26933+_000495_hash+NULL
84735 +_000496_hash+sctp_auth_create_key+1+51641+_000496_hash+NULL
84736 +_000497_hash+sctp_getsockopt_delayed_ack+2+9232+_000497_hash+NULL
84737 +_000498_hash+sctp_getsockopt_local_addrs+2+25178+_000498_hash+NULL
84738 +_000499_hash+sctp_make_abort_user+3+29654+_000499_hash+NULL
84739 +_000500_hash+sctp_setsockopt_active_key+3+43755+_000500_hash+NULL
84740 +_000501_hash+sctp_setsockopt_adaptation_layer+3+26935+_001925_hash+NULL+nohasharray
84741 +_000502_hash+sctp_setsockopt_associnfo+3+51684+_000502_hash+NULL
84742 +_000503_hash+sctp_setsockopt_auth_chunk+3+30843+_000503_hash+NULL
84743 +_000504_hash+sctp_setsockopt_auth_key+3+3793+_000504_hash+NULL
84744 +_000505_hash+sctp_setsockopt_autoclose+3+5775+_000505_hash+NULL
84745 +_000506_hash+sctp_setsockopt_bindx+3+49870+_000506_hash+NULL
84746 +_000507_hash+__sctp_setsockopt_connectx+3+46949+_000507_hash+NULL
84747 +_000508_hash+sctp_setsockopt_context+3+31091+_000508_hash+NULL
84748 +_000509_hash+sctp_setsockopt_default_send_param+3+49578+_000509_hash+&_000247_hash
84749 +_000510_hash+sctp_setsockopt_delayed_ack+3+40129+_000510_hash+NULL
84750 +_000511_hash+sctp_setsockopt_del_key+3+42304+_002281_hash+NULL+nohasharray
84751 +_000512_hash+sctp_setsockopt_events+3+18862+_000512_hash+NULL
84752 +_000513_hash+sctp_setsockopt_hmac_ident+3+11687+_000513_hash+NULL
84753 +_000514_hash+sctp_setsockopt_initmsg+3+1383+_000514_hash+NULL
84754 +_000515_hash+sctp_setsockopt_maxburst+3+28041+_000515_hash+NULL
84755 +_000516_hash+sctp_setsockopt_maxseg+3+11829+_000516_hash+NULL
84756 +_000517_hash+sctp_setsockopt_peer_addr_params+3+734+_000517_hash+NULL
84757 +_000518_hash+sctp_setsockopt_peer_primary_addr+3+13440+_000518_hash+NULL
84758 +_000519_hash+sctp_setsockopt_rtoinfo+3+30941+_000519_hash+NULL
84759 +_000520_hash+security_context_to_sid_core+2+29248+_000520_hash+NULL
84760 +_000521_hash+sel_commit_bools_write+3+46077+_000521_hash+NULL
84761 +_000522_hash+sel_write_avc_cache_threshold+3+2256+_000522_hash+NULL
84762 +_000523_hash+sel_write_bool+3+46996+_000523_hash+NULL
84763 +_000524_hash+sel_write_checkreqprot+3+60774+_000524_hash+NULL
84764 +_000525_hash+sel_write_disable+3+10511+_000525_hash+NULL
84765 +_000526_hash+sel_write_enforce+3+48998+_000526_hash+NULL
84766 +_000527_hash+sel_write_load+3+63830+_000527_hash+NULL
84767 +_000528_hash+send_bulk_static_data+3+61932+_000528_hash+NULL
84768 +_000529_hash+send_control_msg+6+48498+_000529_hash+NULL
84769 +_000530_hash+set_aoe_iflist+2+42737+_000530_hash+NULL
84770 +_000531_hash+setkey_unaligned+3+39474+_000531_hash+NULL
84771 +_000532_hash+set_registers+3+53582+_000532_hash+NULL
84772 +_000533_hash+setsockopt+5+54539+_000533_hash+NULL
84773 +_000534_hash+setup_req+3+5848+_000534_hash+NULL
84774 +_000535_hash+setup_window+7+59178+_000535_hash+NULL
84775 +_000536_hash+setxattr+4+37006+_000536_hash+NULL
84776 +_000537_hash+sfq_alloc+1+2861+_000537_hash+NULL
84777 +_000538_hash+sg_kmalloc+1+50240+_000538_hash+NULL
84778 +_000539_hash+sgl_map_user_pages+2+30610+_000539_hash+NULL
84779 +_000540_hash+shash_setkey_unaligned+3+8620+_000540_hash+NULL
84780 +_000541_hash+shmem_xattr_alloc+2+61190+_000541_hash+NULL
84781 +_000542_hash+sierra_setup_urb+5+46029+_000542_hash+NULL
84782 +_000543_hash+simple_transaction_get+3+50633+_000543_hash+NULL
84783 +_000544_hash+simple_write_to_buffer+2-5+3122+_000544_hash+NULL
84784 +_000546_hash+sisusb_send_bulk_msg+3+17864+_000546_hash+NULL
84785 +_000547_hash+skb_add_data+3+48363+_000547_hash+NULL
84786 +_000548_hash+skb_do_copy_data_nocache+5+12465+_000548_hash+NULL
84787 +_000549_hash+sl_alloc_bufs+2+50380+_000549_hash+NULL
84788 +_000550_hash+sl_realloc_bufs+2+64086+_000550_hash+NULL
84789 +_000551_hash+smk_write_ambient+3+45691+_000551_hash+NULL
84790 +_000552_hash+smk_write_cipso+3+17989+_000552_hash+NULL
84791 +_000553_hash+smk_write_direct+3+46363+_000553_hash+NULL
84792 +_000554_hash+smk_write_doi+3+49621+_000554_hash+NULL
84793 +_000555_hash+smk_write_load_list+3+52280+_000555_hash+NULL
84794 +_000556_hash+smk_write_logging+3+2618+_000556_hash+NULL
84795 +_000557_hash+smk_write_netlbladdr+3+42525+_000557_hash+NULL
84796 +_000558_hash+smk_write_onlycap+3+14400+_000558_hash+NULL
84797 +_000559_hash+snd_ctl_elem_user_tlv+3+11695+_000559_hash+NULL
84798 +_000560_hash+snd_emu10k1_fx8010_read+5+9605+_000560_hash+NULL
84799 +_000561_hash+snd_emu10k1_synth_copy_from_user+3-5+9061+_000561_hash+NULL
84800 +_000563_hash+snd_gus_dram_poke+4+18525+_000563_hash+NULL
84801 +_000564_hash+snd_hdsp_playback_copy+5+20676+_000564_hash+NULL
84802 +_000565_hash+snd_info_entry_write+3+63474+_000565_hash+NULL
84803 +_000566_hash+snd_korg1212_copy_from+6+36169+_000566_hash+NULL
84804 +_000567_hash+snd_mem_proc_write+3+9786+_000567_hash+NULL
84805 +_000568_hash+snd_midi_channel_init_set+1+30092+_000568_hash+NULL
84806 +_000569_hash+snd_midi_event_new+1+9893+_000750_hash+NULL+nohasharray
84807 +_000570_hash+snd_opl4_mem_proc_write+5+9670+_000570_hash+NULL
84808 +_000571_hash+snd_pcm_aio_read+3+13900+_000571_hash+NULL
84809 +_000572_hash+snd_pcm_aio_write+3+28738+_000572_hash+NULL
84810 +_000573_hash+snd_pcm_oss_write1+3+10872+_000573_hash+NULL
84811 +_000574_hash+snd_pcm_oss_write2+3+27332+_000574_hash+NULL
84812 +_000575_hash+snd_rawmidi_kernel_write1+4+56847+_000575_hash+NULL
84813 +_000576_hash+snd_rme9652_playback_copy+5+20970+_000576_hash+NULL
84814 +_000577_hash+snd_sb_csp_load_user+3+45190+_000577_hash+NULL
84815 +_000578_hash+snd_usb_ctl_msg+8+8436+_000578_hash+NULL
84816 +_000579_hash+sock_bindtodevice+3+50942+_000579_hash+NULL
84817 +_000580_hash+sock_kmalloc+2+62205+_000580_hash+NULL
84818 +_000581_hash+spidev_write+3+44510+_000581_hash+NULL
84819 +_000582_hash+squashfs_read_table+3+16945+_000582_hash+NULL
84820 +_000583_hash+srpt_alloc_ioctx+2-3+51042+_000583_hash+NULL
84821 +_000585_hash+srpt_alloc_ioctx_ring+2+49330+_000585_hash+NULL
84822 +_000586_hash+st5481_setup_isocpipes+6-4+61340+_000586_hash+NULL
84823 +_000587_hash+sta_agg_status_write+3+45164+_000587_hash+NULL
84824 +_000588_hash+svc_setsockopt+5+36876+_000588_hash+NULL
84825 +_000589_hash+sys_add_key+4+61288+_000589_hash+NULL
84826 +_000590_hash+sys_modify_ldt+3+18824+_000590_hash+NULL
84827 +_000591_hash+sys_semtimedop+3+4486+_000591_hash+NULL
84828 +_000592_hash+sys_setdomainname+2+4373+_000592_hash+NULL
84829 +_000593_hash+sys_sethostname+2+42962+_000593_hash+NULL
84830 +_000594_hash+tda10048_writeregbulk+4+11050+_000594_hash+NULL
84831 +_000595_hash+tipc_log_resize+1+34803+_000595_hash+NULL
84832 +_000596_hash+tomoyo_write_self+3+45161+_000596_hash+NULL
84833 +_000597_hash+tower_write+3+8580+_000597_hash+NULL
84834 +_000598_hash+tpm_write+3+50798+_000598_hash+NULL
84835 +_000599_hash+trusted_instantiate+3+4710+_000599_hash+NULL
84836 +_000600_hash+trusted_update+3+12664+_000600_hash+NULL
84837 +_000601_hash+tt_changes_fill_buffer+3+62649+_000601_hash+NULL
84838 +_000602_hash+tty_buffer_alloc+2+45437+_000602_hash+NULL
84839 +_000603_hash+__tun_chr_ioctl+4+22300+_000603_hash+NULL
84840 +_000604_hash+ubi_more_leb_change_data+4+63534+_000604_hash+NULL
84841 +_000605_hash+ubi_more_update_data+4+39189+_000605_hash+NULL
84842 +_000606_hash+ubi_resize_volume+2+50172+_000606_hash+NULL
84843 +_000607_hash+udf_alloc_i_data+2+35786+_000607_hash+NULL
84844 +_000608_hash+uea_idma_write+3+64139+_000608_hash+NULL
84845 +_000609_hash+uea_request+4+47613+_000609_hash+NULL
84846 +_000610_hash+uea_send_modem_cmd+3+3888+_000610_hash+NULL
84847 +_000611_hash+uio_write+3+43202+_000611_hash+NULL
84848 +_000612_hash+um_idi_write+3+18293+_000612_hash+NULL
84849 +_000613_hash+us122l_ctl_msg+8+13330+_000613_hash+NULL
84850 +_000614_hash+usb_alloc_urb+1+43436+_000614_hash+NULL
84851 +_000615_hash+usblp_new_writeurb+2+22894+_000615_hash+NULL
84852 +_000616_hash+usblp_write+3+23178+_000616_hash+NULL
84853 +_000617_hash+usbtest_alloc_urb+3-5+34446+_000617_hash+NULL
84854 +_000619_hash+usbtmc_write+3+64340+_000619_hash+NULL
84855 +_000620_hash+user_instantiate+3+26131+_000620_hash+NULL
84856 +_000621_hash+user_update+3+41332+_000621_hash+NULL
84857 +_000622_hash+uvc_simplify_fraction+3+31303+_000622_hash+NULL
84858 +_000623_hash+uwb_rc_cmd_done+4+35892+_000623_hash+NULL
84859 +_000624_hash+uwb_rc_neh_grok_event+3+55799+_000624_hash+NULL
84860 +_000625_hash+v9fs_alloc_rdir_buf+2+42150+_000625_hash+NULL
84861 +_000626_hash+__vb2_perform_fileio+3+63033+_000626_hash+NULL
84862 +_000627_hash+vc_do_resize+3-4+48842+_000627_hash+NULL
84863 +_000629_hash+vcs_write+3+3910+_000629_hash+NULL
84864 +_000630_hash+vfd_write+3+14717+_000630_hash+NULL
84865 +_000631_hash+vga_arb_write+3+36112+_000631_hash+NULL
84866 +_000632_hash+vga_switcheroo_debugfs_write+3+33984+_000632_hash+NULL
84867 +_000633_hash+vhci_get_user+3+45039+_000633_hash+NULL
84868 +_000634_hash+video_proc_write+3+6724+_000634_hash+NULL
84869 +_000635_hash+vlsi_alloc_ring+3-4+57003+_000635_hash+NULL
84870 +_000637_hash+__vmalloc+1+61168+_000637_hash+NULL
84871 +_000638_hash+vmalloc_32+1+1135+_000638_hash+NULL
84872 +_000639_hash+vmalloc_32_user+1+37519+_000639_hash+NULL
84873 +_000640_hash+vmalloc_exec+1+36132+_000640_hash+NULL
84874 +_000641_hash+vmalloc_node+1+58700+_000641_hash+NULL
84875 +_000642_hash+__vmalloc_node_flags+1+30352+_000642_hash+NULL
84876 +_000643_hash+vmalloc_user+1+32308+_000643_hash+NULL
84877 +_000644_hash+vol_cdev_direct_write+3+20751+_000644_hash+NULL
84878 +_000645_hash+vp_request_msix_vectors+2+28849+_000645_hash+NULL
84879 +_000646_hash+vring_add_indirect+3-4+20737+_000646_hash+NULL
84880 +_000648_hash+vring_new_virtqueue+1+9671+_000648_hash+NULL
84881 +_000649_hash+vxge_os_dma_malloc+2+46184+_000649_hash+NULL
84882 +_000650_hash+vxge_os_dma_malloc_async+3+56348+_000650_hash+NULL
84883 +_000651_hash+wdm_write+3+53735+_000651_hash+NULL
84884 +_000652_hash+wiimote_hid_send+3+48528+_000652_hash+NULL
84885 +_000653_hash+wl1273_fm_fops_write+3+60621+_000653_hash+NULL
84886 +_000654_hash+wlc_phy_loadsampletable_nphy+3+64367+_000654_hash+NULL
84887 +_000655_hash+write+3+62671+_000655_hash+NULL
84888 +_000656_hash+write_flush+3+50803+_000656_hash+NULL
84889 +_000657_hash+write_rio+3+54837+_000657_hash+NULL
84890 +_000658_hash+x25_asy_change_mtu+2+26928+_000658_hash+NULL
84891 +_000659_hash+xdi_copy_from_user+4+8395+_000659_hash+NULL
84892 +_000660_hash+xfrm_dst_alloc_copy+3+3034+_000660_hash+NULL
84893 +_000661_hash+xfrm_user_policy+4+62573+_000661_hash+NULL
84894 +_000662_hash+xfs_attrmulti_attr_set+4+59346+_000662_hash+NULL
84895 +_000663_hash+xfs_handle_to_dentry+3+12135+_000663_hash+NULL
84896 +_000664_hash+__xip_file_write+3+2733+_000664_hash+NULL
84897 +_000665_hash+xprt_rdma_allocate+2+31372+_000665_hash+NULL
84898 +_000666_hash+zd_usb_iowrite16v_async+3+23984+_000666_hash+NULL
84899 +_000667_hash+zd_usb_read_fw+4+22049+_000667_hash+NULL
84900 +_000668_hash+zerocopy_sg_from_iovec+3+11828+_000668_hash+NULL
84901 +_000669_hash+zoran_write+3+22404+_000669_hash+NULL
84902 +_000671_hash+acpi_ex_allocate_name_string+2-1+7685+_002855_hash+NULL+nohasharray
84903 +_000672_hash+acpi_os_allocate_zeroed+1+37422+_000672_hash+NULL
84904 +_000673_hash+acpi_ut_initialize_buffer+2+47143+_002314_hash+NULL+nohasharray
84905 +_000674_hash+ad7879_spi_xfer+3+36311+_000674_hash+NULL
84906 +_000675_hash+add_new_gdb+3+27643+_000675_hash+NULL
84907 +_000676_hash+add_numbered_child+5+14273+_000676_hash+NULL
84908 +_000677_hash+add_res_range+4+21310+_000677_hash+NULL
84909 +_000678_hash+addtgt+3+54703+_000678_hash+NULL
84910 +_000679_hash+add_uuid+4+49831+_000679_hash+NULL
84911 +_000680_hash+afs_cell_alloc+2+24052+_000680_hash+NULL
84912 +_000681_hash+aggr_recv_addba_req_evt+4+38037+_000681_hash+NULL
84913 +_000682_hash+agp_create_memory+1+1075+_000682_hash+NULL
84914 +_000683_hash+agp_create_user_memory+1+62955+_000683_hash+NULL
84915 +_000684_hash+alg_setsockopt+5+20985+_000684_hash+NULL
84916 +_000685_hash+alloc_async+1+14208+_000685_hash+NULL
84917 +_000686_hash+___alloc_bootmem_nopanic+1+53626+_000686_hash+NULL
84918 +_000687_hash+alloc_buf+1+34532+_000687_hash+NULL
84919 +_000688_hash+alloc_chunk+1+49575+_000688_hash+NULL
84920 +_000689_hash+alloc_context+1+41283+_000689_hash+NULL
84921 +_000690_hash+alloc_ctrl_packet+1+44667+_000690_hash+NULL
84922 +_000691_hash+alloc_data_packet+1+46698+_000691_hash+NULL
84923 +_000692_hash+alloc_dca_provider+2+59670+_000692_hash+NULL
84924 +_000693_hash+__alloc_dev_table+2+54343+_000693_hash+NULL
84925 +_000694_hash+alloc_ep+1+17269+_000694_hash+NULL
84926 +_000695_hash+__alloc_extent_buffer+3+15093+_000695_hash+NULL
84927 +_000696_hash+alloc_group_attrs+2+9194+_000719_hash+NULL+nohasharray
84928 +_000697_hash+alloc_large_system_hash+2+64490+_000697_hash+NULL
84929 +_000698_hash+alloc_netdev_mqs+1+30030+_000698_hash+NULL
84930 +_000699_hash+__alloc_objio_seg+1+7203+_000699_hash+NULL
84931 +_000700_hash+alloc_ring+2-4+15345+_000700_hash+NULL
84932 +_000701_hash+alloc_ring+2-4+39151+_000701_hash+NULL
84933 +_000704_hash+alloc_session+1-2+64171+_000704_hash+NULL
84934 +_000708_hash+alloc_smp_req+1+51337+_000708_hash+NULL
84935 +_000709_hash+alloc_smp_resp+1+3566+_000709_hash+NULL
84936 +_000710_hash+alloc_ts_config+1+45775+_000710_hash+NULL
84937 +_000711_hash+alloc_upcall+2+62186+_000711_hash+NULL
84938 +_000712_hash+altera_drscan+2+48698+_000712_hash+NULL
84939 +_000713_hash+altera_irscan+2+62396+_000713_hash+NULL
84940 +_000714_hash+altera_set_dr_post+2+54291+_000714_hash+NULL
84941 +_000715_hash+altera_set_dr_pre+2+64862+_000715_hash+NULL
84942 +_000716_hash+altera_set_ir_post+2+20948+_000716_hash+NULL
84943 +_000717_hash+altera_set_ir_pre+2+54103+_000717_hash+NULL
84944 +_000718_hash+altera_swap_dr+2+50090+_000718_hash+NULL
84945 +_000719_hash+altera_swap_ir+2+9194+_000719_hash+&_000696_hash
84946 +_000720_hash+amd_create_gatt_pages+1+20537+_000720_hash+NULL
84947 +_000721_hash+aoechr_write+3+62883+_001352_hash+NULL+nohasharray
84948 +_000722_hash+applesmc_create_nodes+2+49392+_000722_hash+NULL
84949 +_000723_hash+array_zalloc+1-2+7519+_000723_hash+NULL
84950 +_000725_hash+arvo_sysfs_read+6+31617+_000725_hash+NULL
84951 +_000726_hash+arvo_sysfs_write+6+3311+_000726_hash+NULL
84952 +_000727_hash+asd_store_update_bios+4+10165+_000727_hash+NULL
84953 +_000728_hash+ata_host_alloc+2+46094+_000728_hash+NULL
84954 +_000729_hash+atalk_sendmsg+4+21677+_000729_hash+NULL
84955 +_000730_hash+ath6kl_cfg80211_connect_event+7-9-8+13443+_000730_hash+NULL
84956 +_000731_hash+ath6kl_mgmt_tx+9+21153+_000731_hash+NULL
84957 +_000732_hash+ath6kl_wmi_roam_tbl_event_rx+3+43440+_000732_hash+NULL
84958 +_000733_hash+ath6kl_wmi_send_mgmt_cmd+7+17347+_000733_hash+NULL
84959 +_000734_hash+ath_descdma_setup+5+12257+_000734_hash+NULL
84960 +_000735_hash+ath_rx_edma_init+2+65483+_000735_hash+NULL
84961 +_000736_hash+ati_create_gatt_pages+1+4722+_003185_hash+NULL+nohasharray
84962 +_000737_hash+au0828_init_isoc+2-3+61917+_000737_hash+NULL
84963 +_000739_hash+audit_init_entry+1+38644+_000739_hash+NULL
84964 +_000740_hash+ax25_sendmsg+4+62770+_000740_hash+NULL
84965 +_000741_hash+b1_alloc_card+1+36155+_000741_hash+NULL
84966 +_000742_hash+b43_nphy_load_samples+3+36481+_000742_hash+NULL
84967 +_000743_hash+bio_copy_user_iov+4+37660+_000743_hash+NULL
84968 +_000744_hash+__bio_map_kern+2-3+47379+_000744_hash+NULL
84969 +_000746_hash+blk_register_region+1-2+51424+_000746_hash+NULL
84970 +_000748_hash+bm_entry_write+3+28338+_000748_hash+NULL
84971 +_000749_hash+bm_realloc_pages+2+9431+_000749_hash+NULL
84972 +_000750_hash+bm_register_write+3+9893+_000750_hash+&_000569_hash
84973 +_000751_hash+bm_status_write+3+12964+_000751_hash+NULL
84974 +_000752_hash+br_mdb_rehash+2+42643+_000752_hash+NULL
84975 +_000753_hash+btrfs_copy_from_user+3+43806+_000753_hash+NULL
84976 +_000754_hash+btrfs_insert_delayed_dir_index+4+63720+_000754_hash+NULL
84977 +_000755_hash+__btrfs_map_block+3+49839+_000755_hash+NULL
84978 +_000756_hash+__c4iw_init_resource_fifo+3+8334+_000756_hash+NULL
84979 +_000757_hash+cache_downcall+3+13666+_000757_hash+NULL
84980 +_000758_hash+cache_slow_downcall+2+8570+_000758_hash+NULL
84981 +_000759_hash+ca_extend+2+64541+_000759_hash+NULL
84982 +_000760_hash+caif_seqpkt_sendmsg+4+22961+_000760_hash+NULL
84983 +_000761_hash+caif_stream_sendmsg+4+9110+_000761_hash+NULL
84984 +_000762_hash+carl9170_cmd_buf+3+950+_000762_hash+NULL
84985 +_000763_hash+cdev_add+2-3+38176+_000763_hash+NULL
84986 +_000765_hash+cdrom_read_cdda+4+50478+_000765_hash+NULL
84987 +_000766_hash+ceph_dns_resolve_name+1+62488+_000766_hash+NULL
84988 +_000767_hash+ceph_msgpool_get+2+54258+_000767_hash+NULL
84989 +_000768_hash+cfg80211_connect_result+4-6+56515+_000768_hash+NULL
84990 +_000770_hash+cfg80211_disconnected+4+57+_000770_hash+NULL
84991 +_000771_hash+cfg80211_inform_bss+8+19332+_000771_hash+NULL
84992 +_000772_hash+cfg80211_inform_bss_frame+4+41078+_000772_hash+NULL
84993 +_000773_hash+cfg80211_mlme_register_mgmt+5+19852+_000773_hash+NULL
84994 +_000774_hash+cfg80211_roamed_bss+4-6+50198+_000774_hash+NULL
84995 +_000776_hash+cifs_readdata_alloc+1+50318+_000776_hash+NULL
84996 +_000777_hash+cifs_readv_from_socket+3+19109+_000777_hash+NULL
84997 +_000778_hash+cifs_writedata_alloc+1+32880+_003119_hash+NULL+nohasharray
84998 +_000779_hash+cnic_alloc_dma+3+34641+_000779_hash+NULL
84999 +_000780_hash+configfs_write_file+3+61621+_000780_hash+NULL
85000 +_000781_hash+construct_key+3+11329+_000781_hash+NULL
85001 +_000782_hash+context_alloc+3+24645+_000782_hash+NULL
85002 +_000783_hash+copy_to_user+3+57835+_000783_hash+NULL
85003 +_000784_hash+create_attr_set+1+22861+_000784_hash+NULL
85004 +_000785_hash+create_bounce_buffer+3+39155+_000785_hash+NULL
85005 +_000786_hash+create_gpadl_header+2+19064+_000786_hash+NULL
85006 +_000787_hash+_create_sg_bios+4+31244+_000787_hash+NULL
85007 +_000788_hash+cryptd_alloc_instance+2-3+18048+_000788_hash+NULL
85008 +_000790_hash+crypto_ahash_setkey+3+55134+_000790_hash+NULL
85009 +_000791_hash+crypto_alloc_instance2+3+25277+_000791_hash+NULL
85010 +_000792_hash+crypto_shash_setkey+3+60483+_000792_hash+NULL
85011 +_000793_hash+cx231xx_init_bulk+3-2+47024+_000793_hash+NULL
85012 +_000794_hash+cx231xx_init_isoc+2-3+56453+_000794_hash+NULL
85013 +_000796_hash+cx231xx_init_vbi_isoc+2-3+28053+_000796_hash+NULL
85014 +_000798_hash+cxgb_alloc_mem+1+24007+_000798_hash+NULL
85015 +_000799_hash+cxgbi_device_portmap_create+3+25747+_000799_hash+NULL
85016 +_000800_hash+cxgbi_device_register+1-2+36746+_000800_hash+NULL
85017 +_000802_hash+__cxio_init_resource_fifo+3+23447+_000802_hash+NULL
85018 +_000803_hash+dccp_sendmsg+4+56058+_000803_hash+NULL
85019 +_000804_hash+ddp_make_gl+1+12179+_000804_hash+NULL
85020 +_000805_hash+depth_write+3+3021+_000805_hash+NULL
85021 +_000806_hash+dev_irnet_write+3+11398+_000806_hash+NULL
85022 +_000807_hash+dev_set_alias+3+50084+_000807_hash+NULL
85023 +_000808_hash+dev_write+3+7708+_000808_hash+NULL
85024 +_000809_hash+dfs_global_file_write+3+6112+_000809_hash+NULL
85025 +_000810_hash+dgram_sendmsg+4+45679+_000810_hash+NULL
85026 +_000811_hash+disconnect+4+32521+_000811_hash+NULL
85027 +_000812_hash+dma_attach+6-7+50831+_000812_hash+NULL
85028 +_000814_hash+dn_sendmsg+4+38390+_000814_hash+NULL
85029 +_000815_hash+do_dccp_setsockopt+5+54377+_003160_hash+NULL+nohasharray
85030 +_000816_hash+do_jffs2_setxattr+5+25910+_000816_hash+NULL
85031 +_000817_hash+do_msgsnd+4+1387+_000817_hash+NULL
85032 +_000818_hash+do_raw_setsockopt+5+55215+_000818_hash+NULL
85033 +_000819_hash+do_readv_writev+4+51849+_000819_hash+NULL
85034 +_000820_hash+do_sync+1+9604+_000820_hash+NULL
85035 +_000821_hash+dup_array+3+33551+_000821_hash+NULL
85036 +_000822_hash+dvb_audio_write+3+51275+_000822_hash+NULL
85037 +_000823_hash+dvb_ca_en50221_init+4+45718+_000823_hash+NULL
85038 +_000824_hash+dvb_video_write+3+754+_000824_hash+NULL
85039 +_000825_hash+econet_sendmsg+4+51430+_000825_hash+NULL
85040 +_000826_hash+ecryptfs_decode_and_decrypt_filename+5+10379+_000826_hash+NULL
85041 +_000827_hash+ecryptfs_encrypt_and_encode_filename+6+2109+_000827_hash+NULL
85042 +_000828_hash+ecryptfs_send_message_locked+2+31801+_000828_hash+NULL
85043 +_000829_hash+edac_device_alloc_ctl_info+1+5941+_000829_hash+NULL
85044 +_000830_hash+edac_mc_alloc+1+54846+_000830_hash+NULL
85045 +_000831_hash+edac_pci_alloc_ctl_info+1+63388+_000831_hash+NULL
85046 +_000832_hash+efivar_create_sysfs_entry+2+19485+_000832_hash+NULL
85047 +_000833_hash+em28xx_alloc_isoc+4+46892+_000833_hash+NULL
85048 +_000834_hash+enable_write+3+30456+_000834_hash+NULL
85049 +_000835_hash+enclosure_register+3+57412+_000835_hash+NULL
85050 +_000836_hash+ext4_kvzalloc+1+47605+_000836_hash+NULL
85051 +_000837_hash+extend_netdev_table+2+31680+_000837_hash+NULL
85052 +_000838_hash+__feat_register_sp+6+64712+_000838_hash+NULL
85053 +_000839_hash+__ffs_ep0_read_events+3+48868+_000839_hash+NULL
85054 +_000840_hash+ffs_ep0_write+3+9438+_000840_hash+NULL
85055 +_000841_hash+ffs_epfile_read+3+18775+_000841_hash+NULL
85056 +_000842_hash+ffs_epfile_write+3+48014+_000842_hash+NULL
85057 +_000843_hash+fib_info_hash_alloc+1+9075+_000843_hash+NULL
85058 +_000844_hash+fillonedir+3+41746+_000844_hash+NULL
85059 +_000845_hash+flexcop_device_kmalloc+1+54793+_000845_hash+NULL
85060 +_000846_hash+frame_alloc+4+15981+_000846_hash+NULL
85061 +_000847_hash+fw_node_create+2+9559+_000847_hash+NULL
85062 +_000848_hash+garmin_read_process+3+27509+_000848_hash+NULL
85063 +_000849_hash+garp_request_join+4+7471+_000849_hash+NULL
85064 +_000850_hash+get_derived_key+4+61100+_000850_hash+NULL
85065 +_000851_hash+get_entry+4+16003+_000851_hash+NULL
85066 +_000852_hash+get_free_de+2+33714+_000852_hash+NULL
85067 +_000853_hash+get_new_cssid+2+51665+_000853_hash+NULL
85068 +_000854_hash+getxattr+4+24398+_000854_hash+NULL
85069 +_000855_hash+gspca_dev_probe2+4+59833+_000855_hash+NULL
85070 +_000856_hash+hcd_alloc_coherent+5+55862+_000856_hash+NULL
85071 +_000857_hash+hci_sock_sendmsg+4+37420+_000857_hash+NULL
85072 +_000858_hash+hid_register_field+2-3+4874+_000858_hash+NULL
85073 +_000860_hash+hid_report_raw_event+4+7024+_000860_hash+NULL
85074 +_000861_hash+hpi_alloc_control_cache+1+35351+_000861_hash+NULL
85075 +_000862_hash+hugetlbfs_read_actor+2-5-4+34547+_000862_hash+NULL
85076 +_000865_hash+hvc_alloc+4+12579+_000865_hash+NULL
85077 +_000866_hash+__hwahc_dev_set_key+5+46328+_000866_hash+NULL
85078 +_000867_hash+i2400m_zrealloc_2x+3+54166+_001430_hash+NULL+nohasharray
85079 +_000868_hash+ib_alloc_device+1+26483+_000868_hash+NULL
85080 +_000869_hash+ib_create_send_mad+5+1196+_000869_hash+NULL
85081 +_000870_hash+ibmasm_new_command+2+25714+_000870_hash+NULL
85082 +_000871_hash+ib_send_cm_drep+3+50186+_000871_hash+NULL
85083 +_000872_hash+ib_send_cm_mra+4+60202+_000872_hash+NULL
85084 +_000873_hash+ib_send_cm_rtu+3+63138+_000873_hash+NULL
85085 +_000874_hash+ieee80211_key_alloc+3+19065+_000874_hash+NULL
85086 +_000875_hash+ieee80211_mgmt_tx+9+46860+_000875_hash+NULL
85087 +_000876_hash+ieee80211_send_probe_req+6-4+6924+_000876_hash+NULL
85088 +_000877_hash+if_writecmd+2+815+_000877_hash+NULL
85089 +_000878_hash+init_bch+1-2+64130+_000878_hash+NULL
85090 +_000880_hash+init_ipath+1+48187+_000880_hash+NULL
85091 +_000881_hash+init_list_set+2-3+39188+_000881_hash+NULL
85092 +_000883_hash+init_q+4+132+_000883_hash+NULL
85093 +_000884_hash+init_state+2+60165+_000884_hash+NULL
85094 +_000885_hash+init_tag_map+3+57515+_000885_hash+NULL
85095 +_000886_hash+input_ff_create+2+21240+_000886_hash+NULL
85096 +_000887_hash+input_mt_init_slots+2+31183+_000887_hash+NULL
85097 +_000888_hash+interfaces+2+38859+_000888_hash+NULL
85098 +_000889_hash+ioat2_alloc_ring+2+11172+_000889_hash+NULL
85099 +_000890_hash+ip_generic_getfrag+3-4+12187+_000890_hash+NULL
85100 +_000892_hash+ipr_alloc_ucode_buffer+1+40199+_000892_hash+NULL
85101 +_000893_hash+ip_set_alloc+1+57953+_000893_hash+NULL
85102 +_000894_hash+ipv6_flowlabel_opt+3+58135+_001125_hash+NULL+nohasharray
85103 +_000895_hash+ipv6_renew_options+5+28867+_000895_hash+NULL
85104 +_000896_hash+ipxrtr_route_packet+4+54036+_000896_hash+NULL
85105 +_000897_hash+irda_sendmsg+4+4388+_000897_hash+NULL
85106 +_000898_hash+irda_sendmsg_dgram+4+38563+_000898_hash+NULL
85107 +_000899_hash+irda_sendmsg_ultra+4+42047+_000899_hash+NULL
85108 +_000900_hash+irias_add_octseq_attrib+4+29983+_000900_hash+NULL
85109 +_000901_hash+irq_alloc_generic_chip+2+26650+_000901_hash+NULL
85110 +_000902_hash+irq_domain_add_linear+2+29236+_000902_hash+NULL
85111 +_000903_hash+iscsi_alloc_session+3+49390+_000903_hash+NULL
85112 +_000904_hash+iscsi_create_conn+2+50425+_000904_hash+NULL
85113 +_000905_hash+iscsi_create_endpoint+1+15193+_000905_hash+NULL
85114 +_000906_hash+iscsi_create_iface+5+38510+_000906_hash+NULL
85115 +_000907_hash+iscsi_decode_text_input+4+58292+_000907_hash+NULL
85116 +_000908_hash+iscsi_pool_init+2-4+54913+_000908_hash+NULL
85117 +_000910_hash+iscsit_dump_data_payload+2+38683+_000910_hash+NULL
85118 +_000911_hash+isdn_write+3+45863+_000911_hash+NULL
85119 +_000912_hash+isku_receive+4+54130+_000912_hash+NULL
85120 +_000913_hash+isku_send+4+41542+_000913_hash+NULL
85121 +_000914_hash+islpci_mgt_transaction+5+23610+_000914_hash+NULL
85122 +_000915_hash+iso_sched_alloc+1+13377+_002079_hash+NULL+nohasharray
85123 +_000916_hash+ivtv_v4l2_write+3+39226+_000916_hash+NULL
85124 +_000917_hash+iwl_trans_txq_alloc+3+36147+_000917_hash+NULL
85125 +_000918_hash+iwmct_fw_parser_init+4+37876+_000918_hash+NULL
85126 +_000919_hash+iwm_notif_send+6+12295+_000919_hash+NULL
85127 +_000920_hash+iwm_ntf_calib_res+3+11686+_000920_hash+NULL
85128 +_000921_hash+iwm_umac_set_config_var+4+17320+_000921_hash+NULL
85129 +_000922_hash+ixgbe_alloc_q_vector+3-5+45428+_000922_hash+NULL
85130 +_000924_hash+jbd2_journal_init_revoke+2+51088+_000924_hash+NULL
85131 +_000925_hash+jffs2_write_dirent+5+37311+_000925_hash+NULL
85132 +_000926_hash+journal_init_revoke+2+56933+_000926_hash+NULL
85133 +_000927_hash+keyctl_instantiate_key+3+41855+_000927_hash+NULL
85134 +_000928_hash+keyctl_instantiate_key_iov+3+16969+_000928_hash+NULL
85135 +_000929_hash+__kfifo_from_user+3+20399+_000929_hash+NULL
85136 +_000930_hash+kimage_crash_alloc+3+3233+_000930_hash+NULL
85137 +_000931_hash+kimage_normal_alloc+3+31140+_000931_hash+NULL
85138 +_000932_hash+kmem_realloc+2+37489+_000932_hash+NULL
85139 +_000933_hash+kmem_zalloc+1+11510+_000933_hash+NULL
85140 +_000934_hash+koneplus_send+4+18226+_000934_hash+NULL
85141 +_000935_hash+koneplus_sysfs_read+6+42792+_000935_hash+NULL
85142 +_000936_hash+kovaplus_send+4+10009+_000936_hash+NULL
85143 +_000937_hash+kvm_read_guest_page_mmu+6+37611+_000937_hash+NULL
85144 +_000938_hash+kvm_set_irq_routing+3+48704+_000938_hash+NULL
85145 +_000939_hash+kvm_write_guest_cached+4+11106+_000939_hash+NULL
85146 +_000940_hash+kvm_write_guest_page+5+63555+_002809_hash+NULL+nohasharray
85147 +_000941_hash+l2cap_skbuff_fromiovec+3-4+35003+_000941_hash+NULL
85148 +_000943_hash+l2tp_ip_sendmsg+4+50411+_000943_hash+NULL
85149 +_000944_hash+l2tp_session_create+1+25286+_000944_hash+NULL
85150 +_000945_hash+lc_create+3+48662+_000945_hash+NULL
85151 +_000946_hash+leaf_dealloc+3+29566+_000946_hash+NULL
85152 +_000947_hash+linear_conf+2+23485+_003314_hash+NULL+nohasharray
85153 +_000948_hash+lirc_buffer_init+2-3+53282+_000948_hash+NULL
85154 +_000950_hash+llc_ui_sendmsg+4+24987+_000950_hash+NULL
85155 +_000951_hash+lpfc_sli4_queue_alloc+3+62646+_000951_hash+NULL
85156 +_000952_hash+mce_request_packet+3+1073+_000952_hash+NULL
85157 +_000953_hash+mdiobus_alloc_size+1+52259+_000953_hash+NULL
85158 +_000954_hash+media_entity_init+2-4+15870+_001556_hash+NULL+nohasharray
85159 +_000956_hash+memstick_alloc_host+1+142+_000956_hash+NULL
85160 +_000957_hash+mesh_table_alloc+1+22305+_000957_hash+NULL
85161 +_000958_hash+mfd_add_devices+4+56753+_000958_hash+NULL
85162 +_000959_hash+mISDN_sock_sendmsg+4+41035+_000959_hash+NULL
85163 +_000960_hash+mmc_alloc_host+1+48097+_000960_hash+NULL
85164 +_000961_hash+mmc_test_alloc_mem+3+28102+_000961_hash+NULL
85165 +_000962_hash+mpi_alloc+1+18094+_000962_hash+NULL
85166 +_000963_hash+mpihelp_mul_karatsuba_case+5-3+23918+_000963_hash+NULL
85167 +_000964_hash+mpihelp_mul_n+4+16405+_000964_hash+NULL
85168 +_000965_hash+mpi_set_bit+2+15104+_000965_hash+NULL
85169 +_000966_hash+mpi_set_highbit+2+37327+_001420_hash+NULL+nohasharray
85170 +_000967_hash+mtd_concat_create+2+14416+_000967_hash+NULL
85171 +_000968_hash+mvumi_alloc_mem_resource+3+47750+_000968_hash+NULL
85172 +_000969_hash+mwifiex_11n_create_rx_reorder_tbl+4+63806+_000969_hash+NULL
85173 +_000970_hash+mwifiex_alloc_sdio_mpa_buffers+2-3+60961+_000970_hash+NULL
85174 +_000972_hash+mwl8k_cmd_set_beacon+4+23110+_000972_hash+NULL
85175 +_000973_hash+neigh_hash_alloc+1+17595+_000973_hash+NULL
85176 +_000974_hash+netlink_sendmsg+4+33708+_001172_hash+NULL+nohasharray
85177 +_000975_hash+netxen_alloc_sds_rings+2+13417+_000975_hash+NULL
85178 +_000976_hash+new_bind_ctl+2+35324+_000976_hash+NULL
85179 +_000977_hash+new_dir+3+31919+_000977_hash+NULL
85180 +_000978_hash+new_tape_buffer+2+32866+_000978_hash+NULL
85181 +_000979_hash+nfc_llcp_build_tlv+3+19536+_000979_hash+NULL
85182 +_000980_hash+nfc_llcp_send_i_frame+3+59130+_000980_hash+NULL
85183 +_000981_hash+nfs4_alloc_slots+1+2454+_000981_hash+NULL
85184 +_000982_hash+nfsctl_transaction_write+3+64800+_000982_hash+NULL
85185 +_000983_hash+nfs_idmap_request_key+3+30208+_000983_hash+NULL
85186 +_000984_hash+nfs_readdata_alloc+1+9990+_000984_hash+NULL
85187 +_000985_hash+nfs_writedata_alloc+1+62868+_000985_hash+NULL
85188 +_000986_hash+nl_pid_hash_zalloc+1+23314+_000986_hash+NULL
85189 +_000987_hash+nr_sendmsg+4+53656+_000987_hash+NULL
85190 +_000988_hash+nsm_create_handle+4+38060+_000988_hash+NULL
85191 +_000989_hash+ntfs_copy_from_user_iovec+3-6+49829+_000989_hash+NULL
85192 +_000991_hash+ntfs_file_buffered_write+4-6+41442+_000991_hash+NULL
85193 +_000993_hash+__ntfs_malloc+1+34022+_000993_hash+NULL
85194 +_000994_hash+nvme_alloc_queue+3+46865+_000994_hash+NULL
85195 +_000995_hash+ocfs2_acl_from_xattr+2+21604+_000995_hash+NULL
85196 +_000996_hash+ocfs2_control_message+3+19564+_000996_hash+NULL
85197 +_000997_hash+opera1_usb_i2c_msgxfer+4+64521+_000997_hash+NULL
85198 +_000998_hash+_ore_get_io_state+3+2166+_000998_hash+NULL
85199 +_000999_hash+orig_hash_add_if+2+53676+_000999_hash+NULL
85200 +_001000_hash+orig_hash_del_if+2+45080+_001000_hash+NULL
85201 +_001001_hash+orinoco_set_key+5-7+17878+_001001_hash+NULL
85202 +_001003_hash+osdmap_set_max_osd+2+57630+_001003_hash+NULL
85203 +_001004_hash+_osd_realloc_seg+3+54352+_001004_hash+NULL
85204 +_001005_hash+OSDSetBlock+2-4+38986+_001005_hash+NULL
85205 +_001007_hash+osst_execute+7-6+17607+_001007_hash+NULL
85206 +_001008_hash+osst_write+3+31581+_001008_hash+NULL
85207 +_001009_hash+otp_read+2-5-4+10594+_001009_hash+NULL
85208 +_001012_hash+ovs_vport_alloc+1+33475+_001012_hash+NULL
85209 +_001013_hash+packet_sendmsg_spkt+4+28885+_001013_hash+NULL
85210 +_001014_hash+pair_device+4+61175+_001708_hash+NULL+nohasharray
85211 +_001015_hash+pccard_store_cis+6+18176+_001015_hash+NULL
85212 +_001016_hash+pci_add_cap_save_buffer+3+3426+_001016_hash+NULL
85213 +_001017_hash+pcnet32_realloc_rx_ring+3+36598+_001017_hash+NULL
85214 +_001018_hash+pcnet32_realloc_tx_ring+3+38428+_001018_hash+NULL
85215 +_001019_hash+pcpu_mem_zalloc+1+22948+_001019_hash+NULL
85216 +_001020_hash+pep_sendmsg+4+62524+_001020_hash+NULL
85217 +_001021_hash+pfkey_sendmsg+4+47394+_001021_hash+NULL
85218 +_001022_hash+pidlist_resize+2+496+_001022_hash+NULL
85219 +_001023_hash+pin_code_reply+4+46510+_001023_hash+NULL
85220 +_001024_hash+ping_getfrag+3-4+8360+_001024_hash+NULL
85221 +_001026_hash+pipe_set_size+2+5204+_001026_hash+NULL
85222 +_001027_hash+pkt_bio_alloc+1+48284+_001027_hash+NULL
85223 +_001028_hash+platform_create_bundle+4-6+12785+_001028_hash+NULL
85224 +_001030_hash+play_iframe+3+8219+_001030_hash+NULL
85225 +_001031_hash+pm8001_store_update_fw+4+55716+_001031_hash+NULL
85226 +_001032_hash+pmcraid_alloc_sglist+1+9864+_001032_hash+NULL
85227 +_001033_hash+pn533_dep_link_up+5+7659+_001033_hash+NULL
85228 +_001034_hash+pnp_alloc+1+24869+_001419_hash+NULL+nohasharray
85229 +_001035_hash+pn_sendmsg+4+12640+_001035_hash+NULL
85230 +_001036_hash+pppoe_sendmsg+4+48039+_001036_hash+NULL
85231 +_001037_hash+pppol2tp_sendmsg+4+56420+_001037_hash+NULL
85232 +_001038_hash+process_vm_rw+3-5+47533+_001038_hash+NULL
85233 +_001040_hash+process_vm_rw_single_vec+1-2+26213+_001040_hash+NULL
85234 +_001042_hash+proc_write+3+51003+_001042_hash+NULL
85235 +_001043_hash+profile_load+3+58267+_001043_hash+NULL
85236 +_001044_hash+profile_remove+3+8556+_001044_hash+NULL
85237 +_001045_hash+profile_replace+3+14652+_001045_hash+NULL
85238 +_001046_hash+pscsi_get_bio+1+56103+_001046_hash+NULL
85239 +_001047_hash+pyra_send+4+12061+_001047_hash+NULL
85240 +_001048_hash+qc_capture+3+19298+_001048_hash+NULL
85241 +_001049_hash+qla4xxx_alloc_work+2+44813+_001049_hash+NULL
85242 +_001050_hash+qlcnic_alloc_msix_entries+2+46160+_001050_hash+NULL
85243 +_001051_hash+qlcnic_alloc_sds_rings+2+26795+_001051_hash+NULL
85244 +_001052_hash+queue_received_packet+5+9657+_001052_hash+NULL
85245 +_001053_hash+raw_send_hdrinc+4+58803+_001053_hash+NULL
85246 +_001054_hash+raw_sendmsg+4+23078+_001054_hash+&_000022_hash
85247 +_001055_hash+rawsock_sendmsg+4+60010+_001055_hash+NULL
85248 +_001056_hash+rawv6_send_hdrinc+3+35425+_001056_hash+NULL
85249 +_001057_hash+rb_alloc+1+3102+_001057_hash+NULL
85250 +_001058_hash+rbd_alloc_coll+1+33678+_001058_hash+NULL
85251 +_001059_hash+rbd_create_rw_ops+2+4605+_001059_hash+NULL
85252 +_001060_hash+rds_ib_inc_copy_to_user+3+55007+_001060_hash+NULL
85253 +_001061_hash+rds_iw_inc_copy_to_user+3+29214+_001061_hash+NULL
85254 +_001062_hash+rds_message_alloc+1+10517+_001062_hash+NULL
85255 +_001063_hash+rds_message_copy_from_user+3+45510+_001063_hash+NULL
85256 +_001064_hash+rds_message_inc_copy_to_user+3+26540+_001064_hash+NULL
85257 +_001065_hash+redrat3_transmit_ir+3+64244+_001065_hash+NULL
85258 +_001066_hash+regcache_rbtree_insert_to_block+5+58009+_001066_hash+NULL
85259 +_001067_hash+_regmap_raw_write+4+42652+_001067_hash+NULL
85260 +_001068_hash+regmap_register_patch+3+21681+_001068_hash+NULL
85261 +_001069_hash+relay_alloc_page_array+1+52735+_001069_hash+NULL
85262 +_001070_hash+remove_uuid+4+64505+_001070_hash+NULL
85263 +_001071_hash+reshape_ring+2+29147+_001071_hash+NULL
85264 +_001072_hash+RESIZE_IF_NEEDED+2+56286+_001072_hash+NULL
85265 +_001073_hash+resize_stripes+2+61650+_001073_hash+NULL
85266 +_001074_hash+rfcomm_sock_sendmsg+4+37661+_001074_hash+NULL
85267 +_001075_hash+rose_sendmsg+4+20249+_001075_hash+NULL
85268 +_001076_hash+rxrpc_send_data+5+21553+_001076_hash+NULL
85269 +_001077_hash+rxrpc_setsockopt+5+50286+_001077_hash+NULL
85270 +_001078_hash+saa7146_vmalloc_build_pgtable+2+19780+_001078_hash+NULL
85271 +_001079_hash+saa7164_buffer_alloc_user+2+9627+_001079_hash+NULL
85272 +_001081_hash+sco_send_frame+3+41815+_001081_hash+NULL
85273 +_001082_hash+scsi_host_alloc+2+63041+_001082_hash+NULL
85274 +_001083_hash+scsi_tgt_kspace_exec+8+9522+_001083_hash+NULL
85275 +_001084_hash+sctp_sendmsg+4+61919+_001084_hash+NULL
85276 +_001085_hash+sctp_setsockopt+5+44788+_001085_hash+NULL
85277 +_001086_hash+sctp_setsockopt_connectx+3+6073+_001086_hash+NULL
85278 +_001087_hash+sctp_setsockopt_connectx_old+3+22631+_001087_hash+NULL
85279 +_001088_hash+sctp_tsnmap_init+2+36446+_001088_hash+NULL
85280 +_001089_hash+sctp_user_addto_chunk+2-3+62047+_001089_hash+NULL
85281 +_001091_hash+security_context_to_sid+2+19839+_001091_hash+NULL
85282 +_001092_hash+security_context_to_sid_default+2+3492+_003366_hash+NULL+nohasharray
85283 +_001093_hash+security_context_to_sid_force+2+20724+_001093_hash+NULL
85284 +_001094_hash+selinux_transaction_write+3+59038+_001094_hash+NULL
85285 +_001095_hash+sel_write_access+3+51704+_001095_hash+NULL
85286 +_001096_hash+sel_write_create+3+11353+_001096_hash+NULL
85287 +_001097_hash+sel_write_member+3+28800+_001097_hash+NULL
85288 +_001098_hash+sel_write_relabel+3+55195+_001098_hash+NULL
85289 +_001099_hash+sel_write_user+3+45060+_001099_hash+NULL
85290 +_001100_hash+__seq_open_private+3+40715+_001100_hash+NULL
85291 +_001101_hash+serverworks_create_gatt_pages+1+46582+_001101_hash+NULL
85292 +_001102_hash+set_connectable+4+56458+_001102_hash+NULL
85293 +_001103_hash+set_dev_class+4+39645+_001697_hash+NULL+nohasharray
85294 +_001104_hash+set_discoverable+4+48141+_001104_hash+NULL
85295 +_001105_hash+setkey+3+14987+_001105_hash+NULL
85296 +_001106_hash+set_le+4+30581+_001106_hash+NULL
85297 +_001107_hash+set_link_security+4+4502+_001107_hash+NULL
85298 +_001108_hash+set_local_name+4+55757+_001108_hash+NULL
85299 +_001109_hash+set_powered+4+12129+_001109_hash+NULL
85300 +_001110_hash+set_ssp+4+62411+_001110_hash+NULL
85301 +_001111_hash+sg_build_sgat+3+60179+_001111_hash+&_000305_hash
85302 +_001112_hash+sg_read_oxfer+3+51724+_001112_hash+NULL
85303 +_001113_hash+shmem_xattr_set+4+11843+_001113_hash+NULL
85304 +_001114_hash+simple_alloc_urb+3+60420+_001114_hash+NULL
85305 +_001115_hash+sisusb_send_bridge_packet+2+11649+_001115_hash+NULL
85306 +_001116_hash+sisusb_send_packet+2+20891+_001116_hash+NULL
85307 +_001117_hash+skb_add_data_nocache+4+4682+_001117_hash+NULL
85308 +_001118_hash+skb_copy_datagram_from_iovec+2-5-4+52014+_001118_hash+NULL
85309 +_001121_hash+skb_copy_to_page_nocache+6+58624+_001121_hash+NULL
85310 +_001122_hash+sk_chk_filter+2+42095+_001122_hash+NULL
85311 +_001123_hash+skcipher_sendmsg+4+30290+_001123_hash+NULL
85312 +_001124_hash+sl_change_mtu+2+7396+_001124_hash+NULL
85313 +_001125_hash+slhc_init+1-2+58135+_001125_hash+&_000894_hash
85314 +_001127_hash+sm501_create_subdev+3-4+48668+_001127_hash+NULL
85315 +_001129_hash+smk_write_access+3+49561+_001129_hash+NULL
85316 +_001130_hash+snapshot_write+3+28351+_001130_hash+NULL
85317 +_001131_hash+snd_ac97_pcm_assign+2+30218+_001131_hash+NULL
85318 +_001132_hash+snd_card_create+4+64418+_001411_hash+NULL+nohasharray
85319 +_001133_hash+snd_emux_create_port+3+42533+_001133_hash+NULL
85320 +_001134_hash+snd_gus_dram_write+4+38784+_001134_hash+NULL
85321 +_001135_hash+snd_midi_channel_alloc_set+1+28153+_001135_hash+NULL
85322 +_001136_hash+_snd_pcm_lib_alloc_vmalloc_buffer+2+17820+_001136_hash+NULL
85323 +_001137_hash+snd_pcm_oss_sync1+2+45298+_001137_hash+NULL
85324 +_001138_hash+snd_pcm_oss_write+3+38108+_001138_hash+NULL
85325 +_001139_hash+snd_pcm_plugin_build+5+25505+_001139_hash+NULL
85326 +_001140_hash+snd_rawmidi_kernel_write+3+25106+_001140_hash+NULL
85327 +_001141_hash+snd_rawmidi_write+3+28008+_001141_hash+NULL
85328 +_001142_hash+snd_rme32_playback_copy+5+43732+_001142_hash+NULL
85329 +_001143_hash+snd_rme96_playback_copy+5+13111+_001143_hash+NULL
85330 +_001144_hash+snd_seq_device_new+4+31753+_001144_hash+NULL
85331 +_001145_hash+snd_seq_oss_readq_new+2+14283+_001145_hash+NULL
85332 +_001146_hash+snd_vx_create+4+40948+_001146_hash+NULL
85333 +_001147_hash+sock_setsockopt+5+50088+_001147_hash+NULL
85334 +_001148_hash+sound_write+3+5102+_001148_hash+NULL
85335 +_001149_hash+_sp2d_alloc+1+16944+_001149_hash+NULL
85336 +_001150_hash+spi_alloc_master+2+45223+_001150_hash+NULL
85337 +_001151_hash+spidev_message+3+5518+_001151_hash+NULL
85338 +_001152_hash+spi_register_board_info+2+35651+_001152_hash+NULL
85339 +_001153_hash+squashfs_cache_init+2+41656+_001153_hash+NULL
85340 +_001154_hash+squashfs_read_data+6+59440+_001154_hash+NULL
85341 +_001155_hash+srp_alloc_iu+2+44227+_001155_hash+NULL
85342 +_001156_hash+srp_iu_pool_alloc+2+17920+_001156_hash+NULL
85343 +_001157_hash+srp_ring_alloc+2+26760+_001157_hash+NULL
85344 +_001159_hash+start_isoc_chain+2+565+_001159_hash+NULL
85345 +_001160_hash+stk_prepare_sio_buffers+2+57168+_001160_hash+NULL
85346 +_001161_hash+store_iwmct_log_level+4+60209+_001161_hash+NULL
85347 +_001162_hash+store_iwmct_log_level_fw+4+1974+_001162_hash+NULL
85348 +_001163_hash+st_write+3+16874+_001163_hash+NULL
85349 +_001164_hash+svc_pool_map_alloc_arrays+2+47181+_001164_hash+NULL
85350 +_001165_hash+symtab_init+2+61050+_001165_hash+NULL
85351 +_001166_hash+sys_bind+3+10799+_001166_hash+NULL
85352 +_001167_hash+sys_connect+3+15291+_003291_hash+NULL+nohasharray
85353 +_001168_hash+sys_flistxattr+3+41407+_001168_hash+NULL
85354 +_001169_hash+sys_fsetxattr+4+49736+_001169_hash+NULL
85355 +_001170_hash+sysfs_write_file+3+57116+_001170_hash+NULL
85356 +_001171_hash+sys_ipc+3+4889+_001171_hash+NULL
85357 +_001172_hash+sys_keyctl+4+33708+_001172_hash+&_000974_hash
85358 +_001173_hash+sys_listxattr+3+27833+_001173_hash+NULL
85359 +_001174_hash+sys_llistxattr+3+4532+_001174_hash+NULL
85360 +_001175_hash+sys_lsetxattr+4+61177+_001175_hash+NULL
85361 +_001176_hash+sys_mq_timedsend+3+57661+_001176_hash+NULL
85362 +_001177_hash+sys_sched_setaffinity+2+32046+_001177_hash+NULL
85363 +_001178_hash+sys_semop+3+39457+_001178_hash+NULL
85364 +_001179_hash+sys_sendto+6+20809+_001179_hash+NULL
85365 +_001180_hash+sys_setxattr+4+37880+_001180_hash+NULL
85366 +_001181_hash+t4_alloc_mem+1+32342+_001181_hash+NULL
85367 +_001182_hash+tcf_hash_create+4+54360+_001182_hash+NULL
85368 +_001183_hash+__team_options_register+3+63941+_001183_hash+NULL
85369 +_001184_hash+test_unaligned_bulk+3+52333+_001184_hash+NULL
85370 +_001185_hash+tifm_alloc_adapter+1+10903+_001185_hash+NULL
85371 +_001186_hash+timeout_write+3+50991+_001186_hash+NULL
85372 +_001187_hash+tipc_link_send_sections_fast+4+37920+_001187_hash+NULL
85373 +_001188_hash+tipc_subseq_alloc+1+5957+_001188_hash+NULL
85374 +_001189_hash+tm6000_read_write_usb+7+50774+_002917_hash+NULL+nohasharray
85375 +_001190_hash+tnode_alloc+1+49407+_001190_hash+NULL
85376 +_001191_hash+tomoyo_commit_ok+2+20167+_001191_hash+NULL
85377 +_001192_hash+tomoyo_scan_bprm+2-4+15642+_001192_hash+NULL
85378 +_001194_hash+tps65910_i2c_write+3+39531+_001194_hash+NULL
85379 +_001195_hash+ts_write+3+64336+_001195_hash+NULL
85380 +_001196_hash+ttusb2_msg+4+3100+_001196_hash+NULL
85381 +_001197_hash+tty_write+3+5494+_001197_hash+NULL
85382 +_001198_hash+ubi_dbg_check_all_ff+4+59810+_001198_hash+NULL
85383 +_001199_hash+ubi_dbg_check_write+5+48525+_001199_hash+NULL
85384 +_001200_hash+ubifs_setxattr+4+59650+_001370_hash+NULL+nohasharray
85385 +_001201_hash+udf_sb_alloc_partition_maps+2+62313+_001201_hash+NULL
85386 +_001202_hash+udplite_getfrag+3-4+14479+_001202_hash+NULL
85387 +_001204_hash+ulong_write_file+3+26485+_001204_hash+NULL
85388 +_001205_hash+unix_dgram_sendmsg+4+45699+_001205_hash+NULL
85389 +_001206_hash+unix_stream_sendmsg+4+61455+_001206_hash+NULL
85390 +_001207_hash+unlink_queued+3-4+645+_001207_hash+NULL
85391 +_001208_hash+update_pmkid+4+2481+_001208_hash+NULL
85392 +_001209_hash+usb_alloc_coherent+2+65444+_001209_hash+NULL
85393 +_001210_hash+uvc_alloc_buffers+2+9656+_001210_hash+NULL
85394 +_001211_hash+uvc_alloc_entity+3-4+20836+_001211_hash+NULL
85395 +_001212_hash+v4l2_ctrl_new+7+38725+_001212_hash+NULL
85396 +_001213_hash+v4l2_event_subscribe+3+19510+_001213_hash+NULL
85397 +_001214_hash+vb2_read+3+42703+_001214_hash+NULL
85398 +_001215_hash+vb2_write+3+31948+_001215_hash+NULL
85399 +_001216_hash+vc_resize+2-3+3585+_001216_hash+NULL
85400 +_001218_hash+__vhost_add_used_n+3+26554+_001218_hash+NULL
85401 +_001219_hash+__videobuf_alloc_vb+1+27062+_001219_hash+NULL
85402 +_001220_hash+videobuf_dma_init_kernel+3+6963+_001220_hash+NULL
85403 +_001221_hash+virtqueue_add_buf+3-4+59470+_001221_hash+NULL
85404 +_001223_hash+vmalloc+1+15464+_001223_hash+NULL
85405 +_001224_hash+vmalloc_to_sg+2+58354+_001224_hash+NULL
85406 +_001225_hash+vol_cdev_write+3+40915+_001225_hash+NULL
85407 +_001226_hash+vxge_device_register+4+7752+_001226_hash+NULL
85408 +_001227_hash+__vxge_hw_channel_allocate+3+55462+_001227_hash+NULL
85409 +_001228_hash+vzalloc+1+47421+_001228_hash+NULL
85410 +_001229_hash+vzalloc_node+1+23424+_001229_hash+NULL
85411 +_001230_hash+wa_nep_queue+2+8858+_001230_hash+NULL
85412 +_001231_hash+__wa_xfer_setup_segs+2+56725+_001231_hash+NULL
85413 +_001232_hash+wiphy_new+2+2482+_001232_hash+NULL
85414 +_001233_hash+wpan_phy_alloc+1+48056+_001233_hash+NULL
85415 +_001234_hash+wusb_ccm_mac+7+32199+_001234_hash+NULL
85416 +_001235_hash+x25_sendmsg+4+12487+_001235_hash+NULL
85417 +_001236_hash+xfrm_hash_alloc+1+10997+_001236_hash+NULL
85418 +_001237_hash+_xfs_buf_get_pages+2+46811+_001237_hash+NULL
85419 +_001238_hash+xfs_da_buf_make+1+55845+_001238_hash+NULL
85420 +_001239_hash+xfs_da_grow_inode_int+3+21785+_001239_hash+NULL
85421 +_001240_hash+xfs_dir_cilookup_result+3+64288+_003139_hash+NULL+nohasharray
85422 +_001241_hash+xfs_iext_add_indirect_multi+3+32400+_001241_hash+NULL
85423 +_001242_hash+xfs_iext_inline_to_direct+2+12384+_001242_hash+NULL
85424 +_001243_hash+xfs_iroot_realloc+2+46826+_001243_hash+NULL
85425 +_001244_hash+xhci_alloc_stream_info+3+63902+_001244_hash+NULL
85426 +_001245_hash+xlog_recover_add_to_trans+4+62839+_001245_hash+NULL
85427 +_001246_hash+xprt_alloc+2+1475+_001246_hash+NULL
85428 +_001247_hash+xt_alloc_table_info+1+57903+_001247_hash+NULL
85429 +_001248_hash+_zd_iowrite32v_async_locked+3+39034+_001248_hash+NULL
85430 +_001249_hash+zd_usb_iowrite16v+3+49744+_001249_hash+NULL
85431 +_001250_hash+acpi_ds_build_internal_package_obj+3+58271+_001250_hash+NULL
85432 +_001251_hash+acpi_system_read_event+3+55362+_001251_hash+NULL
85433 +_001252_hash+acpi_ut_create_buffer_object+1+42030+_001252_hash+NULL
85434 +_001253_hash+acpi_ut_create_package_object+1+17594+_001253_hash+NULL
85435 +_001254_hash+acpi_ut_create_string_object+1+15360+_001254_hash+NULL
85436 +_001255_hash+ad7879_spi_multi_read+3+8218+_001255_hash+NULL
85437 +_001256_hash+add_child+4+45201+_001256_hash+NULL
85438 +_001257_hash+add_port+2+54941+_001257_hash+NULL
85439 +_001258_hash+adu_read+3+24177+_001258_hash+NULL
85440 +_001259_hash+afs_cell_create+2+27346+_001259_hash+NULL
85441 +_001260_hash+agp_generic_alloc_user+1+9470+_001260_hash+NULL
85442 +_001261_hash+alloc_agpphysmem_i8xx+1+39427+_001261_hash+NULL
85443 +_001262_hash+allocate_cnodes+1+5329+_001262_hash+NULL
85444 +_001263_hash+___alloc_bootmem+1+11410+_001263_hash+NULL
85445 +_001264_hash+__alloc_bootmem_nopanic+1+65397+_001264_hash+NULL
85446 +_001265_hash+alloc_bulk_urbs_generic+5+12127+_001265_hash+NULL
85447 +_001266_hash+alloc_candev+1-2+7776+_001266_hash+NULL
85448 +_001268_hash+____alloc_ei_netdev+1+51475+_001268_hash+NULL
85449 +_001269_hash+alloc_etherdev_mqs+1+36450+_001269_hash+NULL
85450 +_001270_hash+alloc_extent_buffer+3+52824+_001270_hash+NULL
85451 +_001271_hash+alloc_fcdev+1+18780+_001271_hash+NULL
85452 +_001272_hash+alloc_fddidev+1+15382+_001272_hash+NULL
85453 +_001273_hash+alloc_hippi_dev+1+51320+_001273_hash+NULL
85454 +_001274_hash+alloc_irdadev+1+19140+_001274_hash+NULL
85455 +_001275_hash+alloc_ltalkdev+1+38071+_001275_hash+NULL
85456 +_001276_hash+alloc_one_pg_vec_page+1+10747+_001276_hash+NULL
85457 +_001277_hash+alloc_orinocodev+1+21371+_001277_hash+NULL
85458 +_001279_hash+alloc_trdev+1+16399+_001279_hash+NULL
85459 +_001280_hash+async_setkey+3+35521+_001280_hash+NULL
85460 +_001281_hash+ata_host_alloc_pinfo+3+17325+_001281_hash+NULL
85461 +_001284_hash+ath6kl_connect_event+7-9-8+14267+_001284_hash+NULL
85462 +_001285_hash+ath6kl_fwlog_block_read+3+49836+_001285_hash+NULL
85463 +_001286_hash+ath6kl_fwlog_read+3+32101+_001286_hash+NULL
85464 +_001287_hash+ath_rx_init+2+43564+_001287_hash+NULL
85465 +_001288_hash+ath_tx_init+2+60515+_001288_hash+NULL
85466 +_001289_hash+atm_get_addr+3+31221+_001289_hash+NULL
85467 +_001290_hash+av7110_ipack_init+2+46655+_001290_hash+NULL
85468 +_001291_hash+bdx_rxdb_create+1+46525+_001291_hash+NULL
85469 +_001292_hash+bdx_tx_db_init+2+41719+_001292_hash+NULL
85470 +_001293_hash+bio_map_kern+3+64751+_001293_hash+NULL
85471 +_001294_hash+bits_to_user+3+47733+_001294_hash+NULL
85472 +_001295_hash+__blk_queue_init_tags+2+9778+_001295_hash+NULL
85473 +_001296_hash+blk_queue_resize_tags+2+28670+_001296_hash+NULL
85474 +_001297_hash+blk_rq_map_user_iov+5+16772+_001297_hash+NULL
85475 +_001298_hash+bm_init+2+13529+_001298_hash+NULL
85476 +_001299_hash+brcmf_alloc_wdev+1+60347+_001299_hash+NULL
85477 +_001300_hash+btrfs_insert_dir_item+4+59304+_001300_hash+NULL
85478 +_001301_hash+btrfs_map_block+3+64379+_001301_hash+NULL
85479 +_001302_hash+c4_add_card+3+54968+_001302_hash+NULL
85480 +_001303_hash+cache_read+3+24790+_001303_hash+NULL
85481 +_001304_hash+cache_write+3+13589+_001304_hash+NULL
85482 +_001305_hash+calc_hmac+3+32010+_001305_hash+NULL
85483 +_001306_hash+ccid_getsockopt_builtin_ccids+2+53634+_001306_hash+NULL
85484 +_001307_hash+ceph_copy_page_vector_to_user+4+31270+_001307_hash+NULL
85485 +_001308_hash+ceph_read_dir+3+17005+_001308_hash+NULL
85486 +_001309_hash+cfg80211_roamed+5-7+32632+_001309_hash+NULL
85487 +_001311_hash+ci_ll_init+3+12930+_001311_hash+NULL
85488 +_001312_hash+coda_psdev_read+3+35029+_001312_hash+NULL
85489 +_001313_hash+construct_key_and_link+4+8321+_001313_hash+NULL
85490 +_001314_hash+copy_counters_to_user+5+17027+_001824_hash+NULL+nohasharray
85491 +_001315_hash+copy_entries_to_user+1+52367+_001315_hash+NULL
85492 +_001316_hash+copy_from_buf+4+27308+_001316_hash+NULL
85493 +_001317_hash+copy_oldmem_page+3-1+26164+_001317_hash+NULL
85494 +_001318_hash+copy_to_user_fromio+3+57432+_001318_hash+NULL
85495 +_001319_hash+cryptd_hash_setkey+3+42781+_001319_hash+NULL
85496 +_001320_hash+crypto_authenc_esn_setkey+3+6985+_001320_hash+NULL
85497 +_001321_hash+crypto_authenc_setkey+3+80+_003311_hash+NULL+nohasharray
85498 +_001322_hash+cx18_copy_buf_to_user+4+22735+_001322_hash+NULL
85499 +_001324_hash+cxgbi_ddp_reserve+4+30091+_001324_hash+NULL
85500 +_001325_hash+datablob_hmac_append+3+40038+_001325_hash+NULL
85501 +_001326_hash+datablob_hmac_verify+4+24786+_001326_hash+NULL
85502 +_001327_hash+dataflash_read_fact_otp+3-2+33204+_001327_hash+NULL
85503 +_001328_hash+dataflash_read_user_otp+3-2+14536+_001328_hash+&_000201_hash
85504 +_001329_hash+dccp_feat_register_sp+5+17914+_001329_hash+NULL
85505 +_001330_hash+ddb_input_read+3+9743+_001330_hash+NULL
85506 +_001331_hash+dev_read+3+56369+_001331_hash+NULL
85507 +_001332_hash+diva_os_copy_to_user+4+48508+_001332_hash+NULL
85508 +_001333_hash+diva_os_malloc+2+16406+_001333_hash+NULL
85509 +_001334_hash+dlm_dir_lookup+4+56662+_001334_hash+NULL
85510 +_001335_hash+dm_vcalloc+1-2+16814+_001335_hash+NULL
85511 +_001337_hash+do_proc_readlink+3+14096+_001337_hash+NULL
85512 +_001338_hash+do_readlink+2+43518+_001338_hash+NULL
85513 +_001339_hash+__do_replace+5+37227+_001339_hash+NULL
85514 +_001340_hash+do_sigpending+2+9766+_001340_hash+NULL
85515 +_001341_hash+drbd_setsockopt+5+16280+_001341_hash+&_000371_hash
85516 +_001342_hash+dsp_buffer_alloc+2+11684+_001342_hash+NULL
85517 +_001343_hash+dump_midi+3+51040+_001343_hash+NULL
85518 +_001344_hash+dvb_dmxdev_set_buffer_size+2+55643+_001344_hash+NULL
85519 +_001345_hash+dvb_dvr_set_buffer_size+2+9840+_001345_hash+NULL
85520 +_001346_hash+dvb_ringbuffer_pkt_read_user+3-5-2+4303+_001346_hash+NULL
85521 +_001348_hash+dvb_ringbuffer_read_user+3+56702+_001348_hash+NULL
85522 +_001349_hash+ecryptfs_filldir+3+6622+_001349_hash+NULL
85523 +_001350_hash+ecryptfs_readlink+3+40775+_001350_hash+NULL
85524 +_001351_hash+ecryptfs_send_message+2+18322+_001351_hash+NULL
85525 +_001352_hash+em28xx_init_isoc+4+62883+_001352_hash+&_000721_hash
85526 +_001353_hash+et61x251_read+3+25420+_001353_hash+NULL
85527 +_001354_hash+ext4_add_new_descs+3+19509+_001354_hash+NULL
85528 +_001355_hash+fat_ioctl_filldir+3+36621+_001355_hash+NULL
85529 +_001356_hash+fd_copyout+3+59323+_001356_hash+NULL
85530 +_001357_hash+f_hidg_read+3+6238+_001357_hash+NULL
85531 +_001358_hash+filldir+3+55137+_001358_hash+NULL
85532 +_001359_hash+filldir64+3+46469+_001359_hash+NULL
85533 +_001360_hash+fops_read+3+40672+_001360_hash+NULL
85534 +_001361_hash+from_buffer+3+18625+_001361_hash+NULL
85535 +_001362_hash+fsm_init+2+16134+_001362_hash+NULL
85536 +_001363_hash+get_subdir+3+62581+_001363_hash+NULL
85537 +_001364_hash+gspca_dev_probe+4+2570+_001364_hash+NULL
85538 +_001365_hash+handle_received_packet+3+22457+_001365_hash+NULL
85539 +_001366_hash+hash_setkey+3+48310+_001366_hash+NULL
85540 +_001367_hash+hdlcdrv_register+2+6792+_001367_hash+NULL
85541 +_001368_hash+hdpvr_read+3+9273+_001368_hash+NULL
85542 +_001369_hash+hid_input_report+4+32458+_001369_hash+NULL
85543 +_001370_hash+hidraw_read+3+59650+_001370_hash+&_001200_hash
85544 +_001371_hash+HiSax_readstatus+2+15752+_001371_hash+NULL
85545 +_001373_hash+__hwahc_op_set_gtk+4+42038+_001373_hash+NULL
85546 +_001374_hash+__hwahc_op_set_ptk+5+36510+_001374_hash+NULL
85547 +_001375_hash+ib_copy_to_udata+3+27525+_001375_hash+NULL
85548 +_001376_hash+idetape_chrdev_read+3+2097+_001376_hash+NULL
85549 +_001377_hash+ieee80211_alloc_hw+1+43829+_001377_hash+NULL
85550 +_001378_hash+ieee80211_bss_info_update+4+13991+_001378_hash+NULL
85551 +_001379_hash+ilo_read+3+32531+_001379_hash+NULL
85552 +_001380_hash+init_map_ipmac+3-4+63896+_001380_hash+NULL
85553 +_001382_hash+init_tid_tabs+2-4-3+13252+_001382_hash+NULL
85554 +_001385_hash+iowarrior_read+3+53483+_001385_hash+NULL
85555 +_001386_hash+ipv6_getsockopt_sticky+5+56711+_001386_hash+NULL
85556 +_001387_hash+ipwireless_send_packet+4+8328+_001387_hash+NULL
85557 +_001388_hash+ipx_sendmsg+4+1362+_001388_hash+NULL
85558 +_001389_hash+iscsi_conn_setup+2+35159+_001389_hash+NULL
85559 +_001390_hash+iscsi_create_session+3+51647+_001390_hash+NULL
85560 +_001391_hash+iscsi_host_alloc+2+36671+_001391_hash+NULL
85561 +_001392_hash+iscsi_session_setup+4-5+196+_001392_hash+NULL
85562 +_001394_hash+iscsit_find_cmd_from_itt_or_dump+3+17194+_001701_hash+NULL+nohasharray
85563 +_001395_hash+isdn_ppp_read+4+50356+_001395_hash+NULL
85564 +_001396_hash+isku_sysfs_read+6+58806+_001396_hash+NULL
85565 +_001397_hash+isku_sysfs_write+6+49767+_001397_hash+NULL
85566 +_001398_hash+iso_alloc_urb+4-5+45206+_001398_hash+NULL
85567 +_001400_hash+ivtv_copy_buf_to_user+4+6159+_001400_hash+NULL
85568 +_001401_hash+iwm_rx_handle+3+24899+_001401_hash+NULL
85569 +_001402_hash+iwm_wdev_alloc+1+38415+_001402_hash+NULL
85570 +_001403_hash+jbd2_alloc+1+41359+_001403_hash+NULL
85571 +_001404_hash+jffs2_do_link+6+42048+_001404_hash+NULL
85572 +_001405_hash+jffs2_do_unlink+4+62020+_001405_hash+NULL
85573 +_001406_hash+jffs2_security_setxattr+4+62107+_001406_hash+NULL
85574 +_001407_hash+jffs2_trusted_setxattr+4+17048+_001407_hash+NULL
85575 +_001408_hash+jffs2_user_setxattr+4+10182+_001408_hash+NULL
85576 +_001409_hash+kernel_setsockopt+5+35913+_001409_hash+NULL
85577 +_001410_hash+keyctl_describe_key+3+36853+_001410_hash+NULL
85578 +_001411_hash+keyctl_get_security+3+64418+_001411_hash+&_001132_hash
85579 +_001412_hash+keyring_read+3+13438+_001412_hash+NULL
85580 +_001413_hash+kfifo_copy_to_user+3+20646+_001413_hash+NULL
85581 +_001414_hash+kmem_zalloc_large+1+56128+_001414_hash+NULL
85582 +_001415_hash+kmp_init+2+41373+_001415_hash+NULL
85583 +_001416_hash+koneplus_sysfs_write+6+35993+_001416_hash+NULL
85584 +_001417_hash+kvm_clear_guest_page+4+2308+_001417_hash+NULL
85585 +_001418_hash+kvm_read_nested_guest_page+5+13337+_001418_hash+NULL
85586 +_001419_hash+l2cap_create_basic_pdu+3+24869+_001419_hash+&_001034_hash
85587 +_001420_hash+l2cap_create_connless_pdu+3+37327+_001420_hash+&_000966_hash
85588 +_001421_hash+l2cap_create_iframe_pdu+3+51801+_001421_hash+NULL
85589 +_001422_hash+__lgwrite+4+57669+_001422_hash+NULL
85590 +_001423_hash+libfc_host_alloc+2+7917+_001423_hash+NULL
85591 +_001424_hash+llcp_sock_sendmsg+4+1092+_001424_hash+NULL
85592 +_001425_hash+macvtap_get_user+4+28185+_001425_hash+NULL
85593 +_001426_hash+mcam_v4l_read+3+36513+_001426_hash+NULL
85594 +_001427_hash+mce_async_out+3+58056+_001427_hash+NULL
85595 +_001428_hash+mce_flush_rx_buffer+2+14976+_001428_hash+NULL
85596 +_001429_hash+mdc800_device_read+3+22896+_001429_hash+NULL
85597 +_001430_hash+memcpy_toiovec+3+54166+_001430_hash+&_000867_hash
85598 +_001431_hash+memcpy_toiovecend+3-4+19736+_001431_hash+NULL
85599 +_001433_hash+mgt_set_varlen+4+60916+_001433_hash+NULL
85600 +_001434_hash+mlx4_en_create_rx_ring+3+62498+_001434_hash+NULL
85601 +_001435_hash+mlx4_en_create_tx_ring+4+48501+_001435_hash+NULL
85602 +_001436_hash+mon_bin_get_event+4+52863+_001436_hash+NULL
85603 +_001437_hash+mousedev_read+3+47123+_001437_hash+NULL
85604 +_001438_hash+move_addr_to_user+2+2868+_001438_hash+NULL
85605 +_001439_hash+mpihelp_mul+5-3+27805+_001439_hash+NULL
85606 +_001441_hash+mpi_lshift_limbs+2+9337+_001441_hash+NULL
85607 +_001442_hash+msnd_fifo_alloc+2+23179+_001442_hash+NULL
85608 +_001443_hash+mtdswap_init+2+55719+_001443_hash+NULL
85609 +_001444_hash+neigh_hash_grow+2+17283+_001444_hash+NULL
85610 +_001445_hash+nfs4_realloc_slot_table+2+22859+_001445_hash+NULL
85611 +_001446_hash+nfs_idmap_get_key+2+39616+_001446_hash+NULL
85612 +_001447_hash+nsm_get_handle+4+52089+_001447_hash+NULL
85613 +_001448_hash+ntfs_malloc_nofs+1+49572+_001448_hash+NULL
85614 +_001449_hash+ntfs_malloc_nofs_nofail+1+63631+_001449_hash+NULL
85615 +_001450_hash+nvme_create_queue+3+170+_001450_hash+NULL
85616 +_001451_hash+ocfs2_control_write+3+54737+_001451_hash+NULL
85617 +_001452_hash+orinoco_add_extscan_result+3+18207+_001452_hash+NULL
85618 +_001454_hash+override_release+2+52032+_001454_hash+NULL
85619 +_001455_hash+packet_snd+3+13634+_001455_hash+NULL
85620 +_001456_hash+pcbit_stat+2+27364+_001456_hash+NULL
85621 +_001457_hash+pcpu_extend_area_map+2+12589+_001457_hash+NULL
85622 +_001458_hash+pg_read+3+17276+_001458_hash+NULL
85623 +_001459_hash+picolcd_debug_eeprom_read+3+14549+_001459_hash+NULL
85624 +_001460_hash+pkt_alloc_packet_data+1+37928+_001460_hash+NULL
85625 +_001461_hash+pmcraid_build_passthrough_ioadls+2+62034+_001461_hash+NULL
85626 +_001462_hash+pms_capture+4+27142+_001462_hash+NULL
85627 +_001463_hash+posix_clock_register+2+5662+_001463_hash+NULL
85628 +_001464_hash+printer_read+3+54851+_001464_hash+NULL
85629 +_001465_hash+__proc_file_read+3+54978+_001465_hash+NULL
85630 +_001466_hash+pt_read+3+49136+_001466_hash+NULL
85631 +_001467_hash+put_cmsg+4+36589+_001467_hash+NULL
85632 +_001468_hash+pvr2_ioread_read+3+10720+_001505_hash+NULL+nohasharray
85633 +_001469_hash+pwc_video_read+3+51735+_001469_hash+NULL
85634 +_001470_hash+px_raw_event+4+49371+_001470_hash+NULL
85635 +_001471_hash+qcam_read+3+13977+_001471_hash+NULL
85636 +_001472_hash+rawv6_sendmsg+4+20080+_001472_hash+NULL
85637 +_001473_hash+rds_sendmsg+4+40976+_001473_hash+NULL
85638 +_001474_hash+read_flush+3+43851+_001474_hash+NULL
85639 +_001475_hash+read_profile+3+27859+_001475_hash+NULL
85640 +_001476_hash+read_vmcore+3+26501+_001476_hash+NULL
85641 +_001477_hash+redirected_tty_write+3+65297+_001477_hash+NULL
85642 +_001478_hash+__register_chrdev+2-3+54223+_001478_hash+NULL
85643 +_001480_hash+regmap_raw_write+4+53803+_001480_hash+NULL
85644 +_001481_hash+reiserfs_allocate_list_bitmaps+3+21732+_001481_hash+NULL
85645 +_001482_hash+reiserfs_resize+2+34377+_001482_hash+NULL
85646 +_001483_hash+request_key_auth_read+3+24109+_001483_hash+NULL
85647 +_001484_hash+rfkill_fop_read+3+54711+_001484_hash+NULL
85648 +_001485_hash+rng_dev_read+3+41581+_001485_hash+NULL
85649 +_001486_hash+roccat_read+3+41093+_001486_hash+NULL
85650 +_001487_hash+sco_sock_sendmsg+4+62542+_001487_hash+NULL
85651 +_001488_hash+scsi_register+2+49094+_001488_hash+NULL
85652 +_001489_hash+sctp_getsockopt_events+2+3607+_001489_hash+NULL
85653 +_001490_hash+sctp_getsockopt_maxburst+2+42941+_001490_hash+NULL
85654 +_001491_hash+sctp_getsockopt_maxseg+2+10737+_001491_hash+NULL
85655 +_001492_hash+sctpprobe_read+3+17741+_001492_hash+NULL
85656 +_001493_hash+sdhci_alloc_host+2+7509+_001493_hash+NULL
85657 +_001494_hash+selinux_inode_post_setxattr+4+26037+_001494_hash+NULL
85658 +_001495_hash+selinux_inode_setsecurity+4+18148+_001495_hash+NULL
85659 +_001496_hash+selinux_inode_setxattr+4+10708+_001496_hash+NULL
85660 +_001497_hash+selinux_secctx_to_secid+2+63744+_001497_hash+NULL
85661 +_001498_hash+selinux_setprocattr+4+55611+_001498_hash+NULL
85662 +_001499_hash+sel_write_context+3+25726+_002397_hash+NULL+nohasharray
85663 +_001500_hash+seq_copy_in_user+3+18543+_001500_hash+NULL
85664 +_001501_hash+seq_open_net+4+8968+_001594_hash+NULL+nohasharray
85665 +_001502_hash+seq_open_private+3+61589+_001502_hash+NULL
85666 +_001503_hash+set_arg+3+42824+_001503_hash+NULL
85667 +_001504_hash+sg_read+3+25799+_001504_hash+NULL
85668 +_001505_hash+shash_async_setkey+3+10720+_001505_hash+&_001468_hash
85669 +_001506_hash+shash_compat_setkey+3+12267+_001506_hash+NULL
85670 +_001507_hash+shmem_setxattr+4+55867+_001507_hash+NULL
85671 +_001508_hash+simple_read_from_buffer+2-5+55957+_001508_hash+NULL
85672 +_001511_hash+sm_checker_extend+2+23615+_001511_hash+NULL
85673 +_001512_hash+sn9c102_read+3+29305+_001512_hash+NULL
85674 +_001513_hash+snd_es1938_capture_copy+5+25930+_001513_hash+NULL
85675 +_001514_hash+snd_gus_dram_peek+4+9062+_001514_hash+NULL
85676 +_001515_hash+snd_hdsp_capture_copy+5+4011+_001515_hash+NULL
85677 +_001516_hash+snd_korg1212_copy_to+6+92+_001516_hash+NULL
85678 +_001517_hash+snd_opl4_mem_proc_read+5+63774+_001517_hash+NULL
85679 +_001518_hash+snd_pcm_alloc_vmalloc_buffer+2+44595+_001518_hash+NULL
85680 +_001519_hash+snd_pcm_oss_read1+3+63771+_001519_hash+NULL
85681 +_001520_hash+snd_rawmidi_kernel_read1+4+36740+_001520_hash+NULL
85682 +_001521_hash+snd_rme9652_capture_copy+5+10287+_001521_hash+NULL
85683 +_001522_hash+srp_target_alloc+3+37288+_001522_hash+NULL
85684 +_001523_hash+stk_allocate_buffers+2+16291+_001523_hash+NULL
85685 +_001524_hash+store_ifalias+4+35088+_001524_hash+NULL
85686 +_001525_hash+store_msg+3+56417+_001525_hash+NULL
85687 +_001526_hash+str_to_user+2+11411+_001526_hash+NULL
85688 +_001527_hash+subbuf_read_actor+3+2071+_001527_hash+NULL
85689 +_001528_hash+sys_fgetxattr+4+25166+_001528_hash+NULL
85690 +_001529_hash+sys_gethostname+2+49698+_001529_hash+NULL
85691 +_001530_hash+sys_getxattr+4+37418+_001530_hash+NULL
85692 +_001531_hash+sys_kexec_load+2+14222+_001531_hash+NULL
85693 +_001532_hash+sys_msgsnd+3+44537+_001532_hash+&_000129_hash
85694 +_001533_hash+sys_process_vm_readv+3-5+19090+_003125_hash+NULL+nohasharray
85695 +_001535_hash+sys_process_vm_writev+3-5+4928+_001535_hash+NULL
85696 +_001537_hash+sys_sched_getaffinity+2+60033+_001537_hash+NULL
85697 +_001538_hash+sys_setsockopt+5+35320+_001538_hash+NULL
85698 +_001539_hash+t3_init_l2t+1+8261+_001539_hash+NULL
85699 +_001540_hash+team_options_register+3+20091+_001540_hash+NULL
85700 +_001541_hash+tipc_send2name+6+16809+_001541_hash+NULL
85701 +_001542_hash+tipc_send2port+5+63935+_001542_hash+NULL
85702 +_001543_hash+tipc_send+4+51238+_001543_hash+NULL
85703 +_001544_hash+tm6000_i2c_recv_regs16+5+2949+_001544_hash+NULL
85704 +_001545_hash+tm6000_i2c_recv_regs+5+46215+_001545_hash+NULL
85705 +_001546_hash+tm6000_i2c_send_regs+5+20250+_001546_hash+NULL
85706 +_001547_hash+tnode_new+3+44757+_001547_hash+NULL
85707 +_001548_hash+tomoyo_read_self+3+33539+_001548_hash+NULL
85708 +_001549_hash+tomoyo_update_domain+2+5498+_001549_hash+NULL
85709 +_001550_hash+tomoyo_update_policy+2+40458+_001550_hash+NULL
85710 +_001551_hash+tpm_read+3+50344+_001551_hash+NULL
85711 +_001552_hash+TSS_rawhmac+3+17486+_001552_hash+NULL
85712 +_001553_hash+tt3650_ci_msg+4+57219+_001553_hash+NULL
85713 +_001554_hash+tun_get_user+3+33178+_001554_hash+NULL
85714 +_001555_hash+ubi_dbg_dump_flash+4+3870+_001555_hash+NULL
85715 +_001556_hash+ubi_io_write+4-5+15870+_001556_hash+&_000954_hash
85716 +_001558_hash+uio_read+3+49300+_001558_hash+NULL
85717 +_001559_hash+unix_seqpacket_sendmsg+4+27893+_001559_hash+NULL
85718 +_001560_hash+unlink1+3+63059+_001560_hash+NULL
85719 +_001562_hash+usb_allocate_stream_buffers+3+8964+_001562_hash+NULL
85720 +_001563_hash+usbdev_read+3+45114+_001563_hash+NULL
85721 +_001564_hash+usblp_read+3+57342+_003306_hash+NULL+nohasharray
85722 +_001565_hash+usbtmc_read+3+32377+_001565_hash+NULL
85723 +_001566_hash+usbvision_v4l2_read+3+34386+_001566_hash+NULL
85724 +_001567_hash+_usb_writeN_sync+4+31682+_001567_hash+NULL
85725 +_001568_hash+user_read+3+51881+_001568_hash+NULL
85726 +_001569_hash+v4l_stk_read+3+39672+_001569_hash+NULL
85727 +_001570_hash+vcs_read+3+8017+_001570_hash+NULL
85728 +_001571_hash+vdma_mem_alloc+1+6171+_001571_hash+NULL
85729 +_001572_hash+venus_create+4+20555+_001572_hash+NULL
85730 +_001573_hash+venus_link+5+32165+_001573_hash+NULL
85731 +_001574_hash+venus_lookup+4+8121+_001574_hash+NULL
85732 +_001575_hash+venus_mkdir+4+8967+_001575_hash+NULL
85733 +_001576_hash+venus_remove+4+59781+_001576_hash+NULL
85734 +_001577_hash+venus_rename+4-5+17707+_003279_hash+NULL+nohasharray
85735 +_001579_hash+venus_rmdir+4+45564+_001579_hash+NULL
85736 +_001580_hash+venus_symlink+4-6+23570+_001580_hash+NULL
85737 +_001582_hash+vfs_readlink+3+54368+_001582_hash+NULL
85738 +_001583_hash+vfs_readv+3+38011+_001583_hash+NULL
85739 +_001584_hash+vfs_writev+3+25278+_001584_hash+NULL
85740 +_001585_hash+vga_arb_read+3+4886+_001585_hash+NULL
85741 +_001586_hash+vhci_put_user+4+12604+_001586_hash+NULL
85742 +_001587_hash+vhost_add_used_n+3+10760+_001587_hash+NULL
85743 +_001588_hash+__videobuf_copy_to_user+4+15423+_001588_hash+NULL
85744 +_001589_hash+videobuf_pages_to_sg+2+3708+_001589_hash+NULL
85745 +_001590_hash+videobuf_vmalloc_to_sg+2+4548+_001590_hash+NULL
85746 +_001591_hash+virtnet_send_command+5-6+61993+_001591_hash+NULL
85747 +_001593_hash+vmbus_establish_gpadl+3+4495+_001593_hash+NULL
85748 +_001594_hash+vol_cdev_read+3+8968+_001594_hash+&_001501_hash
85749 +_001595_hash+w9966_v4l_read+3+31148+_001595_hash+NULL
85750 +_001596_hash+wdm_read+3+6549+_001596_hash+NULL
85751 +_001597_hash+wusb_prf+7+54261+_001597_hash+&_000063_hash
85752 +_001598_hash+xdi_copy_to_user+4+48900+_001598_hash+NULL
85753 +_001599_hash+xfs_buf_get_uncached+2+51477+_001599_hash+NULL
85754 +_001600_hash+xfs_efd_init+3+5463+_001600_hash+NULL
85755 +_001601_hash+xfs_efi_init+2+5476+_001601_hash+NULL
85756 +_001602_hash+xfs_iext_realloc_direct+2+20521+_001602_hash+NULL
85757 +_001603_hash+xfs_iext_realloc_indirect+2+59211+_001603_hash+NULL
85758 +_001604_hash+xfs_inumbers_fmt+3+12817+_001604_hash+NULL
85759 +_001605_hash+xlog_recover_add_to_cont_trans+4+44102+_001605_hash+NULL
85760 +_001606_hash+xz_dec_lzma2_create+2+36353+_002745_hash+NULL+nohasharray
85761 +_001607_hash+_zd_iowrite32v_locked+3+44725+_001607_hash+NULL
85762 +_001608_hash+aat2870_reg_read_file+3+12221+_001608_hash+NULL
85763 +_001609_hash+add_sctp_bind_addr+3+12269+_001609_hash+NULL
85764 +_001610_hash+aes_decrypt_fail_read+3+54815+_001610_hash+NULL
85765 +_001611_hash+aes_decrypt_interrupt_read+3+19910+_001611_hash+NULL
85766 +_001612_hash+aes_decrypt_packets_read+3+10155+_001612_hash+NULL
85767 +_001613_hash+aes_encrypt_fail_read+3+32562+_001613_hash+NULL
85768 +_001614_hash+aes_encrypt_interrupt_read+3+39919+_001614_hash+NULL
85769 +_001615_hash+aes_encrypt_packets_read+3+48666+_001615_hash+NULL
85770 +_001616_hash+afs_cell_lookup+2+8482+_001616_hash+NULL
85771 +_001617_hash+agp_allocate_memory+2+58761+_001617_hash+NULL
85772 +_001618_hash+__alloc_bootmem+1+31498+_001618_hash+NULL
85773 +_001619_hash+__alloc_bootmem_low+1+43423+_003150_hash+NULL+nohasharray
85774 +_001620_hash+__alloc_bootmem_node_nopanic+2+6432+_001620_hash+NULL
85775 +_001621_hash+alloc_cc770dev+1+48186+_001621_hash+NULL
85776 +_001622_hash+__alloc_ei_netdev+1+29338+_001622_hash+NULL
85777 +_001623_hash+__alloc_eip_netdev+1+51549+_001623_hash+NULL
85778 +_001624_hash+alloc_libipw+1+22708+_001624_hash+NULL
85779 +_001625_hash+alloc_pg_vec+2+8533+_001625_hash+NULL
85780 +_001626_hash+alloc_sja1000dev+1+17868+_001626_hash+NULL
85781 +_001627_hash+alloc_targets+2+8074+_001627_hash+NULL
85782 +_001630_hash+ath6kl_disconnect_timeout_read+3+3650+_001630_hash+NULL
85783 +_001631_hash+ath6kl_endpoint_stats_read+3+41554+_001631_hash+NULL
85784 +_001632_hash+ath6kl_fwlog_mask_read+3+2050+_001632_hash+NULL
85785 +_001633_hash+ath6kl_keepalive_read+3+44303+_001633_hash+NULL
85786 +_001634_hash+ath6kl_listen_int_read+3+10355+_001634_hash+NULL
85787 +_001635_hash+ath6kl_lrssi_roam_read+3+61022+_001635_hash+NULL
85788 +_001636_hash+ath6kl_regdump_read+3+14393+_001636_hash+NULL
85789 +_001637_hash+ath6kl_regread_read+3+25884+_001637_hash+NULL
85790 +_001638_hash+ath6kl_regwrite_read+3+48747+_001638_hash+NULL
85791 +_001639_hash+ath6kl_roam_table_read+3+26166+_001639_hash+NULL
85792 +_001640_hash+ath9k_debugfs_read_buf+3+25316+_001640_hash+NULL
85793 +_001641_hash+atk_debugfs_ggrp_read+3+29522+_001641_hash+NULL
85794 +_001642_hash+b43_debugfs_read+3+24425+_001642_hash+NULL
85795 +_001643_hash+b43legacy_debugfs_read+3+2473+_001643_hash+NULL
85796 +_001644_hash+bcm_recvmsg+4+43992+_001644_hash+NULL
85797 +_001645_hash+bfad_debugfs_read+3+13119+_001645_hash+NULL
85798 +_001646_hash+bfad_debugfs_read_regrd+3+57830+_001646_hash+NULL
85799 +_001647_hash+blk_init_tags+1+30592+_001647_hash+NULL
85800 +_001648_hash+blk_queue_init_tags+2+44355+_002686_hash+NULL+nohasharray
85801 +_001649_hash+blk_rq_map_kern+4+47004+_001649_hash+NULL
85802 +_001650_hash+bm_entry_read+3+10976+_001650_hash+NULL
85803 +_001651_hash+bm_status_read+3+19583+_001651_hash+NULL
85804 +_001652_hash+bnad_debugfs_read+3+50665+_001652_hash+NULL
85805 +_001653_hash+bnad_debugfs_read_regrd+3+51308+_001653_hash+NULL
85806 +_001654_hash+btmrvl_curpsmode_read+3+46939+_001654_hash+NULL
85807 +_001655_hash+btmrvl_gpiogap_read+3+4718+_001655_hash+NULL
85808 +_001656_hash+btmrvl_hscfgcmd_read+3+56303+_001656_hash+NULL
85809 +_001657_hash+btmrvl_hscmd_read+3+1614+_001657_hash+NULL
85810 +_001658_hash+btmrvl_hsmode_read+3+1647+_001658_hash+NULL
85811 +_001659_hash+btmrvl_hsstate_read+3+920+_001659_hash+NULL
85812 +_001660_hash+btmrvl_pscmd_read+3+24308+_001660_hash+NULL
85813 +_001661_hash+btmrvl_psmode_read+3+22395+_001661_hash+NULL
85814 +_001662_hash+btmrvl_psstate_read+3+50683+_001662_hash+NULL
85815 +_001663_hash+btmrvl_txdnldready_read+3+413+_001663_hash+NULL
85816 +_001664_hash+btrfs_add_link+5+9973+_001664_hash+NULL
85817 +_001665_hash+btrfs_discard_extent+2+38547+_001665_hash+NULL
85818 +_001666_hash+btrfs_find_create_tree_block+3+55812+_001666_hash+NULL
85819 +_001667_hash+btrfsic_map_block+2+56751+_001667_hash+NULL
85820 +_001668_hash+caif_stream_recvmsg+4+13173+_001668_hash+NULL
85821 +_001669_hash+carl9170_alloc+1+27+_001669_hash+NULL
85822 +_001670_hash+carl9170_debugfs_read+3+47738+_001670_hash+NULL
85823 +_001671_hash+cgroup_read_s64+5+19570+_001671_hash+NULL
85824 +_001672_hash+cgroup_read_u64+5+45532+_001672_hash+NULL
85825 +_001673_hash+channel_type_read+3+47308+_001673_hash+NULL
85826 +_001674_hash+codec_list_read_file+3+24910+_001674_hash+NULL
85827 +_001675_hash+configfs_read_file+3+1683+_001675_hash+NULL
85828 +_001676_hash+cpuset_common_file_read+5+8800+_001676_hash+NULL
85829 +_001677_hash+create_subvol+4+2347+_001677_hash+NULL
85830 +_001678_hash+cx18_copy_mdl_to_user+4+45549+_001678_hash+NULL
85831 +_001679_hash+dai_list_read_file+3+25421+_001679_hash+NULL
85832 +_001680_hash+dapm_bias_read_file+3+64715+_001680_hash+NULL
85833 +_001681_hash+dapm_widget_power_read_file+3+59950+_001754_hash+NULL+nohasharray
85834 +_001684_hash+dbgfs_frame+3+45917+_001684_hash+NULL
85835 +_001685_hash+dbgfs_state+3+38894+_001685_hash+NULL
85836 +_001686_hash+debugfs_read+3+62535+_001686_hash+NULL
85837 +_001687_hash+debug_output+3+18575+_001687_hash+NULL
85838 +_001688_hash+debug_read+3+19322+_001688_hash+NULL
85839 +_001689_hash+dfs_file_read+3+18116+_001689_hash+NULL
85840 +_001690_hash+dma_memcpy_pg_to_iovec+6+1725+_001690_hash+NULL
85841 +_001691_hash+dma_memcpy_to_iovec+5+12173+_001691_hash+NULL
85842 +_001692_hash+dma_rx_errors_read+3+52045+_001692_hash+NULL
85843 +_001693_hash+dma_rx_requested_read+3+65354+_001693_hash+NULL
85844 +_001694_hash+dma_show_regs+3+35266+_001694_hash+NULL
85845 +_001695_hash+dma_tx_errors_read+3+46060+_001695_hash+NULL
85846 +_001696_hash+dma_tx_requested_read+3+16110+_001775_hash+NULL+nohasharray
85847 +_001697_hash+dm_exception_table_init+2+39645+_001697_hash+&_001103_hash
85848 +_001698_hash+dn_recvmsg+4+17213+_001698_hash+NULL
85849 +_001699_hash+dns_resolver_read+3+54658+_001699_hash+NULL
85850 +_001700_hash+do_msgrcv+4+5590+_001700_hash+NULL
85851 +_001701_hash+driver_state_read+3+17194+_001701_hash+&_001394_hash
85852 +_001702_hash+dvb_demux_do_ioctl+3+34871+_001702_hash+NULL
85853 +_001703_hash+dvb_dmxdev_buffer_read+4+20682+_001703_hash+NULL
85854 +_001704_hash+dvb_dvr_do_ioctl+3+43355+_001704_hash+NULL
85855 +_001705_hash+econet_recvmsg+4+40978+_001705_hash+NULL
85856 +_001706_hash+event_calibration_read+3+21083+_001706_hash+NULL
85857 +_001707_hash+event_heart_beat_read+3+48961+_001707_hash+NULL
85858 +_001708_hash+event_oom_late_read+3+61175+_001708_hash+&_001014_hash
85859 +_001709_hash+event_phy_transmit_error_read+3+10471+_001709_hash+NULL
85860 +_001710_hash+event_rx_mem_empty_read+3+40363+_001710_hash+NULL
85861 +_001711_hash+event_rx_mismatch_read+3+38518+_001711_hash+NULL
85862 +_001712_hash+event_rx_pool_read+3+25792+_001712_hash+NULL
85863 +_001713_hash+event_tx_stuck_read+3+19305+_001713_hash+NULL
85864 +_001714_hash+excessive_retries_read+3+60425+_001714_hash+NULL
85865 +_001715_hash+fallback_on_nodma_alloc+2+35332+_001715_hash+NULL
85866 +_001716_hash+filter_read+3+61692+_001716_hash+NULL
85867 +_001717_hash+format_devstat_counter+3+32550+_001717_hash+NULL
85868 +_001718_hash+fragmentation_threshold_read+3+61718+_001718_hash+NULL
85869 +_001719_hash+fuse_conn_limit_read+3+20084+_001719_hash+NULL
85870 +_001720_hash+fuse_conn_waiting_read+3+49762+_001720_hash+NULL
85871 +_001721_hash+generic_readlink+3+32654+_001721_hash+NULL
85872 +_001722_hash+gpio_power_read+3+36059+_001722_hash+NULL
85873 +_001723_hash+hash_recvmsg+4+50924+_001723_hash+NULL
85874 +_001724_hash+ht40allow_map_read+3+55209+_002830_hash+NULL+nohasharray
85875 +_001725_hash+hwflags_read+3+52318+_001725_hash+NULL
85876 +_001726_hash+hysdn_conf_read+3+42324+_003205_hash+NULL+nohasharray
85877 +_001727_hash+i2400m_rx_stats_read+3+57706+_001727_hash+NULL
85878 +_001728_hash+i2400m_tx_stats_read+3+28527+_001728_hash+NULL
85879 +_001729_hash+idmouse_read+3+63374+_001729_hash+NULL
85880 +_001730_hash+ieee80211_if_read+3+6785+_001730_hash+NULL
85881 +_001731_hash+ieee80211_rx_bss_info+3+61630+_001731_hash+NULL
85882 +_001732_hash+ikconfig_read_current+3+1658+_001732_hash+NULL
85883 +_001733_hash+il3945_sta_dbgfs_stats_table_read+3+48802+_001733_hash+NULL
85884 +_001734_hash+il3945_ucode_general_stats_read+3+46111+_001734_hash+NULL
85885 +_001735_hash+il3945_ucode_rx_stats_read+3+3048+_001735_hash+NULL
85886 +_001736_hash+il3945_ucode_tx_stats_read+3+36016+_001736_hash+NULL
85887 +_001737_hash+il4965_rs_sta_dbgfs_rate_scale_data_read+3+37792+_001737_hash+NULL
85888 +_001738_hash+il4965_rs_sta_dbgfs_scale_table_read+3+38564+_001738_hash+NULL
85889 +_001739_hash+il4965_rs_sta_dbgfs_stats_table_read+3+49206+_001739_hash+NULL
85890 +_001740_hash+il4965_ucode_general_stats_read+3+56277+_001740_hash+NULL
85891 +_001741_hash+il4965_ucode_rx_stats_read+3+61948+_001741_hash+NULL
85892 +_001742_hash+il4965_ucode_tx_stats_read+3+12064+_001742_hash+NULL
85893 +_001743_hash+il_dbgfs_chain_noise_read+3+38044+_001743_hash+NULL
85894 +_001744_hash+il_dbgfs_channels_read+3+25005+_001744_hash+NULL
85895 +_001745_hash+il_dbgfs_disable_ht40_read+3+42386+_001745_hash+NULL
85896 +_001746_hash+il_dbgfs_fh_reg_read+3+40993+_001746_hash+NULL
85897 +_001747_hash+il_dbgfs_force_reset_read+3+57517+_001747_hash+NULL
85898 +_001748_hash+il_dbgfs_interrupt_read+3+3351+_001748_hash+NULL
85899 +_001749_hash+il_dbgfs_missed_beacon_read+3+59956+_001749_hash+NULL
85900 +_001750_hash+il_dbgfs_nvm_read+3+12288+_001750_hash+NULL
85901 +_001751_hash+il_dbgfs_power_save_status_read+3+43165+_001751_hash+NULL
85902 +_001752_hash+il_dbgfs_qos_read+3+33615+_001752_hash+NULL
85903 +_001753_hash+il_dbgfs_rxon_filter_flags_read+3+19281+_001753_hash+NULL
85904 +_001754_hash+il_dbgfs_rxon_flags_read+3+59950+_001754_hash+&_001681_hash
85905 +_001755_hash+il_dbgfs_rx_queue_read+3+11221+_001755_hash+NULL
85906 +_001756_hash+il_dbgfs_rx_stats_read+3+15243+_001756_hash+NULL
85907 +_001757_hash+il_dbgfs_sensitivity_read+3+2370+_001757_hash+NULL
85908 +_001758_hash+il_dbgfs_sram_read+3+62296+_001758_hash+NULL
85909 +_001759_hash+il_dbgfs_stations_read+3+21532+_001759_hash+NULL
85910 +_001760_hash+il_dbgfs_status_read+3+58388+_001760_hash+NULL
85911 +_001761_hash+il_dbgfs_tx_queue_read+3+55668+_001761_hash+NULL
85912 +_001762_hash+il_dbgfs_tx_stats_read+3+32913+_001762_hash+NULL
85913 +_001763_hash+ima_show_htable_value+2+57136+_001763_hash+NULL
85914 +_001765_hash+ipw_write+3+59807+_001765_hash+NULL
85915 +_001766_hash+irda_recvmsg_stream+4+35280+_001766_hash+NULL
85916 +_001767_hash+iscsi_tcp_conn_setup+2+16376+_001767_hash+NULL
85917 +_001768_hash+isr_cmd_cmplt_read+3+53439+_001768_hash+NULL
85918 +_001769_hash+isr_commands_read+3+41398+_001769_hash+NULL
85919 +_001770_hash+isr_decrypt_done_read+3+49490+_001770_hash+NULL
85920 +_001771_hash+isr_dma0_done_read+3+8574+_001771_hash+NULL
85921 +_001772_hash+isr_dma1_done_read+3+48159+_001772_hash+NULL
85922 +_001773_hash+isr_fiqs_read+3+34687+_001773_hash+NULL
85923 +_001774_hash+isr_host_acknowledges_read+3+54136+_001774_hash+NULL
85924 +_001775_hash+isr_hw_pm_mode_changes_read+3+16110+_001775_hash+&_001696_hash
85925 +_001776_hash+isr_irqs_read+3+9181+_001776_hash+NULL
85926 +_001777_hash+isr_low_rssi_read+3+64789+_001777_hash+NULL
85927 +_001778_hash+isr_pci_pm_read+3+30271+_001778_hash+NULL
85928 +_001779_hash+isr_rx_headers_read+3+38325+_001779_hash+NULL
85929 +_001780_hash+isr_rx_mem_overflow_read+3+43025+_001780_hash+NULL
85930 +_001781_hash+isr_rx_procs_read+3+31804+_001781_hash+NULL
85931 +_001782_hash+isr_rx_rdys_read+3+35283+_001782_hash+NULL
85932 +_001783_hash+isr_tx_exch_complete_read+3+16103+_001783_hash+NULL
85933 +_001784_hash+isr_tx_procs_read+3+23084+_001784_hash+NULL
85934 +_001785_hash+isr_wakeups_read+3+49607+_001785_hash+NULL
85935 +_001786_hash+ivtv_read+3+57796+_001786_hash+NULL
85936 +_001787_hash+iwl_dbgfs_bt_traffic_read+3+35534+_001787_hash+NULL
85937 +_001788_hash+iwl_dbgfs_chain_noise_read+3+46355+_001788_hash+NULL
85938 +_001789_hash+iwl_dbgfs_channels_read+3+6784+_001789_hash+NULL
85939 +_001790_hash+iwl_dbgfs_current_sleep_command_read+3+2081+_001790_hash+NULL
85940 +_001791_hash+iwl_dbgfs_disable_ht40_read+3+35761+_001791_hash+NULL
85941 +_001792_hash+iwl_dbgfs_fh_reg_read+3+879+_001792_hash+&_000393_hash
85942 +_001793_hash+iwl_dbgfs_force_reset_read+3+62628+_001793_hash+NULL
85943 +_001794_hash+iwl_dbgfs_interrupt_read+3+23574+_001794_hash+NULL
85944 +_001795_hash+iwl_dbgfs_log_event_read+3+2107+_001795_hash+NULL
85945 +_001796_hash+iwl_dbgfs_missed_beacon_read+3+50584+_001796_hash+NULL
85946 +_001797_hash+iwl_dbgfs_nvm_read+3+23845+_001797_hash+NULL
85947 +_001798_hash+iwl_dbgfs_plcp_delta_read+3+55407+_001798_hash+NULL
85948 +_001799_hash+iwl_dbgfs_power_save_status_read+3+54392+_001799_hash+NULL
85949 +_001800_hash+iwl_dbgfs_protection_mode_read+3+13943+_001800_hash+NULL
85950 +_001801_hash+iwl_dbgfs_qos_read+3+11753+_001801_hash+NULL
85951 +_001802_hash+iwl_dbgfs_reply_tx_error_read+3+19205+_001802_hash+NULL
85952 +_001803_hash+iwl_dbgfs_rx_handlers_read+3+18708+_001803_hash+NULL
85953 +_001804_hash+iwl_dbgfs_rxon_filter_flags_read+3+28832+_001804_hash+NULL
85954 +_001805_hash+iwl_dbgfs_rxon_flags_read+3+20795+_001805_hash+NULL
85955 +_001806_hash+iwl_dbgfs_rx_queue_read+3+19943+_001806_hash+NULL
85956 +_001807_hash+iwl_dbgfs_rx_statistics_read+3+62687+_001807_hash+&_000425_hash
85957 +_001808_hash+iwl_dbgfs_sensitivity_read+3+63116+_003026_hash+NULL+nohasharray
85958 +_001809_hash+iwl_dbgfs_sleep_level_override_read+3+3038+_001809_hash+NULL
85959 +_001810_hash+iwl_dbgfs_sram_read+3+44505+_001810_hash+NULL
85960 +_001811_hash+iwl_dbgfs_stations_read+3+9309+_001811_hash+NULL
85961 +_001812_hash+iwl_dbgfs_status_read+3+5171+_001812_hash+NULL
85962 +_001813_hash+iwl_dbgfs_temperature_read+3+29224+_001813_hash+NULL
85963 +_001814_hash+iwl_dbgfs_thermal_throttling_read+3+38779+_001814_hash+NULL
85964 +_001815_hash+iwl_dbgfs_traffic_log_read+3+58870+_001815_hash+NULL
85965 +_001816_hash+iwl_dbgfs_tx_queue_read+3+4635+_001816_hash+NULL
85966 +_001817_hash+iwl_dbgfs_tx_statistics_read+3+314+_001817_hash+NULL
85967 +_001818_hash+iwl_dbgfs_ucode_bt_stats_read+3+42820+_001818_hash+NULL
85968 +_001819_hash+iwl_dbgfs_ucode_general_stats_read+3+49199+_001819_hash+NULL
85969 +_001820_hash+iwl_dbgfs_ucode_rx_stats_read+3+58023+_001820_hash+NULL
85970 +_001821_hash+iwl_dbgfs_ucode_tracing_read+3+47983+_001821_hash+&_000349_hash
85971 +_001822_hash+iwl_dbgfs_ucode_tx_stats_read+3+31611+_001822_hash+NULL
85972 +_001823_hash+iwl_dbgfs_wowlan_sram_read+3+540+_001823_hash+NULL
85973 +_001824_hash+iwm_if_alloc+1+17027+_001824_hash+&_001314_hash
85974 +_001825_hash+kernel_readv+3+35617+_001825_hash+NULL
85975 +_001826_hash+key_algorithm_read+3+57946+_001826_hash+NULL
85976 +_001827_hash+key_icverrors_read+3+20895+_001827_hash+NULL
85977 +_001828_hash+key_key_read+3+3241+_001828_hash+NULL
85978 +_001829_hash+key_replays_read+3+62746+_001829_hash+NULL
85979 +_001830_hash+key_rx_spec_read+3+12736+_001830_hash+NULL
85980 +_001831_hash+key_tx_spec_read+3+4862+_001831_hash+NULL
85981 +_001832_hash+__kfifo_to_user+3+36555+_002199_hash+NULL+nohasharray
85982 +_001833_hash+__kfifo_to_user_r+3+39123+_001833_hash+NULL
85983 +_001834_hash+kmem_zalloc_greedy+2-3+65268+_001834_hash+NULL
85984 +_001836_hash+l2cap_chan_send+3+49995+_001836_hash+NULL
85985 +_001837_hash+l2cap_sar_segment_sdu+3+27701+_001837_hash+NULL
85986 +_001838_hash+lbs_debugfs_read+3+30721+_001838_hash+NULL
85987 +_001839_hash+lbs_dev_info+3+51023+_001839_hash+NULL
85988 +_001840_hash+lbs_host_sleep_read+3+31013+_001840_hash+NULL
85989 +_001841_hash+lbs_rdbbp_read+3+45805+_001841_hash+NULL
85990 +_001842_hash+lbs_rdmac_read+3+418+_001842_hash+NULL
85991 +_001843_hash+lbs_rdrf_read+3+41431+_001843_hash+NULL
85992 +_001844_hash+lbs_sleepparams_read+3+10840+_001844_hash+NULL
85993 +_001845_hash+lbs_threshold_read+5+21046+_001845_hash+NULL
85994 +_001846_hash+libfc_vport_create+2+4415+_001846_hash+NULL
85995 +_001847_hash+lkdtm_debugfs_read+3+45752+_001847_hash+NULL
85996 +_001848_hash+llcp_sock_recvmsg+4+13556+_001848_hash+NULL
85997 +_001849_hash+long_retry_limit_read+3+59766+_001849_hash+NULL
85998 +_001850_hash+lpfc_debugfs_dif_err_read+3+36303+_001850_hash+NULL
85999 +_001851_hash+lpfc_debugfs_read+3+16566+_001851_hash+NULL
86000 +_001852_hash+lpfc_idiag_baracc_read+3+58466+_002447_hash+NULL+nohasharray
86001 +_001853_hash+lpfc_idiag_ctlacc_read+3+33943+_001853_hash+NULL
86002 +_001854_hash+lpfc_idiag_drbacc_read+3+15948+_001854_hash+NULL
86003 +_001855_hash+lpfc_idiag_extacc_read+3+48301+_001855_hash+NULL
86004 +_001856_hash+lpfc_idiag_mbxacc_read+3+28061+_001856_hash+NULL
86005 +_001857_hash+lpfc_idiag_pcicfg_read+3+50334+_001857_hash+NULL
86006 +_001858_hash+lpfc_idiag_queacc_read+3+13950+_001858_hash+NULL
86007 +_001859_hash+lpfc_idiag_queinfo_read+3+55662+_001859_hash+NULL
86008 +_001860_hash+mac80211_format_buffer+2+41010+_001860_hash+NULL
86009 +_001861_hash+macvtap_put_user+4+55609+_001861_hash+NULL
86010 +_001862_hash+macvtap_sendmsg+4+30629+_001862_hash+NULL
86011 +_001863_hash+mic_calc_failure_read+3+59700+_001863_hash+NULL
86012 +_001864_hash+mic_rx_pkts_read+3+27972+_001864_hash+NULL
86013 +_001865_hash+minstrel_stats_read+3+17290+_001865_hash+NULL
86014 +_001866_hash+mmc_ext_csd_read+3+13205+_001866_hash+NULL
86015 +_001867_hash+mon_bin_read+3+6841+_001867_hash+NULL
86016 +_001868_hash+mon_stat_read+3+25238+_001868_hash+NULL
86017 +_001870_hash+mqueue_read_file+3+6228+_001870_hash+NULL
86018 +_001871_hash+mwifiex_debug_read+3+53074+_001871_hash+NULL
86019 +_001872_hash+mwifiex_getlog_read+3+54269+_001872_hash+NULL
86020 +_001873_hash+mwifiex_info_read+3+53447+_001873_hash+NULL
86021 +_001874_hash+mwifiex_rdeeprom_read+3+51429+_001874_hash+NULL
86022 +_001875_hash+mwifiex_regrdwr_read+3+34472+_001875_hash+NULL
86023 +_001876_hash+nfsd_vfs_read+6+62605+_003003_hash+NULL+nohasharray
86024 +_001877_hash+nfsd_vfs_write+6+54577+_001877_hash+NULL
86025 +_001878_hash+nfs_idmap_lookup_id+2+10660+_001878_hash+NULL
86026 +_001879_hash+o2hb_debug_read+3+37851+_001879_hash+NULL
86027 +_001880_hash+o2net_debug_read+3+52105+_001880_hash+NULL
86028 +_001881_hash+ocfs2_control_read+3+56405+_001881_hash+NULL
86029 +_001882_hash+ocfs2_debug_read+3+14507+_001882_hash+NULL
86030 +_001883_hash+ocfs2_readlink+3+50656+_001883_hash+NULL
86031 +_001884_hash+oom_adjust_read+3+25127+_001884_hash+NULL
86032 +_001885_hash+oom_score_adj_read+3+39921+_002116_hash+NULL+nohasharray
86033 +_001886_hash+oprofilefs_str_to_user+3+42182+_001886_hash+NULL
86034 +_001887_hash+oprofilefs_ulong_to_user+3+11582+_001887_hash+NULL
86035 +_001888_hash+_osd_req_list_objects+6+4204+_001888_hash+NULL
86036 +_001889_hash+osd_req_read_kern+5+59990+_001889_hash+NULL
86037 +_001890_hash+osd_req_write_kern+5+53486+_001890_hash+NULL
86038 +_001891_hash+p54_init_common+1+23850+_001891_hash+NULL
86039 +_001892_hash+packet_sendmsg+4+24954+_001892_hash+NULL
86040 +_001893_hash+page_readlink+3+23346+_001893_hash+NULL
86041 +_001894_hash+pcf50633_write_block+3+2124+_001894_hash+NULL
86042 +_001895_hash+platform_list_read_file+3+34734+_001895_hash+NULL
86043 +_001896_hash+pm860x_bulk_write+3+43875+_001896_hash+NULL
86044 +_001897_hash+pm_qos_power_read+3+55891+_001897_hash+NULL
86045 +_001898_hash+pms_read+3+53873+_001898_hash+NULL
86046 +_001899_hash+port_show_regs+3+5904+_001899_hash+NULL
86047 +_001900_hash+proc_coredump_filter_read+3+39153+_001900_hash+NULL
86048 +_001901_hash+proc_fdinfo_read+3+62043+_001901_hash+NULL
86049 +_001902_hash+proc_info_read+3+63344+_001902_hash+NULL
86050 +_001903_hash+proc_loginuid_read+3+15631+_001903_hash+NULL
86051 +_001904_hash+proc_pid_attr_read+3+10173+_001904_hash+NULL
86052 +_001905_hash+proc_pid_readlink+3+52186+_001905_hash+NULL
86053 +_001906_hash+proc_read+3+43614+_001906_hash+NULL
86054 +_001907_hash+proc_self_readlink+3+38094+_001907_hash+NULL
86055 +_001908_hash+proc_sessionid_read+3+6911+_002038_hash+NULL+nohasharray
86056 +_001909_hash+provide_user_output+3+41105+_001909_hash+NULL
86057 +_001910_hash+ps_pspoll_max_apturn_read+3+6699+_001910_hash+NULL
86058 +_001911_hash+ps_pspoll_timeouts_read+3+11776+_001911_hash+NULL
86059 +_001912_hash+ps_pspoll_utilization_read+3+5361+_001912_hash+NULL
86060 +_001913_hash+pstore_file_read+3+57288+_001913_hash+NULL
86061 +_001914_hash+ps_upsd_max_apturn_read+3+19918+_001914_hash+NULL
86062 +_001915_hash+ps_upsd_max_sptime_read+3+63362+_001915_hash+NULL
86063 +_001916_hash+ps_upsd_timeouts_read+3+28924+_001916_hash+NULL
86064 +_001917_hash+ps_upsd_utilization_read+3+51669+_001917_hash+NULL
86065 +_001918_hash+pvr2_v4l2_read+3+18006+_001918_hash+NULL
86066 +_001919_hash+pwr_disable_ps_read+3+13176+_001919_hash+NULL
86067 +_001920_hash+pwr_elp_enter_read+3+5324+_001920_hash+NULL
86068 +_001921_hash+pwr_enable_ps_read+3+17686+_001921_hash+NULL
86069 +_001922_hash+pwr_fix_tsf_ps_read+3+26627+_001922_hash+NULL
86070 +_001923_hash+pwr_missing_bcns_read+3+25824+_001923_hash+NULL
86071 +_001924_hash+pwr_power_save_off_read+3+18355+_001924_hash+NULL
86072 +_001925_hash+pwr_ps_enter_read+3+26935+_001925_hash+&_000501_hash
86073 +_001926_hash+pwr_rcvd_awake_beacons_read+3+50505+_001926_hash+NULL
86074 +_001927_hash+pwr_rcvd_beacons_read+3+52836+_001927_hash+NULL
86075 +_001928_hash+pwr_tx_without_ps_read+3+48423+_001928_hash+NULL
86076 +_001929_hash+pwr_tx_with_ps_read+3+60851+_001929_hash+NULL
86077 +_001930_hash+pwr_wake_on_host_read+3+26321+_001930_hash+NULL
86078 +_001931_hash+pwr_wake_on_timer_exp_read+3+22640+_001931_hash+NULL
86079 +_001932_hash+queues_read+3+24877+_001932_hash+NULL
86080 +_001933_hash+raw_recvmsg+4+17277+_001933_hash+NULL
86081 +_001934_hash+rcname_read+3+25919+_001934_hash+NULL
86082 +_001935_hash+read_4k_modal_eeprom+3+30212+_001935_hash+NULL
86083 +_001936_hash+read_9287_modal_eeprom+3+59327+_001936_hash+NULL
86084 +_001937_hash+reada_find_extent+2+63486+_001937_hash+NULL
86085 +_001938_hash+read_def_modal_eeprom+3+14041+_001938_hash+NULL
86086 +_001939_hash+read_enabled_file_bool+3+37744+_001939_hash+NULL
86087 +_001940_hash+read_file_ani+3+23161+_001940_hash+NULL
86088 +_001941_hash+read_file_antenna+3+13574+_001941_hash+NULL
86089 +_001942_hash+read_file_base_eeprom+3+42168+_001942_hash+NULL
86090 +_001943_hash+read_file_beacon+3+32595+_001943_hash+NULL
86091 +_001944_hash+read_file_blob+3+57406+_001944_hash+NULL
86092 +_001945_hash+read_file_bool+3+4180+_001945_hash+NULL
86093 +_001946_hash+read_file_credit_dist_stats+3+54367+_001946_hash+NULL
86094 +_001947_hash+read_file_debug+3+58256+_001947_hash+NULL
86095 +_001948_hash+read_file_disable_ani+3+6536+_001948_hash+NULL
86096 +_001949_hash+read_file_dma+3+9530+_001949_hash+NULL
86097 +_001950_hash+read_file_dump_nfcal+3+18766+_001950_hash+NULL
86098 +_001951_hash+read_file_frameerrors+3+64001+_001951_hash+NULL
86099 +_001952_hash+read_file_interrupt+3+61742+_001959_hash+NULL+nohasharray
86100 +_001953_hash+read_file_misc+3+9948+_001953_hash+NULL
86101 +_001954_hash+read_file_modal_eeprom+3+39909+_001954_hash+NULL
86102 +_001955_hash+read_file_queue+3+40895+_001955_hash+NULL
86103 +_001956_hash+read_file_rcstat+3+22854+_001956_hash+NULL
86104 +_001957_hash+read_file_recv+3+48232+_001957_hash+NULL
86105 +_001958_hash+read_file_regidx+3+33370+_001958_hash+NULL
86106 +_001959_hash+read_file_regval+3+61742+_001959_hash+&_001952_hash
86107 +_001960_hash+read_file_reset+3+52310+_001960_hash+NULL
86108 +_001961_hash+read_file_rx_chainmask+3+41605+_001961_hash+NULL
86109 +_001962_hash+read_file_slot+3+50111+_001962_hash+NULL
86110 +_001963_hash+read_file_stations+3+35795+_001963_hash+NULL
86111 +_001964_hash+read_file_tgt_int_stats+3+20697+_001964_hash+NULL
86112 +_001965_hash+read_file_tgt_rx_stats+3+33944+_001965_hash+NULL
86113 +_001966_hash+read_file_tgt_stats+3+8959+_001966_hash+NULL
86114 +_001967_hash+read_file_tgt_tx_stats+3+51847+_001967_hash+NULL
86115 +_001968_hash+read_file_tx_chainmask+3+3829+_001968_hash+NULL
86116 +_001969_hash+read_file_war_stats+3+292+_001969_hash+NULL
86117 +_001970_hash+read_file_xmit+3+21487+_001970_hash+NULL
86118 +_001971_hash+read_from_oldmem+2+3337+_001971_hash+NULL
86119 +_001972_hash+read_oldmem+3+55658+_001972_hash+NULL
86120 +_001973_hash+regmap_name_read_file+3+39379+_001973_hash+NULL
86121 +_001974_hash+repair_io_failure+4+4815+_001974_hash+NULL
86122 +_001975_hash+request_key_and_link+4+42693+_001975_hash+NULL
86123 +_001976_hash+res_counter_read+4+33499+_001976_hash+NULL
86124 +_001977_hash+retry_count_read+3+52129+_001977_hash+NULL
86125 +_001978_hash+rs_sta_dbgfs_rate_scale_data_read+3+47165+_001978_hash+NULL
86126 +_001979_hash+rs_sta_dbgfs_scale_table_read+3+40262+_001979_hash+NULL
86127 +_001980_hash+rs_sta_dbgfs_stats_table_read+3+56573+_001980_hash+NULL
86128 +_001981_hash+rts_threshold_read+3+44384+_001981_hash+NULL
86129 +_001982_hash+rx_dropped_read+3+44799+_001982_hash+NULL
86130 +_001983_hash+rx_fcs_err_read+3+62844+_001983_hash+NULL
86131 +_001984_hash+rx_hdr_overflow_read+3+64407+_001984_hash+NULL
86132 +_001985_hash+rx_hw_stuck_read+3+57179+_001985_hash+NULL
86133 +_001986_hash+rx_out_of_mem_read+3+10157+_001986_hash+NULL
86134 +_001987_hash+rx_path_reset_read+3+23801+_001987_hash+NULL
86135 +_001988_hash+rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read+3+55106+_001988_hash+NULL
86136 +_001989_hash+rxpipe_descr_host_int_trig_rx_data_read+3+22001+_003089_hash+NULL+nohasharray
86137 +_001990_hash+rxpipe_missed_beacon_host_int_trig_rx_data_read+3+63405+_001990_hash+NULL
86138 +_001991_hash+rxpipe_rx_prep_beacon_drop_read+3+2403+_001991_hash+NULL
86139 +_001992_hash+rxpipe_tx_xfr_host_int_trig_rx_data_read+3+35538+_001992_hash+NULL
86140 +_001993_hash+rx_reset_counter_read+3+58001+_001993_hash+NULL
86141 +_001994_hash+rx_xfr_hint_trig_read+3+40283+_001994_hash+NULL
86142 +_001995_hash+s5m_bulk_write+3+4833+_001995_hash+NULL
86143 +_001996_hash+scrub_setup_recheck_block+3-4+56245+_001996_hash+NULL
86144 +_001998_hash+scsi_adjust_queue_depth+3+12802+_001998_hash+NULL
86145 +_001999_hash+selinux_inode_notifysecctx+3+36896+_001999_hash+NULL
86146 +_002000_hash+sel_read_avc_cache_threshold+3+33942+_002000_hash+NULL
86147 +_002001_hash+sel_read_avc_hash_stats+3+1984+_002001_hash+NULL
86148 +_002002_hash+sel_read_bool+3+24236+_002002_hash+NULL
86149 +_002003_hash+sel_read_checkreqprot+3+33068+_002003_hash+NULL
86150 +_002004_hash+sel_read_class+3+12669+_002541_hash+NULL+nohasharray
86151 +_002005_hash+sel_read_enforce+3+2828+_002005_hash+NULL
86152 +_002006_hash+sel_read_handle_status+3+56139+_002006_hash+NULL
86153 +_002007_hash+sel_read_handle_unknown+3+57933+_002007_hash+NULL
86154 +_002008_hash+sel_read_initcon+3+32362+_002008_hash+NULL
86155 +_002009_hash+sel_read_mls+3+25369+_002009_hash+NULL
86156 +_002010_hash+sel_read_perm+3+42302+_002010_hash+NULL
86157 +_002011_hash+sel_read_policy+3+55947+_002011_hash+NULL
86158 +_002012_hash+sel_read_policycap+3+28544+_002012_hash+NULL
86159 +_002013_hash+sel_read_policyvers+3+55+_003257_hash+NULL+nohasharray
86160 +_002014_hash+send_msg+4+37323+_002014_hash+NULL
86161 +_002015_hash+send_packet+4+52960+_002015_hash+NULL
86162 +_002016_hash+short_retry_limit_read+3+4687+_002016_hash+NULL
86163 +_002017_hash+simple_attr_read+3+24738+_002017_hash+NULL
86164 +_002018_hash+simple_transaction_read+3+17076+_002018_hash+NULL
86165 +_002019_hash+skb_copy_datagram_const_iovec+2-5-4+48102+_002019_hash+NULL
86166 +_002022_hash+skb_copy_datagram_iovec+2-4+5806+_002022_hash+NULL
86167 +_002024_hash+smk_read_ambient+3+61220+_002024_hash+NULL
86168 +_002025_hash+smk_read_direct+3+15803+_002025_hash+NULL
86169 +_002026_hash+smk_read_doi+3+30813+_002026_hash+NULL
86170 +_002027_hash+smk_read_logging+3+37804+_002027_hash+NULL
86171 +_002028_hash+smk_read_onlycap+3+3855+_002028_hash+NULL
86172 +_002029_hash+snapshot_read+3+22601+_002029_hash+NULL
86173 +_002030_hash+snd_cs4281_BA0_read+5+6847+_002030_hash+NULL
86174 +_002031_hash+snd_cs4281_BA1_read+5+20323+_002031_hash+NULL
86175 +_002032_hash+snd_cs46xx_io_read+5+45734+_002032_hash+NULL
86176 +_002033_hash+snd_gus_dram_read+4+56686+_002033_hash+NULL
86177 +_002034_hash+snd_pcm_oss_read+3+28317+_002034_hash+NULL
86178 +_002035_hash+snd_rme32_capture_copy+5+39653+_002035_hash+NULL
86179 +_002036_hash+snd_rme96_capture_copy+5+58484+_002036_hash+NULL
86180 +_002037_hash+snd_soc_hw_bulk_write_raw+4+14245+_002037_hash+NULL
86181 +_002038_hash+spi_show_regs+3+6911+_002038_hash+&_001908_hash
86182 +_002039_hash+sta_agg_status_read+3+14058+_002039_hash+NULL
86183 +_002040_hash+sta_connected_time_read+3+17435+_002040_hash+NULL
86184 +_002041_hash+sta_flags_read+3+56710+_002041_hash+NULL
86185 +_002042_hash+sta_ht_capa_read+3+10366+_002042_hash+NULL
86186 +_002043_hash+sta_last_seq_ctrl_read+3+19106+_002043_hash+NULL
86187 +_002044_hash+sta_num_ps_buf_frames_read+3+1488+_002044_hash+NULL
86188 +_002045_hash+st_read+3+51251+_002045_hash+NULL
86189 +_002046_hash+supply_map_read_file+3+10608+_002046_hash+NULL
86190 +_002047_hash+sysfs_read_file+3+42113+_002047_hash+NULL
86191 +_002048_hash+sys_lgetxattr+4+45531+_002048_hash+NULL
86192 +_002049_hash+sys_preadv+3+17100+_002049_hash+NULL
86193 +_002050_hash+sys_pwritev+3+41722+_002050_hash+NULL
86194 +_002051_hash+sys_readv+3+50664+_002051_hash+NULL
86195 +_002052_hash+sys_rt_sigpending+2+24961+_002052_hash+NULL
86196 +_002053_hash+sys_writev+3+28384+_002053_hash+NULL
86197 +_002054_hash+test_iso_queue+5+62534+_002054_hash+NULL
86198 +_002055_hash+ts_read+3+44687+_002055_hash+NULL
86199 +_002056_hash+TSS_authhmac+3+12839+_002056_hash+NULL
86200 +_002057_hash+TSS_checkhmac1+5+31429+_002057_hash+NULL
86201 +_002058_hash+TSS_checkhmac2+5-7+40520+_002058_hash+NULL
86202 +_002060_hash+tt3650_ci_msg_locked+4+8013+_002060_hash+NULL
86203 +_002061_hash+tun_sendmsg+4+10337+_002061_hash+NULL
86204 +_002062_hash+tx_internal_desc_overflow_read+3+47300+_002062_hash+NULL
86205 +_002063_hash+tx_queue_len_read+3+1463+_002063_hash+NULL
86206 +_002064_hash+tx_queue_status_read+3+44978+_002064_hash+NULL
86207 +_002065_hash+ubi_io_write_data+4-5+40305+_002065_hash+NULL
86208 +_002067_hash+uhci_debug_read+3+5911+_002067_hash+NULL
86209 +_002068_hash+unix_stream_recvmsg+4+35210+_002068_hash+NULL
86210 +_002069_hash+uvc_debugfs_stats_read+3+56651+_002069_hash+NULL
86211 +_002070_hash+vhost_add_used_and_signal_n+4+8038+_002070_hash+NULL
86212 +_002071_hash+vifs_state_read+3+33762+_002071_hash+NULL
86213 +_002072_hash+vmbus_open+2-3+12154+_002072_hash+NULL
86214 +_002074_hash+waiters_read+3+40902+_002074_hash+NULL
86215 +_002075_hash+wep_addr_key_count_read+3+20174+_002075_hash+NULL
86216 +_002076_hash+wep_decrypt_fail_read+3+58567+_002076_hash+NULL
86217 +_002077_hash+wep_default_key_count_read+3+43035+_002077_hash+NULL
86218 +_002078_hash+wep_interrupt_read+3+41492+_002078_hash+NULL
86219 +_002079_hash+wep_key_not_found_read+3+13377+_002079_hash+&_000915_hash
86220 +_002080_hash+wep_packets_read+3+18751+_002080_hash+NULL
86221 +_002081_hash+wl1271_format_buffer+2+20834+_002081_hash+NULL
86222 +_002082_hash+wm8994_bulk_write+3+13615+_002082_hash+NULL
86223 +_002083_hash+wusb_prf_256+7+29203+_002083_hash+NULL
86224 +_002084_hash+wusb_prf_64+7+51065+_002084_hash+NULL
86225 +_002085_hash+xfs_buf_read_uncached+4+27519+_002085_hash+NULL
86226 +_002086_hash+xfs_iext_add+3+41422+_002086_hash+NULL
86227 +_002087_hash+xfs_iext_remove_direct+3+40744+_002087_hash+NULL
86228 +_002088_hash+xfs_trans_get_efd+3+51148+_002088_hash+NULL
86229 +_002089_hash+xfs_trans_get_efi+2+7898+_002089_hash+NULL
86230 +_002090_hash+xlog_get_bp+2+23229+_002090_hash+NULL
86231 +_002091_hash+xz_dec_init+2+29029+_002091_hash+NULL
86232 +_002092_hash+aac_change_queue_depth+2+825+_002092_hash+NULL
86233 +_002093_hash+agp_allocate_memory_wrap+1+16576+_002093_hash+NULL
86234 +_002094_hash+arcmsr_adjust_disk_queue_depth+2+16756+_002094_hash+NULL
86235 +_002095_hash+atalk_recvmsg+4+22053+_002095_hash+NULL
86236 +_002097_hash+atomic_read_file+3+16227+_002097_hash+NULL
86237 +_002098_hash+ax25_recvmsg+4+64441+_002098_hash+NULL
86238 +_002099_hash+beacon_interval_read+3+7091+_002099_hash+NULL
86239 +_002100_hash+btrfs_init_new_buffer+4+55761+_002100_hash+NULL
86240 +_002101_hash+btrfs_mksubvol+3+39479+_002101_hash+NULL
86241 +_002102_hash+bt_sock_recvmsg+4+12316+_002102_hash+NULL
86242 +_002103_hash+bt_sock_stream_recvmsg+4+52518+_002103_hash+NULL
86243 +_002104_hash+caif_seqpkt_recvmsg+4+32241+_002104_hash+NULL
86244 +_002105_hash+cpu_type_read+3+36540+_002105_hash+NULL
86245 +_002106_hash+cx18_read+3+23699+_002106_hash+NULL
86246 +_002107_hash+dccp_recvmsg+4+16056+_002107_hash+NULL
86247 +_002108_hash+depth_read+3+31112+_002108_hash+NULL
86248 +_002109_hash+dfs_global_file_read+3+7787+_002109_hash+NULL
86249 +_002110_hash+dgram_recvmsg+4+23104+_002110_hash+NULL
86250 +_002111_hash+dma_skb_copy_datagram_iovec+3-5+21516+_002111_hash+NULL
86251 +_002113_hash+dtim_interval_read+3+654+_002113_hash+NULL
86252 +_002114_hash+dynamic_ps_timeout_read+3+10110+_002114_hash+NULL
86253 +_002115_hash+enable_read+3+2117+_002115_hash+NULL
86254 +_002116_hash+exofs_read_kern+6+39921+_002116_hash+&_001885_hash
86255 +_002117_hash+fc_change_queue_depth+2+36841+_002117_hash+NULL
86256 +_002118_hash+forced_ps_read+3+31685+_002118_hash+NULL
86257 +_002119_hash+frequency_read+3+64031+_003106_hash+NULL+nohasharray
86258 +_002120_hash+get_alua_req+3+4166+_002120_hash+NULL
86259 +_002121_hash+get_rdac_req+3+45882+_002121_hash+NULL
86260 +_002122_hash+hci_sock_recvmsg+4+7072+_002122_hash+NULL
86261 +_002123_hash+hpsa_change_queue_depth+2+15449+_002123_hash+NULL
86262 +_002124_hash+hptiop_adjust_disk_queue_depth+2+20122+_002124_hash+NULL
86263 +_002125_hash+ide_queue_pc_tail+5+11673+_002125_hash+NULL
86264 +_002126_hash+ide_raw_taskfile+4+42355+_002126_hash+NULL
86265 +_002127_hash+idetape_queue_rw_tail+3+29562+_002127_hash+NULL
86266 +_002128_hash+ieee80211_if_read_aid+3+9705+_002128_hash+NULL
86267 +_002129_hash+ieee80211_if_read_auto_open_plinks+3+38268+_002129_hash+NULL
86268 +_002130_hash+ieee80211_if_read_ave_beacon+3+64924+_002130_hash+NULL
86269 +_002131_hash+ieee80211_if_read_bssid+3+35161+_002131_hash+NULL
86270 +_002132_hash+ieee80211_if_read_channel_type+3+23884+_002132_hash+NULL
86271 +_002133_hash+ieee80211_if_read_dot11MeshConfirmTimeout+3+60670+_002133_hash+NULL
86272 +_002134_hash+ieee80211_if_read_dot11MeshGateAnnouncementProtocol+3+14486+_002134_hash+NULL
86273 +_002135_hash+ieee80211_if_read_dot11MeshHoldingTimeout+3+47356+_002135_hash+NULL
86274 +_002136_hash+ieee80211_if_read_dot11MeshHWMPactivePathTimeout+3+7368+_002136_hash+NULL
86275 +_002137_hash+ieee80211_if_read_dot11MeshHWMPmaxPREQretries+3+59829+_002137_hash+NULL
86276 +_002138_hash+ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime+3+1589+_002138_hash+NULL
86277 +_002139_hash+ieee80211_if_read_dot11MeshHWMPperrMinInterval+3+17346+_002139_hash+NULL
86278 +_002140_hash+ieee80211_if_read_dot11MeshHWMPpreqMinInterval+3+24208+_002140_hash+NULL
86279 +_002141_hash+ieee80211_if_read_dot11MeshHWMPRannInterval+3+2249+_002141_hash+NULL
86280 +_002142_hash+ieee80211_if_read_dot11MeshHWMPRootMode+3+51441+_002142_hash+NULL
86281 +_002143_hash+ieee80211_if_read_dot11MeshMaxPeerLinks+3+23878+_002143_hash+NULL
86282 +_002144_hash+ieee80211_if_read_dot11MeshMaxRetries+3+12756+_002144_hash+NULL
86283 +_002145_hash+ieee80211_if_read_dot11MeshRetryTimeout+3+52168+_002145_hash+NULL
86284 +_002146_hash+ieee80211_if_read_dot11MeshTTL+3+58307+_002146_hash+NULL
86285 +_002147_hash+ieee80211_if_read_dropped_frames_congestion+3+32603+_002147_hash+NULL
86286 +_002148_hash+ieee80211_if_read_dropped_frames_no_route+3+33383+_002148_hash+NULL
86287 +_002149_hash+ieee80211_if_read_dropped_frames_ttl+3+44500+_002149_hash+NULL
86288 +_002150_hash+ieee80211_if_read_drop_unencrypted+3+37053+_002150_hash+NULL
86289 +_002151_hash+ieee80211_if_read_dtim_count+3+38419+_002151_hash+NULL
86290 +_002152_hash+ieee80211_if_read_element_ttl+3+18869+_002152_hash+NULL
86291 +_002153_hash+ieee80211_if_read_estab_plinks+3+32533+_002153_hash+NULL
86292 +_002154_hash+ieee80211_if_read_flags+3+57470+_002389_hash+NULL+nohasharray
86293 +_002155_hash+ieee80211_if_read_fwded_frames+3+36520+_002155_hash+NULL
86294 +_002156_hash+ieee80211_if_read_fwded_mcast+3+39571+_002156_hash+&_000151_hash
86295 +_002157_hash+ieee80211_if_read_fwded_unicast+3+59740+_002859_hash+NULL+nohasharray
86296 +_002158_hash+ieee80211_if_read_last_beacon+3+31257+_002158_hash+NULL
86297 +_002159_hash+ieee80211_if_read_min_discovery_timeout+3+13946+_002159_hash+NULL
86298 +_002160_hash+ieee80211_if_read_num_buffered_multicast+3+12716+_002160_hash+NULL
86299 +_002161_hash+ieee80211_if_read_num_sta_authorized+3+56177+_002161_hash+NULL
86300 +_002162_hash+ieee80211_if_read_num_sta_ps+3+34722+_002162_hash+NULL
86301 +_002163_hash+ieee80211_if_read_path_refresh_time+3+25545+_002163_hash+NULL
86302 +_002164_hash+ieee80211_if_read_peer+3+45233+_002164_hash+NULL
86303 +_002165_hash+ieee80211_if_read_rc_rateidx_mask_2ghz+3+61570+_002165_hash+NULL
86304 +_002166_hash+ieee80211_if_read_rc_rateidx_mask_5ghz+3+27183+_002166_hash+NULL
86305 +_002167_hash+ieee80211_if_read_rc_rateidx_mcs_mask_2ghz+3+37675+_002167_hash+NULL
86306 +_002168_hash+ieee80211_if_read_rc_rateidx_mcs_mask_5ghz+3+44423+_002168_hash+NULL
86307 +_002169_hash+ieee80211_if_read_rssi_threshold+3+49260+_002169_hash+NULL
86308 +_002170_hash+ieee80211_if_read_smps+3+27416+_002170_hash+NULL
86309 +_002171_hash+ieee80211_if_read_state+3+9813+_002280_hash+NULL+nohasharray
86310 +_002172_hash+ieee80211_if_read_tkip_mic_test+3+19565+_002172_hash+NULL
86311 +_002173_hash+ieee80211_if_read_tsf+3+16420+_002173_hash+NULL
86312 +_002174_hash+ieee80211_if_read_uapsd_max_sp_len+3+15067+_002174_hash+NULL
86313 +_002175_hash+ieee80211_if_read_uapsd_queues+3+55150+_002175_hash+NULL
86314 +_002176_hash+ieee80211_rx_mgmt_beacon+3+24430+_002176_hash+NULL
86315 +_002177_hash+ieee80211_rx_mgmt_probe_resp+3+6918+_002177_hash+NULL
86316 +_002178_hash+ima_show_htable_violations+3+10619+_002178_hash+NULL
86317 +_002179_hash+ima_show_measurements_count+3+23536+_002179_hash+NULL
86318 +_002180_hash+insert_one_name+7+61668+_002180_hash+NULL
86319 +_002181_hash+ipr_change_queue_depth+2+6431+_002181_hash+NULL
86320 +_002182_hash+ip_recv_error+3+23109+_002182_hash+NULL
86321 +_002183_hash+ipv6_recv_error+3+56347+_002183_hash+NULL
86322 +_002184_hash+ipv6_recv_rxpmtu+3+7142+_002184_hash+NULL
86323 +_002185_hash+ipx_recvmsg+4+44366+_002185_hash+NULL
86324 +_002186_hash+irda_recvmsg_dgram+4+32631+_002186_hash+NULL
86325 +_002187_hash+iscsi_change_queue_depth+2+23416+_002187_hash+NULL
86326 +_002188_hash+ivtv_read_pos+3+34400+_002188_hash+&_000303_hash
86327 +_002189_hash+key_conf_hw_key_idx_read+3+25003+_002189_hash+NULL
86328 +_002190_hash+key_conf_keyidx_read+3+42443+_002190_hash+NULL
86329 +_002191_hash+key_conf_keylen_read+3+49758+_002191_hash+NULL
86330 +_002192_hash+key_flags_read+3+25931+_002192_hash+NULL
86331 +_002193_hash+key_ifindex_read+3+31411+_002193_hash+NULL
86332 +_002194_hash+key_tx_rx_count_read+3+44742+_002194_hash+NULL
86333 +_002195_hash+l2cap_sock_sendmsg+4+63427+_002195_hash+NULL
86334 +_002196_hash+l2tp_ip_recvmsg+4+22681+_002196_hash+NULL
86335 +_002197_hash+llc_ui_recvmsg+4+3826+_002197_hash+NULL
86336 +_002198_hash+lpfc_change_queue_depth+2+25905+_002198_hash+NULL
86337 +_002199_hash+macvtap_do_read+4+36555+_002199_hash+&_001832_hash
86338 +_002200_hash+megaraid_change_queue_depth+2+64815+_002200_hash+NULL
86339 +_002201_hash+megasas_change_queue_depth+2+32747+_002201_hash+NULL
86340 +_002202_hash+mptscsih_change_queue_depth+2+26036+_002202_hash+NULL
86341 +_002203_hash+NCR_700_change_queue_depth+2+31742+_002203_hash+NULL
86342 +_002204_hash+netlink_recvmsg+4+61600+_002204_hash+NULL
86343 +_002205_hash+nfsctl_transaction_read+3+48250+_002205_hash+NULL
86344 +_002206_hash+nfs_map_group_to_gid+3+15892+_002206_hash+NULL
86345 +_002207_hash+nfs_map_name_to_uid+3+51132+_002207_hash+NULL
86346 +_002208_hash+nr_recvmsg+4+12649+_002208_hash+NULL
86347 +_002209_hash+osd_req_list_collection_objects+5+36664+_002209_hash+NULL
86348 +_002210_hash+osd_req_list_partition_objects+5+56464+_002210_hash+NULL
86349 +_002212_hash+packet_recv_error+3+16669+_002212_hash+NULL
86350 +_002213_hash+packet_recvmsg+4+47700+_002213_hash+NULL
86351 +_002214_hash+pep_recvmsg+4+19402+_002214_hash+NULL
86352 +_002215_hash+pfkey_recvmsg+4+53604+_002215_hash+NULL
86353 +_002216_hash+ping_recvmsg+4+25597+_002216_hash+NULL
86354 +_002217_hash+pmcraid_change_queue_depth+2+9116+_002217_hash+NULL
86355 +_002218_hash+pn_recvmsg+4+30887+_002218_hash+NULL
86356 +_002219_hash+pointer_size_read+3+51863+_002219_hash+NULL
86357 +_002220_hash+power_read+3+15939+_002220_hash+NULL
86358 +_002221_hash+pppoe_recvmsg+4+15073+_002221_hash+NULL
86359 +_002222_hash+pppol2tp_recvmsg+4+57742+_002222_hash+NULL
86360 +_002223_hash+qla2x00_adjust_sdev_qdepth_up+2+20097+_002223_hash+NULL
86361 +_002224_hash+qla2x00_change_queue_depth+2+24742+_002224_hash+NULL
86362 +_002225_hash+raw_recvmsg+4+52529+_002225_hash+NULL
86363 +_002226_hash+rawsock_recvmsg+4+12144+_002226_hash+NULL
86364 +_002227_hash+rawv6_recvmsg+4+30265+_002227_hash+NULL
86365 +_002228_hash+reada_add_block+2+54247+_002228_hash+NULL
86366 +_002229_hash+readahead_tree_block+3+36285+_002229_hash+NULL
86367 +_002230_hash+reada_tree_block_flagged+3+18402+_002230_hash+NULL
86368 +_002231_hash+read_tree_block+3+841+_002231_hash+NULL
86369 +_002232_hash+recover_peb+6-7+29238+_002232_hash+NULL
86370 +_002234_hash+recv_msg+4+48709+_002234_hash+NULL
86371 +_002235_hash+recv_stream+4+30138+_002235_hash+NULL
86372 +_002236_hash+_req_append_segment+2+41031+_002236_hash+NULL
86373 +_002237_hash+request_key_async+4+6990+_002237_hash+NULL
86374 +_002238_hash+request_key_async_with_auxdata+4+46624+_002238_hash+NULL
86375 +_002239_hash+request_key_with_auxdata+4+24515+_002239_hash+NULL
86376 +_002240_hash+rose_recvmsg+4+2368+_002240_hash+NULL
86377 +_002241_hash+rxrpc_recvmsg+4+26233+_002241_hash+NULL
86378 +_002242_hash+rx_streaming_always_read+3+49401+_002242_hash+NULL
86379 +_002243_hash+rx_streaming_interval_read+3+55291+_002243_hash+NULL
86380 +_002244_hash+sas_change_queue_depth+2+18555+_002244_hash+NULL
86381 +_002245_hash+scsi_activate_tcq+2+42640+_002245_hash+NULL
86382 +_002246_hash+scsi_deactivate_tcq+2+47086+_002246_hash+NULL
86383 +_002247_hash+scsi_execute+5+33596+_002247_hash+NULL
86384 +_002248_hash+_scsih_adjust_queue_depth+2+1083+_002248_hash+NULL
86385 +_002249_hash+scsi_init_shared_tag_map+2+59812+_002249_hash+NULL
86386 +_002250_hash+scsi_track_queue_full+2+44239+_002250_hash+NULL
86387 +_002251_hash+sctp_recvmsg+4+23265+_002251_hash+NULL
86388 +_002252_hash+send_stream+4+3397+_002252_hash+NULL
86389 +_002253_hash+skb_copy_and_csum_datagram_iovec+2+24466+_002253_hash+NULL
86390 +_002255_hash+snd_gf1_mem_proc_dump+5+16926+_002255_hash+NULL
86391 +_002256_hash+split_scan_timeout_read+3+20029+_002256_hash+NULL
86392 +_002257_hash+sta_dev_read+3+14782+_002257_hash+NULL
86393 +_002258_hash+sta_inactive_ms_read+3+25690+_002258_hash+NULL
86394 +_002259_hash+sta_last_signal_read+3+31818+_002259_hash+NULL
86395 +_002260_hash+stats_dot11ACKFailureCount_read+3+45558+_002260_hash+NULL
86396 +_002261_hash+stats_dot11FCSErrorCount_read+3+28154+_002261_hash+NULL
86397 +_002262_hash+stats_dot11RTSFailureCount_read+3+43948+_002262_hash+NULL
86398 +_002263_hash+stats_dot11RTSSuccessCount_read+3+33065+_002263_hash+NULL
86399 +_002264_hash+storvsc_connect_to_vsp+2+22+_002264_hash+NULL
86400 +_002265_hash+suspend_dtim_interval_read+3+64971+_002265_hash+NULL
86401 +_002266_hash+sys_msgrcv+3+959+_002266_hash+NULL
86402 +_002267_hash+tcm_loop_change_queue_depth+2+42454+_002267_hash+NULL
86403 +_002268_hash+tcp_copy_to_iovec+3+28344+_002268_hash+NULL
86404 +_002269_hash+tcp_recvmsg+4+31238+_002269_hash+NULL
86405 +_002270_hash+timeout_read+3+47915+_002270_hash+NULL
86406 +_002271_hash+total_ps_buffered_read+3+16365+_002271_hash+NULL
86407 +_002272_hash+tun_put_user+4+59849+_002272_hash+NULL
86408 +_002273_hash+twa_change_queue_depth+2+48808+_002273_hash+NULL
86409 +_002274_hash+tw_change_queue_depth+2+11116+_002274_hash+NULL
86410 +_002275_hash+twl_change_queue_depth+2+41342+_002275_hash+NULL
86411 +_002276_hash+ubi_eba_write_leb+5-6+19826+_002276_hash+NULL
86412 +_002278_hash+ubi_eba_write_leb_st+5+27896+_002278_hash+NULL
86413 +_002279_hash+udp_recvmsg+4+42558+_002279_hash+NULL
86414 +_002280_hash+udpv6_recvmsg+4+9813+_002280_hash+&_002171_hash
86415 +_002281_hash+ulong_read_file+3+42304+_002281_hash+&_000511_hash
86416 +_002282_hash+unix_dgram_recvmsg+4+14952+_002282_hash+NULL
86417 +_002283_hash+user_power_read+3+39414+_002283_hash+NULL
86418 +_002284_hash+vcc_recvmsg+4+37198+_002284_hash+NULL
86419 +_002285_hash+wep_iv_read+3+54744+_002285_hash+NULL
86420 +_002286_hash+x25_recvmsg+4+42777+_002286_hash+NULL
86421 +_002287_hash+xfs_iext_insert+3+18667+_002287_hash+NULL
86422 +_002288_hash+xfs_iext_remove+3+50909+_002288_hash+NULL
86423 +_002289_hash+xlog_find_verify_log_record+2+18870+_002289_hash+NULL
86424 +_002290_hash+btrfs_alloc_free_block+3+29982+_002290_hash+NULL
86425 +_002291_hash+cx18_read_pos+3+4683+_002291_hash+NULL
86426 +_002292_hash+l2cap_sock_recvmsg+4+59886+_002292_hash+NULL
86427 +_002293_hash+osd_req_list_dev_partitions+4+60027+_002293_hash+NULL
86428 +_002294_hash+osd_req_list_partition_collections+5+38223+_002294_hash+NULL
86429 +_002295_hash+osst_do_scsi+4+44410+_002295_hash+NULL
86430 +_002296_hash+qla2x00_handle_queue_full+2+24365+_002296_hash+NULL
86431 +_002297_hash+rfcomm_sock_recvmsg+4+22227+_002297_hash+NULL
86432 +_002298_hash+scsi_execute_req+5+42088+_002298_hash+NULL
86433 +_002299_hash+_scsih_change_queue_depth+2+26230+_002299_hash+NULL
86434 +_002300_hash+spi_execute+5+28736+_002300_hash+NULL
86435 +_002301_hash+submit_inquiry+3+42108+_002301_hash+NULL
86436 +_002302_hash+tcp_dma_try_early_copy+3+37651+_002302_hash+NULL
86437 +_002303_hash+tun_do_read+4+50800+_002303_hash+NULL
86438 +_002304_hash+ubi_eba_atomic_leb_change+5+13041+_002304_hash+NULL
86439 +_002305_hash+ubi_leb_write+4-5+41691+_002305_hash+NULL
86440 +_002307_hash+unix_seqpacket_recvmsg+4+23062+_002307_hash+NULL
86441 +_002308_hash+write_leb+5+36957+_002308_hash+NULL
86442 +_002309_hash+ch_do_scsi+4+31171+_002309_hash+NULL
86443 +_002310_hash+dbg_leb_write+4-5+20478+_002310_hash+NULL
86444 +_002312_hash+scsi_mode_sense+5+16835+_002312_hash+NULL
86445 +_002313_hash+scsi_vpd_inquiry+4+30040+_002313_hash+NULL
86446 +_002314_hash+ses_recv_diag+4+47143+_002314_hash+&_000673_hash
86447 +_002315_hash+ses_send_diag+4+64527+_002315_hash+NULL
86448 +_002316_hash+spi_dv_device_echo_buffer+2-3+39846+_002316_hash+NULL
86449 +_002318_hash+ubifs_leb_write+4-5+61226+_002318_hash+NULL
86450 +_002320_hash+ubi_leb_change+4+14899+_002320_hash+NULL
86451 +_002321_hash+ubi_write+4-5+30809+_002321_hash+NULL
86452 +_002322_hash+dbg_leb_change+4+19969+_002322_hash+NULL
86453 +_002323_hash+gluebi_write+3+27905+_002323_hash+NULL
86454 +_002324_hash+scsi_get_vpd_page+4+51951+_002324_hash+NULL
86455 +_002325_hash+sd_do_mode_sense+5+11507+_002325_hash+NULL
86456 +_002326_hash+ubifs_leb_change+4+22399+_002436_hash+NULL+nohasharray
86457 +_002327_hash+ubifs_write_node+5+15088+_002327_hash+NULL
86458 +_002328_hash+fixup_leb+3+43256+_002328_hash+NULL
86459 +_002329_hash+recover_head+3+17904+_002329_hash+NULL
86460 +_002330_hash+alloc_cpu_rmap+1+65363+_002330_hash+NULL
86461 +_002331_hash+alloc_ebda_hpc+1-2+50046+_002331_hash+NULL
86462 +_002333_hash+alloc_sched_domains+1+28972+_002333_hash+NULL
86463 +_002334_hash+amthi_read+4+45831+_002334_hash+NULL
86464 +_002335_hash+bcm_char_read+3+31750+_002335_hash+NULL
86465 +_002336_hash+BcmCopySection+5+2035+_002336_hash+NULL
86466 +_002337_hash+buffer_from_user+3+51826+_002337_hash+NULL
86467 +_002338_hash+buffer_to_user+3+35439+_002338_hash+NULL
86468 +_002339_hash+c4iw_init_resource_fifo+3+48090+_002339_hash+NULL
86469 +_002340_hash+c4iw_init_resource_fifo_random+3+25547+_002340_hash+NULL
86470 +_002341_hash+card_send_command+3+40757+_002341_hash+NULL
86471 +_002342_hash+chd_dec_fetch_cdata+3+50926+_002342_hash+NULL
86472 +_002343_hash+crystalhd_create_dio_pool+2+3427+_002343_hash+NULL
86473 +_002344_hash+crystalhd_user_data+3+18407+_002344_hash+NULL
86474 +_002345_hash+cxio_init_resource_fifo+3+28764+_002345_hash+NULL
86475 +_002346_hash+cxio_init_resource_fifo_random+3+47151+_002346_hash+NULL
86476 +_002347_hash+do_pages_stat+2+4437+_002347_hash+NULL
86477 +_002348_hash+do_read_log_to_user+4+3236+_002348_hash+NULL
86478 +_002349_hash+do_write_log_from_user+3+39362+_002349_hash+NULL
86479 +_002350_hash+dt3155_read+3+59226+_002350_hash+NULL
86480 +_002351_hash+easycap_alsa_vmalloc+2+14426+_002351_hash+NULL
86481 +_002352_hash+evm_read_key+3+54674+_002352_hash+NULL
86482 +_002353_hash+evm_write_key+3+27715+_002353_hash+NULL
86483 +_002354_hash+fir16_create+3+5574+_002354_hash+NULL
86484 +_002355_hash+iio_allocate_device+1+18821+_002355_hash+NULL
86485 +_002356_hash+__iio_allocate_kfifo+2-3+55738+_002356_hash+NULL
86486 +_002358_hash+__iio_allocate_sw_ring_buffer+3+4843+_002358_hash+NULL
86487 +_002359_hash+iio_debugfs_read_reg+3+60908+_002359_hash+NULL
86488 +_002360_hash+iio_debugfs_write_reg+3+22742+_002360_hash+NULL
86489 +_002361_hash+iio_event_chrdev_read+3+54757+_002361_hash+NULL
86490 +_002362_hash+iio_read_first_n_kfifo+2+57910+_002362_hash+NULL
86491 +_002363_hash+iio_read_first_n_sw_rb+2+51911+_002363_hash+NULL
86492 +_002364_hash+ioapic_setup_resources+1+35255+_002364_hash+NULL
86493 +_002365_hash+keymap_store+4+45406+_002365_hash+NULL
86494 +_002366_hash+kzalloc_node+1+24352+_002366_hash+NULL
86495 +_002367_hash+line6_alloc_sysex_buffer+4+28225+_002367_hash+NULL
86496 +_002368_hash+line6_dumpreq_initbuf+3+53123+_002368_hash+NULL
86497 +_002369_hash+line6_midibuf_init+2+52425+_002369_hash+NULL
86498 +_002370_hash+lirc_write+3+20604+_002370_hash+NULL
86499 +_002371_hash+_malloc+1+54077+_002371_hash+NULL
86500 +_002372_hash+mei_read+3+6507+_002372_hash+NULL
86501 +_002373_hash+mei_write+3+4005+_002373_hash+NULL
86502 +_002374_hash+mempool_create_node+1+44715+_002374_hash+NULL
86503 +_002375_hash+msg_set+3+51725+_002375_hash+NULL
86504 +_002376_hash+newpart+6+47485+_002376_hash+NULL
86505 +_002377_hash+OS_kmalloc+1+36909+_002377_hash+NULL
86506 +_002378_hash+pcpu_alloc_bootmem+2+62074+_002378_hash+NULL
86507 +_002379_hash+pcpu_get_vm_areas+3+50085+_002379_hash+NULL
86508 +_002380_hash+resource_from_user+3+30341+_002380_hash+NULL
86509 +_002381_hash+sca3000_read_data+4+57064+_002381_hash+NULL
86510 +_002382_hash+sca3000_read_first_n_hw_rb+2+11479+_002382_hash+NULL
86511 +_002383_hash+send_midi_async+3+57463+_002383_hash+NULL
86512 +_002384_hash+sep_create_dcb_dmatables_context+6+37551+_002384_hash+NULL
86513 +_002385_hash+sep_create_dcb_dmatables_context_kernel+6+49728+_002385_hash+NULL
86514 +_002386_hash+sep_create_msgarea_context+4+33829+_002386_hash+NULL
86515 +_002387_hash+sep_lli_table_secure_dma+2-3+64042+_002387_hash+NULL
86516 +_002389_hash+sep_lock_user_pages+2-3+57470+_002389_hash+&_002154_hash
86517 +_002391_hash+sep_prepare_input_output_dma_table_in_dcb+4-5+63087+_002391_hash+NULL
86518 +_002393_hash+sep_read+3+17161+_002393_hash+NULL
86519 +_002394_hash+TransmitTcb+4+12989+_002394_hash+NULL
86520 +_002395_hash+ValidateDSDParamsChecksum+3+63654+_002395_hash+NULL
86521 +_002396_hash+Wb35Reg_BurstWrite+4+62327+_002396_hash+NULL
86522 +_002397_hash+__alloc_bootmem_low_node+2+25726+_002397_hash+&_001499_hash
86523 +_002398_hash+__alloc_bootmem_node+2+1992+_002398_hash+NULL
86524 +_002399_hash+alloc_irq_cpu_rmap+1+28459+_002399_hash+NULL
86525 +_002400_hash+alloc_ring+2-4+18278+_002400_hash+NULL
86526 +_002402_hash+c4iw_init_resource+2-3+30393+_002402_hash+NULL
86527 +_002404_hash+cxio_hal_init_resource+2-7-6+29771+_002404_hash+&_000284_hash
86528 +_002407_hash+cxio_hal_init_rhdl_resource+1+25104+_002407_hash+NULL
86529 +_002408_hash+disk_expand_part_tbl+2+30561+_002408_hash+NULL
86530 +_002409_hash+InterfaceTransmitPacket+3+42058+_002409_hash+NULL
86531 +_002410_hash+line6_dumpreq_init+3+34473+_002410_hash+NULL
86532 +_002411_hash+mempool_create+1+29437+_002411_hash+NULL
86533 +_002412_hash+pcpu_fc_alloc+2+11818+_002412_hash+NULL
86534 +_002413_hash+pod_alloc_sysex_buffer+3+31651+_002413_hash+NULL
86535 +_002414_hash+r8712_usbctrl_vendorreq+6+48489+_002414_hash+NULL
86536 +_002415_hash+r871x_set_wpa_ie+3+7000+_002415_hash+NULL
86537 +_002416_hash+sys_move_pages+2+42626+_002416_hash+NULL
86538 +_002417_hash+variax_alloc_sysex_buffer+3+15237+_002417_hash+NULL
86539 +_002418_hash+vme_user_write+3+15587+_002418_hash+NULL
86540 +_002419_hash+add_partition+2+55588+_002419_hash+NULL
86541 +_002420_hash+__alloc_bootmem_node_high+2+65076+_002420_hash+NULL
86542 +_002421_hash+ceph_msgpool_init+3+33312+_002421_hash+NULL
86543 +_002423_hash+mempool_create_kmalloc_pool+1+41650+_002423_hash+NULL
86544 +_002424_hash+mempool_create_page_pool+1+30189+_002424_hash+NULL
86545 +_002425_hash+mempool_create_slab_pool+1+62907+_002425_hash+NULL
86546 +_002426_hash+variax_set_raw2+4+32374+_002426_hash+NULL
86547 +_002427_hash+bioset_create+1+5580+_002427_hash+NULL
86548 +_002428_hash+bioset_integrity_create+2+62708+_002428_hash+NULL
86549 +_002429_hash+biovec_create_pools+2+9575+_002429_hash+NULL
86550 +_002430_hash+i2o_pool_alloc+4+55485+_002430_hash+NULL
86551 +_002431_hash+prison_create+1+43623+_002431_hash+NULL
86552 +_002432_hash+unlink_simple+3+47506+_002432_hash+NULL
86553 +_002433_hash+alloc_ieee80211+1+20063+_002433_hash+NULL
86554 +_002434_hash+alloc_ieee80211_rsl+1+34564+_002434_hash+NULL
86555 +_002435_hash+alloc_page_cgroup+1+2919+_002435_hash+NULL
86556 +_002436_hash+alloc_private+2+22399+_002436_hash+&_002326_hash
86557 +_002437_hash+alloc_rtllib+1+51136+_002437_hash+NULL
86558 +_002438_hash+alloc_rx_desc_ring+2+18016+_002438_hash+NULL
86559 +_002439_hash+alloc_subdevices+2+43300+_002439_hash+NULL
86560 +_002440_hash+atomic_counters_read+3+48827+_002440_hash+NULL
86561 +_002441_hash+atomic_stats_read+3+36228+_002441_hash+NULL
86562 +_002442_hash+capabilities_read+3+58457+_002442_hash+NULL
86563 +_002443_hash+comedi_read+3+13199+_002443_hash+NULL
86564 +_002444_hash+comedi_write+3+47926+_002444_hash+NULL
86565 +_002445_hash+compat_do_arpt_set_ctl+4+12184+_002445_hash+NULL
86566 +_002446_hash+compat_do_ip6t_set_ctl+4+3184+_002446_hash+NULL
86567 +_002447_hash+compat_do_ipt_set_ctl+4+58466+_002447_hash+&_001852_hash
86568 +_002448_hash+compat_filldir+3+32999+_002448_hash+NULL
86569 +_002449_hash+compat_filldir64+3+35354+_002449_hash+NULL
86570 +_002450_hash+compat_fillonedir+3+15620+_002450_hash+NULL
86571 +_002451_hash+compat_rw_copy_check_uvector+3+25242+_002451_hash+NULL
86572 +_002452_hash+compat_sock_setsockopt+5+23+_002452_hash+NULL
86573 +_002453_hash+compat_sys_kexec_load+2+35674+_002453_hash+NULL
86574 +_002454_hash+compat_sys_keyctl+4+9639+_002454_hash+NULL
86575 +_002455_hash+compat_sys_move_pages+2+5861+_002455_hash+NULL
86576 +_002456_hash+compat_sys_mq_timedsend+3+31060+_002456_hash+NULL
86577 +_002457_hash+compat_sys_msgrcv+2+7482+_002457_hash+NULL
86578 +_002458_hash+compat_sys_msgsnd+2+10738+_002458_hash+NULL
86579 +_002459_hash+compat_sys_semtimedop+3+3606+_002459_hash+NULL
86580 +_002460_hash+__copy_in_user+3+34790+_002460_hash+NULL
86581 +_002461_hash+copy_in_user+3+57502+_002461_hash+NULL
86582 +_002462_hash+dev_counters_read+3+19216+_002462_hash+NULL
86583 +_002463_hash+dev_names_read+3+38509+_002463_hash+NULL
86584 +_002464_hash+do_arpt_set_ctl+4+51053+_002464_hash+NULL
86585 +_002465_hash+do_ip6t_set_ctl+4+60040+_002465_hash+NULL
86586 +_002466_hash+do_ipt_set_ctl+4+56238+_002466_hash+NULL
86587 +_002467_hash+drbd_bm_resize+2+20522+_002467_hash+NULL
86588 +_002468_hash+driver_names_read+3+60399+_002468_hash+NULL
86589 +_002469_hash+driver_stats_read+3+8944+_002469_hash+NULL
86590 +_002470_hash+__earlyonly_bootmem_alloc+2+23824+_002470_hash+NULL
86591 +_002471_hash+evtchn_read+3+3569+_002471_hash+NULL
86592 +_002472_hash+ext_sd_execute_read_data+9+48589+_002472_hash+NULL
86593 +_002473_hash+ext_sd_execute_write_data+9+8175+_002473_hash+NULL
86594 +_002474_hash+fat_compat_ioctl_filldir+3+36328+_002474_hash+NULL
86595 +_002475_hash+firmwareUpload+3+32794+_002475_hash+NULL
86596 +_002476_hash+flash_read+3+57843+_002476_hash+NULL
86597 +_002477_hash+flash_write+3+62354+_002477_hash+NULL
86598 +_002478_hash+gather_array+3+56641+_002478_hash+NULL
86599 +_002479_hash+ghash_async_setkey+3+60001+_002479_hash+NULL
86600 +_002480_hash+gntdev_alloc_map+2+35145+_002480_hash+NULL
86601 +_002481_hash+gnttab_map+2+56439+_002481_hash+NULL
86602 +_002482_hash+gru_alloc_gts+2-3+60056+_002482_hash+NULL
86603 +_002484_hash+handle_eviocgbit+3+44193+_002484_hash+NULL
86604 +_002485_hash+hid_parse_report+3+51737+_002485_hash+NULL
86605 +_002486_hash+ieee80211_alloc_txb+1-2+52477+_002486_hash+NULL
86606 +_002487_hash+ieee80211_wx_set_gen_ie+3+51399+_002487_hash+NULL
86607 +_002488_hash+ieee80211_wx_set_gen_ie_rsl+3+3521+_002488_hash+NULL
86608 +_002489_hash+init_cdev+1+8274+_002489_hash+NULL
86609 +_002490_hash+init_per_cpu+1+17880+_002490_hash+NULL
86610 +_002491_hash+ipath_create_cq+2+45586+_002491_hash+NULL
86611 +_002492_hash+ipath_get_base_info+3+7043+_002492_hash+NULL
86612 +_002493_hash+ipath_init_qp_table+2+25167+_002493_hash+NULL
86613 +_002494_hash+ipath_resize_cq+2+712+_002494_hash+NULL
86614 +_002495_hash+ni_gpct_device_construct+5+610+_002495_hash+NULL
86615 +_002496_hash+options_write+3+47243+_002496_hash+NULL
86616 +_002497_hash+portcntrs_1_read+3+47253+_002497_hash+NULL
86617 +_002498_hash+portcntrs_2_read+3+56586+_002498_hash+NULL
86618 +_002499_hash+portnames_read+3+41958+_002499_hash+NULL
86619 +_002500_hash+ptc_proc_write+3+12076+_002500_hash+NULL
86620 +_002501_hash+put_cmsg_compat+4+35937+_002501_hash+NULL
86621 +_002502_hash+qib_alloc_devdata+2+51819+_002502_hash+NULL
86622 +_002503_hash+qib_alloc_fast_reg_page_list+2+10507+_002503_hash+NULL
86623 +_002504_hash+qib_cdev_init+1+34778+_002504_hash+NULL
86624 +_002505_hash+qib_create_cq+2+27497+_002505_hash+NULL
86625 +_002506_hash+qib_diag_write+3+62133+_002506_hash+NULL
86626 +_002507_hash+qib_get_base_info+3+11369+_002507_hash+NULL
86627 +_002508_hash+qib_resize_cq+2+53090+_002508_hash+NULL
86628 +_002509_hash+qsfp_1_read+3+21915+_002509_hash+NULL
86629 +_002510_hash+qsfp_2_read+3+31491+_002510_hash+NULL
86630 +_002511_hash+queue_reply+3+22416+_002511_hash+NULL
86631 +_002512_hash+Realloc+2+34961+_002512_hash+NULL
86632 +_002513_hash+rfc4106_set_key+3+54519+_002513_hash+NULL
86633 +_002514_hash+rtllib_alloc_txb+1-2+21687+_002514_hash+NULL
86634 +_002515_hash+rtllib_wx_set_gen_ie+3+59808+_002515_hash+NULL
86635 +_002516_hash+rts51x_transfer_data_partial+6+5735+_002516_hash+NULL
86636 +_002517_hash+sparse_early_usemaps_alloc_node+4+9269+_002517_hash+NULL
86637 +_002518_hash+split+2+11691+_002518_hash+NULL
86638 +_002519_hash+stats_read_ul+3+32751+_002519_hash+NULL
86639 +_002520_hash+store_debug_level+3+35652+_002520_hash+NULL
86640 +_002521_hash+sys32_ipc+3+7238+_002521_hash+NULL
86641 +_002522_hash+sys32_rt_sigpending+2+25814+_002522_hash+NULL
86642 +_002523_hash+tunables_read+3+36385+_002523_hash+NULL
86643 +_002524_hash+tunables_write+3+59563+_002524_hash+NULL
86644 +_002525_hash+u32_array_read+3+2219+_002525_hash+NULL
86645 +_002526_hash+usb_buffer_alloc+2+36276+_002526_hash+NULL
86646 +_002527_hash+xenbus_file_write+3+6282+_002527_hash+NULL
86647 +_002528_hash+xpc_kmalloc_cacheline_aligned+1+42895+_002528_hash+NULL
86648 +_002529_hash+xpc_kzalloc_cacheline_aligned+1+65433+_002529_hash+NULL
86649 +_002530_hash+xsd_read+3+15653+_002530_hash+NULL
86650 +_002531_hash+compat_do_readv_writev+4+49102+_002531_hash+NULL
86651 +_002532_hash+compat_keyctl_instantiate_key_iov+3+57431+_003110_hash+NULL+nohasharray
86652 +_002533_hash+compat_process_vm_rw+3-5+22254+_002533_hash+NULL
86653 +_002535_hash+compat_sys_setsockopt+5+3326+_002535_hash+NULL
86654 +_002536_hash+ipath_cdev_init+1+37752+_002536_hash+NULL
86655 +_002537_hash+ms_read_multiple_pages+4-5+8052+_002537_hash+NULL
86656 +_002539_hash+ms_write_multiple_pages+5-6+10362+_002539_hash+NULL
86657 +_002541_hash+sparse_mem_maps_populate_node+4+12669+_002541_hash+&_002004_hash
86658 +_002542_hash+vmemmap_alloc_block+1+43245+_002542_hash+NULL
86659 +_002543_hash+xd_read_multiple_pages+4-5+11422+_002543_hash+NULL
86660 +_002545_hash+xd_write_multiple_pages+5-6+53633+_002545_hash+NULL
86661 +_002546_hash+compat_readv+3+30273+_002546_hash+NULL
86662 +_002547_hash+compat_sys_process_vm_readv+3-5+15374+_002547_hash+NULL
86663 +_002549_hash+compat_sys_process_vm_writev+3-5+41194+_002549_hash+NULL
86664 +_002551_hash+compat_writev+3+60063+_002551_hash+NULL
86665 +_002552_hash+ms_rw_multi_sector+4+7459+_002552_hash+NULL
86666 +_002553_hash+sparse_early_mem_maps_alloc_node+4+36971+_002553_hash+NULL
86667 +_002554_hash+vmemmap_alloc_block_buf+1+61126+_002554_hash+NULL
86668 +_002555_hash+xd_rw+4+49020+_002555_hash+NULL
86669 +_002556_hash+compat_sys_preadv64+3+24283+_002556_hash+NULL
86670 +_002557_hash+compat_sys_pwritev64+3+51151+_002557_hash+NULL
86671 +_002558_hash+compat_sys_readv+3+20911+_002558_hash+NULL
86672 +_002559_hash+compat_sys_writev+3+5784+_002559_hash+NULL
86673 +_002560_hash+ms_rw+4+17220+_002560_hash+NULL
86674 +_002561_hash+compat_sys_preadv+3+583+_002561_hash+NULL
86675 +_002562_hash+compat_sys_pwritev+3+17886+_002562_hash+NULL
86676 +_002563_hash+alloc_apertures+1+56561+_002563_hash+NULL
86677 +_002564_hash+bin_uuid+3+28999+_002564_hash+NULL
86678 +_002565_hash+__copy_from_user_inatomic_nocache+3+49921+_002565_hash+NULL
86679 +_002566_hash+do_dmabuf_dirty_sou+7+3017+_002566_hash+NULL
86680 +_002567_hash+do_surface_dirty_sou+7+39678+_002567_hash+NULL
86681 +_002568_hash+drm_agp_bind_pages+3+56748+_002568_hash+NULL
86682 +_002569_hash+drm_calloc_large+1-2+65421+_002569_hash+NULL
86683 +_002571_hash+drm_fb_helper_init+3-4+19044+_002571_hash+NULL
86684 +_002573_hash+drm_ht_create+2+18853+_002573_hash+NULL
86685 +_002574_hash+drm_malloc_ab+1-2+16831+_002574_hash+NULL
86686 +_002576_hash+drm_mode_crtc_set_gamma_size+2+31881+_002576_hash+NULL
86687 +_002577_hash+drm_plane_init+6+28731+_002577_hash+NULL
86688 +_002578_hash+drm_property_create+4+51239+_002578_hash+NULL
86689 +_002579_hash+drm_property_create_blob+2+7414+_002579_hash+NULL
86690 +_002580_hash+drm_vblank_init+2+11362+_002580_hash+NULL
86691 +_002581_hash+drm_vmalloc_dma+1+14550+_002581_hash+NULL
86692 +_002582_hash+fb_alloc_cmap_gfp+2+20792+_002582_hash+NULL
86693 +_002583_hash+fbcon_prepare_logo+5+6246+_002583_hash+NULL
86694 +_002584_hash+fb_read+3+33506+_002584_hash+NULL
86695 +_002585_hash+fb_write+3+46924+_002585_hash+NULL
86696 +_002586_hash+framebuffer_alloc+1+59145+_002586_hash+NULL
86697 +_002587_hash+i915_cache_sharing_read+3+24775+_002587_hash+NULL
86698 +_002588_hash+i915_cache_sharing_write+3+57961+_002588_hash+NULL
86699 +_002589_hash+i915_max_freq_read+3+20581+_002589_hash+NULL
86700 +_002590_hash+i915_max_freq_write+3+11350+_002590_hash+NULL
86701 +_002591_hash+i915_wedged_read+3+35474+_002591_hash+NULL
86702 +_002592_hash+i915_wedged_write+3+47771+_002592_hash+NULL
86703 +_002593_hash+p9_client_read+5+19750+_002593_hash+NULL
86704 +_002594_hash+probe_kernel_write+3+17481+_002594_hash+NULL
86705 +_002595_hash+sched_feat_write+3+55202+_002595_hash+NULL
86706 +_002596_hash+sd_alloc_ctl_entry+1+29708+_002596_hash+NULL
86707 +_002597_hash+tstats_write+3+60432+_002597_hash+&_000009_hash
86708 +_002598_hash+ttm_bo_fbdev_io+4+9805+_002598_hash+NULL
86709 +_002599_hash+ttm_bo_io+5+47000+_002599_hash+NULL
86710 +_002600_hash+ttm_dma_page_pool_free+2+34135+_002600_hash+NULL
86711 +_002601_hash+ttm_page_pool_free+2+61661+_002601_hash+NULL
86712 +_002602_hash+vmw_execbuf_process+5+22885+_002602_hash+NULL
86713 +_002603_hash+vmw_fifo_reserve+2+12141+_002603_hash+NULL
86714 +_002604_hash+vmw_kms_present+9+38130+_002604_hash+NULL
86715 +_002605_hash+vmw_kms_readback+6+5727+_002605_hash+NULL
86716 +_002606_hash+do_dmabuf_dirty_ldu+6+52241+_002606_hash+NULL
86717 +_002607_hash+drm_mode_create_tv_properties+2+23122+_002607_hash+NULL
86718 +_002608_hash+drm_property_create_enum+5+29201+_002608_hash+NULL
86719 +_002609_hash+fast_user_write+5+20494+_002609_hash+NULL
86720 +_002610_hash+fb_alloc_cmap+2+6554+_002610_hash+NULL
86721 +_002611_hash+i915_gem_execbuffer_relocate_slow+7+25355+_002611_hash+NULL
86722 +_002612_hash+kgdb_hex2mem+3+24755+_002612_hash+NULL
86723 +_002613_hash+ttm_object_device_init+2+10321+_002613_hash+NULL
86724 +_002614_hash+ttm_object_file_init+2+27804+_002614_hash+NULL
86725 +_002615_hash+vmw_cursor_update_image+3-4+16332+_002615_hash+NULL
86726 +_002617_hash+vmw_gmr2_bind+3+21305+_002617_hash+NULL
86727 +_002618_hash+vmw_cursor_update_dmabuf+3-4+32045+_002618_hash+NULL
86728 +_002620_hash+vmw_gmr_bind+3+44130+_002620_hash+NULL
86729 +_002621_hash+vmw_du_crtc_cursor_set+4-5+28479+_002621_hash+NULL
86730 +_002622_hash+__module_alloc+1+50004+_002622_hash+NULL
86731 +_002623_hash+module_alloc_update_bounds_rw+1+63233+_002623_hash+NULL
86732 +_002624_hash+module_alloc_update_bounds_rx+1+58634+_002624_hash+NULL
86733 +_002625_hash+acpi_system_write_alarm+3+40205+_002625_hash+NULL
86734 +_002626_hash+create_table+2+16213+_002626_hash+NULL
86735 +_002627_hash+mem_read+3+57631+_002627_hash+NULL
86736 +_002628_hash+mem_write+3+22232+_002628_hash+NULL
86737 +_002629_hash+proc_fault_inject_read+3+36802+_002629_hash+NULL
86738 +_002630_hash+proc_fault_inject_write+3+21058+_002630_hash+NULL
86739 +_002631_hash+v9fs_fid_readn+4+60544+_002631_hash+NULL
86740 +_002632_hash+v9fs_file_read+3+40858+_002632_hash+NULL
86741 +_002633_hash+__devres_alloc+2+25598+_002633_hash+NULL
86742 +_002634_hash+alloc_dummy_extent_buffer+2+56374+_002634_hash+NULL
86743 +_002635_hash+alloc_fdtable+1+17389+_002635_hash+NULL
86744 +_002636_hash+alloc_large_system_hash+2+22391+_002636_hash+NULL
86745 +_002637_hash+alloc_ldt+2+21972+_002637_hash+NULL
86746 +_002638_hash+__alloc_skb+1+23940+_002638_hash+NULL
86747 +_002639_hash+__ata_change_queue_depth+3+23484+_002639_hash+NULL
86748 +_002640_hash+btrfs_alloc_free_block+3+8986+_002640_hash+NULL
86749 +_002641_hash+btrfs_find_device_for_logical+2+44993+_002641_hash+NULL
86750 +_002642_hash+ccid3_hc_rx_getsockopt+3+62331+_002642_hash+NULL
86751 +_002643_hash+ccid3_hc_tx_getsockopt+3+16314+_002643_hash+NULL
86752 +_002644_hash+cifs_readdata_alloc+1+26360+_002644_hash+NULL
86753 +_002645_hash+cistpl_vers_1+4+15023+_002645_hash+NULL
86754 +_002646_hash+cmm_read+3+57520+_002646_hash+NULL
86755 +_002647_hash+cosa_read+3+25966+_002647_hash+NULL
86756 +_002648_hash+dm_table_create+3+35687+_002648_hash+NULL
86757 +_002649_hash+dpcm_state_read_file+3+65489+_002649_hash+NULL
86758 +_002651_hash+edac_mc_alloc+4+3611+_002651_hash+NULL
86759 +_002652_hash+ep0_read+3+38095+_002652_hash+NULL
86760 +_002653_hash+event_buffer_read+3+48772+_002765_hash+NULL+nohasharray
86761 +_002654_hash+extend_netdev_table+2+21453+_002654_hash+NULL
86762 +_002655_hash+extract_entropy_user+3+26952+_002655_hash+NULL
86763 +_002656_hash+fcoe_ctlr_device_add+3+1793+_002656_hash+NULL
86764 +_002657_hash+fd_do_readv+3+51297+_002657_hash+NULL
86765 +_002658_hash+fd_do_writev+3+29329+_002658_hash+NULL
86766 +_002659_hash+ffs_ep0_read+3+2672+_002659_hash+NULL
86767 +_002660_hash+fill_readbuf+3+32464+_002660_hash+NULL
86768 +_002661_hash+fw_iso_buffer_alloc+2+13704+_002661_hash+NULL
86769 +_002662_hash+get_fd_set+1+3866+_002662_hash+NULL
86770 +_002663_hash+hidraw_report_event+3+20503+_002663_hash+NULL
86771 +_002664_hash+ieee80211_if_read_ht_opmode+3+29044+_002664_hash+NULL
86772 +_002665_hash+ieee80211_if_read_num_mcast_sta+3+12419+_002665_hash+NULL
86773 +_002666_hash+iwl_dbgfs_calib_disabled_read+3+22649+_002666_hash+NULL
86774 +_002667_hash+iwl_dbgfs_rf_reset_read+3+26512+_002667_hash+NULL
86775 +_002668_hash+ixgbe_alloc_q_vector+4-6+24439+_002668_hash+NULL
86776 +_002670_hash+joydev_handle_JSIOCSAXMAP+3+48898+_002836_hash+NULL+nohasharray
86777 +_002671_hash+joydev_handle_JSIOCSBTNMAP+3+15643+_002671_hash+NULL
86778 +_002672_hash+__kfifo_from_user_r+3+60345+_002672_hash+NULL
86779 +_002673_hash+kstrtoint_from_user+2+8778+_002673_hash+NULL
86780 +_002674_hash+kstrtol_from_user+2+10168+_002674_hash+NULL
86781 +_002675_hash+kstrtoll_from_user+2+19500+_002675_hash+NULL
86782 +_002676_hash+kstrtos16_from_user+2+28300+_002676_hash+NULL
86783 +_002677_hash+kstrtos8_from_user+2+58268+_002677_hash+NULL
86784 +_002678_hash+kstrtou16_from_user+2+54274+_002678_hash+NULL
86785 +_002679_hash+kstrtou8_from_user+2+55599+_002679_hash+NULL
86786 +_002680_hash+kstrtouint_from_user+2+10536+_002680_hash+NULL
86787 +_002681_hash+kstrtoul_from_user+2+64569+_002681_hash+NULL
86788 +_002682_hash+kstrtoull_from_user+2+63026+_002682_hash+NULL
86789 +_002683_hash+l2cap_create_iframe_pdu+3+40055+_002683_hash+NULL
86790 +_002684_hash+l2tp_ip6_recvmsg+4+62874+_002684_hash+NULL
86791 +_002685_hash+mem_cgroup_read+5+22461+_002685_hash+NULL
86792 +_002686_hash+nfs_fscache_get_super_cookie+3+44355+_002686_hash+&_001648_hash
86793 +_002687_hash+nfs_pgarray_set+2+1085+_002687_hash+NULL
86794 +_002688_hash+ntfs_rl_realloc+3+56831+_002688_hash+&_000363_hash
86795 +_002689_hash+ntfs_rl_realloc_nofail+3+32173+_002689_hash+NULL
86796 +_002690_hash+pn533_dep_link_up+5+22154+_002690_hash+NULL
86797 +_002691_hash+port_fops_write+3+54627+_002691_hash+NULL
86798 +_002692_hash+ptp_read+4+63251+_002692_hash+NULL
86799 +_002693_hash+qla4xxx_change_queue_depth+2+1268+_002693_hash+NULL
86800 +_002694_hash+reqsk_queue_alloc+2+40272+_002694_hash+NULL
86801 +_002695_hash+resize_info_buffer+2+62889+_002695_hash+NULL
86802 +_002696_hash+rfkill_fop_write+3+64808+_002696_hash+NULL
86803 +_002697_hash+rt2x00debug_write_rfcsr+3+41473+_002697_hash+NULL
86804 +_002698_hash+rvmalloc+1+46873+_002698_hash+NULL
86805 +_002699_hash+rw_copy_check_uvector+3+45748+_003398_hash+NULL+nohasharray
86806 +_002700_hash+sctp_getsockopt_active_key+2+45483+_002700_hash+NULL
86807 +_002701_hash+sctp_getsockopt_adaptation_layer+2+45375+_002701_hash+NULL
86808 +_002702_hash+sctp_getsockopt_assoc_ids+2+9043+_002702_hash+NULL
86809 +_002703_hash+sctp_getsockopt_associnfo+2+58169+_002703_hash+NULL
86810 +_002704_hash+sctp_getsockopt_assoc_number+2+6384+_002704_hash+NULL
86811 +_002705_hash+sctp_getsockopt_auto_asconf+2+46584+_002705_hash+NULL
86812 +_002706_hash+sctp_getsockopt_context+2+52490+_002706_hash+NULL
86813 +_002707_hash+sctp_getsockopt_default_send_param+2+63056+_002707_hash+NULL
86814 +_002708_hash+sctp_getsockopt_disable_fragments+2+12330+_002708_hash+NULL
86815 +_002709_hash+sctp_getsockopt_fragment_interleave+2+51215+_002709_hash+NULL
86816 +_002710_hash+sctp_getsockopt_initmsg+2+26042+_002710_hash+NULL
86817 +_002711_hash+sctp_getsockopt_mappedv4+2+20044+_002711_hash+NULL
86818 +_002712_hash+sctp_getsockopt_nodelay+2+9560+_002712_hash+NULL
86819 +_002713_hash+sctp_getsockopt_partial_delivery_point+2+60952+_002713_hash+NULL
86820 +_002714_hash+sctp_getsockopt_peeloff+2+59190+_002714_hash+NULL
86821 +_002715_hash+sctp_getsockopt_peer_addr_info+2+6024+_002715_hash+NULL
86822 +_002716_hash+sctp_getsockopt_peer_addr_params+2+53645+_002716_hash+NULL
86823 +_002717_hash+sctp_getsockopt_primary_addr+2+24639+_002717_hash+NULL
86824 +_002718_hash+sctp_getsockopt_rtoinfo+2+62027+_002718_hash+NULL
86825 +_002719_hash+sctp_getsockopt_sctp_status+2+56540+_002719_hash+NULL
86826 +_002720_hash+self_check_write+5+50856+_002720_hash+NULL
86827 +_002721_hash+smk_read_mapped+3+7562+_002721_hash+NULL
86828 +_002722_hash+smk_set_cipso+3+20379+_002722_hash+NULL
86829 +_002723_hash+smk_user_access+3+24440+_002723_hash+NULL
86830 +_002724_hash+smk_write_mapped+3+13519+_002724_hash+NULL
86831 +_002725_hash+smk_write_rules_list+3+18565+_002725_hash+NULL
86832 +_002726_hash+snd_mixart_BA0_read+5+45069+_002726_hash+NULL
86833 +_002727_hash+snd_mixart_BA1_read+5+5082+_002727_hash+NULL
86834 +_002728_hash+snd_pcm_oss_read2+3+54387+_002728_hash+NULL
86835 +_002729_hash+syslog_print+2+307+_002729_hash+NULL
86836 +_002730_hash+tcp_dma_try_early_copy+3+4457+_002730_hash+NULL
86837 +_002731_hash+tcp_send_rcvq+3+11316+_002731_hash+NULL
86838 +_002732_hash+tomoyo_init_log+2+61526+_002732_hash+NULL
86839 +_002733_hash+ubi_dump_flash+4+46381+_002733_hash+NULL
86840 +_002734_hash+ubi_eba_atomic_leb_change+5+60379+_002734_hash+NULL
86841 +_002735_hash+ubi_eba_write_leb+5-6+36029+_002735_hash+NULL
86842 +_002737_hash+ubi_eba_write_leb_st+5+44343+_002737_hash+NULL
86843 +_002738_hash+ubi_self_check_all_ff+4+41959+_002738_hash+NULL
86844 +_002739_hash+unix_bind+3+15668+_002739_hash+NULL
86845 +_002740_hash+usbvision_rvmalloc+1+19655+_002740_hash+NULL
86846 +_002742_hash+v4l2_ctrl_new+7+24927+_002742_hash+NULL
86847 +_002743_hash+v4l2_event_subscribe+3+53687+_002743_hash+NULL
86848 +_002744_hash+v9fs_direct_read+3+45546+_002744_hash+NULL
86849 +_002745_hash+v9fs_file_readn+4+36353+_002745_hash+&_001606_hash
86850 +_002746_hash+__videobuf_alloc_vb+1+5665+_002746_hash+NULL
86851 +_002747_hash+wm8350_write+3+24480+_002747_hash+NULL
86852 +_002748_hash+xfs_buf_read_uncached+3+42844+_002748_hash+NULL
86853 +_002749_hash+yurex_write+3+8761+_002749_hash+NULL
86854 +_002750_hash+alloc_skb+1+55439+_002750_hash+NULL
86855 +_002751_hash+alloc_skb_fclone+1+3467+_002751_hash+NULL
86856 +_002752_hash+ata_scsi_change_queue_depth+2+23126+_002752_hash+NULL
86857 +_002753_hash+ath6kl_disconnect_timeout_write+3+794+_002753_hash+NULL
86858 +_002754_hash+ath6kl_keepalive_write+3+45600+_002754_hash+NULL
86859 +_002755_hash+ath6kl_lrssi_roam_write+3+8362+_002755_hash+NULL
86860 +_002756_hash+ath6kl_regread_write+3+14220+_002756_hash+NULL
86861 +_002757_hash+core_sys_select+1+47494+_002757_hash+NULL
86862 +_002758_hash+do_syslog+3+56807+_002758_hash+NULL
86863 +_002759_hash+expand_fdtable+2+39273+_002759_hash+NULL
86864 +_002760_hash+fd_execute_cmd+3+1132+_002760_hash+NULL
86865 +_002761_hash+get_chars+3+40373+_002761_hash+NULL
86866 +_002762_hash+hid_report_raw_event+4+2762+_002762_hash+NULL
86867 +_002763_hash+inet_csk_listen_start+2+38233+_002763_hash+NULL
86868 +_002764_hash+kstrtou32_from_user+2+30361+_002764_hash+NULL
86869 +_002765_hash+l2cap_segment_sdu+4+48772+_002765_hash+&_002653_hash
86870 +_002766_hash+__netdev_alloc_skb+2+18595+_002766_hash+NULL
86871 +_002767_hash+nfs_readdata_alloc+2+65015+_002767_hash+NULL
86872 +_002768_hash+nfs_writedata_alloc+2+12133+_002768_hash+NULL
86873 +_002769_hash+ntfs_rl_append+2-4+6037+_002769_hash+NULL
86874 +_002771_hash+ntfs_rl_insert+2-4+4931+_002771_hash+NULL
86875 +_002773_hash+ntfs_rl_replace+2-4+14136+_002773_hash+NULL
86876 +_002775_hash+ntfs_rl_split+2-4+52328+_002775_hash+NULL
86877 +_002777_hash+port_fops_read+3+49626+_002777_hash+NULL
86878 +_002778_hash+random_read+3+13815+_002778_hash+NULL
86879 +_002779_hash+sg_proc_write_adio+3+45704+_002779_hash+NULL
86880 +_002780_hash+sg_proc_write_dressz+3+46316+_002780_hash+NULL
86881 +_002781_hash+tcp_sendmsg+4+30296+_002781_hash+NULL
86882 +_002782_hash+tomoyo_write_log2+2+34318+_002782_hash+NULL
86883 +_002783_hash+ubi_leb_change+4+10289+_002783_hash+NULL
86884 +_002784_hash+ubi_leb_write+4-5+5478+_002784_hash+NULL
86885 +_002786_hash+urandom_read+3+30462+_002786_hash+NULL
86886 +_002787_hash+v9fs_cached_file_read+3+2514+_002787_hash+NULL
86887 +_002788_hash+__videobuf_alloc_cached+1+12740+_002788_hash+NULL
86888 +_002789_hash+__videobuf_alloc_uncached+1+55711+_002789_hash+NULL
86889 +_002790_hash+wm8350_block_write+3+19727+_002790_hash+NULL
86890 +_002791_hash+alloc_tx+2+32143+_002791_hash+NULL
86891 +_002792_hash+alloc_wr+1-2+24635+_002792_hash+NULL
86892 +_002794_hash+ath6kl_endpoint_stats_write+3+59621+_002794_hash+NULL
86893 +_002795_hash+ath6kl_fwlog_mask_write+3+24810+_002795_hash+NULL
86894 +_002796_hash+ath9k_wmi_cmd+4+327+_002796_hash+NULL
86895 +_002797_hash+atm_alloc_charge+2+19517+_002879_hash+NULL+nohasharray
86896 +_002798_hash+ax25_output+2+22736+_002798_hash+NULL
86897 +_002799_hash+bcsp_prepare_pkt+3+12961+_002799_hash+NULL
86898 +_002800_hash+bt_skb_alloc+1+6404+_002800_hash+NULL
86899 +_002801_hash+capinc_tty_write+3+28539+_002801_hash+NULL
86900 +_002802_hash+cfpkt_create_pfx+1-2+23594+_002802_hash+NULL
86901 +_002804_hash+cmd_complete+6+51629+_002804_hash+NULL
86902 +_002805_hash+cmtp_add_msgpart+4+9252+_002805_hash+NULL
86903 +_002806_hash+cmtp_send_interopmsg+7+376+_002806_hash+NULL
86904 +_002807_hash+cxgb3_get_cpl_reply_skb+2+10620+_002807_hash+NULL
86905 +_002808_hash+dbg_leb_change+4+23555+_002808_hash+NULL
86906 +_002809_hash+dbg_leb_write+4-5+63555+_002809_hash+&_000940_hash
86907 +_002811_hash+dccp_listen_start+2+35918+_002811_hash+NULL
86908 +_002812_hash+__dev_alloc_skb+1+28681+_002812_hash+NULL
86909 +_002813_hash+diva_os_alloc_message_buffer+1+64568+_002813_hash+NULL
86910 +_002814_hash+dn_alloc_skb+2+6631+_002814_hash+NULL
86911 +_002815_hash+do_pselect+1+62061+_002815_hash+NULL
86912 +_002816_hash+_fc_frame_alloc+1+43568+_002816_hash+NULL
86913 +_002817_hash+find_skb+2+20431+_002817_hash+NULL
86914 +_002818_hash+fm_send_cmd+5+39639+_002818_hash+NULL
86915 +_002819_hash+gem_alloc_skb+2+51715+_002819_hash+NULL
86916 +_002820_hash+get_packet+3+41914+_002820_hash+NULL
86917 +_002821_hash+get_packet+3+5747+_002821_hash+NULL
86918 +_002822_hash+get_packet_pg+4+28023+_002822_hash+NULL
86919 +_002823_hash+get_skb+2+63008+_002823_hash+NULL
86920 +_002824_hash+hidp_queue_report+3+1881+_002824_hash+NULL
86921 +_002825_hash+__hidp_send_ctrl_message+4+28303+_002825_hash+NULL
86922 +_002826_hash+hycapi_rx_capipkt+3+11602+_002826_hash+NULL
86923 +_002827_hash+i2400m_net_rx+5+27170+_002827_hash+NULL
86924 +_002828_hash+igmpv3_newpack+2+35912+_002828_hash+NULL
86925 +_002829_hash+inet_listen+2+14723+_002829_hash+NULL
86926 +_002830_hash+isdn_net_ciscohdlck_alloc_skb+2+55209+_002830_hash+&_001724_hash
86927 +_002831_hash+isdn_ppp_ccp_xmit_reset+6+63297+_002831_hash+NULL
86928 +_002832_hash+kmsg_read+3+46514+_002832_hash+NULL
86929 +_002833_hash+_l2_alloc_skb+1+11883+_002833_hash+NULL
86930 +_002834_hash+l3_alloc_skb+1+32289+_002834_hash+NULL
86931 +_002835_hash+llc_alloc_frame+4+64366+_002835_hash+NULL
86932 +_002836_hash+mac_drv_rx_init+2+48898+_002836_hash+&_002670_hash
86933 +_002837_hash+mgmt_event+4+12810+_002837_hash+NULL
86934 +_002838_hash+mI_alloc_skb+1+24770+_002838_hash+NULL
86935 +_002839_hash+nci_skb_alloc+2+49757+_002839_hash+NULL
86936 +_002840_hash+netdev_alloc_skb+2+62437+_002840_hash+NULL
86937 +_002841_hash+__netdev_alloc_skb_ip_align+2+55067+_002841_hash+NULL
86938 +_002842_hash+new_skb+1+21148+_002842_hash+NULL
86939 +_002843_hash+nfc_alloc_recv_skb+1+10244+_002843_hash+NULL
86940 +_002844_hash+nfcwilink_skb_alloc+1+16167+_002844_hash+NULL
86941 +_002845_hash+nfulnl_alloc_skb+2+65207+_002845_hash+NULL
86942 +_002846_hash+ni65_alloc_mem+3+10664+_002846_hash+NULL
86943 +_002847_hash+pep_alloc_skb+3+46303+_002847_hash+NULL
86944 +_002848_hash+pn_raw_send+2+54330+_002848_hash+NULL
86945 +_002849_hash+__pskb_copy+2+9038+_002849_hash+NULL
86946 +_002850_hash+refill_pool+2+19477+_002850_hash+NULL
86947 +_002851_hash+rfcomm_wmalloc+2+58090+_002851_hash+NULL
86948 +_002852_hash+rx+4+57944+_002852_hash+NULL
86949 +_002853_hash+sctp_ulpevent_new+1+33377+_002853_hash+NULL
86950 +_002854_hash+send_command+4+10832+_002854_hash+NULL
86951 +_002855_hash+skb_copy_expand+2-3+7685+_002855_hash+&_000671_hash
86952 +_002857_hash+sk_stream_alloc_skb+2+57622+_002857_hash+NULL
86953 +_002858_hash+sock_alloc_send_pskb+2+21246+_002858_hash+NULL
86954 +_002859_hash+sock_rmalloc+2+59740+_002859_hash+&_002157_hash
86955 +_002860_hash+sock_wmalloc+2+16472+_002860_hash+NULL
86956 +_002861_hash+solos_param_store+4+34755+_002861_hash+NULL
86957 +_002862_hash+sys_select+1+38827+_002862_hash+NULL
86958 +_002863_hash+sys_syslog+3+10746+_002863_hash+NULL
86959 +_002864_hash+t4vf_pktgl_to_skb+2+39005+_002864_hash+NULL
86960 +_002865_hash+tcp_collapse+5-6+63294+_002865_hash+NULL
86961 +_002867_hash+tipc_cfg_reply_alloc+1+27606+_002867_hash+NULL
86962 +_002868_hash+ubifs_leb_change+4+17789+_002868_hash+NULL
86963 +_002869_hash+ubifs_leb_write+4-5+22679+_002869_hash+NULL
86964 +_002871_hash+ulog_alloc_skb+1+23427+_002871_hash+NULL
86965 +_002872_hash+_alloc_mISDN_skb+3+52232+_002872_hash+NULL
86966 +_002873_hash+ath9k_multi_regread+4+65056+_002873_hash+NULL
86967 +_002874_hash+ath_rxbuf_alloc+2+24745+_002874_hash+NULL
86968 +_002875_hash+ax25_send_frame+2+19964+_002875_hash+NULL
86969 +_002876_hash+bchannel_get_rxbuf+2+37213+_002876_hash+NULL
86970 +_002877_hash+cfpkt_create+1+18197+_002877_hash+NULL
86971 +_002878_hash+console_store+4+36007+_002878_hash+NULL
86972 +_002879_hash+dev_alloc_skb+1+19517+_002879_hash+&_002797_hash
86973 +_002880_hash+dn_nsp_do_disc+2-6+49474+_002880_hash+NULL
86974 +_002882_hash+do_write_orph_node+2+64343+_002882_hash+NULL
86975 +_002883_hash+dsp_cmx_send_member+2+15625+_002883_hash+NULL
86976 +_002884_hash+fc_frame_alloc+2+1596+_002884_hash+NULL
86977 +_002885_hash+fc_frame_alloc_fill+2+59394+_002885_hash+NULL
86978 +_002886_hash+fmc_send_cmd+5+20435+_002886_hash+NULL
86979 +_002887_hash+hci_send_cmd+3+43810+_002887_hash+NULL
86980 +_002888_hash+hci_si_event+3+1404+_002888_hash+NULL
86981 +_002889_hash+hfcpci_empty_bfifo+4+62323+_002889_hash+NULL
86982 +_002890_hash+hidp_send_ctrl_message+4+43702+_002890_hash+NULL
86983 +_002891_hash+hysdn_sched_rx+3+60533+_002891_hash+NULL
86984 +_002892_hash+inet_dccp_listen+2+28565+_002892_hash+NULL
86985 +_002893_hash+ip6_append_data+4-5+36490+_002893_hash+NULL
86986 +_002894_hash+__ip_append_data+7-8+36191+_002894_hash+NULL
86987 +_002895_hash+l1oip_socket_recv+6+56537+_002895_hash+NULL
86988 +_002896_hash+l2cap_build_cmd+4+48676+_002896_hash+NULL
86989 +_002897_hash+l2down_create+4+21755+_002897_hash+NULL
86990 +_002898_hash+l2up_create+3+6430+_002898_hash+NULL
86991 +_002899_hash+ldisc_receive+4+41516+_002899_hash+NULL
86992 +_002902_hash+lro_gen_skb+6+2644+_002902_hash+NULL
86993 +_002903_hash+macvtap_alloc_skb+2-4-3+50629+_002903_hash+NULL
86994 +_002906_hash+mgmt_device_found+10+14146+_002906_hash+NULL
86995 +_002907_hash+nci_send_cmd+3+58206+_002907_hash+NULL
86996 +_002908_hash+netdev_alloc_skb_ip_align+2+40811+_002908_hash+NULL
86997 +_002909_hash+nfcwilink_send_bts_cmd+3+10802+_002909_hash+NULL
86998 +_002910_hash+nfqnl_mangle+2+14583+_002910_hash+NULL
86999 +_002911_hash+p54_alloc_skb+3+34366+_002911_hash+&_000475_hash
87000 +_002912_hash+packet_alloc_skb+2-5-4+62602+_002912_hash+NULL
87001 +_002915_hash+pep_indicate+5+38611+_002915_hash+NULL
87002 +_002916_hash+pep_reply+5+50582+_002916_hash+NULL
87003 +_002917_hash+pipe_handler_request+5+50774+_002917_hash+&_001189_hash
87004 +_002918_hash+ql_process_mac_rx_page+4+15543+_002918_hash+NULL
87005 +_002919_hash+ql_process_mac_rx_skb+4+6689+_002919_hash+NULL
87006 +_002920_hash+rfcomm_tty_write+3+51603+_002920_hash+NULL
87007 +_002921_hash+send_mpa_reject+3+7135+_002921_hash+NULL
87008 +_002922_hash+send_mpa_reply+3+32372+_002922_hash+NULL
87009 +_002923_hash+set_rxd_buffer_pointer+8+9950+_002923_hash+NULL
87010 +_002924_hash+sge_rx+3+50594+_002924_hash+NULL
87011 +_002925_hash+skb_cow_data+2+11565+_002925_hash+NULL
87012 +_002926_hash+smp_build_cmd+3+45853+_002926_hash+NULL
87013 +_002927_hash+sock_alloc_send_skb+2+23720+_002927_hash+NULL
87014 +_002928_hash+sys_pselect6+1+57449+_002928_hash+NULL
87015 +_002929_hash+tcp_fragment+3+20436+_002929_hash+NULL
87016 +_002930_hash+teiup_create+3+43201+_002930_hash+NULL
87017 +_002931_hash+tg3_run_loopback+2+30093+_002931_hash+NULL
87018 +_002932_hash+tun_alloc_skb+2-4-3+41216+_002932_hash+NULL
87019 +_002935_hash+ubifs_write_node+5+11258+_002935_hash+NULL
87020 +_002936_hash+use_pool+2+64607+_002936_hash+NULL
87021 +_002937_hash+vxge_rx_alloc+3+52024+_002937_hash+NULL
87022 +_002938_hash+add_packet+3+54433+_002938_hash+NULL
87023 +_002939_hash+add_rx_skb+3+8257+_002939_hash+NULL
87024 +_002940_hash+ath6kl_buf_alloc+1+57304+_002940_hash+NULL
87025 +_002941_hash+bat_iv_ogm_aggregate_new+2+2620+_002941_hash+NULL
87026 +_002942_hash+bnx2fc_process_l2_frame_compl+3+65072+_002942_hash+NULL
87027 +_002943_hash+brcmu_pkt_buf_get_skb+1+5556+_002943_hash+NULL
87028 +_002944_hash+br_send_bpdu+3+29669+_002944_hash+NULL
87029 +_002945_hash+bt_skb_send_alloc+2+6581+_002945_hash+NULL
87030 +_002946_hash+c4iw_reject_cr+3+28174+_002946_hash+NULL
87031 +_002947_hash+carl9170_rx_copy_data+2+21656+_002947_hash+NULL
87032 +_002948_hash+cfpkt_add_body+3+44630+_002948_hash+NULL
87033 +_002949_hash+cfpkt_append+3+61206+_002949_hash+NULL
87034 +_002950_hash+cosa_net_setup_rx+2+38594+_002950_hash+NULL
87035 +_002951_hash+cxgb4_pktgl_to_skb+2+61899+_002951_hash+NULL
87036 +_002952_hash+dn_alloc_send_pskb+2+4465+_002952_hash+NULL
87037 +_002953_hash+dn_nsp_return_disc+2+60296+_002953_hash+NULL
87038 +_002954_hash+dn_nsp_send_disc+2+23469+_002954_hash+NULL
87039 +_002955_hash+dsp_tone_hw_message+3+17678+_002955_hash+NULL
87040 +_002956_hash+dvb_net_sec+3+37884+_002956_hash+NULL
87041 +_002957_hash+e1000_check_copybreak+3+62448+_002957_hash+NULL
87042 +_002958_hash+fast_rx_path+3+59214+_002958_hash+NULL
87043 +_002959_hash+fc_fcp_frame_alloc+2+12624+_002959_hash+NULL
87044 +_002960_hash+fcoe_ctlr_send_keep_alive+3+15308+_002960_hash+NULL
87045 +_002961_hash+fwnet_incoming_packet+3+40380+_002961_hash+NULL
87046 +_002962_hash+fwnet_pd_new+4+39947+_002962_hash+NULL
87047 +_002963_hash+got_frame+2+16028+_002963_hash+NULL
87048 +_002964_hash+gsm_mux_rx_netchar+3+33336+_002964_hash+NULL
87049 +_002965_hash+hdlcdev_rx+3+997+_002965_hash+NULL
87050 +_002966_hash+hdlc_empty_fifo+2+18397+_002966_hash+NULL
87051 +_002967_hash+hfc_empty_fifo+2+57972+_002967_hash+NULL
87052 +_002968_hash+hfcpci_empty_fifo+4+2427+_002968_hash+NULL
87053 +_002969_hash+hfcsusb_rx_frame+3+52745+_002969_hash+NULL
87054 +_002970_hash+hidp_output_raw_report+3+5629+_002970_hash+NULL
87055 +_002971_hash+hscx_empty_fifo+2+13360+_002971_hash+NULL
87056 +_002972_hash+hysdn_rx_netpkt+3+16136+_002972_hash+NULL
87057 +_002973_hash+ieee80211_fragment+4+33112+_002973_hash+NULL
87058 +_002974_hash+ieee80211_probereq_get+4-6+29069+_002974_hash+NULL
87059 +_002976_hash+ieee80211_send_auth+5+24121+_002976_hash+NULL
87060 +_002977_hash+ieee80211_set_probe_resp+3+10077+_002977_hash+NULL
87061 +_002978_hash+ieee80211_tdls_mgmt+8+9581+_002978_hash+NULL
87062 +_002979_hash+ip6_ufo_append_data+5-7-6+4780+_002979_hash+NULL
87063 +_002982_hash+ip_ufo_append_data+6-8-7+12775+_002982_hash+NULL
87064 +_002985_hash+ipw_packet_received_skb+2+1230+_002985_hash+NULL
87065 +_002986_hash+iwch_reject_cr+3+23901+_002986_hash+NULL
87066 +_002987_hash+iwm_rx_packet_alloc+3+9898+_002987_hash+NULL
87067 +_002988_hash+ixgb_check_copybreak+3+5847+_002988_hash+NULL
87068 +_002989_hash+l1oip_socket_parse+4+4507+_002989_hash+NULL
87069 +_002990_hash+l2cap_send_cmd+4+14548+_002990_hash+NULL
87070 +_002991_hash+l2tp_ip6_sendmsg+4+7461+_002991_hash+NULL
87071 +_002993_hash+lowpan_fragment_xmit+3-4+22095+_002993_hash+NULL
87072 +_002996_hash+mcs_unwrap_fir+3+25733+_002996_hash+NULL
87073 +_002997_hash+mcs_unwrap_mir+3+9455+_002997_hash+NULL
87074 +_002998_hash+mld_newpack+2+50950+_002998_hash+NULL
87075 +_002999_hash+nfc_alloc_send_skb+4+3167+_002999_hash+NULL
87076 +_003000_hash+p54_download_eeprom+4+43842+_003000_hash+NULL
87077 +_003002_hash+ppp_tx_cp+5+62044+_003002_hash+NULL
87078 +_003003_hash+prism2_send_mgmt+4+62605+_003003_hash+&_001876_hash
87079 +_003004_hash+prism2_sta_send_mgmt+5+43916+_003004_hash+NULL
87080 +_003005_hash+_queue_data+4+54983+_003005_hash+NULL
87081 +_003006_hash+read_dma+3+55086+_003006_hash+NULL
87082 +_003007_hash+read_fifo+3+826+_003007_hash+NULL
87083 +_003008_hash+receive_copy+3+12216+_003008_hash+NULL
87084 +_003009_hash+rtl8169_try_rx_copy+3+705+_003009_hash+NULL
87085 +_003010_hash+_rtl92s_firmware_downloadcode+3+14021+_003010_hash+NULL
87086 +_003011_hash+rx_data+4+60442+_003011_hash+NULL
87087 +_003012_hash+sis190_try_rx_copy+3+57069+_003012_hash+NULL
87088 +_003013_hash+skge_rx_get+3+40598+_003013_hash+NULL
87089 +_003014_hash+tcp_mark_head_lost+2+35895+_003014_hash+NULL
87090 +_003015_hash+tcp_match_skb_to_sack+3-4+23568+_003015_hash+NULL
87091 +_003017_hash+tso_fragment+3+29050+_003017_hash+NULL
87092 +_003018_hash+tt_response_fill_table+1+57902+_003018_hash+NULL
87093 +_003020_hash+udpv6_sendmsg+4+22316+_003020_hash+NULL
87094 +_003021_hash+velocity_rx_copy+2+34583+_003021_hash+NULL
87095 +_003022_hash+W6692_empty_Bfifo+2+47804+_003022_hash+NULL
87096 +_003023_hash+zd_mac_rx+3+38296+_003023_hash+NULL
87097 +_003024_hash+ath6kl_wmi_get_new_buf+1+52304+_003024_hash+NULL
87098 +_003025_hash+bat_iv_ogm_queue_add+3+30870+_003025_hash+NULL
87099 +_003026_hash+brcmf_alloc_pkt_and_read+2+63116+_003026_hash+&_001808_hash
87100 +_003027_hash+brcmf_sdcard_recv_buf+6+38179+_003027_hash+NULL
87101 +_003028_hash+brcmf_sdcard_rwdata+5+65041+_003028_hash+NULL
87102 +_003029_hash+brcmf_sdcard_send_buf+6+7713+_003029_hash+NULL
87103 +_003030_hash+carl9170_handle_mpdu+3+11056+_003030_hash+NULL
87104 +_003031_hash+cfpkt_add_trail+3+27260+_003031_hash+NULL
87105 +_003032_hash+cfpkt_pad_trail+2+55511+_003032_hash+NULL
87106 +_003033_hash+dvb_net_sec_callback+2+28786+_003033_hash+NULL
87107 +_003034_hash+fwnet_receive_packet+9+50537+_003034_hash+NULL
87108 +_003035_hash+handle_rx_packet+3+58993+_003035_hash+NULL
87109 +_003036_hash+HDLC_irq+2+8709+_003036_hash+NULL
87110 +_003037_hash+hdlc_rpr_irq+2+10240+_003037_hash+NULL
87111 +_003043_hash+ipwireless_network_packet_received+4+51277+_003043_hash+NULL
87112 +_003044_hash+l2cap_bredr_sig_cmd+3+49065+_003044_hash+NULL
87113 +_003045_hash+l2cap_sock_alloc_skb_cb+2+33532+_003045_hash+NULL
87114 +_003046_hash+llcp_allocate_pdu+3+19866+_003046_hash+NULL
87115 +_003047_hash+ppp_cp_event+6+2965+_003047_hash+NULL
87116 +_003048_hash+receive_client_update_packet+3+49104+_003048_hash+NULL
87117 +_003049_hash+receive_server_sync_packet+3+59021+_003049_hash+NULL
87118 +_003050_hash+sky2_receive+2+13407+_003050_hash+NULL
87119 +_003051_hash+tcp_sacktag_walk+5-6+49703+_003051_hash+NULL
87120 +_003053_hash+tcp_write_xmit+2+64602+_003053_hash+NULL
87121 +_003054_hash+ath6kl_wmi_add_wow_pattern_cmd+4+12842+_003054_hash+NULL
87122 +_003055_hash+ath6kl_wmi_beginscan_cmd+8+25462+_003055_hash+NULL
87123 +_003056_hash+ath6kl_wmi_send_probe_response_cmd+6+31728+_003056_hash+NULL
87124 +_003057_hash+ath6kl_wmi_set_appie_cmd+5+39266+_003057_hash+NULL
87125 +_003058_hash+ath6kl_wmi_set_ie_cmd+6+37260+_003058_hash+NULL
87126 +_003059_hash+ath6kl_wmi_startscan_cmd+8+33674+_003059_hash+NULL
87127 +_003060_hash+ath6kl_wmi_test_cmd+3+27312+_003060_hash+NULL
87128 +_003061_hash+brcmf_sdbrcm_membytes+3-5+37324+_003061_hash+NULL
87129 +_003063_hash+brcmf_sdbrcm_read_control+3+22721+_003063_hash+NULL
87130 +_003064_hash+brcmf_tx_frame+3+20978+_003064_hash+NULL
87131 +_003065_hash+__carl9170_rx+3+56784+_003065_hash+NULL
87132 +_003066_hash+cfpkt_setlen+2+49343+_003066_hash+NULL
87133 +_003067_hash+hdlc_irq_one+2+3944+_003067_hash+NULL
87134 +_003069_hash+tcp_push_one+2+48816+_003069_hash+NULL
87135 +_003070_hash+__tcp_push_pending_frames+2+48148+_003070_hash+NULL
87136 +_003071_hash+brcmf_sdbrcm_bus_txctl+3+42492+_003071_hash+NULL
87137 +_003072_hash+carl9170_rx+3+13272+_003072_hash+NULL
87138 +_003073_hash+carl9170_rx_stream+3+1334+_003073_hash+NULL
87139 +_003074_hash+tcp_push+3+10680+_003074_hash+NULL
87140 +_003075_hash+create_log+2+8225+_003075_hash+NULL
87141 +_003076_hash+expand_files+2+17080+_003076_hash+NULL
87142 +_003077_hash+iio_device_alloc+1+41440+_003077_hash+NULL
87143 +_003078_hash+OS_mem_token_alloc+1+14276+_003078_hash+NULL
87144 +_003079_hash+packet_came+3+18072+_003079_hash+NULL
87145 +_003080_hash+softsynth_write+3+3455+_003080_hash+NULL
87146 +_003081_hash+alloc_fd+1+37637+_003081_hash+NULL
87147 +_003082_hash+sys_dup3+2+33421+_003082_hash+NULL
87148 +_003083_hash+do_fcntl+3+31468+_003083_hash+NULL
87149 +_003084_hash+sys_dup2+2+25284+_003084_hash+NULL
87150 +_003085_hash+sys_fcntl+3+19267+_003085_hash+NULL
87151 +_003086_hash+sys_fcntl64+3+29031+_003086_hash+NULL
87152 +_003087_hash+cmpk_message_handle_tx+4+54024+_003087_hash+NULL
87153 +_003088_hash+comedi_buf_alloc+3+24822+_003088_hash+NULL
87154 +_003089_hash+compat_rw_copy_check_uvector+3+22001+_003089_hash+&_001989_hash
87155 +_003090_hash+compat_sys_fcntl64+3+60256+_003090_hash+NULL
87156 +_003091_hash+evtchn_write+3+43278+_003091_hash+NULL
87157 +_003092_hash+fw_download_code+3+13249+_003092_hash+NULL
87158 +_003093_hash+fwSendNullPacket+2+54618+_003093_hash+NULL
87159 +_003095_hash+ieee80211_authentication_req+3+63973+_003095_hash+NULL
87160 +_003097_hash+rtllib_authentication_req+3+26713+_003097_hash+NULL
87161 +_003098_hash+SendTxCommandPacket+3+42901+_003098_hash+NULL
87162 +_003099_hash+snd_nm256_capture_copy+5+28622+_003099_hash+NULL
87163 +_003100_hash+snd_nm256_playback_copy+5+38567+_003100_hash+NULL
87164 +_003101_hash+tomoyo_init_log+2+14806+_003101_hash+NULL
87165 +_003102_hash+usbdux_attach_common+4+51764+_003271_hash+NULL+nohasharray
87166 +_003103_hash+compat_sys_fcntl+3+15654+_003103_hash+NULL
87167 +_003104_hash+ieee80211_auth_challenge+3+18810+_003104_hash+NULL
87168 +_003105_hash+ieee80211_rtl_auth_challenge+3+61897+_003105_hash+NULL
87169 +_003106_hash+resize_async_buffer+4+64031+_003106_hash+&_002119_hash
87170 +_003107_hash+rtllib_auth_challenge+3+12493+_003107_hash+NULL
87171 +_003108_hash+tomoyo_write_log2+2+11732+_003108_hash+NULL
87172 +_003109_hash+allocate_probes+1+40204+_003109_hash+NULL
87173 +_003110_hash+alloc_ftrace_hash+1+57431+_003110_hash+&_002532_hash
87174 +_003111_hash+__alloc_preds+2+9492+_003111_hash+NULL
87175 +_003112_hash+__alloc_pred_stack+2+26687+_003112_hash+NULL
87176 +_003113_hash+alloc_sched_domains+1+47756+_003113_hash+NULL
87177 +_003114_hash+alloc_trace_probe+6+38720+_003114_hash+NULL
87178 +_003115_hash+alloc_trace_uprobe+3+13870+_003115_hash+NULL
87179 +_003116_hash+arcfb_write+3+8702+_003116_hash+NULL
87180 +_003117_hash+ath6kl_sdio_alloc_prep_scat_req+2+51986+_003117_hash+NULL
87181 +_003118_hash+ath6kl_usb_post_recv_transfers+2+32892+_003118_hash+NULL
87182 +_003119_hash+ath6kl_usb_submit_ctrl_in+6+32880+_003119_hash+&_000778_hash
87183 +_003120_hash+ath6kl_usb_submit_ctrl_out+6+9978+_003120_hash+NULL
87184 +_003121_hash+auok190xfb_write+3+37001+_003121_hash+NULL
87185 +_003122_hash+beacon_interval_write+3+17952+_003122_hash+NULL
87186 +_003123_hash+blk_dropped_read+3+4168+_003123_hash+NULL
87187 +_003124_hash+blk_msg_write+3+13655+_003124_hash+NULL
87188 +_003125_hash+brcmf_usbdev_qinit+2+19090+_003125_hash+&_001533_hash
87189 +_003126_hash+brcmf_usb_dl_cmd+4+53130+_003126_hash+NULL
87190 +_003127_hash+broadsheetfb_write+3+39976+_003127_hash+NULL
87191 +_003128_hash+broadsheet_spiflash_rewrite_sector+2+54864+_003128_hash+NULL
87192 +_003129_hash+cyttsp_probe+4+1940+_003129_hash+NULL
87193 +_003130_hash+da9052_group_write+3+4534+_003130_hash+NULL
87194 +_003131_hash+dccpprobe_read+3+52549+_003131_hash+NULL
87195 +_003132_hash+drm_property_create_bitmask+5+30195+_003132_hash+NULL
87196 +_003133_hash+dtim_interval_write+3+30489+_003133_hash+NULL
87197 +_003134_hash+dynamic_ps_timeout_write+3+37713+_003134_hash+NULL
87198 +_003135_hash+event_enable_read+3+7074+_003135_hash+NULL
87199 +_003136_hash+event_enable_write+3+45238+_003136_hash+NULL
87200 +_003137_hash+event_filter_read+3+23494+_003137_hash+NULL
87201 +_003138_hash+event_filter_write+3+56609+_003138_hash+NULL
87202 +_003139_hash+event_id_read+3+64288+_003139_hash+&_001240_hash
87203 +_003140_hash+f_audio_buffer_alloc+1+41110+_003140_hash+NULL
87204 +_003141_hash+fb_sys_read+3+13778+_003141_hash+NULL
87205 +_003142_hash+fb_sys_write+3+33130+_003142_hash+NULL
87206 +_003143_hash+forced_ps_write+3+37209+_003143_hash+NULL
87207 +_003144_hash+__fprog_create+2+41263+_003144_hash+NULL
87208 +_003145_hash+fq_codel_zalloc+1+15378+_003145_hash+NULL
87209 +_003146_hash+ftrace_pid_write+3+39710+_003146_hash+NULL
87210 +_003147_hash+ftrace_profile_read+3+21327+_003147_hash+NULL
87211 +_003148_hash+ftrace_profile_write+3+53327+_003148_hash+NULL
87212 +_003149_hash+ftrace_write+3+29551+_003149_hash+NULL
87213 +_003150_hash+gdm_wimax_netif_rx+3+43423+_003150_hash+&_001619_hash
87214 +_003151_hash+gpio_power_write+3+1991+_003151_hash+NULL
87215 +_003152_hash+hecubafb_write+3+26942+_003152_hash+NULL
87216 +_003153_hash+hsc_msg_alloc+1+60990+_003153_hash+NULL
87217 +_003154_hash+hsc_write+3+55875+_003154_hash+NULL
87218 +_003155_hash+hsi_alloc_controller+1+41802+_003155_hash+NULL
87219 +_003156_hash+hsi_register_board_info+2+13820+_003156_hash+NULL
87220 +_003157_hash+i915_ring_stop_read+3+42549+_003157_hash+NULL
87221 +_003158_hash+i915_ring_stop_write+3+59010+_003158_hash+NULL
87222 +_003159_hash+ieee802154_alloc_device+1+13767+_003159_hash+NULL
87223 +_003160_hash+intel_sdvo_write_cmd+4+54377+_003160_hash+&_000815_hash
87224 +_003161_hash+ivtvfb_write+3+40023+_003161_hash+NULL
87225 +_003162_hash+metronomefb_write+3+8823+_003162_hash+NULL
87226 +_003163_hash+mwifiex_usb_submit_rx_urb+2+54558+_003163_hash+NULL
87227 +_003164_hash+nfc_hci_hcp_message_tx+6+14534+_003164_hash+NULL
87228 +_003165_hash+nfc_hci_set_param+5+40697+_003165_hash+NULL
87229 +_003166_hash+nfc_shdlc_alloc_skb+2+12741+_003166_hash+NULL
87230 +_003167_hash+odev_update+2+50169+_003167_hash+NULL
87231 +_003168_hash+oz_add_farewell+5+20652+_003168_hash+NULL
87232 +_003169_hash+oz_cdev_read+3+20659+_003169_hash+NULL
87233 +_003170_hash+oz_cdev_write+3+33852+_003170_hash+NULL
87234 +_003171_hash+oz_ep_alloc+2+5587+_003171_hash+NULL
87235 +_003172_hash+oz_events_read+3+47535+_003172_hash+NULL
87236 +_003173_hash+pmcraid_copy_sglist+3+38431+_003173_hash+NULL
87237 +_003174_hash+prctl_set_mm+3+64538+_003174_hash+NULL
87238 +_003175_hash+ptp_filter_init+2+36780+_003175_hash+NULL
87239 +_003176_hash+rb_simple_read+3+45972+_003176_hash+NULL
87240 +_003177_hash+rb_simple_write+3+20890+_003177_hash+NULL
87241 +_003178_hash+read_file_dfs+3+43145+_003178_hash+NULL
87242 +_003179_hash+rx_streaming_always_write+3+32357+_003179_hash+NULL
87243 +_003180_hash+rx_streaming_interval_write+3+50120+_003180_hash+NULL
87244 +_003181_hash+shmem_pread_fast+3+34147+_003181_hash+NULL
87245 +_003182_hash+shmem_pread_slow+3+3198+_003182_hash+NULL
87246 +_003183_hash+shmem_pwrite_fast+3+46842+_003183_hash+NULL
87247 +_003184_hash+shmem_pwrite_slow+3+31741+_003184_hash+NULL
87248 +_003185_hash+show_header+3+4722+_003185_hash+&_000736_hash
87249 +_003186_hash+split_scan_timeout_write+3+52128+_003186_hash+NULL
87250 +_003187_hash+stack_max_size_read+3+1445+_003187_hash+NULL
87251 +_003188_hash+stack_max_size_write+3+36068+_003188_hash+NULL
87252 +_003189_hash+subsystem_filter_read+3+62310+_003189_hash+NULL
87253 +_003190_hash+subsystem_filter_write+3+13022+_003190_hash+NULL
87254 +_003191_hash+suspend_dtim_interval_write+3+48854+_003191_hash+NULL
87255 +_003192_hash+system_enable_read+3+25815+_003192_hash+NULL
87256 +_003193_hash+system_enable_write+3+61396+_003193_hash+NULL
87257 +_003194_hash+trace_options_core_read+3+47390+_003194_hash+NULL
87258 +_003195_hash+trace_options_core_write+3+61551+_003195_hash+NULL
87259 +_003196_hash+trace_options_read+3+11419+_003196_hash+NULL
87260 +_003197_hash+trace_options_write+3+48275+_003197_hash+NULL
87261 +_003198_hash+trace_parser_get_init+2+31379+_003198_hash+NULL
87262 +_003199_hash+traceprobe_probes_write+3+64969+_003199_hash+NULL
87263 +_003200_hash+trace_seq_to_user+3+65398+_003200_hash+NULL
87264 +_003201_hash+tracing_buffers_read+3+11124+_003201_hash+NULL
87265 +_003202_hash+tracing_clock_write+3+27961+_003202_hash+NULL
87266 +_003203_hash+tracing_cpumask_read+3+7010+_003203_hash+NULL
87267 +_003204_hash+tracing_ctrl_read+3+46922+_003204_hash+NULL
87268 +_003205_hash+tracing_ctrl_write+3+42324+_003205_hash+&_001726_hash
87269 +_003206_hash+tracing_entries_read+3+8345+_003206_hash+NULL
87270 +_003207_hash+tracing_entries_write+3+60563+_003207_hash+NULL
87271 +_003208_hash+tracing_max_lat_read+3+8890+_003208_hash+NULL
87272 +_003209_hash+tracing_max_lat_write+3+8728+_003209_hash+NULL
87273 +_003210_hash+tracing_read_dyn_info+3+45468+_003210_hash+NULL
87274 +_003211_hash+tracing_readme_read+3+16493+_003211_hash+NULL
87275 +_003212_hash+tracing_saved_cmdlines_read+3+21434+_003212_hash+NULL
87276 +_003213_hash+tracing_set_trace_read+3+44122+_003213_hash+NULL
87277 +_003214_hash+tracing_set_trace_write+3+57096+_003214_hash+NULL
87278 +_003215_hash+tracing_stats_read+3+34537+_003215_hash+NULL
87279 +_003216_hash+tracing_total_entries_read+3+62817+_003216_hash+NULL
87280 +_003217_hash+tracing_trace_options_write+3+153+_003217_hash+NULL
87281 +_003218_hash+ttm_put_pages+2+9179+_003218_hash+NULL
87282 +_003219_hash+udl_prime_create+2+57159+_003219_hash+NULL
87283 +_003220_hash+ufx_alloc_urb_list+3+10349+_003220_hash+NULL
87284 +_003221_hash+u_memcpya+2-3+30139+_003221_hash+NULL
87285 +_003223_hash+viafb_dfph_proc_write+3+49288+_003223_hash+NULL
87286 +_003224_hash+viafb_dfpl_proc_write+3+627+_003224_hash+NULL
87287 +_003225_hash+viafb_dvp0_proc_write+3+23023+_003225_hash+NULL
87288 +_003226_hash+viafb_dvp1_proc_write+3+48864+_003226_hash+NULL
87289 +_003227_hash+viafb_vt1636_proc_write+3+16018+_003227_hash+NULL
87290 +_003228_hash+vivi_read+3+23073+_003228_hash+NULL
87291 +_003229_hash+wl1271_rx_filter_alloc_field+5+46721+_003229_hash+NULL
87292 +_003230_hash+wl12xx_cmd_build_probe_req+6-8+3098+_003230_hash+NULL
87293 +_003232_hash+wlcore_alloc_hw+1+7785+_003232_hash+NULL
87294 +_003233_hash+alloc_and_copy_ftrace_hash+1+29368+_003233_hash+NULL
87295 +_003234_hash+create_trace_probe+1+20175+_003234_hash+NULL
87296 +_003235_hash+create_trace_uprobe+1+13184+_003235_hash+NULL
87297 +_003236_hash+intel_sdvo_set_value+4+2311+_003236_hash+NULL
87298 +_003237_hash+mmio_read+4+40348+_003237_hash+NULL
87299 +_003238_hash+nfc_hci_execute_cmd+5+43882+_003238_hash+NULL
87300 +_003239_hash+nfc_hci_send_event+5+21452+_003239_hash+NULL
87301 +_003240_hash+nfc_hci_send_response+5+56462+_003240_hash+NULL
87302 +_003241_hash+picolcd_fb_write+3+2318+_003241_hash+NULL
87303 +_003242_hash+probes_write+3+29711+_003242_hash+NULL
87304 +_003243_hash+sys_prctl+4+8766+_003243_hash+NULL
87305 +_003244_hash+tracing_read_pipe+3+35312+_003244_hash+NULL
87306 +_003245_hash+brcmf_usb_attach+1-2+44656+_003245_hash+NULL
87307 +_003247_hash+dlfb_ops_write+3+64150+_003247_hash+NULL
87308 +_003248_hash+nfc_hci_send_cmd+5+55714+_003248_hash+NULL
87309 +_003249_hash+ufx_ops_write+3+54848+_003249_hash+NULL
87310 +_003250_hash+viafb_iga1_odev_proc_write+3+36241+_003250_hash+NULL
87311 +_003251_hash+viafb_iga2_odev_proc_write+3+2363+_003251_hash+NULL
87312 +_003252_hash+xenfb_write+3+43412+_003252_hash+NULL
87313 +_003253_hash+acl_alloc+1+35979+_003253_hash+NULL
87314 +_003254_hash+acl_alloc_stack_init+1+60630+_003254_hash+NULL
87315 +_003255_hash+acl_alloc_num+1-2+60778+_003255_hash+NULL
87316 +_003257_hash+padzero+1+55+_003257_hash+&_002013_hash
87317 +_003258_hash+__get_vm_area_node+1+55305+_003258_hash+NULL
87318 +_003259_hash+get_vm_area+1+18080+_003259_hash+NULL
87319 +_003260_hash+__get_vm_area+1+61599+_003260_hash+NULL
87320 +_003261_hash+get_vm_area_caller+1+10527+_003261_hash+NULL
87321 +_003262_hash+__get_vm_area_caller+1+56416+_003302_hash+NULL+nohasharray
87322 +_003263_hash+alloc_vm_area+1+36149+_003263_hash+NULL
87323 +_003264_hash+__ioremap_caller+1-2+21800+_003264_hash+NULL
87324 +_003266_hash+vmap+2+15025+_003266_hash+NULL
87325 +_003267_hash+ioremap_cache+1-2+47189+_003267_hash+NULL
87326 +_003269_hash+ioremap_nocache+1-2+2439+_003269_hash+NULL
87327 +_003271_hash+ioremap_prot+1-2+51764+_003271_hash+&_003102_hash
87328 +_003273_hash+ioremap_wc+1-2+62695+_003273_hash+NULL
87329 +_003274_hash+acpi_os_ioremap+1-2+49523+_003274_hash+NULL
87330 +_003276_hash+ca91cx42_alloc_resource+2+10502+_003276_hash+NULL
87331 +_003277_hash+devm_ioremap_nocache+2-3+2036+_003277_hash+NULL
87332 +_003279_hash+__einj_error_trigger+1+17707+_003279_hash+&_001577_hash
87333 +_003280_hash+io_mapping_map_wc+2+19284+_003280_hash+NULL
87334 +_003281_hash+ioremap+1-2+23172+_003281_hash+NULL
87335 +_003283_hash+lguest_map+1-2+42008+_003283_hash+NULL
87336 +_003285_hash+msix_map_region+3+3411+_003285_hash+NULL
87337 +_003286_hash+pci_iomap+3+47575+_003286_hash+NULL
87338 +_003287_hash+sfi_map_memory+1-2+5183+_003287_hash+NULL
87339 +_003289_hash+tsi148_alloc_resource+2+24563+_003289_hash+NULL
87340 +_003290_hash+vb2_vmalloc_get_userptr+3+31374+_003290_hash+NULL
87341 +_003291_hash+xlate_dev_mem_ptr+1+15291+_003291_hash+&_001167_hash
87342 +_003292_hash+a4t_cs_init+3+27734+_003292_hash+NULL
87343 +_003293_hash+aac_nark_ioremap+2+50163+_003293_hash+&_000314_hash
87344 +_003294_hash+aac_rkt_ioremap+2+3333+_003294_hash+NULL
87345 +_003295_hash+aac_rx_ioremap+2+52410+_003295_hash+NULL
87346 +_003296_hash+aac_sa_ioremap+2+13596+_003296_hash+&_000288_hash
87347 +_003297_hash+aac_src_ioremap+2+41688+_003297_hash+NULL
87348 +_003298_hash+aac_srcv_ioremap+2+6659+_003298_hash+NULL
87349 +_003299_hash+acpi_map+1-2+58725+_003299_hash+NULL
87350 +_003301_hash+acpi_os_read_memory+1-3+54186+_003301_hash+NULL
87351 +_003302_hash+acpi_os_write_memory+1-3+56416+_003302_hash+&_003262_hash
87352 +_003303_hash+c101_run+2+37279+_003303_hash+NULL
87353 +_003304_hash+ca91cx42_master_set+4+23146+_003304_hash+NULL
87354 +_003305_hash+check586+2+29914+_003305_hash+NULL
87355 +_003306_hash+check_mirror+1-2+57342+_003306_hash+&_001564_hash
87356 +_003308_hash+cru_detect+1+11272+_003308_hash+NULL
87357 +_003309_hash+cs553x_init_one+3+58886+_003309_hash+NULL
87358 +_003310_hash+cycx_setup+4+47562+_003310_hash+NULL
87359 +_003311_hash+DepcaSignature+2+80+_003311_hash+&_001321_hash
87360 +_003312_hash+devm_ioremap+2-3+29235+_003312_hash+NULL
87361 +_003314_hash+divasa_remap_pci_bar+3-4+23485+_003314_hash+&_000947_hash
87362 +_003316_hash+dma_declare_coherent_memory+2-4+14244+_003316_hash+NULL
87363 +_003318_hash+doc_probe+1+23285+_003318_hash+NULL
87364 +_003319_hash+DoC_Probe+1+57534+_003319_hash+NULL
87365 +_003320_hash+ems_pcmcia_add_card+2+62627+_003320_hash+NULL
87366 +_003321_hash+gdth_init_isa+1+28091+_003321_hash+NULL
87367 +_003322_hash+gdth_search_isa+1+58595+_003322_hash+NULL
87368 +_003323_hash+isp1760_register+1-2+628+_003323_hash+NULL
87369 +_003325_hash+mthca_map_reg+2-3+5664+_003325_hash+NULL
87370 +_003327_hash+n2_run+3+53459+_003327_hash+NULL
87371 +_003328_hash+pcim_iomap+3+58334+_003328_hash+NULL
87372 +_003329_hash+probe_bios+1+17467+_003329_hash+NULL
87373 +_003330_hash+register_device+2-3+60015+_003330_hash+NULL
87374 +_003332_hash+remap_pci_mem+1-2+15966+_003332_hash+NULL
87375 +_003334_hash+rtl_port_map+1-2+2385+_003334_hash+NULL
87376 +_003336_hash+sfi_map_table+1+5462+_003336_hash+NULL
87377 +_003337_hash+sriov_enable_migration+2+14889+_003337_hash+NULL
87378 +_003338_hash+ssb_bus_scan+2+36578+_003338_hash+NULL
87379 +_003339_hash+ssb_ioremap+2+5228+_003339_hash+NULL
87380 +_003340_hash+tpm_tis_init+2-3+15304+_003340_hash+NULL
87381 +_003342_hash+tsi148_master_set+4+14685+_003342_hash+NULL
87382 +_003343_hash+acpi_os_map_memory+1-2+11161+_003343_hash+NULL
87383 +_003345_hash+com90xx_found+3+13974+_003345_hash+NULL
87384 +_003346_hash+dmam_declare_coherent_memory+2-4+43679+_003346_hash+NULL
87385 +_003348_hash+gdth_isa_probe_one+1+48925+_003348_hash+NULL
87386 +_003349_hash+sfi_check_table+1+6772+_003349_hash+NULL
87387 +_003350_hash+sfi_sysfs_install_table+1+51688+_003350_hash+NULL
87388 +_003351_hash+sriov_enable+2+59689+_003351_hash+NULL
87389 +_003352_hash+ssb_bus_register+3+65183+_003352_hash+NULL
87390 +_003353_hash+acpi_ex_system_memory_space_handler+2+31192+_003353_hash+NULL
87391 +_003354_hash+acpi_tb_check_xsdt+1+21862+_003354_hash+NULL
87392 +_003355_hash+acpi_tb_install_table+1+12988+_003355_hash+NULL
87393 +_003356_hash+acpi_tb_parse_root_table+1+53455+_003356_hash+NULL
87394 +_003357_hash+check_vendor_extension+1+3254+_003357_hash+NULL
87395 +_003358_hash+pci_enable_sriov+2+35745+_003358_hash+NULL
87396 +_003359_hash+ssb_bus_pcmciabus_register+3+56020+_003359_hash+NULL
87397 +_003360_hash+ssb_bus_ssbbus_register+2+2217+_003360_hash+NULL
87398 +_003361_hash+lpfc_sli_probe_sriov_nr_virtfn+2+26004+_003361_hash+NULL
87399 +_003364_hash+alloc_vm_area+1+15989+_003364_hash+NULL
87400 +_003366_hash+efi_ioremap+1-2+3492+_003366_hash+&_001092_hash
87401 +_003368_hash+init_chip_wc_pat+2+62768+_003368_hash+NULL
87402 +_003369_hash+io_mapping_create_wc+1-2+1354+_003369_hash+NULL
87403 +_003371_hash+iommu_map_mmio_space+1+30919+_003371_hash+NULL
87404 +_003372_hash+arch_gnttab_map_shared+3+41306+_003372_hash+NULL
87405 +_003373_hash+arch_gnttab_map_status+3+49812+_003373_hash+NULL
87406 +_003374_hash+intel_render_ring_init_dri+2-3+45446+_003374_hash+NULL
87407 +_003376_hash+persistent_ram_iomap+1-2+47156+_003376_hash+NULL
87408 +_003378_hash+sparse_early_usemaps_alloc_pgdat_section+2+62304+_003378_hash+NULL
87409 +_003379_hash+ttm_bo_ioremap+2-3+31082+_003379_hash+NULL
87410 +_003381_hash+ttm_bo_kmap_ttm+3+5922+_003381_hash+NULL
87411 +_003382_hash+atyfb_setup_generic+3+49151+_003382_hash+NULL
87412 +_003383_hash+do_test+1+15766+_003383_hash+NULL
87413 +_003384_hash+mga_ioremap+1-2+8571+_003384_hash+NULL
87414 +_003386_hash+mid_get_vbt_data_r0+2+10876+_003386_hash+NULL
87415 +_003387_hash+mid_get_vbt_data_r10+2+6308+_003387_hash+NULL
87416 +_003388_hash+mid_get_vbt_data_r1+2+26170+_003388_hash+NULL
87417 +_003389_hash+persistent_ram_buffer_map+1-2+11332+_003389_hash+NULL
87418 +_003391_hash+read_vbt_r0+1+503+_003391_hash+NULL
87419 +_003392_hash+read_vbt_r10+1+60679+_003392_hash+NULL
87420 +_003393_hash+tpci200_slot_map_space+2+3848+_003393_hash+NULL
87421 +_003394_hash+ttm_bo_kmap+2-3+60118+_003394_hash+NULL
87422 +_003395_hash+persistent_ram_new+1-2+14588+_003395_hash+NULL
87423 +_003396_hash+mpt_lan_receive_post_turbo+2+13592+_003396_hash+NULL
87424 +_003397_hash+v4l2_ctrl_new_int_menu+4+41151+_003397_hash+NULL
87425 +_003398_hash+v4l2_ctrl_new_std+5+45748+_003398_hash+&_002699_hash
87426 +_003399_hash+v4l2_ctrl_new_std_menu+4+6221+_003399_hash+NULL
87427 +_003400_hash+xhci_alloc_streams+5+37586+_003400_hash+NULL
87428 +_003401_hash+cx2341x_ctrl_new_menu+3+49700+_003401_hash+NULL
87429 +_003402_hash+cx2341x_ctrl_new_std+4+57061+_003402_hash+NULL
87430 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
87431 new file mode 100644
87432 index 0000000..314cdac
87433 --- /dev/null
87434 +++ b/tools/gcc/size_overflow_plugin.c
87435 @@ -0,0 +1,1741 @@
87436 +/*
87437 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
87438 + * Licensed under the GPL v2, or (at your option) v3
87439 + *
87440 + * Homepage:
87441 + * http://www.grsecurity.net/~ephox/overflow_plugin/
87442 + *
87443 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
87444 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
87445 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
87446 + *
87447 + * Usage:
87448 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
87449 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
87450 + */
87451 +
87452 +#include "gcc-plugin.h"
87453 +#include "config.h"
87454 +#include "system.h"
87455 +#include "coretypes.h"
87456 +#include "tree.h"
87457 +#include "tree-pass.h"
87458 +#include "intl.h"
87459 +#include "plugin-version.h"
87460 +#include "tm.h"
87461 +#include "toplev.h"
87462 +#include "function.h"
87463 +#include "tree-flow.h"
87464 +#include "plugin.h"
87465 +#include "gimple.h"
87466 +#include "c-common.h"
87467 +#include "diagnostic.h"
87468 +#include "cfgloop.h"
87469 +
87470 +struct size_overflow_hash {
87471 + const struct size_overflow_hash * const next;
87472 + const char * const name;
87473 + const unsigned int param;
87474 +};
87475 +
87476 +#include "size_overflow_hash.h"
87477 +
87478 +enum marked {
87479 + MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
87480 +};
87481 +
87482 +enum overflow_reason {
87483 + OVERFLOW_NONE, OVERFLOW_INTENTIONAL
87484 +};
87485 +
87486 +#define __unused __attribute__((__unused__))
87487 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
87488 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
87489 +#define BEFORE_STMT true
87490 +#define AFTER_STMT false
87491 +#define CREATE_NEW_VAR NULL_TREE
87492 +#define CODES_LIMIT 32
87493 +#define MAX_PARAM 16
87494 +#define MY_STMT GF_PLF_1
87495 +#define NO_CAST_CHECK GF_PLF_2
87496 +
87497 +#if BUILDING_GCC_VERSION == 4005
87498 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
87499 +#endif
87500 +
87501 +int plugin_is_GPL_compatible;
87502 +void debug_gimple_stmt(gimple gs);
87503 +
87504 +static tree expand(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree lhs);
87505 +static tree report_size_overflow_decl;
87506 +static const_tree const_char_ptr_type_node;
87507 +static unsigned int handle_function(void);
87508 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, enum overflow_reason* overflowed, bool before);
87509 +static tree get_size_overflow_type(gimple stmt, const_tree node);
87510 +static tree dup_assign(struct pointer_set_t *visited, enum overflow_reason* overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3);
87511 +
87512 +static struct plugin_info size_overflow_plugin_info = {
87513 + .version = "20120910beta",
87514 + .help = "no-size-overflow\tturn off size overflow checking\n",
87515 +};
87516 +
87517 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
87518 +{
87519 + unsigned int arg_count;
87520 +
87521 + if (TREE_CODE(*node) == FUNCTION_DECL)
87522 + arg_count = type_num_arguments(TREE_TYPE(*node));
87523 + else if (TREE_CODE(*node) == FUNCTION_TYPE || TREE_CODE(*node) == METHOD_TYPE)
87524 + arg_count = type_num_arguments(*node);
87525 + else {
87526 + *no_add_attrs = true;
87527 + error("%qE attribute only applies to functions", name);
87528 + return NULL_TREE;
87529 + }
87530 +
87531 + for (; args; args = TREE_CHAIN(args)) {
87532 + tree position = TREE_VALUE(args);
87533 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
87534 + error("handle_size_overflow_attribute: overflow parameter outside range.");
87535 + *no_add_attrs = true;
87536 + }
87537 + }
87538 + return NULL_TREE;
87539 +}
87540 +
87541 +static struct attribute_spec size_overflow_attr = {
87542 + .name = "size_overflow",
87543 + .min_length = 1,
87544 + .max_length = -1,
87545 + .decl_required = true,
87546 + .type_required = false,
87547 + .function_type_required = false,
87548 + .handler = handle_size_overflow_attribute,
87549 +#if BUILDING_GCC_VERSION >= 4007
87550 + .affects_type_identity = false
87551 +#endif
87552 +};
87553 +
87554 +static struct attribute_spec intentional_overflow_attr = {
87555 + .name = "intentional_overflow",
87556 + .min_length = 1,
87557 + .max_length = -1,
87558 + .decl_required = true,
87559 + .type_required = false,
87560 + .function_type_required = false,
87561 + .handler = NULL,
87562 +#if BUILDING_GCC_VERSION >= 4007
87563 + .affects_type_identity = false
87564 +#endif
87565 +};
87566 +
87567 +static void register_attributes(void __unused *event_data, void __unused *data)
87568 +{
87569 + register_attribute(&size_overflow_attr);
87570 + register_attribute(&intentional_overflow_attr);
87571 +}
87572 +
87573 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
87574 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
87575 +{
87576 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
87577 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
87578 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
87579 +
87580 + unsigned int m = 0x57559429;
87581 + unsigned int n = 0x5052acdb;
87582 + const unsigned int *key4 = (const unsigned int *)key;
87583 + unsigned int h = len;
87584 + unsigned int k = len + seed + n;
87585 + unsigned long long p;
87586 +
87587 + while (len >= 8) {
87588 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
87589 + len -= 8;
87590 + }
87591 + if (len >= 4) {
87592 + cwmixb(key4[0]) key4 += 1;
87593 + len -= 4;
87594 + }
87595 + if (len)
87596 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
87597 + cwmixb(h ^ (k + n));
87598 + return k ^ h;
87599 +
87600 +#undef cwfold
87601 +#undef cwmixa
87602 +#undef cwmixb
87603 +}
87604 +
87605 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
87606 +{
87607 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
87608 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
87609 + return fn ^ codes;
87610 +}
87611 +
87612 +static inline const_tree get_original_function_decl(const_tree fndecl)
87613 +{
87614 + if (DECL_ABSTRACT_ORIGIN(fndecl))
87615 + return DECL_ABSTRACT_ORIGIN(fndecl);
87616 + return fndecl;
87617 +}
87618 +
87619 +static inline gimple get_def_stmt(const_tree node)
87620 +{
87621 + gcc_assert(node != NULL_TREE);
87622 + gcc_assert(TREE_CODE(node) == SSA_NAME);
87623 + return SSA_NAME_DEF_STMT(node);
87624 +}
87625 +
87626 +static unsigned char get_tree_code(const_tree type)
87627 +{
87628 + switch (TREE_CODE(type)) {
87629 + case ARRAY_TYPE:
87630 + return 0;
87631 + case BOOLEAN_TYPE:
87632 + return 1;
87633 + case ENUMERAL_TYPE:
87634 + return 2;
87635 + case FUNCTION_TYPE:
87636 + return 3;
87637 + case INTEGER_TYPE:
87638 + return 4;
87639 + case POINTER_TYPE:
87640 + return 5;
87641 + case RECORD_TYPE:
87642 + return 6;
87643 + case UNION_TYPE:
87644 + return 7;
87645 + case VOID_TYPE:
87646 + return 8;
87647 + case REAL_TYPE:
87648 + return 9;
87649 + case VECTOR_TYPE:
87650 + return 10;
87651 + case REFERENCE_TYPE:
87652 + return 11;
87653 + case OFFSET_TYPE:
87654 + return 12;
87655 + case COMPLEX_TYPE:
87656 + return 13;
87657 + default:
87658 + debug_tree((tree)type);
87659 + gcc_unreachable();
87660 + }
87661 +}
87662 +
87663 +static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
87664 +{
87665 + gcc_assert(type != NULL_TREE);
87666 +
87667 + while (type && len < CODES_LIMIT) {
87668 + tree_codes[len] = get_tree_code(type);
87669 + len++;
87670 + type = TREE_TYPE(type);
87671 + }
87672 + return len;
87673 +}
87674 +
87675 +static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
87676 +{
87677 + const_tree arg, result, type = TREE_TYPE(fndecl);
87678 + enum tree_code code = TREE_CODE(type);
87679 + size_t len = 0;
87680 +
87681 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
87682 +
87683 + arg = TYPE_ARG_TYPES(type);
87684 + // skip builtins __builtin_constant_p
87685 + if (!arg && DECL_BUILT_IN(fndecl))
87686 + return 0;
87687 + gcc_assert(arg != NULL_TREE);
87688 +
87689 + if (TREE_CODE_CLASS(code) == tcc_type)
87690 + result = type;
87691 + else
87692 + result = DECL_RESULT(fndecl);
87693 +
87694 + gcc_assert(result != NULL_TREE);
87695 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
87696 +
87697 + while (arg && len < CODES_LIMIT) {
87698 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
87699 + arg = TREE_CHAIN(arg);
87700 + }
87701 +
87702 + gcc_assert(len != 0);
87703 + return len;
87704 +}
87705 +
87706 +static const struct size_overflow_hash *get_function_hash(const_tree fndecl)
87707 +{
87708 + unsigned int hash;
87709 + const struct size_overflow_hash *entry;
87710 + unsigned char tree_codes[CODES_LIMIT];
87711 + size_t len;
87712 + const char *func_name = NAME(fndecl);
87713 +
87714 + len = get_function_decl(fndecl, tree_codes);
87715 + if (len == 0)
87716 + return NULL;
87717 +
87718 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
87719 +
87720 + entry = size_overflow_hash[hash];
87721 + while (entry) {
87722 + if (!strcmp(entry->name, func_name))
87723 + return entry;
87724 + entry = entry->next;
87725 + }
87726 +
87727 + return NULL;
87728 +}
87729 +
87730 +static void check_arg_type(const_tree arg)
87731 +{
87732 + const_tree type = TREE_TYPE(arg);
87733 + enum tree_code code = TREE_CODE(type);
87734 +
87735 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
87736 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
87737 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
87738 +}
87739 +
87740 +static int find_arg_number(const_tree arg, const_tree func)
87741 +{
87742 + tree var;
87743 + bool match = false;
87744 + unsigned int argnum = 1;
87745 +
87746 + if (TREE_CODE(arg) == SSA_NAME)
87747 + arg = SSA_NAME_VAR(arg);
87748 +
87749 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
87750 + if (strcmp(NAME(arg), NAME(var))) {
87751 + argnum++;
87752 + continue;
87753 + }
87754 + check_arg_type(var);
87755 +
87756 + match = true;
87757 + break;
87758 + }
87759 + if (!match) {
87760 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
87761 + return 0;
87762 + }
87763 + return argnum;
87764 +}
87765 +
87766 +static void print_missing_msg(const_tree func, unsigned int argnum)
87767 +{
87768 + unsigned int new_hash;
87769 + size_t len;
87770 + unsigned char tree_codes[CODES_LIMIT];
87771 + location_t loc = DECL_SOURCE_LOCATION(func);
87772 + const char *curfunc = NAME(func);
87773 +
87774 + len = get_function_decl(func, tree_codes);
87775 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
87776 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
87777 +}
87778 +
87779 +static unsigned int search_missing_attribute(const_tree arg)
87780 +{
87781 + const_tree type = TREE_TYPE(arg);
87782 + const_tree func = get_original_function_decl(current_function_decl);
87783 + unsigned int argnum;
87784 + const struct size_overflow_hash *hash;
87785 +
87786 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
87787 +
87788 + if (TREE_CODE(type) == POINTER_TYPE)
87789 + return 0;
87790 +
87791 + argnum = find_arg_number(arg, func);
87792 + if (argnum == 0)
87793 + return 0;
87794 +
87795 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
87796 + return argnum;
87797 +
87798 + hash = get_function_hash(func);
87799 + if (!hash || !(hash->param & (1U << argnum))) {
87800 + print_missing_msg(func, argnum);
87801 + return 0;
87802 + }
87803 + return argnum;
87804 +}
87805 +
87806 +static tree create_new_var(tree type)
87807 +{
87808 + tree new_var = create_tmp_var(type, "cicus");
87809 +
87810 + add_referenced_var(new_var);
87811 + mark_sym_for_renaming(new_var);
87812 + return new_var;
87813 +}
87814 +
87815 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
87816 +{
87817 + gimple assign;
87818 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
87819 + tree type = TREE_TYPE(rhs1);
87820 + tree lhs = create_new_var(type);
87821 +
87822 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
87823 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
87824 +
87825 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
87826 + update_stmt(assign);
87827 + gimple_set_plf(assign, MY_STMT, true);
87828 + return assign;
87829 +}
87830 +
87831 +static bool is_bool(const_tree node)
87832 +{
87833 + const_tree type;
87834 +
87835 + if (node == NULL_TREE)
87836 + return false;
87837 +
87838 + type = TREE_TYPE(node);
87839 + if (!INTEGRAL_TYPE_P(type))
87840 + return false;
87841 + if (TREE_CODE(type) == BOOLEAN_TYPE)
87842 + return true;
87843 + if (TYPE_PRECISION(type) == 1)
87844 + return true;
87845 + return false;
87846 +}
87847 +
87848 +static tree cast_a_tree(tree type, tree var)
87849 +{
87850 + gcc_assert(type != NULL_TREE);
87851 + gcc_assert(var != NULL_TREE);
87852 + gcc_assert(fold_convertible_p(type, var));
87853 +
87854 + return fold_convert(type, var);
87855 +}
87856 +
87857 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before)
87858 +{
87859 + gimple assign;
87860 +
87861 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
87862 + if (gsi_end_p(*gsi) && before == BEFORE_STMT)
87863 + gcc_unreachable();
87864 +
87865 + if (lhs == CREATE_NEW_VAR)
87866 + lhs = create_new_var(dst_type);
87867 +
87868 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
87869 +
87870 + if (!gsi_end_p(*gsi)) {
87871 + location_t loc = gimple_location(gsi_stmt(*gsi));
87872 + gimple_set_location(assign, loc);
87873 + }
87874 +
87875 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
87876 +
87877 + if (before)
87878 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
87879 + else
87880 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
87881 + update_stmt(assign);
87882 + gimple_set_plf(assign, MY_STMT, true);
87883 +
87884 + return assign;
87885 +}
87886 +
87887 +static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
87888 +{
87889 + const_gimple assign;
87890 + gimple_stmt_iterator gsi;
87891 +
87892 + if (new_rhs1 == NULL_TREE)
87893 + return NULL_TREE;
87894 +
87895 + if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
87896 + gsi = gsi_for_stmt(stmt);
87897 + assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
87898 + return gimple_get_lhs(assign);
87899 + }
87900 + return new_rhs1;
87901 +}
87902 +
87903 +static tree follow_overflow_type_and_dup(struct pointer_set_t *visited, enum overflow_reason* overflowed, gimple stmt, const_tree node, tree new_rhs1, tree new_rhs2, tree new_rhs3)
87904 +{
87905 + tree size_overflow_type = get_size_overflow_type(stmt, node);
87906 +
87907 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
87908 +
87909 + if (new_rhs2 != NULL_TREE)
87910 + new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
87911 +
87912 + if (new_rhs3 != NULL_TREE)
87913 + new_rhs3 = cast_to_new_size_overflow_type(stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
87914 +
87915 + return dup_assign(visited, overflowed, stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
87916 +}
87917 +
87918 +static enum marked is_already_marked(const_tree fndecl, unsigned int argnum)
87919 +{
87920 + const_tree attr, p;
87921 +
87922 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
87923 + if (!attr || !TREE_VALUE(attr))
87924 + return MARKED_NO;
87925 +
87926 + p = TREE_VALUE(attr);
87927 + if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
87928 + return MARKED_NOT_INTENTIONAL;
87929 +
87930 + do {
87931 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
87932 + return MARKED_YES;
87933 + p = TREE_CHAIN(p);
87934 + } while (p);
87935 +
87936 + return MARKED_NO;
87937 +}
87938 +
87939 +static const_tree search_field_decl(const_tree comp_ref)
87940 +{
87941 + const_tree field = NULL_TREE;
87942 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
87943 +
87944 + for (i = 0; i < len; i++) {
87945 + field = TREE_OPERAND(comp_ref, i);
87946 + if (TREE_CODE(field) == FIELD_DECL)
87947 + break;
87948 + }
87949 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
87950 + return field;
87951 +}
87952 +
87953 +static void handle_component_ref(enum overflow_reason* overflowed, const_gimple stmt, const_tree binary_dup_rhs)
87954 +{
87955 + const_tree rhs, field, attr;
87956 +
87957 + if (gimple_code(stmt) != GIMPLE_ASSIGN)
87958 + return;
87959 +
87960 + gcc_assert(gimple_num_ops(stmt) == 2 || gimple_num_ops(stmt) == 3);
87961 +
87962 + if (gimple_num_ops(stmt) == 2)
87963 + rhs = gimple_assign_rhs1(stmt);
87964 + else
87965 + rhs = binary_dup_rhs;
87966 +
87967 + if (TREE_CODE(rhs) != COMPONENT_REF)
87968 + return;
87969 +
87970 + field = search_field_decl(rhs);
87971 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
87972 + if (!attr || !TREE_VALUE(attr))
87973 + return;
87974 +
87975 + *overflowed = OVERFLOW_INTENTIONAL;
87976 +}
87977 +
87978 +static tree create_assign(struct pointer_set_t *visited, enum overflow_reason* overflowed, gimple oldstmt, tree rhs1, bool before)
87979 +{
87980 + unsigned int argnum;
87981 + const_tree oldstmt_rhs1;
87982 + tree size_overflow_type, lhs;
87983 + enum tree_code code;
87984 + gimple stmt;
87985 + gimple_stmt_iterator gsi;
87986 +
87987 + handle_component_ref(overflowed, oldstmt, rhs1);
87988 +
87989 + if (*overflowed == OVERFLOW_INTENTIONAL)
87990 + return NULL_TREE;
87991 +
87992 + if (rhs1 == NULL_TREE) {
87993 + debug_gimple_stmt(oldstmt);
87994 + error("create_assign: rhs1 is NULL_TREE");
87995 + gcc_unreachable();
87996 + }
87997 +
87998 + if (gimple_code(oldstmt) == GIMPLE_ASM)
87999 + lhs = rhs1;
88000 + else
88001 + lhs = gimple_get_lhs(oldstmt);
88002 +
88003 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
88004 + code = TREE_CODE(oldstmt_rhs1);
88005 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP)) {
88006 + argnum = search_missing_attribute(oldstmt_rhs1);
88007 + if (argnum && is_already_marked(get_original_function_decl(current_function_decl), argnum) == MARKED_YES) {
88008 + *overflowed = OVERFLOW_INTENTIONAL;
88009 + return NULL_TREE;
88010 + }
88011 +
88012 + }
88013 +
88014 + gsi = gsi_for_stmt(oldstmt);
88015 + pointer_set_insert(visited, oldstmt);
88016 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
88017 + basic_block next_bb, cur_bb;
88018 + const_edge e;
88019 +
88020 + gcc_assert(before == false);
88021 + gcc_assert(stmt_can_throw_internal(oldstmt));
88022 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
88023 + gcc_assert(!gsi_end_p(gsi));
88024 +
88025 + cur_bb = gimple_bb(oldstmt);
88026 + next_bb = cur_bb->next_bb;
88027 + e = find_edge(cur_bb, next_bb);
88028 + gcc_assert(e != NULL);
88029 + gcc_assert(e->flags & EDGE_FALLTHRU);
88030 +
88031 + gsi = gsi_after_labels(next_bb);
88032 + gcc_assert(!gsi_end_p(gsi));
88033 +
88034 + before = true;
88035 + oldstmt = gsi_stmt(gsi);
88036 + pointer_set_insert(visited, oldstmt);
88037 + }
88038 +
88039 + size_overflow_type = get_size_overflow_type(oldstmt, lhs);
88040 +
88041 + stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
88042 + gimple_set_plf(stmt, MY_STMT, true);
88043 + return gimple_get_lhs(stmt);
88044 +}
88045 +
88046 +static tree dup_assign(struct pointer_set_t *visited, enum overflow_reason* overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
88047 +{
88048 + gimple stmt;
88049 + gimple_stmt_iterator gsi;
88050 + tree new_var, lhs = gimple_get_lhs(oldstmt);
88051 +
88052 + if (*overflowed == OVERFLOW_INTENTIONAL)
88053 + return NULL_TREE;
88054 +
88055 + if (gimple_plf(oldstmt, MY_STMT))
88056 + return lhs;
88057 +
88058 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
88059 + rhs1 = gimple_assign_rhs1(oldstmt);
88060 + rhs1 = create_assign(visited, overflowed, oldstmt, rhs1, BEFORE_STMT);
88061 + }
88062 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
88063 + rhs2 = gimple_assign_rhs2(oldstmt);
88064 + rhs2 = create_assign(visited, overflowed, oldstmt, rhs2, BEFORE_STMT);
88065 + }
88066 +
88067 + stmt = gimple_copy(oldstmt);
88068 + gimple_set_location(stmt, gimple_location(oldstmt));
88069 + gimple_set_plf(stmt, MY_STMT, true);
88070 +
88071 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
88072 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
88073 +
88074 + if (is_bool(lhs))
88075 + new_var = SSA_NAME_VAR(lhs);
88076 + else
88077 + new_var = create_new_var(size_overflow_type);
88078 + new_var = make_ssa_name(new_var, stmt);
88079 + gimple_set_lhs(stmt, new_var);
88080 +
88081 + if (rhs1 != NULL_TREE) {
88082 + if (!gimple_assign_cast_p(oldstmt))
88083 + rhs1 = cast_a_tree(size_overflow_type, rhs1);
88084 + gimple_assign_set_rhs1(stmt, rhs1);
88085 + }
88086 +
88087 + if (rhs2 != NULL_TREE)
88088 + gimple_assign_set_rhs2(stmt, rhs2);
88089 +#if BUILDING_GCC_VERSION >= 4007
88090 + if (rhs3 != NULL_TREE)
88091 + gimple_assign_set_rhs3(stmt, rhs3);
88092 +#endif
88093 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
88094 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
88095 +
88096 + gsi = gsi_for_stmt(oldstmt);
88097 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
88098 + update_stmt(stmt);
88099 + pointer_set_insert(visited, oldstmt);
88100 + return gimple_get_lhs(stmt);
88101 +}
88102 +
88103 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
88104 +{
88105 + basic_block bb;
88106 + gimple phi;
88107 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
88108 +
88109 + bb = gsi_bb(gsi);
88110 +
88111 + phi = create_phi_node(result, bb);
88112 + gsi = gsi_last(phi_nodes(bb));
88113 + gsi_remove(&gsi, false);
88114 +
88115 + gsi = gsi_for_stmt(oldstmt);
88116 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
88117 + gimple_set_bb(phi, bb);
88118 + gimple_set_plf(phi, MY_STMT, true);
88119 + return phi;
88120 +}
88121 +
88122 +static basic_block create_a_first_bb(void)
88123 +{
88124 + basic_block first_bb;
88125 +
88126 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88127 + if (dom_info_available_p(CDI_DOMINATORS))
88128 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
88129 + return first_bb;
88130 +}
88131 +
88132 +static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
88133 +{
88134 + basic_block bb;
88135 + const_gimple newstmt;
88136 + gimple_stmt_iterator gsi;
88137 + bool before = BEFORE_STMT;
88138 +
88139 + if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
88140 + gsi = gsi_for_stmt(get_def_stmt(arg));
88141 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
88142 + return gimple_get_lhs(newstmt);
88143 + }
88144 +
88145 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
88146 + gsi = gsi_after_labels(bb);
88147 + if (bb->index == 0) {
88148 + bb = create_a_first_bb();
88149 + gsi = gsi_start_bb(bb);
88150 + }
88151 + if (gsi_end_p(gsi))
88152 + before = AFTER_STMT;
88153 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
88154 + return gimple_get_lhs(newstmt);
88155 +}
88156 +
88157 +static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs)
88158 +{
88159 + gimple newstmt;
88160 + gimple_stmt_iterator gsi;
88161 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
88162 + gimple def_newstmt = get_def_stmt(new_rhs);
88163 +
88164 + gsi_insert = gsi_insert_after;
88165 + gsi = gsi_for_stmt(def_newstmt);
88166 +
88167 + switch (gimple_code(get_def_stmt(arg))) {
88168 + case GIMPLE_PHI:
88169 + newstmt = gimple_build_assign(new_var, new_rhs);
88170 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
88171 + gsi_insert = gsi_insert_before;
88172 + break;
88173 + case GIMPLE_ASM:
88174 + case GIMPLE_CALL:
88175 + newstmt = gimple_build_assign(new_var, new_rhs);
88176 + break;
88177 + case GIMPLE_ASSIGN:
88178 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
88179 + break;
88180 + default:
88181 + /* unknown gimple_code (handle_build_new_phi_arg) */
88182 + gcc_unreachable();
88183 + }
88184 +
88185 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
88186 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
88187 + gimple_set_plf(newstmt, MY_STMT, true);
88188 + update_stmt(newstmt);
88189 + return newstmt;
88190 +}
88191 +
88192 +static tree build_new_phi_arg(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree size_overflow_type, tree arg, tree new_var)
88193 +{
88194 + const_gimple newstmt;
88195 + tree new_rhs;
88196 +
88197 + new_rhs = expand(visited, overflowed, arg);
88198 + if (new_rhs == NULL_TREE || *overflowed == OVERFLOW_INTENTIONAL)
88199 + return NULL_TREE;
88200 +
88201 + new_rhs = cast_to_new_size_overflow_type(get_def_stmt(new_rhs), new_rhs, size_overflow_type, AFTER_STMT);
88202 +
88203 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
88204 + return gimple_get_lhs(newstmt);
88205 +}
88206 +
88207 +static tree build_new_phi(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree orig_result)
88208 +{
88209 + gimple phi, oldstmt = get_def_stmt(orig_result);
88210 + tree new_result, size_overflow_type;
88211 + unsigned int i;
88212 + unsigned int n = gimple_phi_num_args(oldstmt);
88213 +
88214 + size_overflow_type = get_size_overflow_type(oldstmt, orig_result);
88215 +
88216 + new_result = create_new_var(size_overflow_type);
88217 +
88218 + pointer_set_insert(visited, oldstmt);
88219 + phi = overflow_create_phi_node(oldstmt, new_result);
88220 + for (i = 0; i < n; i++) {
88221 + tree arg, lhs;
88222 +
88223 + arg = gimple_phi_arg_def(oldstmt, i);
88224 + if (is_gimple_constant(arg))
88225 + arg = cast_a_tree(size_overflow_type, arg);
88226 + lhs = build_new_phi_arg(visited, overflowed, size_overflow_type, arg, new_result);
88227 + if (*overflowed == OVERFLOW_INTENTIONAL)
88228 + return NULL_TREE;
88229 + if (lhs == NULL_TREE)
88230 + lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_result, i);
88231 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
88232 + }
88233 +
88234 + update_stmt(phi);
88235 + return gimple_phi_result(phi);
88236 +}
88237 +
88238 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
88239 +{
88240 + const_gimple assign;
88241 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88242 + tree origtype = TREE_TYPE(orig_rhs);
88243 +
88244 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
88245 +
88246 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
88247 + return gimple_get_lhs(assign);
88248 +}
88249 +
88250 +static void change_rhs1(gimple stmt, tree new_rhs1)
88251 +{
88252 + tree assign_rhs;
88253 + const_tree rhs = gimple_assign_rhs1(stmt);
88254 +
88255 + assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
88256 + gimple_assign_set_rhs1(stmt, assign_rhs);
88257 + update_stmt(stmt);
88258 +}
88259 +
88260 +static bool check_mode_type(const_gimple stmt)
88261 +{
88262 + const_tree lhs = gimple_get_lhs(stmt);
88263 + const_tree lhs_type = TREE_TYPE(lhs);
88264 + const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
88265 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
88266 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
88267 +
88268 + if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
88269 + return false;
88270 +
88271 + if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
88272 + return false;
88273 +
88274 + return true;
88275 +}
88276 +
88277 +static bool check_undefined_integer_operation(const_gimple stmt)
88278 +{
88279 + const_gimple def_stmt;
88280 + const_tree lhs = gimple_get_lhs(stmt);
88281 + const_tree rhs1 = gimple_assign_rhs1(stmt);
88282 + const_tree rhs1_type = TREE_TYPE(rhs1);
88283 + const_tree lhs_type = TREE_TYPE(lhs);
88284 +
88285 + if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
88286 + return false;
88287 +
88288 + def_stmt = get_def_stmt(rhs1);
88289 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
88290 + return false;
88291 +
88292 + if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
88293 + return false;
88294 + return true;
88295 +}
88296 +
88297 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
88298 +{
88299 + const_tree rhs1, lhs, rhs1_type, lhs_type;
88300 + enum machine_mode lhs_mode, rhs_mode;
88301 + gimple def_stmt = get_def_stmt(no_const_rhs);
88302 +
88303 + if (!gimple_assign_cast_p(def_stmt))
88304 + return false;
88305 +
88306 + rhs1 = gimple_assign_rhs1(def_stmt);
88307 + lhs = gimple_get_lhs(def_stmt);
88308 + rhs1_type = TREE_TYPE(rhs1);
88309 + lhs_type = TREE_TYPE(lhs);
88310 + rhs_mode = TYPE_MODE(rhs1_type);
88311 + lhs_mode = TYPE_MODE(lhs_type);
88312 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
88313 + return false;
88314 +
88315 + return true;
88316 +}
88317 +
88318 +static tree handle_unary_rhs(struct pointer_set_t *visited, enum overflow_reason* overflowed, gimple stmt)
88319 +{
88320 + tree size_overflow_type, lhs = gimple_get_lhs(stmt);
88321 + tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
88322 + const_tree rhs1_type = TREE_TYPE(rhs1);
88323 + const_tree lhs_type = TREE_TYPE(lhs);
88324 +
88325 + new_rhs1 = expand(visited, overflowed, rhs1);
88326 +
88327 + if (*overflowed == OVERFLOW_INTENTIONAL)
88328 + return NULL_TREE;
88329 +
88330 + if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
88331 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88332 +
88333 + if (gimple_plf(stmt, MY_STMT))
88334 + return lhs;
88335 +
88336 + if (gimple_plf(stmt, NO_CAST_CHECK))
88337 + return follow_overflow_type_and_dup(visited, overflowed, stmt, rhs1, new_rhs1, NULL_TREE, NULL_TREE);
88338 +
88339 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
88340 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
88341 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88342 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, overflowed, BEFORE_STMT);
88343 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88344 + }
88345 +
88346 + if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
88347 + return follow_overflow_type_and_dup(visited, overflowed, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
88348 +
88349 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
88350 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88351 +
88352 + change_rhs1(stmt, new_rhs1);
88353 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, overflowed, BEFORE_STMT);
88354 +
88355 + rhs1 = gimple_assign_rhs1(stmt);
88356 + rhs1_type = TREE_TYPE(rhs1);
88357 + if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
88358 + return create_assign(visited, overflowed, stmt, rhs1, AFTER_STMT);
88359 +
88360 + if (!check_mode_type(stmt))
88361 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88362 +
88363 + size_overflow_type = get_size_overflow_type(stmt, lhs);
88364 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88365 +
88366 + check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, overflowed, BEFORE_STMT);
88367 +
88368 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88369 +}
88370 +
88371 +static tree handle_unary_ops(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree lhs)
88372 +{
88373 + gimple def_stmt = get_def_stmt(lhs);
88374 + tree rhs1 = gimple_assign_rhs1(def_stmt);
88375 +
88376 + if (is_gimple_constant(rhs1))
88377 + return create_assign(visited, overflowed, def_stmt, lhs, AFTER_STMT);
88378 +
88379 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
88380 + switch (TREE_CODE(rhs1)) {
88381 + case SSA_NAME:
88382 + return handle_unary_rhs(visited, overflowed, def_stmt);
88383 + case ARRAY_REF:
88384 + case BIT_FIELD_REF:
88385 + case ADDR_EXPR:
88386 + case COMPONENT_REF:
88387 + case INDIRECT_REF:
88388 +#if BUILDING_GCC_VERSION >= 4006
88389 + case MEM_REF:
88390 +#endif
88391 + case PARM_DECL:
88392 + case TARGET_MEM_REF:
88393 + case VAR_DECL:
88394 + return create_assign(visited, overflowed, def_stmt, lhs, AFTER_STMT);
88395 +
88396 + default:
88397 + debug_gimple_stmt(def_stmt);
88398 + debug_tree(rhs1);
88399 + gcc_unreachable();
88400 + }
88401 +}
88402 +
88403 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
88404 +{
88405 + gimple cond_stmt;
88406 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
88407 +
88408 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
88409 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
88410 + update_stmt(cond_stmt);
88411 +}
88412 +
88413 +static tree create_string_param(tree string)
88414 +{
88415 + tree i_type, a_type;
88416 + const int length = TREE_STRING_LENGTH(string);
88417 +
88418 + gcc_assert(length > 0);
88419 +
88420 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
88421 + a_type = build_array_type(char_type_node, i_type);
88422 +
88423 + TREE_TYPE(string) = a_type;
88424 + TREE_CONSTANT(string) = 1;
88425 + TREE_READONLY(string) = 1;
88426 +
88427 + return build1(ADDR_EXPR, ptr_type_node, string);
88428 +}
88429 +
88430 +static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
88431 +{
88432 + gimple func_stmt;
88433 + const_gimple def_stmt;
88434 + const_tree loc_line;
88435 + tree loc_file, ssa_name, current_func;
88436 + expanded_location xloc;
88437 + char ssa_name_buf[256];
88438 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
88439 +
88440 + def_stmt = get_def_stmt(arg);
88441 + xloc = expand_location(gimple_location(def_stmt));
88442 +
88443 + if (!gimple_has_location(def_stmt)) {
88444 + xloc = expand_location(gimple_location(stmt));
88445 + if (!gimple_has_location(stmt))
88446 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
88447 + }
88448 +
88449 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
88450 +
88451 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
88452 + loc_file = create_string_param(loc_file);
88453 +
88454 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
88455 + current_func = create_string_param(current_func);
88456 +
88457 + snprintf(ssa_name_buf, 256, "%s_%u (%s)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max");
88458 + ssa_name = build_string(256, ssa_name_buf);
88459 + ssa_name = create_string_param(ssa_name);
88460 +
88461 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
88462 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
88463 +
88464 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
88465 +}
88466 +
88467 +static void __unused print_the_code_insertions(const_gimple stmt)
88468 +{
88469 + location_t loc = gimple_location(stmt);
88470 +
88471 + inform(loc, "Integer size_overflow check applied here.");
88472 +}
88473 +
88474 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
88475 +{
88476 + basic_block cond_bb, join_bb, bb_true;
88477 + edge e;
88478 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88479 +
88480 + cond_bb = gimple_bb(stmt);
88481 + if (before)
88482 + gsi_prev(&gsi);
88483 + if (gsi_end_p(gsi))
88484 + e = split_block_after_labels(cond_bb);
88485 + else
88486 + e = split_block(cond_bb, gsi_stmt(gsi));
88487 + cond_bb = e->src;
88488 + join_bb = e->dest;
88489 + e->flags = EDGE_FALSE_VALUE;
88490 + e->probability = REG_BR_PROB_BASE;
88491 +
88492 + bb_true = create_empty_bb(cond_bb);
88493 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
88494 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
88495 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
88496 +
88497 + if (dom_info_available_p(CDI_DOMINATORS)) {
88498 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
88499 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
88500 + }
88501 +
88502 + if (current_loops != NULL) {
88503 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
88504 + add_bb_to_loop(bb_true, cond_bb->loop_father);
88505 + }
88506 +
88507 + insert_cond(cond_bb, arg, cond_code, type_value);
88508 + insert_cond_result(bb_true, stmt, arg, min);
88509 +
88510 +// print_the_code_insertions(stmt);
88511 +}
88512 +
88513 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, enum overflow_reason* overflowed, bool before)
88514 +{
88515 + const_tree rhs_type = TREE_TYPE(rhs);
88516 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
88517 +
88518 + gcc_assert(rhs_type != NULL_TREE);
88519 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
88520 + return;
88521 +
88522 + if (*overflowed == OVERFLOW_INTENTIONAL)
88523 + return;
88524 +
88525 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
88526 +
88527 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
88528 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
88529 +
88530 + gcc_assert(!TREE_OVERFLOW(type_max));
88531 +
88532 + cast_rhs_type = TREE_TYPE(cast_rhs);
88533 + type_max_type = TREE_TYPE(type_max);
88534 + type_min_type = TREE_TYPE(type_min);
88535 + gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
88536 + gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
88537 +
88538 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
88539 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
88540 +}
88541 +
88542 +static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs)
88543 +{
88544 + gimple change_rhs_def_stmt;
88545 + tree lhs = gimple_get_lhs(def_stmt);
88546 + tree lhs_type = TREE_TYPE(lhs);
88547 + tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
88548 + tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
88549 +
88550 + if (change_rhs == NULL_TREE)
88551 + return get_size_overflow_type(def_stmt, lhs);
88552 +
88553 + change_rhs_def_stmt = get_def_stmt(change_rhs);
88554 +
88555 + if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
88556 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88557 +
88558 + if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
88559 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88560 +
88561 + if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
88562 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88563 +
88564 + if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
88565 + debug_gimple_stmt(def_stmt);
88566 + gcc_unreachable();
88567 + }
88568 +
88569 + return get_size_overflow_type(def_stmt, lhs);
88570 +}
88571 +
88572 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
88573 +{
88574 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
88575 + return false;
88576 + if (!is_gimple_constant(rhs))
88577 + return false;
88578 + return true;
88579 +}
88580 +
88581 +static tree get_cast_def_stmt_rhs(const_tree new_rhs)
88582 +{
88583 + gimple def_stmt;
88584 +
88585 + def_stmt = get_def_stmt(new_rhs);
88586 + // get_size_overflow_type
88587 + if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
88588 + gcc_assert(gimple_assign_cast_p(def_stmt));
88589 + return gimple_assign_rhs1(def_stmt);
88590 +}
88591 +
88592 +static tree cast_to_int_TI_type_and_check(enum overflow_reason* overflowed, gimple stmt, tree new_rhs)
88593 +{
88594 + gimple_stmt_iterator gsi;
88595 + const_gimple cast_stmt;
88596 + gimple def_stmt;
88597 + enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
88598 +
88599 + if (mode != TImode && mode != DImode) {
88600 + def_stmt = get_def_stmt(new_rhs);
88601 + gcc_assert(gimple_assign_cast_p(def_stmt));
88602 + new_rhs = gimple_assign_rhs1(def_stmt);
88603 + mode = TYPE_MODE(TREE_TYPE(new_rhs));
88604 + }
88605 +
88606 + gcc_assert(mode == TImode || mode == DImode);
88607 +
88608 + if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
88609 + return new_rhs;
88610 +
88611 + gsi = gsi_for_stmt(stmt);
88612 + cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
88613 + new_rhs = gimple_get_lhs(cast_stmt);
88614 +
88615 + if (mode == DImode)
88616 + return new_rhs;
88617 +
88618 + check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, overflowed, BEFORE_STMT);
88619 +
88620 + return new_rhs;
88621 +}
88622 +
88623 +static bool is_an_integer_trunction(const_gimple stmt)
88624 +{
88625 + gimple rhs1_def_stmt, rhs2_def_stmt;
88626 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
88627 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
88628 + const_tree rhs1 = gimple_assign_rhs1(stmt);
88629 + const_tree rhs2 = gimple_assign_rhs2(stmt);
88630 + enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
88631 + enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
88632 +
88633 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
88634 + return false;
88635 +
88636 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
88637 +
88638 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
88639 + return false;
88640 +
88641 + rhs1_def_stmt = get_def_stmt(rhs1);
88642 + rhs2_def_stmt = get_def_stmt(rhs2);
88643 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
88644 + return false;
88645 +
88646 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
88647 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
88648 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
88649 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
88650 + if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
88651 + return false;
88652 +
88653 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
88654 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
88655 + return true;
88656 +}
88657 +
88658 +static tree handle_integer_truncation(struct pointer_set_t *visited, enum overflow_reason* overflowed, const_tree lhs)
88659 +{
88660 + tree new_rhs1, new_rhs2;
88661 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
88662 + tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
88663 + gimple assign, stmt = get_def_stmt(lhs);
88664 + tree rhs1 = gimple_assign_rhs1(stmt);
88665 + tree rhs2 = gimple_assign_rhs2(stmt);
88666 +
88667 + if (!is_an_integer_trunction(stmt))
88668 + return NULL_TREE;
88669 +
88670 + new_rhs1 = expand(visited, overflowed, rhs1);
88671 + new_rhs2 = expand(visited, overflowed, rhs2);
88672 +
88673 + if (*overflowed == OVERFLOW_INTENTIONAL)
88674 + return NULL_TREE;
88675 +
88676 + new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
88677 + new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
88678 +
88679 + new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
88680 + new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
88681 +
88682 + if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
88683 + new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(overflowed, stmt, new_rhs1_def_stmt_rhs1);
88684 + new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(overflowed, stmt, new_rhs2_def_stmt_rhs1);
88685 + }
88686 +
88687 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
88688 + new_lhs = gimple_get_lhs(assign);
88689 + check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, overflowed, AFTER_STMT);
88690 +
88691 + return follow_overflow_type_and_dup(visited, overflowed, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
88692 +}
88693 +
88694 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
88695 +{
88696 + const_gimple def_stmt;
88697 +
88698 + if (TREE_CODE(rhs) != SSA_NAME)
88699 + return false;
88700 +
88701 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
88702 + return false;
88703 +
88704 + def_stmt = get_def_stmt(rhs);
88705 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
88706 + return false;
88707 +
88708 + return true;
88709 +}
88710 +
88711 +static tree handle_intentional_overflow(struct pointer_set_t *visited, enum overflow_reason* overflowed, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2)
88712 +{
88713 + tree new_rhs, size_overflow_type, orig_rhs;
88714 + void (*gimple_assign_set_rhs)(gimple, tree);
88715 + tree rhs1 = gimple_assign_rhs1(stmt);
88716 + tree rhs2 = gimple_assign_rhs2(stmt);
88717 + tree lhs = gimple_get_lhs(stmt);
88718 +
88719 + if (change_rhs == NULL_TREE)
88720 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88721 +
88722 + if (new_rhs2 == NULL_TREE) {
88723 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1);
88724 + new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
88725 + orig_rhs = rhs1;
88726 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
88727 + } else {
88728 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2);
88729 + new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
88730 + orig_rhs = rhs2;
88731 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
88732 + }
88733 +
88734 + change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT);
88735 +
88736 + if (check_overflow)
88737 + check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, overflowed, BEFORE_STMT);
88738 +
88739 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
88740 + gimple_assign_set_rhs(stmt, new_rhs);
88741 + update_stmt(stmt);
88742 +
88743 + return create_assign(visited, overflowed, stmt, lhs, AFTER_STMT);
88744 +}
88745 +
88746 +static tree handle_binary_ops(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree lhs)
88747 +{
88748 + tree rhs1, rhs2, new_lhs;
88749 + gimple def_stmt = get_def_stmt(lhs);
88750 + tree new_rhs1 = NULL_TREE;
88751 + tree new_rhs2 = NULL_TREE;
88752 +
88753 + rhs1 = gimple_assign_rhs1(def_stmt);
88754 + rhs2 = gimple_assign_rhs2(def_stmt);
88755 +
88756 + /* no DImode/TImode division in the 32/64 bit kernel */
88757 + switch (gimple_assign_rhs_code(def_stmt)) {
88758 + case RDIV_EXPR:
88759 + case TRUNC_DIV_EXPR:
88760 + case CEIL_DIV_EXPR:
88761 + case FLOOR_DIV_EXPR:
88762 + case ROUND_DIV_EXPR:
88763 + case TRUNC_MOD_EXPR:
88764 + case CEIL_MOD_EXPR:
88765 + case FLOOR_MOD_EXPR:
88766 + case ROUND_MOD_EXPR:
88767 + case EXACT_DIV_EXPR:
88768 + case POINTER_PLUS_EXPR:
88769 + case BIT_AND_EXPR:
88770 + return create_assign(visited, overflowed, def_stmt, lhs, AFTER_STMT);
88771 + default:
88772 + break;
88773 + }
88774 +
88775 + new_lhs = handle_integer_truncation(visited, overflowed, lhs);
88776 + if (new_lhs != NULL_TREE)
88777 + return new_lhs;
88778 +
88779 + if (TREE_CODE(rhs1) == SSA_NAME)
88780 + new_rhs1 = expand(visited, overflowed, rhs1);
88781 + if (TREE_CODE(rhs2) == SSA_NAME)
88782 + new_rhs2 = expand(visited, overflowed, rhs2);
88783 +
88784 + if (*overflowed == OVERFLOW_INTENTIONAL)
88785 + return NULL_TREE;
88786 +
88787 + if (is_a_neg_overflow(def_stmt, rhs2))
88788 + return handle_intentional_overflow(visited, overflowed, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
88789 + if (is_a_neg_overflow(def_stmt, rhs1))
88790 + return handle_intentional_overflow(visited, overflowed, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
88791 +
88792 + if (is_a_constant_overflow(def_stmt, rhs2))
88793 + return handle_intentional_overflow(visited, overflowed, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE);
88794 + if (is_a_constant_overflow(def_stmt, rhs1))
88795 + return handle_intentional_overflow(visited, overflowed, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2);
88796 +
88797 + return follow_overflow_type_and_dup(visited, overflowed, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
88798 +}
88799 +
88800 +#if BUILDING_GCC_VERSION >= 4007
88801 +static tree get_new_rhs(struct pointer_set_t *visited, enum overflow_reason * overflowed, tree size_overflow_type, tree rhs)
88802 +{
88803 + if (is_gimple_constant(rhs))
88804 + return cast_a_tree(size_overflow_type, rhs);
88805 + if (TREE_CODE(rhs) != SSA_NAME)
88806 + return NULL_TREE;
88807 + return expand(visited, overflowed, rhs);
88808 +}
88809 +
88810 +static tree handle_ternary_ops(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree lhs)
88811 +{
88812 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
88813 + gimple def_stmt = get_def_stmt(lhs);
88814 +
88815 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
88816 +
88817 + rhs1 = gimple_assign_rhs1(def_stmt);
88818 + rhs2 = gimple_assign_rhs2(def_stmt);
88819 + rhs3 = gimple_assign_rhs3(def_stmt);
88820 + new_rhs1 = get_new_rhs(visited, overflowed, size_overflow_type, rhs1);
88821 + new_rhs2 = get_new_rhs(visited, overflowed, size_overflow_type, rhs2);
88822 + new_rhs3 = get_new_rhs(visited, overflowed, size_overflow_type, rhs3);
88823 +
88824 + if (*overflowed == OVERFLOW_INTENTIONAL)
88825 + return NULL_TREE;
88826 +
88827 + return follow_overflow_type_and_dup(visited, overflowed, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
88828 +}
88829 +#endif
88830 +
88831 +static tree get_size_overflow_type(gimple stmt, const_tree node)
88832 +{
88833 + const_tree type;
88834 +
88835 + gcc_assert(node != NULL_TREE);
88836 +
88837 + type = TREE_TYPE(node);
88838 +
88839 + if (gimple_plf(stmt, MY_STMT))
88840 + return TREE_TYPE(node);
88841 +
88842 + switch (TYPE_MODE(type)) {
88843 + case QImode:
88844 + return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
88845 + case HImode:
88846 + return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
88847 + case SImode:
88848 + return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
88849 + case DImode:
88850 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
88851 + return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
88852 + return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
88853 + default:
88854 + debug_tree((tree)node);
88855 + error("get_size_overflow_type: unsupported gcc configuration.");
88856 + gcc_unreachable();
88857 + }
88858 +}
88859 +
88860 +static tree expand_visited(gimple def_stmt)
88861 +{
88862 + const_gimple next_stmt;
88863 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
88864 +
88865 + gsi_next(&gsi);
88866 + next_stmt = gsi_stmt(gsi);
88867 +
88868 + switch (gimple_code(next_stmt)) {
88869 + case GIMPLE_ASSIGN:
88870 + return gimple_get_lhs(next_stmt);
88871 + case GIMPLE_PHI:
88872 + return gimple_phi_result(next_stmt);
88873 + case GIMPLE_CALL:
88874 + return gimple_call_lhs(next_stmt);
88875 + default:
88876 + return NULL_TREE;
88877 + }
88878 +}
88879 +
88880 +static tree expand(struct pointer_set_t *visited, enum overflow_reason* overflowed, tree lhs)
88881 +{
88882 + gimple def_stmt;
88883 + unsigned int argnum;
88884 + enum tree_code code = TREE_CODE(TREE_TYPE(lhs));
88885 +
88886 + if (is_gimple_constant(lhs))
88887 + return NULL_TREE;
88888 +
88889 + if (TREE_CODE(lhs) == ADDR_EXPR)
88890 + return NULL_TREE;
88891 +
88892 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
88893 +
88894 + if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL) {
88895 + argnum = search_missing_attribute(lhs);
88896 + if (argnum && is_already_marked(get_original_function_decl(current_function_decl), argnum) == MARKED_YES) {
88897 + *overflowed = OVERFLOW_INTENTIONAL;
88898 + return NULL_TREE;
88899 + }
88900 + }
88901 +
88902 + def_stmt = get_def_stmt(lhs);
88903 +
88904 + if (!def_stmt)
88905 + return NULL_TREE;
88906 +
88907 + if (gimple_plf(def_stmt, MY_STMT))
88908 + return lhs;
88909 +
88910 + if (pointer_set_contains(visited, def_stmt))
88911 + return expand_visited(def_stmt);
88912 +
88913 + switch (gimple_code(def_stmt)) {
88914 + case GIMPLE_NOP:
88915 + argnum = search_missing_attribute(lhs);
88916 + if (!argnum)
88917 + return NULL_TREE;
88918 + if (is_already_marked(get_original_function_decl(current_function_decl), argnum) == MARKED_YES)
88919 + *overflowed = OVERFLOW_INTENTIONAL;
88920 + return NULL_TREE;
88921 + case GIMPLE_PHI:
88922 + return build_new_phi(visited, overflowed, lhs);
88923 + case GIMPLE_CALL:
88924 + case GIMPLE_ASM:
88925 + return create_assign(visited, overflowed, def_stmt, lhs, AFTER_STMT);
88926 + case GIMPLE_ASSIGN:
88927 + switch (gimple_num_ops(def_stmt)) {
88928 + case 2:
88929 + return handle_unary_ops(visited, overflowed, lhs);
88930 + case 3:
88931 + return handle_binary_ops(visited, overflowed, lhs);
88932 +#if BUILDING_GCC_VERSION >= 4007
88933 + case 4:
88934 + return handle_ternary_ops(visited, overflowed, lhs);
88935 +#endif
88936 + }
88937 + default:
88938 + debug_gimple_stmt(def_stmt);
88939 + error("expand: unknown gimple code");
88940 + gcc_unreachable();
88941 + }
88942 +}
88943 +
88944 +static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg)
88945 +{
88946 + const_gimple assign;
88947 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88948 + tree origtype = TREE_TYPE(origarg);
88949 +
88950 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
88951 +
88952 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
88953 +
88954 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
88955 + update_stmt(stmt);
88956 +}
88957 +
88958 +static tree get_function_arg(unsigned int argnum, const_gimple stmt, const_tree fndecl)
88959 +{
88960 + const char *origid;
88961 + tree arg;
88962 + const_tree origarg;
88963 +
88964 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
88965 + gcc_assert(gimple_call_num_args(stmt) > argnum);
88966 + return gimple_call_arg(stmt, argnum);
88967 + }
88968 +
88969 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
88970 + while (origarg && argnum) {
88971 + argnum--;
88972 + origarg = TREE_CHAIN(origarg);
88973 + }
88974 +
88975 + gcc_assert(argnum == 0);
88976 +
88977 + gcc_assert(origarg != NULL_TREE);
88978 + origid = NAME(origarg);
88979 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
88980 + if (!strcmp(origid, NAME(arg)))
88981 + return arg;
88982 + }
88983 + return NULL_TREE;
88984 +}
88985 +
88986 +static void handle_function_arg(gimple stmt, const_tree fndecl, unsigned int argnum)
88987 +{
88988 + struct pointer_set_t *visited;
88989 + tree arg, newarg;
88990 + enum overflow_reason overflowed = OVERFLOW_NONE;
88991 + location_t loc;
88992 + enum marked is_marked;
88993 +
88994 + arg = get_function_arg(argnum, stmt, fndecl);
88995 + if (arg == NULL_TREE)
88996 + return;
88997 +
88998 + if (is_gimple_constant(arg))
88999 + return;
89000 + if (TREE_CODE(arg) != SSA_NAME)
89001 + return;
89002 +
89003 + check_arg_type(arg);
89004 +
89005 + visited = pointer_set_create();
89006 + newarg = expand(visited, &overflowed, arg);
89007 + pointer_set_destroy(visited);
89008 +
89009 + is_marked = is_already_marked(fndecl, argnum + 1);
89010 + if ((overflowed == OVERFLOW_INTENTIONAL && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL)
89011 + return;
89012 +
89013 + if (overflowed == OVERFLOW_INTENTIONAL) {
89014 + loc = DECL_SOURCE_LOCATION(fndecl);
89015 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", NAME(fndecl), argnum + 1);
89016 + return;
89017 + }
89018 +
89019 + if (newarg == NULL_TREE)
89020 + return;
89021 +
89022 + change_function_arg(stmt, arg, argnum, newarg);
89023 +
89024 + check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, &overflowed, BEFORE_STMT);
89025 +}
89026 +
89027 +static void handle_function_by_attribute(gimple stmt, const_tree attr, const_tree fndecl)
89028 +{
89029 + tree p = TREE_VALUE(attr);
89030 + do {
89031 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
89032 + p = TREE_CHAIN(p);
89033 + } while (p);
89034 +}
89035 +
89036 +static void handle_function_by_hash(gimple stmt, const_tree fndecl)
89037 +{
89038 + const_tree orig_fndecl;
89039 + unsigned int num;
89040 + const struct size_overflow_hash *hash;
89041 +
89042 + orig_fndecl = get_original_function_decl(fndecl);
89043 + hash = get_function_hash(orig_fndecl);
89044 + if (!hash)
89045 + return;
89046 +
89047 + for (num = 1; num <= MAX_PARAM; num++)
89048 + if (hash->param & (1U << num))
89049 + handle_function_arg(stmt, fndecl, num - 1);
89050 +}
89051 +
89052 +static void set_plf_false(void)
89053 +{
89054 + basic_block bb;
89055 +
89056 + FOR_ALL_BB(bb) {
89057 + gimple_stmt_iterator si;
89058 +
89059 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
89060 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
89061 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
89062 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
89063 + }
89064 +}
89065 +
89066 +static unsigned int handle_function(void)
89067 +{
89068 + basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
89069 +
89070 + set_plf_false();
89071 +
89072 + do {
89073 + gimple_stmt_iterator gsi;
89074 + next = bb->next_bb;
89075 +
89076 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
89077 + const_tree fndecl, attr;
89078 + gimple stmt = gsi_stmt(gsi);
89079 +
89080 + if (!(is_gimple_call(stmt)))
89081 + continue;
89082 + fndecl = gimple_call_fndecl(stmt);
89083 + if (fndecl == NULL_TREE)
89084 + continue;
89085 + if (gimple_call_num_args(stmt) == 0)
89086 + continue;
89087 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
89088 + if (!attr || !TREE_VALUE(attr))
89089 + handle_function_by_hash(stmt, fndecl);
89090 + else
89091 + handle_function_by_attribute(stmt, attr, fndecl);
89092 + gsi = gsi_for_stmt(stmt);
89093 + next = gimple_bb(stmt)->next_bb;
89094 + }
89095 + bb = next;
89096 + } while (bb);
89097 + return 0;
89098 +}
89099 +
89100 +static struct gimple_opt_pass size_overflow_pass = {
89101 + .pass = {
89102 + .type = GIMPLE_PASS,
89103 + .name = "size_overflow",
89104 + .gate = NULL,
89105 + .execute = handle_function,
89106 + .sub = NULL,
89107 + .next = NULL,
89108 + .static_pass_number = 0,
89109 + .tv_id = TV_NONE,
89110 + .properties_required = PROP_cfg | PROP_referenced_vars,
89111 + .properties_provided = 0,
89112 + .properties_destroyed = 0,
89113 + .todo_flags_start = 0,
89114 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
89115 + }
89116 +};
89117 +
89118 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
89119 +{
89120 + tree fntype;
89121 +
89122 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
89123 +
89124 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
89125 + fntype = build_function_type_list(void_type_node,
89126 + const_char_ptr_type_node,
89127 + unsigned_type_node,
89128 + const_char_ptr_type_node,
89129 + const_char_ptr_type_node,
89130 + NULL_TREE);
89131 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
89132 +
89133 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
89134 + TREE_PUBLIC(report_size_overflow_decl) = 1;
89135 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
89136 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
89137 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
89138 +}
89139 +
89140 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
89141 +{
89142 + int i;
89143 + const char * const plugin_name = plugin_info->base_name;
89144 + const int argc = plugin_info->argc;
89145 + const struct plugin_argument * const argv = plugin_info->argv;
89146 + bool enable = true;
89147 +
89148 + struct register_pass_info size_overflow_pass_info = {
89149 + .pass = &size_overflow_pass.pass,
89150 + .reference_pass_name = "ssa",
89151 + .ref_pass_instance_number = 1,
89152 + .pos_op = PASS_POS_INSERT_AFTER
89153 + };
89154 +
89155 + if (!plugin_default_version_check(version, &gcc_version)) {
89156 + error(G_("incompatible gcc/plugin versions"));
89157 + return 1;
89158 + }
89159 +
89160 + for (i = 0; i < argc; ++i) {
89161 + if (!strcmp(argv[i].key, "no-size-overflow")) {
89162 + enable = false;
89163 + continue;
89164 + }
89165 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89166 + }
89167 +
89168 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
89169 + if (enable) {
89170 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
89171 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
89172 + }
89173 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
89174 +
89175 + return 0;
89176 +}
89177 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
89178 new file mode 100644
89179 index 0000000..38d2014
89180 --- /dev/null
89181 +++ b/tools/gcc/stackleak_plugin.c
89182 @@ -0,0 +1,313 @@
89183 +/*
89184 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
89185 + * Licensed under the GPL v2
89186 + *
89187 + * Note: the choice of the license means that the compilation process is
89188 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
89189 + * but for the kernel it doesn't matter since it doesn't link against
89190 + * any of the gcc libraries
89191 + *
89192 + * gcc plugin to help implement various PaX features
89193 + *
89194 + * - track lowest stack pointer
89195 + *
89196 + * TODO:
89197 + * - initialize all local variables
89198 + *
89199 + * BUGS:
89200 + * - none known
89201 + */
89202 +#include "gcc-plugin.h"
89203 +#include "config.h"
89204 +#include "system.h"
89205 +#include "coretypes.h"
89206 +#include "tree.h"
89207 +#include "tree-pass.h"
89208 +#include "flags.h"
89209 +#include "intl.h"
89210 +#include "toplev.h"
89211 +#include "plugin.h"
89212 +//#include "expr.h" where are you...
89213 +#include "diagnostic.h"
89214 +#include "plugin-version.h"
89215 +#include "tm.h"
89216 +#include "function.h"
89217 +#include "basic-block.h"
89218 +#include "gimple.h"
89219 +#include "rtl.h"
89220 +#include "emit-rtl.h"
89221 +
89222 +extern void print_gimple_stmt(FILE *, gimple, int, int);
89223 +
89224 +int plugin_is_GPL_compatible;
89225 +
89226 +static int track_frame_size = -1;
89227 +static const char track_function[] = "pax_track_stack";
89228 +static const char check_function[] = "pax_check_alloca";
89229 +static bool init_locals;
89230 +
89231 +static struct plugin_info stackleak_plugin_info = {
89232 + .version = "201203140940",
89233 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
89234 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
89235 +};
89236 +
89237 +static bool gate_stackleak_track_stack(void);
89238 +static unsigned int execute_stackleak_tree_instrument(void);
89239 +static unsigned int execute_stackleak_final(void);
89240 +
89241 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
89242 + .pass = {
89243 + .type = GIMPLE_PASS,
89244 + .name = "stackleak_tree_instrument",
89245 + .gate = gate_stackleak_track_stack,
89246 + .execute = execute_stackleak_tree_instrument,
89247 + .sub = NULL,
89248 + .next = NULL,
89249 + .static_pass_number = 0,
89250 + .tv_id = TV_NONE,
89251 + .properties_required = PROP_gimple_leh | PROP_cfg,
89252 + .properties_provided = 0,
89253 + .properties_destroyed = 0,
89254 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
89255 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
89256 + }
89257 +};
89258 +
89259 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
89260 + .pass = {
89261 + .type = RTL_PASS,
89262 + .name = "stackleak_final",
89263 + .gate = gate_stackleak_track_stack,
89264 + .execute = execute_stackleak_final,
89265 + .sub = NULL,
89266 + .next = NULL,
89267 + .static_pass_number = 0,
89268 + .tv_id = TV_NONE,
89269 + .properties_required = 0,
89270 + .properties_provided = 0,
89271 + .properties_destroyed = 0,
89272 + .todo_flags_start = 0,
89273 + .todo_flags_finish = TODO_dump_func
89274 + }
89275 +};
89276 +
89277 +static bool gate_stackleak_track_stack(void)
89278 +{
89279 + return track_frame_size >= 0;
89280 +}
89281 +
89282 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
89283 +{
89284 + gimple check_alloca;
89285 + tree fntype, fndecl, alloca_size;
89286 +
89287 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
89288 + fndecl = build_fn_decl(check_function, fntype);
89289 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
89290 +
89291 + // insert call to void pax_check_alloca(unsigned long size)
89292 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
89293 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
89294 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
89295 +}
89296 +
89297 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
89298 +{
89299 + gimple track_stack;
89300 + tree fntype, fndecl;
89301 +
89302 + fntype = build_function_type_list(void_type_node, NULL_TREE);
89303 + fndecl = build_fn_decl(track_function, fntype);
89304 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
89305 +
89306 + // insert call to void pax_track_stack(void)
89307 + track_stack = gimple_build_call(fndecl, 0);
89308 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
89309 +}
89310 +
89311 +#if BUILDING_GCC_VERSION == 4005
89312 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
89313 +{
89314 + tree fndecl;
89315 +
89316 + if (!is_gimple_call(stmt))
89317 + return false;
89318 + fndecl = gimple_call_fndecl(stmt);
89319 + if (!fndecl)
89320 + return false;
89321 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
89322 + return false;
89323 +// print_node(stderr, "pax", fndecl, 4);
89324 + return DECL_FUNCTION_CODE(fndecl) == code;
89325 +}
89326 +#endif
89327 +
89328 +static bool is_alloca(gimple stmt)
89329 +{
89330 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
89331 + return true;
89332 +
89333 +#if BUILDING_GCC_VERSION >= 4007
89334 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
89335 + return true;
89336 +#endif
89337 +
89338 + return false;
89339 +}
89340 +
89341 +static unsigned int execute_stackleak_tree_instrument(void)
89342 +{
89343 + basic_block bb, entry_bb;
89344 + bool prologue_instrumented = false, is_leaf = true;
89345 +
89346 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
89347 +
89348 + // 1. loop through BBs and GIMPLE statements
89349 + FOR_EACH_BB(bb) {
89350 + gimple_stmt_iterator gsi;
89351 +
89352 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
89353 + gimple stmt;
89354 +
89355 + stmt = gsi_stmt(gsi);
89356 +
89357 + if (is_gimple_call(stmt))
89358 + is_leaf = false;
89359 +
89360 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
89361 + if (!is_alloca(stmt))
89362 + continue;
89363 +
89364 + // 2. insert stack overflow check before each __builtin_alloca call
89365 + stackleak_check_alloca(&gsi);
89366 +
89367 + // 3. insert track call after each __builtin_alloca call
89368 + stackleak_add_instrumentation(&gsi);
89369 + if (bb == entry_bb)
89370 + prologue_instrumented = true;
89371 + }
89372 + }
89373 +
89374 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
89375 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
89376 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
89377 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
89378 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
89379 + return 0;
89380 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
89381 + return 0;
89382 +
89383 + // 4. insert track call at the beginning
89384 + if (!prologue_instrumented) {
89385 + gimple_stmt_iterator gsi;
89386 +
89387 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
89388 + if (dom_info_available_p(CDI_DOMINATORS))
89389 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
89390 + gsi = gsi_start_bb(bb);
89391 + stackleak_add_instrumentation(&gsi);
89392 + }
89393 +
89394 + return 0;
89395 +}
89396 +
89397 +static unsigned int execute_stackleak_final(void)
89398 +{
89399 + rtx insn;
89400 +
89401 + if (cfun->calls_alloca)
89402 + return 0;
89403 +
89404 + // keep calls only if function frame is big enough
89405 + if (get_frame_size() >= track_frame_size)
89406 + return 0;
89407 +
89408 + // 1. find pax_track_stack calls
89409 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
89410 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
89411 + rtx body;
89412 +
89413 + if (!CALL_P(insn))
89414 + continue;
89415 + body = PATTERN(insn);
89416 + if (GET_CODE(body) != CALL)
89417 + continue;
89418 + body = XEXP(body, 0);
89419 + if (GET_CODE(body) != MEM)
89420 + continue;
89421 + body = XEXP(body, 0);
89422 + if (GET_CODE(body) != SYMBOL_REF)
89423 + continue;
89424 + if (strcmp(XSTR(body, 0), track_function))
89425 + continue;
89426 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89427 + // 2. delete call
89428 + insn = delete_insn_and_edges(insn);
89429 +#if BUILDING_GCC_VERSION >= 4007
89430 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
89431 + insn = delete_insn_and_edges(insn);
89432 +#endif
89433 + }
89434 +
89435 +// print_simple_rtl(stderr, get_insns());
89436 +// print_rtl(stderr, get_insns());
89437 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89438 +
89439 + return 0;
89440 +}
89441 +
89442 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
89443 +{
89444 + const char * const plugin_name = plugin_info->base_name;
89445 + const int argc = plugin_info->argc;
89446 + const struct plugin_argument * const argv = plugin_info->argv;
89447 + int i;
89448 + struct register_pass_info stackleak_tree_instrument_pass_info = {
89449 + .pass = &stackleak_tree_instrument_pass.pass,
89450 +// .reference_pass_name = "tree_profile",
89451 + .reference_pass_name = "optimized",
89452 + .ref_pass_instance_number = 1,
89453 + .pos_op = PASS_POS_INSERT_BEFORE
89454 + };
89455 + struct register_pass_info stackleak_final_pass_info = {
89456 + .pass = &stackleak_final_rtl_opt_pass.pass,
89457 + .reference_pass_name = "final",
89458 + .ref_pass_instance_number = 1,
89459 + .pos_op = PASS_POS_INSERT_BEFORE
89460 + };
89461 +
89462 + if (!plugin_default_version_check(version, &gcc_version)) {
89463 + error(G_("incompatible gcc/plugin versions"));
89464 + return 1;
89465 + }
89466 +
89467 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
89468 +
89469 + for (i = 0; i < argc; ++i) {
89470 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
89471 + if (!argv[i].value) {
89472 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89473 + continue;
89474 + }
89475 + track_frame_size = atoi(argv[i].value);
89476 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
89477 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89478 + continue;
89479 + }
89480 + if (!strcmp(argv[i].key, "initialize-locals")) {
89481 + if (argv[i].value) {
89482 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89483 + continue;
89484 + }
89485 + init_locals = true;
89486 + continue;
89487 + }
89488 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89489 + }
89490 +
89491 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
89492 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
89493 +
89494 + return 0;
89495 +}
89496 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
89497 index 6789d78..4afd019e 100644
89498 --- a/tools/perf/util/include/asm/alternative-asm.h
89499 +++ b/tools/perf/util/include/asm/alternative-asm.h
89500 @@ -5,4 +5,7 @@
89501
89502 #define altinstruction_entry #
89503
89504 + .macro pax_force_retaddr rip=0, reload=0
89505 + .endm
89506 +
89507 #endif
89508 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
89509 index af0f22f..9a7d479 100644
89510 --- a/usr/gen_init_cpio.c
89511 +++ b/usr/gen_init_cpio.c
89512 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
89513 int retval;
89514 int rc = -1;
89515 int namesize;
89516 - int i;
89517 + unsigned int i;
89518
89519 mode |= S_IFREG;
89520
89521 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
89522 *env_var = *expanded = '\0';
89523 strncat(env_var, start + 2, end - start - 2);
89524 strncat(expanded, new_location, start - new_location);
89525 - strncat(expanded, getenv(env_var), PATH_MAX);
89526 - strncat(expanded, end + 1, PATH_MAX);
89527 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
89528 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
89529 strncpy(new_location, expanded, PATH_MAX);
89530 + new_location[PATH_MAX] = 0;
89531 } else
89532 break;
89533 }
89534 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
89535 index 44ee712..a01c4b8 100644
89536 --- a/virt/kvm/kvm_main.c
89537 +++ b/virt/kvm/kvm_main.c
89538 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
89539
89540 static cpumask_var_t cpus_hardware_enabled;
89541 static int kvm_usage_count = 0;
89542 -static atomic_t hardware_enable_failed;
89543 +static atomic_unchecked_t hardware_enable_failed;
89544
89545 struct kmem_cache *kvm_vcpu_cache;
89546 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
89547 @@ -703,7 +703,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
89548 /* We can read the guest memory with __xxx_user() later on. */
89549 if (user_alloc &&
89550 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
89551 - !access_ok(VERIFY_WRITE,
89552 + !__access_ok(VERIFY_WRITE,
89553 (void __user *)(unsigned long)mem->userspace_addr,
89554 mem->memory_size)))
89555 goto out;
89556 @@ -2291,7 +2291,7 @@ static void hardware_enable_nolock(void *junk)
89557
89558 if (r) {
89559 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
89560 - atomic_inc(&hardware_enable_failed);
89561 + atomic_inc_unchecked(&hardware_enable_failed);
89562 printk(KERN_INFO "kvm: enabling virtualization on "
89563 "CPU%d failed\n", cpu);
89564 }
89565 @@ -2345,10 +2345,10 @@ static int hardware_enable_all(void)
89566
89567 kvm_usage_count++;
89568 if (kvm_usage_count == 1) {
89569 - atomic_set(&hardware_enable_failed, 0);
89570 + atomic_set_unchecked(&hardware_enable_failed, 0);
89571 on_each_cpu(hardware_enable_nolock, NULL, 1);
89572
89573 - if (atomic_read(&hardware_enable_failed)) {
89574 + if (atomic_read_unchecked(&hardware_enable_failed)) {
89575 hardware_disable_all_nolock();
89576 r = -EBUSY;
89577 }
89578 @@ -2709,7 +2709,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
89579 kvm_arch_vcpu_put(vcpu);
89580 }
89581
89582 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89583 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89584 struct module *module)
89585 {
89586 int r;
89587 @@ -2772,7 +2772,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89588 if (!vcpu_align)
89589 vcpu_align = __alignof__(struct kvm_vcpu);
89590 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
89591 - 0, NULL);
89592 + SLAB_USERCOPY, NULL);
89593 if (!kvm_vcpu_cache) {
89594 r = -ENOMEM;
89595 goto out_free_3;
89596 @@ -2782,9 +2782,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89597 if (r)
89598 goto out_free;
89599
89600 - kvm_chardev_ops.owner = module;
89601 - kvm_vm_fops.owner = module;
89602 - kvm_vcpu_fops.owner = module;
89603 + pax_open_kernel();
89604 + *(void **)&kvm_chardev_ops.owner = module;
89605 + *(void **)&kvm_vm_fops.owner = module;
89606 + *(void **)&kvm_vcpu_fops.owner = module;
89607 + pax_close_kernel();
89608
89609 r = misc_register(&kvm_dev);
89610 if (r) {