]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.5.5-201210022020.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.5.5-201210022020.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..cd023f2 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 @@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126 -linux
127 +lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131 @@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135 -media
136 mconf
137 +mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144 +mkpiggy
145 mkprep
146 mkregtable
147 mktables
148 @@ -188,6 +207,8 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152 +parse-events*
153 +pasyms.h
154 patches*
155 pca200e.bin
156 pca200e_ecd.bin2
157 @@ -197,6 +218,7 @@ perf-archive
158 piggyback
159 piggy.gzip
160 piggy.S
161 +pmu-*
162 pnmtologo
163 ppc_defs.h*
164 pss_boot.h
165 @@ -206,7 +228,10 @@ r200_reg_safe.h
166 r300_reg_safe.h
167 r420_reg_safe.h
168 r600_reg_safe.h
169 +realmode.lds
170 +realmode.relocs
171 recordmcount
172 +regdb.c
173 relocs
174 rlim_names.h
175 rn50_reg_safe.h
176 @@ -216,8 +241,11 @@ series
177 setup
178 setup.bin
179 setup.elf
180 +size_overflow_hash.h
181 sImage
182 +slabinfo
183 sm_tbl*
184 +sortextable
185 split-include
186 syscalltab.h
187 tables.c
188 @@ -227,6 +255,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192 +user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196 @@ -238,13 +267,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200 +vdsox32.lds
201 +vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208 +vmlinux.bin.bz2
209 vmlinux.lds
210 +vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214 @@ -252,9 +285,11 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218 +utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222 zImage*
223 zconf.hash.c
224 +zconf.lex.c
225 zoffset.h
226 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
227 index a92c5eb..7530459 100644
228 --- a/Documentation/kernel-parameters.txt
229 +++ b/Documentation/kernel-parameters.txt
230 @@ -2051,6 +2051,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
231 the specified number of seconds. This is to be used if
232 your oopses keep scrolling off the screen.
233
234 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
235 + virtualization environments that don't cope well with the
236 + expand down segment used by UDEREF on X86-32 or the frequent
237 + page table updates on X86-64.
238 +
239 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
240 +
241 pcbit= [HW,ISDN]
242
243 pcd. [PARIDE]
244 diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
245 index 13d6166..8c235b6 100644
246 --- a/Documentation/sysctl/fs.txt
247 +++ b/Documentation/sysctl/fs.txt
248 @@ -163,16 +163,22 @@ This value can be used to query and set the core dump mode for setuid
249 or otherwise protected/tainted binaries. The modes are
250
251 0 - (default) - traditional behaviour. Any process which has changed
252 - privilege levels or is execute only will not be dumped
253 + privilege levels or is execute only will not be dumped.
254 1 - (debug) - all processes dump core when possible. The core dump is
255 owned by the current user and no security is applied. This is
256 intended for system debugging situations only. Ptrace is unchecked.
257 + This is insecure as it allows regular users to examine the memory
258 + contents of privileged processes.
259 2 - (suidsafe) - any binary which normally would not be dumped is dumped
260 - readable by root only. This allows the end user to remove
261 - such a dump but not access it directly. For security reasons
262 - core dumps in this mode will not overwrite one another or
263 - other files. This mode is appropriate when administrators are
264 - attempting to debug problems in a normal environment.
265 + anyway, but only if the "core_pattern" kernel sysctl is set to
266 + either a pipe handler or a fully qualified path. (For more details
267 + on this limitation, see CVE-2006-2451.) This mode is appropriate
268 + when administrators are attempting to debug problems in a normal
269 + environment, and either have a core dump pipe handler that knows
270 + to treat privileged core dumps with care, or specific directory
271 + defined for catching core dumps. If a core dump happens without
272 + a pipe handler or fully qualified path, a message will be emitted
273 + to syslog warning about the lack of a correct setting.
274
275 ==============================================================
276
277 diff --git a/Makefile b/Makefile
278 index b3dfc85..61372d3 100644
279 --- a/Makefile
280 +++ b/Makefile
281 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
282
283 HOSTCC = gcc
284 HOSTCXX = g++
285 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
286 -HOSTCXXFLAGS = -O2
287 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
288 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
289 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
290
291 # Decide whether to build built-in, modular, or both.
292 # Normally, just do built-in.
293 @@ -404,8 +405,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
294 # Rules shared between *config targets and build targets
295
296 # Basic helpers built in scripts/
297 -PHONY += scripts_basic
298 -scripts_basic:
299 +PHONY += scripts_basic gcc-plugins
300 +scripts_basic: gcc-plugins
301 $(Q)$(MAKE) $(build)=scripts/basic
302 $(Q)rm -f .tmp_quiet_recordmcount
303
304 @@ -561,6 +562,60 @@ else
305 KBUILD_CFLAGS += -O2
306 endif
307
308 +ifndef DISABLE_PAX_PLUGINS
309 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
310 +ifneq ($(PLUGINCC),)
311 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
312 +ifndef CONFIG_UML
313 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
314 +endif
315 +endif
316 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
317 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
318 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
319 +endif
320 +ifdef CONFIG_KALLOCSTAT_PLUGIN
321 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
322 +endif
323 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
324 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
325 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
326 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
327 +endif
328 +ifdef CONFIG_CHECKER_PLUGIN
329 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
330 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
331 +endif
332 +endif
333 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
334 +ifdef CONFIG_PAX_SIZE_OVERFLOW
335 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
336 +endif
337 +ifdef CONFIG_PAX_LATENT_ENTROPY
338 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
339 +endif
340 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
341 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
342 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
343 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
344 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
345 +ifeq ($(KBUILD_EXTMOD),)
346 +gcc-plugins:
347 + $(Q)$(MAKE) $(build)=tools/gcc
348 +else
349 +gcc-plugins: ;
350 +endif
351 +else
352 +gcc-plugins:
353 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
354 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
355 +else
356 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
357 +endif
358 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
359 +endif
360 +endif
361 +
362 include $(srctree)/arch/$(SRCARCH)/Makefile
363
364 ifdef CONFIG_READABLE_ASM
365 @@ -715,7 +770,7 @@ export mod_strip_cmd
366
367
368 ifeq ($(KBUILD_EXTMOD),)
369 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
370 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
371
372 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
373 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
374 @@ -762,6 +817,8 @@ endif
375
376 # The actual objects are generated when descending,
377 # make sure no implicit rule kicks in
378 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
379 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
380 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
381
382 # Handle descending into subdirectories listed in $(vmlinux-dirs)
383 @@ -771,7 +828,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
384 # Error messages still appears in the original language
385
386 PHONY += $(vmlinux-dirs)
387 -$(vmlinux-dirs): prepare scripts
388 +$(vmlinux-dirs): gcc-plugins prepare scripts
389 $(Q)$(MAKE) $(build)=$@
390
391 # Store (new) KERNELRELASE string in include/config/kernel.release
392 @@ -815,6 +872,7 @@ prepare0: archprepare FORCE
393 $(Q)$(MAKE) $(build)=.
394
395 # All the preparing..
396 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
397 prepare: prepare0
398
399 # Generate some files
400 @@ -922,6 +980,8 @@ all: modules
401 # using awk while concatenating to the final file.
402
403 PHONY += modules
404 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
405 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
406 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
407 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
408 @$(kecho) ' Building modules, stage 2.';
409 @@ -937,7 +997,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
410
411 # Target to prepare building external modules
412 PHONY += modules_prepare
413 -modules_prepare: prepare scripts
414 +modules_prepare: gcc-plugins prepare scripts
415
416 # Target to install modules
417 PHONY += modules_install
418 @@ -994,7 +1054,7 @@ CLEAN_DIRS += $(MODVERDIR)
419 MRPROPER_DIRS += include/config usr/include include/generated \
420 arch/*/include/generated
421 MRPROPER_FILES += .config .config.old .version .old_version \
422 - include/linux/version.h \
423 + include/linux/version.h tools/gcc/size_overflow_hash.h\
424 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
425
426 # clean - Delete most, but leave enough to build external modules
427 @@ -1032,6 +1092,7 @@ distclean: mrproper
428 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
429 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
430 -o -name '.*.rej' \
431 + -o -name '*.so' \
432 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
433 -type f -print | xargs rm -f
434
435 @@ -1192,6 +1253,8 @@ PHONY += $(module-dirs) modules
436 $(module-dirs): crmodverdir $(objtree)/Module.symvers
437 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
438
439 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
440 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
441 modules: $(module-dirs)
442 @$(kecho) ' Building modules, stage 2.';
443 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
444 @@ -1326,17 +1389,21 @@ else
445 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
446 endif
447
448 -%.s: %.c prepare scripts FORCE
449 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
450 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
451 +%.s: %.c gcc-plugins prepare scripts FORCE
452 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
453 %.i: %.c prepare scripts FORCE
454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
455 -%.o: %.c prepare scripts FORCE
456 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458 +%.o: %.c gcc-plugins prepare scripts FORCE
459 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
460 %.lst: %.c prepare scripts FORCE
461 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
462 -%.s: %.S prepare scripts FORCE
463 +%.s: %.S gcc-plugins prepare scripts FORCE
464 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
465 -%.o: %.S prepare scripts FORCE
466 +%.o: %.S gcc-plugins prepare scripts FORCE
467 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
468 %.symtypes: %.c prepare scripts FORCE
469 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
470 @@ -1346,11 +1413,15 @@ endif
471 $(cmd_crmodverdir)
472 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
473 $(build)=$(build-dir)
474 -%/: prepare scripts FORCE
475 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
476 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
477 +%/: gcc-plugins prepare scripts FORCE
478 $(cmd_crmodverdir)
479 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
480 $(build)=$(build-dir)
481 -%.ko: prepare scripts FORCE
482 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
483 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
484 +%.ko: gcc-plugins prepare scripts FORCE
485 $(cmd_crmodverdir)
486 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
487 $(build)=$(build-dir) $(@:.ko=.o)
488 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
489 index c2cbe4f..f7264b4 100644
490 --- a/arch/alpha/include/asm/atomic.h
491 +++ b/arch/alpha/include/asm/atomic.h
492 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
493 #define atomic_dec(v) atomic_sub(1,(v))
494 #define atomic64_dec(v) atomic64_sub(1,(v))
495
496 +#define atomic64_read_unchecked(v) atomic64_read(v)
497 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
498 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
499 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
500 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
501 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
502 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
503 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
504 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
505 +
506 #define smp_mb__before_atomic_dec() smp_mb()
507 #define smp_mb__after_atomic_dec() smp_mb()
508 #define smp_mb__before_atomic_inc() smp_mb()
509 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
510 index ad368a9..fbe0f25 100644
511 --- a/arch/alpha/include/asm/cache.h
512 +++ b/arch/alpha/include/asm/cache.h
513 @@ -4,19 +4,19 @@
514 #ifndef __ARCH_ALPHA_CACHE_H
515 #define __ARCH_ALPHA_CACHE_H
516
517 +#include <linux/const.h>
518
519 /* Bytes per L1 (data) cache line. */
520 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
521 -# define L1_CACHE_BYTES 64
522 # define L1_CACHE_SHIFT 6
523 #else
524 /* Both EV4 and EV5 are write-through, read-allocate,
525 direct-mapped, physical.
526 */
527 -# define L1_CACHE_BYTES 32
528 # define L1_CACHE_SHIFT 5
529 #endif
530
531 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
532 #define SMP_CACHE_BYTES L1_CACHE_BYTES
533
534 #endif
535 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
536 index 968d999..d36b2df 100644
537 --- a/arch/alpha/include/asm/elf.h
538 +++ b/arch/alpha/include/asm/elf.h
539 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
540
541 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
542
543 +#ifdef CONFIG_PAX_ASLR
544 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
545 +
546 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
547 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
548 +#endif
549 +
550 /* $0 is set by ld.so to a pointer to a function which might be
551 registered using atexit. This provides a mean for the dynamic
552 linker to call DT_FINI functions for shared libraries that have
553 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
554 index bc2a0da..8ad11ee 100644
555 --- a/arch/alpha/include/asm/pgalloc.h
556 +++ b/arch/alpha/include/asm/pgalloc.h
557 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
558 pgd_set(pgd, pmd);
559 }
560
561 +static inline void
562 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
563 +{
564 + pgd_populate(mm, pgd, pmd);
565 +}
566 +
567 extern pgd_t *pgd_alloc(struct mm_struct *mm);
568
569 static inline void
570 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
571 index 81a4342..348b927 100644
572 --- a/arch/alpha/include/asm/pgtable.h
573 +++ b/arch/alpha/include/asm/pgtable.h
574 @@ -102,6 +102,17 @@ struct vm_area_struct;
575 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
576 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
577 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
578 +
579 +#ifdef CONFIG_PAX_PAGEEXEC
580 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
581 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
582 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
583 +#else
584 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
585 +# define PAGE_COPY_NOEXEC PAGE_COPY
586 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
587 +#endif
588 +
589 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
590
591 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
592 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
593 index 2fd00b7..cfd5069 100644
594 --- a/arch/alpha/kernel/module.c
595 +++ b/arch/alpha/kernel/module.c
596 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
597
598 /* The small sections were sorted to the end of the segment.
599 The following should definitely cover them. */
600 - gp = (u64)me->module_core + me->core_size - 0x8000;
601 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
602 got = sechdrs[me->arch.gotsecindex].sh_addr;
603
604 for (i = 0; i < n; i++) {
605 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
606 index 98a1036..fb54ccf 100644
607 --- a/arch/alpha/kernel/osf_sys.c
608 +++ b/arch/alpha/kernel/osf_sys.c
609 @@ -1312,7 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
610 /* At this point: (!vma || addr < vma->vm_end). */
611 if (limit - len < addr)
612 return -ENOMEM;
613 - if (!vma || addr + len <= vma->vm_start)
614 + if (check_heap_stack_gap(vma, addr, len))
615 return addr;
616 addr = vma->vm_end;
617 vma = vma->vm_next;
618 @@ -1348,6 +1348,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
619 merely specific addresses, but regions of memory -- perhaps
620 this feature should be incorporated into all ports? */
621
622 +#ifdef CONFIG_PAX_RANDMMAP
623 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
624 +#endif
625 +
626 if (addr) {
627 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
628 if (addr != (unsigned long) -ENOMEM)
629 @@ -1355,8 +1359,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
630 }
631
632 /* Next, try allocating at TASK_UNMAPPED_BASE. */
633 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
634 - len, limit);
635 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
636 +
637 if (addr != (unsigned long) -ENOMEM)
638 return addr;
639
640 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641 index 5eecab1..609abc0 100644
642 --- a/arch/alpha/mm/fault.c
643 +++ b/arch/alpha/mm/fault.c
644 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648 +#ifdef CONFIG_PAX_PAGEEXEC
649 +/*
650 + * PaX: decide what to do with offenders (regs->pc = fault address)
651 + *
652 + * returns 1 when task should be killed
653 + * 2 when patched PLT trampoline was detected
654 + * 3 when unpatched PLT trampoline was detected
655 + */
656 +static int pax_handle_fetch_fault(struct pt_regs *regs)
657 +{
658 +
659 +#ifdef CONFIG_PAX_EMUPLT
660 + int err;
661 +
662 + do { /* PaX: patched PLT emulation #1 */
663 + unsigned int ldah, ldq, jmp;
664 +
665 + err = get_user(ldah, (unsigned int *)regs->pc);
666 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668 +
669 + if (err)
670 + break;
671 +
672 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674 + jmp == 0x6BFB0000U)
675 + {
676 + unsigned long r27, addr;
677 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679 +
680 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681 + err = get_user(r27, (unsigned long *)addr);
682 + if (err)
683 + break;
684 +
685 + regs->r27 = r27;
686 + regs->pc = r27;
687 + return 2;
688 + }
689 + } while (0);
690 +
691 + do { /* PaX: patched PLT emulation #2 */
692 + unsigned int ldah, lda, br;
693 +
694 + err = get_user(ldah, (unsigned int *)regs->pc);
695 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
696 + err |= get_user(br, (unsigned int *)(regs->pc+8));
697 +
698 + if (err)
699 + break;
700 +
701 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
703 + (br & 0xFFE00000U) == 0xC3E00000U)
704 + {
705 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708 +
709 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711 + return 2;
712 + }
713 + } while (0);
714 +
715 + do { /* PaX: unpatched PLT emulation */
716 + unsigned int br;
717 +
718 + err = get_user(br, (unsigned int *)regs->pc);
719 +
720 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721 + unsigned int br2, ldq, nop, jmp;
722 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723 +
724 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725 + err = get_user(br2, (unsigned int *)addr);
726 + err |= get_user(ldq, (unsigned int *)(addr+4));
727 + err |= get_user(nop, (unsigned int *)(addr+8));
728 + err |= get_user(jmp, (unsigned int *)(addr+12));
729 + err |= get_user(resolver, (unsigned long *)(addr+16));
730 +
731 + if (err)
732 + break;
733 +
734 + if (br2 == 0xC3600000U &&
735 + ldq == 0xA77B000CU &&
736 + nop == 0x47FF041FU &&
737 + jmp == 0x6B7B0000U)
738 + {
739 + regs->r28 = regs->pc+4;
740 + regs->r27 = addr+16;
741 + regs->pc = resolver;
742 + return 3;
743 + }
744 + }
745 + } while (0);
746 +#endif
747 +
748 + return 1;
749 +}
750 +
751 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752 +{
753 + unsigned long i;
754 +
755 + printk(KERN_ERR "PAX: bytes at PC: ");
756 + for (i = 0; i < 5; i++) {
757 + unsigned int c;
758 + if (get_user(c, (unsigned int *)pc+i))
759 + printk(KERN_CONT "???????? ");
760 + else
761 + printk(KERN_CONT "%08x ", c);
762 + }
763 + printk("\n");
764 +}
765 +#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773 - if (!(vma->vm_flags & VM_EXEC))
774 + if (!(vma->vm_flags & VM_EXEC)) {
775 +
776 +#ifdef CONFIG_PAX_PAGEEXEC
777 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778 + goto bad_area;
779 +
780 + up_read(&mm->mmap_sem);
781 + switch (pax_handle_fetch_fault(regs)) {
782 +
783 +#ifdef CONFIG_PAX_EMUPLT
784 + case 2:
785 + case 3:
786 + return;
787 +#endif
788 +
789 + }
790 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791 + do_group_exit(SIGKILL);
792 +#else
793 goto bad_area;
794 +#endif
795 +
796 + }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
801 index c79f61f..9ac0642 100644
802 --- a/arch/arm/include/asm/atomic.h
803 +++ b/arch/arm/include/asm/atomic.h
804 @@ -17,17 +17,35 @@
805 #include <asm/barrier.h>
806 #include <asm/cmpxchg.h>
807
808 +#ifdef CONFIG_GENERIC_ATOMIC64
809 +#include <asm-generic/atomic64.h>
810 +#endif
811 +
812 #define ATOMIC_INIT(i) { (i) }
813
814 #ifdef __KERNEL__
815
816 +#define _ASM_EXTABLE(from, to) \
817 +" .pushsection __ex_table,\"a\"\n"\
818 +" .align 3\n" \
819 +" .long " #from ", " #to"\n" \
820 +" .popsection"
821 +
822 /*
823 * On ARM, ordinary assignment (str instruction) doesn't clear the local
824 * strex/ldrex monitor on some implementations. The reason we can use it for
825 * atomic_set() is the clrex or dummy strex done on every exception return.
826 */
827 #define atomic_read(v) (*(volatile int *)&(v)->counter)
828 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
829 +{
830 + return v->counter;
831 +}
832 #define atomic_set(v,i) (((v)->counter) = (i))
833 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
834 +{
835 + v->counter = i;
836 +}
837
838 #if __LINUX_ARM_ARCH__ >= 6
839
840 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
841 int result;
842
843 __asm__ __volatile__("@ atomic_add\n"
844 +"1: ldrex %1, [%3]\n"
845 +" adds %0, %1, %4\n"
846 +
847 +#ifdef CONFIG_PAX_REFCOUNT
848 +" bvc 3f\n"
849 +"2: bkpt 0xf103\n"
850 +"3:\n"
851 +#endif
852 +
853 +" strex %1, %0, [%3]\n"
854 +" teq %1, #0\n"
855 +" bne 1b"
856 +
857 +#ifdef CONFIG_PAX_REFCOUNT
858 +"\n4:\n"
859 + _ASM_EXTABLE(2b, 4b)
860 +#endif
861 +
862 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
863 + : "r" (&v->counter), "Ir" (i)
864 + : "cc");
865 +}
866 +
867 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
868 +{
869 + unsigned long tmp;
870 + int result;
871 +
872 + __asm__ __volatile__("@ atomic_add_unchecked\n"
873 "1: ldrex %0, [%3]\n"
874 " add %0, %0, %4\n"
875 " strex %1, %0, [%3]\n"
876 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
877 smp_mb();
878
879 __asm__ __volatile__("@ atomic_add_return\n"
880 +"1: ldrex %1, [%3]\n"
881 +" adds %0, %1, %4\n"
882 +
883 +#ifdef CONFIG_PAX_REFCOUNT
884 +" bvc 3f\n"
885 +" mov %0, %1\n"
886 +"2: bkpt 0xf103\n"
887 +"3:\n"
888 +#endif
889 +
890 +" strex %1, %0, [%3]\n"
891 +" teq %1, #0\n"
892 +" bne 1b"
893 +
894 +#ifdef CONFIG_PAX_REFCOUNT
895 +"\n4:\n"
896 + _ASM_EXTABLE(2b, 4b)
897 +#endif
898 +
899 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
900 + : "r" (&v->counter), "Ir" (i)
901 + : "cc");
902 +
903 + smp_mb();
904 +
905 + return result;
906 +}
907 +
908 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
909 +{
910 + unsigned long tmp;
911 + int result;
912 +
913 + smp_mb();
914 +
915 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
916 "1: ldrex %0, [%3]\n"
917 " add %0, %0, %4\n"
918 " strex %1, %0, [%3]\n"
919 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
920 int result;
921
922 __asm__ __volatile__("@ atomic_sub\n"
923 +"1: ldrex %1, [%3]\n"
924 +" subs %0, %1, %4\n"
925 +
926 +#ifdef CONFIG_PAX_REFCOUNT
927 +" bvc 3f\n"
928 +"2: bkpt 0xf103\n"
929 +"3:\n"
930 +#endif
931 +
932 +" strex %1, %0, [%3]\n"
933 +" teq %1, #0\n"
934 +" bne 1b"
935 +
936 +#ifdef CONFIG_PAX_REFCOUNT
937 +"\n4:\n"
938 + _ASM_EXTABLE(2b, 4b)
939 +#endif
940 +
941 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 + : "r" (&v->counter), "Ir" (i)
943 + : "cc");
944 +}
945 +
946 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
947 +{
948 + unsigned long tmp;
949 + int result;
950 +
951 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
952 "1: ldrex %0, [%3]\n"
953 " sub %0, %0, %4\n"
954 " strex %1, %0, [%3]\n"
955 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
956 smp_mb();
957
958 __asm__ __volatile__("@ atomic_sub_return\n"
959 -"1: ldrex %0, [%3]\n"
960 -" sub %0, %0, %4\n"
961 +"1: ldrex %1, [%3]\n"
962 +" subs %0, %1, %4\n"
963 +
964 +#ifdef CONFIG_PAX_REFCOUNT
965 +" bvc 3f\n"
966 +" mov %0, %1\n"
967 +"2: bkpt 0xf103\n"
968 +"3:\n"
969 +#endif
970 +
971 " strex %1, %0, [%3]\n"
972 " teq %1, #0\n"
973 " bne 1b"
974 +
975 +#ifdef CONFIG_PAX_REFCOUNT
976 +"\n4:\n"
977 + _ASM_EXTABLE(2b, 4b)
978 +#endif
979 +
980 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
981 : "r" (&v->counter), "Ir" (i)
982 : "cc");
983 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
984 return oldval;
985 }
986
987 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
988 +{
989 + unsigned long oldval, res;
990 +
991 + smp_mb();
992 +
993 + do {
994 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
995 + "ldrex %1, [%3]\n"
996 + "mov %0, #0\n"
997 + "teq %1, %4\n"
998 + "strexeq %0, %5, [%3]\n"
999 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1000 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1001 + : "cc");
1002 + } while (res);
1003 +
1004 + smp_mb();
1005 +
1006 + return oldval;
1007 +}
1008 +
1009 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1010 {
1011 unsigned long tmp, tmp2;
1012 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1013
1014 return val;
1015 }
1016 +
1017 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1018 +{
1019 + return atomic_add_return(i, v);
1020 +}
1021 +
1022 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1023 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1024 +{
1025 + (void) atomic_add_return(i, v);
1026 +}
1027
1028 static inline int atomic_sub_return(int i, atomic_t *v)
1029 {
1030 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1031 return val;
1032 }
1033 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1034 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1035 +{
1036 + (void) atomic_sub_return(i, v);
1037 +}
1038
1039 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1040 {
1041 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1042 return ret;
1043 }
1044
1045 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1046 +{
1047 + return atomic_cmpxchg(v, old, new);
1048 +}
1049 +
1050 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1051 {
1052 unsigned long flags;
1053 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 #endif /* __LINUX_ARM_ARCH__ */
1055
1056 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1057 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1058 +{
1059 + return xchg(&v->counter, new);
1060 +}
1061
1062 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1063 {
1064 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1065 }
1066
1067 #define atomic_inc(v) atomic_add(1, v)
1068 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1069 +{
1070 + atomic_add_unchecked(1, v);
1071 +}
1072 #define atomic_dec(v) atomic_sub(1, v)
1073 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1074 +{
1075 + atomic_sub_unchecked(1, v);
1076 +}
1077
1078 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1079 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1080 +{
1081 + return atomic_add_return_unchecked(1, v) == 0;
1082 +}
1083 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1084 #define atomic_inc_return(v) (atomic_add_return(1, v))
1085 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1086 +{
1087 + return atomic_add_return_unchecked(1, v);
1088 +}
1089 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1090 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1091
1092 @@ -241,6 +428,14 @@ typedef struct {
1093 u64 __aligned(8) counter;
1094 } atomic64_t;
1095
1096 +#ifdef CONFIG_PAX_REFCOUNT
1097 +typedef struct {
1098 + u64 __aligned(8) counter;
1099 +} atomic64_unchecked_t;
1100 +#else
1101 +typedef atomic64_t atomic64_unchecked_t;
1102 +#endif
1103 +
1104 #define ATOMIC64_INIT(i) { (i) }
1105
1106 static inline u64 atomic64_read(const atomic64_t *v)
1107 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1108 return result;
1109 }
1110
1111 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1112 +{
1113 + u64 result;
1114 +
1115 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1116 +" ldrexd %0, %H0, [%1]"
1117 + : "=&r" (result)
1118 + : "r" (&v->counter), "Qo" (v->counter)
1119 + );
1120 +
1121 + return result;
1122 +}
1123 +
1124 static inline void atomic64_set(atomic64_t *v, u64 i)
1125 {
1126 u64 tmp;
1127 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1128 : "cc");
1129 }
1130
1131 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1132 +{
1133 + u64 tmp;
1134 +
1135 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1136 +"1: ldrexd %0, %H0, [%2]\n"
1137 +" strexd %0, %3, %H3, [%2]\n"
1138 +" teq %0, #0\n"
1139 +" bne 1b"
1140 + : "=&r" (tmp), "=Qo" (v->counter)
1141 + : "r" (&v->counter), "r" (i)
1142 + : "cc");
1143 +}
1144 +
1145 static inline void atomic64_add(u64 i, atomic64_t *v)
1146 {
1147 u64 result;
1148 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1149 __asm__ __volatile__("@ atomic64_add\n"
1150 "1: ldrexd %0, %H0, [%3]\n"
1151 " adds %0, %0, %4\n"
1152 +" adcs %H0, %H0, %H4\n"
1153 +
1154 +#ifdef CONFIG_PAX_REFCOUNT
1155 +" bvc 3f\n"
1156 +"2: bkpt 0xf103\n"
1157 +"3:\n"
1158 +#endif
1159 +
1160 +" strexd %1, %0, %H0, [%3]\n"
1161 +" teq %1, #0\n"
1162 +" bne 1b"
1163 +
1164 +#ifdef CONFIG_PAX_REFCOUNT
1165 +"\n4:\n"
1166 + _ASM_EXTABLE(2b, 4b)
1167 +#endif
1168 +
1169 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1170 + : "r" (&v->counter), "r" (i)
1171 + : "cc");
1172 +}
1173 +
1174 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1175 +{
1176 + u64 result;
1177 + unsigned long tmp;
1178 +
1179 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1180 +"1: ldrexd %0, %H0, [%3]\n"
1181 +" adds %0, %0, %4\n"
1182 " adc %H0, %H0, %H4\n"
1183 " strexd %1, %0, %H0, [%3]\n"
1184 " teq %1, #0\n"
1185 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1186
1187 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1188 {
1189 - u64 result;
1190 - unsigned long tmp;
1191 + u64 result, tmp;
1192
1193 smp_mb();
1194
1195 __asm__ __volatile__("@ atomic64_add_return\n"
1196 +"1: ldrexd %1, %H1, [%3]\n"
1197 +" adds %0, %1, %4\n"
1198 +" adcs %H0, %H1, %H4\n"
1199 +
1200 +#ifdef CONFIG_PAX_REFCOUNT
1201 +" bvc 3f\n"
1202 +" mov %0, %1\n"
1203 +" mov %H0, %H1\n"
1204 +"2: bkpt 0xf103\n"
1205 +"3:\n"
1206 +#endif
1207 +
1208 +" strexd %1, %0, %H0, [%3]\n"
1209 +" teq %1, #0\n"
1210 +" bne 1b"
1211 +
1212 +#ifdef CONFIG_PAX_REFCOUNT
1213 +"\n4:\n"
1214 + _ASM_EXTABLE(2b, 4b)
1215 +#endif
1216 +
1217 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1218 + : "r" (&v->counter), "r" (i)
1219 + : "cc");
1220 +
1221 + smp_mb();
1222 +
1223 + return result;
1224 +}
1225 +
1226 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1227 +{
1228 + u64 result;
1229 + unsigned long tmp;
1230 +
1231 + smp_mb();
1232 +
1233 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1234 "1: ldrexd %0, %H0, [%3]\n"
1235 " adds %0, %0, %4\n"
1236 " adc %H0, %H0, %H4\n"
1237 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1238 __asm__ __volatile__("@ atomic64_sub\n"
1239 "1: ldrexd %0, %H0, [%3]\n"
1240 " subs %0, %0, %4\n"
1241 +" sbcs %H0, %H0, %H4\n"
1242 +
1243 +#ifdef CONFIG_PAX_REFCOUNT
1244 +" bvc 3f\n"
1245 +"2: bkpt 0xf103\n"
1246 +"3:\n"
1247 +#endif
1248 +
1249 +" strexd %1, %0, %H0, [%3]\n"
1250 +" teq %1, #0\n"
1251 +" bne 1b"
1252 +
1253 +#ifdef CONFIG_PAX_REFCOUNT
1254 +"\n4:\n"
1255 + _ASM_EXTABLE(2b, 4b)
1256 +#endif
1257 +
1258 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1259 + : "r" (&v->counter), "r" (i)
1260 + : "cc");
1261 +}
1262 +
1263 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1264 +{
1265 + u64 result;
1266 + unsigned long tmp;
1267 +
1268 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1269 +"1: ldrexd %0, %H0, [%3]\n"
1270 +" subs %0, %0, %4\n"
1271 " sbc %H0, %H0, %H4\n"
1272 " strexd %1, %0, %H0, [%3]\n"
1273 " teq %1, #0\n"
1274 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1275
1276 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1277 {
1278 - u64 result;
1279 - unsigned long tmp;
1280 + u64 result, tmp;
1281
1282 smp_mb();
1283
1284 __asm__ __volatile__("@ atomic64_sub_return\n"
1285 -"1: ldrexd %0, %H0, [%3]\n"
1286 -" subs %0, %0, %4\n"
1287 -" sbc %H0, %H0, %H4\n"
1288 +"1: ldrexd %1, %H1, [%3]\n"
1289 +" subs %0, %1, %4\n"
1290 +" sbcs %H0, %H1, %H4\n"
1291 +
1292 +#ifdef CONFIG_PAX_REFCOUNT
1293 +" bvc 3f\n"
1294 +" mov %0, %1\n"
1295 +" mov %H0, %H1\n"
1296 +"2: bkpt 0xf103\n"
1297 +"3:\n"
1298 +#endif
1299 +
1300 " strexd %1, %0, %H0, [%3]\n"
1301 " teq %1, #0\n"
1302 " bne 1b"
1303 +
1304 +#ifdef CONFIG_PAX_REFCOUNT
1305 +"\n4:\n"
1306 + _ASM_EXTABLE(2b, 4b)
1307 +#endif
1308 +
1309 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1310 : "r" (&v->counter), "r" (i)
1311 : "cc");
1312 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1313 return oldval;
1314 }
1315
1316 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1317 +{
1318 + u64 oldval;
1319 + unsigned long res;
1320 +
1321 + smp_mb();
1322 +
1323 + do {
1324 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1325 + "ldrexd %1, %H1, [%3]\n"
1326 + "mov %0, #0\n"
1327 + "teq %1, %4\n"
1328 + "teqeq %H1, %H4\n"
1329 + "strexdeq %0, %5, %H5, [%3]"
1330 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1331 + : "r" (&ptr->counter), "r" (old), "r" (new)
1332 + : "cc");
1333 + } while (res);
1334 +
1335 + smp_mb();
1336 +
1337 + return oldval;
1338 +}
1339 +
1340 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1341 {
1342 u64 result;
1343 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1344
1345 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1346 {
1347 - u64 result;
1348 - unsigned long tmp;
1349 + u64 result, tmp;
1350
1351 smp_mb();
1352
1353 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1354 -"1: ldrexd %0, %H0, [%3]\n"
1355 -" subs %0, %0, #1\n"
1356 -" sbc %H0, %H0, #0\n"
1357 +"1: ldrexd %1, %H1, [%3]\n"
1358 +" subs %0, %1, #1\n"
1359 +" sbcs %H0, %H1, #0\n"
1360 +
1361 +#ifdef CONFIG_PAX_REFCOUNT
1362 +" bvc 3f\n"
1363 +" mov %0, %1\n"
1364 +" mov %H0, %H1\n"
1365 +"2: bkpt 0xf103\n"
1366 +"3:\n"
1367 +#endif
1368 +
1369 " teq %H0, #0\n"
1370 -" bmi 2f\n"
1371 +" bmi 4f\n"
1372 " strexd %1, %0, %H0, [%3]\n"
1373 " teq %1, #0\n"
1374 " bne 1b\n"
1375 -"2:"
1376 +"4:\n"
1377 +
1378 +#ifdef CONFIG_PAX_REFCOUNT
1379 + _ASM_EXTABLE(2b, 4b)
1380 +#endif
1381 +
1382 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1383 : "r" (&v->counter)
1384 : "cc");
1385 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1386 " teq %0, %5\n"
1387 " teqeq %H0, %H5\n"
1388 " moveq %1, #0\n"
1389 -" beq 2f\n"
1390 +" beq 4f\n"
1391 " adds %0, %0, %6\n"
1392 -" adc %H0, %H0, %H6\n"
1393 +" adcs %H0, %H0, %H6\n"
1394 +
1395 +#ifdef CONFIG_PAX_REFCOUNT
1396 +" bvc 3f\n"
1397 +"2: bkpt 0xf103\n"
1398 +"3:\n"
1399 +#endif
1400 +
1401 " strexd %2, %0, %H0, [%4]\n"
1402 " teq %2, #0\n"
1403 " bne 1b\n"
1404 -"2:"
1405 +"4:\n"
1406 +
1407 +#ifdef CONFIG_PAX_REFCOUNT
1408 + _ASM_EXTABLE(2b, 4b)
1409 +#endif
1410 +
1411 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1412 : "r" (&v->counter), "r" (u), "r" (a)
1413 : "cc");
1414 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1415
1416 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1417 #define atomic64_inc(v) atomic64_add(1LL, (v))
1418 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1419 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1420 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1421 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1422 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1423 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1424 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1425 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1426 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1427 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1428 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1429 index 75fe66b..2255c86 100644
1430 --- a/arch/arm/include/asm/cache.h
1431 +++ b/arch/arm/include/asm/cache.h
1432 @@ -4,8 +4,10 @@
1433 #ifndef __ASMARM_CACHE_H
1434 #define __ASMARM_CACHE_H
1435
1436 +#include <linux/const.h>
1437 +
1438 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1439 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1440 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1441
1442 /*
1443 * Memory returned by kmalloc() may be used for DMA, so we must make
1444 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1445 index e4448e1..7bc86b7 100644
1446 --- a/arch/arm/include/asm/cacheflush.h
1447 +++ b/arch/arm/include/asm/cacheflush.h
1448 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1449 void (*dma_unmap_area)(const void *, size_t, int);
1450
1451 void (*dma_flush_range)(const void *, const void *);
1452 -};
1453 +} __no_const;
1454
1455 /*
1456 * Select the calling method
1457 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1458 index 7eb18c1..e38b6d2 100644
1459 --- a/arch/arm/include/asm/cmpxchg.h
1460 +++ b/arch/arm/include/asm/cmpxchg.h
1461 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1462
1463 #define xchg(ptr,x) \
1464 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1465 +#define xchg_unchecked(ptr,x) \
1466 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1467
1468 #include <asm-generic/cmpxchg-local.h>
1469
1470 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1471 index 38050b1..9d90e8b 100644
1472 --- a/arch/arm/include/asm/elf.h
1473 +++ b/arch/arm/include/asm/elf.h
1474 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1475 the loader. We need to make sure that it is out of the way of the program
1476 that it will "exec", and that there is sufficient room for the brk. */
1477
1478 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1479 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1480 +
1481 +#ifdef CONFIG_PAX_ASLR
1482 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1483 +
1484 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1485 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1486 +#endif
1487
1488 /* When the program starts, a1 contains a pointer to a function to be
1489 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1490 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1491 extern void elf_set_personality(const struct elf32_hdr *);
1492 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1493
1494 -struct mm_struct;
1495 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1496 -#define arch_randomize_brk arch_randomize_brk
1497 -
1498 #endif
1499 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1500 index e51b1e8..32a3113 100644
1501 --- a/arch/arm/include/asm/kmap_types.h
1502 +++ b/arch/arm/include/asm/kmap_types.h
1503 @@ -21,6 +21,7 @@ enum km_type {
1504 KM_L1_CACHE,
1505 KM_L2_CACHE,
1506 KM_KDB,
1507 + KM_CLEARPAGE,
1508 KM_TYPE_NR
1509 };
1510
1511 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1512 index 53426c6..c7baff3 100644
1513 --- a/arch/arm/include/asm/outercache.h
1514 +++ b/arch/arm/include/asm/outercache.h
1515 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1516 #endif
1517 void (*set_debug)(unsigned long);
1518 void (*resume)(void);
1519 -};
1520 +} __no_const;
1521
1522 #ifdef CONFIG_OUTER_CACHE
1523
1524 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1525 index ecf9019..b71d9a1 100644
1526 --- a/arch/arm/include/asm/page.h
1527 +++ b/arch/arm/include/asm/page.h
1528 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1529 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1530 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1531 unsigned long vaddr, struct vm_area_struct *vma);
1532 -};
1533 +} __no_const;
1534
1535 #ifdef MULTI_USER
1536 extern struct cpu_user_fns cpu_user;
1537 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1538 index 943504f..bf8d667 100644
1539 --- a/arch/arm/include/asm/pgalloc.h
1540 +++ b/arch/arm/include/asm/pgalloc.h
1541 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1542 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1543 }
1544
1545 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1546 +{
1547 + pud_populate(mm, pud, pmd);
1548 +}
1549 +
1550 #else /* !CONFIG_ARM_LPAE */
1551
1552 /*
1553 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1554 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1555 #define pmd_free(mm, pmd) do { } while (0)
1556 #define pud_populate(mm,pmd,pte) BUG()
1557 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1558
1559 #endif /* CONFIG_ARM_LPAE */
1560
1561 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1562 index af7b0bd..6750a8c 100644
1563 --- a/arch/arm/include/asm/thread_info.h
1564 +++ b/arch/arm/include/asm/thread_info.h
1565 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1566 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1567 #define TIF_SYSCALL_TRACE 8
1568 #define TIF_SYSCALL_AUDIT 9
1569 +
1570 +/* within 8 bits of TIF_SYSCALL_TRACE
1571 + to meet flexible second operand requirements
1572 +*/
1573 +#define TIF_GRSEC_SETXID 10
1574 +
1575 #define TIF_POLLING_NRFLAG 16
1576 #define TIF_USING_IWMMXT 17
1577 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1578 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1579 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
1580 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1581 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1582 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1583
1584 /* Checks for any syscall work in entry-common.S */
1585 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1586 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1587 + _TIF_GRSEC_SETXID)
1588
1589 /*
1590 * Change these and you break ASM code in entry-common.S
1591 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1592 index 0a070e9..9e9e129 100644
1593 --- a/arch/arm/include/asm/uaccess.h
1594 +++ b/arch/arm/include/asm/uaccess.h
1595 @@ -22,6 +22,8 @@
1596 #define VERIFY_READ 0
1597 #define VERIFY_WRITE 1
1598
1599 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1600 +
1601 /*
1602 * The exception table consists of pairs of addresses: the first is the
1603 * address of an instruction that is allowed to fault, and the second is
1604 @@ -401,8 +403,23 @@ do { \
1605
1606
1607 #ifdef CONFIG_MMU
1608 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1609 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1610 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1611 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1612 +
1613 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1614 +{
1615 + if (!__builtin_constant_p(n))
1616 + check_object_size(to, n, false);
1617 + return ___copy_from_user(to, from, n);
1618 +}
1619 +
1620 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1621 +{
1622 + if (!__builtin_constant_p(n))
1623 + check_object_size(from, n, true);
1624 + return ___copy_to_user(to, from, n);
1625 +}
1626 +
1627 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1628 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1629 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1630 @@ -417,6 +434,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1631
1632 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1633 {
1634 + if ((long)n < 0)
1635 + return n;
1636 +
1637 if (access_ok(VERIFY_READ, from, n))
1638 n = __copy_from_user(to, from, n);
1639 else /* security hole - plug it */
1640 @@ -426,6 +446,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1641
1642 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1643 {
1644 + if ((long)n < 0)
1645 + return n;
1646 +
1647 if (access_ok(VERIFY_WRITE, to, n))
1648 n = __copy_to_user(to, from, n);
1649 return n;
1650 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1651 index b57c75e..ed2d6b2 100644
1652 --- a/arch/arm/kernel/armksyms.c
1653 +++ b/arch/arm/kernel/armksyms.c
1654 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1655 #ifdef CONFIG_MMU
1656 EXPORT_SYMBOL(copy_page);
1657
1658 -EXPORT_SYMBOL(__copy_from_user);
1659 -EXPORT_SYMBOL(__copy_to_user);
1660 +EXPORT_SYMBOL(___copy_from_user);
1661 +EXPORT_SYMBOL(___copy_to_user);
1662 EXPORT_SYMBOL(__clear_user);
1663
1664 EXPORT_SYMBOL(__get_user_1);
1665 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1666 index 693b744..e684262 100644
1667 --- a/arch/arm/kernel/process.c
1668 +++ b/arch/arm/kernel/process.c
1669 @@ -28,7 +28,6 @@
1670 #include <linux/tick.h>
1671 #include <linux/utsname.h>
1672 #include <linux/uaccess.h>
1673 -#include <linux/random.h>
1674 #include <linux/hw_breakpoint.h>
1675 #include <linux/cpuidle.h>
1676
1677 @@ -256,9 +255,10 @@ void machine_power_off(void)
1678 machine_shutdown();
1679 if (pm_power_off)
1680 pm_power_off();
1681 + BUG();
1682 }
1683
1684 -void machine_restart(char *cmd)
1685 +__noreturn void machine_restart(char *cmd)
1686 {
1687 machine_shutdown();
1688
1689 @@ -501,12 +501,6 @@ unsigned long get_wchan(struct task_struct *p)
1690 return 0;
1691 }
1692
1693 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1694 -{
1695 - unsigned long range_end = mm->brk + 0x02000000;
1696 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1697 -}
1698 -
1699 #ifdef CONFIG_MMU
1700 /*
1701 * The vectors page is always readable from user space for the
1702 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1703 index 14e3826..d832d89 100644
1704 --- a/arch/arm/kernel/ptrace.c
1705 +++ b/arch/arm/kernel/ptrace.c
1706 @@ -907,10 +907,19 @@ long arch_ptrace(struct task_struct *child, long request,
1707 return ret;
1708 }
1709
1710 +#ifdef CONFIG_GRKERNSEC_SETXID
1711 +extern void gr_delayed_cred_worker(void);
1712 +#endif
1713 +
1714 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1715 {
1716 unsigned long ip;
1717
1718 +#ifdef CONFIG_GRKERNSEC_SETXID
1719 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1720 + gr_delayed_cred_worker();
1721 +#endif
1722 +
1723 if (why)
1724 audit_syscall_exit(regs);
1725 else
1726 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1727 index e15d83b..8c466dd 100644
1728 --- a/arch/arm/kernel/setup.c
1729 +++ b/arch/arm/kernel/setup.c
1730 @@ -112,13 +112,13 @@ struct processor processor __read_mostly;
1731 struct cpu_tlb_fns cpu_tlb __read_mostly;
1732 #endif
1733 #ifdef MULTI_USER
1734 -struct cpu_user_fns cpu_user __read_mostly;
1735 +struct cpu_user_fns cpu_user __read_only;
1736 #endif
1737 #ifdef MULTI_CACHE
1738 -struct cpu_cache_fns cpu_cache __read_mostly;
1739 +struct cpu_cache_fns cpu_cache __read_only;
1740 #endif
1741 #ifdef CONFIG_OUTER_CACHE
1742 -struct outer_cache_fns outer_cache __read_mostly;
1743 +struct outer_cache_fns outer_cache __read_only;
1744 EXPORT_SYMBOL(outer_cache);
1745 #endif
1746
1747 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1748 index eeb7520..1cbe902 100644
1749 --- a/arch/arm/kernel/traps.c
1750 +++ b/arch/arm/kernel/traps.c
1751 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1752
1753 static DEFINE_RAW_SPINLOCK(die_lock);
1754
1755 +extern void gr_handle_kernel_exploit(void);
1756 +
1757 /*
1758 * This function is protected against re-entrancy.
1759 */
1760 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1761 panic("Fatal exception in interrupt");
1762 if (panic_on_oops)
1763 panic("Fatal exception");
1764 +
1765 + gr_handle_kernel_exploit();
1766 +
1767 if (ret != NOTIFY_STOP)
1768 do_exit(SIGSEGV);
1769 }
1770 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1771 index 66a477a..bee61d3 100644
1772 --- a/arch/arm/lib/copy_from_user.S
1773 +++ b/arch/arm/lib/copy_from_user.S
1774 @@ -16,7 +16,7 @@
1775 /*
1776 * Prototype:
1777 *
1778 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1779 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1780 *
1781 * Purpose:
1782 *
1783 @@ -84,11 +84,11 @@
1784
1785 .text
1786
1787 -ENTRY(__copy_from_user)
1788 +ENTRY(___copy_from_user)
1789
1790 #include "copy_template.S"
1791
1792 -ENDPROC(__copy_from_user)
1793 +ENDPROC(___copy_from_user)
1794
1795 .pushsection .fixup,"ax"
1796 .align 0
1797 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1798 index 6ee2f67..d1cce76 100644
1799 --- a/arch/arm/lib/copy_page.S
1800 +++ b/arch/arm/lib/copy_page.S
1801 @@ -10,6 +10,7 @@
1802 * ASM optimised string functions
1803 */
1804 #include <linux/linkage.h>
1805 +#include <linux/const.h>
1806 #include <asm/assembler.h>
1807 #include <asm/asm-offsets.h>
1808 #include <asm/cache.h>
1809 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1810 index d066df6..df28194 100644
1811 --- a/arch/arm/lib/copy_to_user.S
1812 +++ b/arch/arm/lib/copy_to_user.S
1813 @@ -16,7 +16,7 @@
1814 /*
1815 * Prototype:
1816 *
1817 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1818 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1819 *
1820 * Purpose:
1821 *
1822 @@ -88,11 +88,11 @@
1823 .text
1824
1825 ENTRY(__copy_to_user_std)
1826 -WEAK(__copy_to_user)
1827 +WEAK(___copy_to_user)
1828
1829 #include "copy_template.S"
1830
1831 -ENDPROC(__copy_to_user)
1832 +ENDPROC(___copy_to_user)
1833 ENDPROC(__copy_to_user_std)
1834
1835 .pushsection .fixup,"ax"
1836 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1837 index 025f742..8432b08 100644
1838 --- a/arch/arm/lib/uaccess_with_memcpy.c
1839 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1840 @@ -104,7 +104,7 @@ out:
1841 }
1842
1843 unsigned long
1844 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1845 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1846 {
1847 /*
1848 * This test is stubbed out of the main function above to keep
1849 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
1850 index f261cd2..4ae63fb 100644
1851 --- a/arch/arm/mach-kirkwood/common.c
1852 +++ b/arch/arm/mach-kirkwood/common.c
1853 @@ -128,7 +128,7 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
1854 clk_gate_ops.disable(hw);
1855 }
1856
1857 -static struct clk_ops clk_gate_fn_ops;
1858 +static clk_ops_no_const clk_gate_fn_ops;
1859
1860 static struct clk __init *clk_register_gate_fn(struct device *dev,
1861 const char *name,
1862 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1863 index 2c5d0ed..7d9099c 100644
1864 --- a/arch/arm/mach-omap2/board-n8x0.c
1865 +++ b/arch/arm/mach-omap2/board-n8x0.c
1866 @@ -594,7 +594,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1867 }
1868 #endif
1869
1870 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1871 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1872 .late_init = n8x0_menelaus_late_init,
1873 };
1874
1875 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1876 index c3bd834..e81ef02 100644
1877 --- a/arch/arm/mm/fault.c
1878 +++ b/arch/arm/mm/fault.c
1879 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1880 }
1881 #endif
1882
1883 +#ifdef CONFIG_PAX_PAGEEXEC
1884 + if (fsr & FSR_LNX_PF) {
1885 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1886 + do_group_exit(SIGKILL);
1887 + }
1888 +#endif
1889 +
1890 tsk->thread.address = addr;
1891 tsk->thread.error_code = fsr;
1892 tsk->thread.trap_no = 14;
1893 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1894 }
1895 #endif /* CONFIG_MMU */
1896
1897 +#ifdef CONFIG_PAX_PAGEEXEC
1898 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1899 +{
1900 + long i;
1901 +
1902 + printk(KERN_ERR "PAX: bytes at PC: ");
1903 + for (i = 0; i < 20; i++) {
1904 + unsigned char c;
1905 + if (get_user(c, (__force unsigned char __user *)pc+i))
1906 + printk(KERN_CONT "?? ");
1907 + else
1908 + printk(KERN_CONT "%02x ", c);
1909 + }
1910 + printk("\n");
1911 +
1912 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1913 + for (i = -1; i < 20; i++) {
1914 + unsigned long c;
1915 + if (get_user(c, (__force unsigned long __user *)sp+i))
1916 + printk(KERN_CONT "???????? ");
1917 + else
1918 + printk(KERN_CONT "%08lx ", c);
1919 + }
1920 + printk("\n");
1921 +}
1922 +#endif
1923 +
1924 /*
1925 * First Level Translation Fault Handler
1926 *
1927 @@ -574,6 +608,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1928 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1929 struct siginfo info;
1930
1931 +#ifdef CONFIG_PAX_REFCOUNT
1932 + if (fsr_fs(ifsr) == 2) {
1933 + unsigned int bkpt;
1934 +
1935 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1936 + current->thread.error_code = ifsr;
1937 + current->thread.trap_no = 0;
1938 + pax_report_refcount_overflow(regs);
1939 + fixup_exception(regs);
1940 + return;
1941 + }
1942 + }
1943 +#endif
1944 +
1945 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1946 return;
1947
1948 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1949 index ce8cb19..3ec539d 100644
1950 --- a/arch/arm/mm/mmap.c
1951 +++ b/arch/arm/mm/mmap.c
1952 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1953 if (len > TASK_SIZE)
1954 return -ENOMEM;
1955
1956 +#ifdef CONFIG_PAX_RANDMMAP
1957 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1958 +#endif
1959 +
1960 if (addr) {
1961 if (do_align)
1962 addr = COLOUR_ALIGN(addr, pgoff);
1963 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1964 addr = PAGE_ALIGN(addr);
1965
1966 vma = find_vma(mm, addr);
1967 - if (TASK_SIZE - len >= addr &&
1968 - (!vma || addr + len <= vma->vm_start))
1969 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1970 return addr;
1971 }
1972 if (len > mm->cached_hole_size) {
1973 - start_addr = addr = mm->free_area_cache;
1974 + start_addr = addr = mm->free_area_cache;
1975 } else {
1976 - start_addr = addr = mm->mmap_base;
1977 - mm->cached_hole_size = 0;
1978 + start_addr = addr = mm->mmap_base;
1979 + mm->cached_hole_size = 0;
1980 }
1981
1982 full_search:
1983 @@ -124,14 +127,14 @@ full_search:
1984 * Start a new search - just in case we missed
1985 * some holes.
1986 */
1987 - if (start_addr != TASK_UNMAPPED_BASE) {
1988 - start_addr = addr = TASK_UNMAPPED_BASE;
1989 + if (start_addr != mm->mmap_base) {
1990 + start_addr = addr = mm->mmap_base;
1991 mm->cached_hole_size = 0;
1992 goto full_search;
1993 }
1994 return -ENOMEM;
1995 }
1996 - if (!vma || addr + len <= vma->vm_start) {
1997 + if (check_heap_stack_gap(vma, addr, len)) {
1998 /*
1999 * Remember the place where we stopped the search:
2000 */
2001 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2002
2003 if (mmap_is_legacy()) {
2004 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2005 +
2006 +#ifdef CONFIG_PAX_RANDMMAP
2007 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2008 + mm->mmap_base += mm->delta_mmap;
2009 +#endif
2010 +
2011 mm->get_unmapped_area = arch_get_unmapped_area;
2012 mm->unmap_area = arch_unmap_area;
2013 } else {
2014 mm->mmap_base = mmap_base(random_factor);
2015 +
2016 +#ifdef CONFIG_PAX_RANDMMAP
2017 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2018 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2019 +#endif
2020 +
2021 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2022 mm->unmap_area = arch_unmap_area_topdown;
2023 }
2024 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2025 index fd556f7..af2e7d2 100644
2026 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2027 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2028 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2029 value in bridge_virt_base */
2030 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2031 const int win);
2032 -};
2033 +} __no_const;
2034
2035 /*
2036 * Information needed to setup one address mapping.
2037 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2038 index 71a6827..e7fbc23 100644
2039 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2040 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2041 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
2042 int (*started)(unsigned ch);
2043 int (*flush)(unsigned ch);
2044 int (*stop)(unsigned ch);
2045 -};
2046 +} __no_const;
2047
2048 extern void *samsung_dmadev_get_ops(void);
2049 extern void *s3c_dma_get_ops(void);
2050 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2051 index 5f28cae..3d23723 100644
2052 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2053 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2054 @@ -14,7 +14,7 @@
2055 struct s5p_ehci_platdata {
2056 int (*phy_init)(struct platform_device *pdev, int type);
2057 int (*phy_exit)(struct platform_device *pdev, int type);
2058 -};
2059 +} __no_const;
2060
2061 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2062
2063 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2064 index c3a58a1..78fbf54 100644
2065 --- a/arch/avr32/include/asm/cache.h
2066 +++ b/arch/avr32/include/asm/cache.h
2067 @@ -1,8 +1,10 @@
2068 #ifndef __ASM_AVR32_CACHE_H
2069 #define __ASM_AVR32_CACHE_H
2070
2071 +#include <linux/const.h>
2072 +
2073 #define L1_CACHE_SHIFT 5
2074 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2075 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2076
2077 /*
2078 * Memory returned by kmalloc() may be used for DMA, so we must make
2079 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2080 index 3b3159b..425ea94 100644
2081 --- a/arch/avr32/include/asm/elf.h
2082 +++ b/arch/avr32/include/asm/elf.h
2083 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2084 the loader. We need to make sure that it is out of the way of the program
2085 that it will "exec", and that there is sufficient room for the brk. */
2086
2087 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2088 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2089
2090 +#ifdef CONFIG_PAX_ASLR
2091 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2092 +
2093 +#define PAX_DELTA_MMAP_LEN 15
2094 +#define PAX_DELTA_STACK_LEN 15
2095 +#endif
2096
2097 /* This yields a mask that user programs can use to figure out what
2098 instruction set this CPU supports. This could be done in user space,
2099 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2100 index b7f5c68..556135c 100644
2101 --- a/arch/avr32/include/asm/kmap_types.h
2102 +++ b/arch/avr32/include/asm/kmap_types.h
2103 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2104 D(11) KM_IRQ1,
2105 D(12) KM_SOFTIRQ0,
2106 D(13) KM_SOFTIRQ1,
2107 -D(14) KM_TYPE_NR
2108 +D(14) KM_CLEARPAGE,
2109 +D(15) KM_TYPE_NR
2110 };
2111
2112 #undef D
2113 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2114 index f7040a1..db9f300 100644
2115 --- a/arch/avr32/mm/fault.c
2116 +++ b/arch/avr32/mm/fault.c
2117 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2118
2119 int exception_trace = 1;
2120
2121 +#ifdef CONFIG_PAX_PAGEEXEC
2122 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2123 +{
2124 + unsigned long i;
2125 +
2126 + printk(KERN_ERR "PAX: bytes at PC: ");
2127 + for (i = 0; i < 20; i++) {
2128 + unsigned char c;
2129 + if (get_user(c, (unsigned char *)pc+i))
2130 + printk(KERN_CONT "???????? ");
2131 + else
2132 + printk(KERN_CONT "%02x ", c);
2133 + }
2134 + printk("\n");
2135 +}
2136 +#endif
2137 +
2138 /*
2139 * This routine handles page faults. It determines the address and the
2140 * problem, and then passes it off to one of the appropriate routines.
2141 @@ -156,6 +173,16 @@ bad_area:
2142 up_read(&mm->mmap_sem);
2143
2144 if (user_mode(regs)) {
2145 +
2146 +#ifdef CONFIG_PAX_PAGEEXEC
2147 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2148 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2149 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2150 + do_group_exit(SIGKILL);
2151 + }
2152 + }
2153 +#endif
2154 +
2155 if (exception_trace && printk_ratelimit())
2156 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2157 "sp %08lx ecr %lu\n",
2158 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2159 index 568885a..f8008df 100644
2160 --- a/arch/blackfin/include/asm/cache.h
2161 +++ b/arch/blackfin/include/asm/cache.h
2162 @@ -7,6 +7,7 @@
2163 #ifndef __ARCH_BLACKFIN_CACHE_H
2164 #define __ARCH_BLACKFIN_CACHE_H
2165
2166 +#include <linux/const.h>
2167 #include <linux/linkage.h> /* for asmlinkage */
2168
2169 /*
2170 @@ -14,7 +15,7 @@
2171 * Blackfin loads 32 bytes for cache
2172 */
2173 #define L1_CACHE_SHIFT 5
2174 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2175 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2176 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2177
2178 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2179 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2180 index aea2718..3639a60 100644
2181 --- a/arch/cris/include/arch-v10/arch/cache.h
2182 +++ b/arch/cris/include/arch-v10/arch/cache.h
2183 @@ -1,8 +1,9 @@
2184 #ifndef _ASM_ARCH_CACHE_H
2185 #define _ASM_ARCH_CACHE_H
2186
2187 +#include <linux/const.h>
2188 /* Etrax 100LX have 32-byte cache-lines. */
2189 -#define L1_CACHE_BYTES 32
2190 #define L1_CACHE_SHIFT 5
2191 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2192
2193 #endif /* _ASM_ARCH_CACHE_H */
2194 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2195 index 7caf25d..ee65ac5 100644
2196 --- a/arch/cris/include/arch-v32/arch/cache.h
2197 +++ b/arch/cris/include/arch-v32/arch/cache.h
2198 @@ -1,11 +1,12 @@
2199 #ifndef _ASM_CRIS_ARCH_CACHE_H
2200 #define _ASM_CRIS_ARCH_CACHE_H
2201
2202 +#include <linux/const.h>
2203 #include <arch/hwregs/dma.h>
2204
2205 /* A cache-line is 32 bytes. */
2206 -#define L1_CACHE_BYTES 32
2207 #define L1_CACHE_SHIFT 5
2208 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2209
2210 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2211
2212 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2213 index b86329d..6709906 100644
2214 --- a/arch/frv/include/asm/atomic.h
2215 +++ b/arch/frv/include/asm/atomic.h
2216 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2217 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2218 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2219
2220 +#define atomic64_read_unchecked(v) atomic64_read(v)
2221 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2222 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2223 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2224 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2225 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2226 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2227 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2228 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2229 +
2230 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2231 {
2232 int c, old;
2233 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2234 index 2797163..c2a401d 100644
2235 --- a/arch/frv/include/asm/cache.h
2236 +++ b/arch/frv/include/asm/cache.h
2237 @@ -12,10 +12,11 @@
2238 #ifndef __ASM_CACHE_H
2239 #define __ASM_CACHE_H
2240
2241 +#include <linux/const.h>
2242
2243 /* bytes per L1 cache line */
2244 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2245 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2246 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2247
2248 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2249 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2250 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2251 index f8e16b2..c73ff79 100644
2252 --- a/arch/frv/include/asm/kmap_types.h
2253 +++ b/arch/frv/include/asm/kmap_types.h
2254 @@ -23,6 +23,7 @@ enum km_type {
2255 KM_IRQ1,
2256 KM_SOFTIRQ0,
2257 KM_SOFTIRQ1,
2258 + KM_CLEARPAGE,
2259 KM_TYPE_NR
2260 };
2261
2262 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2263 index 385fd30..6c3d97e 100644
2264 --- a/arch/frv/mm/elf-fdpic.c
2265 +++ b/arch/frv/mm/elf-fdpic.c
2266 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2267 if (addr) {
2268 addr = PAGE_ALIGN(addr);
2269 vma = find_vma(current->mm, addr);
2270 - if (TASK_SIZE - len >= addr &&
2271 - (!vma || addr + len <= vma->vm_start))
2272 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2273 goto success;
2274 }
2275
2276 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2277 for (; vma; vma = vma->vm_next) {
2278 if (addr > limit)
2279 break;
2280 - if (addr + len <= vma->vm_start)
2281 + if (check_heap_stack_gap(vma, addr, len))
2282 goto success;
2283 addr = vma->vm_end;
2284 }
2285 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2286 for (; vma; vma = vma->vm_next) {
2287 if (addr > limit)
2288 break;
2289 - if (addr + len <= vma->vm_start)
2290 + if (check_heap_stack_gap(vma, addr, len))
2291 goto success;
2292 addr = vma->vm_end;
2293 }
2294 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2295 index c635028..6d9445a 100644
2296 --- a/arch/h8300/include/asm/cache.h
2297 +++ b/arch/h8300/include/asm/cache.h
2298 @@ -1,8 +1,10 @@
2299 #ifndef __ARCH_H8300_CACHE_H
2300 #define __ARCH_H8300_CACHE_H
2301
2302 +#include <linux/const.h>
2303 +
2304 /* bytes per L1 cache line */
2305 -#define L1_CACHE_BYTES 4
2306 +#define L1_CACHE_BYTES _AC(4,UL)
2307
2308 /* m68k-elf-gcc 2.95.2 doesn't like these */
2309
2310 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2311 index 0f01de2..d37d309 100644
2312 --- a/arch/hexagon/include/asm/cache.h
2313 +++ b/arch/hexagon/include/asm/cache.h
2314 @@ -21,9 +21,11 @@
2315 #ifndef __ASM_CACHE_H
2316 #define __ASM_CACHE_H
2317
2318 +#include <linux/const.h>
2319 +
2320 /* Bytes per L1 cache line */
2321 -#define L1_CACHE_SHIFT (5)
2322 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2323 +#define L1_CACHE_SHIFT 5
2324 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2325
2326 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2327 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2328 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2329 index 6e6fe18..a6ae668 100644
2330 --- a/arch/ia64/include/asm/atomic.h
2331 +++ b/arch/ia64/include/asm/atomic.h
2332 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2333 #define atomic64_inc(v) atomic64_add(1, (v))
2334 #define atomic64_dec(v) atomic64_sub(1, (v))
2335
2336 +#define atomic64_read_unchecked(v) atomic64_read(v)
2337 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2338 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2339 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2340 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2341 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2342 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2343 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2344 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2345 +
2346 /* Atomic operations are already serializing */
2347 #define smp_mb__before_atomic_dec() barrier()
2348 #define smp_mb__after_atomic_dec() barrier()
2349 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2350 index 988254a..e1ee885 100644
2351 --- a/arch/ia64/include/asm/cache.h
2352 +++ b/arch/ia64/include/asm/cache.h
2353 @@ -1,6 +1,7 @@
2354 #ifndef _ASM_IA64_CACHE_H
2355 #define _ASM_IA64_CACHE_H
2356
2357 +#include <linux/const.h>
2358
2359 /*
2360 * Copyright (C) 1998-2000 Hewlett-Packard Co
2361 @@ -9,7 +10,7 @@
2362
2363 /* Bytes per L1 (data) cache line. */
2364 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2365 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2366 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2367
2368 #ifdef CONFIG_SMP
2369 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2370 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2371 index b5298eb..67c6e62 100644
2372 --- a/arch/ia64/include/asm/elf.h
2373 +++ b/arch/ia64/include/asm/elf.h
2374 @@ -42,6 +42,13 @@
2375 */
2376 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2377
2378 +#ifdef CONFIG_PAX_ASLR
2379 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2380 +
2381 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2382 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2383 +#endif
2384 +
2385 #define PT_IA_64_UNWIND 0x70000001
2386
2387 /* IA-64 relocations: */
2388 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2389 index 96a8d92..617a1cf 100644
2390 --- a/arch/ia64/include/asm/pgalloc.h
2391 +++ b/arch/ia64/include/asm/pgalloc.h
2392 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2393 pgd_val(*pgd_entry) = __pa(pud);
2394 }
2395
2396 +static inline void
2397 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2398 +{
2399 + pgd_populate(mm, pgd_entry, pud);
2400 +}
2401 +
2402 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2403 {
2404 return quicklist_alloc(0, GFP_KERNEL, NULL);
2405 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2406 pud_val(*pud_entry) = __pa(pmd);
2407 }
2408
2409 +static inline void
2410 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2411 +{
2412 + pud_populate(mm, pud_entry, pmd);
2413 +}
2414 +
2415 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2416 {
2417 return quicklist_alloc(0, GFP_KERNEL, NULL);
2418 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2419 index 815810c..d60bd4c 100644
2420 --- a/arch/ia64/include/asm/pgtable.h
2421 +++ b/arch/ia64/include/asm/pgtable.h
2422 @@ -12,7 +12,7 @@
2423 * David Mosberger-Tang <davidm@hpl.hp.com>
2424 */
2425
2426 -
2427 +#include <linux/const.h>
2428 #include <asm/mman.h>
2429 #include <asm/page.h>
2430 #include <asm/processor.h>
2431 @@ -142,6 +142,17 @@
2432 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2433 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2434 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2435 +
2436 +#ifdef CONFIG_PAX_PAGEEXEC
2437 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2438 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2439 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2440 +#else
2441 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2442 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2443 +# define PAGE_COPY_NOEXEC PAGE_COPY
2444 +#endif
2445 +
2446 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2447 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2448 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2449 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2450 index 54ff557..70c88b7 100644
2451 --- a/arch/ia64/include/asm/spinlock.h
2452 +++ b/arch/ia64/include/asm/spinlock.h
2453 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2454 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2455
2456 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2457 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2458 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2459 }
2460
2461 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2462 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2463 index 449c8c0..432a3d2 100644
2464 --- a/arch/ia64/include/asm/uaccess.h
2465 +++ b/arch/ia64/include/asm/uaccess.h
2466 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2467 const void *__cu_from = (from); \
2468 long __cu_len = (n); \
2469 \
2470 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2471 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2472 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2473 __cu_len; \
2474 })
2475 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2476 long __cu_len = (n); \
2477 \
2478 __chk_user_ptr(__cu_from); \
2479 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2480 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2481 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2482 __cu_len; \
2483 })
2484 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2485 index 24603be..948052d 100644
2486 --- a/arch/ia64/kernel/module.c
2487 +++ b/arch/ia64/kernel/module.c
2488 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2489 void
2490 module_free (struct module *mod, void *module_region)
2491 {
2492 - if (mod && mod->arch.init_unw_table &&
2493 - module_region == mod->module_init) {
2494 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2495 unw_remove_unwind_table(mod->arch.init_unw_table);
2496 mod->arch.init_unw_table = NULL;
2497 }
2498 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2499 }
2500
2501 static inline int
2502 +in_init_rx (const struct module *mod, uint64_t addr)
2503 +{
2504 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2505 +}
2506 +
2507 +static inline int
2508 +in_init_rw (const struct module *mod, uint64_t addr)
2509 +{
2510 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2511 +}
2512 +
2513 +static inline int
2514 in_init (const struct module *mod, uint64_t addr)
2515 {
2516 - return addr - (uint64_t) mod->module_init < mod->init_size;
2517 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2518 +}
2519 +
2520 +static inline int
2521 +in_core_rx (const struct module *mod, uint64_t addr)
2522 +{
2523 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2524 +}
2525 +
2526 +static inline int
2527 +in_core_rw (const struct module *mod, uint64_t addr)
2528 +{
2529 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2530 }
2531
2532 static inline int
2533 in_core (const struct module *mod, uint64_t addr)
2534 {
2535 - return addr - (uint64_t) mod->module_core < mod->core_size;
2536 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2537 }
2538
2539 static inline int
2540 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2541 break;
2542
2543 case RV_BDREL:
2544 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2545 + if (in_init_rx(mod, val))
2546 + val -= (uint64_t) mod->module_init_rx;
2547 + else if (in_init_rw(mod, val))
2548 + val -= (uint64_t) mod->module_init_rw;
2549 + else if (in_core_rx(mod, val))
2550 + val -= (uint64_t) mod->module_core_rx;
2551 + else if (in_core_rw(mod, val))
2552 + val -= (uint64_t) mod->module_core_rw;
2553 break;
2554
2555 case RV_LTV:
2556 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2557 * addresses have been selected...
2558 */
2559 uint64_t gp;
2560 - if (mod->core_size > MAX_LTOFF)
2561 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2562 /*
2563 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2564 * at the end of the module.
2565 */
2566 - gp = mod->core_size - MAX_LTOFF / 2;
2567 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2568 else
2569 - gp = mod->core_size / 2;
2570 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2571 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2572 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2573 mod->arch.gp = gp;
2574 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2575 }
2576 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2577 index d9439ef..b9a4303 100644
2578 --- a/arch/ia64/kernel/sys_ia64.c
2579 +++ b/arch/ia64/kernel/sys_ia64.c
2580 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2581 if (REGION_NUMBER(addr) == RGN_HPAGE)
2582 addr = 0;
2583 #endif
2584 +
2585 +#ifdef CONFIG_PAX_RANDMMAP
2586 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2587 + addr = mm->free_area_cache;
2588 + else
2589 +#endif
2590 +
2591 if (!addr)
2592 addr = mm->free_area_cache;
2593
2594 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2595 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2596 /* At this point: (!vma || addr < vma->vm_end). */
2597 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2598 - if (start_addr != TASK_UNMAPPED_BASE) {
2599 + if (start_addr != mm->mmap_base) {
2600 /* Start a new search --- just in case we missed some holes. */
2601 - addr = TASK_UNMAPPED_BASE;
2602 + addr = mm->mmap_base;
2603 goto full_search;
2604 }
2605 return -ENOMEM;
2606 }
2607 - if (!vma || addr + len <= vma->vm_start) {
2608 + if (check_heap_stack_gap(vma, addr, len)) {
2609 /* Remember the address where we stopped this search: */
2610 mm->free_area_cache = addr + len;
2611 return addr;
2612 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2613 index 0ccb28f..8992469 100644
2614 --- a/arch/ia64/kernel/vmlinux.lds.S
2615 +++ b/arch/ia64/kernel/vmlinux.lds.S
2616 @@ -198,7 +198,7 @@ SECTIONS {
2617 /* Per-cpu data: */
2618 . = ALIGN(PERCPU_PAGE_SIZE);
2619 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2620 - __phys_per_cpu_start = __per_cpu_load;
2621 + __phys_per_cpu_start = per_cpu_load;
2622 /*
2623 * ensure percpu data fits
2624 * into percpu page size
2625 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2626 index 02d29c2..ea893df 100644
2627 --- a/arch/ia64/mm/fault.c
2628 +++ b/arch/ia64/mm/fault.c
2629 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2630 return pte_present(pte);
2631 }
2632
2633 +#ifdef CONFIG_PAX_PAGEEXEC
2634 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2635 +{
2636 + unsigned long i;
2637 +
2638 + printk(KERN_ERR "PAX: bytes at PC: ");
2639 + for (i = 0; i < 8; i++) {
2640 + unsigned int c;
2641 + if (get_user(c, (unsigned int *)pc+i))
2642 + printk(KERN_CONT "???????? ");
2643 + else
2644 + printk(KERN_CONT "%08x ", c);
2645 + }
2646 + printk("\n");
2647 +}
2648 +#endif
2649 +
2650 void __kprobes
2651 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2652 {
2653 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2654 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2655 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2656
2657 - if ((vma->vm_flags & mask) != mask)
2658 + if ((vma->vm_flags & mask) != mask) {
2659 +
2660 +#ifdef CONFIG_PAX_PAGEEXEC
2661 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2662 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2663 + goto bad_area;
2664 +
2665 + up_read(&mm->mmap_sem);
2666 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2667 + do_group_exit(SIGKILL);
2668 + }
2669 +#endif
2670 +
2671 goto bad_area;
2672
2673 + }
2674 +
2675 /*
2676 * If for any reason at all we couldn't handle the fault, make
2677 * sure we exit gracefully rather than endlessly redo the
2678 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2679 index 5ca674b..e0e1b70 100644
2680 --- a/arch/ia64/mm/hugetlbpage.c
2681 +++ b/arch/ia64/mm/hugetlbpage.c
2682 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2683 /* At this point: (!vmm || addr < vmm->vm_end). */
2684 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2685 return -ENOMEM;
2686 - if (!vmm || (addr + len) <= vmm->vm_start)
2687 + if (check_heap_stack_gap(vmm, addr, len))
2688 return addr;
2689 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2690 }
2691 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2692 index 0eab454..bd794f2 100644
2693 --- a/arch/ia64/mm/init.c
2694 +++ b/arch/ia64/mm/init.c
2695 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2696 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2697 vma->vm_end = vma->vm_start + PAGE_SIZE;
2698 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2699 +
2700 +#ifdef CONFIG_PAX_PAGEEXEC
2701 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2702 + vma->vm_flags &= ~VM_EXEC;
2703 +
2704 +#ifdef CONFIG_PAX_MPROTECT
2705 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2706 + vma->vm_flags &= ~VM_MAYEXEC;
2707 +#endif
2708 +
2709 + }
2710 +#endif
2711 +
2712 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2713 down_write(&current->mm->mmap_sem);
2714 if (insert_vm_struct(current->mm, vma)) {
2715 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2716 index 40b3ee9..8c2c112 100644
2717 --- a/arch/m32r/include/asm/cache.h
2718 +++ b/arch/m32r/include/asm/cache.h
2719 @@ -1,8 +1,10 @@
2720 #ifndef _ASM_M32R_CACHE_H
2721 #define _ASM_M32R_CACHE_H
2722
2723 +#include <linux/const.h>
2724 +
2725 /* L1 cache line size */
2726 #define L1_CACHE_SHIFT 4
2727 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2728 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2729
2730 #endif /* _ASM_M32R_CACHE_H */
2731 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2732 index 82abd15..d95ae5d 100644
2733 --- a/arch/m32r/lib/usercopy.c
2734 +++ b/arch/m32r/lib/usercopy.c
2735 @@ -14,6 +14,9 @@
2736 unsigned long
2737 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2738 {
2739 + if ((long)n < 0)
2740 + return n;
2741 +
2742 prefetch(from);
2743 if (access_ok(VERIFY_WRITE, to, n))
2744 __copy_user(to,from,n);
2745 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2746 unsigned long
2747 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2748 {
2749 + if ((long)n < 0)
2750 + return n;
2751 +
2752 prefetchw(to);
2753 if (access_ok(VERIFY_READ, from, n))
2754 __copy_user_zeroing(to,from,n);
2755 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2756 index 0395c51..5f26031 100644
2757 --- a/arch/m68k/include/asm/cache.h
2758 +++ b/arch/m68k/include/asm/cache.h
2759 @@ -4,9 +4,11 @@
2760 #ifndef __ARCH_M68K_CACHE_H
2761 #define __ARCH_M68K_CACHE_H
2762
2763 +#include <linux/const.h>
2764 +
2765 /* bytes per L1 cache line */
2766 #define L1_CACHE_SHIFT 4
2767 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2768 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2769
2770 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2771
2772 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2773 index 4efe96a..60e8699 100644
2774 --- a/arch/microblaze/include/asm/cache.h
2775 +++ b/arch/microblaze/include/asm/cache.h
2776 @@ -13,11 +13,12 @@
2777 #ifndef _ASM_MICROBLAZE_CACHE_H
2778 #define _ASM_MICROBLAZE_CACHE_H
2779
2780 +#include <linux/const.h>
2781 #include <asm/registers.h>
2782
2783 #define L1_CACHE_SHIFT 5
2784 /* word-granular cache in microblaze */
2785 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2786 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2787
2788 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2789
2790 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2791 index 3f4c5cb..3439c6e 100644
2792 --- a/arch/mips/include/asm/atomic.h
2793 +++ b/arch/mips/include/asm/atomic.h
2794 @@ -21,6 +21,10 @@
2795 #include <asm/cmpxchg.h>
2796 #include <asm/war.h>
2797
2798 +#ifdef CONFIG_GENERIC_ATOMIC64
2799 +#include <asm-generic/atomic64.h>
2800 +#endif
2801 +
2802 #define ATOMIC_INIT(i) { (i) }
2803
2804 /*
2805 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2806 */
2807 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2808
2809 +#define atomic64_read_unchecked(v) atomic64_read(v)
2810 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2811 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2812 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2813 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2814 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2815 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2816 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2817 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2818 +
2819 #endif /* CONFIG_64BIT */
2820
2821 /*
2822 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2823 index b4db69f..8f3b093 100644
2824 --- a/arch/mips/include/asm/cache.h
2825 +++ b/arch/mips/include/asm/cache.h
2826 @@ -9,10 +9,11 @@
2827 #ifndef _ASM_CACHE_H
2828 #define _ASM_CACHE_H
2829
2830 +#include <linux/const.h>
2831 #include <kmalloc.h>
2832
2833 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2834 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2835 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2836
2837 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2838 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2839 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2840 index 455c0ac..ad65fbe 100644
2841 --- a/arch/mips/include/asm/elf.h
2842 +++ b/arch/mips/include/asm/elf.h
2843 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2844 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2845 #endif
2846
2847 +#ifdef CONFIG_PAX_ASLR
2848 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2849 +
2850 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2851 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2852 +#endif
2853 +
2854 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2855 struct linux_binprm;
2856 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2857 int uses_interp);
2858
2859 -struct mm_struct;
2860 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2861 -#define arch_randomize_brk arch_randomize_brk
2862 -
2863 #endif /* _ASM_ELF_H */
2864 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2865 index c1f6afa..38cc6e9 100644
2866 --- a/arch/mips/include/asm/exec.h
2867 +++ b/arch/mips/include/asm/exec.h
2868 @@ -12,6 +12,6 @@
2869 #ifndef _ASM_EXEC_H
2870 #define _ASM_EXEC_H
2871
2872 -extern unsigned long arch_align_stack(unsigned long sp);
2873 +#define arch_align_stack(x) ((x) & ~0xfUL)
2874
2875 #endif /* _ASM_EXEC_H */
2876 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2877 index da9bd7d..91aa7ab 100644
2878 --- a/arch/mips/include/asm/page.h
2879 +++ b/arch/mips/include/asm/page.h
2880 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2881 #ifdef CONFIG_CPU_MIPS32
2882 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2883 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2884 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2885 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2886 #else
2887 typedef struct { unsigned long long pte; } pte_t;
2888 #define pte_val(x) ((x).pte)
2889 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2890 index 881d18b..cea38bc 100644
2891 --- a/arch/mips/include/asm/pgalloc.h
2892 +++ b/arch/mips/include/asm/pgalloc.h
2893 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2894 {
2895 set_pud(pud, __pud((unsigned long)pmd));
2896 }
2897 +
2898 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2899 +{
2900 + pud_populate(mm, pud, pmd);
2901 +}
2902 #endif
2903
2904 /*
2905 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2906 index ca97e0e..cd08920 100644
2907 --- a/arch/mips/include/asm/thread_info.h
2908 +++ b/arch/mips/include/asm/thread_info.h
2909 @@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2910 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2911 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2912 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2913 +/* li takes a 32bit immediate */
2914 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2915 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2916
2917 #ifdef CONFIG_MIPS32_O32
2918 @@ -134,15 +136,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2919 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2920 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2921 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2922 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2923 +
2924 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2925
2926 /* work to do in syscall_trace_leave() */
2927 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2928 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do on interrupt/exception return */
2931 #define _TIF_WORK_MASK (0x0000ffef & \
2932 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2933 /* work to do on any return to u-space */
2934 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2935 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2936
2937 #endif /* __KERNEL__ */
2938
2939 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2940 index 9fdd8bc..4bd7f1a 100644
2941 --- a/arch/mips/kernel/binfmt_elfn32.c
2942 +++ b/arch/mips/kernel/binfmt_elfn32.c
2943 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2944 #undef ELF_ET_DYN_BASE
2945 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2946
2947 +#ifdef CONFIG_PAX_ASLR
2948 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2949 +
2950 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2952 +#endif
2953 +
2954 #include <asm/processor.h>
2955 #include <linux/module.h>
2956 #include <linux/elfcore.h>
2957 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2958 index ff44823..97f8906 100644
2959 --- a/arch/mips/kernel/binfmt_elfo32.c
2960 +++ b/arch/mips/kernel/binfmt_elfo32.c
2961 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2962 #undef ELF_ET_DYN_BASE
2963 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2964
2965 +#ifdef CONFIG_PAX_ASLR
2966 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2967 +
2968 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2969 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2970 +#endif
2971 +
2972 #include <asm/processor.h>
2973
2974 /*
2975 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2976 index e9a5fd7..378809a 100644
2977 --- a/arch/mips/kernel/process.c
2978 +++ b/arch/mips/kernel/process.c
2979 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2980 out:
2981 return pc;
2982 }
2983 -
2984 -/*
2985 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2986 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2987 - */
2988 -unsigned long arch_align_stack(unsigned long sp)
2989 -{
2990 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2991 - sp -= get_random_int() & ~PAGE_MASK;
2992 -
2993 - return sp & ALMASK;
2994 -}
2995 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2996 index 4812c6d..2069554 100644
2997 --- a/arch/mips/kernel/ptrace.c
2998 +++ b/arch/mips/kernel/ptrace.c
2999 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
3000 return arch;
3001 }
3002
3003 +#ifdef CONFIG_GRKERNSEC_SETXID
3004 +extern void gr_delayed_cred_worker(void);
3005 +#endif
3006 +
3007 /*
3008 * Notification of system call entry/exit
3009 * - triggered by current->work.syscall_trace
3010 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3011 /* do the secure computing check first */
3012 secure_computing_strict(regs->regs[2]);
3013
3014 +#ifdef CONFIG_GRKERNSEC_SETXID
3015 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3016 + gr_delayed_cred_worker();
3017 +#endif
3018 +
3019 if (!(current->ptrace & PT_PTRACED))
3020 goto out;
3021
3022 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3023 index a632bc1..0b77c7c 100644
3024 --- a/arch/mips/kernel/scall32-o32.S
3025 +++ b/arch/mips/kernel/scall32-o32.S
3026 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3027
3028 stack_done:
3029 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3030 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3031 + li t1, _TIF_SYSCALL_WORK
3032 and t0, t1
3033 bnez t0, syscall_trace_entry # -> yes
3034
3035 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3036 index 3b5a5e9..e1ee86d 100644
3037 --- a/arch/mips/kernel/scall64-64.S
3038 +++ b/arch/mips/kernel/scall64-64.S
3039 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3040
3041 sd a3, PT_R26(sp) # save a3 for syscall restarting
3042
3043 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3044 + li t1, _TIF_SYSCALL_WORK
3045 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3046 and t0, t1, t0
3047 bnez t0, syscall_trace_entry
3048 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3049 index 6be6f70..1859577 100644
3050 --- a/arch/mips/kernel/scall64-n32.S
3051 +++ b/arch/mips/kernel/scall64-n32.S
3052 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3053
3054 sd a3, PT_R26(sp) # save a3 for syscall restarting
3055
3056 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3057 + li t1, _TIF_SYSCALL_WORK
3058 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3059 and t0, t1, t0
3060 bnez t0, n32_syscall_trace_entry
3061 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3062 index 5422855..74e63a3 100644
3063 --- a/arch/mips/kernel/scall64-o32.S
3064 +++ b/arch/mips/kernel/scall64-o32.S
3065 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3066 PTR 4b, bad_stack
3067 .previous
3068
3069 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3070 + li t1, _TIF_SYSCALL_WORK
3071 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3072 and t0, t1, t0
3073 bnez t0, trace_a_syscall
3074 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3075 index c14f6df..537e729 100644
3076 --- a/arch/mips/mm/fault.c
3077 +++ b/arch/mips/mm/fault.c
3078 @@ -27,6 +27,23 @@
3079 #include <asm/highmem.h> /* For VMALLOC_END */
3080 #include <linux/kdebug.h>
3081
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3084 +{
3085 + unsigned long i;
3086 +
3087 + printk(KERN_ERR "PAX: bytes at PC: ");
3088 + for (i = 0; i < 5; i++) {
3089 + unsigned int c;
3090 + if (get_user(c, (unsigned int *)pc+i))
3091 + printk(KERN_CONT "???????? ");
3092 + else
3093 + printk(KERN_CONT "%08x ", c);
3094 + }
3095 + printk("\n");
3096 +}
3097 +#endif
3098 +
3099 /*
3100 * This routine handles page faults. It determines the address,
3101 * and the problem, and then passes it off to one of the appropriate
3102 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3103 index 302d779..7d35bf8 100644
3104 --- a/arch/mips/mm/mmap.c
3105 +++ b/arch/mips/mm/mmap.c
3106 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3107 do_color_align = 1;
3108
3109 /* requesting a specific address */
3110 +
3111 +#ifdef CONFIG_PAX_RANDMMAP
3112 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3113 +#endif
3114 +
3115 if (addr) {
3116 if (do_color_align)
3117 addr = COLOUR_ALIGN(addr, pgoff);
3118 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3119 addr = PAGE_ALIGN(addr);
3120
3121 vma = find_vma(mm, addr);
3122 - if (TASK_SIZE - len >= addr &&
3123 - (!vma || addr + len <= vma->vm_start))
3124 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3125 return addr;
3126 }
3127
3128 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3129 /* At this point: (!vma || addr < vma->vm_end). */
3130 if (TASK_SIZE - len < addr)
3131 return -ENOMEM;
3132 - if (!vma || addr + len <= vma->vm_start)
3133 + if (check_heap_stack_gap(vmm, addr, len))
3134 return addr;
3135 addr = vma->vm_end;
3136 if (do_color_align)
3137 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3138 /* make sure it can fit in the remaining address space */
3139 if (likely(addr > len)) {
3140 vma = find_vma(mm, addr - len);
3141 - if (!vma || addr <= vma->vm_start) {
3142 + if (check_heap_stack_gap(vmm, addr - len, len))
3143 /* cache the address as a hint for next time */
3144 return mm->free_area_cache = addr - len;
3145 }
3146 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3147 * return with success:
3148 */
3149 vma = find_vma(mm, addr);
3150 - if (likely(!vma || addr + len <= vma->vm_start)) {
3151 + if (check_heap_stack_gap(vmm, addr, len)) {
3152 /* cache the address as a hint for next time */
3153 return mm->free_area_cache = addr;
3154 }
3155 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3156 mm->unmap_area = arch_unmap_area_topdown;
3157 }
3158 }
3159 -
3160 -static inline unsigned long brk_rnd(void)
3161 -{
3162 - unsigned long rnd = get_random_int();
3163 -
3164 - rnd = rnd << PAGE_SHIFT;
3165 - /* 8MB for 32bit, 256MB for 64bit */
3166 - if (TASK_IS_32BIT_ADDR)
3167 - rnd = rnd & 0x7ffffful;
3168 - else
3169 - rnd = rnd & 0xffffffful;
3170 -
3171 - return rnd;
3172 -}
3173 -
3174 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3175 -{
3176 - unsigned long base = mm->brk;
3177 - unsigned long ret;
3178 -
3179 - ret = PAGE_ALIGN(base + brk_rnd());
3180 -
3181 - if (ret < mm->brk)
3182 - return mm->brk;
3183 -
3184 - return ret;
3185 -}
3186 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3187 index 967d144..db12197 100644
3188 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3189 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3190 @@ -11,12 +11,14 @@
3191 #ifndef _ASM_PROC_CACHE_H
3192 #define _ASM_PROC_CACHE_H
3193
3194 +#include <linux/const.h>
3195 +
3196 /* L1 cache */
3197
3198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3200 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3202 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3204
3205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3206 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3207 index bcb5df2..84fabd2 100644
3208 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3209 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3210 @@ -16,13 +16,15 @@
3211 #ifndef _ASM_PROC_CACHE_H
3212 #define _ASM_PROC_CACHE_H
3213
3214 +#include <linux/const.h>
3215 +
3216 /*
3217 * L1 cache
3218 */
3219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3221 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3223 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3225
3226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3227 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3228 index 4ce7a01..449202a 100644
3229 --- a/arch/openrisc/include/asm/cache.h
3230 +++ b/arch/openrisc/include/asm/cache.h
3231 @@ -19,11 +19,13 @@
3232 #ifndef __ASM_OPENRISC_CACHE_H
3233 #define __ASM_OPENRISC_CACHE_H
3234
3235 +#include <linux/const.h>
3236 +
3237 /* FIXME: How can we replace these with values from the CPU...
3238 * they shouldn't be hard-coded!
3239 */
3240
3241 -#define L1_CACHE_BYTES 16
3242 #define L1_CACHE_SHIFT 4
3243 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3244
3245 #endif /* __ASM_OPENRISC_CACHE_H */
3246 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3247 index af9cf30..2aae9b2 100644
3248 --- a/arch/parisc/include/asm/atomic.h
3249 +++ b/arch/parisc/include/asm/atomic.h
3250 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3251
3252 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3253
3254 +#define atomic64_read_unchecked(v) atomic64_read(v)
3255 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3256 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3257 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3258 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3259 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3260 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3261 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3262 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3263 +
3264 #endif /* !CONFIG_64BIT */
3265
3266
3267 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3268 index 47f11c7..3420df2 100644
3269 --- a/arch/parisc/include/asm/cache.h
3270 +++ b/arch/parisc/include/asm/cache.h
3271 @@ -5,6 +5,7 @@
3272 #ifndef __ARCH_PARISC_CACHE_H
3273 #define __ARCH_PARISC_CACHE_H
3274
3275 +#include <linux/const.h>
3276
3277 /*
3278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3279 @@ -15,13 +16,13 @@
3280 * just ruin performance.
3281 */
3282 #ifdef CONFIG_PA20
3283 -#define L1_CACHE_BYTES 64
3284 #define L1_CACHE_SHIFT 6
3285 #else
3286 -#define L1_CACHE_BYTES 32
3287 #define L1_CACHE_SHIFT 5
3288 #endif
3289
3290 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3291 +
3292 #ifndef __ASSEMBLY__
3293
3294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3295 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3296 index 19f6cb1..6c78cf2 100644
3297 --- a/arch/parisc/include/asm/elf.h
3298 +++ b/arch/parisc/include/asm/elf.h
3299 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3300
3301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3302
3303 +#ifdef CONFIG_PAX_ASLR
3304 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3305 +
3306 +#define PAX_DELTA_MMAP_LEN 16
3307 +#define PAX_DELTA_STACK_LEN 16
3308 +#endif
3309 +
3310 /* This yields a mask that user programs can use to figure out what
3311 instruction set this CPU supports. This could be done in user space,
3312 but it's not easy, and we've already done it here. */
3313 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3314 index fc987a1..6e068ef 100644
3315 --- a/arch/parisc/include/asm/pgalloc.h
3316 +++ b/arch/parisc/include/asm/pgalloc.h
3317 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3319 }
3320
3321 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 +{
3323 + pgd_populate(mm, pgd, pmd);
3324 +}
3325 +
3326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3327 {
3328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3329 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3331 #define pmd_free(mm, x) do { } while (0)
3332 #define pgd_populate(mm, pmd, pte) BUG()
3333 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3334
3335 #endif
3336
3337 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3338 index ee99f23..802b0a1 100644
3339 --- a/arch/parisc/include/asm/pgtable.h
3340 +++ b/arch/parisc/include/asm/pgtable.h
3341 @@ -212,6 +212,17 @@ struct vm_area_struct;
3342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3343 #define PAGE_COPY PAGE_EXECREAD
3344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3345 +
3346 +#ifdef CONFIG_PAX_PAGEEXEC
3347 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3348 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3349 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3350 +#else
3351 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3352 +# define PAGE_COPY_NOEXEC PAGE_COPY
3353 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3354 +#endif
3355 +
3356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3359 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3360 index 4ba2c93..f5e3974 100644
3361 --- a/arch/parisc/include/asm/uaccess.h
3362 +++ b/arch/parisc/include/asm/uaccess.h
3363 @@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3364 const void __user *from,
3365 unsigned long n)
3366 {
3367 - int sz = __compiletime_object_size(to);
3368 + size_t sz = __compiletime_object_size(to);
3369 int ret = -EFAULT;
3370
3371 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3372 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3373 ret = __copy_from_user(to, from, n);
3374 else
3375 copy_from_user_overflow();
3376 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3377 index 5e34ccf..672bc9c 100644
3378 --- a/arch/parisc/kernel/module.c
3379 +++ b/arch/parisc/kernel/module.c
3380 @@ -98,16 +98,38 @@
3381
3382 /* three functions to determine where in the module core
3383 * or init pieces the location is */
3384 +static inline int in_init_rx(struct module *me, void *loc)
3385 +{
3386 + return (loc >= me->module_init_rx &&
3387 + loc < (me->module_init_rx + me->init_size_rx));
3388 +}
3389 +
3390 +static inline int in_init_rw(struct module *me, void *loc)
3391 +{
3392 + return (loc >= me->module_init_rw &&
3393 + loc < (me->module_init_rw + me->init_size_rw));
3394 +}
3395 +
3396 static inline int in_init(struct module *me, void *loc)
3397 {
3398 - return (loc >= me->module_init &&
3399 - loc <= (me->module_init + me->init_size));
3400 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3401 +}
3402 +
3403 +static inline int in_core_rx(struct module *me, void *loc)
3404 +{
3405 + return (loc >= me->module_core_rx &&
3406 + loc < (me->module_core_rx + me->core_size_rx));
3407 +}
3408 +
3409 +static inline int in_core_rw(struct module *me, void *loc)
3410 +{
3411 + return (loc >= me->module_core_rw &&
3412 + loc < (me->module_core_rw + me->core_size_rw));
3413 }
3414
3415 static inline int in_core(struct module *me, void *loc)
3416 {
3417 - return (loc >= me->module_core &&
3418 - loc <= (me->module_core + me->core_size));
3419 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3420 }
3421
3422 static inline int in_local(struct module *me, void *loc)
3423 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3424 }
3425
3426 /* align things a bit */
3427 - me->core_size = ALIGN(me->core_size, 16);
3428 - me->arch.got_offset = me->core_size;
3429 - me->core_size += gots * sizeof(struct got_entry);
3430 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3431 + me->arch.got_offset = me->core_size_rw;
3432 + me->core_size_rw += gots * sizeof(struct got_entry);
3433
3434 - me->core_size = ALIGN(me->core_size, 16);
3435 - me->arch.fdesc_offset = me->core_size;
3436 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3437 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3438 + me->arch.fdesc_offset = me->core_size_rw;
3439 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3440
3441 me->arch.got_max = gots;
3442 me->arch.fdesc_max = fdescs;
3443 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3444
3445 BUG_ON(value == 0);
3446
3447 - got = me->module_core + me->arch.got_offset;
3448 + got = me->module_core_rw + me->arch.got_offset;
3449 for (i = 0; got[i].addr; i++)
3450 if (got[i].addr == value)
3451 goto out;
3452 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3453 #ifdef CONFIG_64BIT
3454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3455 {
3456 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3457 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3458
3459 if (!value) {
3460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3461 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3462
3463 /* Create new one */
3464 fdesc->addr = value;
3465 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3466 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3467 return (Elf_Addr)fdesc;
3468 }
3469 #endif /* CONFIG_64BIT */
3470 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3471
3472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3473 end = table + sechdrs[me->arch.unwind_section].sh_size;
3474 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3475 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3476
3477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3478 me->arch.unwind_section, table, end, gp);
3479 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3480 index c9b9322..02d8940 100644
3481 --- a/arch/parisc/kernel/sys_parisc.c
3482 +++ b/arch/parisc/kernel/sys_parisc.c
3483 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3484 /* At this point: (!vma || addr < vma->vm_end). */
3485 if (TASK_SIZE - len < addr)
3486 return -ENOMEM;
3487 - if (!vma || addr + len <= vma->vm_start)
3488 + if (check_heap_stack_gap(vma, addr, len))
3489 return addr;
3490 addr = vma->vm_end;
3491 }
3492 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3493 /* At this point: (!vma || addr < vma->vm_end). */
3494 if (TASK_SIZE - len < addr)
3495 return -ENOMEM;
3496 - if (!vma || addr + len <= vma->vm_start)
3497 + if (check_heap_stack_gap(vma, addr, len))
3498 return addr;
3499 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3500 if (addr < vma->vm_end) /* handle wraparound */
3501 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3502 if (flags & MAP_FIXED)
3503 return addr;
3504 if (!addr)
3505 - addr = TASK_UNMAPPED_BASE;
3506 + addr = current->mm->mmap_base;
3507
3508 if (filp) {
3509 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3510 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3511 index 45ba99f..8e22c33 100644
3512 --- a/arch/parisc/kernel/traps.c
3513 +++ b/arch/parisc/kernel/traps.c
3514 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3515
3516 down_read(&current->mm->mmap_sem);
3517 vma = find_vma(current->mm,regs->iaoq[0]);
3518 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3519 - && (vma->vm_flags & VM_EXEC)) {
3520 -
3521 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3522 fault_address = regs->iaoq[0];
3523 fault_space = regs->iasq[0];
3524
3525 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3526 index 18162ce..94de376 100644
3527 --- a/arch/parisc/mm/fault.c
3528 +++ b/arch/parisc/mm/fault.c
3529 @@ -15,6 +15,7 @@
3530 #include <linux/sched.h>
3531 #include <linux/interrupt.h>
3532 #include <linux/module.h>
3533 +#include <linux/unistd.h>
3534
3535 #include <asm/uaccess.h>
3536 #include <asm/traps.h>
3537 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3538 static unsigned long
3539 parisc_acctyp(unsigned long code, unsigned int inst)
3540 {
3541 - if (code == 6 || code == 16)
3542 + if (code == 6 || code == 7 || code == 16)
3543 return VM_EXEC;
3544
3545 switch (inst & 0xf0000000) {
3546 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3547 }
3548 #endif
3549
3550 +#ifdef CONFIG_PAX_PAGEEXEC
3551 +/*
3552 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3553 + *
3554 + * returns 1 when task should be killed
3555 + * 2 when rt_sigreturn trampoline was detected
3556 + * 3 when unpatched PLT trampoline was detected
3557 + */
3558 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3559 +{
3560 +
3561 +#ifdef CONFIG_PAX_EMUPLT
3562 + int err;
3563 +
3564 + do { /* PaX: unpatched PLT emulation */
3565 + unsigned int bl, depwi;
3566 +
3567 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3568 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3569 +
3570 + if (err)
3571 + break;
3572 +
3573 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3574 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3575 +
3576 + err = get_user(ldw, (unsigned int *)addr);
3577 + err |= get_user(bv, (unsigned int *)(addr+4));
3578 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3579 +
3580 + if (err)
3581 + break;
3582 +
3583 + if (ldw == 0x0E801096U &&
3584 + bv == 0xEAC0C000U &&
3585 + ldw2 == 0x0E881095U)
3586 + {
3587 + unsigned int resolver, map;
3588 +
3589 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3590 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3591 + if (err)
3592 + break;
3593 +
3594 + regs->gr[20] = instruction_pointer(regs)+8;
3595 + regs->gr[21] = map;
3596 + regs->gr[22] = resolver;
3597 + regs->iaoq[0] = resolver | 3UL;
3598 + regs->iaoq[1] = regs->iaoq[0] + 4;
3599 + return 3;
3600 + }
3601 + }
3602 + } while (0);
3603 +#endif
3604 +
3605 +#ifdef CONFIG_PAX_EMUTRAMP
3606 +
3607 +#ifndef CONFIG_PAX_EMUSIGRT
3608 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3609 + return 1;
3610 +#endif
3611 +
3612 + do { /* PaX: rt_sigreturn emulation */
3613 + unsigned int ldi1, ldi2, bel, nop;
3614 +
3615 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3616 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3617 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3618 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3619 +
3620 + if (err)
3621 + break;
3622 +
3623 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3624 + ldi2 == 0x3414015AU &&
3625 + bel == 0xE4008200U &&
3626 + nop == 0x08000240U)
3627 + {
3628 + regs->gr[25] = (ldi1 & 2) >> 1;
3629 + regs->gr[20] = __NR_rt_sigreturn;
3630 + regs->gr[31] = regs->iaoq[1] + 16;
3631 + regs->sr[0] = regs->iasq[1];
3632 + regs->iaoq[0] = 0x100UL;
3633 + regs->iaoq[1] = regs->iaoq[0] + 4;
3634 + regs->iasq[0] = regs->sr[2];
3635 + regs->iasq[1] = regs->sr[2];
3636 + return 2;
3637 + }
3638 + } while (0);
3639 +#endif
3640 +
3641 + return 1;
3642 +}
3643 +
3644 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3645 +{
3646 + unsigned long i;
3647 +
3648 + printk(KERN_ERR "PAX: bytes at PC: ");
3649 + for (i = 0; i < 5; i++) {
3650 + unsigned int c;
3651 + if (get_user(c, (unsigned int *)pc+i))
3652 + printk(KERN_CONT "???????? ");
3653 + else
3654 + printk(KERN_CONT "%08x ", c);
3655 + }
3656 + printk("\n");
3657 +}
3658 +#endif
3659 +
3660 int fixup_exception(struct pt_regs *regs)
3661 {
3662 const struct exception_table_entry *fix;
3663 @@ -192,8 +303,33 @@ good_area:
3664
3665 acc_type = parisc_acctyp(code,regs->iir);
3666
3667 - if ((vma->vm_flags & acc_type) != acc_type)
3668 + if ((vma->vm_flags & acc_type) != acc_type) {
3669 +
3670 +#ifdef CONFIG_PAX_PAGEEXEC
3671 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3672 + (address & ~3UL) == instruction_pointer(regs))
3673 + {
3674 + up_read(&mm->mmap_sem);
3675 + switch (pax_handle_fetch_fault(regs)) {
3676 +
3677 +#ifdef CONFIG_PAX_EMUPLT
3678 + case 3:
3679 + return;
3680 +#endif
3681 +
3682 +#ifdef CONFIG_PAX_EMUTRAMP
3683 + case 2:
3684 + return;
3685 +#endif
3686 +
3687 + }
3688 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3689 + do_group_exit(SIGKILL);
3690 + }
3691 +#endif
3692 +
3693 goto bad_area;
3694 + }
3695
3696 /*
3697 * If for any reason at all we couldn't handle the fault, make
3698 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3699 index da29032..f76c24c 100644
3700 --- a/arch/powerpc/include/asm/atomic.h
3701 +++ b/arch/powerpc/include/asm/atomic.h
3702 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3703 return t1;
3704 }
3705
3706 +#define atomic64_read_unchecked(v) atomic64_read(v)
3707 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3708 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3709 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3710 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3711 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3712 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3713 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3714 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3715 +
3716 #endif /* __powerpc64__ */
3717
3718 #endif /* __KERNEL__ */
3719 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3720 index 9e495c9..b6878e5 100644
3721 --- a/arch/powerpc/include/asm/cache.h
3722 +++ b/arch/powerpc/include/asm/cache.h
3723 @@ -3,6 +3,7 @@
3724
3725 #ifdef __KERNEL__
3726
3727 +#include <linux/const.h>
3728
3729 /* bytes per L1 cache line */
3730 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3731 @@ -22,7 +23,7 @@
3732 #define L1_CACHE_SHIFT 7
3733 #endif
3734
3735 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3736 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3737
3738 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3739
3740 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3741 index 3bf9cca..e7457d0 100644
3742 --- a/arch/powerpc/include/asm/elf.h
3743 +++ b/arch/powerpc/include/asm/elf.h
3744 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3745 the loader. We need to make sure that it is out of the way of the program
3746 that it will "exec", and that there is sufficient room for the brk. */
3747
3748 -extern unsigned long randomize_et_dyn(unsigned long base);
3749 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3750 +#define ELF_ET_DYN_BASE (0x20000000)
3751 +
3752 +#ifdef CONFIG_PAX_ASLR
3753 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3754 +
3755 +#ifdef __powerpc64__
3756 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3757 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3758 +#else
3759 +#define PAX_DELTA_MMAP_LEN 15
3760 +#define PAX_DELTA_STACK_LEN 15
3761 +#endif
3762 +#endif
3763
3764 /*
3765 * Our registers are always unsigned longs, whether we're a 32 bit
3766 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3767 (0x7ff >> (PAGE_SHIFT - 12)) : \
3768 (0x3ffff >> (PAGE_SHIFT - 12)))
3769
3770 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3771 -#define arch_randomize_brk arch_randomize_brk
3772 -
3773 #endif /* __KERNEL__ */
3774
3775 /*
3776 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3777 index 8196e9c..d83a9f3 100644
3778 --- a/arch/powerpc/include/asm/exec.h
3779 +++ b/arch/powerpc/include/asm/exec.h
3780 @@ -4,6 +4,6 @@
3781 #ifndef _ASM_POWERPC_EXEC_H
3782 #define _ASM_POWERPC_EXEC_H
3783
3784 -extern unsigned long arch_align_stack(unsigned long sp);
3785 +#define arch_align_stack(x) ((x) & ~0xfUL)
3786
3787 #endif /* _ASM_POWERPC_EXEC_H */
3788 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3789 index bca8fdc..61e9580 100644
3790 --- a/arch/powerpc/include/asm/kmap_types.h
3791 +++ b/arch/powerpc/include/asm/kmap_types.h
3792 @@ -27,6 +27,7 @@ enum km_type {
3793 KM_PPC_SYNC_PAGE,
3794 KM_PPC_SYNC_ICACHE,
3795 KM_KDB,
3796 + KM_CLEARPAGE,
3797 KM_TYPE_NR
3798 };
3799
3800 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3801 index d4a7f64..451de1c 100644
3802 --- a/arch/powerpc/include/asm/mman.h
3803 +++ b/arch/powerpc/include/asm/mman.h
3804 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3805 }
3806 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3807
3808 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3809 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3810 {
3811 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3812 }
3813 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3814 index f072e97..b436dee 100644
3815 --- a/arch/powerpc/include/asm/page.h
3816 +++ b/arch/powerpc/include/asm/page.h
3817 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3818 * and needs to be executable. This means the whole heap ends
3819 * up being executable.
3820 */
3821 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3822 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3823 +#define VM_DATA_DEFAULT_FLAGS32 \
3824 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3825 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3826
3827 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3828 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3829 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3830 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3831 #endif
3832
3833 +#define ktla_ktva(addr) (addr)
3834 +#define ktva_ktla(addr) (addr)
3835 +
3836 /*
3837 * Use the top bit of the higher-level page table entries to indicate whether
3838 * the entries we point to contain hugepages. This works because we know that
3839 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3840 index fed85e6..da5c71b 100644
3841 --- a/arch/powerpc/include/asm/page_64.h
3842 +++ b/arch/powerpc/include/asm/page_64.h
3843 @@ -146,15 +146,18 @@ do { \
3844 * stack by default, so in the absence of a PT_GNU_STACK program header
3845 * we turn execute permission off.
3846 */
3847 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3848 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3849 +#define VM_STACK_DEFAULT_FLAGS32 \
3850 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3851 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3852
3853 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3854 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3855
3856 +#ifndef CONFIG_PAX_PAGEEXEC
3857 #define VM_STACK_DEFAULT_FLAGS \
3858 (is_32bit_task() ? \
3859 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3860 +#endif
3861
3862 #include <asm-generic/getorder.h>
3863
3864 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3865 index 292725c..f87ae14 100644
3866 --- a/arch/powerpc/include/asm/pgalloc-64.h
3867 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3868 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3869 #ifndef CONFIG_PPC_64K_PAGES
3870
3871 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3872 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3873
3874 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3875 {
3876 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3877 pud_set(pud, (unsigned long)pmd);
3878 }
3879
3880 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 +{
3882 + pud_populate(mm, pud, pmd);
3883 +}
3884 +
3885 #define pmd_populate(mm, pmd, pte_page) \
3886 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3887 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3888 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3889 #else /* CONFIG_PPC_64K_PAGES */
3890
3891 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3892 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3893
3894 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3895 pte_t *pte)
3896 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3897 index 2e0e411..7899c68 100644
3898 --- a/arch/powerpc/include/asm/pgtable.h
3899 +++ b/arch/powerpc/include/asm/pgtable.h
3900 @@ -2,6 +2,7 @@
3901 #define _ASM_POWERPC_PGTABLE_H
3902 #ifdef __KERNEL__
3903
3904 +#include <linux/const.h>
3905 #ifndef __ASSEMBLY__
3906 #include <asm/processor.h> /* For TASK_SIZE */
3907 #include <asm/mmu.h>
3908 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3909 index 4aad413..85d86bf 100644
3910 --- a/arch/powerpc/include/asm/pte-hash32.h
3911 +++ b/arch/powerpc/include/asm/pte-hash32.h
3912 @@ -21,6 +21,7 @@
3913 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3914 #define _PAGE_USER 0x004 /* usermode access allowed */
3915 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3916 +#define _PAGE_EXEC _PAGE_GUARDED
3917 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3918 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3919 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3920 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3921 index 360585d..c3930ef 100644
3922 --- a/arch/powerpc/include/asm/reg.h
3923 +++ b/arch/powerpc/include/asm/reg.h
3924 @@ -212,6 +212,7 @@
3925 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3926 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3927 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3928 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3929 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3930 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3931 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3932 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3933 index 68831e9..379c695 100644
3934 --- a/arch/powerpc/include/asm/thread_info.h
3935 +++ b/arch/powerpc/include/asm/thread_info.h
3936 @@ -91,12 +91,14 @@ static inline struct thread_info *current_thread_info(void)
3937 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3938 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3939 #define TIF_SINGLESTEP 8 /* singlestepping active */
3940 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3941 #define TIF_SECCOMP 10 /* secure computing */
3942 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3943 #define TIF_NOERROR 12 /* Force successful syscall return */
3944 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3945 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3946 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3947 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3948 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3949
3950 /* as above, but as bit values */
3951 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3952 @@ -113,8 +115,10 @@ static inline struct thread_info *current_thread_info(void)
3953 #define _TIF_NOERROR (1<<TIF_NOERROR)
3954 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3955 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3956 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3957 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3958 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3959 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3960 + _TIF_GRSEC_SETXID)
3961
3962 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3963 _TIF_NOTIFY_RESUME)
3964 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3965 index 17bb40c..353c98b 100644
3966 --- a/arch/powerpc/include/asm/uaccess.h
3967 +++ b/arch/powerpc/include/asm/uaccess.h
3968 @@ -13,6 +13,8 @@
3969 #define VERIFY_READ 0
3970 #define VERIFY_WRITE 1
3971
3972 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3973 +
3974 /*
3975 * The fs value determines whether argument validity checking should be
3976 * performed or not. If get_fs() == USER_DS, checking is performed, with
3977 @@ -329,52 +331,6 @@ do { \
3978 extern unsigned long __copy_tofrom_user(void __user *to,
3979 const void __user *from, unsigned long size);
3980
3981 -#ifndef __powerpc64__
3982 -
3983 -static inline unsigned long copy_from_user(void *to,
3984 - const void __user *from, unsigned long n)
3985 -{
3986 - unsigned long over;
3987 -
3988 - if (access_ok(VERIFY_READ, from, n))
3989 - return __copy_tofrom_user((__force void __user *)to, from, n);
3990 - if ((unsigned long)from < TASK_SIZE) {
3991 - over = (unsigned long)from + n - TASK_SIZE;
3992 - return __copy_tofrom_user((__force void __user *)to, from,
3993 - n - over) + over;
3994 - }
3995 - return n;
3996 -}
3997 -
3998 -static inline unsigned long copy_to_user(void __user *to,
3999 - const void *from, unsigned long n)
4000 -{
4001 - unsigned long over;
4002 -
4003 - if (access_ok(VERIFY_WRITE, to, n))
4004 - return __copy_tofrom_user(to, (__force void __user *)from, n);
4005 - if ((unsigned long)to < TASK_SIZE) {
4006 - over = (unsigned long)to + n - TASK_SIZE;
4007 - return __copy_tofrom_user(to, (__force void __user *)from,
4008 - n - over) + over;
4009 - }
4010 - return n;
4011 -}
4012 -
4013 -#else /* __powerpc64__ */
4014 -
4015 -#define __copy_in_user(to, from, size) \
4016 - __copy_tofrom_user((to), (from), (size))
4017 -
4018 -extern unsigned long copy_from_user(void *to, const void __user *from,
4019 - unsigned long n);
4020 -extern unsigned long copy_to_user(void __user *to, const void *from,
4021 - unsigned long n);
4022 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
4023 - unsigned long n);
4024 -
4025 -#endif /* __powerpc64__ */
4026 -
4027 static inline unsigned long __copy_from_user_inatomic(void *to,
4028 const void __user *from, unsigned long n)
4029 {
4030 @@ -398,6 +354,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4031 if (ret == 0)
4032 return 0;
4033 }
4034 +
4035 + if (!__builtin_constant_p(n))
4036 + check_object_size(to, n, false);
4037 +
4038 return __copy_tofrom_user((__force void __user *)to, from, n);
4039 }
4040
4041 @@ -424,6 +384,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4042 if (ret == 0)
4043 return 0;
4044 }
4045 +
4046 + if (!__builtin_constant_p(n))
4047 + check_object_size(from, n, true);
4048 +
4049 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4050 }
4051
4052 @@ -441,6 +405,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4053 return __copy_to_user_inatomic(to, from, size);
4054 }
4055
4056 +#ifndef __powerpc64__
4057 +
4058 +static inline unsigned long __must_check copy_from_user(void *to,
4059 + const void __user *from, unsigned long n)
4060 +{
4061 + unsigned long over;
4062 +
4063 + if ((long)n < 0)
4064 + return n;
4065 +
4066 + if (access_ok(VERIFY_READ, from, n)) {
4067 + if (!__builtin_constant_p(n))
4068 + check_object_size(to, n, false);
4069 + return __copy_tofrom_user((__force void __user *)to, from, n);
4070 + }
4071 + if ((unsigned long)from < TASK_SIZE) {
4072 + over = (unsigned long)from + n - TASK_SIZE;
4073 + if (!__builtin_constant_p(n - over))
4074 + check_object_size(to, n - over, false);
4075 + return __copy_tofrom_user((__force void __user *)to, from,
4076 + n - over) + over;
4077 + }
4078 + return n;
4079 +}
4080 +
4081 +static inline unsigned long __must_check copy_to_user(void __user *to,
4082 + const void *from, unsigned long n)
4083 +{
4084 + unsigned long over;
4085 +
4086 + if ((long)n < 0)
4087 + return n;
4088 +
4089 + if (access_ok(VERIFY_WRITE, to, n)) {
4090 + if (!__builtin_constant_p(n))
4091 + check_object_size(from, n, true);
4092 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4093 + }
4094 + if ((unsigned long)to < TASK_SIZE) {
4095 + over = (unsigned long)to + n - TASK_SIZE;
4096 + if (!__builtin_constant_p(n))
4097 + check_object_size(from, n - over, true);
4098 + return __copy_tofrom_user(to, (__force void __user *)from,
4099 + n - over) + over;
4100 + }
4101 + return n;
4102 +}
4103 +
4104 +#else /* __powerpc64__ */
4105 +
4106 +#define __copy_in_user(to, from, size) \
4107 + __copy_tofrom_user((to), (from), (size))
4108 +
4109 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4110 +{
4111 + if ((long)n < 0 || n > INT_MAX)
4112 + return n;
4113 +
4114 + if (!__builtin_constant_p(n))
4115 + check_object_size(to, n, false);
4116 +
4117 + if (likely(access_ok(VERIFY_READ, from, n)))
4118 + n = __copy_from_user(to, from, n);
4119 + else
4120 + memset(to, 0, n);
4121 + return n;
4122 +}
4123 +
4124 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4125 +{
4126 + if ((long)n < 0 || n > INT_MAX)
4127 + return n;
4128 +
4129 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4130 + if (!__builtin_constant_p(n))
4131 + check_object_size(from, n, true);
4132 + n = __copy_to_user(to, from, n);
4133 + }
4134 + return n;
4135 +}
4136 +
4137 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4138 + unsigned long n);
4139 +
4140 +#endif /* __powerpc64__ */
4141 +
4142 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4143
4144 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4145 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4146 index 7215cc2..a9730c1 100644
4147 --- a/arch/powerpc/kernel/exceptions-64e.S
4148 +++ b/arch/powerpc/kernel/exceptions-64e.S
4149 @@ -661,6 +661,7 @@ storage_fault_common:
4150 std r14,_DAR(r1)
4151 std r15,_DSISR(r1)
4152 addi r3,r1,STACK_FRAME_OVERHEAD
4153 + bl .save_nvgprs
4154 mr r4,r14
4155 mr r5,r15
4156 ld r14,PACA_EXGEN+EX_R14(r13)
4157 @@ -669,8 +670,7 @@ storage_fault_common:
4158 cmpdi r3,0
4159 bne- 1f
4160 b .ret_from_except_lite
4161 -1: bl .save_nvgprs
4162 - mr r5,r3
4163 +1: mr r5,r3
4164 addi r3,r1,STACK_FRAME_OVERHEAD
4165 ld r4,_DAR(r1)
4166 bl .bad_page_fault
4167 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4168 index 1c06d29..c2a339b 100644
4169 --- a/arch/powerpc/kernel/exceptions-64s.S
4170 +++ b/arch/powerpc/kernel/exceptions-64s.S
4171 @@ -888,10 +888,10 @@ handle_page_fault:
4172 11: ld r4,_DAR(r1)
4173 ld r5,_DSISR(r1)
4174 addi r3,r1,STACK_FRAME_OVERHEAD
4175 + bl .save_nvgprs
4176 bl .do_page_fault
4177 cmpdi r3,0
4178 beq+ 12f
4179 - bl .save_nvgprs
4180 mr r5,r3
4181 addi r3,r1,STACK_FRAME_OVERHEAD
4182 lwz r4,_DAR(r1)
4183 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4184 index 2e3200c..72095ce 100644
4185 --- a/arch/powerpc/kernel/module_32.c
4186 +++ b/arch/powerpc/kernel/module_32.c
4187 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4188 me->arch.core_plt_section = i;
4189 }
4190 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4191 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4192 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4193 return -ENOEXEC;
4194 }
4195
4196 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4197
4198 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4199 /* Init, or core PLT? */
4200 - if (location >= mod->module_core
4201 - && location < mod->module_core + mod->core_size)
4202 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4203 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4204 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4205 - else
4206 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4207 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4208 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4209 + else {
4210 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4211 + return ~0UL;
4212 + }
4213
4214 /* Find this entry, or if that fails, the next avail. entry */
4215 while (entry->jump[0]) {
4216 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4217 index 1a1f2dd..f4d1bb4 100644
4218 --- a/arch/powerpc/kernel/process.c
4219 +++ b/arch/powerpc/kernel/process.c
4220 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4221 * Lookup NIP late so we have the best change of getting the
4222 * above info out without failing
4223 */
4224 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4225 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4226 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4227 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4228 #endif
4229 show_stack(current, (unsigned long *) regs->gpr[1]);
4230 if (!user_mode(regs))
4231 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4232 newsp = stack[0];
4233 ip = stack[STACK_FRAME_LR_SAVE];
4234 if (!firstframe || ip != lr) {
4235 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4236 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4237 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4238 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4239 - printk(" (%pS)",
4240 + printk(" (%pA)",
4241 (void *)current->ret_stack[curr_frame].ret);
4242 curr_frame--;
4243 }
4244 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4245 struct pt_regs *regs = (struct pt_regs *)
4246 (sp + STACK_FRAME_OVERHEAD);
4247 lr = regs->link;
4248 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4249 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4250 regs->trap, (void *)regs->nip, (void *)lr);
4251 firstframe = 1;
4252 }
4253 @@ -1246,58 +1246,3 @@ void __ppc64_runlatch_off(void)
4254 mtspr(SPRN_CTRLT, ctrl);
4255 }
4256 #endif /* CONFIG_PPC64 */
4257 -
4258 -unsigned long arch_align_stack(unsigned long sp)
4259 -{
4260 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4261 - sp -= get_random_int() & ~PAGE_MASK;
4262 - return sp & ~0xf;
4263 -}
4264 -
4265 -static inline unsigned long brk_rnd(void)
4266 -{
4267 - unsigned long rnd = 0;
4268 -
4269 - /* 8MB for 32bit, 1GB for 64bit */
4270 - if (is_32bit_task())
4271 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4272 - else
4273 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4274 -
4275 - return rnd << PAGE_SHIFT;
4276 -}
4277 -
4278 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4279 -{
4280 - unsigned long base = mm->brk;
4281 - unsigned long ret;
4282 -
4283 -#ifdef CONFIG_PPC_STD_MMU_64
4284 - /*
4285 - * If we are using 1TB segments and we are allowed to randomise
4286 - * the heap, we can put it above 1TB so it is backed by a 1TB
4287 - * segment. Otherwise the heap will be in the bottom 1TB
4288 - * which always uses 256MB segments and this may result in a
4289 - * performance penalty.
4290 - */
4291 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4292 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4293 -#endif
4294 -
4295 - ret = PAGE_ALIGN(base + brk_rnd());
4296 -
4297 - if (ret < mm->brk)
4298 - return mm->brk;
4299 -
4300 - return ret;
4301 -}
4302 -
4303 -unsigned long randomize_et_dyn(unsigned long base)
4304 -{
4305 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4306 -
4307 - if (ret < base)
4308 - return base;
4309 -
4310 - return ret;
4311 -}
4312 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4313 index c10fc28..c4ef063 100644
4314 --- a/arch/powerpc/kernel/ptrace.c
4315 +++ b/arch/powerpc/kernel/ptrace.c
4316 @@ -1660,6 +1660,10 @@ long arch_ptrace(struct task_struct *child, long request,
4317 return ret;
4318 }
4319
4320 +#ifdef CONFIG_GRKERNSEC_SETXID
4321 +extern void gr_delayed_cred_worker(void);
4322 +#endif
4323 +
4324 /*
4325 * We must return the syscall number to actually look up in the table.
4326 * This can be -1L to skip running any syscall at all.
4327 @@ -1670,6 +1674,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4328
4329 secure_computing_strict(regs->gpr[0]);
4330
4331 +#ifdef CONFIG_GRKERNSEC_SETXID
4332 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4333 + gr_delayed_cred_worker();
4334 +#endif
4335 +
4336 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4337 tracehook_report_syscall_entry(regs))
4338 /*
4339 @@ -1704,6 +1713,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4340 {
4341 int step;
4342
4343 +#ifdef CONFIG_GRKERNSEC_SETXID
4344 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4345 + gr_delayed_cred_worker();
4346 +#endif
4347 +
4348 audit_syscall_exit(regs);
4349
4350 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4351 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4352 index 8b4c049..dcd6ef3 100644
4353 --- a/arch/powerpc/kernel/signal_32.c
4354 +++ b/arch/powerpc/kernel/signal_32.c
4355 @@ -852,7 +852,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4356 /* Save user registers on the stack */
4357 frame = &rt_sf->uc.uc_mcontext;
4358 addr = frame;
4359 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4360 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4361 if (save_user_regs(regs, frame, 0, 1))
4362 goto badframe;
4363 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4364 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4365 index d183f87..1867f1a 100644
4366 --- a/arch/powerpc/kernel/signal_64.c
4367 +++ b/arch/powerpc/kernel/signal_64.c
4368 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4369 current->thread.fpscr.val = 0;
4370
4371 /* Set up to return from userspace. */
4372 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4373 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4374 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4375 } else {
4376 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4377 diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
4378 index f2496f2..4e3cc47 100644
4379 --- a/arch/powerpc/kernel/syscalls.c
4380 +++ b/arch/powerpc/kernel/syscalls.c
4381 @@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
4382 long ret;
4383
4384 if (personality(current->personality) == PER_LINUX32
4385 - && personality == PER_LINUX)
4386 - personality = PER_LINUX32;
4387 + && personality(personality) == PER_LINUX)
4388 + personality = (personality & ~PER_MASK) | PER_LINUX32;
4389 ret = sys_personality(personality);
4390 - if (ret == PER_LINUX32)
4391 - ret = PER_LINUX;
4392 + if (personality(ret) == PER_LINUX32)
4393 + ret = (ret & ~PER_MASK) | PER_LINUX;
4394 return ret;
4395 }
4396 #endif
4397 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4398 index ae0843f..f16372c 100644
4399 --- a/arch/powerpc/kernel/traps.c
4400 +++ b/arch/powerpc/kernel/traps.c
4401 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4402 return flags;
4403 }
4404
4405 +extern void gr_handle_kernel_exploit(void);
4406 +
4407 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4408 int signr)
4409 {
4410 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4411 panic("Fatal exception in interrupt");
4412 if (panic_on_oops)
4413 panic("Fatal exception");
4414 +
4415 + gr_handle_kernel_exploit();
4416 +
4417 do_exit(signr);
4418 }
4419
4420 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4421 index 9eb5b9b..e45498a 100644
4422 --- a/arch/powerpc/kernel/vdso.c
4423 +++ b/arch/powerpc/kernel/vdso.c
4424 @@ -34,6 +34,7 @@
4425 #include <asm/firmware.h>
4426 #include <asm/vdso.h>
4427 #include <asm/vdso_datapage.h>
4428 +#include <asm/mman.h>
4429
4430 #include "setup.h"
4431
4432 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4433 vdso_base = VDSO32_MBASE;
4434 #endif
4435
4436 - current->mm->context.vdso_base = 0;
4437 + current->mm->context.vdso_base = ~0UL;
4438
4439 /* vDSO has a problem and was disabled, just don't "enable" it for the
4440 * process
4441 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4442 vdso_base = get_unmapped_area(NULL, vdso_base,
4443 (vdso_pages << PAGE_SHIFT) +
4444 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4445 - 0, 0);
4446 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4447 if (IS_ERR_VALUE(vdso_base)) {
4448 rc = vdso_base;
4449 goto fail_mmapsem;
4450 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4451 index 5eea6f3..5d10396 100644
4452 --- a/arch/powerpc/lib/usercopy_64.c
4453 +++ b/arch/powerpc/lib/usercopy_64.c
4454 @@ -9,22 +9,6 @@
4455 #include <linux/module.h>
4456 #include <asm/uaccess.h>
4457
4458 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4459 -{
4460 - if (likely(access_ok(VERIFY_READ, from, n)))
4461 - n = __copy_from_user(to, from, n);
4462 - else
4463 - memset(to, 0, n);
4464 - return n;
4465 -}
4466 -
4467 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4468 -{
4469 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4470 - n = __copy_to_user(to, from, n);
4471 - return n;
4472 -}
4473 -
4474 unsigned long copy_in_user(void __user *to, const void __user *from,
4475 unsigned long n)
4476 {
4477 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4478 return n;
4479 }
4480
4481 -EXPORT_SYMBOL(copy_from_user);
4482 -EXPORT_SYMBOL(copy_to_user);
4483 EXPORT_SYMBOL(copy_in_user);
4484
4485 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4486 index 08ffcf5..a0ab912 100644
4487 --- a/arch/powerpc/mm/fault.c
4488 +++ b/arch/powerpc/mm/fault.c
4489 @@ -32,6 +32,10 @@
4490 #include <linux/perf_event.h>
4491 #include <linux/magic.h>
4492 #include <linux/ratelimit.h>
4493 +#include <linux/slab.h>
4494 +#include <linux/pagemap.h>
4495 +#include <linux/compiler.h>
4496 +#include <linux/unistd.h>
4497
4498 #include <asm/firmware.h>
4499 #include <asm/page.h>
4500 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4501 }
4502 #endif
4503
4504 +#ifdef CONFIG_PAX_PAGEEXEC
4505 +/*
4506 + * PaX: decide what to do with offenders (regs->nip = fault address)
4507 + *
4508 + * returns 1 when task should be killed
4509 + */
4510 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4511 +{
4512 + return 1;
4513 +}
4514 +
4515 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4516 +{
4517 + unsigned long i;
4518 +
4519 + printk(KERN_ERR "PAX: bytes at PC: ");
4520 + for (i = 0; i < 5; i++) {
4521 + unsigned int c;
4522 + if (get_user(c, (unsigned int __user *)pc+i))
4523 + printk(KERN_CONT "???????? ");
4524 + else
4525 + printk(KERN_CONT "%08x ", c);
4526 + }
4527 + printk("\n");
4528 +}
4529 +#endif
4530 +
4531 /*
4532 * Check whether the instruction at regs->nip is a store using
4533 * an update addressing form which will update r1.
4534 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4535 * indicate errors in DSISR but can validly be set in SRR1.
4536 */
4537 if (trap == 0x400)
4538 - error_code &= 0x48200000;
4539 + error_code &= 0x58200000;
4540 else
4541 is_write = error_code & DSISR_ISSTORE;
4542 #else
4543 @@ -366,7 +397,7 @@ good_area:
4544 * "undefined". Of those that can be set, this is the only
4545 * one which seems bad.
4546 */
4547 - if (error_code & 0x10000000)
4548 + if (error_code & DSISR_GUARDED)
4549 /* Guarded storage error. */
4550 goto bad_area;
4551 #endif /* CONFIG_8xx */
4552 @@ -381,7 +412,7 @@ good_area:
4553 * processors use the same I/D cache coherency mechanism
4554 * as embedded.
4555 */
4556 - if (error_code & DSISR_PROTFAULT)
4557 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4558 goto bad_area;
4559 #endif /* CONFIG_PPC_STD_MMU */
4560
4561 @@ -463,6 +494,23 @@ bad_area:
4562 bad_area_nosemaphore:
4563 /* User mode accesses cause a SIGSEGV */
4564 if (user_mode(regs)) {
4565 +
4566 +#ifdef CONFIG_PAX_PAGEEXEC
4567 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4568 +#ifdef CONFIG_PPC_STD_MMU
4569 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4570 +#else
4571 + if (is_exec && regs->nip == address) {
4572 +#endif
4573 + switch (pax_handle_fetch_fault(regs)) {
4574 + }
4575 +
4576 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4577 + do_group_exit(SIGKILL);
4578 + }
4579 + }
4580 +#endif
4581 +
4582 _exception(SIGSEGV, regs, code, address);
4583 return 0;
4584 }
4585 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4586 index 67a42ed..1c7210c 100644
4587 --- a/arch/powerpc/mm/mmap_64.c
4588 +++ b/arch/powerpc/mm/mmap_64.c
4589 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4590 */
4591 if (mmap_is_legacy()) {
4592 mm->mmap_base = TASK_UNMAPPED_BASE;
4593 +
4594 +#ifdef CONFIG_PAX_RANDMMAP
4595 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4596 + mm->mmap_base += mm->delta_mmap;
4597 +#endif
4598 +
4599 mm->get_unmapped_area = arch_get_unmapped_area;
4600 mm->unmap_area = arch_unmap_area;
4601 } else {
4602 mm->mmap_base = mmap_base();
4603 +
4604 +#ifdef CONFIG_PAX_RANDMMAP
4605 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4606 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4607 +#endif
4608 +
4609 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4610 mm->unmap_area = arch_unmap_area_topdown;
4611 }
4612 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4613 index 73709f7..6b90313 100644
4614 --- a/arch/powerpc/mm/slice.c
4615 +++ b/arch/powerpc/mm/slice.c
4616 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4617 if ((mm->task_size - len) < addr)
4618 return 0;
4619 vma = find_vma(mm, addr);
4620 - return (!vma || (addr + len) <= vma->vm_start);
4621 + return check_heap_stack_gap(vma, addr, len);
4622 }
4623
4624 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4625 @@ -256,7 +256,7 @@ full_search:
4626 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4627 continue;
4628 }
4629 - if (!vma || addr + len <= vma->vm_start) {
4630 + if (check_heap_stack_gap(vma, addr, len)) {
4631 /*
4632 * Remember the place where we stopped the search:
4633 */
4634 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4635 }
4636 }
4637
4638 - addr = mm->mmap_base;
4639 - while (addr > len) {
4640 + if (mm->mmap_base < len)
4641 + addr = -ENOMEM;
4642 + else
4643 + addr = mm->mmap_base - len;
4644 +
4645 + while (!IS_ERR_VALUE(addr)) {
4646 /* Go down by chunk size */
4647 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4648 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4649
4650 /* Check for hit with different page size */
4651 mask = slice_range_to_mask(addr, len);
4652 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4653 * return with success:
4654 */
4655 vma = find_vma(mm, addr);
4656 - if (!vma || (addr + len) <= vma->vm_start) {
4657 + if (check_heap_stack_gap(vma, addr, len)) {
4658 /* remember the address as a hint for next time */
4659 if (use_cache)
4660 mm->free_area_cache = addr;
4661 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4662 mm->cached_hole_size = vma->vm_start - addr;
4663
4664 /* try just below the current vma->vm_start */
4665 - addr = vma->vm_start;
4666 + addr = skip_heap_stack_gap(vma, len);
4667 }
4668
4669 /*
4670 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4671 if (fixed && addr > (mm->task_size - len))
4672 return -EINVAL;
4673
4674 +#ifdef CONFIG_PAX_RANDMMAP
4675 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4676 + addr = 0;
4677 +#endif
4678 +
4679 /* If hint, make sure it matches our alignment restrictions */
4680 if (!fixed && addr) {
4681 addr = _ALIGN_UP(addr, 1ul << pshift);
4682 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4683 index 748347b..81bc6c7 100644
4684 --- a/arch/s390/include/asm/atomic.h
4685 +++ b/arch/s390/include/asm/atomic.h
4686 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4687 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4688 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4689
4690 +#define atomic64_read_unchecked(v) atomic64_read(v)
4691 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4692 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4693 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4694 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4695 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4696 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4697 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4698 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4699 +
4700 #define smp_mb__before_atomic_dec() smp_mb()
4701 #define smp_mb__after_atomic_dec() smp_mb()
4702 #define smp_mb__before_atomic_inc() smp_mb()
4703 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4704 index 2a30d5a..5e5586f 100644
4705 --- a/arch/s390/include/asm/cache.h
4706 +++ b/arch/s390/include/asm/cache.h
4707 @@ -11,8 +11,10 @@
4708 #ifndef __ARCH_S390_CACHE_H
4709 #define __ARCH_S390_CACHE_H
4710
4711 -#define L1_CACHE_BYTES 256
4712 +#include <linux/const.h>
4713 +
4714 #define L1_CACHE_SHIFT 8
4715 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4716 #define NET_SKB_PAD 32
4717
4718 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4719 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4720 index 06151e6..598f9a5 100644
4721 --- a/arch/s390/include/asm/elf.h
4722 +++ b/arch/s390/include/asm/elf.h
4723 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4724 the loader. We need to make sure that it is out of the way of the program
4725 that it will "exec", and that there is sufficient room for the brk. */
4726
4727 -extern unsigned long randomize_et_dyn(unsigned long base);
4728 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4729 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4730 +
4731 +#ifdef CONFIG_PAX_ASLR
4732 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4733 +
4734 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4735 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4736 +#endif
4737
4738 /* This yields a mask that user programs can use to figure out what
4739 instruction set this CPU supports. */
4740 @@ -182,7 +188,8 @@ extern char elf_platform[];
4741 #define ELF_PLATFORM (elf_platform)
4742
4743 #ifndef CONFIG_64BIT
4744 -#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
4745 +#define SET_PERSONALITY(ex) \
4746 + set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
4747 #else /* CONFIG_64BIT */
4748 #define SET_PERSONALITY(ex) \
4749 do { \
4750 @@ -210,7 +217,4 @@ struct linux_binprm;
4751 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4752 int arch_setup_additional_pages(struct linux_binprm *, int);
4753
4754 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4755 -#define arch_randomize_brk arch_randomize_brk
4756 -
4757 #endif
4758 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4759 index c4a93d6..4d2a9b4 100644
4760 --- a/arch/s390/include/asm/exec.h
4761 +++ b/arch/s390/include/asm/exec.h
4762 @@ -7,6 +7,6 @@
4763 #ifndef __ASM_EXEC_H
4764 #define __ASM_EXEC_H
4765
4766 -extern unsigned long arch_align_stack(unsigned long sp);
4767 +#define arch_align_stack(x) ((x) & ~0xfUL)
4768
4769 #endif /* __ASM_EXEC_H */
4770 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4771 index 1f3a79b..44d7f9c 100644
4772 --- a/arch/s390/include/asm/uaccess.h
4773 +++ b/arch/s390/include/asm/uaccess.h
4774 @@ -241,6 +241,10 @@ static inline unsigned long __must_check
4775 copy_to_user(void __user *to, const void *from, unsigned long n)
4776 {
4777 might_fault();
4778 +
4779 + if ((long)n < 0)
4780 + return n;
4781 +
4782 if (access_ok(VERIFY_WRITE, to, n))
4783 n = __copy_to_user(to, from, n);
4784 return n;
4785 @@ -266,6 +270,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4786 static inline unsigned long __must_check
4787 __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789 + if ((long)n < 0)
4790 + return n;
4791 +
4792 if (__builtin_constant_p(n) && (n <= 256))
4793 return uaccess.copy_from_user_small(n, from, to);
4794 else
4795 @@ -297,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4796 static inline unsigned long __must_check
4797 copy_from_user(void *to, const void __user *from, unsigned long n)
4798 {
4799 - unsigned int sz = __compiletime_object_size(to);
4800 + size_t sz = __compiletime_object_size(to);
4801
4802 might_fault();
4803 - if (unlikely(sz != -1 && sz < n)) {
4804 +
4805 + if ((long)n < 0)
4806 + return n;
4807 +
4808 + if (unlikely(sz != (size_t)-1 && sz < n)) {
4809 copy_from_user_overflow();
4810 return n;
4811 }
4812 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4813 index dfcb343..eda788a 100644
4814 --- a/arch/s390/kernel/module.c
4815 +++ b/arch/s390/kernel/module.c
4816 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4817
4818 /* Increase core size by size of got & plt and set start
4819 offsets for got and plt. */
4820 - me->core_size = ALIGN(me->core_size, 4);
4821 - me->arch.got_offset = me->core_size;
4822 - me->core_size += me->arch.got_size;
4823 - me->arch.plt_offset = me->core_size;
4824 - me->core_size += me->arch.plt_size;
4825 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4826 + me->arch.got_offset = me->core_size_rw;
4827 + me->core_size_rw += me->arch.got_size;
4828 + me->arch.plt_offset = me->core_size_rx;
4829 + me->core_size_rx += me->arch.plt_size;
4830 return 0;
4831 }
4832
4833 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4834 if (info->got_initialized == 0) {
4835 Elf_Addr *gotent;
4836
4837 - gotent = me->module_core + me->arch.got_offset +
4838 + gotent = me->module_core_rw + me->arch.got_offset +
4839 info->got_offset;
4840 *gotent = val;
4841 info->got_initialized = 1;
4842 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4843 else if (r_type == R_390_GOTENT ||
4844 r_type == R_390_GOTPLTENT)
4845 *(unsigned int *) loc =
4846 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4847 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4848 else if (r_type == R_390_GOT64 ||
4849 r_type == R_390_GOTPLT64)
4850 *(unsigned long *) loc = val;
4851 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4852 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4853 if (info->plt_initialized == 0) {
4854 unsigned int *ip;
4855 - ip = me->module_core + me->arch.plt_offset +
4856 + ip = me->module_core_rx + me->arch.plt_offset +
4857 info->plt_offset;
4858 #ifndef CONFIG_64BIT
4859 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4860 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4861 val - loc + 0xffffUL < 0x1ffffeUL) ||
4862 (r_type == R_390_PLT32DBL &&
4863 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4864 - val = (Elf_Addr) me->module_core +
4865 + val = (Elf_Addr) me->module_core_rx +
4866 me->arch.plt_offset +
4867 info->plt_offset;
4868 val += rela->r_addend - loc;
4869 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4870 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4871 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4872 val = val + rela->r_addend -
4873 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4874 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4875 if (r_type == R_390_GOTOFF16)
4876 *(unsigned short *) loc = val;
4877 else if (r_type == R_390_GOTOFF32)
4878 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4879 break;
4880 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4881 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4882 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4883 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4884 rela->r_addend - loc;
4885 if (r_type == R_390_GOTPC)
4886 *(unsigned int *) loc = val;
4887 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4888 index 60055ce..ee4b252 100644
4889 --- a/arch/s390/kernel/process.c
4890 +++ b/arch/s390/kernel/process.c
4891 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4892 }
4893 return 0;
4894 }
4895 -
4896 -unsigned long arch_align_stack(unsigned long sp)
4897 -{
4898 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4899 - sp -= get_random_int() & ~PAGE_MASK;
4900 - return sp & ~0xf;
4901 -}
4902 -
4903 -static inline unsigned long brk_rnd(void)
4904 -{
4905 - /* 8MB for 32bit, 1GB for 64bit */
4906 - if (is_32bit_task())
4907 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4908 - else
4909 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4910 -}
4911 -
4912 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4913 -{
4914 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4915 -
4916 - if (ret < mm->brk)
4917 - return mm->brk;
4918 - return ret;
4919 -}
4920 -
4921 -unsigned long randomize_et_dyn(unsigned long base)
4922 -{
4923 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4924 -
4925 - if (!(current->flags & PF_RANDOMIZE))
4926 - return base;
4927 - if (ret < base)
4928 - return base;
4929 - return ret;
4930 -}
4931 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4932 index a64fe53..5c66963 100644
4933 --- a/arch/s390/mm/mmap.c
4934 +++ b/arch/s390/mm/mmap.c
4935 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4936 */
4937 if (mmap_is_legacy()) {
4938 mm->mmap_base = TASK_UNMAPPED_BASE;
4939 +
4940 +#ifdef CONFIG_PAX_RANDMMAP
4941 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4942 + mm->mmap_base += mm->delta_mmap;
4943 +#endif
4944 +
4945 mm->get_unmapped_area = arch_get_unmapped_area;
4946 mm->unmap_area = arch_unmap_area;
4947 } else {
4948 mm->mmap_base = mmap_base();
4949 +
4950 +#ifdef CONFIG_PAX_RANDMMAP
4951 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4952 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4953 +#endif
4954 +
4955 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4956 mm->unmap_area = arch_unmap_area_topdown;
4957 }
4958 @@ -174,10 +186,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4959 */
4960 if (mmap_is_legacy()) {
4961 mm->mmap_base = TASK_UNMAPPED_BASE;
4962 +
4963 +#ifdef CONFIG_PAX_RANDMMAP
4964 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4965 + mm->mmap_base += mm->delta_mmap;
4966 +#endif
4967 +
4968 mm->get_unmapped_area = s390_get_unmapped_area;
4969 mm->unmap_area = arch_unmap_area;
4970 } else {
4971 mm->mmap_base = mmap_base();
4972 +
4973 +#ifdef CONFIG_PAX_RANDMMAP
4974 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4975 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4976 +#endif
4977 +
4978 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4979 mm->unmap_area = arch_unmap_area_topdown;
4980 }
4981 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4982 index ae3d59f..f65f075 100644
4983 --- a/arch/score/include/asm/cache.h
4984 +++ b/arch/score/include/asm/cache.h
4985 @@ -1,7 +1,9 @@
4986 #ifndef _ASM_SCORE_CACHE_H
4987 #define _ASM_SCORE_CACHE_H
4988
4989 +#include <linux/const.h>
4990 +
4991 #define L1_CACHE_SHIFT 4
4992 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4993 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4994
4995 #endif /* _ASM_SCORE_CACHE_H */
4996 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4997 index f9f3cd5..58ff438 100644
4998 --- a/arch/score/include/asm/exec.h
4999 +++ b/arch/score/include/asm/exec.h
5000 @@ -1,6 +1,6 @@
5001 #ifndef _ASM_SCORE_EXEC_H
5002 #define _ASM_SCORE_EXEC_H
5003
5004 -extern unsigned long arch_align_stack(unsigned long sp);
5005 +#define arch_align_stack(x) (x)
5006
5007 #endif /* _ASM_SCORE_EXEC_H */
5008 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5009 index 2707023..1c2a3b7 100644
5010 --- a/arch/score/kernel/process.c
5011 +++ b/arch/score/kernel/process.c
5012 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
5013
5014 return task_pt_regs(task)->cp0_epc;
5015 }
5016 -
5017 -unsigned long arch_align_stack(unsigned long sp)
5018 -{
5019 - return sp;
5020 -}
5021 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5022 index ef9e555..331bd29 100644
5023 --- a/arch/sh/include/asm/cache.h
5024 +++ b/arch/sh/include/asm/cache.h
5025 @@ -9,10 +9,11 @@
5026 #define __ASM_SH_CACHE_H
5027 #ifdef __KERNEL__
5028
5029 +#include <linux/const.h>
5030 #include <linux/init.h>
5031 #include <cpu/cache.h>
5032
5033 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5034 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5035
5036 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5037
5038 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5039 index afeb710..d1d1289 100644
5040 --- a/arch/sh/mm/mmap.c
5041 +++ b/arch/sh/mm/mmap.c
5042 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5043 addr = PAGE_ALIGN(addr);
5044
5045 vma = find_vma(mm, addr);
5046 - if (TASK_SIZE - len >= addr &&
5047 - (!vma || addr + len <= vma->vm_start))
5048 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5049 return addr;
5050 }
5051
5052 @@ -106,7 +105,7 @@ full_search:
5053 }
5054 return -ENOMEM;
5055 }
5056 - if (likely(!vma || addr + len <= vma->vm_start)) {
5057 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5058 /*
5059 * Remember the place where we stopped the search:
5060 */
5061 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5062 addr = PAGE_ALIGN(addr);
5063
5064 vma = find_vma(mm, addr);
5065 - if (TASK_SIZE - len >= addr &&
5066 - (!vma || addr + len <= vma->vm_start))
5067 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5068 return addr;
5069 }
5070
5071 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5072 /* make sure it can fit in the remaining address space */
5073 if (likely(addr > len)) {
5074 vma = find_vma(mm, addr-len);
5075 - if (!vma || addr <= vma->vm_start) {
5076 + if (check_heap_stack_gap(vma, addr - len, len)) {
5077 /* remember the address as a hint for next time */
5078 return (mm->free_area_cache = addr-len);
5079 }
5080 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5081 if (unlikely(mm->mmap_base < len))
5082 goto bottomup;
5083
5084 - addr = mm->mmap_base-len;
5085 - if (do_colour_align)
5086 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5087 + addr = mm->mmap_base - len;
5088
5089 do {
5090 + if (do_colour_align)
5091 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5092 /*
5093 * Lookup failure means no vma is above this address,
5094 * else if new region fits below vma->vm_start,
5095 * return with success:
5096 */
5097 vma = find_vma(mm, addr);
5098 - if (likely(!vma || addr+len <= vma->vm_start)) {
5099 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5100 /* remember the address as a hint for next time */
5101 return (mm->free_area_cache = addr);
5102 }
5103 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5104 mm->cached_hole_size = vma->vm_start - addr;
5105
5106 /* try just below the current vma->vm_start */
5107 - addr = vma->vm_start-len;
5108 - if (do_colour_align)
5109 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5110 - } while (likely(len < vma->vm_start));
5111 + addr = skip_heap_stack_gap(vma, len);
5112 + } while (!IS_ERR_VALUE(addr));
5113
5114 bottomup:
5115 /*
5116 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5117 index ce35a1c..2e7b8f9 100644
5118 --- a/arch/sparc/include/asm/atomic_64.h
5119 +++ b/arch/sparc/include/asm/atomic_64.h
5120 @@ -14,18 +14,40 @@
5121 #define ATOMIC64_INIT(i) { (i) }
5122
5123 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5124 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5125 +{
5126 + return v->counter;
5127 +}
5128 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5129 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5130 +{
5131 + return v->counter;
5132 +}
5133
5134 #define atomic_set(v, i) (((v)->counter) = i)
5135 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5136 +{
5137 + v->counter = i;
5138 +}
5139 #define atomic64_set(v, i) (((v)->counter) = i)
5140 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5141 +{
5142 + v->counter = i;
5143 +}
5144
5145 extern void atomic_add(int, atomic_t *);
5146 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5147 extern void atomic64_add(long, atomic64_t *);
5148 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5149 extern void atomic_sub(int, atomic_t *);
5150 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5151 extern void atomic64_sub(long, atomic64_t *);
5152 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5153
5154 extern int atomic_add_ret(int, atomic_t *);
5155 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5156 extern long atomic64_add_ret(long, atomic64_t *);
5157 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5158 extern int atomic_sub_ret(int, atomic_t *);
5159 extern long atomic64_sub_ret(long, atomic64_t *);
5160
5161 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5162 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5163
5164 #define atomic_inc_return(v) atomic_add_ret(1, v)
5165 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5166 +{
5167 + return atomic_add_ret_unchecked(1, v);
5168 +}
5169 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5170 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5171 +{
5172 + return atomic64_add_ret_unchecked(1, v);
5173 +}
5174
5175 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5176 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5177
5178 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5179 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5180 +{
5181 + return atomic_add_ret_unchecked(i, v);
5182 +}
5183 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5184 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5185 +{
5186 + return atomic64_add_ret_unchecked(i, v);
5187 +}
5188
5189 /*
5190 * atomic_inc_and_test - increment and test
5191 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5192 * other cases.
5193 */
5194 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5195 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5196 +{
5197 + return atomic_inc_return_unchecked(v) == 0;
5198 +}
5199 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5200
5201 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5202 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5203 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5204
5205 #define atomic_inc(v) atomic_add(1, v)
5206 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5207 +{
5208 + atomic_add_unchecked(1, v);
5209 +}
5210 #define atomic64_inc(v) atomic64_add(1, v)
5211 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5212 +{
5213 + atomic64_add_unchecked(1, v);
5214 +}
5215
5216 #define atomic_dec(v) atomic_sub(1, v)
5217 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5218 +{
5219 + atomic_sub_unchecked(1, v);
5220 +}
5221 #define atomic64_dec(v) atomic64_sub(1, v)
5222 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5223 +{
5224 + atomic64_sub_unchecked(1, v);
5225 +}
5226
5227 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5228 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5229
5230 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5231 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5232 +{
5233 + return cmpxchg(&v->counter, old, new);
5234 +}
5235 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5236 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5237 +{
5238 + return xchg(&v->counter, new);
5239 +}
5240
5241 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5242 {
5243 - int c, old;
5244 + int c, old, new;
5245 c = atomic_read(v);
5246 for (;;) {
5247 - if (unlikely(c == (u)))
5248 + if (unlikely(c == u))
5249 break;
5250 - old = atomic_cmpxchg((v), c, c + (a));
5251 +
5252 + asm volatile("addcc %2, %0, %0\n"
5253 +
5254 +#ifdef CONFIG_PAX_REFCOUNT
5255 + "tvs %%icc, 6\n"
5256 +#endif
5257 +
5258 + : "=r" (new)
5259 + : "0" (c), "ir" (a)
5260 + : "cc");
5261 +
5262 + old = atomic_cmpxchg(v, c, new);
5263 if (likely(old == c))
5264 break;
5265 c = old;
5266 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5267 #define atomic64_cmpxchg(v, o, n) \
5268 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5269 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5270 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5271 +{
5272 + return xchg(&v->counter, new);
5273 +}
5274
5275 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5276 {
5277 - long c, old;
5278 + long c, old, new;
5279 c = atomic64_read(v);
5280 for (;;) {
5281 - if (unlikely(c == (u)))
5282 + if (unlikely(c == u))
5283 break;
5284 - old = atomic64_cmpxchg((v), c, c + (a));
5285 +
5286 + asm volatile("addcc %2, %0, %0\n"
5287 +
5288 +#ifdef CONFIG_PAX_REFCOUNT
5289 + "tvs %%xcc, 6\n"
5290 +#endif
5291 +
5292 + : "=r" (new)
5293 + : "0" (c), "ir" (a)
5294 + : "cc");
5295 +
5296 + old = atomic64_cmpxchg(v, c, new);
5297 if (likely(old == c))
5298 break;
5299 c = old;
5300 }
5301 - return c != (u);
5302 + return c != u;
5303 }
5304
5305 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5306 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5307 index 5bb6991..5c2132e 100644
5308 --- a/arch/sparc/include/asm/cache.h
5309 +++ b/arch/sparc/include/asm/cache.h
5310 @@ -7,10 +7,12 @@
5311 #ifndef _SPARC_CACHE_H
5312 #define _SPARC_CACHE_H
5313
5314 +#include <linux/const.h>
5315 +
5316 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5317
5318 #define L1_CACHE_SHIFT 5
5319 -#define L1_CACHE_BYTES 32
5320 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5321
5322 #ifdef CONFIG_SPARC32
5323 #define SMP_CACHE_BYTES_SHIFT 5
5324 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5325 index 2d4d755..81b6662 100644
5326 --- a/arch/sparc/include/asm/elf_32.h
5327 +++ b/arch/sparc/include/asm/elf_32.h
5328 @@ -114,6 +114,13 @@ typedef struct {
5329
5330 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5331
5332 +#ifdef CONFIG_PAX_ASLR
5333 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5334 +
5335 +#define PAX_DELTA_MMAP_LEN 16
5336 +#define PAX_DELTA_STACK_LEN 16
5337 +#endif
5338 +
5339 /* This yields a mask that user programs can use to figure out what
5340 instruction set this cpu supports. This can NOT be done in userspace
5341 on Sparc. */
5342 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5343 index 7df8b7f..4946269 100644
5344 --- a/arch/sparc/include/asm/elf_64.h
5345 +++ b/arch/sparc/include/asm/elf_64.h
5346 @@ -180,6 +180,13 @@ typedef struct {
5347 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5348 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5349
5350 +#ifdef CONFIG_PAX_ASLR
5351 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5352 +
5353 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5354 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5355 +#endif
5356 +
5357 extern unsigned long sparc64_elf_hwcap;
5358 #define ELF_HWCAP sparc64_elf_hwcap
5359
5360 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5361 index e5b169b46..e90b4fa 100644
5362 --- a/arch/sparc/include/asm/pgalloc_32.h
5363 +++ b/arch/sparc/include/asm/pgalloc_32.h
5364 @@ -46,6 +46,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
5365 }
5366
5367 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5368 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
5371 unsigned long address)
5372 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5373 index 40b2d7a..22a665b 100644
5374 --- a/arch/sparc/include/asm/pgalloc_64.h
5375 +++ b/arch/sparc/include/asm/pgalloc_64.h
5376 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5377 }
5378
5379 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5380 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5381
5382 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5383 {
5384 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5385 index cbbbed5..97f72f9 100644
5386 --- a/arch/sparc/include/asm/pgtable_32.h
5387 +++ b/arch/sparc/include/asm/pgtable_32.h
5388 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
5389 #define PAGE_SHARED SRMMU_PAGE_SHARED
5390 #define PAGE_COPY SRMMU_PAGE_COPY
5391 #define PAGE_READONLY SRMMU_PAGE_RDONLY
5392 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
5393 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
5394 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
5395 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
5396
5397 /* Top-level page directory */
5398 @@ -61,18 +64,18 @@ extern unsigned long ptr_in_current_pgd;
5399
5400 /* xwr */
5401 #define __P000 PAGE_NONE
5402 -#define __P001 PAGE_READONLY
5403 -#define __P010 PAGE_COPY
5404 -#define __P011 PAGE_COPY
5405 +#define __P001 PAGE_READONLY_NOEXEC
5406 +#define __P010 PAGE_COPY_NOEXEC
5407 +#define __P011 PAGE_COPY_NOEXEC
5408 #define __P100 PAGE_READONLY
5409 #define __P101 PAGE_READONLY
5410 #define __P110 PAGE_COPY
5411 #define __P111 PAGE_COPY
5412
5413 #define __S000 PAGE_NONE
5414 -#define __S001 PAGE_READONLY
5415 -#define __S010 PAGE_SHARED
5416 -#define __S011 PAGE_SHARED
5417 +#define __S001 PAGE_READONLY_NOEXEC
5418 +#define __S010 PAGE_SHARED_NOEXEC
5419 +#define __S011 PAGE_SHARED_NOEXEC
5420 #define __S100 PAGE_READONLY
5421 #define __S101 PAGE_READONLY
5422 #define __S110 PAGE_SHARED
5423 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5424 index 79da178..c2eede8 100644
5425 --- a/arch/sparc/include/asm/pgtsrmmu.h
5426 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5427 @@ -115,6 +115,11 @@
5428 SRMMU_EXEC | SRMMU_REF)
5429 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5430 SRMMU_EXEC | SRMMU_REF)
5431 +
5432 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5433 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5434 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5435 +
5436 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5437 SRMMU_DIRTY | SRMMU_REF)
5438
5439 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5440 index 9689176..63c18ea 100644
5441 --- a/arch/sparc/include/asm/spinlock_64.h
5442 +++ b/arch/sparc/include/asm/spinlock_64.h
5443 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5444
5445 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5446
5447 -static void inline arch_read_lock(arch_rwlock_t *lock)
5448 +static inline void arch_read_lock(arch_rwlock_t *lock)
5449 {
5450 unsigned long tmp1, tmp2;
5451
5452 __asm__ __volatile__ (
5453 "1: ldsw [%2], %0\n"
5454 " brlz,pn %0, 2f\n"
5455 -"4: add %0, 1, %1\n"
5456 +"4: addcc %0, 1, %1\n"
5457 +
5458 +#ifdef CONFIG_PAX_REFCOUNT
5459 +" tvs %%icc, 6\n"
5460 +#endif
5461 +
5462 " cas [%2], %0, %1\n"
5463 " cmp %0, %1\n"
5464 " bne,pn %%icc, 1b\n"
5465 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5466 " .previous"
5467 : "=&r" (tmp1), "=&r" (tmp2)
5468 : "r" (lock)
5469 - : "memory");
5470 + : "memory", "cc");
5471 }
5472
5473 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5474 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5475 {
5476 int tmp1, tmp2;
5477
5478 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 "1: ldsw [%2], %0\n"
5480 " brlz,a,pn %0, 2f\n"
5481 " mov 0, %0\n"
5482 -" add %0, 1, %1\n"
5483 +" addcc %0, 1, %1\n"
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 +" tvs %%icc, 6\n"
5487 +#endif
5488 +
5489 " cas [%2], %0, %1\n"
5490 " cmp %0, %1\n"
5491 " bne,pn %%icc, 1b\n"
5492 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5493 return tmp1;
5494 }
5495
5496 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5497 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5498 {
5499 unsigned long tmp1, tmp2;
5500
5501 __asm__ __volatile__(
5502 "1: lduw [%2], %0\n"
5503 -" sub %0, 1, %1\n"
5504 +" subcc %0, 1, %1\n"
5505 +
5506 +#ifdef CONFIG_PAX_REFCOUNT
5507 +" tvs %%icc, 6\n"
5508 +#endif
5509 +
5510 " cas [%2], %0, %1\n"
5511 " cmp %0, %1\n"
5512 " bne,pn %%xcc, 1b\n"
5513 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5514 : "memory");
5515 }
5516
5517 -static void inline arch_write_lock(arch_rwlock_t *lock)
5518 +static inline void arch_write_lock(arch_rwlock_t *lock)
5519 {
5520 unsigned long mask, tmp1, tmp2;
5521
5522 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5523 : "memory");
5524 }
5525
5526 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5527 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5528 {
5529 __asm__ __volatile__(
5530 " stw %%g0, [%0]"
5531 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5532 : "memory");
5533 }
5534
5535 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5536 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5537 {
5538 unsigned long mask, tmp1, tmp2, result;
5539
5540 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5541 index e6cd224..3a71793 100644
5542 --- a/arch/sparc/include/asm/thread_info_32.h
5543 +++ b/arch/sparc/include/asm/thread_info_32.h
5544 @@ -49,6 +49,8 @@ struct thread_info {
5545 unsigned long w_saved;
5546
5547 struct restart_block restart_block;
5548 +
5549 + unsigned long lowest_stack;
5550 };
5551
5552 /*
5553 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5554 index cfa8c38..13f30d3 100644
5555 --- a/arch/sparc/include/asm/thread_info_64.h
5556 +++ b/arch/sparc/include/asm/thread_info_64.h
5557 @@ -63,6 +63,8 @@ struct thread_info {
5558 struct pt_regs *kern_una_regs;
5559 unsigned int kern_una_insn;
5560
5561 + unsigned long lowest_stack;
5562 +
5563 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5564 };
5565
5566 @@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5567 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5568 /* flag bit 6 is available */
5569 #define TIF_32BIT 7 /* 32-bit binary */
5570 -/* flag bit 8 is available */
5571 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5572 #define TIF_SECCOMP 9 /* secure computing */
5573 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5574 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5575 +
5576 /* NOTE: Thread flags >= 12 should be ones we have no interest
5577 * in using in assembly, else we can't use the mask as
5578 * an immediate value in instructions such as andcc.
5579 @@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5580 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5581 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5582 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5583 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5584
5585 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5586 _TIF_DO_NOTIFY_RESUME_MASK | \
5587 _TIF_NEED_RESCHED)
5588 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5589
5590 +#define _TIF_WORK_SYSCALL \
5591 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5592 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5593 +
5594 +
5595 /*
5596 * Thread-synchronous status.
5597 *
5598 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5599 index 0167d26..9acd8ed 100644
5600 --- a/arch/sparc/include/asm/uaccess.h
5601 +++ b/arch/sparc/include/asm/uaccess.h
5602 @@ -1,5 +1,13 @@
5603 #ifndef ___ASM_SPARC_UACCESS_H
5604 #define ___ASM_SPARC_UACCESS_H
5605 +
5606 +#ifdef __KERNEL__
5607 +#ifndef __ASSEMBLY__
5608 +#include <linux/types.h>
5609 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5610 +#endif
5611 +#endif
5612 +
5613 #if defined(__sparc__) && defined(__arch64__)
5614 #include <asm/uaccess_64.h>
5615 #else
5616 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5617 index 53a28dd..50c38c3 100644
5618 --- a/arch/sparc/include/asm/uaccess_32.h
5619 +++ b/arch/sparc/include/asm/uaccess_32.h
5620 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5621
5622 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5623 {
5624 - if (n && __access_ok((unsigned long) to, n))
5625 + if ((long)n < 0)
5626 + return n;
5627 +
5628 + if (n && __access_ok((unsigned long) to, n)) {
5629 + if (!__builtin_constant_p(n))
5630 + check_object_size(from, n, true);
5631 return __copy_user(to, (__force void __user *) from, n);
5632 - else
5633 + } else
5634 return n;
5635 }
5636
5637 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5638 {
5639 + if ((long)n < 0)
5640 + return n;
5641 +
5642 + if (!__builtin_constant_p(n))
5643 + check_object_size(from, n, true);
5644 +
5645 return __copy_user(to, (__force void __user *) from, n);
5646 }
5647
5648 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5649 {
5650 - if (n && __access_ok((unsigned long) from, n))
5651 + if ((long)n < 0)
5652 + return n;
5653 +
5654 + if (n && __access_ok((unsigned long) from, n)) {
5655 + if (!__builtin_constant_p(n))
5656 + check_object_size(to, n, false);
5657 return __copy_user((__force void __user *) to, from, n);
5658 - else
5659 + } else
5660 return n;
5661 }
5662
5663 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5664 {
5665 + if ((long)n < 0)
5666 + return n;
5667 +
5668 return __copy_user((__force void __user *) to, from, n);
5669 }
5670
5671 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5672 index 7c831d8..d440ca7 100644
5673 --- a/arch/sparc/include/asm/uaccess_64.h
5674 +++ b/arch/sparc/include/asm/uaccess_64.h
5675 @@ -10,6 +10,7 @@
5676 #include <linux/compiler.h>
5677 #include <linux/string.h>
5678 #include <linux/thread_info.h>
5679 +#include <linux/kernel.h>
5680 #include <asm/asi.h>
5681 #include <asm/spitfire.h>
5682 #include <asm-generic/uaccess-unaligned.h>
5683 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5684 static inline unsigned long __must_check
5685 copy_from_user(void *to, const void __user *from, unsigned long size)
5686 {
5687 - unsigned long ret = ___copy_from_user(to, from, size);
5688 + unsigned long ret;
5689
5690 + if ((long)size < 0 || size > INT_MAX)
5691 + return size;
5692 +
5693 + if (!__builtin_constant_p(size))
5694 + check_object_size(to, size, false);
5695 +
5696 + ret = ___copy_from_user(to, from, size);
5697 if (unlikely(ret))
5698 ret = copy_from_user_fixup(to, from, size);
5699
5700 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5701 static inline unsigned long __must_check
5702 copy_to_user(void __user *to, const void *from, unsigned long size)
5703 {
5704 - unsigned long ret = ___copy_to_user(to, from, size);
5705 + unsigned long ret;
5706
5707 + if ((long)size < 0 || size > INT_MAX)
5708 + return size;
5709 +
5710 + if (!__builtin_constant_p(size))
5711 + check_object_size(from, size, true);
5712 +
5713 + ret = ___copy_to_user(to, from, size);
5714 if (unlikely(ret))
5715 ret = copy_to_user_fixup(to, from, size);
5716 return ret;
5717 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5718 index 6cf591b..b49e65a 100644
5719 --- a/arch/sparc/kernel/Makefile
5720 +++ b/arch/sparc/kernel/Makefile
5721 @@ -3,7 +3,7 @@
5722 #
5723
5724 asflags-y := -ansi
5725 -ccflags-y := -Werror
5726 +#ccflags-y := -Werror
5727
5728 extra-y := head_$(BITS).o
5729
5730 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5731 index cb36e82..1c1462f 100644
5732 --- a/arch/sparc/kernel/process_32.c
5733 +++ b/arch/sparc/kernel/process_32.c
5734 @@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
5735
5736 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5737 r->psr, r->pc, r->npc, r->y, print_tainted());
5738 - printk("PC: <%pS>\n", (void *) r->pc);
5739 + printk("PC: <%pA>\n", (void *) r->pc);
5740 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5741 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5742 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5743 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5744 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5745 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5746 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5747 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5748
5749 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5750 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5751 @@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5752 rw = (struct reg_window32 *) fp;
5753 pc = rw->ins[7];
5754 printk("[%08lx : ", pc);
5755 - printk("%pS ] ", (void *) pc);
5756 + printk("%pA ] ", (void *) pc);
5757 fp = rw->ins[6];
5758 } while (++count < 16);
5759 printk("\n");
5760 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5761 index aff0c72..9067b39 100644
5762 --- a/arch/sparc/kernel/process_64.c
5763 +++ b/arch/sparc/kernel/process_64.c
5764 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5765 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5766 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5767 if (regs->tstate & TSTATE_PRIV)
5768 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5769 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5770 }
5771
5772 void show_regs(struct pt_regs *regs)
5773 {
5774 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5775 regs->tpc, regs->tnpc, regs->y, print_tainted());
5776 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5777 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5778 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5779 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5780 regs->u_regs[3]);
5781 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5782 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5783 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5784 regs->u_regs[15]);
5785 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5786 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5787 show_regwindow(regs);
5788 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5789 }
5790 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5791 ((tp && tp->task) ? tp->task->pid : -1));
5792
5793 if (gp->tstate & TSTATE_PRIV) {
5794 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5795 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5796 (void *) gp->tpc,
5797 (void *) gp->o7,
5798 (void *) gp->i7,
5799 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5800 index 484daba..0674139 100644
5801 --- a/arch/sparc/kernel/ptrace_64.c
5802 +++ b/arch/sparc/kernel/ptrace_64.c
5803 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5804 return ret;
5805 }
5806
5807 +#ifdef CONFIG_GRKERNSEC_SETXID
5808 +extern void gr_delayed_cred_worker(void);
5809 +#endif
5810 +
5811 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5812 {
5813 int ret = 0;
5814 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5815 /* do the secure computing check first */
5816 secure_computing_strict(regs->u_regs[UREG_G1]);
5817
5818 +#ifdef CONFIG_GRKERNSEC_SETXID
5819 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5820 + gr_delayed_cred_worker();
5821 +#endif
5822 +
5823 if (test_thread_flag(TIF_SYSCALL_TRACE))
5824 ret = tracehook_report_syscall_entry(regs);
5825
5826 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5827
5828 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5829 {
5830 +#ifdef CONFIG_GRKERNSEC_SETXID
5831 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5832 + gr_delayed_cred_worker();
5833 +#endif
5834 +
5835 audit_syscall_exit(regs);
5836
5837 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5838 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5839 index 0c9b31b..7cb7aee 100644
5840 --- a/arch/sparc/kernel/sys_sparc_32.c
5841 +++ b/arch/sparc/kernel/sys_sparc_32.c
5842 @@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5843 if (len > TASK_SIZE - PAGE_SIZE)
5844 return -ENOMEM;
5845 if (!addr)
5846 - addr = TASK_UNMAPPED_BASE;
5847 + addr = current->mm->mmap_base;
5848
5849 if (flags & MAP_SHARED)
5850 addr = COLOUR_ALIGN(addr);
5851 @@ -65,7 +65,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5852 /* At this point: (!vmm || addr < vmm->vm_end). */
5853 if (TASK_SIZE - PAGE_SIZE - len < addr)
5854 return -ENOMEM;
5855 - if (!vmm || addr + len <= vmm->vm_start)
5856 + if (check_heap_stack_gap(vmm, addr, len))
5857 return addr;
5858 addr = vmm->vm_end;
5859 if (flags & MAP_SHARED)
5860 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5861 index 275f74f..81bf5b8 100644
5862 --- a/arch/sparc/kernel/sys_sparc_64.c
5863 +++ b/arch/sparc/kernel/sys_sparc_64.c
5864 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5865 /* We do not accept a shared mapping if it would violate
5866 * cache aliasing constraints.
5867 */
5868 - if ((flags & MAP_SHARED) &&
5869 + if ((filp || (flags & MAP_SHARED)) &&
5870 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5871 return -EINVAL;
5872 return addr;
5873 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5874 if (filp || (flags & MAP_SHARED))
5875 do_color_align = 1;
5876
5877 +#ifdef CONFIG_PAX_RANDMMAP
5878 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5879 +#endif
5880 +
5881 if (addr) {
5882 if (do_color_align)
5883 addr = COLOUR_ALIGN(addr, pgoff);
5884 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5885 addr = PAGE_ALIGN(addr);
5886
5887 vma = find_vma(mm, addr);
5888 - if (task_size - len >= addr &&
5889 - (!vma || addr + len <= vma->vm_start))
5890 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5891 return addr;
5892 }
5893
5894 if (len > mm->cached_hole_size) {
5895 - start_addr = addr = mm->free_area_cache;
5896 + start_addr = addr = mm->free_area_cache;
5897 } else {
5898 - start_addr = addr = TASK_UNMAPPED_BASE;
5899 + start_addr = addr = mm->mmap_base;
5900 mm->cached_hole_size = 0;
5901 }
5902
5903 @@ -174,14 +177,14 @@ full_search:
5904 vma = find_vma(mm, VA_EXCLUDE_END);
5905 }
5906 if (unlikely(task_size < addr)) {
5907 - if (start_addr != TASK_UNMAPPED_BASE) {
5908 - start_addr = addr = TASK_UNMAPPED_BASE;
5909 + if (start_addr != mm->mmap_base) {
5910 + start_addr = addr = mm->mmap_base;
5911 mm->cached_hole_size = 0;
5912 goto full_search;
5913 }
5914 return -ENOMEM;
5915 }
5916 - if (likely(!vma || addr + len <= vma->vm_start)) {
5917 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5918 /*
5919 * Remember the place where we stopped the search:
5920 */
5921 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5922 /* We do not accept a shared mapping if it would violate
5923 * cache aliasing constraints.
5924 */
5925 - if ((flags & MAP_SHARED) &&
5926 + if ((filp || (flags & MAP_SHARED)) &&
5927 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5928 return -EINVAL;
5929 return addr;
5930 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5931 addr = PAGE_ALIGN(addr);
5932
5933 vma = find_vma(mm, addr);
5934 - if (task_size - len >= addr &&
5935 - (!vma || addr + len <= vma->vm_start))
5936 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5937 return addr;
5938 }
5939
5940 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5941 /* make sure it can fit in the remaining address space */
5942 if (likely(addr > len)) {
5943 vma = find_vma(mm, addr-len);
5944 - if (!vma || addr <= vma->vm_start) {
5945 + if (check_heap_stack_gap(vma, addr - len, len)) {
5946 /* remember the address as a hint for next time */
5947 return (mm->free_area_cache = addr-len);
5948 }
5949 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5950 if (unlikely(mm->mmap_base < len))
5951 goto bottomup;
5952
5953 - addr = mm->mmap_base-len;
5954 - if (do_color_align)
5955 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 + addr = mm->mmap_base - len;
5957
5958 do {
5959 + if (do_color_align)
5960 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5961 /*
5962 * Lookup failure means no vma is above this address,
5963 * else if new region fits below vma->vm_start,
5964 * return with success:
5965 */
5966 vma = find_vma(mm, addr);
5967 - if (likely(!vma || addr+len <= vma->vm_start)) {
5968 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5969 /* remember the address as a hint for next time */
5970 return (mm->free_area_cache = addr);
5971 }
5972 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5973 mm->cached_hole_size = vma->vm_start - addr;
5974
5975 /* try just below the current vma->vm_start */
5976 - addr = vma->vm_start-len;
5977 - if (do_color_align)
5978 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5979 - } while (likely(len < vma->vm_start));
5980 + addr = skip_heap_stack_gap(vma, len);
5981 + } while (!IS_ERR_VALUE(addr));
5982
5983 bottomup:
5984 /*
5985 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5986 gap == RLIM_INFINITY ||
5987 sysctl_legacy_va_layout) {
5988 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5989 +
5990 +#ifdef CONFIG_PAX_RANDMMAP
5991 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5992 + mm->mmap_base += mm->delta_mmap;
5993 +#endif
5994 +
5995 mm->get_unmapped_area = arch_get_unmapped_area;
5996 mm->unmap_area = arch_unmap_area;
5997 } else {
5998 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5999 gap = (task_size / 6 * 5);
6000
6001 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6002 +
6003 +#ifdef CONFIG_PAX_RANDMMAP
6004 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6005 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6006 +#endif
6007 +
6008 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6009 mm->unmap_area = arch_unmap_area_topdown;
6010 }
6011 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6012 index 1d7e274..b39c527 100644
6013 --- a/arch/sparc/kernel/syscalls.S
6014 +++ b/arch/sparc/kernel/syscalls.S
6015 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6016 #endif
6017 .align 32
6018 1: ldx [%g6 + TI_FLAGS], %l5
6019 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6020 + andcc %l5, _TIF_WORK_SYSCALL, %g0
6021 be,pt %icc, rtrap
6022 nop
6023 call syscall_trace_leave
6024 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
6025
6026 srl %i5, 0, %o5 ! IEU1
6027 srl %i2, 0, %o2 ! IEU0 Group
6028 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6029 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6030 bne,pn %icc, linux_syscall_trace32 ! CTI
6031 mov %i0, %l5 ! IEU1
6032 call %l7 ! CTI Group brk forced
6033 @@ -202,7 +202,7 @@ linux_sparc_syscall:
6034
6035 mov %i3, %o3 ! IEU1
6036 mov %i4, %o4 ! IEU0 Group
6037 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6038 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6039 bne,pn %icc, linux_syscall_trace ! CTI Group
6040 mov %i0, %l5 ! IEU0
6041 2: call %l7 ! CTI Group brk forced
6042 @@ -226,7 +226,7 @@ ret_sys_call:
6043
6044 cmp %o0, -ERESTART_RESTARTBLOCK
6045 bgeu,pn %xcc, 1f
6046 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6047 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6048 80:
6049 /* System call success, clear Carry condition code. */
6050 andn %g3, %g2, %g3
6051 @@ -241,7 +241,7 @@ ret_sys_call:
6052 /* System call failure, set Carry condition code.
6053 * Also, get abs(errno) to return to the process.
6054 */
6055 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6056 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6057 sub %g0, %o0, %o0
6058 or %g3, %g2, %g3
6059 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6060 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6061 index a5785ea..405c5f7 100644
6062 --- a/arch/sparc/kernel/traps_32.c
6063 +++ b/arch/sparc/kernel/traps_32.c
6064 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6065 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6066 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6067
6068 +extern void gr_handle_kernel_exploit(void);
6069 +
6070 void die_if_kernel(char *str, struct pt_regs *regs)
6071 {
6072 static int die_counter;
6073 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6074 count++ < 30 &&
6075 (((unsigned long) rw) >= PAGE_OFFSET) &&
6076 !(((unsigned long) rw) & 0x7)) {
6077 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6078 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6079 (void *) rw->ins[7]);
6080 rw = (struct reg_window32 *)rw->ins[6];
6081 }
6082 }
6083 printk("Instruction DUMP:");
6084 instruction_dump ((unsigned long *) regs->pc);
6085 - if(regs->psr & PSR_PS)
6086 + if(regs->psr & PSR_PS) {
6087 + gr_handle_kernel_exploit();
6088 do_exit(SIGKILL);
6089 + }
6090 do_exit(SIGSEGV);
6091 }
6092
6093 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6094 index 3b05e66..6ea2917 100644
6095 --- a/arch/sparc/kernel/traps_64.c
6096 +++ b/arch/sparc/kernel/traps_64.c
6097 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6098 i + 1,
6099 p->trapstack[i].tstate, p->trapstack[i].tpc,
6100 p->trapstack[i].tnpc, p->trapstack[i].tt);
6101 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6102 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6103 }
6104 }
6105
6106 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6107
6108 lvl -= 0x100;
6109 if (regs->tstate & TSTATE_PRIV) {
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + if (lvl == 6)
6113 + pax_report_refcount_overflow(regs);
6114 +#endif
6115 +
6116 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6117 die_if_kernel(buffer, regs);
6118 }
6119 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6120 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6121 {
6122 char buffer[32];
6123 -
6124 +
6125 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6126 0, lvl, SIGTRAP) == NOTIFY_STOP)
6127 return;
6128
6129 +#ifdef CONFIG_PAX_REFCOUNT
6130 + if (lvl == 6)
6131 + pax_report_refcount_overflow(regs);
6132 +#endif
6133 +
6134 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6135
6136 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6137 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6138 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6139 printk("%s" "ERROR(%d): ",
6140 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6141 - printk("TPC<%pS>\n", (void *) regs->tpc);
6142 + printk("TPC<%pA>\n", (void *) regs->tpc);
6143 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6144 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6145 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6146 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6147 smp_processor_id(),
6148 (type & 0x1) ? 'I' : 'D',
6149 regs->tpc);
6150 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6151 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6152 panic("Irrecoverable Cheetah+ parity error.");
6153 }
6154
6155 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6156 smp_processor_id(),
6157 (type & 0x1) ? 'I' : 'D',
6158 regs->tpc);
6159 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6160 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6161 }
6162
6163 struct sun4v_error_entry {
6164 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6165
6166 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6167 regs->tpc, tl);
6168 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6169 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6170 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6171 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6172 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6173 (void *) regs->u_regs[UREG_I7]);
6174 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6175 "pte[%lx] error[%lx]\n",
6176 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6177
6178 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6179 regs->tpc, tl);
6180 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6181 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6182 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6183 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6184 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6185 (void *) regs->u_regs[UREG_I7]);
6186 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6187 "pte[%lx] error[%lx]\n",
6188 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6189 fp = (unsigned long)sf->fp + STACK_BIAS;
6190 }
6191
6192 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6193 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6195 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6196 int index = tsk->curr_ret_stack;
6197 if (tsk->ret_stack && index >= graph) {
6198 pc = tsk->ret_stack[index - graph].ret;
6199 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6200 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6201 graph++;
6202 }
6203 }
6204 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6205 return (struct reg_window *) (fp + STACK_BIAS);
6206 }
6207
6208 +extern void gr_handle_kernel_exploit(void);
6209 +
6210 void die_if_kernel(char *str, struct pt_regs *regs)
6211 {
6212 static int die_counter;
6213 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6214 while (rw &&
6215 count++ < 30 &&
6216 kstack_valid(tp, (unsigned long) rw)) {
6217 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6218 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6219 (void *) rw->ins[7]);
6220
6221 rw = kernel_stack_up(rw);
6222 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6223 }
6224 user_instruction_dump ((unsigned int __user *) regs->tpc);
6225 }
6226 - if (regs->tstate & TSTATE_PRIV)
6227 + if (regs->tstate & TSTATE_PRIV) {
6228 + gr_handle_kernel_exploit();
6229 do_exit(SIGKILL);
6230 + }
6231 do_exit(SIGSEGV);
6232 }
6233 EXPORT_SYMBOL(die_if_kernel);
6234 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6235 index f81d038..e7a4680 100644
6236 --- a/arch/sparc/kernel/unaligned_64.c
6237 +++ b/arch/sparc/kernel/unaligned_64.c
6238 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs *regs)
6239 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6240
6241 if (__ratelimit(&ratelimit)) {
6242 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6243 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6244 regs->tpc, (void *) regs->tpc);
6245 }
6246 }
6247 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6248 index dff4096..bd9a388 100644
6249 --- a/arch/sparc/lib/Makefile
6250 +++ b/arch/sparc/lib/Makefile
6251 @@ -2,7 +2,7 @@
6252 #
6253
6254 asflags-y := -ansi -DST_DIV0=0x02
6255 -ccflags-y := -Werror
6256 +#ccflags-y := -Werror
6257
6258 lib-$(CONFIG_SPARC32) += ashrdi3.o
6259 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6260 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6261 index 4d502da..527c48d 100644
6262 --- a/arch/sparc/lib/atomic_64.S
6263 +++ b/arch/sparc/lib/atomic_64.S
6264 @@ -17,7 +17,12 @@
6265 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6266 BACKOFF_SETUP(%o2)
6267 1: lduw [%o1], %g1
6268 - add %g1, %o0, %g7
6269 + addcc %g1, %o0, %g7
6270 +
6271 +#ifdef CONFIG_PAX_REFCOUNT
6272 + tvs %icc, 6
6273 +#endif
6274 +
6275 cas [%o1], %g1, %g7
6276 cmp %g1, %g7
6277 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6278 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6279 2: BACKOFF_SPIN(%o2, %o3, 1b)
6280 ENDPROC(atomic_add)
6281
6282 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6283 + BACKOFF_SETUP(%o2)
6284 +1: lduw [%o1], %g1
6285 + add %g1, %o0, %g7
6286 + cas [%o1], %g1, %g7
6287 + cmp %g1, %g7
6288 + bne,pn %icc, 2f
6289 + nop
6290 + retl
6291 + nop
6292 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6293 +ENDPROC(atomic_add_unchecked)
6294 +
6295 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6296 BACKOFF_SETUP(%o2)
6297 1: lduw [%o1], %g1
6298 - sub %g1, %o0, %g7
6299 + subcc %g1, %o0, %g7
6300 +
6301 +#ifdef CONFIG_PAX_REFCOUNT
6302 + tvs %icc, 6
6303 +#endif
6304 +
6305 cas [%o1], %g1, %g7
6306 cmp %g1, %g7
6307 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6308 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6309 2: BACKOFF_SPIN(%o2, %o3, 1b)
6310 ENDPROC(atomic_sub)
6311
6312 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6313 + BACKOFF_SETUP(%o2)
6314 +1: lduw [%o1], %g1
6315 + sub %g1, %o0, %g7
6316 + cas [%o1], %g1, %g7
6317 + cmp %g1, %g7
6318 + bne,pn %icc, 2f
6319 + nop
6320 + retl
6321 + nop
6322 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6323 +ENDPROC(atomic_sub_unchecked)
6324 +
6325 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6326 BACKOFF_SETUP(%o2)
6327 1: lduw [%o1], %g1
6328 - add %g1, %o0, %g7
6329 + addcc %g1, %o0, %g7
6330 +
6331 +#ifdef CONFIG_PAX_REFCOUNT
6332 + tvs %icc, 6
6333 +#endif
6334 +
6335 cas [%o1], %g1, %g7
6336 cmp %g1, %g7
6337 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6338 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6339 2: BACKOFF_SPIN(%o2, %o3, 1b)
6340 ENDPROC(atomic_add_ret)
6341
6342 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6343 + BACKOFF_SETUP(%o2)
6344 +1: lduw [%o1], %g1
6345 + addcc %g1, %o0, %g7
6346 + cas [%o1], %g1, %g7
6347 + cmp %g1, %g7
6348 + bne,pn %icc, 2f
6349 + add %g7, %o0, %g7
6350 + sra %g7, 0, %o0
6351 + retl
6352 + nop
6353 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6354 +ENDPROC(atomic_add_ret_unchecked)
6355 +
6356 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6357 BACKOFF_SETUP(%o2)
6358 1: lduw [%o1], %g1
6359 - sub %g1, %o0, %g7
6360 + subcc %g1, %o0, %g7
6361 +
6362 +#ifdef CONFIG_PAX_REFCOUNT
6363 + tvs %icc, 6
6364 +#endif
6365 +
6366 cas [%o1], %g1, %g7
6367 cmp %g1, %g7
6368 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6369 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
6370 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6371 BACKOFF_SETUP(%o2)
6372 1: ldx [%o1], %g1
6373 - add %g1, %o0, %g7
6374 + addcc %g1, %o0, %g7
6375 +
6376 +#ifdef CONFIG_PAX_REFCOUNT
6377 + tvs %xcc, 6
6378 +#endif
6379 +
6380 casx [%o1], %g1, %g7
6381 cmp %g1, %g7
6382 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6383 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6384 2: BACKOFF_SPIN(%o2, %o3, 1b)
6385 ENDPROC(atomic64_add)
6386
6387 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6388 + BACKOFF_SETUP(%o2)
6389 +1: ldx [%o1], %g1
6390 + addcc %g1, %o0, %g7
6391 + casx [%o1], %g1, %g7
6392 + cmp %g1, %g7
6393 + bne,pn %xcc, 2f
6394 + nop
6395 + retl
6396 + nop
6397 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6398 +ENDPROC(atomic64_add_unchecked)
6399 +
6400 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403 - sub %g1, %o0, %g7
6404 + subcc %g1, %o0, %g7
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + tvs %xcc, 6
6408 +#endif
6409 +
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6414 2: BACKOFF_SPIN(%o2, %o3, 1b)
6415 ENDPROC(atomic64_sub)
6416
6417 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6418 + BACKOFF_SETUP(%o2)
6419 +1: ldx [%o1], %g1
6420 + subcc %g1, %o0, %g7
6421 + casx [%o1], %g1, %g7
6422 + cmp %g1, %g7
6423 + bne,pn %xcc, 2f
6424 + nop
6425 + retl
6426 + nop
6427 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6428 +ENDPROC(atomic64_sub_unchecked)
6429 +
6430 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6431 BACKOFF_SETUP(%o2)
6432 1: ldx [%o1], %g1
6433 - add %g1, %o0, %g7
6434 + addcc %g1, %o0, %g7
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + tvs %xcc, 6
6438 +#endif
6439 +
6440 casx [%o1], %g1, %g7
6441 cmp %g1, %g7
6442 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6443 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6444 2: BACKOFF_SPIN(%o2, %o3, 1b)
6445 ENDPROC(atomic64_add_ret)
6446
6447 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6448 + BACKOFF_SETUP(%o2)
6449 +1: ldx [%o1], %g1
6450 + addcc %g1, %o0, %g7
6451 + casx [%o1], %g1, %g7
6452 + cmp %g1, %g7
6453 + bne,pn %xcc, 2f
6454 + add %g7, %o0, %g7
6455 + mov %g7, %o0
6456 + retl
6457 + nop
6458 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6459 +ENDPROC(atomic64_add_ret_unchecked)
6460 +
6461 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6462 BACKOFF_SETUP(%o2)
6463 1: ldx [%o1], %g1
6464 - sub %g1, %o0, %g7
6465 + subcc %g1, %o0, %g7
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + tvs %xcc, 6
6469 +#endif
6470 +
6471 casx [%o1], %g1, %g7
6472 cmp %g1, %g7
6473 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6474 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6475 index 3b31218..345c609 100644
6476 --- a/arch/sparc/lib/ksyms.c
6477 +++ b/arch/sparc/lib/ksyms.c
6478 @@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
6479
6480 /* Atomic counter implementation. */
6481 EXPORT_SYMBOL(atomic_add);
6482 +EXPORT_SYMBOL(atomic_add_unchecked);
6483 EXPORT_SYMBOL(atomic_add_ret);
6484 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6485 EXPORT_SYMBOL(atomic_sub);
6486 +EXPORT_SYMBOL(atomic_sub_unchecked);
6487 EXPORT_SYMBOL(atomic_sub_ret);
6488 EXPORT_SYMBOL(atomic64_add);
6489 +EXPORT_SYMBOL(atomic64_add_unchecked);
6490 EXPORT_SYMBOL(atomic64_add_ret);
6491 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6492 EXPORT_SYMBOL(atomic64_sub);
6493 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6494 EXPORT_SYMBOL(atomic64_sub_ret);
6495
6496 /* Atomic bit operations. */
6497 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6498 index 30c3ecc..736f015 100644
6499 --- a/arch/sparc/mm/Makefile
6500 +++ b/arch/sparc/mm/Makefile
6501 @@ -2,7 +2,7 @@
6502 #
6503
6504 asflags-y := -ansi
6505 -ccflags-y := -Werror
6506 +#ccflags-y := -Werror
6507
6508 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6509 obj-y += fault_$(BITS).o
6510 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6511 index f46cf6b..7235ec9 100644
6512 --- a/arch/sparc/mm/fault_32.c
6513 +++ b/arch/sparc/mm/fault_32.c
6514 @@ -21,6 +21,9 @@
6515 #include <linux/perf_event.h>
6516 #include <linux/interrupt.h>
6517 #include <linux/kdebug.h>
6518 +#include <linux/slab.h>
6519 +#include <linux/pagemap.h>
6520 +#include <linux/compiler.h>
6521
6522 #include <asm/page.h>
6523 #include <asm/pgtable.h>
6524 @@ -177,6 +180,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6525 return safe_compute_effective_address(regs, insn);
6526 }
6527
6528 +#ifdef CONFIG_PAX_PAGEEXEC
6529 +#ifdef CONFIG_PAX_DLRESOLVE
6530 +static void pax_emuplt_close(struct vm_area_struct *vma)
6531 +{
6532 + vma->vm_mm->call_dl_resolve = 0UL;
6533 +}
6534 +
6535 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6536 +{
6537 + unsigned int *kaddr;
6538 +
6539 + vmf->page = alloc_page(GFP_HIGHUSER);
6540 + if (!vmf->page)
6541 + return VM_FAULT_OOM;
6542 +
6543 + kaddr = kmap(vmf->page);
6544 + memset(kaddr, 0, PAGE_SIZE);
6545 + kaddr[0] = 0x9DE3BFA8U; /* save */
6546 + flush_dcache_page(vmf->page);
6547 + kunmap(vmf->page);
6548 + return VM_FAULT_MAJOR;
6549 +}
6550 +
6551 +static const struct vm_operations_struct pax_vm_ops = {
6552 + .close = pax_emuplt_close,
6553 + .fault = pax_emuplt_fault
6554 +};
6555 +
6556 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6557 +{
6558 + int ret;
6559 +
6560 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6561 + vma->vm_mm = current->mm;
6562 + vma->vm_start = addr;
6563 + vma->vm_end = addr + PAGE_SIZE;
6564 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6565 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6566 + vma->vm_ops = &pax_vm_ops;
6567 +
6568 + ret = insert_vm_struct(current->mm, vma);
6569 + if (ret)
6570 + return ret;
6571 +
6572 + ++current->mm->total_vm;
6573 + return 0;
6574 +}
6575 +#endif
6576 +
6577 +/*
6578 + * PaX: decide what to do with offenders (regs->pc = fault address)
6579 + *
6580 + * returns 1 when task should be killed
6581 + * 2 when patched PLT trampoline was detected
6582 + * 3 when unpatched PLT trampoline was detected
6583 + */
6584 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6585 +{
6586 +
6587 +#ifdef CONFIG_PAX_EMUPLT
6588 + int err;
6589 +
6590 + do { /* PaX: patched PLT emulation #1 */
6591 + unsigned int sethi1, sethi2, jmpl;
6592 +
6593 + err = get_user(sethi1, (unsigned int *)regs->pc);
6594 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6595 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6596 +
6597 + if (err)
6598 + break;
6599 +
6600 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6601 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6602 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6603 + {
6604 + unsigned int addr;
6605 +
6606 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6607 + addr = regs->u_regs[UREG_G1];
6608 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6609 + regs->pc = addr;
6610 + regs->npc = addr+4;
6611 + return 2;
6612 + }
6613 + } while (0);
6614 +
6615 + do { /* PaX: patched PLT emulation #2 */
6616 + unsigned int ba;
6617 +
6618 + err = get_user(ba, (unsigned int *)regs->pc);
6619 +
6620 + if (err)
6621 + break;
6622 +
6623 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6624 + unsigned int addr;
6625 +
6626 + if ((ba & 0xFFC00000U) == 0x30800000U)
6627 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6628 + else
6629 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6630 + regs->pc = addr;
6631 + regs->npc = addr+4;
6632 + return 2;
6633 + }
6634 + } while (0);
6635 +
6636 + do { /* PaX: patched PLT emulation #3 */
6637 + unsigned int sethi, bajmpl, nop;
6638 +
6639 + err = get_user(sethi, (unsigned int *)regs->pc);
6640 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
6641 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6642 +
6643 + if (err)
6644 + break;
6645 +
6646 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6647 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6648 + nop == 0x01000000U)
6649 + {
6650 + unsigned int addr;
6651 +
6652 + addr = (sethi & 0x003FFFFFU) << 10;
6653 + regs->u_regs[UREG_G1] = addr;
6654 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6655 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6656 + else
6657 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6658 + regs->pc = addr;
6659 + regs->npc = addr+4;
6660 + return 2;
6661 + }
6662 + } while (0);
6663 +
6664 + do { /* PaX: unpatched PLT emulation step 1 */
6665 + unsigned int sethi, ba, nop;
6666 +
6667 + err = get_user(sethi, (unsigned int *)regs->pc);
6668 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6669 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6670 +
6671 + if (err)
6672 + break;
6673 +
6674 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6675 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6676 + nop == 0x01000000U)
6677 + {
6678 + unsigned int addr, save, call;
6679 +
6680 + if ((ba & 0xFFC00000U) == 0x30800000U)
6681 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6682 + else
6683 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6684 +
6685 + err = get_user(save, (unsigned int *)addr);
6686 + err |= get_user(call, (unsigned int *)(addr+4));
6687 + err |= get_user(nop, (unsigned int *)(addr+8));
6688 + if (err)
6689 + break;
6690 +
6691 +#ifdef CONFIG_PAX_DLRESOLVE
6692 + if (save == 0x9DE3BFA8U &&
6693 + (call & 0xC0000000U) == 0x40000000U &&
6694 + nop == 0x01000000U)
6695 + {
6696 + struct vm_area_struct *vma;
6697 + unsigned long call_dl_resolve;
6698 +
6699 + down_read(&current->mm->mmap_sem);
6700 + call_dl_resolve = current->mm->call_dl_resolve;
6701 + up_read(&current->mm->mmap_sem);
6702 + if (likely(call_dl_resolve))
6703 + goto emulate;
6704 +
6705 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6706 +
6707 + down_write(&current->mm->mmap_sem);
6708 + if (current->mm->call_dl_resolve) {
6709 + call_dl_resolve = current->mm->call_dl_resolve;
6710 + up_write(&current->mm->mmap_sem);
6711 + if (vma)
6712 + kmem_cache_free(vm_area_cachep, vma);
6713 + goto emulate;
6714 + }
6715 +
6716 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6717 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6718 + up_write(&current->mm->mmap_sem);
6719 + if (vma)
6720 + kmem_cache_free(vm_area_cachep, vma);
6721 + return 1;
6722 + }
6723 +
6724 + if (pax_insert_vma(vma, call_dl_resolve)) {
6725 + up_write(&current->mm->mmap_sem);
6726 + kmem_cache_free(vm_area_cachep, vma);
6727 + return 1;
6728 + }
6729 +
6730 + current->mm->call_dl_resolve = call_dl_resolve;
6731 + up_write(&current->mm->mmap_sem);
6732 +
6733 +emulate:
6734 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6735 + regs->pc = call_dl_resolve;
6736 + regs->npc = addr+4;
6737 + return 3;
6738 + }
6739 +#endif
6740 +
6741 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6742 + if ((save & 0xFFC00000U) == 0x05000000U &&
6743 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6744 + nop == 0x01000000U)
6745 + {
6746 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6747 + regs->u_regs[UREG_G2] = addr + 4;
6748 + addr = (save & 0x003FFFFFU) << 10;
6749 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6750 + regs->pc = addr;
6751 + regs->npc = addr+4;
6752 + return 3;
6753 + }
6754 + }
6755 + } while (0);
6756 +
6757 + do { /* PaX: unpatched PLT emulation step 2 */
6758 + unsigned int save, call, nop;
6759 +
6760 + err = get_user(save, (unsigned int *)(regs->pc-4));
6761 + err |= get_user(call, (unsigned int *)regs->pc);
6762 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6763 + if (err)
6764 + break;
6765 +
6766 + if (save == 0x9DE3BFA8U &&
6767 + (call & 0xC0000000U) == 0x40000000U &&
6768 + nop == 0x01000000U)
6769 + {
6770 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6771 +
6772 + regs->u_regs[UREG_RETPC] = regs->pc;
6773 + regs->pc = dl_resolve;
6774 + regs->npc = dl_resolve+4;
6775 + return 3;
6776 + }
6777 + } while (0);
6778 +#endif
6779 +
6780 + return 1;
6781 +}
6782 +
6783 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6784 +{
6785 + unsigned long i;
6786 +
6787 + printk(KERN_ERR "PAX: bytes at PC: ");
6788 + for (i = 0; i < 8; i++) {
6789 + unsigned int c;
6790 + if (get_user(c, (unsigned int *)pc+i))
6791 + printk(KERN_CONT "???????? ");
6792 + else
6793 + printk(KERN_CONT "%08x ", c);
6794 + }
6795 + printk("\n");
6796 +}
6797 +#endif
6798 +
6799 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6800 int text_fault)
6801 {
6802 @@ -248,6 +522,24 @@ good_area:
6803 if (!(vma->vm_flags & VM_WRITE))
6804 goto bad_area;
6805 } else {
6806 +
6807 +#ifdef CONFIG_PAX_PAGEEXEC
6808 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6809 + up_read(&mm->mmap_sem);
6810 + switch (pax_handle_fetch_fault(regs)) {
6811 +
6812 +#ifdef CONFIG_PAX_EMUPLT
6813 + case 2:
6814 + case 3:
6815 + return;
6816 +#endif
6817 +
6818 + }
6819 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6820 + do_group_exit(SIGKILL);
6821 + }
6822 +#endif
6823 +
6824 /* Allow reads even for write-only mappings */
6825 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
6826 goto bad_area;
6827 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6828 index 1fe0429..8dd5dd5 100644
6829 --- a/arch/sparc/mm/fault_64.c
6830 +++ b/arch/sparc/mm/fault_64.c
6831 @@ -21,6 +21,9 @@
6832 #include <linux/kprobes.h>
6833 #include <linux/kdebug.h>
6834 #include <linux/percpu.h>
6835 +#include <linux/slab.h>
6836 +#include <linux/pagemap.h>
6837 +#include <linux/compiler.h>
6838
6839 #include <asm/page.h>
6840 #include <asm/pgtable.h>
6841 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6842 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6843 regs->tpc);
6844 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6845 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6846 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6847 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6848 dump_stack();
6849 unhandled_fault(regs->tpc, current, regs);
6850 @@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6851 show_regs(regs);
6852 }
6853
6854 +#ifdef CONFIG_PAX_PAGEEXEC
6855 +#ifdef CONFIG_PAX_DLRESOLVE
6856 +static void pax_emuplt_close(struct vm_area_struct *vma)
6857 +{
6858 + vma->vm_mm->call_dl_resolve = 0UL;
6859 +}
6860 +
6861 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6862 +{
6863 + unsigned int *kaddr;
6864 +
6865 + vmf->page = alloc_page(GFP_HIGHUSER);
6866 + if (!vmf->page)
6867 + return VM_FAULT_OOM;
6868 +
6869 + kaddr = kmap(vmf->page);
6870 + memset(kaddr, 0, PAGE_SIZE);
6871 + kaddr[0] = 0x9DE3BFA8U; /* save */
6872 + flush_dcache_page(vmf->page);
6873 + kunmap(vmf->page);
6874 + return VM_FAULT_MAJOR;
6875 +}
6876 +
6877 +static const struct vm_operations_struct pax_vm_ops = {
6878 + .close = pax_emuplt_close,
6879 + .fault = pax_emuplt_fault
6880 +};
6881 +
6882 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6883 +{
6884 + int ret;
6885 +
6886 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6887 + vma->vm_mm = current->mm;
6888 + vma->vm_start = addr;
6889 + vma->vm_end = addr + PAGE_SIZE;
6890 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6891 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6892 + vma->vm_ops = &pax_vm_ops;
6893 +
6894 + ret = insert_vm_struct(current->mm, vma);
6895 + if (ret)
6896 + return ret;
6897 +
6898 + ++current->mm->total_vm;
6899 + return 0;
6900 +}
6901 +#endif
6902 +
6903 +/*
6904 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6905 + *
6906 + * returns 1 when task should be killed
6907 + * 2 when patched PLT trampoline was detected
6908 + * 3 when unpatched PLT trampoline was detected
6909 + */
6910 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6911 +{
6912 +
6913 +#ifdef CONFIG_PAX_EMUPLT
6914 + int err;
6915 +
6916 + do { /* PaX: patched PLT emulation #1 */
6917 + unsigned int sethi1, sethi2, jmpl;
6918 +
6919 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6920 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6921 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6922 +
6923 + if (err)
6924 + break;
6925 +
6926 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6927 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6928 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6929 + {
6930 + unsigned long addr;
6931 +
6932 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6933 + addr = regs->u_regs[UREG_G1];
6934 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6935 +
6936 + if (test_thread_flag(TIF_32BIT))
6937 + addr &= 0xFFFFFFFFUL;
6938 +
6939 + regs->tpc = addr;
6940 + regs->tnpc = addr+4;
6941 + return 2;
6942 + }
6943 + } while (0);
6944 +
6945 + do { /* PaX: patched PLT emulation #2 */
6946 + unsigned int ba;
6947 +
6948 + err = get_user(ba, (unsigned int *)regs->tpc);
6949 +
6950 + if (err)
6951 + break;
6952 +
6953 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6954 + unsigned long addr;
6955 +
6956 + if ((ba & 0xFFC00000U) == 0x30800000U)
6957 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6958 + else
6959 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6960 +
6961 + if (test_thread_flag(TIF_32BIT))
6962 + addr &= 0xFFFFFFFFUL;
6963 +
6964 + regs->tpc = addr;
6965 + regs->tnpc = addr+4;
6966 + return 2;
6967 + }
6968 + } while (0);
6969 +
6970 + do { /* PaX: patched PLT emulation #3 */
6971 + unsigned int sethi, bajmpl, nop;
6972 +
6973 + err = get_user(sethi, (unsigned int *)regs->tpc);
6974 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
6975 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6976 +
6977 + if (err)
6978 + break;
6979 +
6980 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6982 + nop == 0x01000000U)
6983 + {
6984 + unsigned long addr;
6985 +
6986 + addr = (sethi & 0x003FFFFFU) << 10;
6987 + regs->u_regs[UREG_G1] = addr;
6988 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6989 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6990 + else
6991 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6992 +
6993 + if (test_thread_flag(TIF_32BIT))
6994 + addr &= 0xFFFFFFFFUL;
6995 +
6996 + regs->tpc = addr;
6997 + regs->tnpc = addr+4;
6998 + return 2;
6999 + }
7000 + } while (0);
7001 +
7002 + do { /* PaX: patched PLT emulation #4 */
7003 + unsigned int sethi, mov1, call, mov2;
7004 +
7005 + err = get_user(sethi, (unsigned int *)regs->tpc);
7006 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7007 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7008 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7009 +
7010 + if (err)
7011 + break;
7012 +
7013 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7014 + mov1 == 0x8210000FU &&
7015 + (call & 0xC0000000U) == 0x40000000U &&
7016 + mov2 == 0x9E100001U)
7017 + {
7018 + unsigned long addr;
7019 +
7020 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7021 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7022 +
7023 + if (test_thread_flag(TIF_32BIT))
7024 + addr &= 0xFFFFFFFFUL;
7025 +
7026 + regs->tpc = addr;
7027 + regs->tnpc = addr+4;
7028 + return 2;
7029 + }
7030 + } while (0);
7031 +
7032 + do { /* PaX: patched PLT emulation #5 */
7033 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7034 +
7035 + err = get_user(sethi, (unsigned int *)regs->tpc);
7036 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7037 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7038 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7039 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7040 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7041 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7042 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7043 +
7044 + if (err)
7045 + break;
7046 +
7047 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7048 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7049 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7050 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7051 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7052 + sllx == 0x83287020U &&
7053 + jmpl == 0x81C04005U &&
7054 + nop == 0x01000000U)
7055 + {
7056 + unsigned long addr;
7057 +
7058 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7059 + regs->u_regs[UREG_G1] <<= 32;
7060 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7061 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7062 + regs->tpc = addr;
7063 + regs->tnpc = addr+4;
7064 + return 2;
7065 + }
7066 + } while (0);
7067 +
7068 + do { /* PaX: patched PLT emulation #6 */
7069 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7070 +
7071 + err = get_user(sethi, (unsigned int *)regs->tpc);
7072 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7073 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7074 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7075 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7076 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7077 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7078 +
7079 + if (err)
7080 + break;
7081 +
7082 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7083 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7084 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7085 + sllx == 0x83287020U &&
7086 + (or & 0xFFFFE000U) == 0x8A116000U &&
7087 + jmpl == 0x81C04005U &&
7088 + nop == 0x01000000U)
7089 + {
7090 + unsigned long addr;
7091 +
7092 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7093 + regs->u_regs[UREG_G1] <<= 32;
7094 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7095 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7096 + regs->tpc = addr;
7097 + regs->tnpc = addr+4;
7098 + return 2;
7099 + }
7100 + } while (0);
7101 +
7102 + do { /* PaX: unpatched PLT emulation step 1 */
7103 + unsigned int sethi, ba, nop;
7104 +
7105 + err = get_user(sethi, (unsigned int *)regs->tpc);
7106 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7107 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7108 +
7109 + if (err)
7110 + break;
7111 +
7112 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7113 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7114 + nop == 0x01000000U)
7115 + {
7116 + unsigned long addr;
7117 + unsigned int save, call;
7118 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7119 +
7120 + if ((ba & 0xFFC00000U) == 0x30800000U)
7121 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7122 + else
7123 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7124 +
7125 + if (test_thread_flag(TIF_32BIT))
7126 + addr &= 0xFFFFFFFFUL;
7127 +
7128 + err = get_user(save, (unsigned int *)addr);
7129 + err |= get_user(call, (unsigned int *)(addr+4));
7130 + err |= get_user(nop, (unsigned int *)(addr+8));
7131 + if (err)
7132 + break;
7133 +
7134 +#ifdef CONFIG_PAX_DLRESOLVE
7135 + if (save == 0x9DE3BFA8U &&
7136 + (call & 0xC0000000U) == 0x40000000U &&
7137 + nop == 0x01000000U)
7138 + {
7139 + struct vm_area_struct *vma;
7140 + unsigned long call_dl_resolve;
7141 +
7142 + down_read(&current->mm->mmap_sem);
7143 + call_dl_resolve = current->mm->call_dl_resolve;
7144 + up_read(&current->mm->mmap_sem);
7145 + if (likely(call_dl_resolve))
7146 + goto emulate;
7147 +
7148 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7149 +
7150 + down_write(&current->mm->mmap_sem);
7151 + if (current->mm->call_dl_resolve) {
7152 + call_dl_resolve = current->mm->call_dl_resolve;
7153 + up_write(&current->mm->mmap_sem);
7154 + if (vma)
7155 + kmem_cache_free(vm_area_cachep, vma);
7156 + goto emulate;
7157 + }
7158 +
7159 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7160 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7161 + up_write(&current->mm->mmap_sem);
7162 + if (vma)
7163 + kmem_cache_free(vm_area_cachep, vma);
7164 + return 1;
7165 + }
7166 +
7167 + if (pax_insert_vma(vma, call_dl_resolve)) {
7168 + up_write(&current->mm->mmap_sem);
7169 + kmem_cache_free(vm_area_cachep, vma);
7170 + return 1;
7171 + }
7172 +
7173 + current->mm->call_dl_resolve = call_dl_resolve;
7174 + up_write(&current->mm->mmap_sem);
7175 +
7176 +emulate:
7177 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7178 + regs->tpc = call_dl_resolve;
7179 + regs->tnpc = addr+4;
7180 + return 3;
7181 + }
7182 +#endif
7183 +
7184 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7185 + if ((save & 0xFFC00000U) == 0x05000000U &&
7186 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7187 + nop == 0x01000000U)
7188 + {
7189 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7190 + regs->u_regs[UREG_G2] = addr + 4;
7191 + addr = (save & 0x003FFFFFU) << 10;
7192 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7193 +
7194 + if (test_thread_flag(TIF_32BIT))
7195 + addr &= 0xFFFFFFFFUL;
7196 +
7197 + regs->tpc = addr;
7198 + regs->tnpc = addr+4;
7199 + return 3;
7200 + }
7201 +
7202 + /* PaX: 64-bit PLT stub */
7203 + err = get_user(sethi1, (unsigned int *)addr);
7204 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7205 + err |= get_user(or1, (unsigned int *)(addr+8));
7206 + err |= get_user(or2, (unsigned int *)(addr+12));
7207 + err |= get_user(sllx, (unsigned int *)(addr+16));
7208 + err |= get_user(add, (unsigned int *)(addr+20));
7209 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7210 + err |= get_user(nop, (unsigned int *)(addr+28));
7211 + if (err)
7212 + break;
7213 +
7214 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7215 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7216 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7217 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7218 + sllx == 0x89293020U &&
7219 + add == 0x8A010005U &&
7220 + jmpl == 0x89C14000U &&
7221 + nop == 0x01000000U)
7222 + {
7223 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7224 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7225 + regs->u_regs[UREG_G4] <<= 32;
7226 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7227 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7228 + regs->u_regs[UREG_G4] = addr + 24;
7229 + addr = regs->u_regs[UREG_G5];
7230 + regs->tpc = addr;
7231 + regs->tnpc = addr+4;
7232 + return 3;
7233 + }
7234 + }
7235 + } while (0);
7236 +
7237 +#ifdef CONFIG_PAX_DLRESOLVE
7238 + do { /* PaX: unpatched PLT emulation step 2 */
7239 + unsigned int save, call, nop;
7240 +
7241 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7242 + err |= get_user(call, (unsigned int *)regs->tpc);
7243 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7244 + if (err)
7245 + break;
7246 +
7247 + if (save == 0x9DE3BFA8U &&
7248 + (call & 0xC0000000U) == 0x40000000U &&
7249 + nop == 0x01000000U)
7250 + {
7251 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7252 +
7253 + if (test_thread_flag(TIF_32BIT))
7254 + dl_resolve &= 0xFFFFFFFFUL;
7255 +
7256 + regs->u_regs[UREG_RETPC] = regs->tpc;
7257 + regs->tpc = dl_resolve;
7258 + regs->tnpc = dl_resolve+4;
7259 + return 3;
7260 + }
7261 + } while (0);
7262 +#endif
7263 +
7264 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7265 + unsigned int sethi, ba, nop;
7266 +
7267 + err = get_user(sethi, (unsigned int *)regs->tpc);
7268 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7269 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7270 +
7271 + if (err)
7272 + break;
7273 +
7274 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7275 + (ba & 0xFFF00000U) == 0x30600000U &&
7276 + nop == 0x01000000U)
7277 + {
7278 + unsigned long addr;
7279 +
7280 + addr = (sethi & 0x003FFFFFU) << 10;
7281 + regs->u_regs[UREG_G1] = addr;
7282 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7283 +
7284 + if (test_thread_flag(TIF_32BIT))
7285 + addr &= 0xFFFFFFFFUL;
7286 +
7287 + regs->tpc = addr;
7288 + regs->tnpc = addr+4;
7289 + return 2;
7290 + }
7291 + } while (0);
7292 +
7293 +#endif
7294 +
7295 + return 1;
7296 +}
7297 +
7298 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7299 +{
7300 + unsigned long i;
7301 +
7302 + printk(KERN_ERR "PAX: bytes at PC: ");
7303 + for (i = 0; i < 8; i++) {
7304 + unsigned int c;
7305 + if (get_user(c, (unsigned int *)pc+i))
7306 + printk(KERN_CONT "???????? ");
7307 + else
7308 + printk(KERN_CONT "%08x ", c);
7309 + }
7310 + printk("\n");
7311 +}
7312 +#endif
7313 +
7314 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7315 {
7316 struct mm_struct *mm = current->mm;
7317 @@ -343,6 +806,29 @@ retry:
7318 if (!vma)
7319 goto bad_area;
7320
7321 +#ifdef CONFIG_PAX_PAGEEXEC
7322 + /* PaX: detect ITLB misses on non-exec pages */
7323 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7324 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7325 + {
7326 + if (address != regs->tpc)
7327 + goto good_area;
7328 +
7329 + up_read(&mm->mmap_sem);
7330 + switch (pax_handle_fetch_fault(regs)) {
7331 +
7332 +#ifdef CONFIG_PAX_EMUPLT
7333 + case 2:
7334 + case 3:
7335 + return;
7336 +#endif
7337 +
7338 + }
7339 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7340 + do_group_exit(SIGKILL);
7341 + }
7342 +#endif
7343 +
7344 /* Pure DTLB misses do not tell us whether the fault causing
7345 * load/store/atomic was a write or not, it only says that there
7346 * was no match. So in such a case we (carefully) read the
7347 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7348 index 07e1453..0a7d9e9 100644
7349 --- a/arch/sparc/mm/hugetlbpage.c
7350 +++ b/arch/sparc/mm/hugetlbpage.c
7351 @@ -67,7 +67,7 @@ full_search:
7352 }
7353 return -ENOMEM;
7354 }
7355 - if (likely(!vma || addr + len <= vma->vm_start)) {
7356 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7357 /*
7358 * Remember the place where we stopped the search:
7359 */
7360 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7361 /* make sure it can fit in the remaining address space */
7362 if (likely(addr > len)) {
7363 vma = find_vma(mm, addr-len);
7364 - if (!vma || addr <= vma->vm_start) {
7365 + if (check_heap_stack_gap(vma, addr - len, len)) {
7366 /* remember the address as a hint for next time */
7367 return (mm->free_area_cache = addr-len);
7368 }
7369 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7370 if (unlikely(mm->mmap_base < len))
7371 goto bottomup;
7372
7373 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7374 + addr = mm->mmap_base - len;
7375
7376 do {
7377 + addr &= HPAGE_MASK;
7378 /*
7379 * Lookup failure means no vma is above this address,
7380 * else if new region fits below vma->vm_start,
7381 * return with success:
7382 */
7383 vma = find_vma(mm, addr);
7384 - if (likely(!vma || addr+len <= vma->vm_start)) {
7385 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7386 /* remember the address as a hint for next time */
7387 return (mm->free_area_cache = addr);
7388 }
7389 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7390 mm->cached_hole_size = vma->vm_start - addr;
7391
7392 /* try just below the current vma->vm_start */
7393 - addr = (vma->vm_start-len) & HPAGE_MASK;
7394 - } while (likely(len < vma->vm_start));
7395 + addr = skip_heap_stack_gap(vma, len);
7396 + } while (!IS_ERR_VALUE(addr));
7397
7398 bottomup:
7399 /*
7400 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7401 if (addr) {
7402 addr = ALIGN(addr, HPAGE_SIZE);
7403 vma = find_vma(mm, addr);
7404 - if (task_size - len >= addr &&
7405 - (!vma || addr + len <= vma->vm_start))
7406 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7407 return addr;
7408 }
7409 if (mm->get_unmapped_area == arch_get_unmapped_area)
7410 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7411 index f4500c6..889656c 100644
7412 --- a/arch/tile/include/asm/atomic_64.h
7413 +++ b/arch/tile/include/asm/atomic_64.h
7414 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7415
7416 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7417
7418 +#define atomic64_read_unchecked(v) atomic64_read(v)
7419 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7420 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7421 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7422 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7423 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7424 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7425 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7426 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7427 +
7428 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7429 #define smp_mb__before_atomic_dec() smp_mb()
7430 #define smp_mb__after_atomic_dec() smp_mb()
7431 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7432 index 392e533..536b092 100644
7433 --- a/arch/tile/include/asm/cache.h
7434 +++ b/arch/tile/include/asm/cache.h
7435 @@ -15,11 +15,12 @@
7436 #ifndef _ASM_TILE_CACHE_H
7437 #define _ASM_TILE_CACHE_H
7438
7439 +#include <linux/const.h>
7440 #include <arch/chip.h>
7441
7442 /* bytes per L1 data cache line */
7443 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7444 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7445 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7446
7447 /* bytes per L2 cache line */
7448 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7449 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7450 index 9ab078a..d6635c2 100644
7451 --- a/arch/tile/include/asm/uaccess.h
7452 +++ b/arch/tile/include/asm/uaccess.h
7453 @@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7454 const void __user *from,
7455 unsigned long n)
7456 {
7457 - int sz = __compiletime_object_size(to);
7458 + size_t sz = __compiletime_object_size(to);
7459
7460 - if (likely(sz == -1 || sz >= n))
7461 + if (likely(sz == (size_t)-1 || sz >= n))
7462 n = _copy_from_user(to, from, n);
7463 else
7464 copy_from_user_overflow();
7465 diff --git a/arch/um/Makefile b/arch/um/Makefile
7466 index 0970910..9f65c40 100644
7467 --- a/arch/um/Makefile
7468 +++ b/arch/um/Makefile
7469 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7470 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7471 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7472
7473 +ifdef CONSTIFY_PLUGIN
7474 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
7475 +endif
7476 +
7477 #This will adjust *FLAGS accordingly to the platform.
7478 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7479
7480 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7481 index 19e1bdd..3665b77 100644
7482 --- a/arch/um/include/asm/cache.h
7483 +++ b/arch/um/include/asm/cache.h
7484 @@ -1,6 +1,7 @@
7485 #ifndef __UM_CACHE_H
7486 #define __UM_CACHE_H
7487
7488 +#include <linux/const.h>
7489
7490 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7491 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7492 @@ -12,6 +13,6 @@
7493 # define L1_CACHE_SHIFT 5
7494 #endif
7495
7496 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7497 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7498
7499 #endif
7500 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7501 index 6c03acd..a5e0215 100644
7502 --- a/arch/um/include/asm/kmap_types.h
7503 +++ b/arch/um/include/asm/kmap_types.h
7504 @@ -23,6 +23,7 @@ enum km_type {
7505 KM_IRQ1,
7506 KM_SOFTIRQ0,
7507 KM_SOFTIRQ1,
7508 + KM_CLEARPAGE,
7509 KM_TYPE_NR
7510 };
7511
7512 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7513 index 7cfc3ce..cbd1a58 100644
7514 --- a/arch/um/include/asm/page.h
7515 +++ b/arch/um/include/asm/page.h
7516 @@ -14,6 +14,9 @@
7517 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7518 #define PAGE_MASK (~(PAGE_SIZE-1))
7519
7520 +#define ktla_ktva(addr) (addr)
7521 +#define ktva_ktla(addr) (addr)
7522 +
7523 #ifndef __ASSEMBLY__
7524
7525 struct page;
7526 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7527 index 0032f92..cd151e0 100644
7528 --- a/arch/um/include/asm/pgtable-3level.h
7529 +++ b/arch/um/include/asm/pgtable-3level.h
7530 @@ -58,6 +58,7 @@
7531 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7532 #define pud_populate(mm, pud, pmd) \
7533 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7534 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7535
7536 #ifdef CONFIG_64BIT
7537 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7538 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7539 index ccb9a9d..cc425bb 100644
7540 --- a/arch/um/kernel/process.c
7541 +++ b/arch/um/kernel/process.c
7542 @@ -407,22 +407,6 @@ int singlestepping(void * t)
7543 return 2;
7544 }
7545
7546 -/*
7547 - * Only x86 and x86_64 have an arch_align_stack().
7548 - * All other arches have "#define arch_align_stack(x) (x)"
7549 - * in their asm/system.h
7550 - * As this is included in UML from asm-um/system-generic.h,
7551 - * we can use it to behave as the subarch does.
7552 - */
7553 -#ifndef arch_align_stack
7554 -unsigned long arch_align_stack(unsigned long sp)
7555 -{
7556 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7557 - sp -= get_random_int() % 8192;
7558 - return sp & ~0xf;
7559 -}
7560 -#endif
7561 -
7562 unsigned long get_wchan(struct task_struct *p)
7563 {
7564 unsigned long stack_page, sp, ip;
7565 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7566 index ad8f795..2c7eec6 100644
7567 --- a/arch/unicore32/include/asm/cache.h
7568 +++ b/arch/unicore32/include/asm/cache.h
7569 @@ -12,8 +12,10 @@
7570 #ifndef __UNICORE_CACHE_H__
7571 #define __UNICORE_CACHE_H__
7572
7573 -#define L1_CACHE_SHIFT (5)
7574 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7575 +#include <linux/const.h>
7576 +
7577 +#define L1_CACHE_SHIFT 5
7578 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7579
7580 /*
7581 * Memory returned by kmalloc() may be used for DMA, so we must make
7582 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7583 index c70684f..698fa4b 100644
7584 --- a/arch/x86/Kconfig
7585 +++ b/arch/x86/Kconfig
7586 @@ -218,7 +218,7 @@ config X86_HT
7587
7588 config X86_32_LAZY_GS
7589 def_bool y
7590 - depends on X86_32 && !CC_STACKPROTECTOR
7591 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7592
7593 config ARCH_HWEIGHT_CFLAGS
7594 string
7595 @@ -1047,7 +1047,7 @@ choice
7596
7597 config NOHIGHMEM
7598 bool "off"
7599 - depends on !X86_NUMAQ
7600 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7601 ---help---
7602 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7603 However, the address space of 32-bit x86 processors is only 4
7604 @@ -1084,7 +1084,7 @@ config NOHIGHMEM
7605
7606 config HIGHMEM4G
7607 bool "4GB"
7608 - depends on !X86_NUMAQ
7609 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7610 ---help---
7611 Select this if you have a 32-bit processor and between 1 and 4
7612 gigabytes of physical RAM.
7613 @@ -1138,7 +1138,7 @@ config PAGE_OFFSET
7614 hex
7615 default 0xB0000000 if VMSPLIT_3G_OPT
7616 default 0x80000000 if VMSPLIT_2G
7617 - default 0x78000000 if VMSPLIT_2G_OPT
7618 + default 0x70000000 if VMSPLIT_2G_OPT
7619 default 0x40000000 if VMSPLIT_1G
7620 default 0xC0000000
7621 depends on X86_32
7622 @@ -1526,6 +1526,7 @@ config SECCOMP
7623
7624 config CC_STACKPROTECTOR
7625 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7626 + depends on X86_64 || !PAX_MEMORY_UDEREF
7627 ---help---
7628 This option turns on the -fstack-protector GCC feature. This
7629 feature puts, at the beginning of functions, a canary value on
7630 @@ -1583,6 +1584,7 @@ config KEXEC_JUMP
7631 config PHYSICAL_START
7632 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7633 default "0x1000000"
7634 + range 0x400000 0x40000000
7635 ---help---
7636 This gives the physical address where the kernel is loaded.
7637
7638 @@ -1646,6 +1648,7 @@ config X86_NEED_RELOCS
7639 config PHYSICAL_ALIGN
7640 hex "Alignment value to which kernel should be aligned" if X86_32
7641 default "0x1000000"
7642 + range 0x400000 0x1000000 if PAX_KERNEXEC
7643 range 0x2000 0x1000000
7644 ---help---
7645 This value puts the alignment restrictions on physical address
7646 @@ -1677,9 +1680,10 @@ config HOTPLUG_CPU
7647 Say N if you want to disable CPU hotplug.
7648
7649 config COMPAT_VDSO
7650 - def_bool y
7651 + def_bool n
7652 prompt "Compat VDSO support"
7653 depends on X86_32 || IA32_EMULATION
7654 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7655 ---help---
7656 Map the 32-bit VDSO to the predictable old-style address too.
7657
7658 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7659 index 706e12e..62e4feb 100644
7660 --- a/arch/x86/Kconfig.cpu
7661 +++ b/arch/x86/Kconfig.cpu
7662 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7663
7664 config X86_F00F_BUG
7665 def_bool y
7666 - depends on M586MMX || M586TSC || M586 || M486 || M386
7667 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7668
7669 config X86_INVD_BUG
7670 def_bool y
7671 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7672
7673 config X86_ALIGNMENT_16
7674 def_bool y
7675 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7676 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7677
7678 config X86_INTEL_USERCOPY
7679 def_bool y
7680 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7681 # generates cmov.
7682 config X86_CMOV
7683 def_bool y
7684 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7685 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7686
7687 config X86_MINIMUM_CPU_FAMILY
7688 int
7689 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7690 index e46c214..ab62fd1 100644
7691 --- a/arch/x86/Kconfig.debug
7692 +++ b/arch/x86/Kconfig.debug
7693 @@ -84,7 +84,7 @@ config X86_PTDUMP
7694 config DEBUG_RODATA
7695 bool "Write protect kernel read-only data structures"
7696 default y
7697 - depends on DEBUG_KERNEL
7698 + depends on DEBUG_KERNEL && BROKEN
7699 ---help---
7700 Mark the kernel read-only data as write-protected in the pagetables,
7701 in order to catch accidental (and incorrect) writes to such const
7702 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7703
7704 config DEBUG_SET_MODULE_RONX
7705 bool "Set loadable kernel module data as NX and text as RO"
7706 - depends on MODULES
7707 + depends on MODULES && BROKEN
7708 ---help---
7709 This option helps catch unintended modifications to loadable
7710 kernel module's text and read-only data. It also prevents execution
7711 @@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7712
7713 config DEBUG_STRICT_USER_COPY_CHECKS
7714 bool "Strict copy size checks"
7715 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7716 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7717 ---help---
7718 Enabling this option turns a certain set of sanity checks for user
7719 copy operations into compile time failures.
7720 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7721 index 1f25214..39422b3 100644
7722 --- a/arch/x86/Makefile
7723 +++ b/arch/x86/Makefile
7724 @@ -46,6 +46,7 @@ else
7725 UTS_MACHINE := x86_64
7726 CHECKFLAGS += -D__x86_64__ -m64
7727
7728 + biarch := $(call cc-option,-m64)
7729 KBUILD_AFLAGS += -m64
7730 KBUILD_CFLAGS += -m64
7731
7732 @@ -222,3 +223,12 @@ define archhelp
7733 echo ' FDARGS="..." arguments for the booted kernel'
7734 echo ' FDINITRD=file initrd for the booted kernel'
7735 endef
7736 +
7737 +define OLD_LD
7738 +
7739 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7740 +*** Please upgrade your binutils to 2.18 or newer
7741 +endef
7742 +
7743 +archprepare:
7744 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7745 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7746 index 5a747dd..00bece7 100644
7747 --- a/arch/x86/boot/Makefile
7748 +++ b/arch/x86/boot/Makefile
7749 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7750 $(call cc-option, -fno-stack-protector) \
7751 $(call cc-option, -mpreferred-stack-boundary=2)
7752 KBUILD_CFLAGS += $(call cc-option, -m32)
7753 +ifdef CONSTIFY_PLUGIN
7754 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
7755 +endif
7756 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7757 GCOV_PROFILE := n
7758
7759 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7760 index 878e4b9..20537ab 100644
7761 --- a/arch/x86/boot/bitops.h
7762 +++ b/arch/x86/boot/bitops.h
7763 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7764 u8 v;
7765 const u32 *p = (const u32 *)addr;
7766
7767 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7768 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7769 return v;
7770 }
7771
7772 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7773
7774 static inline void set_bit(int nr, void *addr)
7775 {
7776 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7777 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7778 }
7779
7780 #endif /* BOOT_BITOPS_H */
7781 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7782 index 18997e5..83d9c67 100644
7783 --- a/arch/x86/boot/boot.h
7784 +++ b/arch/x86/boot/boot.h
7785 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7786 static inline u16 ds(void)
7787 {
7788 u16 seg;
7789 - asm("movw %%ds,%0" : "=rm" (seg));
7790 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7791 return seg;
7792 }
7793
7794 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7795 static inline int memcmp(const void *s1, const void *s2, size_t len)
7796 {
7797 u8 diff;
7798 - asm("repe; cmpsb; setnz %0"
7799 + asm volatile("repe; cmpsb; setnz %0"
7800 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7801 return diff;
7802 }
7803 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7804 index e398bb5..80fc805 100644
7805 --- a/arch/x86/boot/compressed/Makefile
7806 +++ b/arch/x86/boot/compressed/Makefile
7807 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7808 KBUILD_CFLAGS += $(cflags-y)
7809 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7810 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7811 +ifdef CONSTIFY_PLUGIN
7812 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
7813 +endif
7814
7815 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7816 GCOV_PROFILE := n
7817 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7818 index 4e85f5f..39fa641 100644
7819 --- a/arch/x86/boot/compressed/eboot.c
7820 +++ b/arch/x86/boot/compressed/eboot.c
7821 @@ -142,7 +142,6 @@ again:
7822 *addr = max_addr;
7823 }
7824
7825 -free_pool:
7826 efi_call_phys1(sys_table->boottime->free_pool, map);
7827
7828 fail:
7829 @@ -206,7 +205,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7830 if (i == map_size / desc_size)
7831 status = EFI_NOT_FOUND;
7832
7833 -free_pool:
7834 efi_call_phys1(sys_table->boottime->free_pool, map);
7835 fail:
7836 return status;
7837 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7838 index c85e3ac..6f5aa80 100644
7839 --- a/arch/x86/boot/compressed/head_32.S
7840 +++ b/arch/x86/boot/compressed/head_32.S
7841 @@ -106,7 +106,7 @@ preferred_addr:
7842 notl %eax
7843 andl %eax, %ebx
7844 #else
7845 - movl $LOAD_PHYSICAL_ADDR, %ebx
7846 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7847 #endif
7848
7849 /* Target address to relocate to for decompression */
7850 @@ -192,7 +192,7 @@ relocated:
7851 * and where it was actually loaded.
7852 */
7853 movl %ebp, %ebx
7854 - subl $LOAD_PHYSICAL_ADDR, %ebx
7855 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7856 jz 2f /* Nothing to be done if loaded at compiled addr. */
7857 /*
7858 * Process relocations.
7859 @@ -200,8 +200,7 @@ relocated:
7860
7861 1: subl $4, %edi
7862 movl (%edi), %ecx
7863 - testl %ecx, %ecx
7864 - jz 2f
7865 + jecxz 2f
7866 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7867 jmp 1b
7868 2:
7869 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7870 index 87e03a1..0d94c76 100644
7871 --- a/arch/x86/boot/compressed/head_64.S
7872 +++ b/arch/x86/boot/compressed/head_64.S
7873 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7874 notl %eax
7875 andl %eax, %ebx
7876 #else
7877 - movl $LOAD_PHYSICAL_ADDR, %ebx
7878 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7879 #endif
7880
7881 /* Target address to relocate to for decompression */
7882 @@ -263,7 +263,7 @@ preferred_addr:
7883 notq %rax
7884 andq %rax, %rbp
7885 #else
7886 - movq $LOAD_PHYSICAL_ADDR, %rbp
7887 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7888 #endif
7889
7890 /* Target address to relocate to for decompression */
7891 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7892 index 7116dcb..d9ae1d7 100644
7893 --- a/arch/x86/boot/compressed/misc.c
7894 +++ b/arch/x86/boot/compressed/misc.c
7895 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7896 case PT_LOAD:
7897 #ifdef CONFIG_RELOCATABLE
7898 dest = output;
7899 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7900 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7901 #else
7902 dest = (void *)(phdr->p_paddr);
7903 #endif
7904 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7905 error("Destination address too large");
7906 #endif
7907 #ifndef CONFIG_RELOCATABLE
7908 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7909 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7910 error("Wrong destination address");
7911 #endif
7912
7913 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7914 index 4d3ff03..e4972ff 100644
7915 --- a/arch/x86/boot/cpucheck.c
7916 +++ b/arch/x86/boot/cpucheck.c
7917 @@ -74,7 +74,7 @@ static int has_fpu(void)
7918 u16 fcw = -1, fsw = -1;
7919 u32 cr0;
7920
7921 - asm("movl %%cr0,%0" : "=r" (cr0));
7922 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7923 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7924 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7925 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7926 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7927 {
7928 u32 f0, f1;
7929
7930 - asm("pushfl ; "
7931 + asm volatile("pushfl ; "
7932 "pushfl ; "
7933 "popl %0 ; "
7934 "movl %0,%1 ; "
7935 @@ -115,7 +115,7 @@ static void get_flags(void)
7936 set_bit(X86_FEATURE_FPU, cpu.flags);
7937
7938 if (has_eflag(X86_EFLAGS_ID)) {
7939 - asm("cpuid"
7940 + asm volatile("cpuid"
7941 : "=a" (max_intel_level),
7942 "=b" (cpu_vendor[0]),
7943 "=d" (cpu_vendor[1]),
7944 @@ -124,7 +124,7 @@ static void get_flags(void)
7945
7946 if (max_intel_level >= 0x00000001 &&
7947 max_intel_level <= 0x0000ffff) {
7948 - asm("cpuid"
7949 + asm volatile("cpuid"
7950 : "=a" (tfms),
7951 "=c" (cpu.flags[4]),
7952 "=d" (cpu.flags[0])
7953 @@ -136,7 +136,7 @@ static void get_flags(void)
7954 cpu.model += ((tfms >> 16) & 0xf) << 4;
7955 }
7956
7957 - asm("cpuid"
7958 + asm volatile("cpuid"
7959 : "=a" (max_amd_level)
7960 : "a" (0x80000000)
7961 : "ebx", "ecx", "edx");
7962 @@ -144,7 +144,7 @@ static void get_flags(void)
7963 if (max_amd_level >= 0x80000001 &&
7964 max_amd_level <= 0x8000ffff) {
7965 u32 eax = 0x80000001;
7966 - asm("cpuid"
7967 + asm volatile("cpuid"
7968 : "+a" (eax),
7969 "=c" (cpu.flags[6]),
7970 "=d" (cpu.flags[1])
7971 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7972 u32 ecx = MSR_K7_HWCR;
7973 u32 eax, edx;
7974
7975 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7976 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7977 eax &= ~(1 << 15);
7978 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7979 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7980
7981 get_flags(); /* Make sure it really did something */
7982 err = check_flags();
7983 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7984 u32 ecx = MSR_VIA_FCR;
7985 u32 eax, edx;
7986
7987 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7988 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7989 eax |= (1<<1)|(1<<7);
7990 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7991 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7992
7993 set_bit(X86_FEATURE_CX8, cpu.flags);
7994 err = check_flags();
7995 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7996 u32 eax, edx;
7997 u32 level = 1;
7998
7999 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8000 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8001 - asm("cpuid"
8002 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8003 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8004 + asm volatile("cpuid"
8005 : "+a" (level), "=d" (cpu.flags[0])
8006 : : "ecx", "ebx");
8007 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8008 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8009
8010 err = check_flags();
8011 }
8012 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8013 index efe5acf..22a3784 100644
8014 --- a/arch/x86/boot/header.S
8015 +++ b/arch/x86/boot/header.S
8016 @@ -391,10 +391,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8017 # single linked list of
8018 # struct setup_data
8019
8020 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8021 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8022
8023 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8024 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8025 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
8026 +#else
8027 #define VO_INIT_SIZE (VO__end - VO__text)
8028 +#endif
8029 #if ZO_INIT_SIZE > VO_INIT_SIZE
8030 #define INIT_SIZE ZO_INIT_SIZE
8031 #else
8032 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8033 index db75d07..8e6d0af 100644
8034 --- a/arch/x86/boot/memory.c
8035 +++ b/arch/x86/boot/memory.c
8036 @@ -19,7 +19,7 @@
8037
8038 static int detect_memory_e820(void)
8039 {
8040 - int count = 0;
8041 + unsigned int count = 0;
8042 struct biosregs ireg, oreg;
8043 struct e820entry *desc = boot_params.e820_map;
8044 static struct e820entry buf; /* static so it is zeroed */
8045 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8046 index 11e8c6e..fdbb1ed 100644
8047 --- a/arch/x86/boot/video-vesa.c
8048 +++ b/arch/x86/boot/video-vesa.c
8049 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8050
8051 boot_params.screen_info.vesapm_seg = oreg.es;
8052 boot_params.screen_info.vesapm_off = oreg.di;
8053 + boot_params.screen_info.vesapm_size = oreg.cx;
8054 }
8055
8056 /*
8057 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8058 index 43eda28..5ab5fdb 100644
8059 --- a/arch/x86/boot/video.c
8060 +++ b/arch/x86/boot/video.c
8061 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8062 static unsigned int get_entry(void)
8063 {
8064 char entry_buf[4];
8065 - int i, len = 0;
8066 + unsigned int i, len = 0;
8067 int key;
8068 unsigned int v;
8069
8070 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8071 index 5b577d5..3c1fed4 100644
8072 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8073 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8074 @@ -8,6 +8,8 @@
8075 * including this sentence is retained in full.
8076 */
8077
8078 +#include <asm/alternative-asm.h>
8079 +
8080 .extern crypto_ft_tab
8081 .extern crypto_it_tab
8082 .extern crypto_fl_tab
8083 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8084 je B192; \
8085 leaq 32(r9),r9;
8086
8087 +#define ret pax_force_retaddr 0, 1; ret
8088 +
8089 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8090 movq r1,r2; \
8091 movq r3,r4; \
8092 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8093 index 3470624..201259d 100644
8094 --- a/arch/x86/crypto/aesni-intel_asm.S
8095 +++ b/arch/x86/crypto/aesni-intel_asm.S
8096 @@ -31,6 +31,7 @@
8097
8098 #include <linux/linkage.h>
8099 #include <asm/inst.h>
8100 +#include <asm/alternative-asm.h>
8101
8102 #ifdef __x86_64__
8103 .data
8104 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8105 pop %r14
8106 pop %r13
8107 pop %r12
8108 + pax_force_retaddr 0, 1
8109 ret
8110 +ENDPROC(aesni_gcm_dec)
8111
8112
8113 /*****************************************************************************
8114 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8115 pop %r14
8116 pop %r13
8117 pop %r12
8118 + pax_force_retaddr 0, 1
8119 ret
8120 +ENDPROC(aesni_gcm_enc)
8121
8122 #endif
8123
8124 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8125 pxor %xmm1, %xmm0
8126 movaps %xmm0, (TKEYP)
8127 add $0x10, TKEYP
8128 + pax_force_retaddr_bts
8129 ret
8130
8131 .align 4
8132 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8133 shufps $0b01001110, %xmm2, %xmm1
8134 movaps %xmm1, 0x10(TKEYP)
8135 add $0x20, TKEYP
8136 + pax_force_retaddr_bts
8137 ret
8138
8139 .align 4
8140 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8141
8142 movaps %xmm0, (TKEYP)
8143 add $0x10, TKEYP
8144 + pax_force_retaddr_bts
8145 ret
8146
8147 .align 4
8148 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8149 pxor %xmm1, %xmm2
8150 movaps %xmm2, (TKEYP)
8151 add $0x10, TKEYP
8152 + pax_force_retaddr_bts
8153 ret
8154
8155 /*
8156 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8157 #ifndef __x86_64__
8158 popl KEYP
8159 #endif
8160 + pax_force_retaddr 0, 1
8161 ret
8162 +ENDPROC(aesni_set_key)
8163
8164 /*
8165 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8166 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8167 popl KLEN
8168 popl KEYP
8169 #endif
8170 + pax_force_retaddr 0, 1
8171 ret
8172 +ENDPROC(aesni_enc)
8173
8174 /*
8175 * _aesni_enc1: internal ABI
8176 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8177 AESENC KEY STATE
8178 movaps 0x70(TKEYP), KEY
8179 AESENCLAST KEY STATE
8180 + pax_force_retaddr_bts
8181 ret
8182
8183 /*
8184 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8185 AESENCLAST KEY STATE2
8186 AESENCLAST KEY STATE3
8187 AESENCLAST KEY STATE4
8188 + pax_force_retaddr_bts
8189 ret
8190
8191 /*
8192 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8193 popl KLEN
8194 popl KEYP
8195 #endif
8196 + pax_force_retaddr 0, 1
8197 ret
8198 +ENDPROC(aesni_dec)
8199
8200 /*
8201 * _aesni_dec1: internal ABI
8202 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8203 AESDEC KEY STATE
8204 movaps 0x70(TKEYP), KEY
8205 AESDECLAST KEY STATE
8206 + pax_force_retaddr_bts
8207 ret
8208
8209 /*
8210 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8211 AESDECLAST KEY STATE2
8212 AESDECLAST KEY STATE3
8213 AESDECLAST KEY STATE4
8214 + pax_force_retaddr_bts
8215 ret
8216
8217 /*
8218 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8219 popl KEYP
8220 popl LEN
8221 #endif
8222 + pax_force_retaddr 0, 1
8223 ret
8224 +ENDPROC(aesni_ecb_enc)
8225
8226 /*
8227 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8228 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8229 popl KEYP
8230 popl LEN
8231 #endif
8232 + pax_force_retaddr 0, 1
8233 ret
8234 +ENDPROC(aesni_ecb_dec)
8235
8236 /*
8237 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8238 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8239 popl LEN
8240 popl IVP
8241 #endif
8242 + pax_force_retaddr 0, 1
8243 ret
8244 +ENDPROC(aesni_cbc_enc)
8245
8246 /*
8247 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8248 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8249 popl LEN
8250 popl IVP
8251 #endif
8252 + pax_force_retaddr 0, 1
8253 ret
8254 +ENDPROC(aesni_cbc_dec)
8255
8256 #ifdef __x86_64__
8257 .align 16
8258 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
8259 mov $1, TCTR_LOW
8260 MOVQ_R64_XMM TCTR_LOW INC
8261 MOVQ_R64_XMM CTR TCTR_LOW
8262 + pax_force_retaddr_bts
8263 ret
8264
8265 /*
8266 @@ -2554,6 +2582,7 @@ _aesni_inc:
8267 .Linc_low:
8268 movaps CTR, IV
8269 PSHUFB_XMM BSWAP_MASK IV
8270 + pax_force_retaddr_bts
8271 ret
8272
8273 /*
8274 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8275 .Lctr_enc_ret:
8276 movups IV, (IVP)
8277 .Lctr_enc_just_ret:
8278 + pax_force_retaddr 0, 1
8279 ret
8280 +ENDPROC(aesni_ctr_enc)
8281 #endif
8282 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8283 index 391d245..67f35c2 100644
8284 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8285 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8286 @@ -20,6 +20,8 @@
8287 *
8288 */
8289
8290 +#include <asm/alternative-asm.h>
8291 +
8292 .file "blowfish-x86_64-asm.S"
8293 .text
8294
8295 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8296 jnz __enc_xor;
8297
8298 write_block();
8299 + pax_force_retaddr 0, 1
8300 ret;
8301 __enc_xor:
8302 xor_block();
8303 + pax_force_retaddr 0, 1
8304 ret;
8305
8306 .align 8
8307 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8308
8309 movq %r11, %rbp;
8310
8311 + pax_force_retaddr 0, 1
8312 ret;
8313
8314 /**********************************************************************
8315 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8316
8317 popq %rbx;
8318 popq %rbp;
8319 + pax_force_retaddr 0, 1
8320 ret;
8321
8322 __enc_xor4:
8323 @@ -349,6 +355,7 @@ __enc_xor4:
8324
8325 popq %rbx;
8326 popq %rbp;
8327 + pax_force_retaddr 0, 1
8328 ret;
8329
8330 .align 8
8331 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8332 popq %rbx;
8333 popq %rbp;
8334
8335 + pax_force_retaddr 0, 1
8336 ret;
8337
8338 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8339 index 0b33743..7a56206 100644
8340 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8341 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8342 @@ -20,6 +20,8 @@
8343 *
8344 */
8345
8346 +#include <asm/alternative-asm.h>
8347 +
8348 .file "camellia-x86_64-asm_64.S"
8349 .text
8350
8351 @@ -229,12 +231,14 @@ __enc_done:
8352 enc_outunpack(mov, RT1);
8353
8354 movq RRBP, %rbp;
8355 + pax_force_retaddr 0, 1
8356 ret;
8357
8358 __enc_xor:
8359 enc_outunpack(xor, RT1);
8360
8361 movq RRBP, %rbp;
8362 + pax_force_retaddr 0, 1
8363 ret;
8364
8365 .global camellia_dec_blk;
8366 @@ -275,6 +279,7 @@ __dec_rounds16:
8367 dec_outunpack();
8368
8369 movq RRBP, %rbp;
8370 + pax_force_retaddr 0, 1
8371 ret;
8372
8373 /**********************************************************************
8374 @@ -468,6 +473,7 @@ __enc2_done:
8375
8376 movq RRBP, %rbp;
8377 popq %rbx;
8378 + pax_force_retaddr 0, 1
8379 ret;
8380
8381 __enc2_xor:
8382 @@ -475,6 +481,7 @@ __enc2_xor:
8383
8384 movq RRBP, %rbp;
8385 popq %rbx;
8386 + pax_force_retaddr 0, 1
8387 ret;
8388
8389 .global camellia_dec_blk_2way;
8390 @@ -517,4 +524,5 @@ __dec2_rounds16:
8391
8392 movq RRBP, %rbp;
8393 movq RXOR, %rbx;
8394 + pax_force_retaddr 0, 1
8395 ret;
8396 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8397 index 6214a9b..1f4fc9a 100644
8398 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8399 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8400 @@ -1,3 +1,5 @@
8401 +#include <asm/alternative-asm.h>
8402 +
8403 # enter ECRYPT_encrypt_bytes
8404 .text
8405 .p2align 5
8406 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8407 add %r11,%rsp
8408 mov %rdi,%rax
8409 mov %rsi,%rdx
8410 + pax_force_retaddr 0, 1
8411 ret
8412 # bytesatleast65:
8413 ._bytesatleast65:
8414 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8415 add %r11,%rsp
8416 mov %rdi,%rax
8417 mov %rsi,%rdx
8418 + pax_force_retaddr
8419 ret
8420 # enter ECRYPT_ivsetup
8421 .text
8422 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8423 add %r11,%rsp
8424 mov %rdi,%rax
8425 mov %rsi,%rdx
8426 + pax_force_retaddr
8427 ret
8428 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8429 index 3ee1ff0..cbc568b 100644
8430 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8431 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8432 @@ -24,6 +24,8 @@
8433 *
8434 */
8435
8436 +#include <asm/alternative-asm.h>
8437 +
8438 .file "serpent-sse2-x86_64-asm_64.S"
8439 .text
8440
8441 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8442 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8443 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8444
8445 + pax_force_retaddr
8446 ret;
8447
8448 __enc_xor8:
8449 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8450 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8451
8452 + pax_force_retaddr
8453 ret;
8454
8455 .align 8
8456 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8457 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8458 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8459
8460 + pax_force_retaddr
8461 ret;
8462 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8463 index b2c2f57..8470cab 100644
8464 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8465 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8466 @@ -28,6 +28,8 @@
8467 * (at your option) any later version.
8468 */
8469
8470 +#include <asm/alternative-asm.h>
8471 +
8472 #define CTX %rdi // arg1
8473 #define BUF %rsi // arg2
8474 #define CNT %rdx // arg3
8475 @@ -104,6 +106,7 @@
8476 pop %r12
8477 pop %rbp
8478 pop %rbx
8479 + pax_force_retaddr 0, 1
8480 ret
8481
8482 .size \name, .-\name
8483 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8484 index 5b012a2..36d5364 100644
8485 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8486 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8487 @@ -20,6 +20,8 @@
8488 *
8489 */
8490
8491 +#include <asm/alternative-asm.h>
8492 +
8493 .file "twofish-x86_64-asm-3way.S"
8494 .text
8495
8496 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8497 popq %r13;
8498 popq %r14;
8499 popq %r15;
8500 + pax_force_retaddr 0, 1
8501 ret;
8502
8503 __enc_xor3:
8504 @@ -271,6 +274,7 @@ __enc_xor3:
8505 popq %r13;
8506 popq %r14;
8507 popq %r15;
8508 + pax_force_retaddr 0, 1
8509 ret;
8510
8511 .global twofish_dec_blk_3way
8512 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8513 popq %r13;
8514 popq %r14;
8515 popq %r15;
8516 + pax_force_retaddr 0, 1
8517 ret;
8518
8519 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8520 index 7bcf3fc..f53832f 100644
8521 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8522 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8523 @@ -21,6 +21,7 @@
8524 .text
8525
8526 #include <asm/asm-offsets.h>
8527 +#include <asm/alternative-asm.h>
8528
8529 #define a_offset 0
8530 #define b_offset 4
8531 @@ -268,6 +269,7 @@ twofish_enc_blk:
8532
8533 popq R1
8534 movq $1,%rax
8535 + pax_force_retaddr 0, 1
8536 ret
8537
8538 twofish_dec_blk:
8539 @@ -319,4 +321,5 @@ twofish_dec_blk:
8540
8541 popq R1
8542 movq $1,%rax
8543 + pax_force_retaddr 0, 1
8544 ret
8545 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8546 index 07b3a68..bd2a388 100644
8547 --- a/arch/x86/ia32/ia32_aout.c
8548 +++ b/arch/x86/ia32/ia32_aout.c
8549 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8550 unsigned long dump_start, dump_size;
8551 struct user32 dump;
8552
8553 + memset(&dump, 0, sizeof(dump));
8554 +
8555 fs = get_fs();
8556 set_fs(KERNEL_DS);
8557 has_dumped = 1;
8558 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8559 index 673ac9b..7a8c5df 100644
8560 --- a/arch/x86/ia32/ia32_signal.c
8561 +++ b/arch/x86/ia32/ia32_signal.c
8562 @@ -162,7 +162,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8563 }
8564 seg = get_fs();
8565 set_fs(KERNEL_DS);
8566 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8567 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8568 set_fs(seg);
8569 if (ret >= 0 && uoss_ptr) {
8570 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8571 @@ -361,7 +361,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8572 */
8573 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8574 size_t frame_size,
8575 - void **fpstate)
8576 + void __user **fpstate)
8577 {
8578 unsigned long sp;
8579
8580 @@ -382,7 +382,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8581
8582 if (used_math()) {
8583 sp = sp - sig_xstate_ia32_size;
8584 - *fpstate = (struct _fpstate_ia32 *) sp;
8585 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8586 if (save_i387_xstate_ia32(*fpstate) < 0)
8587 return (void __user *) -1L;
8588 }
8589 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8590 sp -= frame_size;
8591 /* Align the stack pointer according to the i386 ABI,
8592 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8593 - sp = ((sp + 4) & -16ul) - 4;
8594 + sp = ((sp - 12) & -16ul) - 4;
8595 return (void __user *) sp;
8596 }
8597
8598 @@ -448,7 +448,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8599 * These are actually not used anymore, but left because some
8600 * gdb versions depend on them as a marker.
8601 */
8602 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8603 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8604 } put_user_catch(err);
8605
8606 if (err)
8607 @@ -490,7 +490,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8608 0xb8,
8609 __NR_ia32_rt_sigreturn,
8610 0x80cd,
8611 - 0,
8612 + 0
8613 };
8614
8615 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8616 @@ -520,16 +520,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8617
8618 if (ka->sa.sa_flags & SA_RESTORER)
8619 restorer = ka->sa.sa_restorer;
8620 + else if (current->mm->context.vdso)
8621 + /* Return stub is in 32bit vsyscall page */
8622 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8623 else
8624 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8625 - rt_sigreturn);
8626 + restorer = &frame->retcode;
8627 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8628
8629 /*
8630 * Not actually used anymore, but left because some gdb
8631 * versions need it.
8632 */
8633 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8634 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8635 } put_user_catch(err);
8636
8637 if (err)
8638 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8639 index 20e5f7b..f33c779 100644
8640 --- a/arch/x86/ia32/ia32entry.S
8641 +++ b/arch/x86/ia32/ia32entry.S
8642 @@ -14,8 +14,10 @@
8643 #include <asm/segment.h>
8644 #include <asm/irqflags.h>
8645 #include <asm/asm.h>
8646 +#include <asm/pgtable.h>
8647 #include <linux/linkage.h>
8648 #include <linux/err.h>
8649 +#include <asm/alternative-asm.h>
8650
8651 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8652 #include <linux/elf-em.h>
8653 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
8654 ENDPROC(native_irq_enable_sysexit)
8655 #endif
8656
8657 + .macro pax_enter_kernel_user
8658 + pax_set_fptr_mask
8659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8660 + call pax_enter_kernel_user
8661 +#endif
8662 + .endm
8663 +
8664 + .macro pax_exit_kernel_user
8665 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8666 + call pax_exit_kernel_user
8667 +#endif
8668 +#ifdef CONFIG_PAX_RANDKSTACK
8669 + pushq %rax
8670 + pushq %r11
8671 + call pax_randomize_kstack
8672 + popq %r11
8673 + popq %rax
8674 +#endif
8675 + .endm
8676 +
8677 +.macro pax_erase_kstack
8678 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8679 + call pax_erase_kstack
8680 +#endif
8681 +.endm
8682 +
8683 /*
8684 * 32bit SYSENTER instruction entry.
8685 *
8686 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
8687 CFI_REGISTER rsp,rbp
8688 SWAPGS_UNSAFE_STACK
8689 movq PER_CPU_VAR(kernel_stack), %rsp
8690 - addq $(KERNEL_STACK_OFFSET),%rsp
8691 - /*
8692 - * No need to follow this irqs on/off section: the syscall
8693 - * disabled irqs, here we enable it straight after entry:
8694 - */
8695 - ENABLE_INTERRUPTS(CLBR_NONE)
8696 movl %ebp,%ebp /* zero extension */
8697 pushq_cfi $__USER32_DS
8698 /*CFI_REL_OFFSET ss,0*/
8699 @@ -134,22 +156,42 @@ ENTRY(ia32_sysenter_target)
8700 CFI_REL_OFFSET rsp,0
8701 pushfq_cfi
8702 /*CFI_REL_OFFSET rflags,0*/
8703 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8704 - CFI_REGISTER rip,r10
8705 + orl $X86_EFLAGS_IF,(%rsp)
8706 + GET_THREAD_INFO(%r11)
8707 + movl TI_sysenter_return(%r11), %r11d
8708 + CFI_REGISTER rip,r11
8709 pushq_cfi $__USER32_CS
8710 /*CFI_REL_OFFSET cs,0*/
8711 movl %eax, %eax
8712 - pushq_cfi %r10
8713 + pushq_cfi %r11
8714 CFI_REL_OFFSET rip,0
8715 pushq_cfi %rax
8716 cld
8717 SAVE_ARGS 0,1,0
8718 + pax_enter_kernel_user
8719 +
8720 +#ifdef CONFIG_PAX_RANDKSTACK
8721 + pax_erase_kstack
8722 +#endif
8723 +
8724 + /*
8725 + * No need to follow this irqs on/off section: the syscall
8726 + * disabled irqs, here we enable it straight after entry:
8727 + */
8728 + ENABLE_INTERRUPTS(CLBR_NONE)
8729 /* no need to do an access_ok check here because rbp has been
8730 32bit zero extended */
8731 +
8732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8733 + mov $PAX_USER_SHADOW_BASE,%r11
8734 + add %r11,%rbp
8735 +#endif
8736 +
8737 1: movl (%rbp),%ebp
8738 _ASM_EXTABLE(1b,ia32_badarg)
8739 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8740 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8741 + GET_THREAD_INFO(%r11)
8742 + orl $TS_COMPAT,TI_status(%r11)
8743 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8744 CFI_REMEMBER_STATE
8745 jnz sysenter_tracesys
8746 cmpq $(IA32_NR_syscalls-1),%rax
8747 @@ -159,12 +201,15 @@ sysenter_do_call:
8748 sysenter_dispatch:
8749 call *ia32_sys_call_table(,%rax,8)
8750 movq %rax,RAX-ARGOFFSET(%rsp)
8751 + GET_THREAD_INFO(%r11)
8752 DISABLE_INTERRUPTS(CLBR_NONE)
8753 TRACE_IRQS_OFF
8754 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8755 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8756 jnz sysexit_audit
8757 sysexit_from_sys_call:
8758 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8759 + pax_exit_kernel_user
8760 + pax_erase_kstack
8761 + andl $~TS_COMPAT,TI_status(%r11)
8762 /* clear IF, that popfq doesn't enable interrupts early */
8763 andl $~0x200,EFLAGS-R11(%rsp)
8764 movl RIP-R11(%rsp),%edx /* User %eip */
8765 @@ -190,6 +235,9 @@ sysexit_from_sys_call:
8766 movl %eax,%esi /* 2nd arg: syscall number */
8767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8768 call __audit_syscall_entry
8769 +
8770 + pax_erase_kstack
8771 +
8772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8773 cmpq $(IA32_NR_syscalls-1),%rax
8774 ja ia32_badsys
8775 @@ -201,7 +249,7 @@ sysexit_from_sys_call:
8776 .endm
8777
8778 .macro auditsys_exit exit
8779 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8780 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8781 jnz ia32_ret_from_sys_call
8782 TRACE_IRQS_ON
8783 sti
8784 @@ -212,11 +260,12 @@ sysexit_from_sys_call:
8785 1: setbe %al /* 1 if error, 0 if not */
8786 movzbl %al,%edi /* zero-extend that into %edi */
8787 call __audit_syscall_exit
8788 + GET_THREAD_INFO(%r11)
8789 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8790 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8791 cli
8792 TRACE_IRQS_OFF
8793 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8794 + testl %edi,TI_flags(%r11)
8795 jz \exit
8796 CLEAR_RREGS -ARGOFFSET
8797 jmp int_with_check
8798 @@ -234,7 +283,7 @@ sysexit_audit:
8799
8800 sysenter_tracesys:
8801 #ifdef CONFIG_AUDITSYSCALL
8802 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8803 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8804 jz sysenter_auditsys
8805 #endif
8806 SAVE_REST
8807 @@ -246,6 +295,9 @@ sysenter_tracesys:
8808 RESTORE_REST
8809 cmpq $(IA32_NR_syscalls-1),%rax
8810 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
8811 +
8812 + pax_erase_kstack
8813 +
8814 jmp sysenter_do_call
8815 CFI_ENDPROC
8816 ENDPROC(ia32_sysenter_target)
8817 @@ -273,19 +325,25 @@ ENDPROC(ia32_sysenter_target)
8818 ENTRY(ia32_cstar_target)
8819 CFI_STARTPROC32 simple
8820 CFI_SIGNAL_FRAME
8821 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8822 + CFI_DEF_CFA rsp,0
8823 CFI_REGISTER rip,rcx
8824 /*CFI_REGISTER rflags,r11*/
8825 SWAPGS_UNSAFE_STACK
8826 movl %esp,%r8d
8827 CFI_REGISTER rsp,r8
8828 movq PER_CPU_VAR(kernel_stack),%rsp
8829 + SAVE_ARGS 8*6,0,0
8830 + pax_enter_kernel_user
8831 +
8832 +#ifdef CONFIG_PAX_RANDKSTACK
8833 + pax_erase_kstack
8834 +#endif
8835 +
8836 /*
8837 * No need to follow this irqs on/off section: the syscall
8838 * disabled irqs and here we enable it straight after entry:
8839 */
8840 ENABLE_INTERRUPTS(CLBR_NONE)
8841 - SAVE_ARGS 8,0,0
8842 movl %eax,%eax /* zero extension */
8843 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8844 movq %rcx,RIP-ARGOFFSET(%rsp)
8845 @@ -301,10 +359,17 @@ ENTRY(ia32_cstar_target)
8846 /* no need to do an access_ok check here because r8 has been
8847 32bit zero extended */
8848 /* hardware stack frame is complete now */
8849 +
8850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8851 + mov $PAX_USER_SHADOW_BASE,%r11
8852 + add %r11,%r8
8853 +#endif
8854 +
8855 1: movl (%r8),%r9d
8856 _ASM_EXTABLE(1b,ia32_badarg)
8857 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8858 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8859 + GET_THREAD_INFO(%r11)
8860 + orl $TS_COMPAT,TI_status(%r11)
8861 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8862 CFI_REMEMBER_STATE
8863 jnz cstar_tracesys
8864 cmpq $IA32_NR_syscalls-1,%rax
8865 @@ -314,12 +379,15 @@ cstar_do_call:
8866 cstar_dispatch:
8867 call *ia32_sys_call_table(,%rax,8)
8868 movq %rax,RAX-ARGOFFSET(%rsp)
8869 + GET_THREAD_INFO(%r11)
8870 DISABLE_INTERRUPTS(CLBR_NONE)
8871 TRACE_IRQS_OFF
8872 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8873 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8874 jnz sysretl_audit
8875 sysretl_from_sys_call:
8876 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8877 + pax_exit_kernel_user
8878 + pax_erase_kstack
8879 + andl $~TS_COMPAT,TI_status(%r11)
8880 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8881 movl RIP-ARGOFFSET(%rsp),%ecx
8882 CFI_REGISTER rip,rcx
8883 @@ -347,7 +415,7 @@ sysretl_audit:
8884
8885 cstar_tracesys:
8886 #ifdef CONFIG_AUDITSYSCALL
8887 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8888 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8889 jz cstar_auditsys
8890 #endif
8891 xchgl %r9d,%ebp
8892 @@ -361,6 +429,9 @@ cstar_tracesys:
8893 xchgl %ebp,%r9d
8894 cmpq $(IA32_NR_syscalls-1),%rax
8895 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
8896 +
8897 + pax_erase_kstack
8898 +
8899 jmp cstar_do_call
8900 END(ia32_cstar_target)
8901
8902 @@ -401,19 +472,26 @@ ENTRY(ia32_syscall)
8903 CFI_REL_OFFSET rip,RIP-RIP
8904 PARAVIRT_ADJUST_EXCEPTION_FRAME
8905 SWAPGS
8906 - /*
8907 - * No need to follow this irqs on/off section: the syscall
8908 - * disabled irqs and here we enable it straight after entry:
8909 - */
8910 - ENABLE_INTERRUPTS(CLBR_NONE)
8911 movl %eax,%eax
8912 pushq_cfi %rax
8913 cld
8914 /* note the registers are not zero extended to the sf.
8915 this could be a problem. */
8916 SAVE_ARGS 0,1,0
8917 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8918 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8919 + pax_enter_kernel_user
8920 +
8921 +#ifdef CONFIG_PAX_RANDKSTACK
8922 + pax_erase_kstack
8923 +#endif
8924 +
8925 + /*
8926 + * No need to follow this irqs on/off section: the syscall
8927 + * disabled irqs and here we enable it straight after entry:
8928 + */
8929 + ENABLE_INTERRUPTS(CLBR_NONE)
8930 + GET_THREAD_INFO(%r11)
8931 + orl $TS_COMPAT,TI_status(%r11)
8932 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8933 jnz ia32_tracesys
8934 cmpq $(IA32_NR_syscalls-1),%rax
8935 ja ia32_badsys
8936 @@ -436,6 +514,9 @@ ia32_tracesys:
8937 RESTORE_REST
8938 cmpq $(IA32_NR_syscalls-1),%rax
8939 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
8940 +
8941 + pax_erase_kstack
8942 +
8943 jmp ia32_do_call
8944 END(ia32_syscall)
8945
8946 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8947 index 4540bec..714d913 100644
8948 --- a/arch/x86/ia32/sys_ia32.c
8949 +++ b/arch/x86/ia32/sys_ia32.c
8950 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8951 */
8952 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8953 {
8954 - typeof(ubuf->st_uid) uid = 0;
8955 - typeof(ubuf->st_gid) gid = 0;
8956 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8957 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8958 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
8959 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
8960 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8961 @@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
8962 return ret;
8963 }
8964
8965 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8966 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8967 int options)
8968 {
8969 return compat_sys_wait4(pid, stat_addr, options, NULL);
8970 @@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8971 mm_segment_t old_fs = get_fs();
8972
8973 set_fs(KERNEL_DS);
8974 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8975 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8976 set_fs(old_fs);
8977 if (put_compat_timespec(&t, interval))
8978 return -EFAULT;
8979 @@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8980 mm_segment_t old_fs = get_fs();
8981
8982 set_fs(KERNEL_DS);
8983 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8984 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8985 set_fs(old_fs);
8986 if (!ret) {
8987 switch (_NSIG_WORDS) {
8988 @@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8989 if (copy_siginfo_from_user32(&info, uinfo))
8990 return -EFAULT;
8991 set_fs(KERNEL_DS);
8992 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8993 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8994 set_fs(old_fs);
8995 return ret;
8996 }
8997 @@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8998 return -EFAULT;
8999
9000 set_fs(KERNEL_DS);
9001 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9002 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9003 count);
9004 set_fs(old_fs);
9005
9006 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9007 index 952bd01..7692c6f 100644
9008 --- a/arch/x86/include/asm/alternative-asm.h
9009 +++ b/arch/x86/include/asm/alternative-asm.h
9010 @@ -15,6 +15,45 @@
9011 .endm
9012 #endif
9013
9014 +#ifdef KERNEXEC_PLUGIN
9015 + .macro pax_force_retaddr_bts rip=0
9016 + btsq $63,\rip(%rsp)
9017 + .endm
9018 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9019 + .macro pax_force_retaddr rip=0, reload=0
9020 + btsq $63,\rip(%rsp)
9021 + .endm
9022 + .macro pax_force_fptr ptr
9023 + btsq $63,\ptr
9024 + .endm
9025 + .macro pax_set_fptr_mask
9026 + .endm
9027 +#endif
9028 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9029 + .macro pax_force_retaddr rip=0, reload=0
9030 + .if \reload
9031 + pax_set_fptr_mask
9032 + .endif
9033 + orq %r10,\rip(%rsp)
9034 + .endm
9035 + .macro pax_force_fptr ptr
9036 + orq %r10,\ptr
9037 + .endm
9038 + .macro pax_set_fptr_mask
9039 + movabs $0x8000000000000000,%r10
9040 + .endm
9041 +#endif
9042 +#else
9043 + .macro pax_force_retaddr rip=0, reload=0
9044 + .endm
9045 + .macro pax_force_fptr ptr
9046 + .endm
9047 + .macro pax_force_retaddr_bts rip=0
9048 + .endm
9049 + .macro pax_set_fptr_mask
9050 + .endm
9051 +#endif
9052 +
9053 .macro altinstruction_entry orig alt feature orig_len alt_len
9054 .long \orig - .
9055 .long \alt - .
9056 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9057 index 49331be..9706065 100644
9058 --- a/arch/x86/include/asm/alternative.h
9059 +++ b/arch/x86/include/asm/alternative.h
9060 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9061 ".section .discard,\"aw\",@progbits\n" \
9062 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9063 ".previous\n" \
9064 - ".section .altinstr_replacement, \"ax\"\n" \
9065 + ".section .altinstr_replacement, \"a\"\n" \
9066 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9067 ".previous"
9068
9069 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9070 index eaff479..1eff9b5 100644
9071 --- a/arch/x86/include/asm/apic.h
9072 +++ b/arch/x86/include/asm/apic.h
9073 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9074
9075 #ifdef CONFIG_X86_LOCAL_APIC
9076
9077 -extern unsigned int apic_verbosity;
9078 +extern int apic_verbosity;
9079 extern int local_apic_timer_c2_ok;
9080
9081 extern int disable_apic;
9082 @@ -390,7 +390,7 @@ struct apic {
9083 */
9084 int (*x86_32_numa_cpu_node)(int cpu);
9085 #endif
9086 -};
9087 +} __do_const;
9088
9089 /*
9090 * Pointer to the local APIC driver in use on this system (there's
9091 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9092 index 20370c6..a2eb9b0 100644
9093 --- a/arch/x86/include/asm/apm.h
9094 +++ b/arch/x86/include/asm/apm.h
9095 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9096 __asm__ __volatile__(APM_DO_ZERO_SEGS
9097 "pushl %%edi\n\t"
9098 "pushl %%ebp\n\t"
9099 - "lcall *%%cs:apm_bios_entry\n\t"
9100 + "lcall *%%ss:apm_bios_entry\n\t"
9101 "setc %%al\n\t"
9102 "popl %%ebp\n\t"
9103 "popl %%edi\n\t"
9104 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9105 __asm__ __volatile__(APM_DO_ZERO_SEGS
9106 "pushl %%edi\n\t"
9107 "pushl %%ebp\n\t"
9108 - "lcall *%%cs:apm_bios_entry\n\t"
9109 + "lcall *%%ss:apm_bios_entry\n\t"
9110 "setc %%bl\n\t"
9111 "popl %%ebp\n\t"
9112 "popl %%edi\n\t"
9113 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9114 index 58cb6d4..a4b806c 100644
9115 --- a/arch/x86/include/asm/atomic.h
9116 +++ b/arch/x86/include/asm/atomic.h
9117 @@ -22,7 +22,18 @@
9118 */
9119 static inline int atomic_read(const atomic_t *v)
9120 {
9121 - return (*(volatile int *)&(v)->counter);
9122 + return (*(volatile const int *)&(v)->counter);
9123 +}
9124 +
9125 +/**
9126 + * atomic_read_unchecked - read atomic variable
9127 + * @v: pointer of type atomic_unchecked_t
9128 + *
9129 + * Atomically reads the value of @v.
9130 + */
9131 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9132 +{
9133 + return (*(volatile const int *)&(v)->counter);
9134 }
9135
9136 /**
9137 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9138 }
9139
9140 /**
9141 + * atomic_set_unchecked - set atomic variable
9142 + * @v: pointer of type atomic_unchecked_t
9143 + * @i: required value
9144 + *
9145 + * Atomically sets the value of @v to @i.
9146 + */
9147 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9148 +{
9149 + v->counter = i;
9150 +}
9151 +
9152 +/**
9153 * atomic_add - add integer to atomic variable
9154 * @i: integer value to add
9155 * @v: pointer of type atomic_t
9156 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9157 */
9158 static inline void atomic_add(int i, atomic_t *v)
9159 {
9160 - asm volatile(LOCK_PREFIX "addl %1,%0"
9161 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9162 +
9163 +#ifdef CONFIG_PAX_REFCOUNT
9164 + "jno 0f\n"
9165 + LOCK_PREFIX "subl %1,%0\n"
9166 + "int $4\n0:\n"
9167 + _ASM_EXTABLE(0b, 0b)
9168 +#endif
9169 +
9170 + : "+m" (v->counter)
9171 + : "ir" (i));
9172 +}
9173 +
9174 +/**
9175 + * atomic_add_unchecked - add integer to atomic variable
9176 + * @i: integer value to add
9177 + * @v: pointer of type atomic_unchecked_t
9178 + *
9179 + * Atomically adds @i to @v.
9180 + */
9181 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9182 +{
9183 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9184 : "+m" (v->counter)
9185 : "ir" (i));
9186 }
9187 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9188 */
9189 static inline void atomic_sub(int i, atomic_t *v)
9190 {
9191 - asm volatile(LOCK_PREFIX "subl %1,%0"
9192 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9193 +
9194 +#ifdef CONFIG_PAX_REFCOUNT
9195 + "jno 0f\n"
9196 + LOCK_PREFIX "addl %1,%0\n"
9197 + "int $4\n0:\n"
9198 + _ASM_EXTABLE(0b, 0b)
9199 +#endif
9200 +
9201 + : "+m" (v->counter)
9202 + : "ir" (i));
9203 +}
9204 +
9205 +/**
9206 + * atomic_sub_unchecked - subtract integer from atomic variable
9207 + * @i: integer value to subtract
9208 + * @v: pointer of type atomic_unchecked_t
9209 + *
9210 + * Atomically subtracts @i from @v.
9211 + */
9212 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9213 +{
9214 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9215 : "+m" (v->counter)
9216 : "ir" (i));
9217 }
9218 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9219 {
9220 unsigned char c;
9221
9222 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9223 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9224 +
9225 +#ifdef CONFIG_PAX_REFCOUNT
9226 + "jno 0f\n"
9227 + LOCK_PREFIX "addl %2,%0\n"
9228 + "int $4\n0:\n"
9229 + _ASM_EXTABLE(0b, 0b)
9230 +#endif
9231 +
9232 + "sete %1\n"
9233 : "+m" (v->counter), "=qm" (c)
9234 : "ir" (i) : "memory");
9235 return c;
9236 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9237 */
9238 static inline void atomic_inc(atomic_t *v)
9239 {
9240 - asm volatile(LOCK_PREFIX "incl %0"
9241 + asm volatile(LOCK_PREFIX "incl %0\n"
9242 +
9243 +#ifdef CONFIG_PAX_REFCOUNT
9244 + "jno 0f\n"
9245 + LOCK_PREFIX "decl %0\n"
9246 + "int $4\n0:\n"
9247 + _ASM_EXTABLE(0b, 0b)
9248 +#endif
9249 +
9250 + : "+m" (v->counter));
9251 +}
9252 +
9253 +/**
9254 + * atomic_inc_unchecked - increment atomic variable
9255 + * @v: pointer of type atomic_unchecked_t
9256 + *
9257 + * Atomically increments @v by 1.
9258 + */
9259 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9260 +{
9261 + asm volatile(LOCK_PREFIX "incl %0\n"
9262 : "+m" (v->counter));
9263 }
9264
9265 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9266 */
9267 static inline void atomic_dec(atomic_t *v)
9268 {
9269 - asm volatile(LOCK_PREFIX "decl %0"
9270 + asm volatile(LOCK_PREFIX "decl %0\n"
9271 +
9272 +#ifdef CONFIG_PAX_REFCOUNT
9273 + "jno 0f\n"
9274 + LOCK_PREFIX "incl %0\n"
9275 + "int $4\n0:\n"
9276 + _ASM_EXTABLE(0b, 0b)
9277 +#endif
9278 +
9279 + : "+m" (v->counter));
9280 +}
9281 +
9282 +/**
9283 + * atomic_dec_unchecked - decrement atomic variable
9284 + * @v: pointer of type atomic_unchecked_t
9285 + *
9286 + * Atomically decrements @v by 1.
9287 + */
9288 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9289 +{
9290 + asm volatile(LOCK_PREFIX "decl %0\n"
9291 : "+m" (v->counter));
9292 }
9293
9294 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9295 {
9296 unsigned char c;
9297
9298 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9299 + asm volatile(LOCK_PREFIX "decl %0\n"
9300 +
9301 +#ifdef CONFIG_PAX_REFCOUNT
9302 + "jno 0f\n"
9303 + LOCK_PREFIX "incl %0\n"
9304 + "int $4\n0:\n"
9305 + _ASM_EXTABLE(0b, 0b)
9306 +#endif
9307 +
9308 + "sete %1\n"
9309 : "+m" (v->counter), "=qm" (c)
9310 : : "memory");
9311 return c != 0;
9312 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9313 {
9314 unsigned char c;
9315
9316 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9317 + asm volatile(LOCK_PREFIX "incl %0\n"
9318 +
9319 +#ifdef CONFIG_PAX_REFCOUNT
9320 + "jno 0f\n"
9321 + LOCK_PREFIX "decl %0\n"
9322 + "int $4\n0:\n"
9323 + _ASM_EXTABLE(0b, 0b)
9324 +#endif
9325 +
9326 + "sete %1\n"
9327 + : "+m" (v->counter), "=qm" (c)
9328 + : : "memory");
9329 + return c != 0;
9330 +}
9331 +
9332 +/**
9333 + * atomic_inc_and_test_unchecked - increment and test
9334 + * @v: pointer of type atomic_unchecked_t
9335 + *
9336 + * Atomically increments @v by 1
9337 + * and returns true if the result is zero, or false for all
9338 + * other cases.
9339 + */
9340 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9341 +{
9342 + unsigned char c;
9343 +
9344 + asm volatile(LOCK_PREFIX "incl %0\n"
9345 + "sete %1\n"
9346 : "+m" (v->counter), "=qm" (c)
9347 : : "memory");
9348 return c != 0;
9349 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9350 {
9351 unsigned char c;
9352
9353 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9354 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9355 +
9356 +#ifdef CONFIG_PAX_REFCOUNT
9357 + "jno 0f\n"
9358 + LOCK_PREFIX "subl %2,%0\n"
9359 + "int $4\n0:\n"
9360 + _ASM_EXTABLE(0b, 0b)
9361 +#endif
9362 +
9363 + "sets %1\n"
9364 : "+m" (v->counter), "=qm" (c)
9365 : "ir" (i) : "memory");
9366 return c;
9367 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9368 goto no_xadd;
9369 #endif
9370 /* Modern 486+ processor */
9371 - return i + xadd(&v->counter, i);
9372 + return i + xadd_check_overflow(&v->counter, i);
9373
9374 #ifdef CONFIG_M386
9375 no_xadd: /* Legacy 386 processor */
9376 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9377 }
9378
9379 /**
9380 + * atomic_add_return_unchecked - add integer and return
9381 + * @i: integer value to add
9382 + * @v: pointer of type atomic_unchecked_t
9383 + *
9384 + * Atomically adds @i to @v and returns @i + @v
9385 + */
9386 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9387 +{
9388 +#ifdef CONFIG_M386
9389 + int __i;
9390 + unsigned long flags;
9391 + if (unlikely(boot_cpu_data.x86 <= 3))
9392 + goto no_xadd;
9393 +#endif
9394 + /* Modern 486+ processor */
9395 + return i + xadd(&v->counter, i);
9396 +
9397 +#ifdef CONFIG_M386
9398 +no_xadd: /* Legacy 386 processor */
9399 + raw_local_irq_save(flags);
9400 + __i = atomic_read_unchecked(v);
9401 + atomic_set_unchecked(v, i + __i);
9402 + raw_local_irq_restore(flags);
9403 + return i + __i;
9404 +#endif
9405 +}
9406 +
9407 +/**
9408 * atomic_sub_return - subtract integer and return
9409 * @v: pointer of type atomic_t
9410 * @i: integer value to subtract
9411 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9412 }
9413
9414 #define atomic_inc_return(v) (atomic_add_return(1, v))
9415 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9416 +{
9417 + return atomic_add_return_unchecked(1, v);
9418 +}
9419 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9420
9421 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9422 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9423 return cmpxchg(&v->counter, old, new);
9424 }
9425
9426 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9427 +{
9428 + return cmpxchg(&v->counter, old, new);
9429 +}
9430 +
9431 static inline int atomic_xchg(atomic_t *v, int new)
9432 {
9433 return xchg(&v->counter, new);
9434 }
9435
9436 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9437 +{
9438 + return xchg(&v->counter, new);
9439 +}
9440 +
9441 /**
9442 * __atomic_add_unless - add unless the number is already a given value
9443 * @v: pointer of type atomic_t
9444 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9445 */
9446 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9447 {
9448 - int c, old;
9449 + int c, old, new;
9450 c = atomic_read(v);
9451 for (;;) {
9452 - if (unlikely(c == (u)))
9453 + if (unlikely(c == u))
9454 break;
9455 - old = atomic_cmpxchg((v), c, c + (a));
9456 +
9457 + asm volatile("addl %2,%0\n"
9458 +
9459 +#ifdef CONFIG_PAX_REFCOUNT
9460 + "jno 0f\n"
9461 + "subl %2,%0\n"
9462 + "int $4\n0:\n"
9463 + _ASM_EXTABLE(0b, 0b)
9464 +#endif
9465 +
9466 + : "=r" (new)
9467 + : "0" (c), "ir" (a));
9468 +
9469 + old = atomic_cmpxchg(v, c, new);
9470 if (likely(old == c))
9471 break;
9472 c = old;
9473 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9474 return c;
9475 }
9476
9477 +/**
9478 + * atomic_inc_not_zero_hint - increment if not null
9479 + * @v: pointer of type atomic_t
9480 + * @hint: probable value of the atomic before the increment
9481 + *
9482 + * This version of atomic_inc_not_zero() gives a hint of probable
9483 + * value of the atomic. This helps processor to not read the memory
9484 + * before doing the atomic read/modify/write cycle, lowering
9485 + * number of bus transactions on some arches.
9486 + *
9487 + * Returns: 0 if increment was not done, 1 otherwise.
9488 + */
9489 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9490 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9491 +{
9492 + int val, c = hint, new;
9493 +
9494 + /* sanity test, should be removed by compiler if hint is a constant */
9495 + if (!hint)
9496 + return __atomic_add_unless(v, 1, 0);
9497 +
9498 + do {
9499 + asm volatile("incl %0\n"
9500 +
9501 +#ifdef CONFIG_PAX_REFCOUNT
9502 + "jno 0f\n"
9503 + "decl %0\n"
9504 + "int $4\n0:\n"
9505 + _ASM_EXTABLE(0b, 0b)
9506 +#endif
9507 +
9508 + : "=r" (new)
9509 + : "0" (c));
9510 +
9511 + val = atomic_cmpxchg(v, c, new);
9512 + if (val == c)
9513 + return 1;
9514 + c = val;
9515 + } while (c);
9516 +
9517 + return 0;
9518 +}
9519
9520 /*
9521 * atomic_dec_if_positive - decrement by 1 if old value positive
9522 @@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9523 #endif
9524
9525 /* These are x86-specific, used by some header files */
9526 -#define atomic_clear_mask(mask, addr) \
9527 - asm volatile(LOCK_PREFIX "andl %0,%1" \
9528 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
9529 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9530 +{
9531 + asm volatile(LOCK_PREFIX "andl %1,%0"
9532 + : "+m" (v->counter)
9533 + : "r" (~(mask))
9534 + : "memory");
9535 +}
9536
9537 -#define atomic_set_mask(mask, addr) \
9538 - asm volatile(LOCK_PREFIX "orl %0,%1" \
9539 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9540 - : "memory")
9541 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9542 +{
9543 + asm volatile(LOCK_PREFIX "andl %1,%0"
9544 + : "+m" (v->counter)
9545 + : "r" (~(mask))
9546 + : "memory");
9547 +}
9548 +
9549 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9550 +{
9551 + asm volatile(LOCK_PREFIX "orl %1,%0"
9552 + : "+m" (v->counter)
9553 + : "r" (mask)
9554 + : "memory");
9555 +}
9556 +
9557 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9558 +{
9559 + asm volatile(LOCK_PREFIX "orl %1,%0"
9560 + : "+m" (v->counter)
9561 + : "r" (mask)
9562 + : "memory");
9563 +}
9564
9565 /* Atomic operations are already serializing on x86 */
9566 #define smp_mb__before_atomic_dec() barrier()
9567 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9568 index b154de7..aadebd8 100644
9569 --- a/arch/x86/include/asm/atomic64_32.h
9570 +++ b/arch/x86/include/asm/atomic64_32.h
9571 @@ -12,6 +12,14 @@ typedef struct {
9572 u64 __aligned(8) counter;
9573 } atomic64_t;
9574
9575 +#ifdef CONFIG_PAX_REFCOUNT
9576 +typedef struct {
9577 + u64 __aligned(8) counter;
9578 +} atomic64_unchecked_t;
9579 +#else
9580 +typedef atomic64_t atomic64_unchecked_t;
9581 +#endif
9582 +
9583 #define ATOMIC64_INIT(val) { (val) }
9584
9585 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9586 @@ -37,21 +45,31 @@ typedef struct {
9587 ATOMIC64_DECL_ONE(sym##_386)
9588
9589 ATOMIC64_DECL_ONE(add_386);
9590 +ATOMIC64_DECL_ONE(add_unchecked_386);
9591 ATOMIC64_DECL_ONE(sub_386);
9592 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9593 ATOMIC64_DECL_ONE(inc_386);
9594 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9595 ATOMIC64_DECL_ONE(dec_386);
9596 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9597 #endif
9598
9599 #define alternative_atomic64(f, out, in...) \
9600 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9601
9602 ATOMIC64_DECL(read);
9603 +ATOMIC64_DECL(read_unchecked);
9604 ATOMIC64_DECL(set);
9605 +ATOMIC64_DECL(set_unchecked);
9606 ATOMIC64_DECL(xchg);
9607 ATOMIC64_DECL(add_return);
9608 +ATOMIC64_DECL(add_return_unchecked);
9609 ATOMIC64_DECL(sub_return);
9610 +ATOMIC64_DECL(sub_return_unchecked);
9611 ATOMIC64_DECL(inc_return);
9612 +ATOMIC64_DECL(inc_return_unchecked);
9613 ATOMIC64_DECL(dec_return);
9614 +ATOMIC64_DECL(dec_return_unchecked);
9615 ATOMIC64_DECL(dec_if_positive);
9616 ATOMIC64_DECL(inc_not_zero);
9617 ATOMIC64_DECL(add_unless);
9618 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9619 }
9620
9621 /**
9622 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9623 + * @p: pointer to type atomic64_unchecked_t
9624 + * @o: expected value
9625 + * @n: new value
9626 + *
9627 + * Atomically sets @v to @n if it was equal to @o and returns
9628 + * the old value.
9629 + */
9630 +
9631 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9632 +{
9633 + return cmpxchg64(&v->counter, o, n);
9634 +}
9635 +
9636 +/**
9637 * atomic64_xchg - xchg atomic64 variable
9638 * @v: pointer to type atomic64_t
9639 * @n: value to assign
9640 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9641 }
9642
9643 /**
9644 + * atomic64_set_unchecked - set atomic64 variable
9645 + * @v: pointer to type atomic64_unchecked_t
9646 + * @n: value to assign
9647 + *
9648 + * Atomically sets the value of @v to @n.
9649 + */
9650 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9651 +{
9652 + unsigned high = (unsigned)(i >> 32);
9653 + unsigned low = (unsigned)i;
9654 + alternative_atomic64(set, /* no output */,
9655 + "S" (v), "b" (low), "c" (high)
9656 + : "eax", "edx", "memory");
9657 +}
9658 +
9659 +/**
9660 * atomic64_read - read atomic64 variable
9661 * @v: pointer to type atomic64_t
9662 *
9663 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9664 }
9665
9666 /**
9667 + * atomic64_read_unchecked - read atomic64 variable
9668 + * @v: pointer to type atomic64_unchecked_t
9669 + *
9670 + * Atomically reads the value of @v and returns it.
9671 + */
9672 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9673 +{
9674 + long long r;
9675 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9676 + return r;
9677 + }
9678 +
9679 +/**
9680 * atomic64_add_return - add and return
9681 * @i: integer value to add
9682 * @v: pointer to type atomic64_t
9683 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9684 return i;
9685 }
9686
9687 +/**
9688 + * atomic64_add_return_unchecked - add and return
9689 + * @i: integer value to add
9690 + * @v: pointer to type atomic64_unchecked_t
9691 + *
9692 + * Atomically adds @i to @v and returns @i + *@v
9693 + */
9694 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9695 +{
9696 + alternative_atomic64(add_return_unchecked,
9697 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9698 + ASM_NO_INPUT_CLOBBER("memory"));
9699 + return i;
9700 +}
9701 +
9702 /*
9703 * Other variants with different arithmetic operators:
9704 */
9705 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9706 return a;
9707 }
9708
9709 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9710 +{
9711 + long long a;
9712 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9713 + "S" (v) : "memory", "ecx");
9714 + return a;
9715 +}
9716 +
9717 static inline long long atomic64_dec_return(atomic64_t *v)
9718 {
9719 long long a;
9720 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9721 }
9722
9723 /**
9724 + * atomic64_add_unchecked - add integer to atomic64 variable
9725 + * @i: integer value to add
9726 + * @v: pointer to type atomic64_unchecked_t
9727 + *
9728 + * Atomically adds @i to @v.
9729 + */
9730 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9731 +{
9732 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9733 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9734 + ASM_NO_INPUT_CLOBBER("memory"));
9735 + return i;
9736 +}
9737 +
9738 +/**
9739 * atomic64_sub - subtract the atomic64 variable
9740 * @i: integer value to subtract
9741 * @v: pointer to type atomic64_t
9742 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9743 index 0e1cbfc..5623683 100644
9744 --- a/arch/x86/include/asm/atomic64_64.h
9745 +++ b/arch/x86/include/asm/atomic64_64.h
9746 @@ -18,7 +18,19 @@
9747 */
9748 static inline long atomic64_read(const atomic64_t *v)
9749 {
9750 - return (*(volatile long *)&(v)->counter);
9751 + return (*(volatile const long *)&(v)->counter);
9752 +}
9753 +
9754 +/**
9755 + * atomic64_read_unchecked - read atomic64 variable
9756 + * @v: pointer of type atomic64_unchecked_t
9757 + *
9758 + * Atomically reads the value of @v.
9759 + * Doesn't imply a read memory barrier.
9760 + */
9761 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9762 +{
9763 + return (*(volatile const long *)&(v)->counter);
9764 }
9765
9766 /**
9767 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9768 }
9769
9770 /**
9771 + * atomic64_set_unchecked - set atomic64 variable
9772 + * @v: pointer to type atomic64_unchecked_t
9773 + * @i: required value
9774 + *
9775 + * Atomically sets the value of @v to @i.
9776 + */
9777 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9778 +{
9779 + v->counter = i;
9780 +}
9781 +
9782 +/**
9783 * atomic64_add - add integer to atomic64 variable
9784 * @i: integer value to add
9785 * @v: pointer to type atomic64_t
9786 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9787 */
9788 static inline void atomic64_add(long i, atomic64_t *v)
9789 {
9790 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9791 +
9792 +#ifdef CONFIG_PAX_REFCOUNT
9793 + "jno 0f\n"
9794 + LOCK_PREFIX "subq %1,%0\n"
9795 + "int $4\n0:\n"
9796 + _ASM_EXTABLE(0b, 0b)
9797 +#endif
9798 +
9799 + : "=m" (v->counter)
9800 + : "er" (i), "m" (v->counter));
9801 +}
9802 +
9803 +/**
9804 + * atomic64_add_unchecked - add integer to atomic64 variable
9805 + * @i: integer value to add
9806 + * @v: pointer to type atomic64_unchecked_t
9807 + *
9808 + * Atomically adds @i to @v.
9809 + */
9810 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9811 +{
9812 asm volatile(LOCK_PREFIX "addq %1,%0"
9813 : "=m" (v->counter)
9814 : "er" (i), "m" (v->counter));
9815 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9816 */
9817 static inline void atomic64_sub(long i, atomic64_t *v)
9818 {
9819 - asm volatile(LOCK_PREFIX "subq %1,%0"
9820 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9821 +
9822 +#ifdef CONFIG_PAX_REFCOUNT
9823 + "jno 0f\n"
9824 + LOCK_PREFIX "addq %1,%0\n"
9825 + "int $4\n0:\n"
9826 + _ASM_EXTABLE(0b, 0b)
9827 +#endif
9828 +
9829 + : "=m" (v->counter)
9830 + : "er" (i), "m" (v->counter));
9831 +}
9832 +
9833 +/**
9834 + * atomic64_sub_unchecked - subtract the atomic64 variable
9835 + * @i: integer value to subtract
9836 + * @v: pointer to type atomic64_unchecked_t
9837 + *
9838 + * Atomically subtracts @i from @v.
9839 + */
9840 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9841 +{
9842 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845 }
9846 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9847 {
9848 unsigned char c;
9849
9850 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9851 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9852 +
9853 +#ifdef CONFIG_PAX_REFCOUNT
9854 + "jno 0f\n"
9855 + LOCK_PREFIX "addq %2,%0\n"
9856 + "int $4\n0:\n"
9857 + _ASM_EXTABLE(0b, 0b)
9858 +#endif
9859 +
9860 + "sete %1\n"
9861 : "=m" (v->counter), "=qm" (c)
9862 : "er" (i), "m" (v->counter) : "memory");
9863 return c;
9864 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9865 */
9866 static inline void atomic64_inc(atomic64_t *v)
9867 {
9868 + asm volatile(LOCK_PREFIX "incq %0\n"
9869 +
9870 +#ifdef CONFIG_PAX_REFCOUNT
9871 + "jno 0f\n"
9872 + LOCK_PREFIX "decq %0\n"
9873 + "int $4\n0:\n"
9874 + _ASM_EXTABLE(0b, 0b)
9875 +#endif
9876 +
9877 + : "=m" (v->counter)
9878 + : "m" (v->counter));
9879 +}
9880 +
9881 +/**
9882 + * atomic64_inc_unchecked - increment atomic64 variable
9883 + * @v: pointer to type atomic64_unchecked_t
9884 + *
9885 + * Atomically increments @v by 1.
9886 + */
9887 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9888 +{
9889 asm volatile(LOCK_PREFIX "incq %0"
9890 : "=m" (v->counter)
9891 : "m" (v->counter));
9892 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9893 */
9894 static inline void atomic64_dec(atomic64_t *v)
9895 {
9896 - asm volatile(LOCK_PREFIX "decq %0"
9897 + asm volatile(LOCK_PREFIX "decq %0\n"
9898 +
9899 +#ifdef CONFIG_PAX_REFCOUNT
9900 + "jno 0f\n"
9901 + LOCK_PREFIX "incq %0\n"
9902 + "int $4\n0:\n"
9903 + _ASM_EXTABLE(0b, 0b)
9904 +#endif
9905 +
9906 + : "=m" (v->counter)
9907 + : "m" (v->counter));
9908 +}
9909 +
9910 +/**
9911 + * atomic64_dec_unchecked - decrement atomic64 variable
9912 + * @v: pointer to type atomic64_t
9913 + *
9914 + * Atomically decrements @v by 1.
9915 + */
9916 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9917 +{
9918 + asm volatile(LOCK_PREFIX "decq %0\n"
9919 : "=m" (v->counter)
9920 : "m" (v->counter));
9921 }
9922 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9923 {
9924 unsigned char c;
9925
9926 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9927 + asm volatile(LOCK_PREFIX "decq %0\n"
9928 +
9929 +#ifdef CONFIG_PAX_REFCOUNT
9930 + "jno 0f\n"
9931 + LOCK_PREFIX "incq %0\n"
9932 + "int $4\n0:\n"
9933 + _ASM_EXTABLE(0b, 0b)
9934 +#endif
9935 +
9936 + "sete %1\n"
9937 : "=m" (v->counter), "=qm" (c)
9938 : "m" (v->counter) : "memory");
9939 return c != 0;
9940 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9941 {
9942 unsigned char c;
9943
9944 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9945 + asm volatile(LOCK_PREFIX "incq %0\n"
9946 +
9947 +#ifdef CONFIG_PAX_REFCOUNT
9948 + "jno 0f\n"
9949 + LOCK_PREFIX "decq %0\n"
9950 + "int $4\n0:\n"
9951 + _ASM_EXTABLE(0b, 0b)
9952 +#endif
9953 +
9954 + "sete %1\n"
9955 : "=m" (v->counter), "=qm" (c)
9956 : "m" (v->counter) : "memory");
9957 return c != 0;
9958 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9959 {
9960 unsigned char c;
9961
9962 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9963 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9964 +
9965 +#ifdef CONFIG_PAX_REFCOUNT
9966 + "jno 0f\n"
9967 + LOCK_PREFIX "subq %2,%0\n"
9968 + "int $4\n0:\n"
9969 + _ASM_EXTABLE(0b, 0b)
9970 +#endif
9971 +
9972 + "sets %1\n"
9973 : "=m" (v->counter), "=qm" (c)
9974 : "er" (i), "m" (v->counter) : "memory");
9975 return c;
9976 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9977 */
9978 static inline long atomic64_add_return(long i, atomic64_t *v)
9979 {
9980 + return i + xadd_check_overflow(&v->counter, i);
9981 +}
9982 +
9983 +/**
9984 + * atomic64_add_return_unchecked - add and return
9985 + * @i: integer value to add
9986 + * @v: pointer to type atomic64_unchecked_t
9987 + *
9988 + * Atomically adds @i to @v and returns @i + @v
9989 + */
9990 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9991 +{
9992 return i + xadd(&v->counter, i);
9993 }
9994
9995 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9996 }
9997
9998 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9999 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10000 +{
10001 + return atomic64_add_return_unchecked(1, v);
10002 +}
10003 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10004
10005 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10006 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10007 return cmpxchg(&v->counter, old, new);
10008 }
10009
10010 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10011 +{
10012 + return cmpxchg(&v->counter, old, new);
10013 +}
10014 +
10015 static inline long atomic64_xchg(atomic64_t *v, long new)
10016 {
10017 return xchg(&v->counter, new);
10018 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10019 */
10020 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10021 {
10022 - long c, old;
10023 + long c, old, new;
10024 c = atomic64_read(v);
10025 for (;;) {
10026 - if (unlikely(c == (u)))
10027 + if (unlikely(c == u))
10028 break;
10029 - old = atomic64_cmpxchg((v), c, c + (a));
10030 +
10031 + asm volatile("add %2,%0\n"
10032 +
10033 +#ifdef CONFIG_PAX_REFCOUNT
10034 + "jno 0f\n"
10035 + "sub %2,%0\n"
10036 + "int $4\n0:\n"
10037 + _ASM_EXTABLE(0b, 0b)
10038 +#endif
10039 +
10040 + : "=r" (new)
10041 + : "0" (c), "ir" (a));
10042 +
10043 + old = atomic64_cmpxchg(v, c, new);
10044 if (likely(old == c))
10045 break;
10046 c = old;
10047 }
10048 - return c != (u);
10049 + return c != u;
10050 }
10051
10052 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10053 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10054 index a6983b2..63f48a2 100644
10055 --- a/arch/x86/include/asm/bitops.h
10056 +++ b/arch/x86/include/asm/bitops.h
10057 @@ -40,7 +40,7 @@
10058 * a mask operation on a byte.
10059 */
10060 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10061 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10062 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10063 #define CONST_MASK(nr) (1 << ((nr) & 7))
10064
10065 /**
10066 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10067 index b13fe63..0dab13a 100644
10068 --- a/arch/x86/include/asm/boot.h
10069 +++ b/arch/x86/include/asm/boot.h
10070 @@ -11,10 +11,15 @@
10071 #include <asm/pgtable_types.h>
10072
10073 /* Physical address where kernel should be loaded. */
10074 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10075 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10078
10079 +#ifndef __ASSEMBLY__
10080 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10081 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10082 +#endif
10083 +
10084 /* Minimum kernel alignment, as a power of two */
10085 #ifdef CONFIG_X86_64
10086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10087 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10088 index 48f99f1..d78ebf9 100644
10089 --- a/arch/x86/include/asm/cache.h
10090 +++ b/arch/x86/include/asm/cache.h
10091 @@ -5,12 +5,13 @@
10092
10093 /* L1 cache line size */
10094 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10095 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10096 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10097
10098 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10099 +#define __read_only __attribute__((__section__(".data..read_only")))
10100
10101 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10102 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10103 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10104
10105 #ifdef CONFIG_X86_VSMP
10106 #ifdef CONFIG_SMP
10107 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10108 index 9863ee3..4a1f8e1 100644
10109 --- a/arch/x86/include/asm/cacheflush.h
10110 +++ b/arch/x86/include/asm/cacheflush.h
10111 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10112 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10113
10114 if (pg_flags == _PGMT_DEFAULT)
10115 - return -1;
10116 + return ~0UL;
10117 else if (pg_flags == _PGMT_WC)
10118 return _PAGE_CACHE_WC;
10119 else if (pg_flags == _PGMT_UC_MINUS)
10120 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10121 index 46fc474..b02b0f9 100644
10122 --- a/arch/x86/include/asm/checksum_32.h
10123 +++ b/arch/x86/include/asm/checksum_32.h
10124 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10125 int len, __wsum sum,
10126 int *src_err_ptr, int *dst_err_ptr);
10127
10128 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10129 + int len, __wsum sum,
10130 + int *src_err_ptr, int *dst_err_ptr);
10131 +
10132 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10133 + int len, __wsum sum,
10134 + int *src_err_ptr, int *dst_err_ptr);
10135 +
10136 /*
10137 * Note: when you get a NULL pointer exception here this means someone
10138 * passed in an incorrect kernel address to one of these functions.
10139 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10140 int *err_ptr)
10141 {
10142 might_sleep();
10143 - return csum_partial_copy_generic((__force void *)src, dst,
10144 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10145 len, sum, err_ptr, NULL);
10146 }
10147
10148 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10149 {
10150 might_sleep();
10151 if (access_ok(VERIFY_WRITE, dst, len))
10152 - return csum_partial_copy_generic(src, (__force void *)dst,
10153 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10154 len, sum, NULL, err_ptr);
10155
10156 if (len)
10157 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10158 index 99480e5..d81165b 100644
10159 --- a/arch/x86/include/asm/cmpxchg.h
10160 +++ b/arch/x86/include/asm/cmpxchg.h
10161 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10162 __compiletime_error("Bad argument size for cmpxchg");
10163 extern void __xadd_wrong_size(void)
10164 __compiletime_error("Bad argument size for xadd");
10165 +extern void __xadd_check_overflow_wrong_size(void)
10166 + __compiletime_error("Bad argument size for xadd_check_overflow");
10167 extern void __add_wrong_size(void)
10168 __compiletime_error("Bad argument size for add");
10169 +extern void __add_check_overflow_wrong_size(void)
10170 + __compiletime_error("Bad argument size for add_check_overflow");
10171
10172 /*
10173 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10174 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10175 __ret; \
10176 })
10177
10178 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10179 + ({ \
10180 + __typeof__ (*(ptr)) __ret = (arg); \
10181 + switch (sizeof(*(ptr))) { \
10182 + case __X86_CASE_L: \
10183 + asm volatile (lock #op "l %0, %1\n" \
10184 + "jno 0f\n" \
10185 + "mov %0,%1\n" \
10186 + "int $4\n0:\n" \
10187 + _ASM_EXTABLE(0b, 0b) \
10188 + : "+r" (__ret), "+m" (*(ptr)) \
10189 + : : "memory", "cc"); \
10190 + break; \
10191 + case __X86_CASE_Q: \
10192 + asm volatile (lock #op "q %q0, %1\n" \
10193 + "jno 0f\n" \
10194 + "mov %0,%1\n" \
10195 + "int $4\n0:\n" \
10196 + _ASM_EXTABLE(0b, 0b) \
10197 + : "+r" (__ret), "+m" (*(ptr)) \
10198 + : : "memory", "cc"); \
10199 + break; \
10200 + default: \
10201 + __ ## op ## _check_overflow_wrong_size(); \
10202 + } \
10203 + __ret; \
10204 + })
10205 +
10206 /*
10207 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10208 * Since this is generally used to protect other memory information, we
10209 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10210 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10211 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10212
10213 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10214 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10215 +
10216 #define __add(ptr, inc, lock) \
10217 ({ \
10218 __typeof__ (*(ptr)) __ret = (inc); \
10219 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10220 index f91e80f..7731066 100644
10221 --- a/arch/x86/include/asm/cpufeature.h
10222 +++ b/arch/x86/include/asm/cpufeature.h
10223 @@ -202,11 +202,12 @@
10224 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
10225 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
10226 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
10227 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
10228 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
10229 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
10230 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
10231 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
10232 #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
10233 +#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
10234
10235 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
10236
10237 @@ -371,7 +372,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10238 ".section .discard,\"aw\",@progbits\n"
10239 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10240 ".previous\n"
10241 - ".section .altinstr_replacement,\"ax\"\n"
10242 + ".section .altinstr_replacement,\"a\"\n"
10243 "3: movb $1,%0\n"
10244 "4:\n"
10245 ".previous\n"
10246 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10247 index 8bf1c06..f723dfd 100644
10248 --- a/arch/x86/include/asm/desc.h
10249 +++ b/arch/x86/include/asm/desc.h
10250 @@ -4,6 +4,7 @@
10251 #include <asm/desc_defs.h>
10252 #include <asm/ldt.h>
10253 #include <asm/mmu.h>
10254 +#include <asm/pgtable.h>
10255
10256 #include <linux/smp.h>
10257 #include <linux/percpu.h>
10258 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10259
10260 desc->type = (info->read_exec_only ^ 1) << 1;
10261 desc->type |= info->contents << 2;
10262 + desc->type |= info->seg_not_present ^ 1;
10263
10264 desc->s = 1;
10265 desc->dpl = 0x3;
10266 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10267 }
10268
10269 extern struct desc_ptr idt_descr;
10270 -extern gate_desc idt_table[];
10271 extern struct desc_ptr nmi_idt_descr;
10272 -extern gate_desc nmi_idt_table[];
10273 -
10274 -struct gdt_page {
10275 - struct desc_struct gdt[GDT_ENTRIES];
10276 -} __attribute__((aligned(PAGE_SIZE)));
10277 -
10278 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10279 +extern gate_desc idt_table[256];
10280 +extern gate_desc nmi_idt_table[256];
10281
10282 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10283 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10284 {
10285 - return per_cpu(gdt_page, cpu).gdt;
10286 + return cpu_gdt_table[cpu];
10287 }
10288
10289 #ifdef CONFIG_X86_64
10290 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10291 unsigned long base, unsigned dpl, unsigned flags,
10292 unsigned short seg)
10293 {
10294 - gate->a = (seg << 16) | (base & 0xffff);
10295 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10296 + gate->gate.offset_low = base;
10297 + gate->gate.seg = seg;
10298 + gate->gate.reserved = 0;
10299 + gate->gate.type = type;
10300 + gate->gate.s = 0;
10301 + gate->gate.dpl = dpl;
10302 + gate->gate.p = 1;
10303 + gate->gate.offset_high = base >> 16;
10304 }
10305
10306 #endif
10307 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10308
10309 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10310 {
10311 + pax_open_kernel();
10312 memcpy(&idt[entry], gate, sizeof(*gate));
10313 + pax_close_kernel();
10314 }
10315
10316 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10317 {
10318 + pax_open_kernel();
10319 memcpy(&ldt[entry], desc, 8);
10320 + pax_close_kernel();
10321 }
10322
10323 static inline void
10324 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10325 default: size = sizeof(*gdt); break;
10326 }
10327
10328 + pax_open_kernel();
10329 memcpy(&gdt[entry], desc, size);
10330 + pax_close_kernel();
10331 }
10332
10333 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10334 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10335
10336 static inline void native_load_tr_desc(void)
10337 {
10338 + pax_open_kernel();
10339 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10340 + pax_close_kernel();
10341 }
10342
10343 static inline void native_load_gdt(const struct desc_ptr *dtr)
10344 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10345 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10346 unsigned int i;
10347
10348 + pax_open_kernel();
10349 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10350 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10351 + pax_close_kernel();
10352 }
10353
10354 #define _LDT_empty(info) \
10355 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10356 }
10357
10358 #ifdef CONFIG_X86_64
10359 -static inline void set_nmi_gate(int gate, void *addr)
10360 +static inline void set_nmi_gate(int gate, const void *addr)
10361 {
10362 gate_desc s;
10363
10364 @@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10365 }
10366 #endif
10367
10368 -static inline void _set_gate(int gate, unsigned type, void *addr,
10369 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10370 unsigned dpl, unsigned ist, unsigned seg)
10371 {
10372 gate_desc s;
10373 @@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10374 * Pentium F0 0F bugfix can have resulted in the mapped
10375 * IDT being write-protected.
10376 */
10377 -static inline void set_intr_gate(unsigned int n, void *addr)
10378 +static inline void set_intr_gate(unsigned int n, const void *addr)
10379 {
10380 BUG_ON((unsigned)n > 0xFF);
10381 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10382 @@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10383 /*
10384 * This routine sets up an interrupt gate at directory privilege level 3.
10385 */
10386 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10387 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10388 {
10389 BUG_ON((unsigned)n > 0xFF);
10390 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10391 }
10392
10393 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10394 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10398 }
10399
10400 -static inline void set_trap_gate(unsigned int n, void *addr)
10401 +static inline void set_trap_gate(unsigned int n, const void *addr)
10402 {
10403 BUG_ON((unsigned)n > 0xFF);
10404 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10405 @@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10406 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10407 {
10408 BUG_ON((unsigned)n > 0xFF);
10409 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10410 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10411 }
10412
10413 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10414 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10415 {
10416 BUG_ON((unsigned)n > 0xFF);
10417 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10418 }
10419
10420 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10421 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10422 {
10423 BUG_ON((unsigned)n > 0xFF);
10424 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10425 }
10426
10427 +#ifdef CONFIG_X86_32
10428 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10429 +{
10430 + struct desc_struct d;
10431 +
10432 + if (likely(limit))
10433 + limit = (limit - 1UL) >> PAGE_SHIFT;
10434 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10435 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10436 +}
10437 +#endif
10438 +
10439 #endif /* _ASM_X86_DESC_H */
10440 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10441 index 278441f..b95a174 100644
10442 --- a/arch/x86/include/asm/desc_defs.h
10443 +++ b/arch/x86/include/asm/desc_defs.h
10444 @@ -31,6 +31,12 @@ struct desc_struct {
10445 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10446 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10447 };
10448 + struct {
10449 + u16 offset_low;
10450 + u16 seg;
10451 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10452 + unsigned offset_high: 16;
10453 + } gate;
10454 };
10455 } __attribute__((packed));
10456
10457 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10458 index 3778256..c5d4fce 100644
10459 --- a/arch/x86/include/asm/e820.h
10460 +++ b/arch/x86/include/asm/e820.h
10461 @@ -69,7 +69,7 @@ struct e820map {
10462 #define ISA_START_ADDRESS 0xa0000
10463 #define ISA_END_ADDRESS 0x100000
10464
10465 -#define BIOS_BEGIN 0x000a0000
10466 +#define BIOS_BEGIN 0x000c0000
10467 #define BIOS_END 0x00100000
10468
10469 #define BIOS_ROM_BASE 0xffe00000
10470 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10471 index 5939f44..f8845f6 100644
10472 --- a/arch/x86/include/asm/elf.h
10473 +++ b/arch/x86/include/asm/elf.h
10474 @@ -243,7 +243,25 @@ extern int force_personality32;
10475 the loader. We need to make sure that it is out of the way of the program
10476 that it will "exec", and that there is sufficient room for the brk. */
10477
10478 +#ifdef CONFIG_PAX_SEGMEXEC
10479 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10480 +#else
10481 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10482 +#endif
10483 +
10484 +#ifdef CONFIG_PAX_ASLR
10485 +#ifdef CONFIG_X86_32
10486 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10487 +
10488 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10489 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10490 +#else
10491 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10492 +
10493 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10494 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10495 +#endif
10496 +#endif
10497
10498 /* This yields a mask that user programs can use to figure out what
10499 instruction set this CPU supports. This could be done in user space,
10500 @@ -296,16 +314,12 @@ do { \
10501
10502 #define ARCH_DLINFO \
10503 do { \
10504 - if (vdso_enabled) \
10505 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10506 - (unsigned long)current->mm->context.vdso); \
10507 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10508 } while (0)
10509
10510 #define ARCH_DLINFO_X32 \
10511 do { \
10512 - if (vdso_enabled) \
10513 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10514 - (unsigned long)current->mm->context.vdso); \
10515 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10516 } while (0)
10517
10518 #define AT_SYSINFO 32
10519 @@ -320,7 +334,7 @@ else \
10520
10521 #endif /* !CONFIG_X86_32 */
10522
10523 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10524 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10525
10526 #define VDSO_ENTRY \
10527 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10528 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10529 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10530 #define compat_arch_setup_additional_pages syscall32_setup_pages
10531
10532 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10533 -#define arch_randomize_brk arch_randomize_brk
10534 -
10535 /*
10536 * True on X86_32 or when emulating IA32 on X86_64
10537 */
10538 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10539 index cc70c1c..d96d011 100644
10540 --- a/arch/x86/include/asm/emergency-restart.h
10541 +++ b/arch/x86/include/asm/emergency-restart.h
10542 @@ -15,6 +15,6 @@ enum reboot_type {
10543
10544 extern enum reboot_type reboot_type;
10545
10546 -extern void machine_emergency_restart(void);
10547 +extern void machine_emergency_restart(void) __noreturn;
10548
10549 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10550 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10551 index 75f4c6d..ee3eb8f 100644
10552 --- a/arch/x86/include/asm/fpu-internal.h
10553 +++ b/arch/x86/include/asm/fpu-internal.h
10554 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10555 {
10556 int err;
10557
10558 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10560 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10561 +#endif
10562 +
10563 /* See comment in fxsave() below. */
10564 #ifdef CONFIG_AS_FXSAVEQ
10565 asm volatile("1: fxrstorq %[fx]\n\t"
10566 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10567 {
10568 int err;
10569
10570 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10571 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10572 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10573 +#endif
10574 +
10575 /*
10576 * Clear the bytes not touched by the fxsave and reserved
10577 * for the SW usage.
10578 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10579 "emms\n\t" /* clear stack tags */
10580 "fildl %P[addr]", /* set F?P to defined value */
10581 X86_FEATURE_FXSAVE_LEAK,
10582 - [addr] "m" (tsk->thread.fpu.has_fpu));
10583 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10584
10585 return fpu_restore_checking(&tsk->thread.fpu);
10586 }
10587 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10588 index 71ecbcb..bac10b7 100644
10589 --- a/arch/x86/include/asm/futex.h
10590 +++ b/arch/x86/include/asm/futex.h
10591 @@ -11,16 +11,18 @@
10592 #include <asm/processor.h>
10593
10594 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10595 + typecheck(u32 __user *, uaddr); \
10596 asm volatile("1:\t" insn "\n" \
10597 "2:\t.section .fixup,\"ax\"\n" \
10598 "3:\tmov\t%3, %1\n" \
10599 "\tjmp\t2b\n" \
10600 "\t.previous\n" \
10601 _ASM_EXTABLE(1b, 3b) \
10602 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10603 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10604 : "i" (-EFAULT), "0" (oparg), "1" (0))
10605
10606 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10607 + typecheck(u32 __user *, uaddr); \
10608 asm volatile("1:\tmovl %2, %0\n" \
10609 "\tmovl\t%0, %3\n" \
10610 "\t" insn "\n" \
10611 @@ -33,7 +35,7 @@
10612 _ASM_EXTABLE(1b, 4b) \
10613 _ASM_EXTABLE(2b, 4b) \
10614 : "=&a" (oldval), "=&r" (ret), \
10615 - "+m" (*uaddr), "=&r" (tem) \
10616 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10617 : "r" (oparg), "i" (-EFAULT), "1" (0))
10618
10619 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10620 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10621
10622 switch (op) {
10623 case FUTEX_OP_SET:
10624 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10625 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10626 break;
10627 case FUTEX_OP_ADD:
10628 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10629 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10630 uaddr, oparg);
10631 break;
10632 case FUTEX_OP_OR:
10633 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10634 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10635 return -EFAULT;
10636
10637 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10638 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10639 "2:\t.section .fixup, \"ax\"\n"
10640 "3:\tmov %3, %0\n"
10641 "\tjmp 2b\n"
10642 "\t.previous\n"
10643 _ASM_EXTABLE(1b, 3b)
10644 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10645 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10646 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10647 : "memory"
10648 );
10649 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10650 index eb92a6e..b98b2f4 100644
10651 --- a/arch/x86/include/asm/hw_irq.h
10652 +++ b/arch/x86/include/asm/hw_irq.h
10653 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10654 extern void enable_IO_APIC(void);
10655
10656 /* Statistics */
10657 -extern atomic_t irq_err_count;
10658 -extern atomic_t irq_mis_count;
10659 +extern atomic_unchecked_t irq_err_count;
10660 +extern atomic_unchecked_t irq_mis_count;
10661
10662 /* EISA */
10663 extern void eisa_set_level_irq(unsigned int irq);
10664 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10665 index d8e8eef..99f81ae 100644
10666 --- a/arch/x86/include/asm/io.h
10667 +++ b/arch/x86/include/asm/io.h
10668 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10669
10670 #include <linux/vmalloc.h>
10671
10672 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10673 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10674 +{
10675 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10676 +}
10677 +
10678 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10679 +{
10680 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10681 +}
10682 +
10683 /*
10684 * Convert a virtual cached pointer to an uncached pointer
10685 */
10686 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10687 index bba3cf8..06bc8da 100644
10688 --- a/arch/x86/include/asm/irqflags.h
10689 +++ b/arch/x86/include/asm/irqflags.h
10690 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10691 sti; \
10692 sysexit
10693
10694 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10695 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10696 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10697 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10698 +
10699 #else
10700 #define INTERRUPT_RETURN iret
10701 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10702 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10703 index 5478825..839e88c 100644
10704 --- a/arch/x86/include/asm/kprobes.h
10705 +++ b/arch/x86/include/asm/kprobes.h
10706 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10707 #define RELATIVEJUMP_SIZE 5
10708 #define RELATIVECALL_OPCODE 0xe8
10709 #define RELATIVE_ADDR_SIZE 4
10710 -#define MAX_STACK_SIZE 64
10711 -#define MIN_STACK_SIZE(ADDR) \
10712 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10713 - THREAD_SIZE - (unsigned long)(ADDR))) \
10714 - ? (MAX_STACK_SIZE) \
10715 - : (((unsigned long)current_thread_info()) + \
10716 - THREAD_SIZE - (unsigned long)(ADDR)))
10717 +#define MAX_STACK_SIZE 64UL
10718 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10719
10720 #define flush_insn_slot(p) do { } while (0)
10721
10722 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10723 index db7c1f2..92f130a 100644
10724 --- a/arch/x86/include/asm/kvm_host.h
10725 +++ b/arch/x86/include/asm/kvm_host.h
10726 @@ -680,7 +680,7 @@ struct kvm_x86_ops {
10727 int (*check_intercept)(struct kvm_vcpu *vcpu,
10728 struct x86_instruction_info *info,
10729 enum x86_intercept_stage stage);
10730 -};
10731 +} __do_const;
10732
10733 struct kvm_arch_async_pf {
10734 u32 token;
10735 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10736 index c8bed0d..e5721fa 100644
10737 --- a/arch/x86/include/asm/local.h
10738 +++ b/arch/x86/include/asm/local.h
10739 @@ -17,26 +17,58 @@ typedef struct {
10740
10741 static inline void local_inc(local_t *l)
10742 {
10743 - asm volatile(_ASM_INC "%0"
10744 + asm volatile(_ASM_INC "%0\n"
10745 +
10746 +#ifdef CONFIG_PAX_REFCOUNT
10747 + "jno 0f\n"
10748 + _ASM_DEC "%0\n"
10749 + "int $4\n0:\n"
10750 + _ASM_EXTABLE(0b, 0b)
10751 +#endif
10752 +
10753 : "+m" (l->a.counter));
10754 }
10755
10756 static inline void local_dec(local_t *l)
10757 {
10758 - asm volatile(_ASM_DEC "%0"
10759 + asm volatile(_ASM_DEC "%0\n"
10760 +
10761 +#ifdef CONFIG_PAX_REFCOUNT
10762 + "jno 0f\n"
10763 + _ASM_INC "%0\n"
10764 + "int $4\n0:\n"
10765 + _ASM_EXTABLE(0b, 0b)
10766 +#endif
10767 +
10768 : "+m" (l->a.counter));
10769 }
10770
10771 static inline void local_add(long i, local_t *l)
10772 {
10773 - asm volatile(_ASM_ADD "%1,%0"
10774 + asm volatile(_ASM_ADD "%1,%0\n"
10775 +
10776 +#ifdef CONFIG_PAX_REFCOUNT
10777 + "jno 0f\n"
10778 + _ASM_SUB "%1,%0\n"
10779 + "int $4\n0:\n"
10780 + _ASM_EXTABLE(0b, 0b)
10781 +#endif
10782 +
10783 : "+m" (l->a.counter)
10784 : "ir" (i));
10785 }
10786
10787 static inline void local_sub(long i, local_t *l)
10788 {
10789 - asm volatile(_ASM_SUB "%1,%0"
10790 + asm volatile(_ASM_SUB "%1,%0\n"
10791 +
10792 +#ifdef CONFIG_PAX_REFCOUNT
10793 + "jno 0f\n"
10794 + _ASM_ADD "%1,%0\n"
10795 + "int $4\n0:\n"
10796 + _ASM_EXTABLE(0b, 0b)
10797 +#endif
10798 +
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10803 {
10804 unsigned char c;
10805
10806 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10807 + asm volatile(_ASM_SUB "%2,%0\n"
10808 +
10809 +#ifdef CONFIG_PAX_REFCOUNT
10810 + "jno 0f\n"
10811 + _ASM_ADD "%2,%0\n"
10812 + "int $4\n0:\n"
10813 + _ASM_EXTABLE(0b, 0b)
10814 +#endif
10815 +
10816 + "sete %1\n"
10817 : "+m" (l->a.counter), "=qm" (c)
10818 : "ir" (i) : "memory");
10819 return c;
10820 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10821 {
10822 unsigned char c;
10823
10824 - asm volatile(_ASM_DEC "%0; sete %1"
10825 + asm volatile(_ASM_DEC "%0\n"
10826 +
10827 +#ifdef CONFIG_PAX_REFCOUNT
10828 + "jno 0f\n"
10829 + _ASM_INC "%0\n"
10830 + "int $4\n0:\n"
10831 + _ASM_EXTABLE(0b, 0b)
10832 +#endif
10833 +
10834 + "sete %1\n"
10835 : "+m" (l->a.counter), "=qm" (c)
10836 : : "memory");
10837 return c != 0;
10838 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10839 {
10840 unsigned char c;
10841
10842 - asm volatile(_ASM_INC "%0; sete %1"
10843 + asm volatile(_ASM_INC "%0\n"
10844 +
10845 +#ifdef CONFIG_PAX_REFCOUNT
10846 + "jno 0f\n"
10847 + _ASM_DEC "%0\n"
10848 + "int $4\n0:\n"
10849 + _ASM_EXTABLE(0b, 0b)
10850 +#endif
10851 +
10852 + "sete %1\n"
10853 : "+m" (l->a.counter), "=qm" (c)
10854 : : "memory");
10855 return c != 0;
10856 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10857 {
10858 unsigned char c;
10859
10860 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10861 + asm volatile(_ASM_ADD "%2,%0\n"
10862 +
10863 +#ifdef CONFIG_PAX_REFCOUNT
10864 + "jno 0f\n"
10865 + _ASM_SUB "%2,%0\n"
10866 + "int $4\n0:\n"
10867 + _ASM_EXTABLE(0b, 0b)
10868 +#endif
10869 +
10870 + "sets %1\n"
10871 : "+m" (l->a.counter), "=qm" (c)
10872 : "ir" (i) : "memory");
10873 return c;
10874 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10875 #endif
10876 /* Modern 486+ processor */
10877 __i = i;
10878 - asm volatile(_ASM_XADD "%0, %1;"
10879 + asm volatile(_ASM_XADD "%0, %1\n"
10880 +
10881 +#ifdef CONFIG_PAX_REFCOUNT
10882 + "jno 0f\n"
10883 + _ASM_MOV "%0,%1\n"
10884 + "int $4\n0:\n"
10885 + _ASM_EXTABLE(0b, 0b)
10886 +#endif
10887 +
10888 : "+r" (i), "+m" (l->a.counter)
10889 : : "memory");
10890 return i + __i;
10891 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10892 index 593e51d..fa69c9a 100644
10893 --- a/arch/x86/include/asm/mman.h
10894 +++ b/arch/x86/include/asm/mman.h
10895 @@ -5,4 +5,14 @@
10896
10897 #include <asm-generic/mman.h>
10898
10899 +#ifdef __KERNEL__
10900 +#ifndef __ASSEMBLY__
10901 +#ifdef CONFIG_X86_32
10902 +#define arch_mmap_check i386_mmap_check
10903 +int i386_mmap_check(unsigned long addr, unsigned long len,
10904 + unsigned long flags);
10905 +#endif
10906 +#endif
10907 +#endif
10908 +
10909 #endif /* _ASM_X86_MMAN_H */
10910 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10911 index 5f55e69..e20bfb1 100644
10912 --- a/arch/x86/include/asm/mmu.h
10913 +++ b/arch/x86/include/asm/mmu.h
10914 @@ -9,7 +9,7 @@
10915 * we put the segment information here.
10916 */
10917 typedef struct {
10918 - void *ldt;
10919 + struct desc_struct *ldt;
10920 int size;
10921
10922 #ifdef CONFIG_X86_64
10923 @@ -18,7 +18,19 @@ typedef struct {
10924 #endif
10925
10926 struct mutex lock;
10927 - void *vdso;
10928 + unsigned long vdso;
10929 +
10930 +#ifdef CONFIG_X86_32
10931 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10932 + unsigned long user_cs_base;
10933 + unsigned long user_cs_limit;
10934 +
10935 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10936 + cpumask_t cpu_user_cs_mask;
10937 +#endif
10938 +
10939 +#endif
10940 +#endif
10941 } mm_context_t;
10942
10943 #ifdef CONFIG_SMP
10944 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10945 index cdbf367..adb37ac 100644
10946 --- a/arch/x86/include/asm/mmu_context.h
10947 +++ b/arch/x86/include/asm/mmu_context.h
10948 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10949
10950 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10951 {
10952 +
10953 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10954 + unsigned int i;
10955 + pgd_t *pgd;
10956 +
10957 + pax_open_kernel();
10958 + pgd = get_cpu_pgd(smp_processor_id());
10959 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10960 + set_pgd_batched(pgd+i, native_make_pgd(0));
10961 + pax_close_kernel();
10962 +#endif
10963 +
10964 #ifdef CONFIG_SMP
10965 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10966 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10967 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10968 struct task_struct *tsk)
10969 {
10970 unsigned cpu = smp_processor_id();
10971 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10972 + int tlbstate = TLBSTATE_OK;
10973 +#endif
10974
10975 if (likely(prev != next)) {
10976 #ifdef CONFIG_SMP
10977 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10978 + tlbstate = this_cpu_read(cpu_tlbstate.state);
10979 +#endif
10980 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10981 this_cpu_write(cpu_tlbstate.active_mm, next);
10982 #endif
10983 cpumask_set_cpu(cpu, mm_cpumask(next));
10984
10985 /* Re-load page tables */
10986 +#ifdef CONFIG_PAX_PER_CPU_PGD
10987 + pax_open_kernel();
10988 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10989 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10990 + pax_close_kernel();
10991 + load_cr3(get_cpu_pgd(cpu));
10992 +#else
10993 load_cr3(next->pgd);
10994 +#endif
10995
10996 /* stop flush ipis for the previous mm */
10997 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10998 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10999 */
11000 if (unlikely(prev->context.ldt != next->context.ldt))
11001 load_LDT_nolock(&next->context);
11002 - }
11003 +
11004 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11005 + if (!(__supported_pte_mask & _PAGE_NX)) {
11006 + smp_mb__before_clear_bit();
11007 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11008 + smp_mb__after_clear_bit();
11009 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11010 + }
11011 +#endif
11012 +
11013 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11014 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11015 + prev->context.user_cs_limit != next->context.user_cs_limit))
11016 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11017 #ifdef CONFIG_SMP
11018 + else if (unlikely(tlbstate != TLBSTATE_OK))
11019 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11020 +#endif
11021 +#endif
11022 +
11023 + }
11024 else {
11025 +
11026 +#ifdef CONFIG_PAX_PER_CPU_PGD
11027 + pax_open_kernel();
11028 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11029 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11030 + pax_close_kernel();
11031 + load_cr3(get_cpu_pgd(cpu));
11032 +#endif
11033 +
11034 +#ifdef CONFIG_SMP
11035 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11036 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
11037
11038 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11039 * tlb flush IPI delivery. We must reload CR3
11040 * to make sure to use no freed page tables.
11041 */
11042 +
11043 +#ifndef CONFIG_PAX_PER_CPU_PGD
11044 load_cr3(next->pgd);
11045 +#endif
11046 +
11047 load_LDT_nolock(&next->context);
11048 +
11049 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11050 + if (!(__supported_pte_mask & _PAGE_NX))
11051 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11052 +#endif
11053 +
11054 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11055 +#ifdef CONFIG_PAX_PAGEEXEC
11056 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11057 +#endif
11058 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11059 +#endif
11060 +
11061 }
11062 +#endif
11063 }
11064 -#endif
11065 }
11066
11067 #define activate_mm(prev, next) \
11068 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11069 index 9eae775..c914fea 100644
11070 --- a/arch/x86/include/asm/module.h
11071 +++ b/arch/x86/include/asm/module.h
11072 @@ -5,6 +5,7 @@
11073
11074 #ifdef CONFIG_X86_64
11075 /* X86_64 does not define MODULE_PROC_FAMILY */
11076 +#define MODULE_PROC_FAMILY ""
11077 #elif defined CONFIG_M386
11078 #define MODULE_PROC_FAMILY "386 "
11079 #elif defined CONFIG_M486
11080 @@ -59,8 +60,20 @@
11081 #error unknown processor family
11082 #endif
11083
11084 -#ifdef CONFIG_X86_32
11085 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11086 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11087 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11088 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11089 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11090 +#else
11091 +#define MODULE_PAX_KERNEXEC ""
11092 #endif
11093
11094 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11095 +#define MODULE_PAX_UDEREF "UDEREF "
11096 +#else
11097 +#define MODULE_PAX_UDEREF ""
11098 +#endif
11099 +
11100 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11101 +
11102 #endif /* _ASM_X86_MODULE_H */
11103 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11104 index 320f7bb..e89f8f8 100644
11105 --- a/arch/x86/include/asm/page_64_types.h
11106 +++ b/arch/x86/include/asm/page_64_types.h
11107 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11108
11109 /* duplicated to the one in bootmem.h */
11110 extern unsigned long max_pfn;
11111 -extern unsigned long phys_base;
11112 +extern const unsigned long phys_base;
11113
11114 extern unsigned long __phys_addr(unsigned long);
11115 #define __phys_reloc_hide(x) (x)
11116 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11117 index 6cbbabf..11b3aed 100644
11118 --- a/arch/x86/include/asm/paravirt.h
11119 +++ b/arch/x86/include/asm/paravirt.h
11120 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11121 val);
11122 }
11123
11124 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11125 +{
11126 + pgdval_t val = native_pgd_val(pgd);
11127 +
11128 + if (sizeof(pgdval_t) > sizeof(long))
11129 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11130 + val, (u64)val >> 32);
11131 + else
11132 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11133 + val);
11134 +}
11135 +
11136 static inline void pgd_clear(pgd_t *pgdp)
11137 {
11138 set_pgd(pgdp, __pgd(0));
11139 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11140 pv_mmu_ops.set_fixmap(idx, phys, flags);
11141 }
11142
11143 +#ifdef CONFIG_PAX_KERNEXEC
11144 +static inline unsigned long pax_open_kernel(void)
11145 +{
11146 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11147 +}
11148 +
11149 +static inline unsigned long pax_close_kernel(void)
11150 +{
11151 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11152 +}
11153 +#else
11154 +static inline unsigned long pax_open_kernel(void) { return 0; }
11155 +static inline unsigned long pax_close_kernel(void) { return 0; }
11156 +#endif
11157 +
11158 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11159
11160 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11161 @@ -965,7 +992,7 @@ extern void default_banner(void);
11162
11163 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11164 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11165 -#define PARA_INDIRECT(addr) *%cs:addr
11166 +#define PARA_INDIRECT(addr) *%ss:addr
11167 #endif
11168
11169 #define INTERRUPT_RETURN \
11170 @@ -1040,6 +1067,21 @@ extern void default_banner(void);
11171 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11172 CLBR_NONE, \
11173 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11174 +
11175 +#define GET_CR0_INTO_RDI \
11176 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11177 + mov %rax,%rdi
11178 +
11179 +#define SET_RDI_INTO_CR0 \
11180 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11181 +
11182 +#define GET_CR3_INTO_RDI \
11183 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11184 + mov %rax,%rdi
11185 +
11186 +#define SET_RDI_INTO_CR3 \
11187 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11188 +
11189 #endif /* CONFIG_X86_32 */
11190
11191 #endif /* __ASSEMBLY__ */
11192 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11193 index 8e8b9a4..f07d725 100644
11194 --- a/arch/x86/include/asm/paravirt_types.h
11195 +++ b/arch/x86/include/asm/paravirt_types.h
11196 @@ -84,20 +84,20 @@ struct pv_init_ops {
11197 */
11198 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11199 unsigned long addr, unsigned len);
11200 -};
11201 +} __no_const;
11202
11203
11204 struct pv_lazy_ops {
11205 /* Set deferred update mode, used for batching operations. */
11206 void (*enter)(void);
11207 void (*leave)(void);
11208 -};
11209 +} __no_const;
11210
11211 struct pv_time_ops {
11212 unsigned long long (*sched_clock)(void);
11213 unsigned long long (*steal_clock)(int cpu);
11214 unsigned long (*get_tsc_khz)(void);
11215 -};
11216 +} __no_const;
11217
11218 struct pv_cpu_ops {
11219 /* hooks for various privileged instructions */
11220 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11221
11222 void (*start_context_switch)(struct task_struct *prev);
11223 void (*end_context_switch)(struct task_struct *next);
11224 -};
11225 +} __no_const;
11226
11227 struct pv_irq_ops {
11228 /*
11229 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11230 unsigned long start_eip,
11231 unsigned long start_esp);
11232 #endif
11233 -};
11234 +} __no_const;
11235
11236 struct pv_mmu_ops {
11237 unsigned long (*read_cr2)(void);
11238 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11239 struct paravirt_callee_save make_pud;
11240
11241 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11242 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11243 #endif /* PAGETABLE_LEVELS == 4 */
11244 #endif /* PAGETABLE_LEVELS >= 3 */
11245
11246 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11247 an mfn. We can tell which is which from the index. */
11248 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11249 phys_addr_t phys, pgprot_t flags);
11250 +
11251 +#ifdef CONFIG_PAX_KERNEXEC
11252 + unsigned long (*pax_open_kernel)(void);
11253 + unsigned long (*pax_close_kernel)(void);
11254 +#endif
11255 +
11256 };
11257
11258 struct arch_spinlock;
11259 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11260 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11261 int (*spin_trylock)(struct arch_spinlock *lock);
11262 void (*spin_unlock)(struct arch_spinlock *lock);
11263 -};
11264 +} __no_const;
11265
11266 /* This contains all the paravirt structures: we get a convenient
11267 * number for each function using the offset which we use to indicate
11268 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11269 index b4389a4..7024269 100644
11270 --- a/arch/x86/include/asm/pgalloc.h
11271 +++ b/arch/x86/include/asm/pgalloc.h
11272 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11273 pmd_t *pmd, pte_t *pte)
11274 {
11275 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11276 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11277 +}
11278 +
11279 +static inline void pmd_populate_user(struct mm_struct *mm,
11280 + pmd_t *pmd, pte_t *pte)
11281 +{
11282 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11283 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11284 }
11285
11286 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11287
11288 #ifdef CONFIG_X86_PAE
11289 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11290 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11291 +{
11292 + pud_populate(mm, pudp, pmd);
11293 +}
11294 #else /* !CONFIG_X86_PAE */
11295 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11296 {
11297 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11298 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11299 }
11300 +
11301 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11302 +{
11303 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11304 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11305 +}
11306 #endif /* CONFIG_X86_PAE */
11307
11308 #if PAGETABLE_LEVELS > 3
11309 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11310 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11311 }
11312
11313 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11314 +{
11315 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11316 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11317 +}
11318 +
11319 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11320 {
11321 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11322 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11323 index 98391db..8f6984e 100644
11324 --- a/arch/x86/include/asm/pgtable-2level.h
11325 +++ b/arch/x86/include/asm/pgtable-2level.h
11326 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11327
11328 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11329 {
11330 + pax_open_kernel();
11331 *pmdp = pmd;
11332 + pax_close_kernel();
11333 }
11334
11335 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11336 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11337 index cb00ccc..17e9054 100644
11338 --- a/arch/x86/include/asm/pgtable-3level.h
11339 +++ b/arch/x86/include/asm/pgtable-3level.h
11340 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11341
11342 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11343 {
11344 + pax_open_kernel();
11345 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11346 + pax_close_kernel();
11347 }
11348
11349 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11350 {
11351 + pax_open_kernel();
11352 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11353 + pax_close_kernel();
11354 }
11355
11356 /*
11357 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11358 index 49afb3f..91a8c63 100644
11359 --- a/arch/x86/include/asm/pgtable.h
11360 +++ b/arch/x86/include/asm/pgtable.h
11361 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11362
11363 #ifndef __PAGETABLE_PUD_FOLDED
11364 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11365 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11366 #define pgd_clear(pgd) native_pgd_clear(pgd)
11367 #endif
11368
11369 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11370
11371 #define arch_end_context_switch(prev) do {} while(0)
11372
11373 +#define pax_open_kernel() native_pax_open_kernel()
11374 +#define pax_close_kernel() native_pax_close_kernel()
11375 #endif /* CONFIG_PARAVIRT */
11376
11377 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11378 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11379 +
11380 +#ifdef CONFIG_PAX_KERNEXEC
11381 +static inline unsigned long native_pax_open_kernel(void)
11382 +{
11383 + unsigned long cr0;
11384 +
11385 + preempt_disable();
11386 + barrier();
11387 + cr0 = read_cr0() ^ X86_CR0_WP;
11388 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11389 + write_cr0(cr0);
11390 + return cr0 ^ X86_CR0_WP;
11391 +}
11392 +
11393 +static inline unsigned long native_pax_close_kernel(void)
11394 +{
11395 + unsigned long cr0;
11396 +
11397 + cr0 = read_cr0() ^ X86_CR0_WP;
11398 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11399 + write_cr0(cr0);
11400 + barrier();
11401 + preempt_enable_no_resched();
11402 + return cr0 ^ X86_CR0_WP;
11403 +}
11404 +#else
11405 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11406 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11407 +#endif
11408 +
11409 /*
11410 * The following only work if pte_present() is true.
11411 * Undefined behaviour if not..
11412 */
11413 +static inline int pte_user(pte_t pte)
11414 +{
11415 + return pte_val(pte) & _PAGE_USER;
11416 +}
11417 +
11418 static inline int pte_dirty(pte_t pte)
11419 {
11420 return pte_flags(pte) & _PAGE_DIRTY;
11421 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11422 return pte_clear_flags(pte, _PAGE_RW);
11423 }
11424
11425 +static inline pte_t pte_mkread(pte_t pte)
11426 +{
11427 + return __pte(pte_val(pte) | _PAGE_USER);
11428 +}
11429 +
11430 static inline pte_t pte_mkexec(pte_t pte)
11431 {
11432 - return pte_clear_flags(pte, _PAGE_NX);
11433 +#ifdef CONFIG_X86_PAE
11434 + if (__supported_pte_mask & _PAGE_NX)
11435 + return pte_clear_flags(pte, _PAGE_NX);
11436 + else
11437 +#endif
11438 + return pte_set_flags(pte, _PAGE_USER);
11439 +}
11440 +
11441 +static inline pte_t pte_exprotect(pte_t pte)
11442 +{
11443 +#ifdef CONFIG_X86_PAE
11444 + if (__supported_pte_mask & _PAGE_NX)
11445 + return pte_set_flags(pte, _PAGE_NX);
11446 + else
11447 +#endif
11448 + return pte_clear_flags(pte, _PAGE_USER);
11449 }
11450
11451 static inline pte_t pte_mkdirty(pte_t pte)
11452 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11453 #endif
11454
11455 #ifndef __ASSEMBLY__
11456 +
11457 +#ifdef CONFIG_PAX_PER_CPU_PGD
11458 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11459 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11460 +{
11461 + return cpu_pgd[cpu];
11462 +}
11463 +#endif
11464 +
11465 #include <linux/mm_types.h>
11466
11467 static inline int pte_none(pte_t pte)
11468 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11469
11470 static inline int pgd_bad(pgd_t pgd)
11471 {
11472 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11473 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11474 }
11475
11476 static inline int pgd_none(pgd_t pgd)
11477 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11478 * pgd_offset() returns a (pgd_t *)
11479 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11480 */
11481 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11482 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11483 +
11484 +#ifdef CONFIG_PAX_PER_CPU_PGD
11485 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11486 +#endif
11487 +
11488 /*
11489 * a shortcut which implies the use of the kernel's pgd, instead
11490 * of a process's
11491 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11492 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11493 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11494
11495 +#ifdef CONFIG_X86_32
11496 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11497 +#else
11498 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11499 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11500 +
11501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11502 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11503 +#else
11504 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11505 +#endif
11506 +
11507 +#endif
11508 +
11509 #ifndef __ASSEMBLY__
11510
11511 extern int direct_gbpages;
11512 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11513 * dst and src can be on the same page, but the range must not overlap,
11514 * and must not cross a page boundary.
11515 */
11516 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11517 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11518 {
11519 - memcpy(dst, src, count * sizeof(pgd_t));
11520 + pax_open_kernel();
11521 + while (count--)
11522 + *dst++ = *src++;
11523 + pax_close_kernel();
11524 }
11525
11526 +#ifdef CONFIG_PAX_PER_CPU_PGD
11527 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11528 +#endif
11529 +
11530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11531 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11532 +#else
11533 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11534 +#endif
11535
11536 #include <asm-generic/pgtable.h>
11537 #endif /* __ASSEMBLY__ */
11538 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11539 index 0c92113..34a77c6 100644
11540 --- a/arch/x86/include/asm/pgtable_32.h
11541 +++ b/arch/x86/include/asm/pgtable_32.h
11542 @@ -25,9 +25,6 @@
11543 struct mm_struct;
11544 struct vm_area_struct;
11545
11546 -extern pgd_t swapper_pg_dir[1024];
11547 -extern pgd_t initial_page_table[1024];
11548 -
11549 static inline void pgtable_cache_init(void) { }
11550 static inline void check_pgt_cache(void) { }
11551 void paging_init(void);
11552 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11553 # include <asm/pgtable-2level.h>
11554 #endif
11555
11556 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11557 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11558 +#ifdef CONFIG_X86_PAE
11559 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11560 +#endif
11561 +
11562 #if defined(CONFIG_HIGHPTE)
11563 #define pte_offset_map(dir, address) \
11564 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11565 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11566 /* Clear a kernel PTE and flush it from the TLB */
11567 #define kpte_clear_flush(ptep, vaddr) \
11568 do { \
11569 + pax_open_kernel(); \
11570 pte_clear(&init_mm, (vaddr), (ptep)); \
11571 + pax_close_kernel(); \
11572 __flush_tlb_one((vaddr)); \
11573 } while (0)
11574
11575 @@ -74,6 +79,9 @@ do { \
11576
11577 #endif /* !__ASSEMBLY__ */
11578
11579 +#define HAVE_ARCH_UNMAPPED_AREA
11580 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11581 +
11582 /*
11583 * kern_addr_valid() is (1) for FLATMEM and (0) for
11584 * SPARSEMEM and DISCONTIGMEM
11585 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11586 index ed5903b..c7fe163 100644
11587 --- a/arch/x86/include/asm/pgtable_32_types.h
11588 +++ b/arch/x86/include/asm/pgtable_32_types.h
11589 @@ -8,7 +8,7 @@
11590 */
11591 #ifdef CONFIG_X86_PAE
11592 # include <asm/pgtable-3level_types.h>
11593 -# define PMD_SIZE (1UL << PMD_SHIFT)
11594 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11595 # define PMD_MASK (~(PMD_SIZE - 1))
11596 #else
11597 # include <asm/pgtable-2level_types.h>
11598 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11599 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11600 #endif
11601
11602 +#ifdef CONFIG_PAX_KERNEXEC
11603 +#ifndef __ASSEMBLY__
11604 +extern unsigned char MODULES_EXEC_VADDR[];
11605 +extern unsigned char MODULES_EXEC_END[];
11606 +#endif
11607 +#include <asm/boot.h>
11608 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11609 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11610 +#else
11611 +#define ktla_ktva(addr) (addr)
11612 +#define ktva_ktla(addr) (addr)
11613 +#endif
11614 +
11615 #define MODULES_VADDR VMALLOC_START
11616 #define MODULES_END VMALLOC_END
11617 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11618 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11619 index 975f709..9f779c9 100644
11620 --- a/arch/x86/include/asm/pgtable_64.h
11621 +++ b/arch/x86/include/asm/pgtable_64.h
11622 @@ -16,10 +16,14 @@
11623
11624 extern pud_t level3_kernel_pgt[512];
11625 extern pud_t level3_ident_pgt[512];
11626 +extern pud_t level3_vmalloc_start_pgt[512];
11627 +extern pud_t level3_vmalloc_end_pgt[512];
11628 +extern pud_t level3_vmemmap_pgt[512];
11629 +extern pud_t level2_vmemmap_pgt[512];
11630 extern pmd_t level2_kernel_pgt[512];
11631 extern pmd_t level2_fixmap_pgt[512];
11632 -extern pmd_t level2_ident_pgt[512];
11633 -extern pgd_t init_level4_pgt[];
11634 +extern pmd_t level2_ident_pgt[512*2];
11635 +extern pgd_t init_level4_pgt[512];
11636
11637 #define swapper_pg_dir init_level4_pgt
11638
11639 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11640
11641 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11642 {
11643 + pax_open_kernel();
11644 *pmdp = pmd;
11645 + pax_close_kernel();
11646 }
11647
11648 static inline void native_pmd_clear(pmd_t *pmd)
11649 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11650
11651 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11652 {
11653 + pax_open_kernel();
11654 *pudp = pud;
11655 + pax_close_kernel();
11656 }
11657
11658 static inline void native_pud_clear(pud_t *pud)
11659 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11660
11661 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11662 {
11663 + pax_open_kernel();
11664 + *pgdp = pgd;
11665 + pax_close_kernel();
11666 +}
11667 +
11668 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11669 +{
11670 *pgdp = pgd;
11671 }
11672
11673 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11674 index 766ea16..5b96cb3 100644
11675 --- a/arch/x86/include/asm/pgtable_64_types.h
11676 +++ b/arch/x86/include/asm/pgtable_64_types.h
11677 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11678 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11679 #define MODULES_END _AC(0xffffffffff000000, UL)
11680 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11681 +#define MODULES_EXEC_VADDR MODULES_VADDR
11682 +#define MODULES_EXEC_END MODULES_END
11683 +
11684 +#define ktla_ktva(addr) (addr)
11685 +#define ktva_ktla(addr) (addr)
11686
11687 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11688 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11689 index 013286a..8b42f4f 100644
11690 --- a/arch/x86/include/asm/pgtable_types.h
11691 +++ b/arch/x86/include/asm/pgtable_types.h
11692 @@ -16,13 +16,12 @@
11693 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11694 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11695 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11696 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11697 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11698 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11699 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11700 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11701 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11702 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11703 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11704 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11705 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11706 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11707
11708 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11709 @@ -40,7 +39,6 @@
11710 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11711 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11712 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11713 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11714 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11715 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11716 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11717 @@ -57,8 +55,10 @@
11718
11719 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11720 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11721 -#else
11722 +#elif defined(CONFIG_KMEMCHECK)
11723 #define _PAGE_NX (_AT(pteval_t, 0))
11724 +#else
11725 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11726 #endif
11727
11728 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11729 @@ -96,6 +96,9 @@
11730 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11731 _PAGE_ACCESSED)
11732
11733 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11734 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11735 +
11736 #define __PAGE_KERNEL_EXEC \
11737 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11738 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11739 @@ -106,7 +109,7 @@
11740 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11741 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11742 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11743 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11744 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11745 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11746 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11747 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11748 @@ -168,8 +171,8 @@
11749 * bits are combined, this will alow user to access the high address mapped
11750 * VDSO in the presence of CONFIG_COMPAT_VDSO
11751 */
11752 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11753 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11754 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11755 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11756 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11757 #endif
11758
11759 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11760 {
11761 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11762 }
11763 +#endif
11764
11765 +#if PAGETABLE_LEVELS == 3
11766 +#include <asm-generic/pgtable-nopud.h>
11767 +#endif
11768 +
11769 +#if PAGETABLE_LEVELS == 2
11770 +#include <asm-generic/pgtable-nopmd.h>
11771 +#endif
11772 +
11773 +#ifndef __ASSEMBLY__
11774 #if PAGETABLE_LEVELS > 3
11775 typedef struct { pudval_t pud; } pud_t;
11776
11777 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11778 return pud.pud;
11779 }
11780 #else
11781 -#include <asm-generic/pgtable-nopud.h>
11782 -
11783 static inline pudval_t native_pud_val(pud_t pud)
11784 {
11785 return native_pgd_val(pud.pgd);
11786 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11787 return pmd.pmd;
11788 }
11789 #else
11790 -#include <asm-generic/pgtable-nopmd.h>
11791 -
11792 static inline pmdval_t native_pmd_val(pmd_t pmd)
11793 {
11794 return native_pgd_val(pmd.pud.pgd);
11795 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11796
11797 extern pteval_t __supported_pte_mask;
11798 extern void set_nx(void);
11799 -extern int nx_enabled;
11800
11801 #define pgprot_writecombine pgprot_writecombine
11802 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11803 diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
11804 index f8ab3ea..67889db 100644
11805 --- a/arch/x86/include/asm/processor-flags.h
11806 +++ b/arch/x86/include/asm/processor-flags.h
11807 @@ -63,6 +63,7 @@
11808 #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
11809 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
11810 #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
11811 +#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
11812
11813 /*
11814 * x86-64 Task Priority Register, CR8
11815 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11816 index 39bc577..538233f 100644
11817 --- a/arch/x86/include/asm/processor.h
11818 +++ b/arch/x86/include/asm/processor.h
11819 @@ -276,7 +276,7 @@ struct tss_struct {
11820
11821 } ____cacheline_aligned;
11822
11823 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11824 +extern struct tss_struct init_tss[NR_CPUS];
11825
11826 /*
11827 * Save the original ist values for checking stack pointers during debugging
11828 @@ -809,11 +809,18 @@ static inline void spin_lock_prefetch(const void *x)
11829 */
11830 #define TASK_SIZE PAGE_OFFSET
11831 #define TASK_SIZE_MAX TASK_SIZE
11832 +
11833 +#ifdef CONFIG_PAX_SEGMEXEC
11834 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11835 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11836 +#else
11837 #define STACK_TOP TASK_SIZE
11838 -#define STACK_TOP_MAX STACK_TOP
11839 +#endif
11840 +
11841 +#define STACK_TOP_MAX TASK_SIZE
11842
11843 #define INIT_THREAD { \
11844 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11845 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11846 .vm86_info = NULL, \
11847 .sysenter_cs = __KERNEL_CS, \
11848 .io_bitmap_ptr = NULL, \
11849 @@ -827,7 +834,7 @@ static inline void spin_lock_prefetch(const void *x)
11850 */
11851 #define INIT_TSS { \
11852 .x86_tss = { \
11853 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11854 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11855 .ss0 = __KERNEL_DS, \
11856 .ss1 = __KERNEL_CS, \
11857 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11858 @@ -838,11 +845,7 @@ static inline void spin_lock_prefetch(const void *x)
11859 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11860
11861 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11862 -#define KSTK_TOP(info) \
11863 -({ \
11864 - unsigned long *__ptr = (unsigned long *)(info); \
11865 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11866 -})
11867 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11868
11869 /*
11870 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11871 @@ -857,7 +860,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11872 #define task_pt_regs(task) \
11873 ({ \
11874 struct pt_regs *__regs__; \
11875 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11876 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11877 __regs__ - 1; \
11878 })
11879
11880 @@ -867,13 +870,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11881 /*
11882 * User space process size. 47bits minus one guard page.
11883 */
11884 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11885 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11886
11887 /* This decides where the kernel will search for a free chunk of vm
11888 * space during mmap's.
11889 */
11890 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11891 - 0xc0000000 : 0xFFFFe000)
11892 + 0xc0000000 : 0xFFFFf000)
11893
11894 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11895 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11896 @@ -884,11 +887,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11897 #define STACK_TOP_MAX TASK_SIZE_MAX
11898
11899 #define INIT_THREAD { \
11900 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11901 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11902 }
11903
11904 #define INIT_TSS { \
11905 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11906 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11907 }
11908
11909 /*
11910 @@ -916,6 +919,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11911 */
11912 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11913
11914 +#ifdef CONFIG_PAX_SEGMEXEC
11915 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11916 +#endif
11917 +
11918 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11919
11920 /* Get/set a process' ability to use the timestamp counter instruction */
11921 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11922 #define cpu_has_amd_erratum(x) (false)
11923 #endif /* CONFIG_CPU_SUP_AMD */
11924
11925 -extern unsigned long arch_align_stack(unsigned long sp);
11926 +#define arch_align_stack(x) ((x) & ~0xfUL)
11927 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11928
11929 void default_idle(void);
11930 bool set_pm_idle_to_default(void);
11931
11932 -void stop_this_cpu(void *dummy);
11933 +void stop_this_cpu(void *dummy) __noreturn;
11934
11935 #endif /* _ASM_X86_PROCESSOR_H */
11936 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11937 index dcfde52..dbfea06 100644
11938 --- a/arch/x86/include/asm/ptrace.h
11939 +++ b/arch/x86/include/asm/ptrace.h
11940 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11941 }
11942
11943 /*
11944 - * user_mode_vm(regs) determines whether a register set came from user mode.
11945 + * user_mode(regs) determines whether a register set came from user mode.
11946 * This is true if V8086 mode was enabled OR if the register set was from
11947 * protected mode with RPL-3 CS value. This tricky test checks that with
11948 * one comparison. Many places in the kernel can bypass this full check
11949 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11950 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11951 + * be used.
11952 */
11953 -static inline int user_mode(struct pt_regs *regs)
11954 +static inline int user_mode_novm(struct pt_regs *regs)
11955 {
11956 #ifdef CONFIG_X86_32
11957 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11958 #else
11959 - return !!(regs->cs & 3);
11960 + return !!(regs->cs & SEGMENT_RPL_MASK);
11961 #endif
11962 }
11963
11964 -static inline int user_mode_vm(struct pt_regs *regs)
11965 +static inline int user_mode(struct pt_regs *regs)
11966 {
11967 #ifdef CONFIG_X86_32
11968 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11969 USER_RPL;
11970 #else
11971 - return user_mode(regs);
11972 + return user_mode_novm(regs);
11973 #endif
11974 }
11975
11976 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11977 #ifdef CONFIG_X86_64
11978 static inline bool user_64bit_mode(struct pt_regs *regs)
11979 {
11980 + unsigned long cs = regs->cs & 0xffff;
11981 #ifndef CONFIG_PARAVIRT
11982 /*
11983 * On non-paravirt systems, this is the only long mode CPL 3
11984 * selector. We do not allow long mode selectors in the LDT.
11985 */
11986 - return regs->cs == __USER_CS;
11987 + return cs == __USER_CS;
11988 #else
11989 /* Headers are too twisted for this to go in paravirt.h. */
11990 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11991 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11992 #endif
11993 }
11994 #endif
11995 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
11996 index fce3f4a..3f69f2a 100644
11997 --- a/arch/x86/include/asm/realmode.h
11998 +++ b/arch/x86/include/asm/realmode.h
11999 @@ -30,7 +30,7 @@ struct real_mode_header {
12000 struct trampoline_header {
12001 #ifdef CONFIG_X86_32
12002 u32 start;
12003 - u16 gdt_pad;
12004 + u16 boot_cs;
12005 u16 gdt_limit;
12006 u32 gdt_base;
12007 #else
12008 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12009 index 92f29706..d0a1a53 100644
12010 --- a/arch/x86/include/asm/reboot.h
12011 +++ b/arch/x86/include/asm/reboot.h
12012 @@ -6,19 +6,19 @@
12013 struct pt_regs;
12014
12015 struct machine_ops {
12016 - void (*restart)(char *cmd);
12017 - void (*halt)(void);
12018 - void (*power_off)(void);
12019 + void (* __noreturn restart)(char *cmd);
12020 + void (* __noreturn halt)(void);
12021 + void (* __noreturn power_off)(void);
12022 void (*shutdown)(void);
12023 void (*crash_shutdown)(struct pt_regs *);
12024 - void (*emergency_restart)(void);
12025 -};
12026 + void (* __noreturn emergency_restart)(void);
12027 +} __no_const;
12028
12029 extern struct machine_ops machine_ops;
12030
12031 void native_machine_crash_shutdown(struct pt_regs *regs);
12032 void native_machine_shutdown(void);
12033 -void machine_real_restart(unsigned int type);
12034 +void __noreturn machine_real_restart(unsigned int type);
12035 /* These must match dispatch_table in reboot_32.S */
12036 #define MRR_BIOS 0
12037 #define MRR_APM 1
12038 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12039 index 2dbe4a7..ce1db00 100644
12040 --- a/arch/x86/include/asm/rwsem.h
12041 +++ b/arch/x86/include/asm/rwsem.h
12042 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12043 {
12044 asm volatile("# beginning down_read\n\t"
12045 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12046 +
12047 +#ifdef CONFIG_PAX_REFCOUNT
12048 + "jno 0f\n"
12049 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12050 + "int $4\n0:\n"
12051 + _ASM_EXTABLE(0b, 0b)
12052 +#endif
12053 +
12054 /* adds 0x00000001 */
12055 " jns 1f\n"
12056 " call call_rwsem_down_read_failed\n"
12057 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12058 "1:\n\t"
12059 " mov %1,%2\n\t"
12060 " add %3,%2\n\t"
12061 +
12062 +#ifdef CONFIG_PAX_REFCOUNT
12063 + "jno 0f\n"
12064 + "sub %3,%2\n"
12065 + "int $4\n0:\n"
12066 + _ASM_EXTABLE(0b, 0b)
12067 +#endif
12068 +
12069 " jle 2f\n\t"
12070 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12071 " jnz 1b\n\t"
12072 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12073 long tmp;
12074 asm volatile("# beginning down_write\n\t"
12075 LOCK_PREFIX " xadd %1,(%2)\n\t"
12076 +
12077 +#ifdef CONFIG_PAX_REFCOUNT
12078 + "jno 0f\n"
12079 + "mov %1,(%2)\n"
12080 + "int $4\n0:\n"
12081 + _ASM_EXTABLE(0b, 0b)
12082 +#endif
12083 +
12084 /* adds 0xffff0001, returns the old value */
12085 " test %1,%1\n\t"
12086 /* was the count 0 before? */
12087 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12088 long tmp;
12089 asm volatile("# beginning __up_read\n\t"
12090 LOCK_PREFIX " xadd %1,(%2)\n\t"
12091 +
12092 +#ifdef CONFIG_PAX_REFCOUNT
12093 + "jno 0f\n"
12094 + "mov %1,(%2)\n"
12095 + "int $4\n0:\n"
12096 + _ASM_EXTABLE(0b, 0b)
12097 +#endif
12098 +
12099 /* subtracts 1, returns the old value */
12100 " jns 1f\n\t"
12101 " call call_rwsem_wake\n" /* expects old value in %edx */
12102 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12103 long tmp;
12104 asm volatile("# beginning __up_write\n\t"
12105 LOCK_PREFIX " xadd %1,(%2)\n\t"
12106 +
12107 +#ifdef CONFIG_PAX_REFCOUNT
12108 + "jno 0f\n"
12109 + "mov %1,(%2)\n"
12110 + "int $4\n0:\n"
12111 + _ASM_EXTABLE(0b, 0b)
12112 +#endif
12113 +
12114 /* subtracts 0xffff0001, returns the old value */
12115 " jns 1f\n\t"
12116 " call call_rwsem_wake\n" /* expects old value in %edx */
12117 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12118 {
12119 asm volatile("# beginning __downgrade_write\n\t"
12120 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12121 +
12122 +#ifdef CONFIG_PAX_REFCOUNT
12123 + "jno 0f\n"
12124 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12125 + "int $4\n0:\n"
12126 + _ASM_EXTABLE(0b, 0b)
12127 +#endif
12128 +
12129 /*
12130 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12131 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12132 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12133 */
12134 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12135 {
12136 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12137 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12138 +
12139 +#ifdef CONFIG_PAX_REFCOUNT
12140 + "jno 0f\n"
12141 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12142 + "int $4\n0:\n"
12143 + _ASM_EXTABLE(0b, 0b)
12144 +#endif
12145 +
12146 : "+m" (sem->count)
12147 : "er" (delta));
12148 }
12149 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12150 */
12151 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12152 {
12153 - return delta + xadd(&sem->count, delta);
12154 + return delta + xadd_check_overflow(&sem->count, delta);
12155 }
12156
12157 #endif /* __KERNEL__ */
12158 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12159 index c48a950..c6d7468 100644
12160 --- a/arch/x86/include/asm/segment.h
12161 +++ b/arch/x86/include/asm/segment.h
12162 @@ -64,10 +64,15 @@
12163 * 26 - ESPFIX small SS
12164 * 27 - per-cpu [ offset to per-cpu data area ]
12165 * 28 - stack_canary-20 [ for stack protector ]
12166 - * 29 - unused
12167 - * 30 - unused
12168 + * 29 - PCI BIOS CS
12169 + * 30 - PCI BIOS DS
12170 * 31 - TSS for double fault handler
12171 */
12172 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12173 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12174 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12175 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12176 +
12177 #define GDT_ENTRY_TLS_MIN 6
12178 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12179
12180 @@ -79,6 +84,8 @@
12181
12182 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12183
12184 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12185 +
12186 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12187
12188 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12189 @@ -104,6 +111,12 @@
12190 #define __KERNEL_STACK_CANARY 0
12191 #endif
12192
12193 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12194 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12195 +
12196 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12197 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12198 +
12199 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12200
12201 /*
12202 @@ -141,7 +154,7 @@
12203 */
12204
12205 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12206 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12207 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12208
12209
12210 #else
12211 @@ -165,6 +178,8 @@
12212 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12213 #define __USER32_DS __USER_DS
12214
12215 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12216 +
12217 #define GDT_ENTRY_TSS 8 /* needs two entries */
12218 #define GDT_ENTRY_LDT 10 /* needs two entries */
12219 #define GDT_ENTRY_TLS_MIN 12
12220 @@ -185,6 +200,7 @@
12221 #endif
12222
12223 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12224 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12225 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12226 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12227 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12228 @@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
12229 {
12230 unsigned long __limit;
12231 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12232 - return __limit + 1;
12233 + return __limit;
12234 }
12235
12236 #endif /* !__ASSEMBLY__ */
12237 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12238 index f483945..64a7851 100644
12239 --- a/arch/x86/include/asm/smp.h
12240 +++ b/arch/x86/include/asm/smp.h
12241 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12242 /* cpus sharing the last level cache: */
12243 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12244 DECLARE_PER_CPU(u16, cpu_llc_id);
12245 -DECLARE_PER_CPU(int, cpu_number);
12246 +DECLARE_PER_CPU(unsigned int, cpu_number);
12247
12248 static inline struct cpumask *cpu_sibling_mask(int cpu)
12249 {
12250 @@ -79,7 +79,7 @@ struct smp_ops {
12251
12252 void (*send_call_func_ipi)(const struct cpumask *mask);
12253 void (*send_call_func_single_ipi)(int cpu);
12254 -};
12255 +} __no_const;
12256
12257 /* Globals due to paravirt */
12258 extern void set_cpu_sibling_map(int cpu);
12259 @@ -195,14 +195,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12260 extern int safe_smp_processor_id(void);
12261
12262 #elif defined(CONFIG_X86_64_SMP)
12263 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12264 -
12265 -#define stack_smp_processor_id() \
12266 -({ \
12267 - struct thread_info *ti; \
12268 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12269 - ti->cpu; \
12270 -})
12271 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12272 +#define stack_smp_processor_id() raw_smp_processor_id()
12273 #define safe_smp_processor_id() smp_processor_id()
12274
12275 #endif
12276 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12277 index b315a33..8849ab0 100644
12278 --- a/arch/x86/include/asm/spinlock.h
12279 +++ b/arch/x86/include/asm/spinlock.h
12280 @@ -173,6 +173,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12281 static inline void arch_read_lock(arch_rwlock_t *rw)
12282 {
12283 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12284 +
12285 +#ifdef CONFIG_PAX_REFCOUNT
12286 + "jno 0f\n"
12287 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12288 + "int $4\n0:\n"
12289 + _ASM_EXTABLE(0b, 0b)
12290 +#endif
12291 +
12292 "jns 1f\n"
12293 "call __read_lock_failed\n\t"
12294 "1:\n"
12295 @@ -182,6 +190,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12296 static inline void arch_write_lock(arch_rwlock_t *rw)
12297 {
12298 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12299 +
12300 +#ifdef CONFIG_PAX_REFCOUNT
12301 + "jno 0f\n"
12302 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12303 + "int $4\n0:\n"
12304 + _ASM_EXTABLE(0b, 0b)
12305 +#endif
12306 +
12307 "jz 1f\n"
12308 "call __write_lock_failed\n\t"
12309 "1:\n"
12310 @@ -211,13 +227,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12311
12312 static inline void arch_read_unlock(arch_rwlock_t *rw)
12313 {
12314 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12315 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12316 +
12317 +#ifdef CONFIG_PAX_REFCOUNT
12318 + "jno 0f\n"
12319 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12320 + "int $4\n0:\n"
12321 + _ASM_EXTABLE(0b, 0b)
12322 +#endif
12323 +
12324 :"+m" (rw->lock) : : "memory");
12325 }
12326
12327 static inline void arch_write_unlock(arch_rwlock_t *rw)
12328 {
12329 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12330 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12331 +
12332 +#ifdef CONFIG_PAX_REFCOUNT
12333 + "jno 0f\n"
12334 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12335 + "int $4\n0:\n"
12336 + _ASM_EXTABLE(0b, 0b)
12337 +#endif
12338 +
12339 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12340 }
12341
12342 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12343 index 6a99859..03cb807 100644
12344 --- a/arch/x86/include/asm/stackprotector.h
12345 +++ b/arch/x86/include/asm/stackprotector.h
12346 @@ -47,7 +47,7 @@
12347 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12348 */
12349 #define GDT_STACK_CANARY_INIT \
12350 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12351 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12352
12353 /*
12354 * Initialize the stackprotector canary value.
12355 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12356
12357 static inline void load_stack_canary_segment(void)
12358 {
12359 -#ifdef CONFIG_X86_32
12360 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12361 asm volatile ("mov %0, %%gs" : : "r" (0));
12362 #endif
12363 }
12364 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12365 index 70bbe39..4ae2bd4 100644
12366 --- a/arch/x86/include/asm/stacktrace.h
12367 +++ b/arch/x86/include/asm/stacktrace.h
12368 @@ -11,28 +11,20 @@
12369
12370 extern int kstack_depth_to_print;
12371
12372 -struct thread_info;
12373 +struct task_struct;
12374 struct stacktrace_ops;
12375
12376 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12377 - unsigned long *stack,
12378 - unsigned long bp,
12379 - const struct stacktrace_ops *ops,
12380 - void *data,
12381 - unsigned long *end,
12382 - int *graph);
12383 +typedef unsigned long walk_stack_t(struct task_struct *task,
12384 + void *stack_start,
12385 + unsigned long *stack,
12386 + unsigned long bp,
12387 + const struct stacktrace_ops *ops,
12388 + void *data,
12389 + unsigned long *end,
12390 + int *graph);
12391
12392 -extern unsigned long
12393 -print_context_stack(struct thread_info *tinfo,
12394 - unsigned long *stack, unsigned long bp,
12395 - const struct stacktrace_ops *ops, void *data,
12396 - unsigned long *end, int *graph);
12397 -
12398 -extern unsigned long
12399 -print_context_stack_bp(struct thread_info *tinfo,
12400 - unsigned long *stack, unsigned long bp,
12401 - const struct stacktrace_ops *ops, void *data,
12402 - unsigned long *end, int *graph);
12403 +extern walk_stack_t print_context_stack;
12404 +extern walk_stack_t print_context_stack_bp;
12405
12406 /* Generic stack tracer with callbacks */
12407
12408 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12409 void (*address)(void *data, unsigned long address, int reliable);
12410 /* On negative return stop dumping */
12411 int (*stack)(void *data, char *name);
12412 - walk_stack_t walk_stack;
12413 + walk_stack_t *walk_stack;
12414 };
12415
12416 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12417 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12418 index 4ec45b3..a4f0a8a 100644
12419 --- a/arch/x86/include/asm/switch_to.h
12420 +++ b/arch/x86/include/asm/switch_to.h
12421 @@ -108,7 +108,7 @@ do { \
12422 "call __switch_to\n\t" \
12423 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12424 __switch_canary \
12425 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12426 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12427 "movq %%rax,%%rdi\n\t" \
12428 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12429 "jnz ret_from_fork\n\t" \
12430 @@ -119,7 +119,7 @@ do { \
12431 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12432 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12433 [_tif_fork] "i" (_TIF_FORK), \
12434 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12435 + [thread_info] "m" (current_tinfo), \
12436 [current_task] "m" (current_task) \
12437 __switch_canary_iparam \
12438 : "memory", "cc" __EXTRA_CLOBBER)
12439 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12440 index 3fda9db4..4ca1c61 100644
12441 --- a/arch/x86/include/asm/sys_ia32.h
12442 +++ b/arch/x86/include/asm/sys_ia32.h
12443 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12444 struct old_sigaction32 __user *);
12445 asmlinkage long sys32_alarm(unsigned int);
12446
12447 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12448 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12449 asmlinkage long sys32_sysfs(int, u32, u32);
12450
12451 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12452 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12453 index 89f794f..1422765 100644
12454 --- a/arch/x86/include/asm/thread_info.h
12455 +++ b/arch/x86/include/asm/thread_info.h
12456 @@ -10,6 +10,7 @@
12457 #include <linux/compiler.h>
12458 #include <asm/page.h>
12459 #include <asm/types.h>
12460 +#include <asm/percpu.h>
12461
12462 /*
12463 * low level task data that entry.S needs immediate access to
12464 @@ -24,7 +25,6 @@ struct exec_domain;
12465 #include <linux/atomic.h>
12466
12467 struct thread_info {
12468 - struct task_struct *task; /* main task structure */
12469 struct exec_domain *exec_domain; /* execution domain */
12470 __u32 flags; /* low level flags */
12471 __u32 status; /* thread synchronous flags */
12472 @@ -34,19 +34,13 @@ struct thread_info {
12473 mm_segment_t addr_limit;
12474 struct restart_block restart_block;
12475 void __user *sysenter_return;
12476 -#ifdef CONFIG_X86_32
12477 - unsigned long previous_esp; /* ESP of the previous stack in
12478 - case of nested (IRQ) stacks
12479 - */
12480 - __u8 supervisor_stack[0];
12481 -#endif
12482 + unsigned long lowest_stack;
12483 unsigned int sig_on_uaccess_error:1;
12484 unsigned int uaccess_err:1; /* uaccess failed */
12485 };
12486
12487 -#define INIT_THREAD_INFO(tsk) \
12488 +#define INIT_THREAD_INFO \
12489 { \
12490 - .task = &tsk, \
12491 .exec_domain = &default_exec_domain, \
12492 .flags = 0, \
12493 .cpu = 0, \
12494 @@ -57,7 +51,7 @@ struct thread_info {
12495 }, \
12496 }
12497
12498 -#define init_thread_info (init_thread_union.thread_info)
12499 +#define init_thread_info (init_thread_union.stack)
12500 #define init_stack (init_thread_union.stack)
12501
12502 #else /* !__ASSEMBLY__ */
12503 @@ -98,6 +92,7 @@ struct thread_info {
12504 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12505 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12506 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12507 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12508
12509 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12510 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12511 @@ -122,16 +117,18 @@ struct thread_info {
12512 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12513 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12514 #define _TIF_X32 (1 << TIF_X32)
12515 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12516
12517 /* work to do in syscall_trace_enter() */
12518 #define _TIF_WORK_SYSCALL_ENTRY \
12519 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12520 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12521 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12522 + _TIF_GRSEC_SETXID)
12523
12524 /* work to do in syscall_trace_leave() */
12525 #define _TIF_WORK_SYSCALL_EXIT \
12526 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12527 - _TIF_SYSCALL_TRACEPOINT)
12528 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12529
12530 /* work to do on interrupt/exception return */
12531 #define _TIF_WORK_MASK \
12532 @@ -141,7 +138,8 @@ struct thread_info {
12533
12534 /* work to do on any return to user space */
12535 #define _TIF_ALLWORK_MASK \
12536 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12537 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12538 + _TIF_GRSEC_SETXID)
12539
12540 /* Only used for 64 bit */
12541 #define _TIF_DO_NOTIFY_MASK \
12542 @@ -157,45 +155,40 @@ struct thread_info {
12543
12544 #define PREEMPT_ACTIVE 0x10000000
12545
12546 -#ifdef CONFIG_X86_32
12547 -
12548 -#define STACK_WARN (THREAD_SIZE/8)
12549 -/*
12550 - * macros/functions for gaining access to the thread information structure
12551 - *
12552 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12553 - */
12554 -#ifndef __ASSEMBLY__
12555 -
12556 -
12557 -/* how to get the current stack pointer from C */
12558 -register unsigned long current_stack_pointer asm("esp") __used;
12559 -
12560 -/* how to get the thread information struct from C */
12561 -static inline struct thread_info *current_thread_info(void)
12562 -{
12563 - return (struct thread_info *)
12564 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12565 -}
12566 -
12567 -#else /* !__ASSEMBLY__ */
12568 -
12569 +#ifdef __ASSEMBLY__
12570 /* how to get the thread information struct from ASM */
12571 #define GET_THREAD_INFO(reg) \
12572 - movl $-THREAD_SIZE, reg; \
12573 - andl %esp, reg
12574 + mov PER_CPU_VAR(current_tinfo), reg
12575
12576 /* use this one if reg already contains %esp */
12577 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12578 - andl $-THREAD_SIZE, reg
12579 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12580 +#else
12581 +/* how to get the thread information struct from C */
12582 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12583 +
12584 +static __always_inline struct thread_info *current_thread_info(void)
12585 +{
12586 + return this_cpu_read_stable(current_tinfo);
12587 +}
12588 +#endif
12589 +
12590 +#ifdef CONFIG_X86_32
12591 +
12592 +#define STACK_WARN (THREAD_SIZE/8)
12593 +/*
12594 + * macros/functions for gaining access to the thread information structure
12595 + *
12596 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12597 + */
12598 +#ifndef __ASSEMBLY__
12599 +
12600 +/* how to get the current stack pointer from C */
12601 +register unsigned long current_stack_pointer asm("esp") __used;
12602
12603 #endif
12604
12605 #else /* X86_32 */
12606
12607 -#include <asm/percpu.h>
12608 -#define KERNEL_STACK_OFFSET (5*8)
12609 -
12610 /*
12611 * macros/functions for gaining access to the thread information structure
12612 * preempt_count needs to be 1 initially, until the scheduler is functional.
12613 @@ -203,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
12614 #ifndef __ASSEMBLY__
12615 DECLARE_PER_CPU(unsigned long, kernel_stack);
12616
12617 -static inline struct thread_info *current_thread_info(void)
12618 -{
12619 - struct thread_info *ti;
12620 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
12621 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12622 - return ti;
12623 -}
12624 -
12625 -#else /* !__ASSEMBLY__ */
12626 -
12627 -/* how to get the thread information struct from ASM */
12628 -#define GET_THREAD_INFO(reg) \
12629 - movq PER_CPU_VAR(kernel_stack),reg ; \
12630 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12631 -
12632 -/*
12633 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12634 - * a certain register (to be used in assembler memory operands).
12635 - */
12636 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12637 -
12638 +/* how to get the current stack pointer from C */
12639 +register unsigned long current_stack_pointer asm("rsp") __used;
12640 #endif
12641
12642 #endif /* !X86_32 */
12643 @@ -284,5 +258,12 @@ static inline bool is_ia32_task(void)
12644 extern void arch_task_cache_init(void);
12645 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12646 extern void arch_release_task_struct(struct task_struct *tsk);
12647 +
12648 +#define __HAVE_THREAD_FUNCTIONS
12649 +#define task_thread_info(task) (&(task)->tinfo)
12650 +#define task_stack_page(task) ((task)->stack)
12651 +#define setup_thread_stack(p, org) do {} while (0)
12652 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12653 +
12654 #endif
12655 #endif /* _ASM_X86_THREAD_INFO_H */
12656 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12657 index e1f3a17..1ab364d 100644
12658 --- a/arch/x86/include/asm/uaccess.h
12659 +++ b/arch/x86/include/asm/uaccess.h
12660 @@ -7,12 +7,15 @@
12661 #include <linux/compiler.h>
12662 #include <linux/thread_info.h>
12663 #include <linux/string.h>
12664 +#include <linux/sched.h>
12665 #include <asm/asm.h>
12666 #include <asm/page.h>
12667
12668 #define VERIFY_READ 0
12669 #define VERIFY_WRITE 1
12670
12671 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12672 +
12673 /*
12674 * The fs value determines whether argument validity checking should be
12675 * performed or not. If get_fs() == USER_DS, checking is performed, with
12676 @@ -28,7 +31,12 @@
12677
12678 #define get_ds() (KERNEL_DS)
12679 #define get_fs() (current_thread_info()->addr_limit)
12680 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12681 +void __set_fs(mm_segment_t x);
12682 +void set_fs(mm_segment_t x);
12683 +#else
12684 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12685 +#endif
12686
12687 #define segment_eq(a, b) ((a).seg == (b).seg)
12688
12689 @@ -76,8 +84,33 @@
12690 * checks that the pointer is in the user space range - after calling
12691 * this function, memory access functions may still return -EFAULT.
12692 */
12693 -#define access_ok(type, addr, size) \
12694 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12695 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12696 +#define access_ok(type, addr, size) \
12697 +({ \
12698 + long __size = size; \
12699 + unsigned long __addr = (unsigned long)addr; \
12700 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12701 + unsigned long __end_ao = __addr + __size - 1; \
12702 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
12703 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12704 + while(__addr_ao <= __end_ao) { \
12705 + char __c_ao; \
12706 + __addr_ao += PAGE_SIZE; \
12707 + if (__size > PAGE_SIZE) \
12708 + cond_resched(); \
12709 + if (__get_user(__c_ao, (char __user *)__addr)) \
12710 + break; \
12711 + if (type != VERIFY_WRITE) { \
12712 + __addr = __addr_ao; \
12713 + continue; \
12714 + } \
12715 + if (__put_user(__c_ao, (char __user *)__addr)) \
12716 + break; \
12717 + __addr = __addr_ao; \
12718 + } \
12719 + } \
12720 + __ret_ao; \
12721 +})
12722
12723 /*
12724 * The exception table consists of pairs of addresses relative to the
12725 @@ -188,12 +221,20 @@ extern int __get_user_bad(void);
12726 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12727 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12728
12729 -
12730 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12731 +#define __copyuser_seg "gs;"
12732 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12733 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12734 +#else
12735 +#define __copyuser_seg
12736 +#define __COPYUSER_SET_ES
12737 +#define __COPYUSER_RESTORE_ES
12738 +#endif
12739
12740 #ifdef CONFIG_X86_32
12741 #define __put_user_asm_u64(x, addr, err, errret) \
12742 - asm volatile("1: movl %%eax,0(%2)\n" \
12743 - "2: movl %%edx,4(%2)\n" \
12744 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12745 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12746 "3:\n" \
12747 ".section .fixup,\"ax\"\n" \
12748 "4: movl %3,%0\n" \
12749 @@ -205,8 +246,8 @@ extern int __get_user_bad(void);
12750 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12751
12752 #define __put_user_asm_ex_u64(x, addr) \
12753 - asm volatile("1: movl %%eax,0(%1)\n" \
12754 - "2: movl %%edx,4(%1)\n" \
12755 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12756 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12757 "3:\n" \
12758 _ASM_EXTABLE_EX(1b, 2b) \
12759 _ASM_EXTABLE_EX(2b, 3b) \
12760 @@ -258,7 +299,7 @@ extern void __put_user_8(void);
12761 __typeof__(*(ptr)) __pu_val; \
12762 __chk_user_ptr(ptr); \
12763 might_fault(); \
12764 - __pu_val = x; \
12765 + __pu_val = (x); \
12766 switch (sizeof(*(ptr))) { \
12767 case 1: \
12768 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12769 @@ -379,7 +420,7 @@ do { \
12770 } while (0)
12771
12772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12773 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12774 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12775 "2:\n" \
12776 ".section .fixup,\"ax\"\n" \
12777 "3: mov %3,%0\n" \
12778 @@ -387,7 +428,7 @@ do { \
12779 " jmp 2b\n" \
12780 ".previous\n" \
12781 _ASM_EXTABLE(1b, 3b) \
12782 - : "=r" (err), ltype(x) \
12783 + : "=r" (err), ltype (x) \
12784 : "m" (__m(addr)), "i" (errret), "0" (err))
12785
12786 #define __get_user_size_ex(x, ptr, size) \
12787 @@ -412,7 +453,7 @@ do { \
12788 } while (0)
12789
12790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12791 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12792 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12793 "2:\n" \
12794 _ASM_EXTABLE_EX(1b, 2b) \
12795 : ltype(x) : "m" (__m(addr)))
12796 @@ -429,13 +470,24 @@ do { \
12797 int __gu_err; \
12798 unsigned long __gu_val; \
12799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12800 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12801 + (x) = (__typeof__(*(ptr)))__gu_val; \
12802 __gu_err; \
12803 })
12804
12805 /* FIXME: this hack is definitely wrong -AK */
12806 struct __large_struct { unsigned long buf[100]; };
12807 -#define __m(x) (*(struct __large_struct __user *)(x))
12808 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12809 +#define ____m(x) \
12810 +({ \
12811 + unsigned long ____x = (unsigned long)(x); \
12812 + if (____x < PAX_USER_SHADOW_BASE) \
12813 + ____x += PAX_USER_SHADOW_BASE; \
12814 + (void __user *)____x; \
12815 +})
12816 +#else
12817 +#define ____m(x) (x)
12818 +#endif
12819 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12820
12821 /*
12822 * Tell gcc we read from memory instead of writing: this is because
12823 @@ -443,7 +495,7 @@ struct __large_struct { unsigned long buf[100]; };
12824 * aliasing issues.
12825 */
12826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12827 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12828 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12829 "2:\n" \
12830 ".section .fixup,\"ax\"\n" \
12831 "3: mov %3,%0\n" \
12832 @@ -451,10 +503,10 @@ struct __large_struct { unsigned long buf[100]; };
12833 ".previous\n" \
12834 _ASM_EXTABLE(1b, 3b) \
12835 : "=r"(err) \
12836 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12837 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12838
12839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12840 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12841 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12842 "2:\n" \
12843 _ASM_EXTABLE_EX(1b, 2b) \
12844 : : ltype(x), "m" (__m(addr)))
12845 @@ -493,8 +545,12 @@ struct __large_struct { unsigned long buf[100]; };
12846 * On error, the variable @x is set to zero.
12847 */
12848
12849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12850 +#define __get_user(x, ptr) get_user((x), (ptr))
12851 +#else
12852 #define __get_user(x, ptr) \
12853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12854 +#endif
12855
12856 /**
12857 * __put_user: - Write a simple value into user space, with less checking.
12858 @@ -516,8 +572,12 @@ struct __large_struct { unsigned long buf[100]; };
12859 * Returns zero on success, or -EFAULT on error.
12860 */
12861
12862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12863 +#define __put_user(x, ptr) put_user((x), (ptr))
12864 +#else
12865 #define __put_user(x, ptr) \
12866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12867 +#endif
12868
12869 #define __get_user_unaligned __get_user
12870 #define __put_user_unaligned __put_user
12871 @@ -535,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; };
12872 #define get_user_ex(x, ptr) do { \
12873 unsigned long __gue_val; \
12874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12875 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12876 + (x) = (__typeof__(*(ptr)))__gue_val; \
12877 } while (0)
12878
12879 #ifdef CONFIG_X86_WP_WORKS_OK
12880 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12881 index 576e39b..ccd0a39 100644
12882 --- a/arch/x86/include/asm/uaccess_32.h
12883 +++ b/arch/x86/include/asm/uaccess_32.h
12884 @@ -11,15 +11,15 @@
12885 #include <asm/page.h>
12886
12887 unsigned long __must_check __copy_to_user_ll
12888 - (void __user *to, const void *from, unsigned long n);
12889 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll
12891 - (void *to, const void __user *from, unsigned long n);
12892 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nozero
12894 - (void *to, const void __user *from, unsigned long n);
12895 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896 unsigned long __must_check __copy_from_user_ll_nocache
12897 - (void *to, const void __user *from, unsigned long n);
12898 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12899 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900 - (void *to, const void __user *from, unsigned long n);
12901 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12902
12903 /**
12904 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12905 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12906 static __always_inline unsigned long __must_check
12907 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12908 {
12909 + if ((long)n < 0)
12910 + return n;
12911 +
12912 if (__builtin_constant_p(n)) {
12913 unsigned long ret;
12914
12915 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12916 return ret;
12917 }
12918 }
12919 + if (!__builtin_constant_p(n))
12920 + check_object_size(from, n, true);
12921 return __copy_to_user_ll(to, from, n);
12922 }
12923
12924 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12925 __copy_to_user(void __user *to, const void *from, unsigned long n)
12926 {
12927 might_fault();
12928 +
12929 return __copy_to_user_inatomic(to, from, n);
12930 }
12931
12932 static __always_inline unsigned long
12933 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12934 {
12935 + if ((long)n < 0)
12936 + return n;
12937 +
12938 /* Avoid zeroing the tail if the copy fails..
12939 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12940 * but as the zeroing behaviour is only significant when n is not
12941 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12942 __copy_from_user(void *to, const void __user *from, unsigned long n)
12943 {
12944 might_fault();
12945 +
12946 + if ((long)n < 0)
12947 + return n;
12948 +
12949 if (__builtin_constant_p(n)) {
12950 unsigned long ret;
12951
12952 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12953 return ret;
12954 }
12955 }
12956 + if (!__builtin_constant_p(n))
12957 + check_object_size(to, n, false);
12958 return __copy_from_user_ll(to, from, n);
12959 }
12960
12961 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12962 const void __user *from, unsigned long n)
12963 {
12964 might_fault();
12965 +
12966 + if ((long)n < 0)
12967 + return n;
12968 +
12969 if (__builtin_constant_p(n)) {
12970 unsigned long ret;
12971
12972 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12973 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12974 unsigned long n)
12975 {
12976 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12977 + if ((long)n < 0)
12978 + return n;
12979 +
12980 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12981 }
12982
12983 -unsigned long __must_check copy_to_user(void __user *to,
12984 - const void *from, unsigned long n);
12985 -unsigned long __must_check _copy_from_user(void *to,
12986 - const void __user *from,
12987 - unsigned long n);
12988 -
12989 +extern void copy_to_user_overflow(void)
12990 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12991 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12992 +#else
12993 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12994 +#endif
12995 +;
12996
12997 extern void copy_from_user_overflow(void)
12998 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12999 @@ -199,21 +222,65 @@ extern void copy_from_user_overflow(void)
13000 #endif
13001 ;
13002
13003 -static inline unsigned long __must_check copy_from_user(void *to,
13004 - const void __user *from,
13005 - unsigned long n)
13006 +/**
13007 + * copy_to_user: - Copy a block of data into user space.
13008 + * @to: Destination address, in user space.
13009 + * @from: Source address, in kernel space.
13010 + * @n: Number of bytes to copy.
13011 + *
13012 + * Context: User context only. This function may sleep.
13013 + *
13014 + * Copy data from kernel space to user space.
13015 + *
13016 + * Returns number of bytes that could not be copied.
13017 + * On success, this will be zero.
13018 + */
13019 +static inline unsigned long __must_check
13020 +copy_to_user(void __user *to, const void *from, unsigned long n)
13021 {
13022 - int sz = __compiletime_object_size(to);
13023 + size_t sz = __compiletime_object_size(from);
13024
13025 - if (likely(sz == -1 || sz >= n))
13026 - n = _copy_from_user(to, from, n);
13027 - else
13028 + if (unlikely(sz != (size_t)-1 && sz < n))
13029 + copy_to_user_overflow();
13030 + else if (access_ok(VERIFY_WRITE, to, n))
13031 + n = __copy_to_user(to, from, n);
13032 + return n;
13033 +}
13034 +
13035 +/**
13036 + * copy_from_user: - Copy a block of data from user space.
13037 + * @to: Destination address, in kernel space.
13038 + * @from: Source address, in user space.
13039 + * @n: Number of bytes to copy.
13040 + *
13041 + * Context: User context only. This function may sleep.
13042 + *
13043 + * Copy data from user space to kernel space.
13044 + *
13045 + * Returns number of bytes that could not be copied.
13046 + * On success, this will be zero.
13047 + *
13048 + * If some data could not be copied, this function will pad the copied
13049 + * data to the requested size using zero bytes.
13050 + */
13051 +static inline unsigned long __must_check
13052 +copy_from_user(void *to, const void __user *from, unsigned long n)
13053 +{
13054 + size_t sz = __compiletime_object_size(to);
13055 +
13056 + if (unlikely(sz != (size_t)-1 && sz < n))
13057 copy_from_user_overflow();
13058 -
13059 + else if (access_ok(VERIFY_READ, from, n))
13060 + n = __copy_from_user(to, from, n);
13061 + else if ((long)n > 0) {
13062 + if (!__builtin_constant_p(n))
13063 + check_object_size(to, n, false);
13064 + memset(to, 0, n);
13065 + }
13066 return n;
13067 }
13068
13069 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13070 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13071 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13072 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13073
13074 #endif /* _ASM_X86_UACCESS_32_H */
13075 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13076 index 8e796fb..468c55a 100644
13077 --- a/arch/x86/include/asm/uaccess_64.h
13078 +++ b/arch/x86/include/asm/uaccess_64.h
13079 @@ -10,6 +10,9 @@
13080 #include <asm/alternative.h>
13081 #include <asm/cpufeature.h>
13082 #include <asm/page.h>
13083 +#include <asm/pgtable.h>
13084 +
13085 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13086
13087 /*
13088 * Copy To/From Userspace
13089 @@ -17,12 +20,12 @@
13090
13091 /* Handles exceptions in both to and from, but doesn't do access_ok */
13092 __must_check unsigned long
13093 -copy_user_generic_string(void *to, const void *from, unsigned len);
13094 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13095 __must_check unsigned long
13096 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13097 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13098
13099 -static __always_inline __must_check unsigned long
13100 -copy_user_generic(void *to, const void *from, unsigned len)
13101 +static __always_inline __must_check __size_overflow(3) unsigned long
13102 +copy_user_generic(void *to, const void *from, unsigned long len)
13103 {
13104 unsigned ret;
13105
13106 @@ -32,142 +35,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13107 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13108 "=d" (len)),
13109 "1" (to), "2" (from), "3" (len)
13110 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13111 + : "memory", "rcx", "r8", "r9", "r11");
13112 return ret;
13113 }
13114
13115 +static __always_inline __must_check unsigned long
13116 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13117 +static __always_inline __must_check unsigned long
13118 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13119 __must_check unsigned long
13120 -_copy_to_user(void __user *to, const void *from, unsigned len);
13121 -__must_check unsigned long
13122 -_copy_from_user(void *to, const void __user *from, unsigned len);
13123 -__must_check unsigned long
13124 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13125 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13126 +
13127 +extern void copy_to_user_overflow(void)
13128 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13129 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13130 +#else
13131 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13132 +#endif
13133 +;
13134 +
13135 +extern void copy_from_user_overflow(void)
13136 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13137 + __compiletime_error("copy_from_user() buffer size is not provably correct")
13138 +#else
13139 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
13140 +#endif
13141 +;
13142
13143 static inline unsigned long __must_check copy_from_user(void *to,
13144 const void __user *from,
13145 unsigned long n)
13146 {
13147 - int sz = __compiletime_object_size(to);
13148 -
13149 might_fault();
13150 - if (likely(sz == -1 || sz >= n))
13151 - n = _copy_from_user(to, from, n);
13152 -#ifdef CONFIG_DEBUG_VM
13153 - else
13154 - WARN(1, "Buffer overflow detected!\n");
13155 -#endif
13156 +
13157 + if (access_ok(VERIFY_READ, from, n))
13158 + n = __copy_from_user(to, from, n);
13159 + else if (n < INT_MAX) {
13160 + if (!__builtin_constant_p(n))
13161 + check_object_size(to, n, false);
13162 + memset(to, 0, n);
13163 + }
13164 return n;
13165 }
13166
13167 static __always_inline __must_check
13168 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13169 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13170 {
13171 might_fault();
13172
13173 - return _copy_to_user(dst, src, size);
13174 + if (access_ok(VERIFY_WRITE, dst, size))
13175 + size = __copy_to_user(dst, src, size);
13176 + return size;
13177 }
13178
13179 static __always_inline __must_check
13180 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13181 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13182 {
13183 - int ret = 0;
13184 + size_t sz = __compiletime_object_size(dst);
13185 + unsigned ret = 0;
13186
13187 might_fault();
13188 - if (!__builtin_constant_p(size))
13189 - return copy_user_generic(dst, (__force void *)src, size);
13190 +
13191 + if (size > INT_MAX)
13192 + return size;
13193 +
13194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13195 + if (!__access_ok(VERIFY_READ, src, size))
13196 + return size;
13197 +#endif
13198 +
13199 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13200 + copy_from_user_overflow();
13201 + return size;
13202 + }
13203 +
13204 + if (!__builtin_constant_p(size)) {
13205 + check_object_size(dst, size, false);
13206 +
13207 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13208 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13209 + src += PAX_USER_SHADOW_BASE;
13210 +#endif
13211 +
13212 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13213 + }
13214 switch (size) {
13215 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13216 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13217 ret, "b", "b", "=q", 1);
13218 return ret;
13219 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13220 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13221 ret, "w", "w", "=r", 2);
13222 return ret;
13223 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13224 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13225 ret, "l", "k", "=r", 4);
13226 return ret;
13227 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13228 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13229 ret, "q", "", "=r", 8);
13230 return ret;
13231 case 10:
13232 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13233 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13234 ret, "q", "", "=r", 10);
13235 if (unlikely(ret))
13236 return ret;
13237 __get_user_asm(*(u16 *)(8 + (char *)dst),
13238 - (u16 __user *)(8 + (char __user *)src),
13239 + (const u16 __user *)(8 + (const char __user *)src),
13240 ret, "w", "w", "=r", 2);
13241 return ret;
13242 case 16:
13243 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13244 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13245 ret, "q", "", "=r", 16);
13246 if (unlikely(ret))
13247 return ret;
13248 __get_user_asm(*(u64 *)(8 + (char *)dst),
13249 - (u64 __user *)(8 + (char __user *)src),
13250 + (const u64 __user *)(8 + (const char __user *)src),
13251 ret, "q", "", "=r", 8);
13252 return ret;
13253 default:
13254 - return copy_user_generic(dst, (__force void *)src, size);
13255 +
13256 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13257 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13258 + src += PAX_USER_SHADOW_BASE;
13259 +#endif
13260 +
13261 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13262 }
13263 }
13264
13265 static __always_inline __must_check
13266 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13267 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13268 {
13269 - int ret = 0;
13270 + size_t sz = __compiletime_object_size(src);
13271 + unsigned ret = 0;
13272
13273 might_fault();
13274 - if (!__builtin_constant_p(size))
13275 - return copy_user_generic((__force void *)dst, src, size);
13276 +
13277 + if (size > INT_MAX)
13278 + return size;
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 + if (!__access_ok(VERIFY_WRITE, dst, size))
13282 + return size;
13283 +#endif
13284 +
13285 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13286 + copy_to_user_overflow();
13287 + return size;
13288 + }
13289 +
13290 + if (!__builtin_constant_p(size)) {
13291 + check_object_size(src, size, true);
13292 +
13293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13294 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13295 + dst += PAX_USER_SHADOW_BASE;
13296 +#endif
13297 +
13298 + return copy_user_generic((__force_kernel void *)dst, src, size);
13299 + }
13300 switch (size) {
13301 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13302 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13303 ret, "b", "b", "iq", 1);
13304 return ret;
13305 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13306 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13307 ret, "w", "w", "ir", 2);
13308 return ret;
13309 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13310 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13311 ret, "l", "k", "ir", 4);
13312 return ret;
13313 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13314 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13315 ret, "q", "", "er", 8);
13316 return ret;
13317 case 10:
13318 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13319 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13320 ret, "q", "", "er", 10);
13321 if (unlikely(ret))
13322 return ret;
13323 asm("":::"memory");
13324 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13325 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13326 ret, "w", "w", "ir", 2);
13327 return ret;
13328 case 16:
13329 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13330 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13331 ret, "q", "", "er", 16);
13332 if (unlikely(ret))
13333 return ret;
13334 asm("":::"memory");
13335 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13336 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13337 ret, "q", "", "er", 8);
13338 return ret;
13339 default:
13340 - return copy_user_generic((__force void *)dst, src, size);
13341 +
13342 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13343 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13344 + dst += PAX_USER_SHADOW_BASE;
13345 +#endif
13346 +
13347 + return copy_user_generic((__force_kernel void *)dst, src, size);
13348 }
13349 }
13350
13351 static __always_inline __must_check
13352 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13353 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13354 {
13355 - int ret = 0;
13356 + unsigned ret = 0;
13357
13358 might_fault();
13359 - if (!__builtin_constant_p(size))
13360 - return copy_user_generic((__force void *)dst,
13361 - (__force void *)src, size);
13362 +
13363 + if (size > INT_MAX)
13364 + return size;
13365 +
13366 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13367 + if (!__access_ok(VERIFY_READ, src, size))
13368 + return size;
13369 + if (!__access_ok(VERIFY_WRITE, dst, size))
13370 + return size;
13371 +#endif
13372 +
13373 + if (!__builtin_constant_p(size)) {
13374 +
13375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13376 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13377 + src += PAX_USER_SHADOW_BASE;
13378 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13379 + dst += PAX_USER_SHADOW_BASE;
13380 +#endif
13381 +
13382 + return copy_user_generic((__force_kernel void *)dst,
13383 + (__force_kernel const void *)src, size);
13384 + }
13385 switch (size) {
13386 case 1: {
13387 u8 tmp;
13388 - __get_user_asm(tmp, (u8 __user *)src,
13389 + __get_user_asm(tmp, (const u8 __user *)src,
13390 ret, "b", "b", "=q", 1);
13391 if (likely(!ret))
13392 __put_user_asm(tmp, (u8 __user *)dst,
13393 @@ -176,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13394 }
13395 case 2: {
13396 u16 tmp;
13397 - __get_user_asm(tmp, (u16 __user *)src,
13398 + __get_user_asm(tmp, (const u16 __user *)src,
13399 ret, "w", "w", "=r", 2);
13400 if (likely(!ret))
13401 __put_user_asm(tmp, (u16 __user *)dst,
13402 @@ -186,7 +285,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13403
13404 case 4: {
13405 u32 tmp;
13406 - __get_user_asm(tmp, (u32 __user *)src,
13407 + __get_user_asm(tmp, (const u32 __user *)src,
13408 ret, "l", "k", "=r", 4);
13409 if (likely(!ret))
13410 __put_user_asm(tmp, (u32 __user *)dst,
13411 @@ -195,7 +294,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13412 }
13413 case 8: {
13414 u64 tmp;
13415 - __get_user_asm(tmp, (u64 __user *)src,
13416 + __get_user_asm(tmp, (const u64 __user *)src,
13417 ret, "q", "", "=r", 8);
13418 if (likely(!ret))
13419 __put_user_asm(tmp, (u64 __user *)dst,
13420 @@ -203,44 +302,89 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13421 return ret;
13422 }
13423 default:
13424 - return copy_user_generic((__force void *)dst,
13425 - (__force void *)src, size);
13426 +
13427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13428 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13429 + src += PAX_USER_SHADOW_BASE;
13430 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13431 + dst += PAX_USER_SHADOW_BASE;
13432 +#endif
13433 +
13434 + return copy_user_generic((__force_kernel void *)dst,
13435 + (__force_kernel const void *)src, size);
13436 }
13437 }
13438
13439 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13440 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13441 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13442 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13443
13444 static __must_check __always_inline int
13445 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13446 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13447 {
13448 - return copy_user_generic(dst, (__force const void *)src, size);
13449 + if (size > INT_MAX)
13450 + return size;
13451 +
13452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13453 + if (!__access_ok(VERIFY_READ, src, size))
13454 + return size;
13455 +
13456 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13457 + src += PAX_USER_SHADOW_BASE;
13458 +#endif
13459 +
13460 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13461 }
13462
13463 -static __must_check __always_inline int
13464 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13465 +static __must_check __always_inline unsigned long
13466 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13467 {
13468 - return copy_user_generic((__force void *)dst, src, size);
13469 + if (size > INT_MAX)
13470 + return size;
13471 +
13472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13473 + if (!__access_ok(VERIFY_WRITE, dst, size))
13474 + return size;
13475 +
13476 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13477 + dst += PAX_USER_SHADOW_BASE;
13478 +#endif
13479 +
13480 + return copy_user_generic((__force_kernel void *)dst, src, size);
13481 }
13482
13483 -extern long __copy_user_nocache(void *dst, const void __user *src,
13484 - unsigned size, int zerorest);
13485 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13486 + unsigned long size, int zerorest) __size_overflow(3);
13487
13488 -static inline int
13489 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13490 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13491 {
13492 might_sleep();
13493 +
13494 + if (size > INT_MAX)
13495 + return size;
13496 +
13497 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13498 + if (!__access_ok(VERIFY_READ, src, size))
13499 + return size;
13500 +#endif
13501 +
13502 return __copy_user_nocache(dst, src, size, 1);
13503 }
13504
13505 -static inline int
13506 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13507 - unsigned size)
13508 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509 + unsigned long size)
13510 {
13511 + if (size > INT_MAX)
13512 + return size;
13513 +
13514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13515 + if (!__access_ok(VERIFY_READ, src, size))
13516 + return size;
13517 +#endif
13518 +
13519 return __copy_user_nocache(dst, src, size, 0);
13520 }
13521
13522 -unsigned long
13523 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13524 +extern unsigned long
13525 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13526
13527 #endif /* _ASM_X86_UACCESS_64_H */
13528 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13529 index bb05228..d763d5b 100644
13530 --- a/arch/x86/include/asm/vdso.h
13531 +++ b/arch/x86/include/asm/vdso.h
13532 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13533 #define VDSO32_SYMBOL(base, name) \
13534 ({ \
13535 extern const char VDSO32_##name[]; \
13536 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13537 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13538 })
13539 #endif
13540
13541 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
13542 index 5b238981..77fdd78 100644
13543 --- a/arch/x86/include/asm/word-at-a-time.h
13544 +++ b/arch/x86/include/asm/word-at-a-time.h
13545 @@ -11,7 +11,7 @@
13546 * and shift, for example.
13547 */
13548 struct word_at_a_time {
13549 - const unsigned long one_bits, high_bits;
13550 + unsigned long one_bits, high_bits;
13551 };
13552
13553 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
13554 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13555 index c090af1..7e7bf16 100644
13556 --- a/arch/x86/include/asm/x86_init.h
13557 +++ b/arch/x86/include/asm/x86_init.h
13558 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13559 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13560 void (*find_smp_config)(void);
13561 void (*get_smp_config)(unsigned int early);
13562 -};
13563 +} __no_const;
13564
13565 /**
13566 * struct x86_init_resources - platform specific resource related ops
13567 @@ -43,7 +43,7 @@ struct x86_init_resources {
13568 void (*probe_roms)(void);
13569 void (*reserve_resources)(void);
13570 char *(*memory_setup)(void);
13571 -};
13572 +} __no_const;
13573
13574 /**
13575 * struct x86_init_irqs - platform specific interrupt setup
13576 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13577 void (*pre_vector_init)(void);
13578 void (*intr_init)(void);
13579 void (*trap_init)(void);
13580 -};
13581 +} __no_const;
13582
13583 /**
13584 * struct x86_init_oem - oem platform specific customizing functions
13585 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13586 struct x86_init_oem {
13587 void (*arch_setup)(void);
13588 void (*banner)(void);
13589 -};
13590 +} __no_const;
13591
13592 /**
13593 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13594 @@ -77,7 +77,7 @@ struct x86_init_oem {
13595 */
13596 struct x86_init_mapping {
13597 void (*pagetable_reserve)(u64 start, u64 end);
13598 -};
13599 +} __no_const;
13600
13601 /**
13602 * struct x86_init_paging - platform specific paging functions
13603 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13604 struct x86_init_paging {
13605 void (*pagetable_setup_start)(pgd_t *base);
13606 void (*pagetable_setup_done)(pgd_t *base);
13607 -};
13608 +} __no_const;
13609
13610 /**
13611 * struct x86_init_timers - platform specific timer setup
13612 @@ -102,7 +102,7 @@ struct x86_init_timers {
13613 void (*tsc_pre_init)(void);
13614 void (*timer_init)(void);
13615 void (*wallclock_init)(void);
13616 -};
13617 +} __no_const;
13618
13619 /**
13620 * struct x86_init_iommu - platform specific iommu setup
13621 @@ -110,7 +110,7 @@ struct x86_init_timers {
13622 */
13623 struct x86_init_iommu {
13624 int (*iommu_init)(void);
13625 -};
13626 +} __no_const;
13627
13628 /**
13629 * struct x86_init_pci - platform specific pci init functions
13630 @@ -124,7 +124,7 @@ struct x86_init_pci {
13631 int (*init)(void);
13632 void (*init_irq)(void);
13633 void (*fixup_irqs)(void);
13634 -};
13635 +} __no_const;
13636
13637 /**
13638 * struct x86_init_ops - functions for platform specific setup
13639 @@ -140,7 +140,7 @@ struct x86_init_ops {
13640 struct x86_init_timers timers;
13641 struct x86_init_iommu iommu;
13642 struct x86_init_pci pci;
13643 -};
13644 +} __no_const;
13645
13646 /**
13647 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13648 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13649 void (*setup_percpu_clockev)(void);
13650 void (*early_percpu_clock_init)(void);
13651 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13652 -};
13653 +} __no_const;
13654
13655 /**
13656 * struct x86_platform_ops - platform specific runtime functions
13657 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13658 int (*i8042_detect)(void);
13659 void (*save_sched_clock_state)(void);
13660 void (*restore_sched_clock_state)(void);
13661 -};
13662 +} __no_const;
13663
13664 struct pci_dev;
13665
13666 @@ -186,14 +186,14 @@ struct x86_msi_ops {
13667 void (*teardown_msi_irq)(unsigned int irq);
13668 void (*teardown_msi_irqs)(struct pci_dev *dev);
13669 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13670 -};
13671 +} __no_const;
13672
13673 struct x86_io_apic_ops {
13674 void (*init) (void);
13675 unsigned int (*read) (unsigned int apic, unsigned int reg);
13676 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
13677 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
13678 -};
13679 +} __no_const;
13680
13681 extern struct x86_init_ops x86_init;
13682 extern struct x86_cpuinit_ops x86_cpuinit;
13683 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13684 index 8a1b6f9..a29c4e4 100644
13685 --- a/arch/x86/include/asm/xsave.h
13686 +++ b/arch/x86/include/asm/xsave.h
13687 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13688 {
13689 int err;
13690
13691 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13692 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13693 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13694 +#endif
13695 +
13696 /*
13697 * Clear the xsave header first, so that reserved fields are
13698 * initialized to zero.
13699 @@ -93,10 +98,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13700 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13701 {
13702 int err;
13703 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13704 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13705 u32 lmask = mask;
13706 u32 hmask = mask >> 32;
13707
13708 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13709 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13710 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13711 +#endif
13712 +
13713 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13714 "2:\n"
13715 ".section .fixup,\"ax\"\n"
13716 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13717 index 95bf99de..de9235c 100644
13718 --- a/arch/x86/kernel/acpi/sleep.c
13719 +++ b/arch/x86/kernel/acpi/sleep.c
13720 @@ -73,8 +73,12 @@ int acpi_suspend_lowlevel(void)
13721 #else /* CONFIG_64BIT */
13722 #ifdef CONFIG_SMP
13723 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13724 +
13725 + pax_open_kernel();
13726 early_gdt_descr.address =
13727 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13728 + pax_close_kernel();
13729 +
13730 initial_gs = per_cpu_offset(smp_processor_id());
13731 #endif
13732 initial_code = (unsigned long)wakeup_long64;
13733 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13734 index 7261083..5c12053 100644
13735 --- a/arch/x86/kernel/acpi/wakeup_32.S
13736 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13737 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13738 # and restore the stack ... but you need gdt for this to work
13739 movl saved_context_esp, %esp
13740
13741 - movl %cs:saved_magic, %eax
13742 - cmpl $0x12345678, %eax
13743 + cmpl $0x12345678, saved_magic
13744 jne bogus_magic
13745
13746 # jump to place where we left off
13747 - movl saved_eip, %eax
13748 - jmp *%eax
13749 + jmp *(saved_eip)
13750
13751 bogus_magic:
13752 jmp bogus_magic
13753 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13754 index 73ef56c..0238021 100644
13755 --- a/arch/x86/kernel/alternative.c
13756 +++ b/arch/x86/kernel/alternative.c
13757 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13758 */
13759 for (a = start; a < end; a++) {
13760 instr = (u8 *)&a->instr_offset + a->instr_offset;
13761 +
13762 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13763 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13764 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13765 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13766 +#endif
13767 +
13768 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13769 BUG_ON(a->replacementlen > a->instrlen);
13770 BUG_ON(a->instrlen > sizeof(insnbuf));
13771 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13772 for (poff = start; poff < end; poff++) {
13773 u8 *ptr = (u8 *)poff + *poff;
13774
13775 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13776 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13777 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13778 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13779 +#endif
13780 +
13781 if (!*poff || ptr < text || ptr >= text_end)
13782 continue;
13783 /* turn DS segment override prefix into lock prefix */
13784 - if (*ptr == 0x3e)
13785 + if (*ktla_ktva(ptr) == 0x3e)
13786 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13787 };
13788 mutex_unlock(&text_mutex);
13789 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13790 for (poff = start; poff < end; poff++) {
13791 u8 *ptr = (u8 *)poff + *poff;
13792
13793 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13794 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13795 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13796 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13797 +#endif
13798 +
13799 if (!*poff || ptr < text || ptr >= text_end)
13800 continue;
13801 /* turn lock prefix into DS segment override prefix */
13802 - if (*ptr == 0xf0)
13803 + if (*ktla_ktva(ptr) == 0xf0)
13804 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13805 };
13806 mutex_unlock(&text_mutex);
13807 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13808
13809 BUG_ON(p->len > MAX_PATCH_LEN);
13810 /* prep the buffer with the original instructions */
13811 - memcpy(insnbuf, p->instr, p->len);
13812 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13813 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13814 (unsigned long)p->instr, p->len);
13815
13816 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13817 if (smp_alt_once)
13818 free_init_pages("SMP alternatives",
13819 (unsigned long)__smp_locks,
13820 - (unsigned long)__smp_locks_end);
13821 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13822
13823 restart_nmi();
13824 }
13825 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13826 * instructions. And on the local CPU you need to be protected again NMI or MCE
13827 * handlers seeing an inconsistent instruction while you patch.
13828 */
13829 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13830 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13831 size_t len)
13832 {
13833 unsigned long flags;
13834 local_irq_save(flags);
13835 - memcpy(addr, opcode, len);
13836 +
13837 + pax_open_kernel();
13838 + memcpy(ktla_ktva(addr), opcode, len);
13839 sync_core();
13840 + pax_close_kernel();
13841 +
13842 local_irq_restore(flags);
13843 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13844 that causes hangs on some VIA CPUs. */
13845 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13846 */
13847 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13848 {
13849 - unsigned long flags;
13850 - char *vaddr;
13851 + unsigned char *vaddr = ktla_ktva(addr);
13852 struct page *pages[2];
13853 - int i;
13854 + size_t i;
13855
13856 if (!core_kernel_text((unsigned long)addr)) {
13857 - pages[0] = vmalloc_to_page(addr);
13858 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13859 + pages[0] = vmalloc_to_page(vaddr);
13860 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13861 } else {
13862 - pages[0] = virt_to_page(addr);
13863 + pages[0] = virt_to_page(vaddr);
13864 WARN_ON(!PageReserved(pages[0]));
13865 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13866 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13867 }
13868 BUG_ON(!pages[0]);
13869 - local_irq_save(flags);
13870 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13871 - if (pages[1])
13872 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13873 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13874 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13875 - clear_fixmap(FIX_TEXT_POKE0);
13876 - if (pages[1])
13877 - clear_fixmap(FIX_TEXT_POKE1);
13878 - local_flush_tlb();
13879 - sync_core();
13880 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13881 - that causes hangs on some VIA CPUs. */
13882 + text_poke_early(addr, opcode, len);
13883 for (i = 0; i < len; i++)
13884 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13885 - local_irq_restore(flags);
13886 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13887 return addr;
13888 }
13889
13890 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13891 index 39a222e..85a7767 100644
13892 --- a/arch/x86/kernel/apic/apic.c
13893 +++ b/arch/x86/kernel/apic/apic.c
13894 @@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
13895 /*
13896 * Debug level, exported for io_apic.c
13897 */
13898 -unsigned int apic_verbosity;
13899 +int apic_verbosity;
13900
13901 int pic_mode;
13902
13903 @@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13904 apic_write(APIC_ESR, 0);
13905 v1 = apic_read(APIC_ESR);
13906 ack_APIC_irq();
13907 - atomic_inc(&irq_err_count);
13908 + atomic_inc_unchecked(&irq_err_count);
13909
13910 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13911 smp_processor_id(), v0 , v1);
13912 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13913 index 5f0ff59..f9e01bc 100644
13914 --- a/arch/x86/kernel/apic/io_apic.c
13915 +++ b/arch/x86/kernel/apic/io_apic.c
13916 @@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13917 }
13918 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13919
13920 -void lock_vector_lock(void)
13921 +void lock_vector_lock(void) __acquires(vector_lock)
13922 {
13923 /* Used to the online set of cpus does not change
13924 * during assign_irq_vector.
13925 @@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
13926 raw_spin_lock(&vector_lock);
13927 }
13928
13929 -void unlock_vector_lock(void)
13930 +void unlock_vector_lock(void) __releases(vector_lock)
13931 {
13932 raw_spin_unlock(&vector_lock);
13933 }
13934 @@ -2369,7 +2369,7 @@ static void ack_apic_edge(struct irq_data *data)
13935 ack_APIC_irq();
13936 }
13937
13938 -atomic_t irq_mis_count;
13939 +atomic_unchecked_t irq_mis_count;
13940
13941 #ifdef CONFIG_GENERIC_PENDING_IRQ
13942 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
13943 @@ -2510,7 +2510,7 @@ static void ack_apic_level(struct irq_data *data)
13944 * at the cpu.
13945 */
13946 if (!(v & (1 << (i & 0x1f)))) {
13947 - atomic_inc(&irq_mis_count);
13948 + atomic_inc_unchecked(&irq_mis_count);
13949
13950 eoi_ioapic_irq(irq, cfg);
13951 }
13952 diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
13953 index 3fe9866..6abf259 100644
13954 --- a/arch/x86/kernel/apic/probe_64.c
13955 +++ b/arch/x86/kernel/apic/probe_64.c
13956 @@ -50,7 +50,7 @@ void __init default_setup_apic_routing(void)
13957
13958 if (is_vsmp_box()) {
13959 /* need to update phys_pkg_id */
13960 - apic->phys_pkg_id = apicid_phys_pkg_id;
13961 + *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
13962 }
13963 }
13964
13965 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13966 index 07b0c0d..1df6f42 100644
13967 --- a/arch/x86/kernel/apm_32.c
13968 +++ b/arch/x86/kernel/apm_32.c
13969 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13970 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13971 * even though they are called in protected mode.
13972 */
13973 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13974 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13975 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13976
13977 static const char driver_version[] = "1.16ac"; /* no spaces */
13978 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13979 BUG_ON(cpu != 0);
13980 gdt = get_cpu_gdt_table(cpu);
13981 save_desc_40 = gdt[0x40 / 8];
13982 +
13983 + pax_open_kernel();
13984 gdt[0x40 / 8] = bad_bios_desc;
13985 + pax_close_kernel();
13986
13987 apm_irq_save(flags);
13988 APM_DO_SAVE_SEGS;
13989 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13990 &call->esi);
13991 APM_DO_RESTORE_SEGS;
13992 apm_irq_restore(flags);
13993 +
13994 + pax_open_kernel();
13995 gdt[0x40 / 8] = save_desc_40;
13996 + pax_close_kernel();
13997 +
13998 put_cpu();
13999
14000 return call->eax & 0xff;
14001 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14002 BUG_ON(cpu != 0);
14003 gdt = get_cpu_gdt_table(cpu);
14004 save_desc_40 = gdt[0x40 / 8];
14005 +
14006 + pax_open_kernel();
14007 gdt[0x40 / 8] = bad_bios_desc;
14008 + pax_close_kernel();
14009
14010 apm_irq_save(flags);
14011 APM_DO_SAVE_SEGS;
14012 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14013 &call->eax);
14014 APM_DO_RESTORE_SEGS;
14015 apm_irq_restore(flags);
14016 +
14017 + pax_open_kernel();
14018 gdt[0x40 / 8] = save_desc_40;
14019 + pax_close_kernel();
14020 +
14021 put_cpu();
14022 return error;
14023 }
14024 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14025 * code to that CPU.
14026 */
14027 gdt = get_cpu_gdt_table(0);
14028 +
14029 + pax_open_kernel();
14030 set_desc_base(&gdt[APM_CS >> 3],
14031 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14032 set_desc_base(&gdt[APM_CS_16 >> 3],
14033 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14034 set_desc_base(&gdt[APM_DS >> 3],
14035 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14036 + pax_close_kernel();
14037
14038 proc_create("apm", 0, NULL, &apm_file_ops);
14039
14040 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14041 index 68de2dc..1f3c720 100644
14042 --- a/arch/x86/kernel/asm-offsets.c
14043 +++ b/arch/x86/kernel/asm-offsets.c
14044 @@ -33,6 +33,8 @@ void common(void) {
14045 OFFSET(TI_status, thread_info, status);
14046 OFFSET(TI_addr_limit, thread_info, addr_limit);
14047 OFFSET(TI_preempt_count, thread_info, preempt_count);
14048 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14049 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14050
14051 BLANK();
14052 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14053 @@ -53,8 +55,26 @@ void common(void) {
14054 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14055 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14056 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14057 +
14058 +#ifdef CONFIG_PAX_KERNEXEC
14059 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14060 #endif
14061
14062 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14063 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14064 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14065 +#ifdef CONFIG_X86_64
14066 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14067 +#endif
14068 +#endif
14069 +
14070 +#endif
14071 +
14072 + BLANK();
14073 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14074 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14075 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14076 +
14077 #ifdef CONFIG_XEN
14078 BLANK();
14079 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14080 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14081 index 1b4754f..fbb4227 100644
14082 --- a/arch/x86/kernel/asm-offsets_64.c
14083 +++ b/arch/x86/kernel/asm-offsets_64.c
14084 @@ -76,6 +76,7 @@ int main(void)
14085 BLANK();
14086 #undef ENTRY
14087
14088 + DEFINE(TSS_size, sizeof(struct tss_struct));
14089 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14090 BLANK();
14091
14092 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14093 index 6ab6aa2..8f71507 100644
14094 --- a/arch/x86/kernel/cpu/Makefile
14095 +++ b/arch/x86/kernel/cpu/Makefile
14096 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14097 CFLAGS_REMOVE_perf_event.o = -pg
14098 endif
14099
14100 -# Make sure load_percpu_segment has no stackprotector
14101 -nostackp := $(call cc-option, -fno-stack-protector)
14102 -CFLAGS_common.o := $(nostackp)
14103 -
14104 obj-y := intel_cacheinfo.o scattered.o topology.o
14105 obj-y += proc.o capflags.o powerflags.o common.o
14106 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14107 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14108 index 146bb62..ac9c74a 100644
14109 --- a/arch/x86/kernel/cpu/amd.c
14110 +++ b/arch/x86/kernel/cpu/amd.c
14111 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14112 unsigned int size)
14113 {
14114 /* AMD errata T13 (order #21922) */
14115 - if ((c->x86 == 6)) {
14116 + if (c->x86 == 6) {
14117 /* Duron Rev A0 */
14118 if (c->x86_model == 3 && c->x86_mask == 0)
14119 size = 64;
14120 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14121 index cc9c146..2373d59 100644
14122 --- a/arch/x86/kernel/cpu/common.c
14123 +++ b/arch/x86/kernel/cpu/common.c
14124 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14125
14126 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14127
14128 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14129 -#ifdef CONFIG_X86_64
14130 - /*
14131 - * We need valid kernel segments for data and code in long mode too
14132 - * IRET will check the segment types kkeil 2000/10/28
14133 - * Also sysret mandates a special GDT layout
14134 - *
14135 - * TLS descriptors are currently at a different place compared to i386.
14136 - * Hopefully nobody expects them at a fixed place (Wine?)
14137 - */
14138 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14139 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14140 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14141 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14142 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14143 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14144 -#else
14145 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14146 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14147 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14148 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14149 - /*
14150 - * Segments used for calling PnP BIOS have byte granularity.
14151 - * They code segments and data segments have fixed 64k limits,
14152 - * the transfer segment sizes are set at run time.
14153 - */
14154 - /* 32-bit code */
14155 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14156 - /* 16-bit code */
14157 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14158 - /* 16-bit data */
14159 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14160 - /* 16-bit data */
14161 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14162 - /* 16-bit data */
14163 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14164 - /*
14165 - * The APM segments have byte granularity and their bases
14166 - * are set at run time. All have 64k limits.
14167 - */
14168 - /* 32-bit code */
14169 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14170 - /* 16-bit code */
14171 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14172 - /* data */
14173 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14174 -
14175 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14176 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14177 - GDT_STACK_CANARY_INIT
14178 -#endif
14179 -} };
14180 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14181 -
14182 static int __init x86_xsave_setup(char *s)
14183 {
14184 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14185 @@ -376,7 +322,7 @@ void switch_to_new_gdt(int cpu)
14186 {
14187 struct desc_ptr gdt_descr;
14188
14189 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14190 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14191 gdt_descr.size = GDT_SIZE - 1;
14192 load_gdt(&gdt_descr);
14193 /* Reload the per-cpu base */
14194 @@ -843,6 +789,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14195 /* Filter out anything that depends on CPUID levels we don't have */
14196 filter_cpuid_features(c, true);
14197
14198 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14199 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14200 +#endif
14201 +
14202 /* If the model name is still unset, do table lookup. */
14203 if (!c->x86_model_id[0]) {
14204 const char *p;
14205 @@ -1023,10 +973,12 @@ static __init int setup_disablecpuid(char *arg)
14206 }
14207 __setup("clearcpuid=", setup_disablecpuid);
14208
14209 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14210 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14211 +
14212 #ifdef CONFIG_X86_64
14213 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14214 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14215 - (unsigned long) nmi_idt_table };
14216 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14217
14218 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14219 irq_stack_union) __aligned(PAGE_SIZE);
14220 @@ -1040,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14221 EXPORT_PER_CPU_SYMBOL(current_task);
14222
14223 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14224 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14225 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14226 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14227
14228 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14229 @@ -1134,7 +1086,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14230 {
14231 memset(regs, 0, sizeof(struct pt_regs));
14232 regs->fs = __KERNEL_PERCPU;
14233 - regs->gs = __KERNEL_STACK_CANARY;
14234 + savesegment(gs, regs->gs);
14235
14236 return regs;
14237 }
14238 @@ -1189,7 +1141,7 @@ void __cpuinit cpu_init(void)
14239 int i;
14240
14241 cpu = stack_smp_processor_id();
14242 - t = &per_cpu(init_tss, cpu);
14243 + t = init_tss + cpu;
14244 oist = &per_cpu(orig_ist, cpu);
14245
14246 #ifdef CONFIG_NUMA
14247 @@ -1215,7 +1167,7 @@ void __cpuinit cpu_init(void)
14248 switch_to_new_gdt(cpu);
14249 loadsegment(fs, 0);
14250
14251 - load_idt((const struct desc_ptr *)&idt_descr);
14252 + load_idt(&idt_descr);
14253
14254 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14255 syscall_init();
14256 @@ -1224,7 +1176,6 @@ void __cpuinit cpu_init(void)
14257 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14258 barrier();
14259
14260 - x86_configure_nx();
14261 if (cpu != 0)
14262 enable_x2apic();
14263
14264 @@ -1280,7 +1231,7 @@ void __cpuinit cpu_init(void)
14265 {
14266 int cpu = smp_processor_id();
14267 struct task_struct *curr = current;
14268 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14269 + struct tss_struct *t = init_tss + cpu;
14270 struct thread_struct *thread = &curr->thread;
14271
14272 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14273 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14274 index 3e6ff6c..54b4992 100644
14275 --- a/arch/x86/kernel/cpu/intel.c
14276 +++ b/arch/x86/kernel/cpu/intel.c
14277 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14278 * Update the IDT descriptor and reload the IDT so that
14279 * it uses the read-only mapped virtual address.
14280 */
14281 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14282 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14283 load_idt(&idt_descr);
14284 }
14285 #endif
14286 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14287 index c46ed49..5dc0a53 100644
14288 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14289 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14290 @@ -42,6 +42,7 @@
14291 #include <asm/processor.h>
14292 #include <asm/mce.h>
14293 #include <asm/msr.h>
14294 +#include <asm/local.h>
14295
14296 #include "mce-internal.h"
14297
14298 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14299 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14300 m->cs, m->ip);
14301
14302 - if (m->cs == __KERNEL_CS)
14303 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14304 print_symbol("{%s}", m->ip);
14305 pr_cont("\n");
14306 }
14307 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14308
14309 #define PANIC_TIMEOUT 5 /* 5 seconds */
14310
14311 -static atomic_t mce_paniced;
14312 +static atomic_unchecked_t mce_paniced;
14313
14314 static int fake_panic;
14315 -static atomic_t mce_fake_paniced;
14316 +static atomic_unchecked_t mce_fake_paniced;
14317
14318 /* Panic in progress. Enable interrupts and wait for final IPI */
14319 static void wait_for_panic(void)
14320 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14321 /*
14322 * Make sure only one CPU runs in machine check panic
14323 */
14324 - if (atomic_inc_return(&mce_paniced) > 1)
14325 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14326 wait_for_panic();
14327 barrier();
14328
14329 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14330 console_verbose();
14331 } else {
14332 /* Don't log too much for fake panic */
14333 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14334 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14335 return;
14336 }
14337 /* First print corrected ones that are still unlogged */
14338 @@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
14339 * might have been modified by someone else.
14340 */
14341 rmb();
14342 - if (atomic_read(&mce_paniced))
14343 + if (atomic_read_unchecked(&mce_paniced))
14344 wait_for_panic();
14345 if (!monarch_timeout)
14346 goto out;
14347 @@ -1581,7 +1582,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14348 }
14349
14350 /* Call the installed machine check handler for this CPU setup. */
14351 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14352 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14353 unexpected_machine_check;
14354
14355 /*
14356 @@ -1604,7 +1605,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14357 return;
14358 }
14359
14360 + pax_open_kernel();
14361 machine_check_vector = do_machine_check;
14362 + pax_close_kernel();
14363
14364 __mcheck_cpu_init_generic();
14365 __mcheck_cpu_init_vendor(c);
14366 @@ -1618,7 +1621,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14367 */
14368
14369 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14370 -static int mce_chrdev_open_count; /* #times opened */
14371 +static local_t mce_chrdev_open_count; /* #times opened */
14372 static int mce_chrdev_open_exclu; /* already open exclusive? */
14373
14374 static int mce_chrdev_open(struct inode *inode, struct file *file)
14375 @@ -1626,7 +1629,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14376 spin_lock(&mce_chrdev_state_lock);
14377
14378 if (mce_chrdev_open_exclu ||
14379 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14380 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14381 spin_unlock(&mce_chrdev_state_lock);
14382
14383 return -EBUSY;
14384 @@ -1634,7 +1637,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14385
14386 if (file->f_flags & O_EXCL)
14387 mce_chrdev_open_exclu = 1;
14388 - mce_chrdev_open_count++;
14389 + local_inc(&mce_chrdev_open_count);
14390
14391 spin_unlock(&mce_chrdev_state_lock);
14392
14393 @@ -1645,7 +1648,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14394 {
14395 spin_lock(&mce_chrdev_state_lock);
14396
14397 - mce_chrdev_open_count--;
14398 + local_dec(&mce_chrdev_open_count);
14399 mce_chrdev_open_exclu = 0;
14400
14401 spin_unlock(&mce_chrdev_state_lock);
14402 @@ -2370,7 +2373,7 @@ struct dentry *mce_get_debugfs_dir(void)
14403 static void mce_reset(void)
14404 {
14405 cpu_missing = 0;
14406 - atomic_set(&mce_fake_paniced, 0);
14407 + atomic_set_unchecked(&mce_fake_paniced, 0);
14408 atomic_set(&mce_executing, 0);
14409 atomic_set(&mce_callin, 0);
14410 atomic_set(&global_nwo, 0);
14411 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14412 index 2d5454c..51987eb 100644
14413 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14414 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14415 @@ -11,6 +11,7 @@
14416 #include <asm/processor.h>
14417 #include <asm/mce.h>
14418 #include <asm/msr.h>
14419 +#include <asm/pgtable.h>
14420
14421 /* By default disabled */
14422 int mce_p5_enabled __read_mostly;
14423 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14424 if (!cpu_has(c, X86_FEATURE_MCE))
14425 return;
14426
14427 + pax_open_kernel();
14428 machine_check_vector = pentium_machine_check;
14429 + pax_close_kernel();
14430 /* Make sure the vector pointer is visible before we enable MCEs: */
14431 wmb();
14432
14433 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14434 index 2d7998f..17c9de1 100644
14435 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14436 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14437 @@ -10,6 +10,7 @@
14438 #include <asm/processor.h>
14439 #include <asm/mce.h>
14440 #include <asm/msr.h>
14441 +#include <asm/pgtable.h>
14442
14443 /* Machine check handler for WinChip C6: */
14444 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14445 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14446 {
14447 u32 lo, hi;
14448
14449 + pax_open_kernel();
14450 machine_check_vector = winchip_machine_check;
14451 + pax_close_kernel();
14452 /* Make sure the vector pointer is visible before we enable MCEs: */
14453 wmb();
14454
14455 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14456 index 6b96110..0da73eb 100644
14457 --- a/arch/x86/kernel/cpu/mtrr/main.c
14458 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14459 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14460 u64 size_or_mask, size_and_mask;
14461 static bool mtrr_aps_delayed_init;
14462
14463 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14464 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14465
14466 const struct mtrr_ops *mtrr_if;
14467
14468 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14469 index df5e41f..816c719 100644
14470 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14471 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14472 @@ -25,7 +25,7 @@ struct mtrr_ops {
14473 int (*validate_add_page)(unsigned long base, unsigned long size,
14474 unsigned int type);
14475 int (*have_wrcomb)(void);
14476 -};
14477 +} __do_const;
14478
14479 extern int generic_get_free_region(unsigned long base, unsigned long size,
14480 int replace_reg);
14481 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14482 index c4706cf..264b0f7 100644
14483 --- a/arch/x86/kernel/cpu/perf_event.c
14484 +++ b/arch/x86/kernel/cpu/perf_event.c
14485 @@ -1837,7 +1837,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14486 break;
14487
14488 perf_callchain_store(entry, frame.return_address);
14489 - fp = frame.next_frame;
14490 + fp = (const void __force_user *)frame.next_frame;
14491 }
14492 }
14493
14494 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
14495 index 187c294..28a069c 100644
14496 --- a/arch/x86/kernel/cpu/perf_event_intel.c
14497 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
14498 @@ -1811,10 +1811,10 @@ __init int intel_pmu_init(void)
14499 * v2 and above have a perf capabilities MSR
14500 */
14501 if (version > 1) {
14502 - u64 capabilities;
14503 + u64 capabilities = x86_pmu.intel_cap.capabilities;
14504
14505 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
14506 - x86_pmu.intel_cap.capabilities = capabilities;
14507 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
14508 + x86_pmu.intel_cap.capabilities = capabilities;
14509 }
14510
14511 intel_ds_init();
14512 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14513 index 13ad899..f642b9a 100644
14514 --- a/arch/x86/kernel/crash.c
14515 +++ b/arch/x86/kernel/crash.c
14516 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14517 {
14518 #ifdef CONFIG_X86_32
14519 struct pt_regs fixed_regs;
14520 -#endif
14521
14522 -#ifdef CONFIG_X86_32
14523 - if (!user_mode_vm(regs)) {
14524 + if (!user_mode(regs)) {
14525 crash_fixup_ss_esp(&fixed_regs, regs);
14526 regs = &fixed_regs;
14527 }
14528 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14529 index 37250fe..bf2ec74 100644
14530 --- a/arch/x86/kernel/doublefault_32.c
14531 +++ b/arch/x86/kernel/doublefault_32.c
14532 @@ -11,7 +11,7 @@
14533
14534 #define DOUBLEFAULT_STACKSIZE (1024)
14535 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14536 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14537 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14538
14539 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14540
14541 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14542 unsigned long gdt, tss;
14543
14544 store_gdt(&gdt_desc);
14545 - gdt = gdt_desc.address;
14546 + gdt = (unsigned long)gdt_desc.address;
14547
14548 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14549
14550 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14551 /* 0x2 bit is always set */
14552 .flags = X86_EFLAGS_SF | 0x2,
14553 .sp = STACK_START,
14554 - .es = __USER_DS,
14555 + .es = __KERNEL_DS,
14556 .cs = __KERNEL_CS,
14557 .ss = __KERNEL_DS,
14558 - .ds = __USER_DS,
14559 + .ds = __KERNEL_DS,
14560 .fs = __KERNEL_PERCPU,
14561
14562 .__cr3 = __pa_nodebug(swapper_pg_dir),
14563 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14564 index 571246d..81f335c 100644
14565 --- a/arch/x86/kernel/dumpstack.c
14566 +++ b/arch/x86/kernel/dumpstack.c
14567 @@ -2,6 +2,9 @@
14568 * Copyright (C) 1991, 1992 Linus Torvalds
14569 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14570 */
14571 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14572 +#define __INCLUDED_BY_HIDESYM 1
14573 +#endif
14574 #include <linux/kallsyms.h>
14575 #include <linux/kprobes.h>
14576 #include <linux/uaccess.h>
14577 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14578 static void
14579 print_ftrace_graph_addr(unsigned long addr, void *data,
14580 const struct stacktrace_ops *ops,
14581 - struct thread_info *tinfo, int *graph)
14582 + struct task_struct *task, int *graph)
14583 {
14584 - struct task_struct *task;
14585 unsigned long ret_addr;
14586 int index;
14587
14588 if (addr != (unsigned long)return_to_handler)
14589 return;
14590
14591 - task = tinfo->task;
14592 index = task->curr_ret_stack;
14593
14594 if (!task->ret_stack || index < *graph)
14595 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14596 static inline void
14597 print_ftrace_graph_addr(unsigned long addr, void *data,
14598 const struct stacktrace_ops *ops,
14599 - struct thread_info *tinfo, int *graph)
14600 + struct task_struct *task, int *graph)
14601 { }
14602 #endif
14603
14604 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14605 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14606 */
14607
14608 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14609 - void *p, unsigned int size, void *end)
14610 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14611 {
14612 - void *t = tinfo;
14613 if (end) {
14614 if (p < end && p >= (end-THREAD_SIZE))
14615 return 1;
14616 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14617 }
14618
14619 unsigned long
14620 -print_context_stack(struct thread_info *tinfo,
14621 +print_context_stack(struct task_struct *task, void *stack_start,
14622 unsigned long *stack, unsigned long bp,
14623 const struct stacktrace_ops *ops, void *data,
14624 unsigned long *end, int *graph)
14625 {
14626 struct stack_frame *frame = (struct stack_frame *)bp;
14627
14628 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14629 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14630 unsigned long addr;
14631
14632 addr = *stack;
14633 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14634 } else {
14635 ops->address(data, addr, 0);
14636 }
14637 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14638 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14639 }
14640 stack++;
14641 }
14642 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14643 EXPORT_SYMBOL_GPL(print_context_stack);
14644
14645 unsigned long
14646 -print_context_stack_bp(struct thread_info *tinfo,
14647 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14648 unsigned long *stack, unsigned long bp,
14649 const struct stacktrace_ops *ops, void *data,
14650 unsigned long *end, int *graph)
14651 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14652 struct stack_frame *frame = (struct stack_frame *)bp;
14653 unsigned long *ret_addr = &frame->return_address;
14654
14655 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14656 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14657 unsigned long addr = *ret_addr;
14658
14659 if (!__kernel_text_address(addr))
14660 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14661 ops->address(data, addr, 1);
14662 frame = frame->next_frame;
14663 ret_addr = &frame->return_address;
14664 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14665 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14666 }
14667
14668 return (unsigned long)frame;
14669 @@ -189,7 +188,7 @@ void dump_stack(void)
14670
14671 bp = stack_frame(current, NULL);
14672 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14673 - current->pid, current->comm, print_tainted(),
14674 + task_pid_nr(current), current->comm, print_tainted(),
14675 init_utsname()->release,
14676 (int)strcspn(init_utsname()->version, " "),
14677 init_utsname()->version);
14678 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14679 }
14680 EXPORT_SYMBOL_GPL(oops_begin);
14681
14682 +extern void gr_handle_kernel_exploit(void);
14683 +
14684 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 {
14686 if (regs && kexec_should_crash(current))
14687 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14688 panic("Fatal exception in interrupt");
14689 if (panic_on_oops)
14690 panic("Fatal exception");
14691 - do_exit(signr);
14692 +
14693 + gr_handle_kernel_exploit();
14694 +
14695 + do_group_exit(signr);
14696 }
14697
14698 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14699 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14700
14701 show_regs(regs);
14702 #ifdef CONFIG_X86_32
14703 - if (user_mode_vm(regs)) {
14704 + if (user_mode(regs)) {
14705 sp = regs->sp;
14706 ss = regs->ss & 0xffff;
14707 } else {
14708 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14709 unsigned long flags = oops_begin();
14710 int sig = SIGSEGV;
14711
14712 - if (!user_mode_vm(regs))
14713 + if (!user_mode(regs))
14714 report_bug(regs->ip, regs);
14715
14716 if (__die(str, regs, err))
14717 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14718 index e0b1d78..a8ade5e 100644
14719 --- a/arch/x86/kernel/dumpstack_32.c
14720 +++ b/arch/x86/kernel/dumpstack_32.c
14721 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14722 bp = stack_frame(task, regs);
14723
14724 for (;;) {
14725 - struct thread_info *context;
14726 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14727
14728 - context = (struct thread_info *)
14729 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14730 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14731 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14732
14733 - stack = (unsigned long *)context->previous_esp;
14734 - if (!stack)
14735 + if (stack_start == task_stack_page(task))
14736 break;
14737 + stack = *(unsigned long **)stack_start;
14738 if (ops->stack(data, "IRQ") < 0)
14739 break;
14740 touch_nmi_watchdog();
14741 @@ -87,7 +85,7 @@ void show_regs(struct pt_regs *regs)
14742 int i;
14743
14744 print_modules();
14745 - __show_regs(regs, !user_mode_vm(regs));
14746 + __show_regs(regs, !user_mode(regs));
14747
14748 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14749 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14750 @@ -96,21 +94,22 @@ void show_regs(struct pt_regs *regs)
14751 * When in-kernel, we also print out the stack and code at the
14752 * time of the fault..
14753 */
14754 - if (!user_mode_vm(regs)) {
14755 + if (!user_mode(regs)) {
14756 unsigned int code_prologue = code_bytes * 43 / 64;
14757 unsigned int code_len = code_bytes;
14758 unsigned char c;
14759 u8 *ip;
14760 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14761
14762 printk(KERN_EMERG "Stack:\n");
14763 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14764
14765 printk(KERN_EMERG "Code: ");
14766
14767 - ip = (u8 *)regs->ip - code_prologue;
14768 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14769 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14770 /* try starting at IP */
14771 - ip = (u8 *)regs->ip;
14772 + ip = (u8 *)regs->ip + cs_base;
14773 code_len = code_len - code_prologue + 1;
14774 }
14775 for (i = 0; i < code_len; i++, ip++) {
14776 @@ -119,7 +118,7 @@ void show_regs(struct pt_regs *regs)
14777 printk(KERN_CONT " Bad EIP value.");
14778 break;
14779 }
14780 - if (ip == (u8 *)regs->ip)
14781 + if (ip == (u8 *)regs->ip + cs_base)
14782 printk(KERN_CONT "<%02x> ", c);
14783 else
14784 printk(KERN_CONT "%02x ", c);
14785 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14786 {
14787 unsigned short ud2;
14788
14789 + ip = ktla_ktva(ip);
14790 if (ip < PAGE_OFFSET)
14791 return 0;
14792 if (probe_kernel_address((unsigned short *)ip, ud2))
14793 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14794
14795 return ud2 == 0x0b0f;
14796 }
14797 +
14798 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14799 +void pax_check_alloca(unsigned long size)
14800 +{
14801 + unsigned long sp = (unsigned long)&sp, stack_left;
14802 +
14803 + /* all kernel stacks are of the same size */
14804 + stack_left = sp & (THREAD_SIZE - 1);
14805 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14806 +}
14807 +EXPORT_SYMBOL(pax_check_alloca);
14808 +#endif
14809 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14810 index 791b761..2ab6e33 100644
14811 --- a/arch/x86/kernel/dumpstack_64.c
14812 +++ b/arch/x86/kernel/dumpstack_64.c
14813 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14814 unsigned long *irq_stack_end =
14815 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14816 unsigned used = 0;
14817 - struct thread_info *tinfo;
14818 int graph = 0;
14819 unsigned long dummy;
14820 + void *stack_start;
14821
14822 if (!task)
14823 task = current;
14824 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14825 * current stack address. If the stacks consist of nested
14826 * exceptions
14827 */
14828 - tinfo = task_thread_info(task);
14829 for (;;) {
14830 char *id;
14831 unsigned long *estack_end;
14832 +
14833 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14834 &used, &id);
14835
14836 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14837 if (ops->stack(data, id) < 0)
14838 break;
14839
14840 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14841 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14842 data, estack_end, &graph);
14843 ops->stack(data, "<EOE>");
14844 /*
14845 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14846 * second-to-last pointer (index -2 to end) in the
14847 * exception stack:
14848 */
14849 + if ((u16)estack_end[-1] != __KERNEL_DS)
14850 + goto out;
14851 stack = (unsigned long *) estack_end[-2];
14852 continue;
14853 }
14854 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14855 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14856 if (ops->stack(data, "IRQ") < 0)
14857 break;
14858 - bp = ops->walk_stack(tinfo, stack, bp,
14859 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14860 ops, data, irq_stack_end, &graph);
14861 /*
14862 * We link to the next stack (which would be
14863 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14864 /*
14865 * This handles the process stack:
14866 */
14867 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14868 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14869 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14870 +out:
14871 put_cpu();
14872 }
14873 EXPORT_SYMBOL(dump_trace);
14874 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14875
14876 return ud2 == 0x0b0f;
14877 }
14878 +
14879 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14880 +void pax_check_alloca(unsigned long size)
14881 +{
14882 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14883 + unsigned cpu, used;
14884 + char *id;
14885 +
14886 + /* check the process stack first */
14887 + stack_start = (unsigned long)task_stack_page(current);
14888 + stack_end = stack_start + THREAD_SIZE;
14889 + if (likely(stack_start <= sp && sp < stack_end)) {
14890 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14891 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14892 + return;
14893 + }
14894 +
14895 + cpu = get_cpu();
14896 +
14897 + /* check the irq stacks */
14898 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14899 + stack_start = stack_end - IRQ_STACK_SIZE;
14900 + if (stack_start <= sp && sp < stack_end) {
14901 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14902 + put_cpu();
14903 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14904 + return;
14905 + }
14906 +
14907 + /* check the exception stacks */
14908 + used = 0;
14909 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14910 + stack_start = stack_end - EXCEPTION_STKSZ;
14911 + if (stack_end && stack_start <= sp && sp < stack_end) {
14912 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14913 + put_cpu();
14914 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14915 + return;
14916 + }
14917 +
14918 + put_cpu();
14919 +
14920 + /* unknown stack */
14921 + BUG();
14922 +}
14923 +EXPORT_SYMBOL(pax_check_alloca);
14924 +#endif
14925 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14926 index 9b9f18b..9fcaa04 100644
14927 --- a/arch/x86/kernel/early_printk.c
14928 +++ b/arch/x86/kernel/early_printk.c
14929 @@ -7,6 +7,7 @@
14930 #include <linux/pci_regs.h>
14931 #include <linux/pci_ids.h>
14932 #include <linux/errno.h>
14933 +#include <linux/sched.h>
14934 #include <asm/io.h>
14935 #include <asm/processor.h>
14936 #include <asm/fcntl.h>
14937 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14938 index 623f288..8bdd78a 100644
14939 --- a/arch/x86/kernel/entry_32.S
14940 +++ b/arch/x86/kernel/entry_32.S
14941 @@ -176,13 +176,153 @@
14942 /*CFI_REL_OFFSET gs, PT_GS*/
14943 .endm
14944 .macro SET_KERNEL_GS reg
14945 +
14946 +#ifdef CONFIG_CC_STACKPROTECTOR
14947 movl $(__KERNEL_STACK_CANARY), \reg
14948 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14949 + movl $(__USER_DS), \reg
14950 +#else
14951 + xorl \reg, \reg
14952 +#endif
14953 +
14954 movl \reg, %gs
14955 .endm
14956
14957 #endif /* CONFIG_X86_32_LAZY_GS */
14958
14959 -.macro SAVE_ALL
14960 +.macro pax_enter_kernel
14961 +#ifdef CONFIG_PAX_KERNEXEC
14962 + call pax_enter_kernel
14963 +#endif
14964 +.endm
14965 +
14966 +.macro pax_exit_kernel
14967 +#ifdef CONFIG_PAX_KERNEXEC
14968 + call pax_exit_kernel
14969 +#endif
14970 +.endm
14971 +
14972 +#ifdef CONFIG_PAX_KERNEXEC
14973 +ENTRY(pax_enter_kernel)
14974 +#ifdef CONFIG_PARAVIRT
14975 + pushl %eax
14976 + pushl %ecx
14977 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14978 + mov %eax, %esi
14979 +#else
14980 + mov %cr0, %esi
14981 +#endif
14982 + bts $16, %esi
14983 + jnc 1f
14984 + mov %cs, %esi
14985 + cmp $__KERNEL_CS, %esi
14986 + jz 3f
14987 + ljmp $__KERNEL_CS, $3f
14988 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14989 +2:
14990 +#ifdef CONFIG_PARAVIRT
14991 + mov %esi, %eax
14992 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14993 +#else
14994 + mov %esi, %cr0
14995 +#endif
14996 +3:
14997 +#ifdef CONFIG_PARAVIRT
14998 + popl %ecx
14999 + popl %eax
15000 +#endif
15001 + ret
15002 +ENDPROC(pax_enter_kernel)
15003 +
15004 +ENTRY(pax_exit_kernel)
15005 +#ifdef CONFIG_PARAVIRT
15006 + pushl %eax
15007 + pushl %ecx
15008 +#endif
15009 + mov %cs, %esi
15010 + cmp $__KERNEXEC_KERNEL_CS, %esi
15011 + jnz 2f
15012 +#ifdef CONFIG_PARAVIRT
15013 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15014 + mov %eax, %esi
15015 +#else
15016 + mov %cr0, %esi
15017 +#endif
15018 + btr $16, %esi
15019 + ljmp $__KERNEL_CS, $1f
15020 +1:
15021 +#ifdef CONFIG_PARAVIRT
15022 + mov %esi, %eax
15023 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15024 +#else
15025 + mov %esi, %cr0
15026 +#endif
15027 +2:
15028 +#ifdef CONFIG_PARAVIRT
15029 + popl %ecx
15030 + popl %eax
15031 +#endif
15032 + ret
15033 +ENDPROC(pax_exit_kernel)
15034 +#endif
15035 +
15036 +.macro pax_erase_kstack
15037 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15038 + call pax_erase_kstack
15039 +#endif
15040 +.endm
15041 +
15042 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15043 +/*
15044 + * ebp: thread_info
15045 + */
15046 +ENTRY(pax_erase_kstack)
15047 + pushl %edi
15048 + pushl %ecx
15049 + pushl %eax
15050 +
15051 + mov TI_lowest_stack(%ebp), %edi
15052 + mov $-0xBEEF, %eax
15053 + std
15054 +
15055 +1: mov %edi, %ecx
15056 + and $THREAD_SIZE_asm - 1, %ecx
15057 + shr $2, %ecx
15058 + repne scasl
15059 + jecxz 2f
15060 +
15061 + cmp $2*16, %ecx
15062 + jc 2f
15063 +
15064 + mov $2*16, %ecx
15065 + repe scasl
15066 + jecxz 2f
15067 + jne 1b
15068 +
15069 +2: cld
15070 + mov %esp, %ecx
15071 + sub %edi, %ecx
15072 +
15073 + cmp $THREAD_SIZE_asm, %ecx
15074 + jb 3f
15075 + ud2
15076 +3:
15077 +
15078 + shr $2, %ecx
15079 + rep stosl
15080 +
15081 + mov TI_task_thread_sp0(%ebp), %edi
15082 + sub $128, %edi
15083 + mov %edi, TI_lowest_stack(%ebp)
15084 +
15085 + popl %eax
15086 + popl %ecx
15087 + popl %edi
15088 + ret
15089 +ENDPROC(pax_erase_kstack)
15090 +#endif
15091 +
15092 +.macro __SAVE_ALL _DS
15093 cld
15094 PUSH_GS
15095 pushl_cfi %fs
15096 @@ -205,7 +345,7 @@
15097 CFI_REL_OFFSET ecx, 0
15098 pushl_cfi %ebx
15099 CFI_REL_OFFSET ebx, 0
15100 - movl $(__USER_DS), %edx
15101 + movl $\_DS, %edx
15102 movl %edx, %ds
15103 movl %edx, %es
15104 movl $(__KERNEL_PERCPU), %edx
15105 @@ -213,6 +353,15 @@
15106 SET_KERNEL_GS %edx
15107 .endm
15108
15109 +.macro SAVE_ALL
15110 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15111 + __SAVE_ALL __KERNEL_DS
15112 + pax_enter_kernel
15113 +#else
15114 + __SAVE_ALL __USER_DS
15115 +#endif
15116 +.endm
15117 +
15118 .macro RESTORE_INT_REGS
15119 popl_cfi %ebx
15120 CFI_RESTORE ebx
15121 @@ -296,7 +445,7 @@ ENTRY(ret_from_fork)
15122 popfl_cfi
15123 jmp syscall_exit
15124 CFI_ENDPROC
15125 -END(ret_from_fork)
15126 +ENDPROC(ret_from_fork)
15127
15128 /*
15129 * Interrupt exit functions should be protected against kprobes
15130 @@ -329,7 +478,15 @@ ret_from_intr:
15131 andl $SEGMENT_RPL_MASK, %eax
15132 #endif
15133 cmpl $USER_RPL, %eax
15134 +
15135 +#ifdef CONFIG_PAX_KERNEXEC
15136 + jae resume_userspace
15137 +
15138 + pax_exit_kernel
15139 + jmp resume_kernel
15140 +#else
15141 jb resume_kernel # not returning to v8086 or userspace
15142 +#endif
15143
15144 ENTRY(resume_userspace)
15145 LOCKDEP_SYS_EXIT
15146 @@ -341,8 +498,8 @@ ENTRY(resume_userspace)
15147 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15148 # int/exception return?
15149 jne work_pending
15150 - jmp restore_all
15151 -END(ret_from_exception)
15152 + jmp restore_all_pax
15153 +ENDPROC(ret_from_exception)
15154
15155 #ifdef CONFIG_PREEMPT
15156 ENTRY(resume_kernel)
15157 @@ -357,7 +514,7 @@ need_resched:
15158 jz restore_all
15159 call preempt_schedule_irq
15160 jmp need_resched
15161 -END(resume_kernel)
15162 +ENDPROC(resume_kernel)
15163 #endif
15164 CFI_ENDPROC
15165 /*
15166 @@ -391,28 +548,43 @@ sysenter_past_esp:
15167 /*CFI_REL_OFFSET cs, 0*/
15168 /*
15169 * Push current_thread_info()->sysenter_return to the stack.
15170 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15171 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15172 */
15173 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15174 + pushl_cfi $0
15175 CFI_REL_OFFSET eip, 0
15176
15177 pushl_cfi %eax
15178 SAVE_ALL
15179 + GET_THREAD_INFO(%ebp)
15180 + movl TI_sysenter_return(%ebp),%ebp
15181 + movl %ebp,PT_EIP(%esp)
15182 ENABLE_INTERRUPTS(CLBR_NONE)
15183
15184 /*
15185 * Load the potential sixth argument from user stack.
15186 * Careful about security.
15187 */
15188 + movl PT_OLDESP(%esp),%ebp
15189 +
15190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15191 + mov PT_OLDSS(%esp),%ds
15192 +1: movl %ds:(%ebp),%ebp
15193 + push %ss
15194 + pop %ds
15195 +#else
15196 cmpl $__PAGE_OFFSET-3,%ebp
15197 jae syscall_fault
15198 1: movl (%ebp),%ebp
15199 +#endif
15200 +
15201 movl %ebp,PT_EBP(%esp)
15202 _ASM_EXTABLE(1b,syscall_fault)
15203
15204 GET_THREAD_INFO(%ebp)
15205
15206 +#ifdef CONFIG_PAX_RANDKSTACK
15207 + pax_erase_kstack
15208 +#endif
15209 +
15210 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15211 jnz sysenter_audit
15212 sysenter_do_call:
15213 @@ -427,12 +599,24 @@ sysenter_do_call:
15214 testl $_TIF_ALLWORK_MASK, %ecx
15215 jne sysexit_audit
15216 sysenter_exit:
15217 +
15218 +#ifdef CONFIG_PAX_RANDKSTACK
15219 + pushl_cfi %eax
15220 + movl %esp, %eax
15221 + call pax_randomize_kstack
15222 + popl_cfi %eax
15223 +#endif
15224 +
15225 + pax_erase_kstack
15226 +
15227 /* if something modifies registers it must also disable sysexit */
15228 movl PT_EIP(%esp), %edx
15229 movl PT_OLDESP(%esp), %ecx
15230 xorl %ebp,%ebp
15231 TRACE_IRQS_ON
15232 1: mov PT_FS(%esp), %fs
15233 +2: mov PT_DS(%esp), %ds
15234 +3: mov PT_ES(%esp), %es
15235 PTGS_TO_GS
15236 ENABLE_INTERRUPTS_SYSEXIT
15237
15238 @@ -449,6 +633,9 @@ sysenter_audit:
15239 movl %eax,%edx /* 2nd arg: syscall number */
15240 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15241 call __audit_syscall_entry
15242 +
15243 + pax_erase_kstack
15244 +
15245 pushl_cfi %ebx
15246 movl PT_EAX(%esp),%eax /* reload syscall number */
15247 jmp sysenter_do_call
15248 @@ -474,10 +661,16 @@ sysexit_audit:
15249
15250 CFI_ENDPROC
15251 .pushsection .fixup,"ax"
15252 -2: movl $0,PT_FS(%esp)
15253 +4: movl $0,PT_FS(%esp)
15254 + jmp 1b
15255 +5: movl $0,PT_DS(%esp)
15256 + jmp 1b
15257 +6: movl $0,PT_ES(%esp)
15258 jmp 1b
15259 .popsection
15260 - _ASM_EXTABLE(1b,2b)
15261 + _ASM_EXTABLE(1b,4b)
15262 + _ASM_EXTABLE(2b,5b)
15263 + _ASM_EXTABLE(3b,6b)
15264 PTGS_TO_GS_EX
15265 ENDPROC(ia32_sysenter_target)
15266
15267 @@ -491,6 +684,11 @@ ENTRY(system_call)
15268 pushl_cfi %eax # save orig_eax
15269 SAVE_ALL
15270 GET_THREAD_INFO(%ebp)
15271 +
15272 +#ifdef CONFIG_PAX_RANDKSTACK
15273 + pax_erase_kstack
15274 +#endif
15275 +
15276 # system call tracing in operation / emulation
15277 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15278 jnz syscall_trace_entry
15279 @@ -509,6 +707,15 @@ syscall_exit:
15280 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15281 jne syscall_exit_work
15282
15283 +restore_all_pax:
15284 +
15285 +#ifdef CONFIG_PAX_RANDKSTACK
15286 + movl %esp, %eax
15287 + call pax_randomize_kstack
15288 +#endif
15289 +
15290 + pax_erase_kstack
15291 +
15292 restore_all:
15293 TRACE_IRQS_IRET
15294 restore_all_notrace:
15295 @@ -565,14 +772,34 @@ ldt_ss:
15296 * compensating for the offset by changing to the ESPFIX segment with
15297 * a base address that matches for the difference.
15298 */
15299 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15300 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15301 mov %esp, %edx /* load kernel esp */
15302 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15303 mov %dx, %ax /* eax: new kernel esp */
15304 sub %eax, %edx /* offset (low word is 0) */
15305 +#ifdef CONFIG_SMP
15306 + movl PER_CPU_VAR(cpu_number), %ebx
15307 + shll $PAGE_SHIFT_asm, %ebx
15308 + addl $cpu_gdt_table, %ebx
15309 +#else
15310 + movl $cpu_gdt_table, %ebx
15311 +#endif
15312 shr $16, %edx
15313 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15314 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15315 +
15316 +#ifdef CONFIG_PAX_KERNEXEC
15317 + mov %cr0, %esi
15318 + btr $16, %esi
15319 + mov %esi, %cr0
15320 +#endif
15321 +
15322 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15323 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15324 +
15325 +#ifdef CONFIG_PAX_KERNEXEC
15326 + bts $16, %esi
15327 + mov %esi, %cr0
15328 +#endif
15329 +
15330 pushl_cfi $__ESPFIX_SS
15331 pushl_cfi %eax /* new kernel esp */
15332 /* Disable interrupts, but do not irqtrace this section: we
15333 @@ -601,35 +828,23 @@ work_resched:
15334 movl TI_flags(%ebp), %ecx
15335 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15336 # than syscall tracing?
15337 - jz restore_all
15338 + jz restore_all_pax
15339 testb $_TIF_NEED_RESCHED, %cl
15340 jnz work_resched
15341
15342 work_notifysig: # deal with pending signals and
15343 # notify-resume requests
15344 + movl %esp, %eax
15345 #ifdef CONFIG_VM86
15346 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15347 - movl %esp, %eax
15348 - jne work_notifysig_v86 # returning to kernel-space or
15349 + jz 1f # returning to kernel-space or
15350 # vm86-space
15351 - TRACE_IRQS_ON
15352 - ENABLE_INTERRUPTS(CLBR_NONE)
15353 - movb PT_CS(%esp), %bl
15354 - andb $SEGMENT_RPL_MASK, %bl
15355 - cmpb $USER_RPL, %bl
15356 - jb resume_kernel
15357 - xorl %edx, %edx
15358 - call do_notify_resume
15359 - jmp resume_userspace
15360
15361 - ALIGN
15362 -work_notifysig_v86:
15363 pushl_cfi %ecx # save ti_flags for do_notify_resume
15364 call save_v86_state # %eax contains pt_regs pointer
15365 popl_cfi %ecx
15366 movl %eax, %esp
15367 -#else
15368 - movl %esp, %eax
15369 +1:
15370 #endif
15371 TRACE_IRQS_ON
15372 ENABLE_INTERRUPTS(CLBR_NONE)
15373 @@ -640,7 +855,7 @@ work_notifysig_v86:
15374 xorl %edx, %edx
15375 call do_notify_resume
15376 jmp resume_userspace
15377 -END(work_pending)
15378 +ENDPROC(work_pending)
15379
15380 # perform syscall exit tracing
15381 ALIGN
15382 @@ -648,11 +863,14 @@ syscall_trace_entry:
15383 movl $-ENOSYS,PT_EAX(%esp)
15384 movl %esp, %eax
15385 call syscall_trace_enter
15386 +
15387 + pax_erase_kstack
15388 +
15389 /* What it returned is what we'll actually use. */
15390 cmpl $(NR_syscalls), %eax
15391 jnae syscall_call
15392 jmp syscall_exit
15393 -END(syscall_trace_entry)
15394 +ENDPROC(syscall_trace_entry)
15395
15396 # perform syscall exit tracing
15397 ALIGN
15398 @@ -665,20 +883,24 @@ syscall_exit_work:
15399 movl %esp, %eax
15400 call syscall_trace_leave
15401 jmp resume_userspace
15402 -END(syscall_exit_work)
15403 +ENDPROC(syscall_exit_work)
15404 CFI_ENDPROC
15405
15406 RING0_INT_FRAME # can't unwind into user space anyway
15407 syscall_fault:
15408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15409 + push %ss
15410 + pop %ds
15411 +#endif
15412 GET_THREAD_INFO(%ebp)
15413 movl $-EFAULT,PT_EAX(%esp)
15414 jmp resume_userspace
15415 -END(syscall_fault)
15416 +ENDPROC(syscall_fault)
15417
15418 syscall_badsys:
15419 movl $-ENOSYS,PT_EAX(%esp)
15420 jmp resume_userspace
15421 -END(syscall_badsys)
15422 +ENDPROC(syscall_badsys)
15423 CFI_ENDPROC
15424 /*
15425 * End of kprobes section
15426 @@ -750,6 +972,36 @@ ENTRY(ptregs_clone)
15427 CFI_ENDPROC
15428 ENDPROC(ptregs_clone)
15429
15430 + ALIGN;
15431 +ENTRY(kernel_execve)
15432 + CFI_STARTPROC
15433 + pushl_cfi %ebp
15434 + sub $PT_OLDSS+4,%esp
15435 + pushl_cfi %edi
15436 + pushl_cfi %ecx
15437 + pushl_cfi %eax
15438 + lea 3*4(%esp),%edi
15439 + mov $PT_OLDSS/4+1,%ecx
15440 + xorl %eax,%eax
15441 + rep stosl
15442 + popl_cfi %eax
15443 + popl_cfi %ecx
15444 + popl_cfi %edi
15445 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15446 + pushl_cfi %esp
15447 + call sys_execve
15448 + add $4,%esp
15449 + CFI_ADJUST_CFA_OFFSET -4
15450 + GET_THREAD_INFO(%ebp)
15451 + test %eax,%eax
15452 + jz syscall_exit
15453 + add $PT_OLDSS+4,%esp
15454 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15455 + popl_cfi %ebp
15456 + ret
15457 + CFI_ENDPROC
15458 +ENDPROC(kernel_execve)
15459 +
15460 .macro FIXUP_ESPFIX_STACK
15461 /*
15462 * Switch back for ESPFIX stack to the normal zerobased stack
15463 @@ -759,8 +1011,15 @@ ENDPROC(ptregs_clone)
15464 * normal stack and adjusts ESP with the matching offset.
15465 */
15466 /* fixup the stack */
15467 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15468 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15469 +#ifdef CONFIG_SMP
15470 + movl PER_CPU_VAR(cpu_number), %ebx
15471 + shll $PAGE_SHIFT_asm, %ebx
15472 + addl $cpu_gdt_table, %ebx
15473 +#else
15474 + movl $cpu_gdt_table, %ebx
15475 +#endif
15476 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15477 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15478 shl $16, %eax
15479 addl %esp, %eax /* the adjusted stack pointer */
15480 pushl_cfi $__KERNEL_DS
15481 @@ -813,7 +1072,7 @@ vector=vector+1
15482 .endr
15483 2: jmp common_interrupt
15484 .endr
15485 -END(irq_entries_start)
15486 +ENDPROC(irq_entries_start)
15487
15488 .previous
15489 END(interrupt)
15490 @@ -861,7 +1120,7 @@ ENTRY(coprocessor_error)
15491 pushl_cfi $do_coprocessor_error
15492 jmp error_code
15493 CFI_ENDPROC
15494 -END(coprocessor_error)
15495 +ENDPROC(coprocessor_error)
15496
15497 ENTRY(simd_coprocessor_error)
15498 RING0_INT_FRAME
15499 @@ -882,7 +1141,7 @@ ENTRY(simd_coprocessor_error)
15500 #endif
15501 jmp error_code
15502 CFI_ENDPROC
15503 -END(simd_coprocessor_error)
15504 +ENDPROC(simd_coprocessor_error)
15505
15506 ENTRY(device_not_available)
15507 RING0_INT_FRAME
15508 @@ -890,18 +1149,18 @@ ENTRY(device_not_available)
15509 pushl_cfi $do_device_not_available
15510 jmp error_code
15511 CFI_ENDPROC
15512 -END(device_not_available)
15513 +ENDPROC(device_not_available)
15514
15515 #ifdef CONFIG_PARAVIRT
15516 ENTRY(native_iret)
15517 iret
15518 _ASM_EXTABLE(native_iret, iret_exc)
15519 -END(native_iret)
15520 +ENDPROC(native_iret)
15521
15522 ENTRY(native_irq_enable_sysexit)
15523 sti
15524 sysexit
15525 -END(native_irq_enable_sysexit)
15526 +ENDPROC(native_irq_enable_sysexit)
15527 #endif
15528
15529 ENTRY(overflow)
15530 @@ -910,7 +1169,7 @@ ENTRY(overflow)
15531 pushl_cfi $do_overflow
15532 jmp error_code
15533 CFI_ENDPROC
15534 -END(overflow)
15535 +ENDPROC(overflow)
15536
15537 ENTRY(bounds)
15538 RING0_INT_FRAME
15539 @@ -918,7 +1177,7 @@ ENTRY(bounds)
15540 pushl_cfi $do_bounds
15541 jmp error_code
15542 CFI_ENDPROC
15543 -END(bounds)
15544 +ENDPROC(bounds)
15545
15546 ENTRY(invalid_op)
15547 RING0_INT_FRAME
15548 @@ -926,7 +1185,7 @@ ENTRY(invalid_op)
15549 pushl_cfi $do_invalid_op
15550 jmp error_code
15551 CFI_ENDPROC
15552 -END(invalid_op)
15553 +ENDPROC(invalid_op)
15554
15555 ENTRY(coprocessor_segment_overrun)
15556 RING0_INT_FRAME
15557 @@ -934,35 +1193,35 @@ ENTRY(coprocessor_segment_overrun)
15558 pushl_cfi $do_coprocessor_segment_overrun
15559 jmp error_code
15560 CFI_ENDPROC
15561 -END(coprocessor_segment_overrun)
15562 +ENDPROC(coprocessor_segment_overrun)
15563
15564 ENTRY(invalid_TSS)
15565 RING0_EC_FRAME
15566 pushl_cfi $do_invalid_TSS
15567 jmp error_code
15568 CFI_ENDPROC
15569 -END(invalid_TSS)
15570 +ENDPROC(invalid_TSS)
15571
15572 ENTRY(segment_not_present)
15573 RING0_EC_FRAME
15574 pushl_cfi $do_segment_not_present
15575 jmp error_code
15576 CFI_ENDPROC
15577 -END(segment_not_present)
15578 +ENDPROC(segment_not_present)
15579
15580 ENTRY(stack_segment)
15581 RING0_EC_FRAME
15582 pushl_cfi $do_stack_segment
15583 jmp error_code
15584 CFI_ENDPROC
15585 -END(stack_segment)
15586 +ENDPROC(stack_segment)
15587
15588 ENTRY(alignment_check)
15589 RING0_EC_FRAME
15590 pushl_cfi $do_alignment_check
15591 jmp error_code
15592 CFI_ENDPROC
15593 -END(alignment_check)
15594 +ENDPROC(alignment_check)
15595
15596 ENTRY(divide_error)
15597 RING0_INT_FRAME
15598 @@ -970,7 +1229,7 @@ ENTRY(divide_error)
15599 pushl_cfi $do_divide_error
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(divide_error)
15603 +ENDPROC(divide_error)
15604
15605 #ifdef CONFIG_X86_MCE
15606 ENTRY(machine_check)
15607 @@ -979,7 +1238,7 @@ ENTRY(machine_check)
15608 pushl_cfi machine_check_vector
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(machine_check)
15612 +ENDPROC(machine_check)
15613 #endif
15614
15615 ENTRY(spurious_interrupt_bug)
15616 @@ -988,7 +1247,7 @@ ENTRY(spurious_interrupt_bug)
15617 pushl_cfi $do_spurious_interrupt_bug
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(spurious_interrupt_bug)
15621 +ENDPROC(spurious_interrupt_bug)
15622 /*
15623 * End of kprobes section
15624 */
15625 @@ -1100,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15626
15627 ENTRY(mcount)
15628 ret
15629 -END(mcount)
15630 +ENDPROC(mcount)
15631
15632 ENTRY(ftrace_caller)
15633 cmpl $0, function_trace_stop
15634 @@ -1129,7 +1388,7 @@ ftrace_graph_call:
15635 .globl ftrace_stub
15636 ftrace_stub:
15637 ret
15638 -END(ftrace_caller)
15639 +ENDPROC(ftrace_caller)
15640
15641 #else /* ! CONFIG_DYNAMIC_FTRACE */
15642
15643 @@ -1165,7 +1424,7 @@ trace:
15644 popl %ecx
15645 popl %eax
15646 jmp ftrace_stub
15647 -END(mcount)
15648 +ENDPROC(mcount)
15649 #endif /* CONFIG_DYNAMIC_FTRACE */
15650 #endif /* CONFIG_FUNCTION_TRACER */
15651
15652 @@ -1186,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15653 popl %ecx
15654 popl %eax
15655 ret
15656 -END(ftrace_graph_caller)
15657 +ENDPROC(ftrace_graph_caller)
15658
15659 .globl return_to_handler
15660 return_to_handler:
15661 @@ -1241,15 +1500,18 @@ error_code:
15662 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15663 REG_TO_PTGS %ecx
15664 SET_KERNEL_GS %ecx
15665 - movl $(__USER_DS), %ecx
15666 + movl $(__KERNEL_DS), %ecx
15667 movl %ecx, %ds
15668 movl %ecx, %es
15669 +
15670 + pax_enter_kernel
15671 +
15672 TRACE_IRQS_OFF
15673 movl %esp,%eax # pt_regs pointer
15674 call *%edi
15675 jmp ret_from_exception
15676 CFI_ENDPROC
15677 -END(page_fault)
15678 +ENDPROC(page_fault)
15679
15680 /*
15681 * Debug traps and NMI can happen at the one SYSENTER instruction
15682 @@ -1291,7 +1553,7 @@ debug_stack_correct:
15683 call do_debug
15684 jmp ret_from_exception
15685 CFI_ENDPROC
15686 -END(debug)
15687 +ENDPROC(debug)
15688
15689 /*
15690 * NMI is doubly nasty. It can happen _while_ we're handling
15691 @@ -1328,6 +1590,9 @@ nmi_stack_correct:
15692 xorl %edx,%edx # zero error code
15693 movl %esp,%eax # pt_regs pointer
15694 call do_nmi
15695 +
15696 + pax_exit_kernel
15697 +
15698 jmp restore_all_notrace
15699 CFI_ENDPROC
15700
15701 @@ -1364,12 +1629,15 @@ nmi_espfix_stack:
15702 FIXUP_ESPFIX_STACK # %eax == %esp
15703 xorl %edx,%edx # zero error code
15704 call do_nmi
15705 +
15706 + pax_exit_kernel
15707 +
15708 RESTORE_REGS
15709 lss 12+4(%esp), %esp # back to espfix stack
15710 CFI_ADJUST_CFA_OFFSET -24
15711 jmp irq_return
15712 CFI_ENDPROC
15713 -END(nmi)
15714 +ENDPROC(nmi)
15715
15716 ENTRY(int3)
15717 RING0_INT_FRAME
15718 @@ -1381,14 +1649,14 @@ ENTRY(int3)
15719 call do_int3
15720 jmp ret_from_exception
15721 CFI_ENDPROC
15722 -END(int3)
15723 +ENDPROC(int3)
15724
15725 ENTRY(general_protection)
15726 RING0_EC_FRAME
15727 pushl_cfi $do_general_protection
15728 jmp error_code
15729 CFI_ENDPROC
15730 -END(general_protection)
15731 +ENDPROC(general_protection)
15732
15733 #ifdef CONFIG_KVM_GUEST
15734 ENTRY(async_page_fault)
15735 @@ -1396,7 +1664,7 @@ ENTRY(async_page_fault)
15736 pushl_cfi $do_async_page_fault
15737 jmp error_code
15738 CFI_ENDPROC
15739 -END(async_page_fault)
15740 +ENDPROC(async_page_fault)
15741 #endif
15742
15743 /*
15744 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15745 index 7d65133..c888d5f 100644
15746 --- a/arch/x86/kernel/entry_64.S
15747 +++ b/arch/x86/kernel/entry_64.S
15748 @@ -57,6 +57,8 @@
15749 #include <asm/percpu.h>
15750 #include <asm/asm.h>
15751 #include <linux/err.h>
15752 +#include <asm/pgtable.h>
15753 +#include <asm/alternative-asm.h>
15754
15755 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15756 #include <linux/elf-em.h>
15757 @@ -70,8 +72,9 @@
15758 #ifdef CONFIG_FUNCTION_TRACER
15759 #ifdef CONFIG_DYNAMIC_FTRACE
15760 ENTRY(mcount)
15761 + pax_force_retaddr
15762 retq
15763 -END(mcount)
15764 +ENDPROC(mcount)
15765
15766 ENTRY(ftrace_caller)
15767 cmpl $0, function_trace_stop
15768 @@ -94,8 +97,9 @@ GLOBAL(ftrace_graph_call)
15769 #endif
15770
15771 GLOBAL(ftrace_stub)
15772 + pax_force_retaddr
15773 retq
15774 -END(ftrace_caller)
15775 +ENDPROC(ftrace_caller)
15776
15777 #else /* ! CONFIG_DYNAMIC_FTRACE */
15778 ENTRY(mcount)
15779 @@ -114,6 +118,7 @@ ENTRY(mcount)
15780 #endif
15781
15782 GLOBAL(ftrace_stub)
15783 + pax_force_retaddr
15784 retq
15785
15786 trace:
15787 @@ -123,12 +128,13 @@ trace:
15788 movq 8(%rbp), %rsi
15789 subq $MCOUNT_INSN_SIZE, %rdi
15790
15791 + pax_force_fptr ftrace_trace_function
15792 call *ftrace_trace_function
15793
15794 MCOUNT_RESTORE_FRAME
15795
15796 jmp ftrace_stub
15797 -END(mcount)
15798 +ENDPROC(mcount)
15799 #endif /* CONFIG_DYNAMIC_FTRACE */
15800 #endif /* CONFIG_FUNCTION_TRACER */
15801
15802 @@ -148,8 +154,9 @@ ENTRY(ftrace_graph_caller)
15803
15804 MCOUNT_RESTORE_FRAME
15805
15806 + pax_force_retaddr
15807 retq
15808 -END(ftrace_graph_caller)
15809 +ENDPROC(ftrace_graph_caller)
15810
15811 GLOBAL(return_to_handler)
15812 subq $24, %rsp
15813 @@ -165,6 +172,7 @@ GLOBAL(return_to_handler)
15814 movq 8(%rsp), %rdx
15815 movq (%rsp), %rax
15816 addq $24, %rsp
15817 + pax_force_fptr %rdi
15818 jmp *%rdi
15819 #endif
15820
15821 @@ -180,6 +188,280 @@ ENTRY(native_usergs_sysret64)
15822 ENDPROC(native_usergs_sysret64)
15823 #endif /* CONFIG_PARAVIRT */
15824
15825 + .macro ljmpq sel, off
15826 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15827 + .byte 0x48; ljmp *1234f(%rip)
15828 + .pushsection .rodata
15829 + .align 16
15830 + 1234: .quad \off; .word \sel
15831 + .popsection
15832 +#else
15833 + pushq $\sel
15834 + pushq $\off
15835 + lretq
15836 +#endif
15837 + .endm
15838 +
15839 + .macro pax_enter_kernel
15840 + pax_set_fptr_mask
15841 +#ifdef CONFIG_PAX_KERNEXEC
15842 + call pax_enter_kernel
15843 +#endif
15844 + .endm
15845 +
15846 + .macro pax_exit_kernel
15847 +#ifdef CONFIG_PAX_KERNEXEC
15848 + call pax_exit_kernel
15849 +#endif
15850 + .endm
15851 +
15852 +#ifdef CONFIG_PAX_KERNEXEC
15853 +ENTRY(pax_enter_kernel)
15854 + pushq %rdi
15855 +
15856 +#ifdef CONFIG_PARAVIRT
15857 + PV_SAVE_REGS(CLBR_RDI)
15858 +#endif
15859 +
15860 + GET_CR0_INTO_RDI
15861 + bts $16,%rdi
15862 + jnc 3f
15863 + mov %cs,%edi
15864 + cmp $__KERNEL_CS,%edi
15865 + jnz 2f
15866 +1:
15867 +
15868 +#ifdef CONFIG_PARAVIRT
15869 + PV_RESTORE_REGS(CLBR_RDI)
15870 +#endif
15871 +
15872 + popq %rdi
15873 + pax_force_retaddr
15874 + retq
15875 +
15876 +2: ljmpq __KERNEL_CS,1f
15877 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15878 +4: SET_RDI_INTO_CR0
15879 + jmp 1b
15880 +ENDPROC(pax_enter_kernel)
15881 +
15882 +ENTRY(pax_exit_kernel)
15883 + pushq %rdi
15884 +
15885 +#ifdef CONFIG_PARAVIRT
15886 + PV_SAVE_REGS(CLBR_RDI)
15887 +#endif
15888 +
15889 + mov %cs,%rdi
15890 + cmp $__KERNEXEC_KERNEL_CS,%edi
15891 + jz 2f
15892 +1:
15893 +
15894 +#ifdef CONFIG_PARAVIRT
15895 + PV_RESTORE_REGS(CLBR_RDI);
15896 +#endif
15897 +
15898 + popq %rdi
15899 + pax_force_retaddr
15900 + retq
15901 +
15902 +2: GET_CR0_INTO_RDI
15903 + btr $16,%rdi
15904 + ljmpq __KERNEL_CS,3f
15905 +3: SET_RDI_INTO_CR0
15906 + jmp 1b
15907 +#ifdef CONFIG_PARAVIRT
15908 + PV_RESTORE_REGS(CLBR_RDI);
15909 +#endif
15910 +
15911 + popq %rdi
15912 + pax_force_retaddr
15913 + retq
15914 +ENDPROC(pax_exit_kernel)
15915 +#endif
15916 +
15917 + .macro pax_enter_kernel_user
15918 + pax_set_fptr_mask
15919 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15920 + call pax_enter_kernel_user
15921 +#endif
15922 + .endm
15923 +
15924 + .macro pax_exit_kernel_user
15925 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15926 + call pax_exit_kernel_user
15927 +#endif
15928 +#ifdef CONFIG_PAX_RANDKSTACK
15929 + pushq %rax
15930 + call pax_randomize_kstack
15931 + popq %rax
15932 +#endif
15933 + .endm
15934 +
15935 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15936 +ENTRY(pax_enter_kernel_user)
15937 + pushq %rdi
15938 + pushq %rbx
15939 +
15940 +#ifdef CONFIG_PARAVIRT
15941 + PV_SAVE_REGS(CLBR_RDI)
15942 +#endif
15943 +
15944 + GET_CR3_INTO_RDI
15945 + mov %rdi,%rbx
15946 + add $__START_KERNEL_map,%rbx
15947 + sub phys_base(%rip),%rbx
15948 +
15949 +#ifdef CONFIG_PARAVIRT
15950 + pushq %rdi
15951 + cmpl $0, pv_info+PARAVIRT_enabled
15952 + jz 1f
15953 + i = 0
15954 + .rept USER_PGD_PTRS
15955 + mov i*8(%rbx),%rsi
15956 + mov $0,%sil
15957 + lea i*8(%rbx),%rdi
15958 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15959 + i = i + 1
15960 + .endr
15961 + jmp 2f
15962 +1:
15963 +#endif
15964 +
15965 + i = 0
15966 + .rept USER_PGD_PTRS
15967 + movb $0,i*8(%rbx)
15968 + i = i + 1
15969 + .endr
15970 +
15971 +#ifdef CONFIG_PARAVIRT
15972 +2: popq %rdi
15973 +#endif
15974 + SET_RDI_INTO_CR3
15975 +
15976 +#ifdef CONFIG_PAX_KERNEXEC
15977 + GET_CR0_INTO_RDI
15978 + bts $16,%rdi
15979 + SET_RDI_INTO_CR0
15980 +#endif
15981 +
15982 +#ifdef CONFIG_PARAVIRT
15983 + PV_RESTORE_REGS(CLBR_RDI)
15984 +#endif
15985 +
15986 + popq %rbx
15987 + popq %rdi
15988 + pax_force_retaddr
15989 + retq
15990 +ENDPROC(pax_enter_kernel_user)
15991 +
15992 +ENTRY(pax_exit_kernel_user)
15993 + push %rdi
15994 +
15995 +#ifdef CONFIG_PARAVIRT
15996 + pushq %rbx
15997 + PV_SAVE_REGS(CLBR_RDI)
15998 +#endif
15999 +
16000 +#ifdef CONFIG_PAX_KERNEXEC
16001 + GET_CR0_INTO_RDI
16002 + btr $16,%rdi
16003 + SET_RDI_INTO_CR0
16004 +#endif
16005 +
16006 + GET_CR3_INTO_RDI
16007 + add $__START_KERNEL_map,%rdi
16008 + sub phys_base(%rip),%rdi
16009 +
16010 +#ifdef CONFIG_PARAVIRT
16011 + cmpl $0, pv_info+PARAVIRT_enabled
16012 + jz 1f
16013 + mov %rdi,%rbx
16014 + i = 0
16015 + .rept USER_PGD_PTRS
16016 + mov i*8(%rbx),%rsi
16017 + mov $0x67,%sil
16018 + lea i*8(%rbx),%rdi
16019 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16020 + i = i + 1
16021 + .endr
16022 + jmp 2f
16023 +1:
16024 +#endif
16025 +
16026 + i = 0
16027 + .rept USER_PGD_PTRS
16028 + movb $0x67,i*8(%rdi)
16029 + i = i + 1
16030 + .endr
16031 +
16032 +#ifdef CONFIG_PARAVIRT
16033 +2: PV_RESTORE_REGS(CLBR_RDI)
16034 + popq %rbx
16035 +#endif
16036 +
16037 + popq %rdi
16038 + pax_force_retaddr
16039 + retq
16040 +ENDPROC(pax_exit_kernel_user)
16041 +#endif
16042 +
16043 +.macro pax_erase_kstack
16044 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16045 + call pax_erase_kstack
16046 +#endif
16047 +.endm
16048 +
16049 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16050 +ENTRY(pax_erase_kstack)
16051 + pushq %rdi
16052 + pushq %rcx
16053 + pushq %rax
16054 + pushq %r11
16055 +
16056 + GET_THREAD_INFO(%r11)
16057 + mov TI_lowest_stack(%r11), %rdi
16058 + mov $-0xBEEF, %rax
16059 + std
16060 +
16061 +1: mov %edi, %ecx
16062 + and $THREAD_SIZE_asm - 1, %ecx
16063 + shr $3, %ecx
16064 + repne scasq
16065 + jecxz 2f
16066 +
16067 + cmp $2*8, %ecx
16068 + jc 2f
16069 +
16070 + mov $2*8, %ecx
16071 + repe scasq
16072 + jecxz 2f
16073 + jne 1b
16074 +
16075 +2: cld
16076 + mov %esp, %ecx
16077 + sub %edi, %ecx
16078 +
16079 + cmp $THREAD_SIZE_asm, %rcx
16080 + jb 3f
16081 + ud2
16082 +3:
16083 +
16084 + shr $3, %ecx
16085 + rep stosq
16086 +
16087 + mov TI_task_thread_sp0(%r11), %rdi
16088 + sub $256, %rdi
16089 + mov %rdi, TI_lowest_stack(%r11)
16090 +
16091 + popq %r11
16092 + popq %rax
16093 + popq %rcx
16094 + popq %rdi
16095 + pax_force_retaddr
16096 + ret
16097 +ENDPROC(pax_erase_kstack)
16098 +#endif
16099
16100 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16101 #ifdef CONFIG_TRACE_IRQFLAGS
16102 @@ -271,8 +553,8 @@ ENDPROC(native_usergs_sysret64)
16103 .endm
16104
16105 .macro UNFAKE_STACK_FRAME
16106 - addq $8*6, %rsp
16107 - CFI_ADJUST_CFA_OFFSET -(6*8)
16108 + addq $8*6 + ARG_SKIP, %rsp
16109 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16110 .endm
16111
16112 /*
16113 @@ -359,7 +641,7 @@ ENDPROC(native_usergs_sysret64)
16114 movq %rsp, %rsi
16115
16116 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16117 - testl $3, CS-RBP(%rsi)
16118 + testb $3, CS-RBP(%rsi)
16119 je 1f
16120 SWAPGS
16121 /*
16122 @@ -394,9 +676,10 @@ ENTRY(save_rest)
16123 movq_cfi r15, R15+16
16124 movq %r11, 8(%rsp) /* return address */
16125 FIXUP_TOP_OF_STACK %r11, 16
16126 + pax_force_retaddr
16127 ret
16128 CFI_ENDPROC
16129 -END(save_rest)
16130 +ENDPROC(save_rest)
16131
16132 /* save complete stack frame */
16133 .pushsection .kprobes.text, "ax"
16134 @@ -425,9 +708,10 @@ ENTRY(save_paranoid)
16135 js 1f /* negative -> in kernel */
16136 SWAPGS
16137 xorl %ebx,%ebx
16138 -1: ret
16139 +1: pax_force_retaddr_bts
16140 + ret
16141 CFI_ENDPROC
16142 -END(save_paranoid)
16143 +ENDPROC(save_paranoid)
16144 .popsection
16145
16146 /*
16147 @@ -449,7 +733,7 @@ ENTRY(ret_from_fork)
16148
16149 RESTORE_REST
16150
16151 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16152 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16153 jz retint_restore_args
16154
16155 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16156 @@ -459,7 +743,7 @@ ENTRY(ret_from_fork)
16157 jmp ret_from_sys_call # go to the SYSRET fastpath
16158
16159 CFI_ENDPROC
16160 -END(ret_from_fork)
16161 +ENDPROC(ret_from_fork)
16162
16163 /*
16164 * System call entry. Up to 6 arguments in registers are supported.
16165 @@ -495,7 +779,7 @@ END(ret_from_fork)
16166 ENTRY(system_call)
16167 CFI_STARTPROC simple
16168 CFI_SIGNAL_FRAME
16169 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16170 + CFI_DEF_CFA rsp,0
16171 CFI_REGISTER rip,rcx
16172 /*CFI_REGISTER rflags,r11*/
16173 SWAPGS_UNSAFE_STACK
16174 @@ -508,16 +792,23 @@ GLOBAL(system_call_after_swapgs)
16175
16176 movq %rsp,PER_CPU_VAR(old_rsp)
16177 movq PER_CPU_VAR(kernel_stack),%rsp
16178 + SAVE_ARGS 8*6,0
16179 + pax_enter_kernel_user
16180 +
16181 +#ifdef CONFIG_PAX_RANDKSTACK
16182 + pax_erase_kstack
16183 +#endif
16184 +
16185 /*
16186 * No need to follow this irqs off/on section - it's straight
16187 * and short:
16188 */
16189 ENABLE_INTERRUPTS(CLBR_NONE)
16190 - SAVE_ARGS 8,0
16191 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16192 movq %rcx,RIP-ARGOFFSET(%rsp)
16193 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16194 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16195 + GET_THREAD_INFO(%rcx)
16196 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16197 jnz tracesys
16198 system_call_fastpath:
16199 #if __SYSCALL_MASK == ~0
16200 @@ -527,7 +818,7 @@ system_call_fastpath:
16201 cmpl $__NR_syscall_max,%eax
16202 #endif
16203 ja badsys
16204 - movq %r10,%rcx
16205 + movq R10-ARGOFFSET(%rsp),%rcx
16206 call *sys_call_table(,%rax,8) # XXX: rip relative
16207 movq %rax,RAX-ARGOFFSET(%rsp)
16208 /*
16209 @@ -541,10 +832,13 @@ sysret_check:
16210 LOCKDEP_SYS_EXIT
16211 DISABLE_INTERRUPTS(CLBR_NONE)
16212 TRACE_IRQS_OFF
16213 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16214 + GET_THREAD_INFO(%rcx)
16215 + movl TI_flags(%rcx),%edx
16216 andl %edi,%edx
16217 jnz sysret_careful
16218 CFI_REMEMBER_STATE
16219 + pax_exit_kernel_user
16220 + pax_erase_kstack
16221 /*
16222 * sysretq will re-enable interrupts:
16223 */
16224 @@ -596,14 +890,18 @@ badsys:
16225 * jump back to the normal fast path.
16226 */
16227 auditsys:
16228 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16229 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16230 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16231 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16232 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16233 movq %rax,%rsi /* 2nd arg: syscall number */
16234 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16235 call __audit_syscall_entry
16236 +
16237 + pax_erase_kstack
16238 +
16239 LOAD_ARGS 0 /* reload call-clobbered registers */
16240 + pax_set_fptr_mask
16241 jmp system_call_fastpath
16242
16243 /*
16244 @@ -624,7 +922,7 @@ sysret_audit:
16245 /* Do syscall tracing */
16246 tracesys:
16247 #ifdef CONFIG_AUDITSYSCALL
16248 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16249 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16250 jz auditsys
16251 #endif
16252 SAVE_REST
16253 @@ -632,12 +930,16 @@ tracesys:
16254 FIXUP_TOP_OF_STACK %rdi
16255 movq %rsp,%rdi
16256 call syscall_trace_enter
16257 +
16258 + pax_erase_kstack
16259 +
16260 /*
16261 * Reload arg registers from stack in case ptrace changed them.
16262 * We don't reload %rax because syscall_trace_enter() returned
16263 * the value it wants us to use in the table lookup.
16264 */
16265 LOAD_ARGS ARGOFFSET, 1
16266 + pax_set_fptr_mask
16267 RESTORE_REST
16268 #if __SYSCALL_MASK == ~0
16269 cmpq $__NR_syscall_max,%rax
16270 @@ -646,7 +948,7 @@ tracesys:
16271 cmpl $__NR_syscall_max,%eax
16272 #endif
16273 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16274 - movq %r10,%rcx /* fixup for C */
16275 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16276 call *sys_call_table(,%rax,8)
16277 movq %rax,RAX-ARGOFFSET(%rsp)
16278 /* Use IRET because user could have changed frame */
16279 @@ -667,7 +969,9 @@ GLOBAL(int_with_check)
16280 andl %edi,%edx
16281 jnz int_careful
16282 andl $~TS_COMPAT,TI_status(%rcx)
16283 - jmp retint_swapgs
16284 + pax_exit_kernel_user
16285 + pax_erase_kstack
16286 + jmp retint_swapgs_pax
16287
16288 /* Either reschedule or signal or syscall exit tracking needed. */
16289 /* First do a reschedule test. */
16290 @@ -713,7 +1017,7 @@ int_restore_rest:
16291 TRACE_IRQS_OFF
16292 jmp int_with_check
16293 CFI_ENDPROC
16294 -END(system_call)
16295 +ENDPROC(system_call)
16296
16297 /*
16298 * Certain special system calls that need to save a complete full stack frame.
16299 @@ -729,7 +1033,7 @@ ENTRY(\label)
16300 call \func
16301 jmp ptregscall_common
16302 CFI_ENDPROC
16303 -END(\label)
16304 +ENDPROC(\label)
16305 .endm
16306
16307 PTREGSCALL stub_clone, sys_clone, %r8
16308 @@ -747,9 +1051,10 @@ ENTRY(ptregscall_common)
16309 movq_cfi_restore R12+8, r12
16310 movq_cfi_restore RBP+8, rbp
16311 movq_cfi_restore RBX+8, rbx
16312 + pax_force_retaddr
16313 ret $REST_SKIP /* pop extended registers */
16314 CFI_ENDPROC
16315 -END(ptregscall_common)
16316 +ENDPROC(ptregscall_common)
16317
16318 ENTRY(stub_execve)
16319 CFI_STARTPROC
16320 @@ -764,7 +1069,7 @@ ENTRY(stub_execve)
16321 RESTORE_REST
16322 jmp int_ret_from_sys_call
16323 CFI_ENDPROC
16324 -END(stub_execve)
16325 +ENDPROC(stub_execve)
16326
16327 /*
16328 * sigreturn is special because it needs to restore all registers on return.
16329 @@ -782,7 +1087,7 @@ ENTRY(stub_rt_sigreturn)
16330 RESTORE_REST
16331 jmp int_ret_from_sys_call
16332 CFI_ENDPROC
16333 -END(stub_rt_sigreturn)
16334 +ENDPROC(stub_rt_sigreturn)
16335
16336 #ifdef CONFIG_X86_X32_ABI
16337 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16338 @@ -851,7 +1156,7 @@ vector=vector+1
16339 2: jmp common_interrupt
16340 .endr
16341 CFI_ENDPROC
16342 -END(irq_entries_start)
16343 +ENDPROC(irq_entries_start)
16344
16345 .previous
16346 END(interrupt)
16347 @@ -871,6 +1176,16 @@ END(interrupt)
16348 subq $ORIG_RAX-RBP, %rsp
16349 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16350 SAVE_ARGS_IRQ
16351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16352 + testb $3, CS(%rdi)
16353 + jnz 1f
16354 + pax_enter_kernel
16355 + jmp 2f
16356 +1: pax_enter_kernel_user
16357 +2:
16358 +#else
16359 + pax_enter_kernel
16360 +#endif
16361 call \func
16362 .endm
16363
16364 @@ -902,7 +1217,7 @@ ret_from_intr:
16365
16366 exit_intr:
16367 GET_THREAD_INFO(%rcx)
16368 - testl $3,CS-ARGOFFSET(%rsp)
16369 + testb $3,CS-ARGOFFSET(%rsp)
16370 je retint_kernel
16371
16372 /* Interrupt came from user space */
16373 @@ -924,12 +1239,16 @@ retint_swapgs: /* return to user-space */
16374 * The iretq could re-enable interrupts:
16375 */
16376 DISABLE_INTERRUPTS(CLBR_ANY)
16377 + pax_exit_kernel_user
16378 +retint_swapgs_pax:
16379 TRACE_IRQS_IRETQ
16380 SWAPGS
16381 jmp restore_args
16382
16383 retint_restore_args: /* return to kernel space */
16384 DISABLE_INTERRUPTS(CLBR_ANY)
16385 + pax_exit_kernel
16386 + pax_force_retaddr RIP-ARGOFFSET
16387 /*
16388 * The iretq could re-enable interrupts:
16389 */
16390 @@ -1012,7 +1331,7 @@ ENTRY(retint_kernel)
16391 #endif
16392
16393 CFI_ENDPROC
16394 -END(common_interrupt)
16395 +ENDPROC(common_interrupt)
16396 /*
16397 * End of kprobes section
16398 */
16399 @@ -1029,7 +1348,7 @@ ENTRY(\sym)
16400 interrupt \do_sym
16401 jmp ret_from_intr
16402 CFI_ENDPROC
16403 -END(\sym)
16404 +ENDPROC(\sym)
16405 .endm
16406
16407 #ifdef CONFIG_SMP
16408 @@ -1102,12 +1421,22 @@ ENTRY(\sym)
16409 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16410 call error_entry
16411 DEFAULT_FRAME 0
16412 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16413 + testb $3, CS(%rsp)
16414 + jnz 1f
16415 + pax_enter_kernel
16416 + jmp 2f
16417 +1: pax_enter_kernel_user
16418 +2:
16419 +#else
16420 + pax_enter_kernel
16421 +#endif
16422 movq %rsp,%rdi /* pt_regs pointer */
16423 xorl %esi,%esi /* no error code */
16424 call \do_sym
16425 jmp error_exit /* %ebx: no swapgs flag */
16426 CFI_ENDPROC
16427 -END(\sym)
16428 +ENDPROC(\sym)
16429 .endm
16430
16431 .macro paranoidzeroentry sym do_sym
16432 @@ -1119,15 +1448,25 @@ ENTRY(\sym)
16433 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16434 call save_paranoid
16435 TRACE_IRQS_OFF
16436 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16437 + testb $3, CS(%rsp)
16438 + jnz 1f
16439 + pax_enter_kernel
16440 + jmp 2f
16441 +1: pax_enter_kernel_user
16442 +2:
16443 +#else
16444 + pax_enter_kernel
16445 +#endif
16446 movq %rsp,%rdi /* pt_regs pointer */
16447 xorl %esi,%esi /* no error code */
16448 call \do_sym
16449 jmp paranoid_exit /* %ebx: no swapgs flag */
16450 CFI_ENDPROC
16451 -END(\sym)
16452 +ENDPROC(\sym)
16453 .endm
16454
16455 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16456 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16457 .macro paranoidzeroentry_ist sym do_sym ist
16458 ENTRY(\sym)
16459 INTR_FRAME
16460 @@ -1137,14 +1476,30 @@ ENTRY(\sym)
16461 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16462 call save_paranoid
16463 TRACE_IRQS_OFF_DEBUG
16464 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16465 + testb $3, CS(%rsp)
16466 + jnz 1f
16467 + pax_enter_kernel
16468 + jmp 2f
16469 +1: pax_enter_kernel_user
16470 +2:
16471 +#else
16472 + pax_enter_kernel
16473 +#endif
16474 movq %rsp,%rdi /* pt_regs pointer */
16475 xorl %esi,%esi /* no error code */
16476 +#ifdef CONFIG_SMP
16477 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16478 + lea init_tss(%r12), %r12
16479 +#else
16480 + lea init_tss(%rip), %r12
16481 +#endif
16482 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16483 call \do_sym
16484 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16485 jmp paranoid_exit /* %ebx: no swapgs flag */
16486 CFI_ENDPROC
16487 -END(\sym)
16488 +ENDPROC(\sym)
16489 .endm
16490
16491 .macro errorentry sym do_sym
16492 @@ -1155,13 +1510,23 @@ ENTRY(\sym)
16493 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16494 call error_entry
16495 DEFAULT_FRAME 0
16496 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16497 + testb $3, CS(%rsp)
16498 + jnz 1f
16499 + pax_enter_kernel
16500 + jmp 2f
16501 +1: pax_enter_kernel_user
16502 +2:
16503 +#else
16504 + pax_enter_kernel
16505 +#endif
16506 movq %rsp,%rdi /* pt_regs pointer */
16507 movq ORIG_RAX(%rsp),%rsi /* get error code */
16508 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16509 call \do_sym
16510 jmp error_exit /* %ebx: no swapgs flag */
16511 CFI_ENDPROC
16512 -END(\sym)
16513 +ENDPROC(\sym)
16514 .endm
16515
16516 /* error code is on the stack already */
16517 @@ -1174,13 +1539,23 @@ ENTRY(\sym)
16518 call save_paranoid
16519 DEFAULT_FRAME 0
16520 TRACE_IRQS_OFF
16521 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16522 + testb $3, CS(%rsp)
16523 + jnz 1f
16524 + pax_enter_kernel
16525 + jmp 2f
16526 +1: pax_enter_kernel_user
16527 +2:
16528 +#else
16529 + pax_enter_kernel
16530 +#endif
16531 movq %rsp,%rdi /* pt_regs pointer */
16532 movq ORIG_RAX(%rsp),%rsi /* get error code */
16533 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16534 call \do_sym
16535 jmp paranoid_exit /* %ebx: no swapgs flag */
16536 CFI_ENDPROC
16537 -END(\sym)
16538 +ENDPROC(\sym)
16539 .endm
16540
16541 zeroentry divide_error do_divide_error
16542 @@ -1210,9 +1585,10 @@ gs_change:
16543 2: mfence /* workaround */
16544 SWAPGS
16545 popfq_cfi
16546 + pax_force_retaddr
16547 ret
16548 CFI_ENDPROC
16549 -END(native_load_gs_index)
16550 +ENDPROC(native_load_gs_index)
16551
16552 _ASM_EXTABLE(gs_change,bad_gs)
16553 .section .fixup,"ax"
16554 @@ -1231,13 +1607,14 @@ ENTRY(kernel_thread_helper)
16555 * Here we are in the child and the registers are set as they were
16556 * at kernel_thread() invocation in the parent.
16557 */
16558 + pax_force_fptr %rsi
16559 call *%rsi
16560 # exit
16561 mov %eax, %edi
16562 call do_exit
16563 ud2 # padding for call trace
16564 CFI_ENDPROC
16565 -END(kernel_thread_helper)
16566 +ENDPROC(kernel_thread_helper)
16567
16568 /*
16569 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16570 @@ -1264,11 +1641,11 @@ ENTRY(kernel_execve)
16571 RESTORE_REST
16572 testq %rax,%rax
16573 je int_ret_from_sys_call
16574 - RESTORE_ARGS
16575 UNFAKE_STACK_FRAME
16576 + pax_force_retaddr
16577 ret
16578 CFI_ENDPROC
16579 -END(kernel_execve)
16580 +ENDPROC(kernel_execve)
16581
16582 /* Call softirq on interrupt stack. Interrupts are off. */
16583 ENTRY(call_softirq)
16584 @@ -1286,9 +1663,10 @@ ENTRY(call_softirq)
16585 CFI_DEF_CFA_REGISTER rsp
16586 CFI_ADJUST_CFA_OFFSET -8
16587 decl PER_CPU_VAR(irq_count)
16588 + pax_force_retaddr
16589 ret
16590 CFI_ENDPROC
16591 -END(call_softirq)
16592 +ENDPROC(call_softirq)
16593
16594 #ifdef CONFIG_XEN
16595 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16596 @@ -1326,7 +1704,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16597 decl PER_CPU_VAR(irq_count)
16598 jmp error_exit
16599 CFI_ENDPROC
16600 -END(xen_do_hypervisor_callback)
16601 +ENDPROC(xen_do_hypervisor_callback)
16602
16603 /*
16604 * Hypervisor uses this for application faults while it executes.
16605 @@ -1385,7 +1763,7 @@ ENTRY(xen_failsafe_callback)
16606 SAVE_ALL
16607 jmp error_exit
16608 CFI_ENDPROC
16609 -END(xen_failsafe_callback)
16610 +ENDPROC(xen_failsafe_callback)
16611
16612 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16613 xen_hvm_callback_vector xen_evtchn_do_upcall
16614 @@ -1434,16 +1812,31 @@ ENTRY(paranoid_exit)
16615 TRACE_IRQS_OFF_DEBUG
16616 testl %ebx,%ebx /* swapgs needed? */
16617 jnz paranoid_restore
16618 - testl $3,CS(%rsp)
16619 + testb $3,CS(%rsp)
16620 jnz paranoid_userspace
16621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16622 + pax_exit_kernel
16623 + TRACE_IRQS_IRETQ 0
16624 + SWAPGS_UNSAFE_STACK
16625 + RESTORE_ALL 8
16626 + pax_force_retaddr_bts
16627 + jmp irq_return
16628 +#endif
16629 paranoid_swapgs:
16630 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16631 + pax_exit_kernel_user
16632 +#else
16633 + pax_exit_kernel
16634 +#endif
16635 TRACE_IRQS_IRETQ 0
16636 SWAPGS_UNSAFE_STACK
16637 RESTORE_ALL 8
16638 jmp irq_return
16639 paranoid_restore:
16640 + pax_exit_kernel
16641 TRACE_IRQS_IRETQ_DEBUG 0
16642 RESTORE_ALL 8
16643 + pax_force_retaddr_bts
16644 jmp irq_return
16645 paranoid_userspace:
16646 GET_THREAD_INFO(%rcx)
16647 @@ -1472,7 +1865,7 @@ paranoid_schedule:
16648 TRACE_IRQS_OFF
16649 jmp paranoid_userspace
16650 CFI_ENDPROC
16651 -END(paranoid_exit)
16652 +ENDPROC(paranoid_exit)
16653
16654 /*
16655 * Exception entry point. This expects an error code/orig_rax on the stack.
16656 @@ -1499,12 +1892,13 @@ ENTRY(error_entry)
16657 movq_cfi r14, R14+8
16658 movq_cfi r15, R15+8
16659 xorl %ebx,%ebx
16660 - testl $3,CS+8(%rsp)
16661 + testb $3,CS+8(%rsp)
16662 je error_kernelspace
16663 error_swapgs:
16664 SWAPGS
16665 error_sti:
16666 TRACE_IRQS_OFF
16667 + pax_force_retaddr_bts
16668 ret
16669
16670 /*
16671 @@ -1531,7 +1925,7 @@ bstep_iret:
16672 movq %rcx,RIP+8(%rsp)
16673 jmp error_swapgs
16674 CFI_ENDPROC
16675 -END(error_entry)
16676 +ENDPROC(error_entry)
16677
16678
16679 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16680 @@ -1551,7 +1945,7 @@ ENTRY(error_exit)
16681 jnz retint_careful
16682 jmp retint_swapgs
16683 CFI_ENDPROC
16684 -END(error_exit)
16685 +ENDPROC(error_exit)
16686
16687 /*
16688 * Test if a given stack is an NMI stack or not.
16689 @@ -1609,9 +2003,11 @@ ENTRY(nmi)
16690 * If %cs was not the kernel segment, then the NMI triggered in user
16691 * space, which means it is definitely not nested.
16692 */
16693 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16694 + je 1f
16695 cmpl $__KERNEL_CS, 16(%rsp)
16696 jne first_nmi
16697 -
16698 +1:
16699 /*
16700 * Check the special variable on the stack to see if NMIs are
16701 * executing.
16702 @@ -1758,6 +2154,16 @@ end_repeat_nmi:
16703 */
16704 call save_paranoid
16705 DEFAULT_FRAME 0
16706 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16707 + testb $3, CS(%rsp)
16708 + jnz 1f
16709 + pax_enter_kernel
16710 + jmp 2f
16711 +1: pax_enter_kernel_user
16712 +2:
16713 +#else
16714 + pax_enter_kernel
16715 +#endif
16716 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16717 movq %rsp,%rdi
16718 movq $-1,%rsi
16719 @@ -1765,21 +2171,32 @@ end_repeat_nmi:
16720 testl %ebx,%ebx /* swapgs needed? */
16721 jnz nmi_restore
16722 nmi_swapgs:
16723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16724 + pax_exit_kernel_user
16725 +#else
16726 + pax_exit_kernel
16727 +#endif
16728 SWAPGS_UNSAFE_STACK
16729 + RESTORE_ALL 8
16730 + /* Clear the NMI executing stack variable */
16731 + movq $0, 10*8(%rsp)
16732 + jmp irq_return
16733 nmi_restore:
16734 + pax_exit_kernel
16735 RESTORE_ALL 8
16736 + pax_force_retaddr_bts
16737 /* Clear the NMI executing stack variable */
16738 movq $0, 10*8(%rsp)
16739 jmp irq_return
16740 CFI_ENDPROC
16741 -END(nmi)
16742 +ENDPROC(nmi)
16743
16744 ENTRY(ignore_sysret)
16745 CFI_STARTPROC
16746 mov $-ENOSYS,%eax
16747 sysret
16748 CFI_ENDPROC
16749 -END(ignore_sysret)
16750 +ENDPROC(ignore_sysret)
16751
16752 /*
16753 * End of kprobes section
16754 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16755 index c3a7cb4..3ad00dc 100644
16756 --- a/arch/x86/kernel/ftrace.c
16757 +++ b/arch/x86/kernel/ftrace.c
16758 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
16759 {
16760 unsigned char replaced[MCOUNT_INSN_SIZE];
16761
16762 + ip = ktla_ktva(ip);
16763 +
16764 /*
16765 * Note: Due to modules and __init, code can
16766 * disappear and change, we need to protect against faulting
16767 @@ -212,7 +214,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16768 unsigned char old[MCOUNT_INSN_SIZE], *new;
16769 int ret;
16770
16771 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16772 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16773 new = ftrace_call_replace(ip, (unsigned long)func);
16774
16775 /* See comment above by declaration of modifying_ftrace_code */
16776 @@ -605,6 +607,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16777 {
16778 unsigned char code[MCOUNT_INSN_SIZE];
16779
16780 + ip = ktla_ktva(ip);
16781 +
16782 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16783 return -EFAULT;
16784
16785 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16786 index c18f59d..9c0c9f6 100644
16787 --- a/arch/x86/kernel/head32.c
16788 +++ b/arch/x86/kernel/head32.c
16789 @@ -18,6 +18,7 @@
16790 #include <asm/io_apic.h>
16791 #include <asm/bios_ebda.h>
16792 #include <asm/tlbflush.h>
16793 +#include <asm/boot.h>
16794
16795 static void __init i386_default_early_setup(void)
16796 {
16797 @@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
16798
16799 void __init i386_start_kernel(void)
16800 {
16801 - memblock_reserve(__pa_symbol(&_text),
16802 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16803 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16804
16805 #ifdef CONFIG_BLK_DEV_INITRD
16806 /* Reserve INITRD */
16807 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16808 index d42ab17..cb1b997 100644
16809 --- a/arch/x86/kernel/head_32.S
16810 +++ b/arch/x86/kernel/head_32.S
16811 @@ -26,6 +26,12 @@
16812 /* Physical address */
16813 #define pa(X) ((X) - __PAGE_OFFSET)
16814
16815 +#ifdef CONFIG_PAX_KERNEXEC
16816 +#define ta(X) (X)
16817 +#else
16818 +#define ta(X) ((X) - __PAGE_OFFSET)
16819 +#endif
16820 +
16821 /*
16822 * References to members of the new_cpu_data structure.
16823 */
16824 @@ -55,11 +61,7 @@
16825 * and small than max_low_pfn, otherwise will waste some page table entries
16826 */
16827
16828 -#if PTRS_PER_PMD > 1
16829 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16830 -#else
16831 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16832 -#endif
16833 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16834
16835 /* Number of possible pages in the lowmem region */
16836 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16837 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16838 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16839
16840 /*
16841 + * Real beginning of normal "text" segment
16842 + */
16843 +ENTRY(stext)
16844 +ENTRY(_stext)
16845 +
16846 +/*
16847 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16848 * %esi points to the real-mode code as a 32-bit pointer.
16849 * CS and DS must be 4 GB flat segments, but we don't depend on
16850 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16851 * can.
16852 */
16853 __HEAD
16854 +
16855 +#ifdef CONFIG_PAX_KERNEXEC
16856 + jmp startup_32
16857 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16858 +.fill PAGE_SIZE-5,1,0xcc
16859 +#endif
16860 +
16861 ENTRY(startup_32)
16862 movl pa(stack_start),%ecx
16863
16864 @@ -106,6 +121,57 @@ ENTRY(startup_32)
16865 2:
16866 leal -__PAGE_OFFSET(%ecx),%esp
16867
16868 +#ifdef CONFIG_SMP
16869 + movl $pa(cpu_gdt_table),%edi
16870 + movl $__per_cpu_load,%eax
16871 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16872 + rorl $16,%eax
16873 + movb %al,__KERNEL_PERCPU + 4(%edi)
16874 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16875 + movl $__per_cpu_end - 1,%eax
16876 + subl $__per_cpu_start,%eax
16877 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16878 +#endif
16879 +
16880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16881 + movl $NR_CPUS,%ecx
16882 + movl $pa(cpu_gdt_table),%edi
16883 +1:
16884 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16885 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16886 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16887 + addl $PAGE_SIZE_asm,%edi
16888 + loop 1b
16889 +#endif
16890 +
16891 +#ifdef CONFIG_PAX_KERNEXEC
16892 + movl $pa(boot_gdt),%edi
16893 + movl $__LOAD_PHYSICAL_ADDR,%eax
16894 + movw %ax,__BOOT_CS + 2(%edi)
16895 + rorl $16,%eax
16896 + movb %al,__BOOT_CS + 4(%edi)
16897 + movb %ah,__BOOT_CS + 7(%edi)
16898 + rorl $16,%eax
16899 +
16900 + ljmp $(__BOOT_CS),$1f
16901 +1:
16902 +
16903 + movl $NR_CPUS,%ecx
16904 + movl $pa(cpu_gdt_table),%edi
16905 + addl $__PAGE_OFFSET,%eax
16906 +1:
16907 + movw %ax,__KERNEL_CS + 2(%edi)
16908 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16909 + rorl $16,%eax
16910 + movb %al,__KERNEL_CS + 4(%edi)
16911 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16912 + movb %ah,__KERNEL_CS + 7(%edi)
16913 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16914 + rorl $16,%eax
16915 + addl $PAGE_SIZE_asm,%edi
16916 + loop 1b
16917 +#endif
16918 +
16919 /*
16920 * Clear BSS first so that there are no surprises...
16921 */
16922 @@ -196,8 +262,11 @@ ENTRY(startup_32)
16923 movl %eax, pa(max_pfn_mapped)
16924
16925 /* Do early initialization of the fixmap area */
16926 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16927 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16928 +#ifdef CONFIG_COMPAT_VDSO
16929 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16930 +#else
16931 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16932 +#endif
16933 #else /* Not PAE */
16934
16935 page_pde_offset = (__PAGE_OFFSET >> 20);
16936 @@ -227,8 +296,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16937 movl %eax, pa(max_pfn_mapped)
16938
16939 /* Do early initialization of the fixmap area */
16940 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16941 - movl %eax,pa(initial_page_table+0xffc)
16942 +#ifdef CONFIG_COMPAT_VDSO
16943 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16944 +#else
16945 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16946 +#endif
16947 #endif
16948
16949 #ifdef CONFIG_PARAVIRT
16950 @@ -242,9 +314,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16951 cmpl $num_subarch_entries, %eax
16952 jae bad_subarch
16953
16954 - movl pa(subarch_entries)(,%eax,4), %eax
16955 - subl $__PAGE_OFFSET, %eax
16956 - jmp *%eax
16957 + jmp *pa(subarch_entries)(,%eax,4)
16958
16959 bad_subarch:
16960 WEAK(lguest_entry)
16961 @@ -256,10 +326,10 @@ WEAK(xen_entry)
16962 __INITDATA
16963
16964 subarch_entries:
16965 - .long default_entry /* normal x86/PC */
16966 - .long lguest_entry /* lguest hypervisor */
16967 - .long xen_entry /* Xen hypervisor */
16968 - .long default_entry /* Moorestown MID */
16969 + .long ta(default_entry) /* normal x86/PC */
16970 + .long ta(lguest_entry) /* lguest hypervisor */
16971 + .long ta(xen_entry) /* Xen hypervisor */
16972 + .long ta(default_entry) /* Moorestown MID */
16973 num_subarch_entries = (. - subarch_entries) / 4
16974 .previous
16975 #else
16976 @@ -310,6 +380,7 @@ default_entry:
16977 orl %edx,%eax
16978 movl %eax,%cr4
16979
16980 +#ifdef CONFIG_X86_PAE
16981 testb $X86_CR4_PAE, %al # check if PAE is enabled
16982 jz 6f
16983
16984 @@ -338,6 +409,9 @@ default_entry:
16985 /* Make changes effective */
16986 wrmsr
16987
16988 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16989 +#endif
16990 +
16991 6:
16992
16993 /*
16994 @@ -436,14 +510,20 @@ is386: movl $2,%ecx # set MP
16995 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16996 movl %eax,%ss # after changing gdt.
16997
16998 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16999 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17000 movl %eax,%ds
17001 movl %eax,%es
17002
17003 movl $(__KERNEL_PERCPU), %eax
17004 movl %eax,%fs # set this cpu's percpu
17005
17006 +#ifdef CONFIG_CC_STACKPROTECTOR
17007 movl $(__KERNEL_STACK_CANARY),%eax
17008 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17009 + movl $(__USER_DS),%eax
17010 +#else
17011 + xorl %eax,%eax
17012 +#endif
17013 movl %eax,%gs
17014
17015 xorl %eax,%eax # Clear LDT
17016 @@ -520,8 +600,11 @@ setup_once:
17017 * relocation. Manually set base address in stack canary
17018 * segment descriptor.
17019 */
17020 - movl $gdt_page,%eax
17021 + movl $cpu_gdt_table,%eax
17022 movl $stack_canary,%ecx
17023 +#ifdef CONFIG_SMP
17024 + addl $__per_cpu_load,%ecx
17025 +#endif
17026 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17027 shrl $16, %ecx
17028 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17029 @@ -552,7 +635,7 @@ ENDPROC(early_idt_handlers)
17030 /* This is global to keep gas from relaxing the jumps */
17031 ENTRY(early_idt_handler)
17032 cld
17033 - cmpl $2,%ss:early_recursion_flag
17034 + cmpl $1,%ss:early_recursion_flag
17035 je hlt_loop
17036 incl %ss:early_recursion_flag
17037
17038 @@ -590,8 +673,8 @@ ENTRY(early_idt_handler)
17039 pushl (20+6*4)(%esp) /* trapno */
17040 pushl $fault_msg
17041 call printk
17042 -#endif
17043 call dump_stack
17044 +#endif
17045 hlt_loop:
17046 hlt
17047 jmp hlt_loop
17048 @@ -610,8 +693,11 @@ ENDPROC(early_idt_handler)
17049 /* This is the default interrupt "handler" :-) */
17050 ALIGN
17051 ignore_int:
17052 - cld
17053 #ifdef CONFIG_PRINTK
17054 + cmpl $2,%ss:early_recursion_flag
17055 + je hlt_loop
17056 + incl %ss:early_recursion_flag
17057 + cld
17058 pushl %eax
17059 pushl %ecx
17060 pushl %edx
17061 @@ -620,9 +706,6 @@ ignore_int:
17062 movl $(__KERNEL_DS),%eax
17063 movl %eax,%ds
17064 movl %eax,%es
17065 - cmpl $2,early_recursion_flag
17066 - je hlt_loop
17067 - incl early_recursion_flag
17068 pushl 16(%esp)
17069 pushl 24(%esp)
17070 pushl 32(%esp)
17071 @@ -656,29 +739,43 @@ ENTRY(setup_once_ref)
17072 /*
17073 * BSS section
17074 */
17075 -__PAGE_ALIGNED_BSS
17076 - .align PAGE_SIZE
17077 #ifdef CONFIG_X86_PAE
17078 +.section .initial_pg_pmd,"a",@progbits
17079 initial_pg_pmd:
17080 .fill 1024*KPMDS,4,0
17081 #else
17082 +.section .initial_page_table,"a",@progbits
17083 ENTRY(initial_page_table)
17084 .fill 1024,4,0
17085 #endif
17086 +.section .initial_pg_fixmap,"a",@progbits
17087 initial_pg_fixmap:
17088 .fill 1024,4,0
17089 +.section .empty_zero_page,"a",@progbits
17090 ENTRY(empty_zero_page)
17091 .fill 4096,1,0
17092 +.section .swapper_pg_dir,"a",@progbits
17093 ENTRY(swapper_pg_dir)
17094 +#ifdef CONFIG_X86_PAE
17095 + .fill 4,8,0
17096 +#else
17097 .fill 1024,4,0
17098 +#endif
17099 +
17100 +/*
17101 + * The IDT has to be page-aligned to simplify the Pentium
17102 + * F0 0F bug workaround.. We have a special link segment
17103 + * for this.
17104 + */
17105 +.section .idt,"a",@progbits
17106 +ENTRY(idt_table)
17107 + .fill 256,8,0
17108
17109 /*
17110 * This starts the data section.
17111 */
17112 #ifdef CONFIG_X86_PAE
17113 -__PAGE_ALIGNED_DATA
17114 - /* Page-aligned for the benefit of paravirt? */
17115 - .align PAGE_SIZE
17116 +.section .initial_page_table,"a",@progbits
17117 ENTRY(initial_page_table)
17118 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17119 # if KPMDS == 3
17120 @@ -697,12 +794,20 @@ ENTRY(initial_page_table)
17121 # error "Kernel PMDs should be 1, 2 or 3"
17122 # endif
17123 .align PAGE_SIZE /* needs to be page-sized too */
17124 +
17125 +#ifdef CONFIG_PAX_PER_CPU_PGD
17126 +ENTRY(cpu_pgd)
17127 + .rept NR_CPUS
17128 + .fill 4,8,0
17129 + .endr
17130 +#endif
17131 +
17132 #endif
17133
17134 .data
17135 .balign 4
17136 ENTRY(stack_start)
17137 - .long init_thread_union+THREAD_SIZE
17138 + .long init_thread_union+THREAD_SIZE-8
17139
17140 __INITRODATA
17141 int_msg:
17142 @@ -730,7 +835,7 @@ fault_msg:
17143 * segment size, and 32-bit linear address value:
17144 */
17145
17146 - .data
17147 +.section .rodata,"a",@progbits
17148 .globl boot_gdt_descr
17149 .globl idt_descr
17150
17151 @@ -739,7 +844,7 @@ fault_msg:
17152 .word 0 # 32 bit align gdt_desc.address
17153 boot_gdt_descr:
17154 .word __BOOT_DS+7
17155 - .long boot_gdt - __PAGE_OFFSET
17156 + .long pa(boot_gdt)
17157
17158 .word 0 # 32-bit align idt_desc.address
17159 idt_descr:
17160 @@ -750,7 +855,7 @@ idt_descr:
17161 .word 0 # 32 bit align gdt_desc.address
17162 ENTRY(early_gdt_descr)
17163 .word GDT_ENTRIES*8-1
17164 - .long gdt_page /* Overwritten for secondary CPUs */
17165 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17166
17167 /*
17168 * The boot_gdt must mirror the equivalent in setup.S and is
17169 @@ -759,5 +864,65 @@ ENTRY(early_gdt_descr)
17170 .align L1_CACHE_BYTES
17171 ENTRY(boot_gdt)
17172 .fill GDT_ENTRY_BOOT_CS,8,0
17173 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17174 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17175 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17176 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17177 +
17178 + .align PAGE_SIZE_asm
17179 +ENTRY(cpu_gdt_table)
17180 + .rept NR_CPUS
17181 + .quad 0x0000000000000000 /* NULL descriptor */
17182 + .quad 0x0000000000000000 /* 0x0b reserved */
17183 + .quad 0x0000000000000000 /* 0x13 reserved */
17184 + .quad 0x0000000000000000 /* 0x1b reserved */
17185 +
17186 +#ifdef CONFIG_PAX_KERNEXEC
17187 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17188 +#else
17189 + .quad 0x0000000000000000 /* 0x20 unused */
17190 +#endif
17191 +
17192 + .quad 0x0000000000000000 /* 0x28 unused */
17193 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17194 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17195 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17196 + .quad 0x0000000000000000 /* 0x4b reserved */
17197 + .quad 0x0000000000000000 /* 0x53 reserved */
17198 + .quad 0x0000000000000000 /* 0x5b reserved */
17199 +
17200 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17201 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17202 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17203 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17204 +
17205 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17206 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17207 +
17208 + /*
17209 + * Segments used for calling PnP BIOS have byte granularity.
17210 + * The code segments and data segments have fixed 64k limits,
17211 + * the transfer segment sizes are set at run time.
17212 + */
17213 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17214 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17215 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17216 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17217 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17218 +
17219 + /*
17220 + * The APM segments have byte granularity and their bases
17221 + * are set at run time. All have 64k limits.
17222 + */
17223 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17224 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17225 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17226 +
17227 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17228 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17229 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17230 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17231 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17232 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17233 +
17234 + /* Be sure this is zeroed to avoid false validations in Xen */
17235 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17236 + .endr
17237 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17238 index 94bf9cc..400455a 100644
17239 --- a/arch/x86/kernel/head_64.S
17240 +++ b/arch/x86/kernel/head_64.S
17241 @@ -20,6 +20,8 @@
17242 #include <asm/processor-flags.h>
17243 #include <asm/percpu.h>
17244 #include <asm/nops.h>
17245 +#include <asm/cpufeature.h>
17246 +#include <asm/alternative-asm.h>
17247
17248 #ifdef CONFIG_PARAVIRT
17249 #include <asm/asm-offsets.h>
17250 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17251 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17252 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17253 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17254 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17255 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17256 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17257 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17258 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17259 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17260
17261 .text
17262 __HEAD
17263 @@ -88,35 +96,23 @@ startup_64:
17264 */
17265 addq %rbp, init_level4_pgt + 0(%rip)
17266 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17267 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17268 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17269 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17270 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17271
17272 addq %rbp, level3_ident_pgt + 0(%rip)
17273 +#ifndef CONFIG_XEN
17274 + addq %rbp, level3_ident_pgt + 8(%rip)
17275 +#endif
17276
17277 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17278 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17279 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17280 +
17281 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17282 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17283
17284 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17285 -
17286 - /* Add an Identity mapping if I am above 1G */
17287 - leaq _text(%rip), %rdi
17288 - andq $PMD_PAGE_MASK, %rdi
17289 -
17290 - movq %rdi, %rax
17291 - shrq $PUD_SHIFT, %rax
17292 - andq $(PTRS_PER_PUD - 1), %rax
17293 - jz ident_complete
17294 -
17295 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17296 - leaq level3_ident_pgt(%rip), %rbx
17297 - movq %rdx, 0(%rbx, %rax, 8)
17298 -
17299 - movq %rdi, %rax
17300 - shrq $PMD_SHIFT, %rax
17301 - andq $(PTRS_PER_PMD - 1), %rax
17302 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17303 - leaq level2_spare_pgt(%rip), %rbx
17304 - movq %rdx, 0(%rbx, %rax, 8)
17305 -ident_complete:
17306 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17307
17308 /*
17309 * Fixup the kernel text+data virtual addresses. Note that
17310 @@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
17311 * after the boot processor executes this code.
17312 */
17313
17314 - /* Enable PAE mode and PGE */
17315 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17316 + /* Enable PAE mode and PSE/PGE */
17317 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17318 movq %rax, %cr4
17319
17320 /* Setup early boot stage 4 level pagetables. */
17321 @@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
17322 movl $MSR_EFER, %ecx
17323 rdmsr
17324 btsl $_EFER_SCE, %eax /* Enable System Call */
17325 - btl $20,%edi /* No Execute supported? */
17326 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17327 jnc 1f
17328 btsl $_EFER_NX, %eax
17329 + leaq init_level4_pgt(%rip), %rdi
17330 +#ifndef CONFIG_EFI
17331 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17332 +#endif
17333 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17334 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17335 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17336 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17337 1: wrmsr /* Make changes effective */
17338
17339 /* Setup cr0 */
17340 @@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
17341 * jump. In addition we need to ensure %cs is set so we make this
17342 * a far return.
17343 */
17344 + pax_set_fptr_mask
17345 movq initial_code(%rip),%rax
17346 pushq $0 # fake return address to stop unwinder
17347 pushq $__KERNEL_CS # set correct cs
17348 @@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
17349 bad_address:
17350 jmp bad_address
17351
17352 - .section ".init.text","ax"
17353 + __INIT
17354 .globl early_idt_handlers
17355 early_idt_handlers:
17356 # 104(%rsp) %rflags
17357 @@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
17358 addq $16,%rsp # drop vector number and error code
17359 decl early_recursion_flag(%rip)
17360 INTERRUPT_RETURN
17361 + .previous
17362
17363 + __INITDATA
17364 .balign 4
17365 early_recursion_flag:
17366 .long 0
17367 + .previous
17368
17369 + .section .rodata,"a",@progbits
17370 #ifdef CONFIG_EARLY_PRINTK
17371 early_idt_msg:
17372 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17373 @@ -360,6 +369,7 @@ early_idt_ripmsg:
17374 #endif /* CONFIG_EARLY_PRINTK */
17375 .previous
17376
17377 + .section .rodata,"a",@progbits
17378 #define NEXT_PAGE(name) \
17379 .balign PAGE_SIZE; \
17380 ENTRY(name)
17381 @@ -372,7 +382,6 @@ ENTRY(name)
17382 i = i + 1 ; \
17383 .endr
17384
17385 - .data
17386 /*
17387 * This default setting generates an ident mapping at address 0x100000
17388 * and a mapping for the kernel that precisely maps virtual address
17389 @@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
17390 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17391 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17392 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17393 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17394 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17395 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17396 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17397 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17398 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17399 .org init_level4_pgt + L4_START_KERNEL*8, 0
17400 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17401 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17402
17403 +#ifdef CONFIG_PAX_PER_CPU_PGD
17404 +NEXT_PAGE(cpu_pgd)
17405 + .rept NR_CPUS
17406 + .fill 512,8,0
17407 + .endr
17408 +#endif
17409 +
17410 NEXT_PAGE(level3_ident_pgt)
17411 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17412 +#ifdef CONFIG_XEN
17413 .fill 511,8,0
17414 +#else
17415 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17416 + .fill 510,8,0
17417 +#endif
17418 +
17419 +NEXT_PAGE(level3_vmalloc_start_pgt)
17420 + .fill 512,8,0
17421 +
17422 +NEXT_PAGE(level3_vmalloc_end_pgt)
17423 + .fill 512,8,0
17424 +
17425 +NEXT_PAGE(level3_vmemmap_pgt)
17426 + .fill L3_VMEMMAP_START,8,0
17427 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17428
17429 NEXT_PAGE(level3_kernel_pgt)
17430 .fill L3_START_KERNEL,8,0
17431 @@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
17432 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17433 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17434
17435 +NEXT_PAGE(level2_vmemmap_pgt)
17436 + .fill 512,8,0
17437 +
17438 NEXT_PAGE(level2_fixmap_pgt)
17439 - .fill 506,8,0
17440 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17441 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17442 - .fill 5,8,0
17443 + .fill 507,8,0
17444 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17445 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17446 + .fill 4,8,0
17447
17448 -NEXT_PAGE(level1_fixmap_pgt)
17449 +NEXT_PAGE(level1_vsyscall_pgt)
17450 .fill 512,8,0
17451
17452 -NEXT_PAGE(level2_ident_pgt)
17453 - /* Since I easily can, map the first 1G.
17454 + /* Since I easily can, map the first 2G.
17455 * Don't set NX because code runs from these pages.
17456 */
17457 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17458 +NEXT_PAGE(level2_ident_pgt)
17459 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17460
17461 NEXT_PAGE(level2_kernel_pgt)
17462 /*
17463 @@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
17464 * If you want to increase this then increase MODULES_VADDR
17465 * too.)
17466 */
17467 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17468 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17469 -
17470 -NEXT_PAGE(level2_spare_pgt)
17471 - .fill 512, 8, 0
17472 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17473
17474 #undef PMDS
17475 #undef NEXT_PAGE
17476
17477 - .data
17478 + .align PAGE_SIZE
17479 +ENTRY(cpu_gdt_table)
17480 + .rept NR_CPUS
17481 + .quad 0x0000000000000000 /* NULL descriptor */
17482 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17483 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17484 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17485 + .quad 0x00cffb000000ffff /* __USER32_CS */
17486 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17487 + .quad 0x00affb000000ffff /* __USER_CS */
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17491 +#else
17492 + .quad 0x0 /* unused */
17493 +#endif
17494 +
17495 + .quad 0,0 /* TSS */
17496 + .quad 0,0 /* LDT */
17497 + .quad 0,0,0 /* three TLS descriptors */
17498 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17499 + /* asm/segment.h:GDT_ENTRIES must match this */
17500 +
17501 + /* zero the remaining page */
17502 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17503 + .endr
17504 +
17505 .align 16
17506 .globl early_gdt_descr
17507 early_gdt_descr:
17508 .word GDT_ENTRIES*8-1
17509 early_gdt_descr_base:
17510 - .quad INIT_PER_CPU_VAR(gdt_page)
17511 + .quad cpu_gdt_table
17512
17513 ENTRY(phys_base)
17514 /* This must match the first entry in level2_kernel_pgt */
17515 .quad 0x0000000000000000
17516
17517 #include "../../x86/xen/xen-head.S"
17518 -
17519 - .section .bss, "aw", @nobits
17520 +
17521 + .section .rodata,"a",@progbits
17522 .align L1_CACHE_BYTES
17523 ENTRY(idt_table)
17524 - .skip IDT_ENTRIES * 16
17525 + .fill 512,8,0
17526
17527 .align L1_CACHE_BYTES
17528 ENTRY(nmi_idt_table)
17529 - .skip IDT_ENTRIES * 16
17530 + .fill 512,8,0
17531
17532 __PAGE_ALIGNED_BSS
17533 .align PAGE_SIZE
17534 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17535 index 9c3bd4a..e1d9b35 100644
17536 --- a/arch/x86/kernel/i386_ksyms_32.c
17537 +++ b/arch/x86/kernel/i386_ksyms_32.c
17538 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17539 EXPORT_SYMBOL(cmpxchg8b_emu);
17540 #endif
17541
17542 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17543 +
17544 /* Networking helper routines. */
17545 EXPORT_SYMBOL(csum_partial_copy_generic);
17546 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17547 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17548
17549 EXPORT_SYMBOL(__get_user_1);
17550 EXPORT_SYMBOL(__get_user_2);
17551 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17552
17553 EXPORT_SYMBOL(csum_partial);
17554 EXPORT_SYMBOL(empty_zero_page);
17555 +
17556 +#ifdef CONFIG_PAX_KERNEXEC
17557 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17558 +#endif
17559 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17560 index f250431..54097e7 100644
17561 --- a/arch/x86/kernel/i387.c
17562 +++ b/arch/x86/kernel/i387.c
17563 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17564 static inline bool interrupted_user_mode(void)
17565 {
17566 struct pt_regs *regs = get_irq_regs();
17567 - return regs && user_mode_vm(regs);
17568 + return regs && user_mode(regs);
17569 }
17570
17571 /*
17572 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17573 index 36d1853..bf25736 100644
17574 --- a/arch/x86/kernel/i8259.c
17575 +++ b/arch/x86/kernel/i8259.c
17576 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17577 "spurious 8259A interrupt: IRQ%d.\n", irq);
17578 spurious_irq_mask |= irqmask;
17579 }
17580 - atomic_inc(&irq_err_count);
17581 + atomic_inc_unchecked(&irq_err_count);
17582 /*
17583 * Theoretically we do not have to handle this IRQ,
17584 * but in Linux this does not cause problems and is
17585 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17586 index 8c96897..be66bfa 100644
17587 --- a/arch/x86/kernel/ioport.c
17588 +++ b/arch/x86/kernel/ioport.c
17589 @@ -6,6 +6,7 @@
17590 #include <linux/sched.h>
17591 #include <linux/kernel.h>
17592 #include <linux/capability.h>
17593 +#include <linux/security.h>
17594 #include <linux/errno.h>
17595 #include <linux/types.h>
17596 #include <linux/ioport.h>
17597 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17598
17599 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17600 return -EINVAL;
17601 +#ifdef CONFIG_GRKERNSEC_IO
17602 + if (turn_on && grsec_disable_privio) {
17603 + gr_handle_ioperm();
17604 + return -EPERM;
17605 + }
17606 +#endif
17607 if (turn_on && !capable(CAP_SYS_RAWIO))
17608 return -EPERM;
17609
17610 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17611 * because the ->io_bitmap_max value must match the bitmap
17612 * contents:
17613 */
17614 - tss = &per_cpu(init_tss, get_cpu());
17615 + tss = init_tss + get_cpu();
17616
17617 if (turn_on)
17618 bitmap_clear(t->io_bitmap_ptr, from, num);
17619 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17620 return -EINVAL;
17621 /* Trying to gain more privileges? */
17622 if (level > old) {
17623 +#ifdef CONFIG_GRKERNSEC_IO
17624 + if (grsec_disable_privio) {
17625 + gr_handle_iopl();
17626 + return -EPERM;
17627 + }
17628 +#endif
17629 if (!capable(CAP_SYS_RAWIO))
17630 return -EPERM;
17631 }
17632 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17633 index 3dafc60..aa8e9c4 100644
17634 --- a/arch/x86/kernel/irq.c
17635 +++ b/arch/x86/kernel/irq.c
17636 @@ -18,7 +18,7 @@
17637 #include <asm/mce.h>
17638 #include <asm/hw_irq.h>
17639
17640 -atomic_t irq_err_count;
17641 +atomic_unchecked_t irq_err_count;
17642
17643 /* Function pointer for generic interrupt vector handling */
17644 void (*x86_platform_ipi_callback)(void) = NULL;
17645 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17646 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17647 seq_printf(p, " Machine check polls\n");
17648 #endif
17649 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17650 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17651 #if defined(CONFIG_X86_IO_APIC)
17652 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17653 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17654 #endif
17655 return 0;
17656 }
17657 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17658
17659 u64 arch_irq_stat(void)
17660 {
17661 - u64 sum = atomic_read(&irq_err_count);
17662 + u64 sum = atomic_read_unchecked(&irq_err_count);
17663
17664 #ifdef CONFIG_X86_IO_APIC
17665 - sum += atomic_read(&irq_mis_count);
17666 + sum += atomic_read_unchecked(&irq_mis_count);
17667 #endif
17668 return sum;
17669 }
17670 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17671 index 344faf8..355f60d 100644
17672 --- a/arch/x86/kernel/irq_32.c
17673 +++ b/arch/x86/kernel/irq_32.c
17674 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17675 __asm__ __volatile__("andl %%esp,%0" :
17676 "=r" (sp) : "0" (THREAD_SIZE - 1));
17677
17678 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17679 + return sp < STACK_WARN;
17680 }
17681
17682 static void print_stack_overflow(void)
17683 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17684 * per-CPU IRQ handling contexts (thread information and stack)
17685 */
17686 union irq_ctx {
17687 - struct thread_info tinfo;
17688 - u32 stack[THREAD_SIZE/sizeof(u32)];
17689 + unsigned long previous_esp;
17690 + u32 stack[THREAD_SIZE/sizeof(u32)];
17691 } __attribute__((aligned(THREAD_SIZE)));
17692
17693 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17694 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17695 static inline int
17696 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17697 {
17698 - union irq_ctx *curctx, *irqctx;
17699 + union irq_ctx *irqctx;
17700 u32 *isp, arg1, arg2;
17701
17702 - curctx = (union irq_ctx *) current_thread_info();
17703 irqctx = __this_cpu_read(hardirq_ctx);
17704
17705 /*
17706 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17707 * handler) we can't do that and just have to keep using the
17708 * current stack (which is the irq stack already after all)
17709 */
17710 - if (unlikely(curctx == irqctx))
17711 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17712 return 0;
17713
17714 /* build the stack frame on the IRQ stack */
17715 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17716 - irqctx->tinfo.task = curctx->tinfo.task;
17717 - irqctx->tinfo.previous_esp = current_stack_pointer;
17718 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17719 + irqctx->previous_esp = current_stack_pointer;
17720
17721 - /* Copy the preempt_count so that the [soft]irq checks work. */
17722 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17724 + __set_fs(MAKE_MM_SEG(0));
17725 +#endif
17726
17727 if (unlikely(overflow))
17728 call_on_stack(print_stack_overflow, isp);
17729 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17730 : "0" (irq), "1" (desc), "2" (isp),
17731 "D" (desc->handle_irq)
17732 : "memory", "cc", "ecx");
17733 +
17734 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17735 + __set_fs(current_thread_info()->addr_limit);
17736 +#endif
17737 +
17738 return 1;
17739 }
17740
17741 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17742 */
17743 void __cpuinit irq_ctx_init(int cpu)
17744 {
17745 - union irq_ctx *irqctx;
17746 -
17747 if (per_cpu(hardirq_ctx, cpu))
17748 return;
17749
17750 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17751 - THREADINFO_GFP,
17752 - THREAD_SIZE_ORDER));
17753 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17754 - irqctx->tinfo.cpu = cpu;
17755 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17756 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17757 -
17758 - per_cpu(hardirq_ctx, cpu) = irqctx;
17759 -
17760 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17761 - THREADINFO_GFP,
17762 - THREAD_SIZE_ORDER));
17763 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17764 - irqctx->tinfo.cpu = cpu;
17765 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17766 -
17767 - per_cpu(softirq_ctx, cpu) = irqctx;
17768 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17769 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17770 +
17771 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17772 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17773
17774 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17775 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17776 @@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
17777 asmlinkage void do_softirq(void)
17778 {
17779 unsigned long flags;
17780 - struct thread_info *curctx;
17781 union irq_ctx *irqctx;
17782 u32 *isp;
17783
17784 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
17785 local_irq_save(flags);
17786
17787 if (local_softirq_pending()) {
17788 - curctx = current_thread_info();
17789 irqctx = __this_cpu_read(softirq_ctx);
17790 - irqctx->tinfo.task = curctx->task;
17791 - irqctx->tinfo.previous_esp = current_stack_pointer;
17792 + irqctx->previous_esp = current_stack_pointer;
17793
17794 /* build the stack frame on the softirq stack */
17795 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17796 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17797 +
17798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17799 + __set_fs(MAKE_MM_SEG(0));
17800 +#endif
17801
17802 call_on_stack(__do_softirq, isp);
17803 +
17804 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17805 + __set_fs(current_thread_info()->addr_limit);
17806 +#endif
17807 +
17808 /*
17809 * Shouldn't happen, we returned above if in_interrupt():
17810 */
17811 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17812 if (unlikely(!desc))
17813 return false;
17814
17815 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17816 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17817 if (unlikely(overflow))
17818 print_stack_overflow();
17819 desc->handle_irq(irq, desc);
17820 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17821 index d04d3ec..ea4b374 100644
17822 --- a/arch/x86/kernel/irq_64.c
17823 +++ b/arch/x86/kernel/irq_64.c
17824 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17825 u64 estack_top, estack_bottom;
17826 u64 curbase = (u64)task_stack_page(current);
17827
17828 - if (user_mode_vm(regs))
17829 + if (user_mode(regs))
17830 return;
17831
17832 if (regs->sp >= curbase + sizeof(struct thread_info) +
17833 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17834 index 1d5d31e..72731d4 100644
17835 --- a/arch/x86/kernel/kdebugfs.c
17836 +++ b/arch/x86/kernel/kdebugfs.c
17837 @@ -27,7 +27,7 @@ struct setup_data_node {
17838 u32 len;
17839 };
17840
17841 -static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17842 +static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
17843 size_t count, loff_t *ppos)
17844 {
17845 struct setup_data_node *node = file->private_data;
17846 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17847 index 3f61904..873cea9 100644
17848 --- a/arch/x86/kernel/kgdb.c
17849 +++ b/arch/x86/kernel/kgdb.c
17850 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17851 #ifdef CONFIG_X86_32
17852 switch (regno) {
17853 case GDB_SS:
17854 - if (!user_mode_vm(regs))
17855 + if (!user_mode(regs))
17856 *(unsigned long *)mem = __KERNEL_DS;
17857 break;
17858 case GDB_SP:
17859 - if (!user_mode_vm(regs))
17860 + if (!user_mode(regs))
17861 *(unsigned long *)mem = kernel_stack_pointer(regs);
17862 break;
17863 case GDB_GS:
17864 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17865 case 'k':
17866 /* clear the trace bit */
17867 linux_regs->flags &= ~X86_EFLAGS_TF;
17868 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17869 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17870
17871 /* set the trace bit if we're stepping */
17872 if (remcomInBuffer[0] == 's') {
17873 linux_regs->flags |= X86_EFLAGS_TF;
17874 - atomic_set(&kgdb_cpu_doing_single_step,
17875 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17876 raw_smp_processor_id());
17877 }
17878
17879 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17880
17881 switch (cmd) {
17882 case DIE_DEBUG:
17883 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17884 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17885 if (user_mode(regs))
17886 return single_step_cont(regs, args);
17887 break;
17888 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17889 index c5e410e..da6aaf9 100644
17890 --- a/arch/x86/kernel/kprobes-opt.c
17891 +++ b/arch/x86/kernel/kprobes-opt.c
17892 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17893 * Verify if the address gap is in 2GB range, because this uses
17894 * a relative jump.
17895 */
17896 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17897 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17898 if (abs(rel) > 0x7fffffff)
17899 return -ERANGE;
17900
17901 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17902 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17903
17904 /* Set probe function call */
17905 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17906 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17907
17908 /* Set returning jmp instruction at the tail of out-of-line buffer */
17909 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17910 - (u8 *)op->kp.addr + op->optinsn.size);
17911 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17912
17913 flush_icache_range((unsigned long) buf,
17914 (unsigned long) buf + TMPL_END_IDX +
17915 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17916 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17917
17918 /* Backup instructions which will be replaced by jump address */
17919 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17920 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17921 RELATIVE_ADDR_SIZE);
17922
17923 insn_buf[0] = RELATIVEJUMP_OPCODE;
17924 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17925 index e2f751e..dffa2a0 100644
17926 --- a/arch/x86/kernel/kprobes.c
17927 +++ b/arch/x86/kernel/kprobes.c
17928 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17929 } __attribute__((packed)) *insn;
17930
17931 insn = (struct __arch_relative_insn *)from;
17932 +
17933 + pax_open_kernel();
17934 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17935 insn->op = op;
17936 + pax_close_kernel();
17937 }
17938
17939 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17940 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17941 kprobe_opcode_t opcode;
17942 kprobe_opcode_t *orig_opcodes = opcodes;
17943
17944 - if (search_exception_tables((unsigned long)opcodes))
17945 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17946 return 0; /* Page fault may occur on this address. */
17947
17948 retry:
17949 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17950 /* Another subsystem puts a breakpoint, failed to recover */
17951 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17952 return 0;
17953 + pax_open_kernel();
17954 memcpy(dest, insn.kaddr, insn.length);
17955 + pax_close_kernel();
17956
17957 #ifdef CONFIG_X86_64
17958 if (insn_rip_relative(&insn)) {
17959 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17960 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17961 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17962 disp = (u8 *) dest + insn_offset_displacement(&insn);
17963 + pax_open_kernel();
17964 *(s32 *) disp = (s32) newdisp;
17965 + pax_close_kernel();
17966 }
17967 #endif
17968 return insn.length;
17969 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17970 * nor set current_kprobe, because it doesn't use single
17971 * stepping.
17972 */
17973 - regs->ip = (unsigned long)p->ainsn.insn;
17974 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17975 preempt_enable_no_resched();
17976 return;
17977 }
17978 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17979 if (p->opcode == BREAKPOINT_INSTRUCTION)
17980 regs->ip = (unsigned long)p->addr;
17981 else
17982 - regs->ip = (unsigned long)p->ainsn.insn;
17983 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17984 }
17985
17986 /*
17987 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17988 setup_singlestep(p, regs, kcb, 0);
17989 return 1;
17990 }
17991 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17992 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17993 /*
17994 * The breakpoint instruction was removed right
17995 * after we hit it. Another cpu has removed
17996 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17997 " movq %rax, 152(%rsp)\n"
17998 RESTORE_REGS_STRING
17999 " popfq\n"
18000 +#ifdef KERNEXEC_PLUGIN
18001 + " btsq $63,(%rsp)\n"
18002 +#endif
18003 #else
18004 " pushf\n"
18005 SAVE_REGS_STRING
18006 @@ -765,7 +775,7 @@ static void __kprobes
18007 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18008 {
18009 unsigned long *tos = stack_addr(regs);
18010 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18011 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18012 unsigned long orig_ip = (unsigned long)p->addr;
18013 kprobe_opcode_t *insn = p->ainsn.insn;
18014
18015 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18016 struct die_args *args = data;
18017 int ret = NOTIFY_DONE;
18018
18019 - if (args->regs && user_mode_vm(args->regs))
18020 + if (args->regs && user_mode(args->regs))
18021 return ret;
18022
18023 switch (val) {
18024 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18025 index ebc9873..1b9724b 100644
18026 --- a/arch/x86/kernel/ldt.c
18027 +++ b/arch/x86/kernel/ldt.c
18028 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18029 if (reload) {
18030 #ifdef CONFIG_SMP
18031 preempt_disable();
18032 - load_LDT(pc);
18033 + load_LDT_nolock(pc);
18034 if (!cpumask_equal(mm_cpumask(current->mm),
18035 cpumask_of(smp_processor_id())))
18036 smp_call_function(flush_ldt, current->mm, 1);
18037 preempt_enable();
18038 #else
18039 - load_LDT(pc);
18040 + load_LDT_nolock(pc);
18041 #endif
18042 }
18043 if (oldsize) {
18044 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18045 return err;
18046
18047 for (i = 0; i < old->size; i++)
18048 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18049 + write_ldt_entry(new->ldt, i, old->ldt + i);
18050 return 0;
18051 }
18052
18053 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18054 retval = copy_ldt(&mm->context, &old_mm->context);
18055 mutex_unlock(&old_mm->context.lock);
18056 }
18057 +
18058 + if (tsk == current) {
18059 + mm->context.vdso = 0;
18060 +
18061 +#ifdef CONFIG_X86_32
18062 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18063 + mm->context.user_cs_base = 0UL;
18064 + mm->context.user_cs_limit = ~0UL;
18065 +
18066 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18067 + cpus_clear(mm->context.cpu_user_cs_mask);
18068 +#endif
18069 +
18070 +#endif
18071 +#endif
18072 +
18073 + }
18074 +
18075 return retval;
18076 }
18077
18078 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18079 }
18080 }
18081
18082 +#ifdef CONFIG_PAX_SEGMEXEC
18083 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18084 + error = -EINVAL;
18085 + goto out_unlock;
18086 + }
18087 +#endif
18088 +
18089 fill_ldt(&ldt, &ldt_info);
18090 if (oldmode)
18091 ldt.avl = 0;
18092 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18093 index 5b19e4d..6476a76 100644
18094 --- a/arch/x86/kernel/machine_kexec_32.c
18095 +++ b/arch/x86/kernel/machine_kexec_32.c
18096 @@ -26,7 +26,7 @@
18097 #include <asm/cacheflush.h>
18098 #include <asm/debugreg.h>
18099
18100 -static void set_idt(void *newidt, __u16 limit)
18101 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18102 {
18103 struct desc_ptr curidt;
18104
18105 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18106 }
18107
18108
18109 -static void set_gdt(void *newgdt, __u16 limit)
18110 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18111 {
18112 struct desc_ptr curgdt;
18113
18114 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18115 }
18116
18117 control_page = page_address(image->control_code_page);
18118 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18119 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18120
18121 relocate_kernel_ptr = control_page;
18122 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18123 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18124 index 0327e2b..e43737b 100644
18125 --- a/arch/x86/kernel/microcode_intel.c
18126 +++ b/arch/x86/kernel/microcode_intel.c
18127 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18128
18129 static int get_ucode_user(void *to, const void *from, size_t n)
18130 {
18131 - return copy_from_user(to, from, n);
18132 + return copy_from_user(to, (const void __force_user *)from, n);
18133 }
18134
18135 static enum ucode_state
18136 request_microcode_user(int cpu, const void __user *buf, size_t size)
18137 {
18138 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18139 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18140 }
18141
18142 static void microcode_fini_cpu(int cpu)
18143 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18144 index f21fd94..61565cd 100644
18145 --- a/arch/x86/kernel/module.c
18146 +++ b/arch/x86/kernel/module.c
18147 @@ -35,15 +35,60 @@
18148 #define DEBUGP(fmt...)
18149 #endif
18150
18151 -void *module_alloc(unsigned long size)
18152 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18153 {
18154 - if (PAGE_ALIGN(size) > MODULES_LEN)
18155 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18156 return NULL;
18157 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18158 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18159 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18160 -1, __builtin_return_address(0));
18161 }
18162
18163 +void *module_alloc(unsigned long size)
18164 +{
18165 +
18166 +#ifdef CONFIG_PAX_KERNEXEC
18167 + return __module_alloc(size, PAGE_KERNEL);
18168 +#else
18169 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18170 +#endif
18171 +
18172 +}
18173 +
18174 +#ifdef CONFIG_PAX_KERNEXEC
18175 +#ifdef CONFIG_X86_32
18176 +void *module_alloc_exec(unsigned long size)
18177 +{
18178 + struct vm_struct *area;
18179 +
18180 + if (size == 0)
18181 + return NULL;
18182 +
18183 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18184 + return area ? area->addr : NULL;
18185 +}
18186 +EXPORT_SYMBOL(module_alloc_exec);
18187 +
18188 +void module_free_exec(struct module *mod, void *module_region)
18189 +{
18190 + vunmap(module_region);
18191 +}
18192 +EXPORT_SYMBOL(module_free_exec);
18193 +#else
18194 +void module_free_exec(struct module *mod, void *module_region)
18195 +{
18196 + module_free(mod, module_region);
18197 +}
18198 +EXPORT_SYMBOL(module_free_exec);
18199 +
18200 +void *module_alloc_exec(unsigned long size)
18201 +{
18202 + return __module_alloc(size, PAGE_KERNEL_RX);
18203 +}
18204 +EXPORT_SYMBOL(module_alloc_exec);
18205 +#endif
18206 +#endif
18207 +
18208 #ifdef CONFIG_X86_32
18209 int apply_relocate(Elf32_Shdr *sechdrs,
18210 const char *strtab,
18211 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18212 unsigned int i;
18213 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18214 Elf32_Sym *sym;
18215 - uint32_t *location;
18216 + uint32_t *plocation, location;
18217
18218 DEBUGP("Applying relocate section %u to %u\n", relsec,
18219 sechdrs[relsec].sh_info);
18220 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18221 /* This is where to make the change */
18222 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18223 - + rel[i].r_offset;
18224 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18225 + location = (uint32_t)plocation;
18226 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18227 + plocation = ktla_ktva((void *)plocation);
18228 /* This is the symbol it is referring to. Note that all
18229 undefined symbols have been resolved. */
18230 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18231 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18232 switch (ELF32_R_TYPE(rel[i].r_info)) {
18233 case R_386_32:
18234 /* We add the value into the location given */
18235 - *location += sym->st_value;
18236 + pax_open_kernel();
18237 + *plocation += sym->st_value;
18238 + pax_close_kernel();
18239 break;
18240 case R_386_PC32:
18241 /* Add the value, subtract its postition */
18242 - *location += sym->st_value - (uint32_t)location;
18243 + pax_open_kernel();
18244 + *plocation += sym->st_value - location;
18245 + pax_close_kernel();
18246 break;
18247 default:
18248 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18249 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18250 case R_X86_64_NONE:
18251 break;
18252 case R_X86_64_64:
18253 + pax_open_kernel();
18254 *(u64 *)loc = val;
18255 + pax_close_kernel();
18256 break;
18257 case R_X86_64_32:
18258 + pax_open_kernel();
18259 *(u32 *)loc = val;
18260 + pax_close_kernel();
18261 if (val != *(u32 *)loc)
18262 goto overflow;
18263 break;
18264 case R_X86_64_32S:
18265 + pax_open_kernel();
18266 *(s32 *)loc = val;
18267 + pax_close_kernel();
18268 if ((s64)val != *(s32 *)loc)
18269 goto overflow;
18270 break;
18271 case R_X86_64_PC32:
18272 val -= (u64)loc;
18273 + pax_open_kernel();
18274 *(u32 *)loc = val;
18275 + pax_close_kernel();
18276 +
18277 #if 0
18278 if ((s64)val != *(s32 *)loc)
18279 goto overflow;
18280 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18281 index a0b2f84..875ab81 100644
18282 --- a/arch/x86/kernel/nmi.c
18283 +++ b/arch/x86/kernel/nmi.c
18284 @@ -460,6 +460,17 @@ static inline void nmi_nesting_postprocess(void)
18285 dotraplinkage notrace __kprobes void
18286 do_nmi(struct pt_regs *regs, long error_code)
18287 {
18288 +
18289 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18290 + if (!user_mode(regs)) {
18291 + unsigned long cs = regs->cs & 0xFFFF;
18292 + unsigned long ip = ktva_ktla(regs->ip);
18293 +
18294 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18295 + regs->ip = ip;
18296 + }
18297 +#endif
18298 +
18299 nmi_nesting_preprocess(regs);
18300
18301 nmi_enter();
18302 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18303 index 676b8c7..870ba04 100644
18304 --- a/arch/x86/kernel/paravirt-spinlocks.c
18305 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18306 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18307 arch_spin_lock(lock);
18308 }
18309
18310 -struct pv_lock_ops pv_lock_ops = {
18311 +struct pv_lock_ops pv_lock_ops __read_only = {
18312 #ifdef CONFIG_SMP
18313 .spin_is_locked = __ticket_spin_is_locked,
18314 .spin_is_contended = __ticket_spin_is_contended,
18315 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18316 index 9ce8859..b49bf51 100644
18317 --- a/arch/x86/kernel/paravirt.c
18318 +++ b/arch/x86/kernel/paravirt.c
18319 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18320 {
18321 return x;
18322 }
18323 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18324 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18325 +#endif
18326
18327 void __init default_banner(void)
18328 {
18329 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18330 if (opfunc == NULL)
18331 /* If there's no function, patch it with a ud2a (BUG) */
18332 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18333 - else if (opfunc == _paravirt_nop)
18334 + else if (opfunc == (void *)_paravirt_nop)
18335 /* If the operation is a nop, then nop the callsite */
18336 ret = paravirt_patch_nop();
18337
18338 /* identity functions just return their single argument */
18339 - else if (opfunc == _paravirt_ident_32)
18340 + else if (opfunc == (void *)_paravirt_ident_32)
18341 ret = paravirt_patch_ident_32(insnbuf, len);
18342 - else if (opfunc == _paravirt_ident_64)
18343 + else if (opfunc == (void *)_paravirt_ident_64)
18344 ret = paravirt_patch_ident_64(insnbuf, len);
18345 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18346 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18347 + ret = paravirt_patch_ident_64(insnbuf, len);
18348 +#endif
18349
18350 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18351 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18352 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18353 if (insn_len > len || start == NULL)
18354 insn_len = len;
18355 else
18356 - memcpy(insnbuf, start, insn_len);
18357 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18358
18359 return insn_len;
18360 }
18361 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18362 preempt_enable();
18363 }
18364
18365 -struct pv_info pv_info = {
18366 +struct pv_info pv_info __read_only = {
18367 .name = "bare hardware",
18368 .paravirt_enabled = 0,
18369 .kernel_rpl = 0,
18370 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18371 #endif
18372 };
18373
18374 -struct pv_init_ops pv_init_ops = {
18375 +struct pv_init_ops pv_init_ops __read_only = {
18376 .patch = native_patch,
18377 };
18378
18379 -struct pv_time_ops pv_time_ops = {
18380 +struct pv_time_ops pv_time_ops __read_only = {
18381 .sched_clock = native_sched_clock,
18382 .steal_clock = native_steal_clock,
18383 };
18384
18385 -struct pv_irq_ops pv_irq_ops = {
18386 +struct pv_irq_ops pv_irq_ops __read_only = {
18387 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18388 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18389 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18390 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18391 #endif
18392 };
18393
18394 -struct pv_cpu_ops pv_cpu_ops = {
18395 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18396 .cpuid = native_cpuid,
18397 .get_debugreg = native_get_debugreg,
18398 .set_debugreg = native_set_debugreg,
18399 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18400 .end_context_switch = paravirt_nop,
18401 };
18402
18403 -struct pv_apic_ops pv_apic_ops = {
18404 +struct pv_apic_ops pv_apic_ops __read_only = {
18405 #ifdef CONFIG_X86_LOCAL_APIC
18406 .startup_ipi_hook = paravirt_nop,
18407 #endif
18408 };
18409
18410 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18411 +#ifdef CONFIG_X86_32
18412 +#ifdef CONFIG_X86_PAE
18413 +/* 64-bit pagetable entries */
18414 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18415 +#else
18416 /* 32-bit pagetable entries */
18417 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18418 +#endif
18419 #else
18420 /* 64-bit pagetable entries */
18421 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18422 #endif
18423
18424 -struct pv_mmu_ops pv_mmu_ops = {
18425 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18426
18427 .read_cr2 = native_read_cr2,
18428 .write_cr2 = native_write_cr2,
18429 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18430 .make_pud = PTE_IDENT,
18431
18432 .set_pgd = native_set_pgd,
18433 + .set_pgd_batched = native_set_pgd_batched,
18434 #endif
18435 #endif /* PAGETABLE_LEVELS >= 3 */
18436
18437 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18438 },
18439
18440 .set_fixmap = native_set_fixmap,
18441 +
18442 +#ifdef CONFIG_PAX_KERNEXEC
18443 + .pax_open_kernel = native_pax_open_kernel,
18444 + .pax_close_kernel = native_pax_close_kernel,
18445 +#endif
18446 +
18447 };
18448
18449 EXPORT_SYMBOL_GPL(pv_time_ops);
18450 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18451 index 35ccf75..7a15747 100644
18452 --- a/arch/x86/kernel/pci-iommu_table.c
18453 +++ b/arch/x86/kernel/pci-iommu_table.c
18454 @@ -2,7 +2,7 @@
18455 #include <asm/iommu_table.h>
18456 #include <linux/string.h>
18457 #include <linux/kallsyms.h>
18458 -
18459 +#include <linux/sched.h>
18460
18461 #define DEBUG 1
18462
18463 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18464 index 735279e..5008677 100644
18465 --- a/arch/x86/kernel/process.c
18466 +++ b/arch/x86/kernel/process.c
18467 @@ -34,7 +34,8 @@
18468 * section. Since TSS's are completely CPU-local, we want them
18469 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18470 */
18471 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18472 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18473 +EXPORT_SYMBOL(init_tss);
18474
18475 #ifdef CONFIG_X86_64
18476 static DEFINE_PER_CPU(unsigned char, is_idle);
18477 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
18478 task_xstate_cachep =
18479 kmem_cache_create("task_xstate", xstate_size,
18480 __alignof__(union thread_xstate),
18481 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18482 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18483 }
18484
18485 static inline void drop_fpu(struct task_struct *tsk)
18486 @@ -115,7 +116,7 @@ void exit_thread(void)
18487 unsigned long *bp = t->io_bitmap_ptr;
18488
18489 if (bp) {
18490 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18491 + struct tss_struct *tss = init_tss + get_cpu();
18492
18493 t->io_bitmap_ptr = NULL;
18494 clear_thread_flag(TIF_IO_BITMAP);
18495 @@ -147,7 +148,7 @@ void show_regs_common(void)
18496
18497 printk(KERN_CONT "\n");
18498 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18499 - current->pid, current->comm, print_tainted(),
18500 + task_pid_nr(current), current->comm, print_tainted(),
18501 init_utsname()->release,
18502 (int)strcspn(init_utsname()->version, " "),
18503 init_utsname()->version);
18504 @@ -161,6 +162,9 @@ void flush_thread(void)
18505 {
18506 struct task_struct *tsk = current;
18507
18508 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18509 + loadsegment(gs, 0);
18510 +#endif
18511 flush_ptrace_hw_breakpoint(tsk);
18512 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18513 drop_fpu(tsk);
18514 @@ -318,10 +322,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18515 regs.di = (unsigned long) arg;
18516
18517 #ifdef CONFIG_X86_32
18518 - regs.ds = __USER_DS;
18519 - regs.es = __USER_DS;
18520 + regs.ds = __KERNEL_DS;
18521 + regs.es = __KERNEL_DS;
18522 regs.fs = __KERNEL_PERCPU;
18523 - regs.gs = __KERNEL_STACK_CANARY;
18524 + savesegment(gs, regs.gs);
18525 #else
18526 regs.ss = __KERNEL_DS;
18527 #endif
18528 @@ -407,7 +411,7 @@ static void __exit_idle(void)
18529 void exit_idle(void)
18530 {
18531 /* idle loop has pid 0 */
18532 - if (current->pid)
18533 + if (task_pid_nr(current))
18534 return;
18535 __exit_idle();
18536 }
18537 @@ -516,7 +520,7 @@ bool set_pm_idle_to_default(void)
18538
18539 return ret;
18540 }
18541 -void stop_this_cpu(void *dummy)
18542 +__noreturn void stop_this_cpu(void *dummy)
18543 {
18544 local_irq_disable();
18545 /*
18546 @@ -746,16 +750,37 @@ static int __init idle_setup(char *str)
18547 }
18548 early_param("idle", idle_setup);
18549
18550 -unsigned long arch_align_stack(unsigned long sp)
18551 +#ifdef CONFIG_PAX_RANDKSTACK
18552 +void pax_randomize_kstack(struct pt_regs *regs)
18553 {
18554 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18555 - sp -= get_random_int() % 8192;
18556 - return sp & ~0xf;
18557 -}
18558 + struct thread_struct *thread = &current->thread;
18559 + unsigned long time;
18560
18561 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18562 -{
18563 - unsigned long range_end = mm->brk + 0x02000000;
18564 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18565 -}
18566 + if (!randomize_va_space)
18567 + return;
18568 +
18569 + if (v8086_mode(regs))
18570 + return;
18571
18572 + rdtscl(time);
18573 +
18574 + /* P4 seems to return a 0 LSB, ignore it */
18575 +#ifdef CONFIG_MPENTIUM4
18576 + time &= 0x3EUL;
18577 + time <<= 2;
18578 +#elif defined(CONFIG_X86_64)
18579 + time &= 0xFUL;
18580 + time <<= 4;
18581 +#else
18582 + time &= 0x1FUL;
18583 + time <<= 3;
18584 +#endif
18585 +
18586 + thread->sp0 ^= time;
18587 + load_sp0(init_tss + smp_processor_id(), thread);
18588 +
18589 +#ifdef CONFIG_X86_64
18590 + this_cpu_write(kernel_stack, thread->sp0);
18591 +#endif
18592 +}
18593 +#endif
18594 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18595 index 516fa18..80bd9e6 100644
18596 --- a/arch/x86/kernel/process_32.c
18597 +++ b/arch/x86/kernel/process_32.c
18598 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18599 unsigned long thread_saved_pc(struct task_struct *tsk)
18600 {
18601 return ((unsigned long *)tsk->thread.sp)[3];
18602 +//XXX return tsk->thread.eip;
18603 }
18604
18605 void __show_regs(struct pt_regs *regs, int all)
18606 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18607 unsigned long sp;
18608 unsigned short ss, gs;
18609
18610 - if (user_mode_vm(regs)) {
18611 + if (user_mode(regs)) {
18612 sp = regs->sp;
18613 ss = regs->ss & 0xffff;
18614 - gs = get_user_gs(regs);
18615 } else {
18616 sp = kernel_stack_pointer(regs);
18617 savesegment(ss, ss);
18618 - savesegment(gs, gs);
18619 }
18620 + gs = get_user_gs(regs);
18621
18622 show_regs_common();
18623
18624 @@ -134,13 +134,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18625 struct task_struct *tsk;
18626 int err;
18627
18628 - childregs = task_pt_regs(p);
18629 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18630 *childregs = *regs;
18631 childregs->ax = 0;
18632 childregs->sp = sp;
18633
18634 p->thread.sp = (unsigned long) childregs;
18635 p->thread.sp0 = (unsigned long) (childregs+1);
18636 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18637
18638 p->thread.ip = (unsigned long) ret_from_fork;
18639
18640 @@ -231,7 +232,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18641 struct thread_struct *prev = &prev_p->thread,
18642 *next = &next_p->thread;
18643 int cpu = smp_processor_id();
18644 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18645 + struct tss_struct *tss = init_tss + cpu;
18646 fpu_switch_t fpu;
18647
18648 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18649 @@ -255,6 +256,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18650 */
18651 lazy_save_gs(prev->gs);
18652
18653 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18654 + __set_fs(task_thread_info(next_p)->addr_limit);
18655 +#endif
18656 +
18657 /*
18658 * Load the per-thread Thread-Local Storage descriptor.
18659 */
18660 @@ -285,6 +290,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18661 */
18662 arch_end_context_switch(next_p);
18663
18664 + this_cpu_write(current_task, next_p);
18665 + this_cpu_write(current_tinfo, &next_p->tinfo);
18666 +
18667 /*
18668 * Restore %gs if needed (which is common)
18669 */
18670 @@ -293,8 +301,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18671
18672 switch_fpu_finish(next_p, fpu);
18673
18674 - this_cpu_write(current_task, next_p);
18675 -
18676 return prev_p;
18677 }
18678
18679 @@ -324,4 +330,3 @@ unsigned long get_wchan(struct task_struct *p)
18680 } while (count++ < 16);
18681 return 0;
18682 }
18683 -
18684 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18685 index 61cdf7f..797f06a 100644
18686 --- a/arch/x86/kernel/process_64.c
18687 +++ b/arch/x86/kernel/process_64.c
18688 @@ -153,8 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18689 struct pt_regs *childregs;
18690 struct task_struct *me = current;
18691
18692 - childregs = ((struct pt_regs *)
18693 - (THREAD_SIZE + task_stack_page(p))) - 1;
18694 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18695 *childregs = *regs;
18696
18697 childregs->ax = 0;
18698 @@ -166,6 +165,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18699 p->thread.sp = (unsigned long) childregs;
18700 p->thread.sp0 = (unsigned long) (childregs+1);
18701 p->thread.usersp = me->thread.usersp;
18702 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18703
18704 set_tsk_thread_flag(p, TIF_FORK);
18705
18706 @@ -271,7 +271,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18707 struct thread_struct *prev = &prev_p->thread;
18708 struct thread_struct *next = &next_p->thread;
18709 int cpu = smp_processor_id();
18710 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18711 + struct tss_struct *tss = init_tss + cpu;
18712 unsigned fsindex, gsindex;
18713 fpu_switch_t fpu;
18714
18715 @@ -353,10 +353,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18716 prev->usersp = this_cpu_read(old_rsp);
18717 this_cpu_write(old_rsp, next->usersp);
18718 this_cpu_write(current_task, next_p);
18719 + this_cpu_write(current_tinfo, &next_p->tinfo);
18720
18721 - this_cpu_write(kernel_stack,
18722 - (unsigned long)task_stack_page(next_p) +
18723 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18724 + this_cpu_write(kernel_stack, next->sp0);
18725
18726 /*
18727 * Now maybe reload the debug registers and handle I/O bitmaps
18728 @@ -425,12 +424,11 @@ unsigned long get_wchan(struct task_struct *p)
18729 if (!p || p == current || p->state == TASK_RUNNING)
18730 return 0;
18731 stack = (unsigned long)task_stack_page(p);
18732 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18733 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18734 return 0;
18735 fp = *(u64 *)(p->thread.sp);
18736 do {
18737 - if (fp < (unsigned long)stack ||
18738 - fp >= (unsigned long)stack+THREAD_SIZE)
18739 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18740 return 0;
18741 ip = *(u64 *)(fp+8);
18742 if (!in_sched_functions(ip))
18743 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18744 index c4c6a5c..905f440 100644
18745 --- a/arch/x86/kernel/ptrace.c
18746 +++ b/arch/x86/kernel/ptrace.c
18747 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18748 unsigned long addr, unsigned long data)
18749 {
18750 int ret;
18751 - unsigned long __user *datap = (unsigned long __user *)data;
18752 + unsigned long __user *datap = (__force unsigned long __user *)data;
18753
18754 switch (request) {
18755 /* read the word at location addr in the USER area. */
18756 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18757 if ((int) addr < 0)
18758 return -EIO;
18759 ret = do_get_thread_area(child, addr,
18760 - (struct user_desc __user *)data);
18761 + (__force struct user_desc __user *) data);
18762 break;
18763
18764 case PTRACE_SET_THREAD_AREA:
18765 if ((int) addr < 0)
18766 return -EIO;
18767 ret = do_set_thread_area(child, addr,
18768 - (struct user_desc __user *)data, 0);
18769 + (__force struct user_desc __user *) data, 0);
18770 break;
18771 #endif
18772
18773 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18774 memset(info, 0, sizeof(*info));
18775 info->si_signo = SIGTRAP;
18776 info->si_code = si_code;
18777 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18778 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18779 }
18780
18781 void user_single_step_siginfo(struct task_struct *tsk,
18782 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18783 # define IS_IA32 0
18784 #endif
18785
18786 +#ifdef CONFIG_GRKERNSEC_SETXID
18787 +extern void gr_delayed_cred_worker(void);
18788 +#endif
18789 +
18790 /*
18791 * We must return the syscall number to actually look up in the table.
18792 * This can be -1L to skip running any syscall at all.
18793 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18794 {
18795 long ret = 0;
18796
18797 +#ifdef CONFIG_GRKERNSEC_SETXID
18798 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18799 + gr_delayed_cred_worker();
18800 +#endif
18801 +
18802 /*
18803 * If we stepped into a sysenter/syscall insn, it trapped in
18804 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18805 @@ -1511,6 +1520,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18806 {
18807 bool step;
18808
18809 +#ifdef CONFIG_GRKERNSEC_SETXID
18810 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18811 + gr_delayed_cred_worker();
18812 +#endif
18813 +
18814 audit_syscall_exit(regs);
18815
18816 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18817 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18818 index 42eb330..139955c 100644
18819 --- a/arch/x86/kernel/pvclock.c
18820 +++ b/arch/x86/kernel/pvclock.c
18821 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18822 return pv_tsc_khz;
18823 }
18824
18825 -static atomic64_t last_value = ATOMIC64_INIT(0);
18826 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18827
18828 void pvclock_resume(void)
18829 {
18830 - atomic64_set(&last_value, 0);
18831 + atomic64_set_unchecked(&last_value, 0);
18832 }
18833
18834 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18835 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18836 * updating at the same time, and one of them could be slightly behind,
18837 * making the assumption that last_value always go forward fail to hold.
18838 */
18839 - last = atomic64_read(&last_value);
18840 + last = atomic64_read_unchecked(&last_value);
18841 do {
18842 if (ret < last)
18843 return last;
18844 - last = atomic64_cmpxchg(&last_value, last, ret);
18845 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18846 } while (unlikely(last != ret));
18847
18848 return ret;
18849 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18850 index 5de92f1..776788d 100644
18851 --- a/arch/x86/kernel/reboot.c
18852 +++ b/arch/x86/kernel/reboot.c
18853 @@ -36,7 +36,7 @@ void (*pm_power_off)(void);
18854 EXPORT_SYMBOL(pm_power_off);
18855
18856 static const struct desc_ptr no_idt = {};
18857 -static int reboot_mode;
18858 +static unsigned short reboot_mode;
18859 enum reboot_type reboot_type = BOOT_ACPI;
18860 int reboot_force;
18861
18862 @@ -157,11 +157,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
18863 return 0;
18864 }
18865
18866 -void machine_real_restart(unsigned int type)
18867 +__noreturn void machine_real_restart(unsigned int type)
18868 {
18869 void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
18870 real_mode_header->machine_real_restart_asm;
18871
18872 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18873 + struct desc_struct *gdt;
18874 +#endif
18875 +
18876 local_irq_disable();
18877
18878 /*
18879 @@ -189,10 +193,38 @@ void machine_real_restart(unsigned int type)
18880 * boot)". This seems like a fairly standard thing that gets set by
18881 * REBOOT.COM programs, and the previous reset routine did this
18882 * too. */
18883 - *((unsigned short *)0x472) = reboot_mode;
18884 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18885
18886 /* Jump to the identity-mapped low memory code */
18887 +
18888 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18889 + gdt = get_cpu_gdt_table(smp_processor_id());
18890 + pax_open_kernel();
18891 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18892 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18893 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18894 + loadsegment(ds, __KERNEL_DS);
18895 + loadsegment(es, __KERNEL_DS);
18896 + loadsegment(ss, __KERNEL_DS);
18897 +#endif
18898 +#ifdef CONFIG_PAX_KERNEXEC
18899 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18900 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18901 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18902 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18903 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18904 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18905 +#endif
18906 + pax_close_kernel();
18907 +#endif
18908 +
18909 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18910 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18911 + unreachable();
18912 +#else
18913 restart_lowmem(type);
18914 +#endif
18915 +
18916 }
18917 #ifdef CONFIG_APM_MODULE
18918 EXPORT_SYMBOL(machine_real_restart);
18919 @@ -543,7 +575,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18920 * try to force a triple fault and then cycle between hitting the keyboard
18921 * controller and doing that
18922 */
18923 -static void native_machine_emergency_restart(void)
18924 +static void __noreturn native_machine_emergency_restart(void)
18925 {
18926 int i;
18927 int attempt = 0;
18928 @@ -670,13 +702,13 @@ void native_machine_shutdown(void)
18929 #endif
18930 }
18931
18932 -static void __machine_emergency_restart(int emergency)
18933 +static __noreturn void __machine_emergency_restart(int emergency)
18934 {
18935 reboot_emergency = emergency;
18936 machine_ops.emergency_restart();
18937 }
18938
18939 -static void native_machine_restart(char *__unused)
18940 +static void __noreturn native_machine_restart(char *__unused)
18941 {
18942 printk("machine restart\n");
18943
18944 @@ -685,7 +717,7 @@ static void native_machine_restart(char *__unused)
18945 __machine_emergency_restart(0);
18946 }
18947
18948 -static void native_machine_halt(void)
18949 +static void __noreturn native_machine_halt(void)
18950 {
18951 /* Stop other cpus and apics */
18952 machine_shutdown();
18953 @@ -695,7 +727,7 @@ static void native_machine_halt(void)
18954 stop_this_cpu(NULL);
18955 }
18956
18957 -static void native_machine_power_off(void)
18958 +static void __noreturn native_machine_power_off(void)
18959 {
18960 if (pm_power_off) {
18961 if (!reboot_force)
18962 @@ -704,6 +736,7 @@ static void native_machine_power_off(void)
18963 }
18964 /* A fallback in case there is no PM info available */
18965 tboot_shutdown(TB_SHUTDOWN_HALT);
18966 + unreachable();
18967 }
18968
18969 struct machine_ops machine_ops = {
18970 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18971 index 7a6f3b3..bed145d7 100644
18972 --- a/arch/x86/kernel/relocate_kernel_64.S
18973 +++ b/arch/x86/kernel/relocate_kernel_64.S
18974 @@ -11,6 +11,7 @@
18975 #include <asm/kexec.h>
18976 #include <asm/processor-flags.h>
18977 #include <asm/pgtable_types.h>
18978 +#include <asm/alternative-asm.h>
18979
18980 /*
18981 * Must be relocatable PIC code callable as a C function
18982 @@ -160,13 +161,14 @@ identity_mapped:
18983 xorq %rbp, %rbp
18984 xorq %r8, %r8
18985 xorq %r9, %r9
18986 - xorq %r10, %r9
18987 + xorq %r10, %r10
18988 xorq %r11, %r11
18989 xorq %r12, %r12
18990 xorq %r13, %r13
18991 xorq %r14, %r14
18992 xorq %r15, %r15
18993
18994 + pax_force_retaddr 0, 1
18995 ret
18996
18997 1:
18998 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18999 index 16be6dc..4686132 100644
19000 --- a/arch/x86/kernel/setup.c
19001 +++ b/arch/x86/kernel/setup.c
19002 @@ -440,7 +440,7 @@ static void __init parse_setup_data(void)
19003
19004 switch (data->type) {
19005 case SETUP_E820_EXT:
19006 - parse_e820_ext(data);
19007 + parse_e820_ext((struct setup_data __force_kernel *)data);
19008 break;
19009 case SETUP_DTB:
19010 add_dtb(pa_data);
19011 @@ -632,7 +632,7 @@ static void __init trim_bios_range(void)
19012 * area (640->1Mb) as ram even though it is not.
19013 * take them out.
19014 */
19015 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19016 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19017 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19018 }
19019
19020 @@ -755,14 +755,14 @@ void __init setup_arch(char **cmdline_p)
19021
19022 if (!boot_params.hdr.root_flags)
19023 root_mountflags &= ~MS_RDONLY;
19024 - init_mm.start_code = (unsigned long) _text;
19025 - init_mm.end_code = (unsigned long) _etext;
19026 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19027 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19028 init_mm.end_data = (unsigned long) _edata;
19029 init_mm.brk = _brk_end;
19030
19031 - code_resource.start = virt_to_phys(_text);
19032 - code_resource.end = virt_to_phys(_etext)-1;
19033 - data_resource.start = virt_to_phys(_etext);
19034 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19035 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19036 + data_resource.start = virt_to_phys(_sdata);
19037 data_resource.end = virt_to_phys(_edata)-1;
19038 bss_resource.start = virt_to_phys(&__bss_start);
19039 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19040 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19041 index 5a98aa2..2f9288d 100644
19042 --- a/arch/x86/kernel/setup_percpu.c
19043 +++ b/arch/x86/kernel/setup_percpu.c
19044 @@ -21,19 +21,17 @@
19045 #include <asm/cpu.h>
19046 #include <asm/stackprotector.h>
19047
19048 -DEFINE_PER_CPU(int, cpu_number);
19049 +#ifdef CONFIG_SMP
19050 +DEFINE_PER_CPU(unsigned int, cpu_number);
19051 EXPORT_PER_CPU_SYMBOL(cpu_number);
19052 +#endif
19053
19054 -#ifdef CONFIG_X86_64
19055 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19056 -#else
19057 -#define BOOT_PERCPU_OFFSET 0
19058 -#endif
19059
19060 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19061 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19062
19063 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19064 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19065 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19066 };
19067 EXPORT_SYMBOL(__per_cpu_offset);
19068 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19069 {
19070 #ifdef CONFIG_X86_32
19071 struct desc_struct gdt;
19072 + unsigned long base = per_cpu_offset(cpu);
19073
19074 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19075 - 0x2 | DESCTYPE_S, 0x8);
19076 - gdt.s = 1;
19077 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19078 + 0x83 | DESCTYPE_S, 0xC);
19079 write_gdt_entry(get_cpu_gdt_table(cpu),
19080 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19081 #endif
19082 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19083 /* alrighty, percpu areas up and running */
19084 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19085 for_each_possible_cpu(cpu) {
19086 +#ifdef CONFIG_CC_STACKPROTECTOR
19087 +#ifdef CONFIG_X86_32
19088 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19089 +#endif
19090 +#endif
19091 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19092 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19093 per_cpu(cpu_number, cpu) = cpu;
19094 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19095 */
19096 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19097 #endif
19098 +#ifdef CONFIG_CC_STACKPROTECTOR
19099 +#ifdef CONFIG_X86_32
19100 + if (!cpu)
19101 + per_cpu(stack_canary.canary, cpu) = canary;
19102 +#endif
19103 +#endif
19104 /*
19105 * Up to this point, the boot CPU has been using .init.data
19106 * area. Reload any changed state for the boot CPU.
19107 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19108 index 21af737..fb45e22 100644
19109 --- a/arch/x86/kernel/signal.c
19110 +++ b/arch/x86/kernel/signal.c
19111 @@ -191,7 +191,7 @@ static unsigned long align_sigframe(unsigned long sp)
19112 * Align the stack pointer according to the i386 ABI,
19113 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19114 */
19115 - sp = ((sp + 4) & -16ul) - 4;
19116 + sp = ((sp - 12) & -16ul) - 4;
19117 #else /* !CONFIG_X86_32 */
19118 sp = round_down(sp, 16) - 8;
19119 #endif
19120 @@ -242,11 +242,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19121 * Return an always-bogus address instead so we will die with SIGSEGV.
19122 */
19123 if (onsigstack && !likely(on_sig_stack(sp)))
19124 - return (void __user *)-1L;
19125 + return (__force void __user *)-1L;
19126
19127 /* save i387 state */
19128 if (used_math() && save_i387_xstate(*fpstate) < 0)
19129 - return (void __user *)-1L;
19130 + return (__force void __user *)-1L;
19131
19132 return (void __user *)sp;
19133 }
19134 @@ -301,9 +301,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19135 }
19136
19137 if (current->mm->context.vdso)
19138 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19139 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19140 else
19141 - restorer = &frame->retcode;
19142 + restorer = (void __user *)&frame->retcode;
19143 if (ka->sa.sa_flags & SA_RESTORER)
19144 restorer = ka->sa.sa_restorer;
19145
19146 @@ -317,7 +317,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19147 * reasons and because gdb uses it as a signature to notice
19148 * signal handler stack frames.
19149 */
19150 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19151 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19152
19153 if (err)
19154 return -EFAULT;
19155 @@ -371,7 +371,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19156 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19157
19158 /* Set up to return from userspace. */
19159 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19160 + if (current->mm->context.vdso)
19161 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19162 + else
19163 + restorer = (void __user *)&frame->retcode;
19164 if (ka->sa.sa_flags & SA_RESTORER)
19165 restorer = ka->sa.sa_restorer;
19166 put_user_ex(restorer, &frame->pretcode);
19167 @@ -383,7 +386,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19168 * reasons and because gdb uses it as a signature to notice
19169 * signal handler stack frames.
19170 */
19171 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19172 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19173 } put_user_catch(err);
19174
19175 if (err)
19176 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19177 index 7bd8a08..2659b5b 100644
19178 --- a/arch/x86/kernel/smpboot.c
19179 +++ b/arch/x86/kernel/smpboot.c
19180 @@ -679,6 +679,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19181 idle->thread.sp = (unsigned long) (((struct pt_regs *)
19182 (THREAD_SIZE + task_stack_page(idle))) - 1);
19183 per_cpu(current_task, cpu) = idle;
19184 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
19185
19186 #ifdef CONFIG_X86_32
19187 /* Stack for startup_32 can be just as for start_secondary onwards */
19188 @@ -686,11 +687,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19189 #else
19190 clear_tsk_thread_flag(idle, TIF_FORK);
19191 initial_gs = per_cpu_offset(cpu);
19192 - per_cpu(kernel_stack, cpu) =
19193 - (unsigned long)task_stack_page(idle) -
19194 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19195 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
19196 #endif
19197 +
19198 + pax_open_kernel();
19199 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19200 + pax_close_kernel();
19201 +
19202 initial_code = (unsigned long)start_secondary;
19203 stack_start = idle->thread.sp;
19204
19205 @@ -826,6 +829,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
19206
19207 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19208
19209 +#ifdef CONFIG_PAX_PER_CPU_PGD
19210 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19211 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19212 + KERNEL_PGD_PTRS);
19213 +#endif
19214 +
19215 err = do_boot_cpu(apicid, cpu, tidle);
19216 if (err) {
19217 pr_debug("do_boot_cpu failed %d\n", err);
19218 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19219 index c346d11..d43b163 100644
19220 --- a/arch/x86/kernel/step.c
19221 +++ b/arch/x86/kernel/step.c
19222 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19223 struct desc_struct *desc;
19224 unsigned long base;
19225
19226 - seg &= ~7UL;
19227 + seg >>= 3;
19228
19229 mutex_lock(&child->mm->context.lock);
19230 - if (unlikely((seg >> 3) >= child->mm->context.size))
19231 + if (unlikely(seg >= child->mm->context.size))
19232 addr = -1L; /* bogus selector, access would fault */
19233 else {
19234 desc = child->mm->context.ldt + seg;
19235 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19236 addr += base;
19237 }
19238 mutex_unlock(&child->mm->context.lock);
19239 - }
19240 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19241 + addr = ktla_ktva(addr);
19242
19243 return addr;
19244 }
19245 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19246 unsigned char opcode[15];
19247 unsigned long addr = convert_ip_to_linear(child, regs);
19248
19249 + if (addr == -EINVAL)
19250 + return 0;
19251 +
19252 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19253 for (i = 0; i < copied; i++) {
19254 switch (opcode[i]) {
19255 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19256 index 0b0cb5f..db6b9ed 100644
19257 --- a/arch/x86/kernel/sys_i386_32.c
19258 +++ b/arch/x86/kernel/sys_i386_32.c
19259 @@ -24,17 +24,224 @@
19260
19261 #include <asm/syscalls.h>
19262
19263 -/*
19264 - * Do a system call from kernel instead of calling sys_execve so we
19265 - * end up with proper pt_regs.
19266 - */
19267 -int kernel_execve(const char *filename,
19268 - const char *const argv[],
19269 - const char *const envp[])
19270 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19271 {
19272 - long __res;
19273 - asm volatile ("int $0x80"
19274 - : "=a" (__res)
19275 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19276 - return __res;
19277 + unsigned long pax_task_size = TASK_SIZE;
19278 +
19279 +#ifdef CONFIG_PAX_SEGMEXEC
19280 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19281 + pax_task_size = SEGMEXEC_TASK_SIZE;
19282 +#endif
19283 +
19284 + if (len > pax_task_size || addr > pax_task_size - len)
19285 + return -EINVAL;
19286 +
19287 + return 0;
19288 +}
19289 +
19290 +unsigned long
19291 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19292 + unsigned long len, unsigned long pgoff, unsigned long flags)
19293 +{
19294 + struct mm_struct *mm = current->mm;
19295 + struct vm_area_struct *vma;
19296 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19297 +
19298 +#ifdef CONFIG_PAX_SEGMEXEC
19299 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19300 + pax_task_size = SEGMEXEC_TASK_SIZE;
19301 +#endif
19302 +
19303 + pax_task_size -= PAGE_SIZE;
19304 +
19305 + if (len > pax_task_size)
19306 + return -ENOMEM;
19307 +
19308 + if (flags & MAP_FIXED)
19309 + return addr;
19310 +
19311 +#ifdef CONFIG_PAX_RANDMMAP
19312 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19313 +#endif
19314 +
19315 + if (addr) {
19316 + addr = PAGE_ALIGN(addr);
19317 + if (pax_task_size - len >= addr) {
19318 + vma = find_vma(mm, addr);
19319 + if (check_heap_stack_gap(vma, addr, len))
19320 + return addr;
19321 + }
19322 + }
19323 + if (len > mm->cached_hole_size) {
19324 + start_addr = addr = mm->free_area_cache;
19325 + } else {
19326 + start_addr = addr = mm->mmap_base;
19327 + mm->cached_hole_size = 0;
19328 + }
19329 +
19330 +#ifdef CONFIG_PAX_PAGEEXEC
19331 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19332 + start_addr = 0x00110000UL;
19333 +
19334 +#ifdef CONFIG_PAX_RANDMMAP
19335 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19336 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19337 +#endif
19338 +
19339 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19340 + start_addr = addr = mm->mmap_base;
19341 + else
19342 + addr = start_addr;
19343 + }
19344 +#endif
19345 +
19346 +full_search:
19347 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19348 + /* At this point: (!vma || addr < vma->vm_end). */
19349 + if (pax_task_size - len < addr) {
19350 + /*
19351 + * Start a new search - just in case we missed
19352 + * some holes.
19353 + */
19354 + if (start_addr != mm->mmap_base) {
19355 + start_addr = addr = mm->mmap_base;
19356 + mm->cached_hole_size = 0;
19357 + goto full_search;
19358 + }
19359 + return -ENOMEM;
19360 + }
19361 + if (check_heap_stack_gap(vma, addr, len))
19362 + break;
19363 + if (addr + mm->cached_hole_size < vma->vm_start)
19364 + mm->cached_hole_size = vma->vm_start - addr;
19365 + addr = vma->vm_end;
19366 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19367 + start_addr = addr = mm->mmap_base;
19368 + mm->cached_hole_size = 0;
19369 + goto full_search;
19370 + }
19371 + }
19372 +
19373 + /*
19374 + * Remember the place where we stopped the search:
19375 + */
19376 + mm->free_area_cache = addr + len;
19377 + return addr;
19378 +}
19379 +
19380 +unsigned long
19381 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19382 + const unsigned long len, const unsigned long pgoff,
19383 + const unsigned long flags)
19384 +{
19385 + struct vm_area_struct *vma;
19386 + struct mm_struct *mm = current->mm;
19387 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19388 +
19389 +#ifdef CONFIG_PAX_SEGMEXEC
19390 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19391 + pax_task_size = SEGMEXEC_TASK_SIZE;
19392 +#endif
19393 +
19394 + pax_task_size -= PAGE_SIZE;
19395 +
19396 + /* requested length too big for entire address space */
19397 + if (len > pax_task_size)
19398 + return -ENOMEM;
19399 +
19400 + if (flags & MAP_FIXED)
19401 + return addr;
19402 +
19403 +#ifdef CONFIG_PAX_PAGEEXEC
19404 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19405 + goto bottomup;
19406 +#endif
19407 +
19408 +#ifdef CONFIG_PAX_RANDMMAP
19409 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19410 +#endif
19411 +
19412 + /* requesting a specific address */
19413 + if (addr) {
19414 + addr = PAGE_ALIGN(addr);
19415 + if (pax_task_size - len >= addr) {
19416 + vma = find_vma(mm, addr);
19417 + if (check_heap_stack_gap(vma, addr, len))
19418 + return addr;
19419 + }
19420 + }
19421 +
19422 + /* check if free_area_cache is useful for us */
19423 + if (len <= mm->cached_hole_size) {
19424 + mm->cached_hole_size = 0;
19425 + mm->free_area_cache = mm->mmap_base;
19426 + }
19427 +
19428 + /* either no address requested or can't fit in requested address hole */
19429 + addr = mm->free_area_cache;
19430 +
19431 + /* make sure it can fit in the remaining address space */
19432 + if (addr > len) {
19433 + vma = find_vma(mm, addr-len);
19434 + if (check_heap_stack_gap(vma, addr - len, len))
19435 + /* remember the address as a hint for next time */
19436 + return (mm->free_area_cache = addr-len);
19437 + }
19438 +
19439 + if (mm->mmap_base < len)
19440 + goto bottomup;
19441 +
19442 + addr = mm->mmap_base-len;
19443 +
19444 + do {
19445 + /*
19446 + * Lookup failure means no vma is above this address,
19447 + * else if new region fits below vma->vm_start,
19448 + * return with success:
19449 + */
19450 + vma = find_vma(mm, addr);
19451 + if (check_heap_stack_gap(vma, addr, len))
19452 + /* remember the address as a hint for next time */
19453 + return (mm->free_area_cache = addr);
19454 +
19455 + /* remember the largest hole we saw so far */
19456 + if (addr + mm->cached_hole_size < vma->vm_start)
19457 + mm->cached_hole_size = vma->vm_start - addr;
19458 +
19459 + /* try just below the current vma->vm_start */
19460 + addr = skip_heap_stack_gap(vma, len);
19461 + } while (!IS_ERR_VALUE(addr));
19462 +
19463 +bottomup:
19464 + /*
19465 + * A failed mmap() very likely causes application failure,
19466 + * so fall back to the bottom-up function here. This scenario
19467 + * can happen with large stack limits and large mmap()
19468 + * allocations.
19469 + */
19470 +
19471 +#ifdef CONFIG_PAX_SEGMEXEC
19472 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19473 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19474 + else
19475 +#endif
19476 +
19477 + mm->mmap_base = TASK_UNMAPPED_BASE;
19478 +
19479 +#ifdef CONFIG_PAX_RANDMMAP
19480 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19481 + mm->mmap_base += mm->delta_mmap;
19482 +#endif
19483 +
19484 + mm->free_area_cache = mm->mmap_base;
19485 + mm->cached_hole_size = ~0UL;
19486 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19487 + /*
19488 + * Restore the topdown base:
19489 + */
19490 + mm->mmap_base = base;
19491 + mm->free_area_cache = base;
19492 + mm->cached_hole_size = ~0UL;
19493 +
19494 + return addr;
19495 }
19496 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19497 index b4d3c39..82bb73b 100644
19498 --- a/arch/x86/kernel/sys_x86_64.c
19499 +++ b/arch/x86/kernel/sys_x86_64.c
19500 @@ -95,8 +95,8 @@ out:
19501 return error;
19502 }
19503
19504 -static void find_start_end(unsigned long flags, unsigned long *begin,
19505 - unsigned long *end)
19506 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19507 + unsigned long *begin, unsigned long *end)
19508 {
19509 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19510 unsigned long new_begin;
19511 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19512 *begin = new_begin;
19513 }
19514 } else {
19515 - *begin = TASK_UNMAPPED_BASE;
19516 + *begin = mm->mmap_base;
19517 *end = TASK_SIZE;
19518 }
19519 }
19520 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19521 if (flags & MAP_FIXED)
19522 return addr;
19523
19524 - find_start_end(flags, &begin, &end);
19525 + find_start_end(mm, flags, &begin, &end);
19526
19527 if (len > end)
19528 return -ENOMEM;
19529
19530 +#ifdef CONFIG_PAX_RANDMMAP
19531 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19532 +#endif
19533 +
19534 if (addr) {
19535 addr = PAGE_ALIGN(addr);
19536 vma = find_vma(mm, addr);
19537 - if (end - len >= addr &&
19538 - (!vma || addr + len <= vma->vm_start))
19539 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19540 return addr;
19541 }
19542 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19543 @@ -172,7 +175,7 @@ full_search:
19544 }
19545 return -ENOMEM;
19546 }
19547 - if (!vma || addr + len <= vma->vm_start) {
19548 + if (check_heap_stack_gap(vma, addr, len)) {
19549 /*
19550 * Remember the place where we stopped the search:
19551 */
19552 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19553 {
19554 struct vm_area_struct *vma;
19555 struct mm_struct *mm = current->mm;
19556 - unsigned long addr = addr0, start_addr;
19557 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19558
19559 /* requested length too big for entire address space */
19560 if (len > TASK_SIZE)
19561 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19562 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19563 goto bottomup;
19564
19565 +#ifdef CONFIG_PAX_RANDMMAP
19566 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19567 +#endif
19568 +
19569 /* requesting a specific address */
19570 if (addr) {
19571 addr = PAGE_ALIGN(addr);
19572 - vma = find_vma(mm, addr);
19573 - if (TASK_SIZE - len >= addr &&
19574 - (!vma || addr + len <= vma->vm_start))
19575 - return addr;
19576 + if (TASK_SIZE - len >= addr) {
19577 + vma = find_vma(mm, addr);
19578 + if (check_heap_stack_gap(vma, addr, len))
19579 + return addr;
19580 + }
19581 }
19582
19583 /* check if free_area_cache is useful for us */
19584 @@ -240,7 +248,7 @@ try_again:
19585 * return with success:
19586 */
19587 vma = find_vma(mm, addr);
19588 - if (!vma || addr+len <= vma->vm_start)
19589 + if (check_heap_stack_gap(vma, addr, len))
19590 /* remember the address as a hint for next time */
19591 return mm->free_area_cache = addr;
19592
19593 @@ -249,8 +257,8 @@ try_again:
19594 mm->cached_hole_size = vma->vm_start - addr;
19595
19596 /* try just below the current vma->vm_start */
19597 - addr = vma->vm_start-len;
19598 - } while (len < vma->vm_start);
19599 + addr = skip_heap_stack_gap(vma, len);
19600 + } while (!IS_ERR_VALUE(addr));
19601
19602 fail:
19603 /*
19604 @@ -270,13 +278,21 @@ bottomup:
19605 * can happen with large stack limits and large mmap()
19606 * allocations.
19607 */
19608 + mm->mmap_base = TASK_UNMAPPED_BASE;
19609 +
19610 +#ifdef CONFIG_PAX_RANDMMAP
19611 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19612 + mm->mmap_base += mm->delta_mmap;
19613 +#endif
19614 +
19615 + mm->free_area_cache = mm->mmap_base;
19616 mm->cached_hole_size = ~0UL;
19617 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19618 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19619 /*
19620 * Restore the topdown base:
19621 */
19622 - mm->free_area_cache = mm->mmap_base;
19623 + mm->mmap_base = base;
19624 + mm->free_area_cache = base;
19625 mm->cached_hole_size = ~0UL;
19626
19627 return addr;
19628 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19629 index f84fe00..93fe08f 100644
19630 --- a/arch/x86/kernel/tboot.c
19631 +++ b/arch/x86/kernel/tboot.c
19632 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
19633
19634 void tboot_shutdown(u32 shutdown_type)
19635 {
19636 - void (*shutdown)(void);
19637 + void (* __noreturn shutdown)(void);
19638
19639 if (!tboot_enabled())
19640 return;
19641 @@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
19642
19643 switch_to_tboot_pt();
19644
19645 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19646 + shutdown = (void *)tboot->shutdown_entry;
19647 shutdown();
19648
19649 /* should not reach here */
19650 @@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19651 return 0;
19652 }
19653
19654 -static atomic_t ap_wfs_count;
19655 +static atomic_unchecked_t ap_wfs_count;
19656
19657 static int tboot_wait_for_aps(int num_aps)
19658 {
19659 @@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19660 {
19661 switch (action) {
19662 case CPU_DYING:
19663 - atomic_inc(&ap_wfs_count);
19664 + atomic_inc_unchecked(&ap_wfs_count);
19665 if (num_online_cpus() == 1)
19666 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19667 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19668 return NOTIFY_BAD;
19669 break;
19670 }
19671 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
19672
19673 tboot_create_trampoline();
19674
19675 - atomic_set(&ap_wfs_count, 0);
19676 + atomic_set_unchecked(&ap_wfs_count, 0);
19677 register_hotcpu_notifier(&tboot_cpu_notifier);
19678
19679 acpi_os_set_prepare_sleep(&tboot_sleep);
19680 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19681 index 24d3c91..d06b473 100644
19682 --- a/arch/x86/kernel/time.c
19683 +++ b/arch/x86/kernel/time.c
19684 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19685 {
19686 unsigned long pc = instruction_pointer(regs);
19687
19688 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19689 + if (!user_mode(regs) && in_lock_functions(pc)) {
19690 #ifdef CONFIG_FRAME_POINTER
19691 - return *(unsigned long *)(regs->bp + sizeof(long));
19692 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19693 #else
19694 unsigned long *sp =
19695 (unsigned long *)kernel_stack_pointer(regs);
19696 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19697 * or above a saved flags. Eflags has bits 22-31 zero,
19698 * kernel addresses don't.
19699 */
19700 +
19701 +#ifdef CONFIG_PAX_KERNEXEC
19702 + return ktla_ktva(sp[0]);
19703 +#else
19704 if (sp[0] >> 22)
19705 return sp[0];
19706 if (sp[1] >> 22)
19707 return sp[1];
19708 #endif
19709 +
19710 +#endif
19711 }
19712 return pc;
19713 }
19714 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19715 index 9d9d2f9..cad418a 100644
19716 --- a/arch/x86/kernel/tls.c
19717 +++ b/arch/x86/kernel/tls.c
19718 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19719 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19720 return -EINVAL;
19721
19722 +#ifdef CONFIG_PAX_SEGMEXEC
19723 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19724 + return -EINVAL;
19725 +#endif
19726 +
19727 set_tls_desc(p, idx, &info, 1);
19728
19729 return 0;
19730 @@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
19731
19732 if (kbuf)
19733 info = kbuf;
19734 - else if (__copy_from_user(infobuf, ubuf, count))
19735 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
19736 return -EFAULT;
19737 else
19738 info = infobuf;
19739 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19740 index 05b31d9..501d3ba 100644
19741 --- a/arch/x86/kernel/traps.c
19742 +++ b/arch/x86/kernel/traps.c
19743 @@ -67,12 +67,6 @@ asmlinkage int system_call(void);
19744
19745 /* Do we ignore FPU interrupts ? */
19746 char ignore_fpu_irq;
19747 -
19748 -/*
19749 - * The IDT has to be page-aligned to simplify the Pentium
19750 - * F0 0F bug workaround.
19751 - */
19752 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19753 #endif
19754
19755 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19756 @@ -105,13 +99,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19757 }
19758
19759 static void __kprobes
19760 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19761 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19762 long error_code, siginfo_t *info)
19763 {
19764 struct task_struct *tsk = current;
19765
19766 #ifdef CONFIG_X86_32
19767 - if (regs->flags & X86_VM_MASK) {
19768 + if (v8086_mode(regs)) {
19769 /*
19770 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19771 * On nmi (interrupt 2), do_trap should not be called.
19772 @@ -122,7 +116,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19773 }
19774 #endif
19775
19776 - if (!user_mode(regs))
19777 + if (!user_mode_novm(regs))
19778 goto kernel_trap;
19779
19780 #ifdef CONFIG_X86_32
19781 @@ -145,7 +139,7 @@ trap_signal:
19782 printk_ratelimit()) {
19783 printk(KERN_INFO
19784 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19785 - tsk->comm, tsk->pid, str,
19786 + tsk->comm, task_pid_nr(tsk), str,
19787 regs->ip, regs->sp, error_code);
19788 print_vma_addr(" in ", regs->ip);
19789 printk("\n");
19790 @@ -162,8 +156,20 @@ kernel_trap:
19791 if (!fixup_exception(regs)) {
19792 tsk->thread.error_code = error_code;
19793 tsk->thread.trap_nr = trapnr;
19794 +
19795 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19796 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19797 + str = "PAX: suspicious stack segment fault";
19798 +#endif
19799 +
19800 die(str, regs, error_code);
19801 }
19802 +
19803 +#ifdef CONFIG_PAX_REFCOUNT
19804 + if (trapnr == 4)
19805 + pax_report_refcount_overflow(regs);
19806 +#endif
19807 +
19808 return;
19809
19810 #ifdef CONFIG_X86_32
19811 @@ -256,14 +262,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19812 conditional_sti(regs);
19813
19814 #ifdef CONFIG_X86_32
19815 - if (regs->flags & X86_VM_MASK)
19816 + if (v8086_mode(regs))
19817 goto gp_in_vm86;
19818 #endif
19819
19820 tsk = current;
19821 - if (!user_mode(regs))
19822 + if (!user_mode_novm(regs))
19823 goto gp_in_kernel;
19824
19825 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19826 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19827 + struct mm_struct *mm = tsk->mm;
19828 + unsigned long limit;
19829 +
19830 + down_write(&mm->mmap_sem);
19831 + limit = mm->context.user_cs_limit;
19832 + if (limit < TASK_SIZE) {
19833 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19834 + up_write(&mm->mmap_sem);
19835 + return;
19836 + }
19837 + up_write(&mm->mmap_sem);
19838 + }
19839 +#endif
19840 +
19841 tsk->thread.error_code = error_code;
19842 tsk->thread.trap_nr = X86_TRAP_GP;
19843
19844 @@ -296,6 +318,13 @@ gp_in_kernel:
19845 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19846 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19847 return;
19848 +
19849 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19850 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19851 + die("PAX: suspicious general protection fault", regs, error_code);
19852 + else
19853 +#endif
19854 +
19855 die("general protection fault", regs, error_code);
19856 }
19857
19858 @@ -431,7 +460,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19859 /* It's safe to allow irq's after DR6 has been saved */
19860 preempt_conditional_sti(regs);
19861
19862 - if (regs->flags & X86_VM_MASK) {
19863 + if (v8086_mode(regs)) {
19864 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19865 X86_TRAP_DB);
19866 preempt_conditional_cli(regs);
19867 @@ -446,7 +475,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19868 * We already checked v86 mode above, so we can check for kernel mode
19869 * by just checking the CPL of CS.
19870 */
19871 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19872 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19873 tsk->thread.debugreg6 &= ~DR_STEP;
19874 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19875 regs->flags &= ~X86_EFLAGS_TF;
19876 @@ -477,7 +506,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19877 return;
19878 conditional_sti(regs);
19879
19880 - if (!user_mode_vm(regs))
19881 + if (!user_mode(regs))
19882 {
19883 if (!fixup_exception(regs)) {
19884 task->thread.error_code = error_code;
19885 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
19886 index dc4e910..c9dedab 100644
19887 --- a/arch/x86/kernel/uprobes.c
19888 +++ b/arch/x86/kernel/uprobes.c
19889 @@ -606,7 +606,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
19890 int ret = NOTIFY_DONE;
19891
19892 /* We are only interested in userspace traps */
19893 - if (regs && !user_mode_vm(regs))
19894 + if (regs && !user_mode(regs))
19895 return NOTIFY_DONE;
19896
19897 switch (val) {
19898 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19899 index b9242ba..50c5edd 100644
19900 --- a/arch/x86/kernel/verify_cpu.S
19901 +++ b/arch/x86/kernel/verify_cpu.S
19902 @@ -20,6 +20,7 @@
19903 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19904 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19905 * arch/x86/kernel/head_32.S: processor startup
19906 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19907 *
19908 * verify_cpu, returns the status of longmode and SSE in register %eax.
19909 * 0: Success 1: Failure
19910 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19911 index 255f58a..5e91150 100644
19912 --- a/arch/x86/kernel/vm86_32.c
19913 +++ b/arch/x86/kernel/vm86_32.c
19914 @@ -41,6 +41,7 @@
19915 #include <linux/ptrace.h>
19916 #include <linux/audit.h>
19917 #include <linux/stddef.h>
19918 +#include <linux/grsecurity.h>
19919
19920 #include <asm/uaccess.h>
19921 #include <asm/io.h>
19922 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19923 do_exit(SIGSEGV);
19924 }
19925
19926 - tss = &per_cpu(init_tss, get_cpu());
19927 + tss = init_tss + get_cpu();
19928 current->thread.sp0 = current->thread.saved_sp0;
19929 current->thread.sysenter_cs = __KERNEL_CS;
19930 load_sp0(tss, &current->thread);
19931 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19932 struct task_struct *tsk;
19933 int tmp, ret = -EPERM;
19934
19935 +#ifdef CONFIG_GRKERNSEC_VM86
19936 + if (!capable(CAP_SYS_RAWIO)) {
19937 + gr_handle_vm86();
19938 + goto out;
19939 + }
19940 +#endif
19941 +
19942 tsk = current;
19943 if (tsk->thread.saved_sp0)
19944 goto out;
19945 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19946 int tmp, ret;
19947 struct vm86plus_struct __user *v86;
19948
19949 +#ifdef CONFIG_GRKERNSEC_VM86
19950 + if (!capable(CAP_SYS_RAWIO)) {
19951 + gr_handle_vm86();
19952 + ret = -EPERM;
19953 + goto out;
19954 + }
19955 +#endif
19956 +
19957 tsk = current;
19958 switch (cmd) {
19959 case VM86_REQUEST_IRQ:
19960 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19961 tsk->thread.saved_fs = info->regs32->fs;
19962 tsk->thread.saved_gs = get_user_gs(info->regs32);
19963
19964 - tss = &per_cpu(init_tss, get_cpu());
19965 + tss = init_tss + get_cpu();
19966 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19967 if (cpu_has_sep)
19968 tsk->thread.sysenter_cs = 0;
19969 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19970 goto cannot_handle;
19971 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19972 goto cannot_handle;
19973 - intr_ptr = (unsigned long __user *) (i << 2);
19974 + intr_ptr = (__force unsigned long __user *) (i << 2);
19975 if (get_user(segoffs, intr_ptr))
19976 goto cannot_handle;
19977 if ((segoffs >> 16) == BIOSSEG)
19978 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19979 index 22a1530..8fbaaad 100644
19980 --- a/arch/x86/kernel/vmlinux.lds.S
19981 +++ b/arch/x86/kernel/vmlinux.lds.S
19982 @@ -26,6 +26,13 @@
19983 #include <asm/page_types.h>
19984 #include <asm/cache.h>
19985 #include <asm/boot.h>
19986 +#include <asm/segment.h>
19987 +
19988 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19989 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19990 +#else
19991 +#define __KERNEL_TEXT_OFFSET 0
19992 +#endif
19993
19994 #undef i386 /* in case the preprocessor is a 32bit one */
19995
19996 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19997
19998 PHDRS {
19999 text PT_LOAD FLAGS(5); /* R_E */
20000 +#ifdef CONFIG_X86_32
20001 + module PT_LOAD FLAGS(5); /* R_E */
20002 +#endif
20003 +#ifdef CONFIG_XEN
20004 + rodata PT_LOAD FLAGS(5); /* R_E */
20005 +#else
20006 + rodata PT_LOAD FLAGS(4); /* R__ */
20007 +#endif
20008 data PT_LOAD FLAGS(6); /* RW_ */
20009 -#ifdef CONFIG_X86_64
20010 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20011 #ifdef CONFIG_SMP
20012 percpu PT_LOAD FLAGS(6); /* RW_ */
20013 #endif
20014 + text.init PT_LOAD FLAGS(5); /* R_E */
20015 + text.exit PT_LOAD FLAGS(5); /* R_E */
20016 init PT_LOAD FLAGS(7); /* RWE */
20017 -#endif
20018 note PT_NOTE FLAGS(0); /* ___ */
20019 }
20020
20021 SECTIONS
20022 {
20023 #ifdef CONFIG_X86_32
20024 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20025 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20026 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20027 #else
20028 - . = __START_KERNEL;
20029 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20030 + . = __START_KERNEL;
20031 #endif
20032
20033 /* Text and read-only data */
20034 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20035 - _text = .;
20036 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20037 /* bootstrapping code */
20038 +#ifdef CONFIG_X86_32
20039 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20040 +#else
20041 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20042 +#endif
20043 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20044 + _text = .;
20045 HEAD_TEXT
20046 #ifdef CONFIG_X86_32
20047 . = ALIGN(PAGE_SIZE);
20048 @@ -108,13 +128,48 @@ SECTIONS
20049 IRQENTRY_TEXT
20050 *(.fixup)
20051 *(.gnu.warning)
20052 - /* End of text section */
20053 - _etext = .;
20054 } :text = 0x9090
20055
20056 - NOTES :text :note
20057 + . += __KERNEL_TEXT_OFFSET;
20058
20059 - EXCEPTION_TABLE(16) :text = 0x9090
20060 +#ifdef CONFIG_X86_32
20061 + . = ALIGN(PAGE_SIZE);
20062 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20063 +
20064 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20065 + MODULES_EXEC_VADDR = .;
20066 + BYTE(0)
20067 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20068 + . = ALIGN(HPAGE_SIZE) - 1;
20069 + MODULES_EXEC_END = .;
20070 +#endif
20071 +
20072 + } :module
20073 +#endif
20074 +
20075 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20076 + /* End of text section */
20077 + BYTE(0)
20078 + _etext = . - __KERNEL_TEXT_OFFSET;
20079 + }
20080 +
20081 +#ifdef CONFIG_X86_32
20082 + . = ALIGN(PAGE_SIZE);
20083 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20084 + *(.idt)
20085 + . = ALIGN(PAGE_SIZE);
20086 + *(.empty_zero_page)
20087 + *(.initial_pg_fixmap)
20088 + *(.initial_pg_pmd)
20089 + *(.initial_page_table)
20090 + *(.swapper_pg_dir)
20091 + } :rodata
20092 +#endif
20093 +
20094 + . = ALIGN(PAGE_SIZE);
20095 + NOTES :rodata :note
20096 +
20097 + EXCEPTION_TABLE(16) :rodata
20098
20099 #if defined(CONFIG_DEBUG_RODATA)
20100 /* .text should occupy whole number of pages */
20101 @@ -126,16 +181,20 @@ SECTIONS
20102
20103 /* Data */
20104 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20105 +
20106 +#ifdef CONFIG_PAX_KERNEXEC
20107 + . = ALIGN(HPAGE_SIZE);
20108 +#else
20109 + . = ALIGN(PAGE_SIZE);
20110 +#endif
20111 +
20112 /* Start of data section */
20113 _sdata = .;
20114
20115 /* init_task */
20116 INIT_TASK_DATA(THREAD_SIZE)
20117
20118 -#ifdef CONFIG_X86_32
20119 - /* 32 bit has nosave before _edata */
20120 NOSAVE_DATA
20121 -#endif
20122
20123 PAGE_ALIGNED_DATA(PAGE_SIZE)
20124
20125 @@ -176,12 +235,19 @@ SECTIONS
20126 #endif /* CONFIG_X86_64 */
20127
20128 /* Init code and data - will be freed after init */
20129 - . = ALIGN(PAGE_SIZE);
20130 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20131 + BYTE(0)
20132 +
20133 +#ifdef CONFIG_PAX_KERNEXEC
20134 + . = ALIGN(HPAGE_SIZE);
20135 +#else
20136 + . = ALIGN(PAGE_SIZE);
20137 +#endif
20138 +
20139 __init_begin = .; /* paired with __init_end */
20140 - }
20141 + } :init.begin
20142
20143 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20144 +#ifdef CONFIG_SMP
20145 /*
20146 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20147 * output PHDR, so the next output section - .init.text - should
20148 @@ -190,12 +256,27 @@ SECTIONS
20149 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20150 #endif
20151
20152 - INIT_TEXT_SECTION(PAGE_SIZE)
20153 -#ifdef CONFIG_X86_64
20154 - :init
20155 -#endif
20156 + . = ALIGN(PAGE_SIZE);
20157 + init_begin = .;
20158 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20159 + VMLINUX_SYMBOL(_sinittext) = .;
20160 + INIT_TEXT
20161 + VMLINUX_SYMBOL(_einittext) = .;
20162 + . = ALIGN(PAGE_SIZE);
20163 + } :text.init
20164
20165 - INIT_DATA_SECTION(16)
20166 + /*
20167 + * .exit.text is discard at runtime, not link time, to deal with
20168 + * references from .altinstructions and .eh_frame
20169 + */
20170 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20171 + EXIT_TEXT
20172 + . = ALIGN(16);
20173 + } :text.exit
20174 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20175 +
20176 + . = ALIGN(PAGE_SIZE);
20177 + INIT_DATA_SECTION(16) :init
20178
20179 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20180 __x86_cpu_dev_start = .;
20181 @@ -257,19 +338,12 @@ SECTIONS
20182 }
20183
20184 . = ALIGN(8);
20185 - /*
20186 - * .exit.text is discard at runtime, not link time, to deal with
20187 - * references from .altinstructions and .eh_frame
20188 - */
20189 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20190 - EXIT_TEXT
20191 - }
20192
20193 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20194 EXIT_DATA
20195 }
20196
20197 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20198 +#ifndef CONFIG_SMP
20199 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20200 #endif
20201
20202 @@ -288,16 +362,10 @@ SECTIONS
20203 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20204 __smp_locks = .;
20205 *(.smp_locks)
20206 - . = ALIGN(PAGE_SIZE);
20207 __smp_locks_end = .;
20208 + . = ALIGN(PAGE_SIZE);
20209 }
20210
20211 -#ifdef CONFIG_X86_64
20212 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20213 - NOSAVE_DATA
20214 - }
20215 -#endif
20216 -
20217 /* BSS */
20218 . = ALIGN(PAGE_SIZE);
20219 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20220 @@ -313,6 +381,7 @@ SECTIONS
20221 __brk_base = .;
20222 . += 64 * 1024; /* 64k alignment slop space */
20223 *(.brk_reservation) /* areas brk users have reserved */
20224 + . = ALIGN(HPAGE_SIZE);
20225 __brk_limit = .;
20226 }
20227
20228 @@ -339,13 +408,12 @@ SECTIONS
20229 * for the boot processor.
20230 */
20231 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20232 -INIT_PER_CPU(gdt_page);
20233 INIT_PER_CPU(irq_stack_union);
20234
20235 /*
20236 * Build-time check on the image size:
20237 */
20238 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20239 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20240 "kernel image bigger than KERNEL_IMAGE_SIZE");
20241
20242 #ifdef CONFIG_SMP
20243 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20244 index 5db36ca..2938af9 100644
20245 --- a/arch/x86/kernel/vsyscall_64.c
20246 +++ b/arch/x86/kernel/vsyscall_64.c
20247 @@ -54,15 +54,13 @@
20248 DEFINE_VVAR(int, vgetcpu_mode);
20249 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20250
20251 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20252 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20253
20254 static int __init vsyscall_setup(char *str)
20255 {
20256 if (str) {
20257 if (!strcmp("emulate", str))
20258 vsyscall_mode = EMULATE;
20259 - else if (!strcmp("native", str))
20260 - vsyscall_mode = NATIVE;
20261 else if (!strcmp("none", str))
20262 vsyscall_mode = NONE;
20263 else
20264 @@ -309,8 +307,7 @@ done:
20265 return true;
20266
20267 sigsegv:
20268 - force_sig(SIGSEGV, current);
20269 - return true;
20270 + do_group_exit(SIGKILL);
20271 }
20272
20273 /*
20274 @@ -363,10 +360,7 @@ void __init map_vsyscall(void)
20275 extern char __vvar_page;
20276 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20277
20278 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20279 - vsyscall_mode == NATIVE
20280 - ? PAGE_KERNEL_VSYSCALL
20281 - : PAGE_KERNEL_VVAR);
20282 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20283 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20284 (unsigned long)VSYSCALL_START);
20285
20286 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20287 index 9796c2f..f686fbf 100644
20288 --- a/arch/x86/kernel/x8664_ksyms_64.c
20289 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20290 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20291 EXPORT_SYMBOL(copy_user_generic_string);
20292 EXPORT_SYMBOL(copy_user_generic_unrolled);
20293 EXPORT_SYMBOL(__copy_user_nocache);
20294 -EXPORT_SYMBOL(_copy_from_user);
20295 -EXPORT_SYMBOL(_copy_to_user);
20296
20297 EXPORT_SYMBOL(copy_page);
20298 EXPORT_SYMBOL(clear_page);
20299 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20300 index bd18149..2ea0183 100644
20301 --- a/arch/x86/kernel/xsave.c
20302 +++ b/arch/x86/kernel/xsave.c
20303 @@ -129,7 +129,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20304 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20305 return -EINVAL;
20306
20307 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20308 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20309 fx_sw_user->extended_size -
20310 FP_XSTATE_MAGIC2_SIZE));
20311 if (err)
20312 @@ -265,7 +265,7 @@ fx_only:
20313 * the other extended state.
20314 */
20315 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20316 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20317 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20318 }
20319
20320 /*
20321 @@ -294,7 +294,7 @@ int restore_i387_xstate(void __user *buf)
20322 if (use_xsave())
20323 err = restore_user_xstate(buf);
20324 else
20325 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20326 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20327 buf);
20328 if (unlikely(err)) {
20329 /*
20330 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20331 index 7df1c6d..9ea7c79 100644
20332 --- a/arch/x86/kvm/cpuid.c
20333 +++ b/arch/x86/kvm/cpuid.c
20334 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20335 struct kvm_cpuid2 *cpuid,
20336 struct kvm_cpuid_entry2 __user *entries)
20337 {
20338 - int r;
20339 + int r, i;
20340
20341 r = -E2BIG;
20342 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20343 goto out;
20344 r = -EFAULT;
20345 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20346 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20347 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20348 goto out;
20349 + for (i = 0; i < cpuid->nent; ++i) {
20350 + struct kvm_cpuid_entry2 cpuid_entry;
20351 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20352 + goto out;
20353 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20354 + }
20355 vcpu->arch.cpuid_nent = cpuid->nent;
20356 kvm_apic_set_version(vcpu);
20357 kvm_x86_ops->cpuid_update(vcpu);
20358 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20359 struct kvm_cpuid2 *cpuid,
20360 struct kvm_cpuid_entry2 __user *entries)
20361 {
20362 - int r;
20363 + int r, i;
20364
20365 r = -E2BIG;
20366 if (cpuid->nent < vcpu->arch.cpuid_nent)
20367 goto out;
20368 r = -EFAULT;
20369 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20370 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20371 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20372 goto out;
20373 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20374 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20375 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20376 + goto out;
20377 + }
20378 return 0;
20379
20380 out:
20381 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20382 index 4837375..2cc9722 100644
20383 --- a/arch/x86/kvm/emulate.c
20384 +++ b/arch/x86/kvm/emulate.c
20385 @@ -256,6 +256,7 @@ struct gprefix {
20386
20387 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20388 do { \
20389 + unsigned long _tmp; \
20390 __asm__ __volatile__ ( \
20391 _PRE_EFLAGS("0", "4", "2") \
20392 _op _suffix " %"_x"3,%1; " \
20393 @@ -270,8 +271,6 @@ struct gprefix {
20394 /* Raw emulation: instruction has two explicit operands. */
20395 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20396 do { \
20397 - unsigned long _tmp; \
20398 - \
20399 switch ((ctxt)->dst.bytes) { \
20400 case 2: \
20401 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20402 @@ -287,7 +286,6 @@ struct gprefix {
20403
20404 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20405 do { \
20406 - unsigned long _tmp; \
20407 switch ((ctxt)->dst.bytes) { \
20408 case 1: \
20409 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20410 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20411 index 93c1574..d6097dc 100644
20412 --- a/arch/x86/kvm/lapic.c
20413 +++ b/arch/x86/kvm/lapic.c
20414 @@ -54,7 +54,7 @@
20415 #define APIC_BUS_CYCLE_NS 1
20416
20417 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20418 -#define apic_debug(fmt, arg...)
20419 +#define apic_debug(fmt, arg...) do {} while (0)
20420
20421 #define APIC_LVT_NUM 6
20422 /* 14 is the version for Xeon and Pentium 8.4.8*/
20423 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20424 index 34f9709..8eca2d5 100644
20425 --- a/arch/x86/kvm/paging_tmpl.h
20426 +++ b/arch/x86/kvm/paging_tmpl.h
20427 @@ -197,7 +197,7 @@ retry_walk:
20428 if (unlikely(kvm_is_error_hva(host_addr)))
20429 goto error;
20430
20431 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20432 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20433 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20434 goto error;
20435
20436 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20437 index f75af40..285b18f 100644
20438 --- a/arch/x86/kvm/svm.c
20439 +++ b/arch/x86/kvm/svm.c
20440 @@ -3516,7 +3516,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20441 int cpu = raw_smp_processor_id();
20442
20443 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20444 +
20445 + pax_open_kernel();
20446 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20447 + pax_close_kernel();
20448 +
20449 load_TR_desc();
20450 }
20451
20452 @@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20453 #endif
20454 #endif
20455
20456 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20457 + __set_fs(current_thread_info()->addr_limit);
20458 +#endif
20459 +
20460 reload_tss(vcpu);
20461
20462 local_irq_disable();
20463 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20464 index 86c8704..d9277bb 100644
20465 --- a/arch/x86/kvm/vmx.c
20466 +++ b/arch/x86/kvm/vmx.c
20467 @@ -1317,7 +1317,11 @@ static void reload_tss(void)
20468 struct desc_struct *descs;
20469
20470 descs = (void *)gdt->address;
20471 +
20472 + pax_open_kernel();
20473 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20474 + pax_close_kernel();
20475 +
20476 load_TR_desc();
20477 }
20478
20479 @@ -1527,6 +1531,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
20480 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
20481 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
20482
20483 +#ifdef CONFIG_PAX_PER_CPU_PGD
20484 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20485 +#endif
20486 +
20487 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
20488 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
20489 vmx->loaded_vmcs->cpu = cpu;
20490 @@ -2650,8 +2658,11 @@ static __init int hardware_setup(void)
20491 if (!cpu_has_vmx_flexpriority())
20492 flexpriority_enabled = 0;
20493
20494 - if (!cpu_has_vmx_tpr_shadow())
20495 - kvm_x86_ops->update_cr8_intercept = NULL;
20496 + if (!cpu_has_vmx_tpr_shadow()) {
20497 + pax_open_kernel();
20498 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20499 + pax_close_kernel();
20500 + }
20501
20502 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20503 kvm_disable_largepages();
20504 @@ -3697,7 +3708,10 @@ static void vmx_set_constant_host_state(void)
20505
20506 vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
20507 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
20508 +
20509 +#ifndef CONFIG_PAX_PER_CPU_PGD
20510 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20511 +#endif
20512
20513 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
20514 #ifdef CONFIG_X86_64
20515 @@ -3719,7 +3733,7 @@ static void vmx_set_constant_host_state(void)
20516 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20517
20518 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20519 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20520 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20521
20522 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20523 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20524 @@ -6257,6 +6271,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20525 "jmp .Lkvm_vmx_return \n\t"
20526 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20527 ".Lkvm_vmx_return: "
20528 +
20529 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20530 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20531 + ".Lkvm_vmx_return2: "
20532 +#endif
20533 +
20534 /* Save guest registers, load host registers, keep flags */
20535 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20536 "pop %0 \n\t"
20537 @@ -6305,6 +6325,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20538 #endif
20539 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20540 [wordsize]"i"(sizeof(ulong))
20541 +
20542 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20543 + ,[cs]"i"(__KERNEL_CS)
20544 +#endif
20545 +
20546 : "cc", "memory"
20547 , R"ax", R"bx", R"di", R"si"
20548 #ifdef CONFIG_X86_64
20549 @@ -6312,7 +6337,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20550 #endif
20551 );
20552
20553 -#ifndef CONFIG_X86_64
20554 +#ifdef CONFIG_X86_32
20555 /*
20556 * The sysexit path does not restore ds/es, so we must set them to
20557 * a reasonable value ourselves.
20558 @@ -6321,8 +6346,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20559 * may be executed in interrupt context, which saves and restore segments
20560 * around it, nullifying its effect.
20561 */
20562 - loadsegment(ds, __USER_DS);
20563 - loadsegment(es, __USER_DS);
20564 + loadsegment(ds, __KERNEL_DS);
20565 + loadsegment(es, __KERNEL_DS);
20566 + loadsegment(ss, __KERNEL_DS);
20567 +
20568 +#ifdef CONFIG_PAX_KERNEXEC
20569 + loadsegment(fs, __KERNEL_PERCPU);
20570 +#endif
20571 +
20572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20573 + __set_fs(current_thread_info()->addr_limit);
20574 +#endif
20575 +
20576 #endif
20577
20578 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
20579 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20580 index 14c290d..0dae6e5 100644
20581 --- a/arch/x86/kvm/x86.c
20582 +++ b/arch/x86/kvm/x86.c
20583 @@ -1361,8 +1361,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20584 {
20585 struct kvm *kvm = vcpu->kvm;
20586 int lm = is_long_mode(vcpu);
20587 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20588 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20589 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20590 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20591 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20592 : kvm->arch.xen_hvm_config.blob_size_32;
20593 u32 page_num = data & ~PAGE_MASK;
20594 @@ -2218,6 +2218,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20595 if (n < msr_list.nmsrs)
20596 goto out;
20597 r = -EFAULT;
20598 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20599 + goto out;
20600 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20601 num_msrs_to_save * sizeof(u32)))
20602 goto out;
20603 @@ -2343,7 +2345,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20604 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20605 struct kvm_interrupt *irq)
20606 {
20607 - if (irq->irq < 0 || irq->irq >= 256)
20608 + if (irq->irq >= 256)
20609 return -EINVAL;
20610 if (irqchip_in_kernel(vcpu->kvm))
20611 return -ENXIO;
20612 @@ -4880,7 +4882,7 @@ static void kvm_set_mmio_spte_mask(void)
20613 kvm_mmu_set_mmio_spte_mask(mask);
20614 }
20615
20616 -int kvm_arch_init(void *opaque)
20617 +int kvm_arch_init(const void *opaque)
20618 {
20619 int r;
20620 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20621 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20622 index 642d880..cc9ebac 100644
20623 --- a/arch/x86/lguest/boot.c
20624 +++ b/arch/x86/lguest/boot.c
20625 @@ -1116,12 +1116,12 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
20626
20627 static void set_lguest_basic_apic_ops(void)
20628 {
20629 - apic->read = lguest_apic_read;
20630 - apic->write = lguest_apic_write;
20631 - apic->icr_read = lguest_apic_icr_read;
20632 - apic->icr_write = lguest_apic_icr_write;
20633 - apic->wait_icr_idle = lguest_apic_wait_icr_idle;
20634 - apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
20635 + *(void **)&apic->read = lguest_apic_read;
20636 + *(void **)&apic->write = lguest_apic_write;
20637 + *(void **)&apic->icr_read = lguest_apic_icr_read;
20638 + *(void **)&apic->icr_write = lguest_apic_icr_write;
20639 + *(void **)&apic->wait_icr_idle = lguest_apic_wait_icr_idle;
20640 + *(void **)&apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
20641 };
20642 #endif
20643
20644 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20645 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20646 * Launcher to reboot us.
20647 */
20648 -static void lguest_restart(char *reason)
20649 +static __noreturn void lguest_restart(char *reason)
20650 {
20651 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20652 + BUG();
20653 }
20654
20655 /*G:050
20656 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20657 index 00933d5..3a64af9 100644
20658 --- a/arch/x86/lib/atomic64_386_32.S
20659 +++ b/arch/x86/lib/atomic64_386_32.S
20660 @@ -48,6 +48,10 @@ BEGIN(read)
20661 movl (v), %eax
20662 movl 4(v), %edx
20663 RET_ENDP
20664 +BEGIN(read_unchecked)
20665 + movl (v), %eax
20666 + movl 4(v), %edx
20667 +RET_ENDP
20668 #undef v
20669
20670 #define v %esi
20671 @@ -55,6 +59,10 @@ BEGIN(set)
20672 movl %ebx, (v)
20673 movl %ecx, 4(v)
20674 RET_ENDP
20675 +BEGIN(set_unchecked)
20676 + movl %ebx, (v)
20677 + movl %ecx, 4(v)
20678 +RET_ENDP
20679 #undef v
20680
20681 #define v %esi
20682 @@ -70,6 +78,20 @@ RET_ENDP
20683 BEGIN(add)
20684 addl %eax, (v)
20685 adcl %edx, 4(v)
20686 +
20687 +#ifdef CONFIG_PAX_REFCOUNT
20688 + jno 0f
20689 + subl %eax, (v)
20690 + sbbl %edx, 4(v)
20691 + int $4
20692 +0:
20693 + _ASM_EXTABLE(0b, 0b)
20694 +#endif
20695 +
20696 +RET_ENDP
20697 +BEGIN(add_unchecked)
20698 + addl %eax, (v)
20699 + adcl %edx, 4(v)
20700 RET_ENDP
20701 #undef v
20702
20703 @@ -77,6 +99,24 @@ RET_ENDP
20704 BEGIN(add_return)
20705 addl (v), %eax
20706 adcl 4(v), %edx
20707 +
20708 +#ifdef CONFIG_PAX_REFCOUNT
20709 + into
20710 +1234:
20711 + _ASM_EXTABLE(1234b, 2f)
20712 +#endif
20713 +
20714 + movl %eax, (v)
20715 + movl %edx, 4(v)
20716 +
20717 +#ifdef CONFIG_PAX_REFCOUNT
20718 +2:
20719 +#endif
20720 +
20721 +RET_ENDP
20722 +BEGIN(add_return_unchecked)
20723 + addl (v), %eax
20724 + adcl 4(v), %edx
20725 movl %eax, (v)
20726 movl %edx, 4(v)
20727 RET_ENDP
20728 @@ -86,6 +126,20 @@ RET_ENDP
20729 BEGIN(sub)
20730 subl %eax, (v)
20731 sbbl %edx, 4(v)
20732 +
20733 +#ifdef CONFIG_PAX_REFCOUNT
20734 + jno 0f
20735 + addl %eax, (v)
20736 + adcl %edx, 4(v)
20737 + int $4
20738 +0:
20739 + _ASM_EXTABLE(0b, 0b)
20740 +#endif
20741 +
20742 +RET_ENDP
20743 +BEGIN(sub_unchecked)
20744 + subl %eax, (v)
20745 + sbbl %edx, 4(v)
20746 RET_ENDP
20747 #undef v
20748
20749 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20750 sbbl $0, %edx
20751 addl (v), %eax
20752 adcl 4(v), %edx
20753 +
20754 +#ifdef CONFIG_PAX_REFCOUNT
20755 + into
20756 +1234:
20757 + _ASM_EXTABLE(1234b, 2f)
20758 +#endif
20759 +
20760 + movl %eax, (v)
20761 + movl %edx, 4(v)
20762 +
20763 +#ifdef CONFIG_PAX_REFCOUNT
20764 +2:
20765 +#endif
20766 +
20767 +RET_ENDP
20768 +BEGIN(sub_return_unchecked)
20769 + negl %edx
20770 + negl %eax
20771 + sbbl $0, %edx
20772 + addl (v), %eax
20773 + adcl 4(v), %edx
20774 movl %eax, (v)
20775 movl %edx, 4(v)
20776 RET_ENDP
20777 @@ -105,6 +180,20 @@ RET_ENDP
20778 BEGIN(inc)
20779 addl $1, (v)
20780 adcl $0, 4(v)
20781 +
20782 +#ifdef CONFIG_PAX_REFCOUNT
20783 + jno 0f
20784 + subl $1, (v)
20785 + sbbl $0, 4(v)
20786 + int $4
20787 +0:
20788 + _ASM_EXTABLE(0b, 0b)
20789 +#endif
20790 +
20791 +RET_ENDP
20792 +BEGIN(inc_unchecked)
20793 + addl $1, (v)
20794 + adcl $0, 4(v)
20795 RET_ENDP
20796 #undef v
20797
20798 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20799 movl 4(v), %edx
20800 addl $1, %eax
20801 adcl $0, %edx
20802 +
20803 +#ifdef CONFIG_PAX_REFCOUNT
20804 + into
20805 +1234:
20806 + _ASM_EXTABLE(1234b, 2f)
20807 +#endif
20808 +
20809 + movl %eax, (v)
20810 + movl %edx, 4(v)
20811 +
20812 +#ifdef CONFIG_PAX_REFCOUNT
20813 +2:
20814 +#endif
20815 +
20816 +RET_ENDP
20817 +BEGIN(inc_return_unchecked)
20818 + movl (v), %eax
20819 + movl 4(v), %edx
20820 + addl $1, %eax
20821 + adcl $0, %edx
20822 movl %eax, (v)
20823 movl %edx, 4(v)
20824 RET_ENDP
20825 @@ -123,6 +232,20 @@ RET_ENDP
20826 BEGIN(dec)
20827 subl $1, (v)
20828 sbbl $0, 4(v)
20829 +
20830 +#ifdef CONFIG_PAX_REFCOUNT
20831 + jno 0f
20832 + addl $1, (v)
20833 + adcl $0, 4(v)
20834 + int $4
20835 +0:
20836 + _ASM_EXTABLE(0b, 0b)
20837 +#endif
20838 +
20839 +RET_ENDP
20840 +BEGIN(dec_unchecked)
20841 + subl $1, (v)
20842 + sbbl $0, 4(v)
20843 RET_ENDP
20844 #undef v
20845
20846 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20847 movl 4(v), %edx
20848 subl $1, %eax
20849 sbbl $0, %edx
20850 +
20851 +#ifdef CONFIG_PAX_REFCOUNT
20852 + into
20853 +1234:
20854 + _ASM_EXTABLE(1234b, 2f)
20855 +#endif
20856 +
20857 + movl %eax, (v)
20858 + movl %edx, 4(v)
20859 +
20860 +#ifdef CONFIG_PAX_REFCOUNT
20861 +2:
20862 +#endif
20863 +
20864 +RET_ENDP
20865 +BEGIN(dec_return_unchecked)
20866 + movl (v), %eax
20867 + movl 4(v), %edx
20868 + subl $1, %eax
20869 + sbbl $0, %edx
20870 movl %eax, (v)
20871 movl %edx, 4(v)
20872 RET_ENDP
20873 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20874 adcl %edx, %edi
20875 addl (v), %eax
20876 adcl 4(v), %edx
20877 +
20878 +#ifdef CONFIG_PAX_REFCOUNT
20879 + into
20880 +1234:
20881 + _ASM_EXTABLE(1234b, 2f)
20882 +#endif
20883 +
20884 cmpl %eax, %ecx
20885 je 3f
20886 1:
20887 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20888 1:
20889 addl $1, %eax
20890 adcl $0, %edx
20891 +
20892 +#ifdef CONFIG_PAX_REFCOUNT
20893 + into
20894 +1234:
20895 + _ASM_EXTABLE(1234b, 2f)
20896 +#endif
20897 +
20898 movl %eax, (v)
20899 movl %edx, 4(v)
20900 movl $1, %eax
20901 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20902 movl 4(v), %edx
20903 subl $1, %eax
20904 sbbl $0, %edx
20905 +
20906 +#ifdef CONFIG_PAX_REFCOUNT
20907 + into
20908 +1234:
20909 + _ASM_EXTABLE(1234b, 1f)
20910 +#endif
20911 +
20912 js 1f
20913 movl %eax, (v)
20914 movl %edx, 4(v)
20915 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20916 index f5cc9eb..51fa319 100644
20917 --- a/arch/x86/lib/atomic64_cx8_32.S
20918 +++ b/arch/x86/lib/atomic64_cx8_32.S
20919 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20920 CFI_STARTPROC
20921
20922 read64 %ecx
20923 + pax_force_retaddr
20924 ret
20925 CFI_ENDPROC
20926 ENDPROC(atomic64_read_cx8)
20927
20928 +ENTRY(atomic64_read_unchecked_cx8)
20929 + CFI_STARTPROC
20930 +
20931 + read64 %ecx
20932 + pax_force_retaddr
20933 + ret
20934 + CFI_ENDPROC
20935 +ENDPROC(atomic64_read_unchecked_cx8)
20936 +
20937 ENTRY(atomic64_set_cx8)
20938 CFI_STARTPROC
20939
20940 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20941 cmpxchg8b (%esi)
20942 jne 1b
20943
20944 + pax_force_retaddr
20945 ret
20946 CFI_ENDPROC
20947 ENDPROC(atomic64_set_cx8)
20948
20949 +ENTRY(atomic64_set_unchecked_cx8)
20950 + CFI_STARTPROC
20951 +
20952 +1:
20953 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20954 + * are atomic on 586 and newer */
20955 + cmpxchg8b (%esi)
20956 + jne 1b
20957 +
20958 + pax_force_retaddr
20959 + ret
20960 + CFI_ENDPROC
20961 +ENDPROC(atomic64_set_unchecked_cx8)
20962 +
20963 ENTRY(atomic64_xchg_cx8)
20964 CFI_STARTPROC
20965
20966 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20967 cmpxchg8b (%esi)
20968 jne 1b
20969
20970 + pax_force_retaddr
20971 ret
20972 CFI_ENDPROC
20973 ENDPROC(atomic64_xchg_cx8)
20974
20975 -.macro addsub_return func ins insc
20976 -ENTRY(atomic64_\func\()_return_cx8)
20977 +.macro addsub_return func ins insc unchecked=""
20978 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20979 CFI_STARTPROC
20980 SAVE ebp
20981 SAVE ebx
20982 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20983 movl %edx, %ecx
20984 \ins\()l %esi, %ebx
20985 \insc\()l %edi, %ecx
20986 +
20987 +.ifb \unchecked
20988 +#ifdef CONFIG_PAX_REFCOUNT
20989 + into
20990 +2:
20991 + _ASM_EXTABLE(2b, 3f)
20992 +#endif
20993 +.endif
20994 +
20995 LOCK_PREFIX
20996 cmpxchg8b (%ebp)
20997 jne 1b
20998 -
20999 -10:
21000 movl %ebx, %eax
21001 movl %ecx, %edx
21002 +
21003 +.ifb \unchecked
21004 +#ifdef CONFIG_PAX_REFCOUNT
21005 +3:
21006 +#endif
21007 +.endif
21008 +
21009 RESTORE edi
21010 RESTORE esi
21011 RESTORE ebx
21012 RESTORE ebp
21013 + pax_force_retaddr
21014 ret
21015 CFI_ENDPROC
21016 -ENDPROC(atomic64_\func\()_return_cx8)
21017 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21018 .endm
21019
21020 addsub_return add add adc
21021 addsub_return sub sub sbb
21022 +addsub_return add add adc _unchecked
21023 +addsub_return sub sub sbb _unchecked
21024
21025 -.macro incdec_return func ins insc
21026 -ENTRY(atomic64_\func\()_return_cx8)
21027 +.macro incdec_return func ins insc unchecked=""
21028 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21029 CFI_STARTPROC
21030 SAVE ebx
21031
21032 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21033 movl %edx, %ecx
21034 \ins\()l $1, %ebx
21035 \insc\()l $0, %ecx
21036 +
21037 +.ifb \unchecked
21038 +#ifdef CONFIG_PAX_REFCOUNT
21039 + into
21040 +2:
21041 + _ASM_EXTABLE(2b, 3f)
21042 +#endif
21043 +.endif
21044 +
21045 LOCK_PREFIX
21046 cmpxchg8b (%esi)
21047 jne 1b
21048
21049 -10:
21050 movl %ebx, %eax
21051 movl %ecx, %edx
21052 +
21053 +.ifb \unchecked
21054 +#ifdef CONFIG_PAX_REFCOUNT
21055 +3:
21056 +#endif
21057 +.endif
21058 +
21059 RESTORE ebx
21060 + pax_force_retaddr
21061 ret
21062 CFI_ENDPROC
21063 -ENDPROC(atomic64_\func\()_return_cx8)
21064 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21065 .endm
21066
21067 incdec_return inc add adc
21068 incdec_return dec sub sbb
21069 +incdec_return inc add adc _unchecked
21070 +incdec_return dec sub sbb _unchecked
21071
21072 ENTRY(atomic64_dec_if_positive_cx8)
21073 CFI_STARTPROC
21074 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21075 movl %edx, %ecx
21076 subl $1, %ebx
21077 sbb $0, %ecx
21078 +
21079 +#ifdef CONFIG_PAX_REFCOUNT
21080 + into
21081 +1234:
21082 + _ASM_EXTABLE(1234b, 2f)
21083 +#endif
21084 +
21085 js 2f
21086 LOCK_PREFIX
21087 cmpxchg8b (%esi)
21088 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21089 movl %ebx, %eax
21090 movl %ecx, %edx
21091 RESTORE ebx
21092 + pax_force_retaddr
21093 ret
21094 CFI_ENDPROC
21095 ENDPROC(atomic64_dec_if_positive_cx8)
21096 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21097 movl %edx, %ecx
21098 addl %ebp, %ebx
21099 adcl %edi, %ecx
21100 +
21101 +#ifdef CONFIG_PAX_REFCOUNT
21102 + into
21103 +1234:
21104 + _ASM_EXTABLE(1234b, 3f)
21105 +#endif
21106 +
21107 LOCK_PREFIX
21108 cmpxchg8b (%esi)
21109 jne 1b
21110 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21111 CFI_ADJUST_CFA_OFFSET -8
21112 RESTORE ebx
21113 RESTORE ebp
21114 + pax_force_retaddr
21115 ret
21116 4:
21117 cmpl %edx, 4(%esp)
21118 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21119 xorl %ecx, %ecx
21120 addl $1, %ebx
21121 adcl %edx, %ecx
21122 +
21123 +#ifdef CONFIG_PAX_REFCOUNT
21124 + into
21125 +1234:
21126 + _ASM_EXTABLE(1234b, 3f)
21127 +#endif
21128 +
21129 LOCK_PREFIX
21130 cmpxchg8b (%esi)
21131 jne 1b
21132 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21133 movl $1, %eax
21134 3:
21135 RESTORE ebx
21136 + pax_force_retaddr
21137 ret
21138 CFI_ENDPROC
21139 ENDPROC(atomic64_inc_not_zero_cx8)
21140 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21141 index 2af5df3..62b1a5a 100644
21142 --- a/arch/x86/lib/checksum_32.S
21143 +++ b/arch/x86/lib/checksum_32.S
21144 @@ -29,7 +29,8 @@
21145 #include <asm/dwarf2.h>
21146 #include <asm/errno.h>
21147 #include <asm/asm.h>
21148 -
21149 +#include <asm/segment.h>
21150 +
21151 /*
21152 * computes a partial checksum, e.g. for TCP/UDP fragments
21153 */
21154 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21155
21156 #define ARGBASE 16
21157 #define FP 12
21158 -
21159 -ENTRY(csum_partial_copy_generic)
21160 +
21161 +ENTRY(csum_partial_copy_generic_to_user)
21162 CFI_STARTPROC
21163 +
21164 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21165 + pushl_cfi %gs
21166 + popl_cfi %es
21167 + jmp csum_partial_copy_generic
21168 +#endif
21169 +
21170 +ENTRY(csum_partial_copy_generic_from_user)
21171 +
21172 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21173 + pushl_cfi %gs
21174 + popl_cfi %ds
21175 +#endif
21176 +
21177 +ENTRY(csum_partial_copy_generic)
21178 subl $4,%esp
21179 CFI_ADJUST_CFA_OFFSET 4
21180 pushl_cfi %edi
21181 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
21182 jmp 4f
21183 SRC(1: movw (%esi), %bx )
21184 addl $2, %esi
21185 -DST( movw %bx, (%edi) )
21186 +DST( movw %bx, %es:(%edi) )
21187 addl $2, %edi
21188 addw %bx, %ax
21189 adcl $0, %eax
21190 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
21191 SRC(1: movl (%esi), %ebx )
21192 SRC( movl 4(%esi), %edx )
21193 adcl %ebx, %eax
21194 -DST( movl %ebx, (%edi) )
21195 +DST( movl %ebx, %es:(%edi) )
21196 adcl %edx, %eax
21197 -DST( movl %edx, 4(%edi) )
21198 +DST( movl %edx, %es:4(%edi) )
21199
21200 SRC( movl 8(%esi), %ebx )
21201 SRC( movl 12(%esi), %edx )
21202 adcl %ebx, %eax
21203 -DST( movl %ebx, 8(%edi) )
21204 +DST( movl %ebx, %es:8(%edi) )
21205 adcl %edx, %eax
21206 -DST( movl %edx, 12(%edi) )
21207 +DST( movl %edx, %es:12(%edi) )
21208
21209 SRC( movl 16(%esi), %ebx )
21210 SRC( movl 20(%esi), %edx )
21211 adcl %ebx, %eax
21212 -DST( movl %ebx, 16(%edi) )
21213 +DST( movl %ebx, %es:16(%edi) )
21214 adcl %edx, %eax
21215 -DST( movl %edx, 20(%edi) )
21216 +DST( movl %edx, %es:20(%edi) )
21217
21218 SRC( movl 24(%esi), %ebx )
21219 SRC( movl 28(%esi), %edx )
21220 adcl %ebx, %eax
21221 -DST( movl %ebx, 24(%edi) )
21222 +DST( movl %ebx, %es:24(%edi) )
21223 adcl %edx, %eax
21224 -DST( movl %edx, 28(%edi) )
21225 +DST( movl %edx, %es:28(%edi) )
21226
21227 lea 32(%esi), %esi
21228 lea 32(%edi), %edi
21229 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
21230 shrl $2, %edx # This clears CF
21231 SRC(3: movl (%esi), %ebx )
21232 adcl %ebx, %eax
21233 -DST( movl %ebx, (%edi) )
21234 +DST( movl %ebx, %es:(%edi) )
21235 lea 4(%esi), %esi
21236 lea 4(%edi), %edi
21237 dec %edx
21238 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
21239 jb 5f
21240 SRC( movw (%esi), %cx )
21241 leal 2(%esi), %esi
21242 -DST( movw %cx, (%edi) )
21243 +DST( movw %cx, %es:(%edi) )
21244 leal 2(%edi), %edi
21245 je 6f
21246 shll $16,%ecx
21247 SRC(5: movb (%esi), %cl )
21248 -DST( movb %cl, (%edi) )
21249 +DST( movb %cl, %es:(%edi) )
21250 6: addl %ecx, %eax
21251 adcl $0, %eax
21252 7:
21253 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
21254
21255 6001:
21256 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21257 - movl $-EFAULT, (%ebx)
21258 + movl $-EFAULT, %ss:(%ebx)
21259
21260 # zero the complete destination - computing the rest
21261 # is too much work
21262 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
21263
21264 6002:
21265 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21266 - movl $-EFAULT,(%ebx)
21267 + movl $-EFAULT,%ss:(%ebx)
21268 jmp 5000b
21269
21270 .previous
21271
21272 + pushl_cfi %ss
21273 + popl_cfi %ds
21274 + pushl_cfi %ss
21275 + popl_cfi %es
21276 popl_cfi %ebx
21277 CFI_RESTORE ebx
21278 popl_cfi %esi
21279 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
21280 popl_cfi %ecx # equivalent to addl $4,%esp
21281 ret
21282 CFI_ENDPROC
21283 -ENDPROC(csum_partial_copy_generic)
21284 +ENDPROC(csum_partial_copy_generic_to_user)
21285
21286 #else
21287
21288 /* Version for PentiumII/PPro */
21289
21290 #define ROUND1(x) \
21291 + nop; nop; nop; \
21292 SRC(movl x(%esi), %ebx ) ; \
21293 addl %ebx, %eax ; \
21294 - DST(movl %ebx, x(%edi) ) ;
21295 + DST(movl %ebx, %es:x(%edi)) ;
21296
21297 #define ROUND(x) \
21298 + nop; nop; nop; \
21299 SRC(movl x(%esi), %ebx ) ; \
21300 adcl %ebx, %eax ; \
21301 - DST(movl %ebx, x(%edi) ) ;
21302 + DST(movl %ebx, %es:x(%edi)) ;
21303
21304 #define ARGBASE 12
21305 -
21306 -ENTRY(csum_partial_copy_generic)
21307 +
21308 +ENTRY(csum_partial_copy_generic_to_user)
21309 CFI_STARTPROC
21310 +
21311 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21312 + pushl_cfi %gs
21313 + popl_cfi %es
21314 + jmp csum_partial_copy_generic
21315 +#endif
21316 +
21317 +ENTRY(csum_partial_copy_generic_from_user)
21318 +
21319 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21320 + pushl_cfi %gs
21321 + popl_cfi %ds
21322 +#endif
21323 +
21324 +ENTRY(csum_partial_copy_generic)
21325 pushl_cfi %ebx
21326 CFI_REL_OFFSET ebx, 0
21327 pushl_cfi %edi
21328 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
21329 subl %ebx, %edi
21330 lea -1(%esi),%edx
21331 andl $-32,%edx
21332 - lea 3f(%ebx,%ebx), %ebx
21333 + lea 3f(%ebx,%ebx,2), %ebx
21334 testl %esi, %esi
21335 jmp *%ebx
21336 1: addl $64,%esi
21337 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
21338 jb 5f
21339 SRC( movw (%esi), %dx )
21340 leal 2(%esi), %esi
21341 -DST( movw %dx, (%edi) )
21342 +DST( movw %dx, %es:(%edi) )
21343 leal 2(%edi), %edi
21344 je 6f
21345 shll $16,%edx
21346 5:
21347 SRC( movb (%esi), %dl )
21348 -DST( movb %dl, (%edi) )
21349 +DST( movb %dl, %es:(%edi) )
21350 6: addl %edx, %eax
21351 adcl $0, %eax
21352 7:
21353 .section .fixup, "ax"
21354 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21355 - movl $-EFAULT, (%ebx)
21356 + movl $-EFAULT, %ss:(%ebx)
21357 # zero the complete destination (computing the rest is too much work)
21358 movl ARGBASE+8(%esp),%edi # dst
21359 movl ARGBASE+12(%esp),%ecx # len
21360 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
21361 rep; stosb
21362 jmp 7b
21363 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21364 - movl $-EFAULT, (%ebx)
21365 + movl $-EFAULT, %ss:(%ebx)
21366 jmp 7b
21367 .previous
21368
21369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21370 + pushl_cfi %ss
21371 + popl_cfi %ds
21372 + pushl_cfi %ss
21373 + popl_cfi %es
21374 +#endif
21375 +
21376 popl_cfi %esi
21377 CFI_RESTORE esi
21378 popl_cfi %edi
21379 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
21380 CFI_RESTORE ebx
21381 ret
21382 CFI_ENDPROC
21383 -ENDPROC(csum_partial_copy_generic)
21384 +ENDPROC(csum_partial_copy_generic_to_user)
21385
21386 #undef ROUND
21387 #undef ROUND1
21388 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21389 index f2145cf..cea889d 100644
21390 --- a/arch/x86/lib/clear_page_64.S
21391 +++ b/arch/x86/lib/clear_page_64.S
21392 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21393 movl $4096/8,%ecx
21394 xorl %eax,%eax
21395 rep stosq
21396 + pax_force_retaddr
21397 ret
21398 CFI_ENDPROC
21399 ENDPROC(clear_page_c)
21400 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21401 movl $4096,%ecx
21402 xorl %eax,%eax
21403 rep stosb
21404 + pax_force_retaddr
21405 ret
21406 CFI_ENDPROC
21407 ENDPROC(clear_page_c_e)
21408 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21409 leaq 64(%rdi),%rdi
21410 jnz .Lloop
21411 nop
21412 + pax_force_retaddr
21413 ret
21414 CFI_ENDPROC
21415 .Lclear_page_end:
21416 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21417
21418 #include <asm/cpufeature.h>
21419
21420 - .section .altinstr_replacement,"ax"
21421 + .section .altinstr_replacement,"a"
21422 1: .byte 0xeb /* jmp <disp8> */
21423 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21424 2: .byte 0xeb /* jmp <disp8> */
21425 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21426 index 1e572c5..2a162cd 100644
21427 --- a/arch/x86/lib/cmpxchg16b_emu.S
21428 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21429 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21430
21431 popf
21432 mov $1, %al
21433 + pax_force_retaddr
21434 ret
21435
21436 not_same:
21437 popf
21438 xor %al,%al
21439 + pax_force_retaddr
21440 ret
21441
21442 CFI_ENDPROC
21443 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21444 index 6b34d04..dccb07f 100644
21445 --- a/arch/x86/lib/copy_page_64.S
21446 +++ b/arch/x86/lib/copy_page_64.S
21447 @@ -9,6 +9,7 @@ copy_page_c:
21448 CFI_STARTPROC
21449 movl $4096/8,%ecx
21450 rep movsq
21451 + pax_force_retaddr
21452 ret
21453 CFI_ENDPROC
21454 ENDPROC(copy_page_c)
21455 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21456
21457 ENTRY(copy_page)
21458 CFI_STARTPROC
21459 - subq $2*8,%rsp
21460 - CFI_ADJUST_CFA_OFFSET 2*8
21461 + subq $3*8,%rsp
21462 + CFI_ADJUST_CFA_OFFSET 3*8
21463 movq %rbx,(%rsp)
21464 CFI_REL_OFFSET rbx, 0
21465 movq %r12,1*8(%rsp)
21466 CFI_REL_OFFSET r12, 1*8
21467 + movq %r13,2*8(%rsp)
21468 + CFI_REL_OFFSET r13, 2*8
21469
21470 movl $(4096/64)-5,%ecx
21471 .p2align 4
21472 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21473 movq 16 (%rsi), %rdx
21474 movq 24 (%rsi), %r8
21475 movq 32 (%rsi), %r9
21476 - movq 40 (%rsi), %r10
21477 + movq 40 (%rsi), %r13
21478 movq 48 (%rsi), %r11
21479 movq 56 (%rsi), %r12
21480
21481 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21482 movq %rdx, 16 (%rdi)
21483 movq %r8, 24 (%rdi)
21484 movq %r9, 32 (%rdi)
21485 - movq %r10, 40 (%rdi)
21486 + movq %r13, 40 (%rdi)
21487 movq %r11, 48 (%rdi)
21488 movq %r12, 56 (%rdi)
21489
21490 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21491 movq 16 (%rsi), %rdx
21492 movq 24 (%rsi), %r8
21493 movq 32 (%rsi), %r9
21494 - movq 40 (%rsi), %r10
21495 + movq 40 (%rsi), %r13
21496 movq 48 (%rsi), %r11
21497 movq 56 (%rsi), %r12
21498
21499 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21500 movq %rdx, 16 (%rdi)
21501 movq %r8, 24 (%rdi)
21502 movq %r9, 32 (%rdi)
21503 - movq %r10, 40 (%rdi)
21504 + movq %r13, 40 (%rdi)
21505 movq %r11, 48 (%rdi)
21506 movq %r12, 56 (%rdi)
21507
21508 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21509 CFI_RESTORE rbx
21510 movq 1*8(%rsp),%r12
21511 CFI_RESTORE r12
21512 - addq $2*8,%rsp
21513 - CFI_ADJUST_CFA_OFFSET -2*8
21514 + movq 2*8(%rsp),%r13
21515 + CFI_RESTORE r13
21516 + addq $3*8,%rsp
21517 + CFI_ADJUST_CFA_OFFSET -3*8
21518 + pax_force_retaddr
21519 ret
21520 .Lcopy_page_end:
21521 CFI_ENDPROC
21522 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21523
21524 #include <asm/cpufeature.h>
21525
21526 - .section .altinstr_replacement,"ax"
21527 + .section .altinstr_replacement,"a"
21528 1: .byte 0xeb /* jmp <disp8> */
21529 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21530 2:
21531 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21532 index 5b2995f..78e7644 100644
21533 --- a/arch/x86/lib/copy_user_64.S
21534 +++ b/arch/x86/lib/copy_user_64.S
21535 @@ -17,6 +17,7 @@
21536 #include <asm/cpufeature.h>
21537 #include <asm/alternative-asm.h>
21538 #include <asm/asm.h>
21539 +#include <asm/pgtable.h>
21540
21541 /*
21542 * By placing feature2 after feature1 in altinstructions section, we logically
21543 @@ -30,7 +31,7 @@
21544 .byte 0xe9 /* 32bit jump */
21545 .long \orig-1f /* by default jump to orig */
21546 1:
21547 - .section .altinstr_replacement,"ax"
21548 + .section .altinstr_replacement,"a"
21549 2: .byte 0xe9 /* near jump with 32bit immediate */
21550 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21551 3: .byte 0xe9 /* near jump with 32bit immediate */
21552 @@ -69,47 +70,20 @@
21553 #endif
21554 .endm
21555
21556 -/* Standard copy_to_user with segment limit checking */
21557 -ENTRY(_copy_to_user)
21558 - CFI_STARTPROC
21559 - GET_THREAD_INFO(%rax)
21560 - movq %rdi,%rcx
21561 - addq %rdx,%rcx
21562 - jc bad_to_user
21563 - cmpq TI_addr_limit(%rax),%rcx
21564 - ja bad_to_user
21565 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21566 - copy_user_generic_unrolled,copy_user_generic_string, \
21567 - copy_user_enhanced_fast_string
21568 - CFI_ENDPROC
21569 -ENDPROC(_copy_to_user)
21570 -
21571 -/* Standard copy_from_user with segment limit checking */
21572 -ENTRY(_copy_from_user)
21573 - CFI_STARTPROC
21574 - GET_THREAD_INFO(%rax)
21575 - movq %rsi,%rcx
21576 - addq %rdx,%rcx
21577 - jc bad_from_user
21578 - cmpq TI_addr_limit(%rax),%rcx
21579 - ja bad_from_user
21580 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21581 - copy_user_generic_unrolled,copy_user_generic_string, \
21582 - copy_user_enhanced_fast_string
21583 - CFI_ENDPROC
21584 -ENDPROC(_copy_from_user)
21585 -
21586 .section .fixup,"ax"
21587 /* must zero dest */
21588 ENTRY(bad_from_user)
21589 bad_from_user:
21590 CFI_STARTPROC
21591 + testl %edx,%edx
21592 + js bad_to_user
21593 movl %edx,%ecx
21594 xorl %eax,%eax
21595 rep
21596 stosb
21597 bad_to_user:
21598 movl %edx,%eax
21599 + pax_force_retaddr
21600 ret
21601 CFI_ENDPROC
21602 ENDPROC(bad_from_user)
21603 @@ -139,19 +113,19 @@ ENTRY(copy_user_generic_unrolled)
21604 jz 17f
21605 1: movq (%rsi),%r8
21606 2: movq 1*8(%rsi),%r9
21607 -3: movq 2*8(%rsi),%r10
21608 +3: movq 2*8(%rsi),%rax
21609 4: movq 3*8(%rsi),%r11
21610 5: movq %r8,(%rdi)
21611 6: movq %r9,1*8(%rdi)
21612 -7: movq %r10,2*8(%rdi)
21613 +7: movq %rax,2*8(%rdi)
21614 8: movq %r11,3*8(%rdi)
21615 9: movq 4*8(%rsi),%r8
21616 10: movq 5*8(%rsi),%r9
21617 -11: movq 6*8(%rsi),%r10
21618 +11: movq 6*8(%rsi),%rax
21619 12: movq 7*8(%rsi),%r11
21620 13: movq %r8,4*8(%rdi)
21621 14: movq %r9,5*8(%rdi)
21622 -15: movq %r10,6*8(%rdi)
21623 +15: movq %rax,6*8(%rdi)
21624 16: movq %r11,7*8(%rdi)
21625 leaq 64(%rsi),%rsi
21626 leaq 64(%rdi),%rdi
21627 @@ -177,6 +151,7 @@ ENTRY(copy_user_generic_unrolled)
21628 decl %ecx
21629 jnz 21b
21630 23: xor %eax,%eax
21631 + pax_force_retaddr
21632 ret
21633
21634 .section .fixup,"ax"
21635 @@ -246,6 +221,7 @@ ENTRY(copy_user_generic_string)
21636 3: rep
21637 movsb
21638 4: xorl %eax,%eax
21639 + pax_force_retaddr
21640 ret
21641
21642 .section .fixup,"ax"
21643 @@ -279,6 +255,7 @@ ENTRY(copy_user_enhanced_fast_string)
21644 1: rep
21645 movsb
21646 2: xorl %eax,%eax
21647 + pax_force_retaddr
21648 ret
21649
21650 .section .fixup,"ax"
21651 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21652 index cacddc7..09d49e4 100644
21653 --- a/arch/x86/lib/copy_user_nocache_64.S
21654 +++ b/arch/x86/lib/copy_user_nocache_64.S
21655 @@ -8,6 +8,7 @@
21656
21657 #include <linux/linkage.h>
21658 #include <asm/dwarf2.h>
21659 +#include <asm/alternative-asm.h>
21660
21661 #define FIX_ALIGNMENT 1
21662
21663 @@ -15,6 +16,7 @@
21664 #include <asm/asm-offsets.h>
21665 #include <asm/thread_info.h>
21666 #include <asm/asm.h>
21667 +#include <asm/pgtable.h>
21668
21669 .macro ALIGN_DESTINATION
21670 #ifdef FIX_ALIGNMENT
21671 @@ -48,6 +50,15 @@
21672 */
21673 ENTRY(__copy_user_nocache)
21674 CFI_STARTPROC
21675 +
21676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21677 + mov $PAX_USER_SHADOW_BASE,%rcx
21678 + cmp %rcx,%rsi
21679 + jae 1f
21680 + add %rcx,%rsi
21681 +1:
21682 +#endif
21683 +
21684 cmpl $8,%edx
21685 jb 20f /* less then 8 bytes, go to byte copy loop */
21686 ALIGN_DESTINATION
21687 @@ -57,19 +68,19 @@ ENTRY(__copy_user_nocache)
21688 jz 17f
21689 1: movq (%rsi),%r8
21690 2: movq 1*8(%rsi),%r9
21691 -3: movq 2*8(%rsi),%r10
21692 +3: movq 2*8(%rsi),%rax
21693 4: movq 3*8(%rsi),%r11
21694 5: movnti %r8,(%rdi)
21695 6: movnti %r9,1*8(%rdi)
21696 -7: movnti %r10,2*8(%rdi)
21697 +7: movnti %rax,2*8(%rdi)
21698 8: movnti %r11,3*8(%rdi)
21699 9: movq 4*8(%rsi),%r8
21700 10: movq 5*8(%rsi),%r9
21701 -11: movq 6*8(%rsi),%r10
21702 +11: movq 6*8(%rsi),%rax
21703 12: movq 7*8(%rsi),%r11
21704 13: movnti %r8,4*8(%rdi)
21705 14: movnti %r9,5*8(%rdi)
21706 -15: movnti %r10,6*8(%rdi)
21707 +15: movnti %rax,6*8(%rdi)
21708 16: movnti %r11,7*8(%rdi)
21709 leaq 64(%rsi),%rsi
21710 leaq 64(%rdi),%rdi
21711 @@ -96,6 +107,7 @@ ENTRY(__copy_user_nocache)
21712 jnz 21b
21713 23: xorl %eax,%eax
21714 sfence
21715 + pax_force_retaddr
21716 ret
21717
21718 .section .fixup,"ax"
21719 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21720 index 2419d5f..953ee51 100644
21721 --- a/arch/x86/lib/csum-copy_64.S
21722 +++ b/arch/x86/lib/csum-copy_64.S
21723 @@ -9,6 +9,7 @@
21724 #include <asm/dwarf2.h>
21725 #include <asm/errno.h>
21726 #include <asm/asm.h>
21727 +#include <asm/alternative-asm.h>
21728
21729 /*
21730 * Checksum copy with exception handling.
21731 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
21732 CFI_RESTORE rbp
21733 addq $7*8, %rsp
21734 CFI_ADJUST_CFA_OFFSET -7*8
21735 + pax_force_retaddr 0, 1
21736 ret
21737 CFI_RESTORE_STATE
21738
21739 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21740 index 25b7ae8..3b52ccd 100644
21741 --- a/arch/x86/lib/csum-wrappers_64.c
21742 +++ b/arch/x86/lib/csum-wrappers_64.c
21743 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21744 len -= 2;
21745 }
21746 }
21747 - isum = csum_partial_copy_generic((__force const void *)src,
21748 +
21749 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21750 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21751 + src += PAX_USER_SHADOW_BASE;
21752 +#endif
21753 +
21754 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21755 dst, len, isum, errp, NULL);
21756 if (unlikely(*errp))
21757 goto out_err;
21758 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21759 }
21760
21761 *errp = 0;
21762 - return csum_partial_copy_generic(src, (void __force *)dst,
21763 +
21764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21765 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21766 + dst += PAX_USER_SHADOW_BASE;
21767 +#endif
21768 +
21769 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21770 len, isum, NULL, errp);
21771 }
21772 EXPORT_SYMBOL(csum_partial_copy_to_user);
21773 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21774 index b33b1fb..219f389 100644
21775 --- a/arch/x86/lib/getuser.S
21776 +++ b/arch/x86/lib/getuser.S
21777 @@ -33,15 +33,38 @@
21778 #include <asm/asm-offsets.h>
21779 #include <asm/thread_info.h>
21780 #include <asm/asm.h>
21781 +#include <asm/segment.h>
21782 +#include <asm/pgtable.h>
21783 +#include <asm/alternative-asm.h>
21784 +
21785 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21786 +#define __copyuser_seg gs;
21787 +#else
21788 +#define __copyuser_seg
21789 +#endif
21790
21791 .text
21792 ENTRY(__get_user_1)
21793 CFI_STARTPROC
21794 +
21795 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21796 GET_THREAD_INFO(%_ASM_DX)
21797 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21798 jae bad_get_user
21799 -1: movzb (%_ASM_AX),%edx
21800 +
21801 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21802 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21803 + cmp %_ASM_DX,%_ASM_AX
21804 + jae 1234f
21805 + add %_ASM_DX,%_ASM_AX
21806 +1234:
21807 +#endif
21808 +
21809 +#endif
21810 +
21811 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21812 xor %eax,%eax
21813 + pax_force_retaddr
21814 ret
21815 CFI_ENDPROC
21816 ENDPROC(__get_user_1)
21817 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21818 ENTRY(__get_user_2)
21819 CFI_STARTPROC
21820 add $1,%_ASM_AX
21821 +
21822 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21823 jc bad_get_user
21824 GET_THREAD_INFO(%_ASM_DX)
21825 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21826 jae bad_get_user
21827 -2: movzwl -1(%_ASM_AX),%edx
21828 +
21829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21830 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21831 + cmp %_ASM_DX,%_ASM_AX
21832 + jae 1234f
21833 + add %_ASM_DX,%_ASM_AX
21834 +1234:
21835 +#endif
21836 +
21837 +#endif
21838 +
21839 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21840 xor %eax,%eax
21841 + pax_force_retaddr
21842 ret
21843 CFI_ENDPROC
21844 ENDPROC(__get_user_2)
21845 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21846 ENTRY(__get_user_4)
21847 CFI_STARTPROC
21848 add $3,%_ASM_AX
21849 +
21850 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21851 jc bad_get_user
21852 GET_THREAD_INFO(%_ASM_DX)
21853 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21854 jae bad_get_user
21855 -3: mov -3(%_ASM_AX),%edx
21856 +
21857 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21858 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21859 + cmp %_ASM_DX,%_ASM_AX
21860 + jae 1234f
21861 + add %_ASM_DX,%_ASM_AX
21862 +1234:
21863 +#endif
21864 +
21865 +#endif
21866 +
21867 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21868 xor %eax,%eax
21869 + pax_force_retaddr
21870 ret
21871 CFI_ENDPROC
21872 ENDPROC(__get_user_4)
21873 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21874 GET_THREAD_INFO(%_ASM_DX)
21875 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21876 jae bad_get_user
21877 +
21878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21879 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21880 + cmp %_ASM_DX,%_ASM_AX
21881 + jae 1234f
21882 + add %_ASM_DX,%_ASM_AX
21883 +1234:
21884 +#endif
21885 +
21886 4: movq -7(%_ASM_AX),%_ASM_DX
21887 xor %eax,%eax
21888 + pax_force_retaddr
21889 ret
21890 CFI_ENDPROC
21891 ENDPROC(__get_user_8)
21892 @@ -91,6 +152,7 @@ bad_get_user:
21893 CFI_STARTPROC
21894 xor %edx,%edx
21895 mov $(-EFAULT),%_ASM_AX
21896 + pax_force_retaddr
21897 ret
21898 CFI_ENDPROC
21899 END(bad_get_user)
21900 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21901 index b1e6c4b..21ae8fc 100644
21902 --- a/arch/x86/lib/insn.c
21903 +++ b/arch/x86/lib/insn.c
21904 @@ -21,6 +21,11 @@
21905 #include <linux/string.h>
21906 #include <asm/inat.h>
21907 #include <asm/insn.h>
21908 +#ifdef __KERNEL__
21909 +#include <asm/pgtable_types.h>
21910 +#else
21911 +#define ktla_ktva(addr) addr
21912 +#endif
21913
21914 /* Verify next sizeof(t) bytes can be on the same instruction */
21915 #define validate_next(t, insn, n) \
21916 @@ -49,8 +54,8 @@
21917 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21918 {
21919 memset(insn, 0, sizeof(*insn));
21920 - insn->kaddr = kaddr;
21921 - insn->next_byte = kaddr;
21922 + insn->kaddr = ktla_ktva(kaddr);
21923 + insn->next_byte = ktla_ktva(kaddr);
21924 insn->x86_64 = x86_64 ? 1 : 0;
21925 insn->opnd_bytes = 4;
21926 if (x86_64)
21927 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21928 index 05a95e7..326f2fa 100644
21929 --- a/arch/x86/lib/iomap_copy_64.S
21930 +++ b/arch/x86/lib/iomap_copy_64.S
21931 @@ -17,6 +17,7 @@
21932
21933 #include <linux/linkage.h>
21934 #include <asm/dwarf2.h>
21935 +#include <asm/alternative-asm.h>
21936
21937 /*
21938 * override generic version in lib/iomap_copy.c
21939 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21940 CFI_STARTPROC
21941 movl %edx,%ecx
21942 rep movsd
21943 + pax_force_retaddr
21944 ret
21945 CFI_ENDPROC
21946 ENDPROC(__iowrite32_copy)
21947 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21948 index 1c273be..da9cc0e 100644
21949 --- a/arch/x86/lib/memcpy_64.S
21950 +++ b/arch/x86/lib/memcpy_64.S
21951 @@ -33,6 +33,7 @@
21952 rep movsq
21953 movl %edx, %ecx
21954 rep movsb
21955 + pax_force_retaddr
21956 ret
21957 .Lmemcpy_e:
21958 .previous
21959 @@ -49,6 +50,7 @@
21960 movq %rdi, %rax
21961 movq %rdx, %rcx
21962 rep movsb
21963 + pax_force_retaddr
21964 ret
21965 .Lmemcpy_e_e:
21966 .previous
21967 @@ -76,13 +78,13 @@ ENTRY(memcpy)
21968 */
21969 movq 0*8(%rsi), %r8
21970 movq 1*8(%rsi), %r9
21971 - movq 2*8(%rsi), %r10
21972 + movq 2*8(%rsi), %rcx
21973 movq 3*8(%rsi), %r11
21974 leaq 4*8(%rsi), %rsi
21975
21976 movq %r8, 0*8(%rdi)
21977 movq %r9, 1*8(%rdi)
21978 - movq %r10, 2*8(%rdi)
21979 + movq %rcx, 2*8(%rdi)
21980 movq %r11, 3*8(%rdi)
21981 leaq 4*8(%rdi), %rdi
21982 jae .Lcopy_forward_loop
21983 @@ -105,12 +107,12 @@ ENTRY(memcpy)
21984 subq $0x20, %rdx
21985 movq -1*8(%rsi), %r8
21986 movq -2*8(%rsi), %r9
21987 - movq -3*8(%rsi), %r10
21988 + movq -3*8(%rsi), %rcx
21989 movq -4*8(%rsi), %r11
21990 leaq -4*8(%rsi), %rsi
21991 movq %r8, -1*8(%rdi)
21992 movq %r9, -2*8(%rdi)
21993 - movq %r10, -3*8(%rdi)
21994 + movq %rcx, -3*8(%rdi)
21995 movq %r11, -4*8(%rdi)
21996 leaq -4*8(%rdi), %rdi
21997 jae .Lcopy_backward_loop
21998 @@ -130,12 +132,13 @@ ENTRY(memcpy)
21999 */
22000 movq 0*8(%rsi), %r8
22001 movq 1*8(%rsi), %r9
22002 - movq -2*8(%rsi, %rdx), %r10
22003 + movq -2*8(%rsi, %rdx), %rcx
22004 movq -1*8(%rsi, %rdx), %r11
22005 movq %r8, 0*8(%rdi)
22006 movq %r9, 1*8(%rdi)
22007 - movq %r10, -2*8(%rdi, %rdx)
22008 + movq %rcx, -2*8(%rdi, %rdx)
22009 movq %r11, -1*8(%rdi, %rdx)
22010 + pax_force_retaddr
22011 retq
22012 .p2align 4
22013 .Lless_16bytes:
22014 @@ -148,6 +151,7 @@ ENTRY(memcpy)
22015 movq -1*8(%rsi, %rdx), %r9
22016 movq %r8, 0*8(%rdi)
22017 movq %r9, -1*8(%rdi, %rdx)
22018 + pax_force_retaddr
22019 retq
22020 .p2align 4
22021 .Lless_8bytes:
22022 @@ -161,6 +165,7 @@ ENTRY(memcpy)
22023 movl -4(%rsi, %rdx), %r8d
22024 movl %ecx, (%rdi)
22025 movl %r8d, -4(%rdi, %rdx)
22026 + pax_force_retaddr
22027 retq
22028 .p2align 4
22029 .Lless_3bytes:
22030 @@ -179,6 +184,7 @@ ENTRY(memcpy)
22031 movb %cl, (%rdi)
22032
22033 .Lend:
22034 + pax_force_retaddr
22035 retq
22036 CFI_ENDPROC
22037 ENDPROC(memcpy)
22038 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22039 index ee16461..c39c199 100644
22040 --- a/arch/x86/lib/memmove_64.S
22041 +++ b/arch/x86/lib/memmove_64.S
22042 @@ -61,13 +61,13 @@ ENTRY(memmove)
22043 5:
22044 sub $0x20, %rdx
22045 movq 0*8(%rsi), %r11
22046 - movq 1*8(%rsi), %r10
22047 + movq 1*8(%rsi), %rcx
22048 movq 2*8(%rsi), %r9
22049 movq 3*8(%rsi), %r8
22050 leaq 4*8(%rsi), %rsi
22051
22052 movq %r11, 0*8(%rdi)
22053 - movq %r10, 1*8(%rdi)
22054 + movq %rcx, 1*8(%rdi)
22055 movq %r9, 2*8(%rdi)
22056 movq %r8, 3*8(%rdi)
22057 leaq 4*8(%rdi), %rdi
22058 @@ -81,10 +81,10 @@ ENTRY(memmove)
22059 4:
22060 movq %rdx, %rcx
22061 movq -8(%rsi, %rdx), %r11
22062 - lea -8(%rdi, %rdx), %r10
22063 + lea -8(%rdi, %rdx), %r9
22064 shrq $3, %rcx
22065 rep movsq
22066 - movq %r11, (%r10)
22067 + movq %r11, (%r9)
22068 jmp 13f
22069 .Lmemmove_end_forward:
22070
22071 @@ -95,14 +95,14 @@ ENTRY(memmove)
22072 7:
22073 movq %rdx, %rcx
22074 movq (%rsi), %r11
22075 - movq %rdi, %r10
22076 + movq %rdi, %r9
22077 leaq -8(%rsi, %rdx), %rsi
22078 leaq -8(%rdi, %rdx), %rdi
22079 shrq $3, %rcx
22080 std
22081 rep movsq
22082 cld
22083 - movq %r11, (%r10)
22084 + movq %r11, (%r9)
22085 jmp 13f
22086
22087 /*
22088 @@ -127,13 +127,13 @@ ENTRY(memmove)
22089 8:
22090 subq $0x20, %rdx
22091 movq -1*8(%rsi), %r11
22092 - movq -2*8(%rsi), %r10
22093 + movq -2*8(%rsi), %rcx
22094 movq -3*8(%rsi), %r9
22095 movq -4*8(%rsi), %r8
22096 leaq -4*8(%rsi), %rsi
22097
22098 movq %r11, -1*8(%rdi)
22099 - movq %r10, -2*8(%rdi)
22100 + movq %rcx, -2*8(%rdi)
22101 movq %r9, -3*8(%rdi)
22102 movq %r8, -4*8(%rdi)
22103 leaq -4*8(%rdi), %rdi
22104 @@ -151,11 +151,11 @@ ENTRY(memmove)
22105 * Move data from 16 bytes to 31 bytes.
22106 */
22107 movq 0*8(%rsi), %r11
22108 - movq 1*8(%rsi), %r10
22109 + movq 1*8(%rsi), %rcx
22110 movq -2*8(%rsi, %rdx), %r9
22111 movq -1*8(%rsi, %rdx), %r8
22112 movq %r11, 0*8(%rdi)
22113 - movq %r10, 1*8(%rdi)
22114 + movq %rcx, 1*8(%rdi)
22115 movq %r9, -2*8(%rdi, %rdx)
22116 movq %r8, -1*8(%rdi, %rdx)
22117 jmp 13f
22118 @@ -167,9 +167,9 @@ ENTRY(memmove)
22119 * Move data from 8 bytes to 15 bytes.
22120 */
22121 movq 0*8(%rsi), %r11
22122 - movq -1*8(%rsi, %rdx), %r10
22123 + movq -1*8(%rsi, %rdx), %r9
22124 movq %r11, 0*8(%rdi)
22125 - movq %r10, -1*8(%rdi, %rdx)
22126 + movq %r9, -1*8(%rdi, %rdx)
22127 jmp 13f
22128 10:
22129 cmpq $4, %rdx
22130 @@ -178,9 +178,9 @@ ENTRY(memmove)
22131 * Move data from 4 bytes to 7 bytes.
22132 */
22133 movl (%rsi), %r11d
22134 - movl -4(%rsi, %rdx), %r10d
22135 + movl -4(%rsi, %rdx), %r9d
22136 movl %r11d, (%rdi)
22137 - movl %r10d, -4(%rdi, %rdx)
22138 + movl %r9d, -4(%rdi, %rdx)
22139 jmp 13f
22140 11:
22141 cmp $2, %rdx
22142 @@ -189,9 +189,9 @@ ENTRY(memmove)
22143 * Move data from 2 bytes to 3 bytes.
22144 */
22145 movw (%rsi), %r11w
22146 - movw -2(%rsi, %rdx), %r10w
22147 + movw -2(%rsi, %rdx), %r9w
22148 movw %r11w, (%rdi)
22149 - movw %r10w, -2(%rdi, %rdx)
22150 + movw %r9w, -2(%rdi, %rdx)
22151 jmp 13f
22152 12:
22153 cmp $1, %rdx
22154 @@ -202,6 +202,7 @@ ENTRY(memmove)
22155 movb (%rsi), %r11b
22156 movb %r11b, (%rdi)
22157 13:
22158 + pax_force_retaddr
22159 retq
22160 CFI_ENDPROC
22161
22162 @@ -210,6 +211,7 @@ ENTRY(memmove)
22163 /* Forward moving data. */
22164 movq %rdx, %rcx
22165 rep movsb
22166 + pax_force_retaddr
22167 retq
22168 .Lmemmove_end_forward_efs:
22169 .previous
22170 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22171 index 2dcb380..963660a 100644
22172 --- a/arch/x86/lib/memset_64.S
22173 +++ b/arch/x86/lib/memset_64.S
22174 @@ -30,6 +30,7 @@
22175 movl %edx,%ecx
22176 rep stosb
22177 movq %r9,%rax
22178 + pax_force_retaddr
22179 ret
22180 .Lmemset_e:
22181 .previous
22182 @@ -52,6 +53,7 @@
22183 movq %rdx,%rcx
22184 rep stosb
22185 movq %r9,%rax
22186 + pax_force_retaddr
22187 ret
22188 .Lmemset_e_e:
22189 .previous
22190 @@ -59,7 +61,7 @@
22191 ENTRY(memset)
22192 ENTRY(__memset)
22193 CFI_STARTPROC
22194 - movq %rdi,%r10
22195 + movq %rdi,%r11
22196
22197 /* expand byte value */
22198 movzbl %sil,%ecx
22199 @@ -117,7 +119,8 @@ ENTRY(__memset)
22200 jnz .Lloop_1
22201
22202 .Lende:
22203 - movq %r10,%rax
22204 + movq %r11,%rax
22205 + pax_force_retaddr
22206 ret
22207
22208 CFI_RESTORE_STATE
22209 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22210 index c9f2d9b..e7fd2c0 100644
22211 --- a/arch/x86/lib/mmx_32.c
22212 +++ b/arch/x86/lib/mmx_32.c
22213 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22214 {
22215 void *p;
22216 int i;
22217 + unsigned long cr0;
22218
22219 if (unlikely(in_interrupt()))
22220 return __memcpy(to, from, len);
22221 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22222 kernel_fpu_begin();
22223
22224 __asm__ __volatile__ (
22225 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22226 - " prefetch 64(%0)\n"
22227 - " prefetch 128(%0)\n"
22228 - " prefetch 192(%0)\n"
22229 - " prefetch 256(%0)\n"
22230 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22231 + " prefetch 64(%1)\n"
22232 + " prefetch 128(%1)\n"
22233 + " prefetch 192(%1)\n"
22234 + " prefetch 256(%1)\n"
22235 "2: \n"
22236 ".section .fixup, \"ax\"\n"
22237 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22238 + "3: \n"
22239 +
22240 +#ifdef CONFIG_PAX_KERNEXEC
22241 + " movl %%cr0, %0\n"
22242 + " movl %0, %%eax\n"
22243 + " andl $0xFFFEFFFF, %%eax\n"
22244 + " movl %%eax, %%cr0\n"
22245 +#endif
22246 +
22247 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22248 +
22249 +#ifdef CONFIG_PAX_KERNEXEC
22250 + " movl %0, %%cr0\n"
22251 +#endif
22252 +
22253 " jmp 2b\n"
22254 ".previous\n"
22255 _ASM_EXTABLE(1b, 3b)
22256 - : : "r" (from));
22257 + : "=&r" (cr0) : "r" (from) : "ax");
22258
22259 for ( ; i > 5; i--) {
22260 __asm__ __volatile__ (
22261 - "1: prefetch 320(%0)\n"
22262 - "2: movq (%0), %%mm0\n"
22263 - " movq 8(%0), %%mm1\n"
22264 - " movq 16(%0), %%mm2\n"
22265 - " movq 24(%0), %%mm3\n"
22266 - " movq %%mm0, (%1)\n"
22267 - " movq %%mm1, 8(%1)\n"
22268 - " movq %%mm2, 16(%1)\n"
22269 - " movq %%mm3, 24(%1)\n"
22270 - " movq 32(%0), %%mm0\n"
22271 - " movq 40(%0), %%mm1\n"
22272 - " movq 48(%0), %%mm2\n"
22273 - " movq 56(%0), %%mm3\n"
22274 - " movq %%mm0, 32(%1)\n"
22275 - " movq %%mm1, 40(%1)\n"
22276 - " movq %%mm2, 48(%1)\n"
22277 - " movq %%mm3, 56(%1)\n"
22278 + "1: prefetch 320(%1)\n"
22279 + "2: movq (%1), %%mm0\n"
22280 + " movq 8(%1), %%mm1\n"
22281 + " movq 16(%1), %%mm2\n"
22282 + " movq 24(%1), %%mm3\n"
22283 + " movq %%mm0, (%2)\n"
22284 + " movq %%mm1, 8(%2)\n"
22285 + " movq %%mm2, 16(%2)\n"
22286 + " movq %%mm3, 24(%2)\n"
22287 + " movq 32(%1), %%mm0\n"
22288 + " movq 40(%1), %%mm1\n"
22289 + " movq 48(%1), %%mm2\n"
22290 + " movq 56(%1), %%mm3\n"
22291 + " movq %%mm0, 32(%2)\n"
22292 + " movq %%mm1, 40(%2)\n"
22293 + " movq %%mm2, 48(%2)\n"
22294 + " movq %%mm3, 56(%2)\n"
22295 ".section .fixup, \"ax\"\n"
22296 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22297 + "3:\n"
22298 +
22299 +#ifdef CONFIG_PAX_KERNEXEC
22300 + " movl %%cr0, %0\n"
22301 + " movl %0, %%eax\n"
22302 + " andl $0xFFFEFFFF, %%eax\n"
22303 + " movl %%eax, %%cr0\n"
22304 +#endif
22305 +
22306 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22307 +
22308 +#ifdef CONFIG_PAX_KERNEXEC
22309 + " movl %0, %%cr0\n"
22310 +#endif
22311 +
22312 " jmp 2b\n"
22313 ".previous\n"
22314 _ASM_EXTABLE(1b, 3b)
22315 - : : "r" (from), "r" (to) : "memory");
22316 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22317
22318 from += 64;
22319 to += 64;
22320 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22321 static void fast_copy_page(void *to, void *from)
22322 {
22323 int i;
22324 + unsigned long cr0;
22325
22326 kernel_fpu_begin();
22327
22328 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22329 * but that is for later. -AV
22330 */
22331 __asm__ __volatile__(
22332 - "1: prefetch (%0)\n"
22333 - " prefetch 64(%0)\n"
22334 - " prefetch 128(%0)\n"
22335 - " prefetch 192(%0)\n"
22336 - " prefetch 256(%0)\n"
22337 + "1: prefetch (%1)\n"
22338 + " prefetch 64(%1)\n"
22339 + " prefetch 128(%1)\n"
22340 + " prefetch 192(%1)\n"
22341 + " prefetch 256(%1)\n"
22342 "2: \n"
22343 ".section .fixup, \"ax\"\n"
22344 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22345 + "3: \n"
22346 +
22347 +#ifdef CONFIG_PAX_KERNEXEC
22348 + " movl %%cr0, %0\n"
22349 + " movl %0, %%eax\n"
22350 + " andl $0xFFFEFFFF, %%eax\n"
22351 + " movl %%eax, %%cr0\n"
22352 +#endif
22353 +
22354 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22355 +
22356 +#ifdef CONFIG_PAX_KERNEXEC
22357 + " movl %0, %%cr0\n"
22358 +#endif
22359 +
22360 " jmp 2b\n"
22361 ".previous\n"
22362 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22363 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22364
22365 for (i = 0; i < (4096-320)/64; i++) {
22366 __asm__ __volatile__ (
22367 - "1: prefetch 320(%0)\n"
22368 - "2: movq (%0), %%mm0\n"
22369 - " movntq %%mm0, (%1)\n"
22370 - " movq 8(%0), %%mm1\n"
22371 - " movntq %%mm1, 8(%1)\n"
22372 - " movq 16(%0), %%mm2\n"
22373 - " movntq %%mm2, 16(%1)\n"
22374 - " movq 24(%0), %%mm3\n"
22375 - " movntq %%mm3, 24(%1)\n"
22376 - " movq 32(%0), %%mm4\n"
22377 - " movntq %%mm4, 32(%1)\n"
22378 - " movq 40(%0), %%mm5\n"
22379 - " movntq %%mm5, 40(%1)\n"
22380 - " movq 48(%0), %%mm6\n"
22381 - " movntq %%mm6, 48(%1)\n"
22382 - " movq 56(%0), %%mm7\n"
22383 - " movntq %%mm7, 56(%1)\n"
22384 + "1: prefetch 320(%1)\n"
22385 + "2: movq (%1), %%mm0\n"
22386 + " movntq %%mm0, (%2)\n"
22387 + " movq 8(%1), %%mm1\n"
22388 + " movntq %%mm1, 8(%2)\n"
22389 + " movq 16(%1), %%mm2\n"
22390 + " movntq %%mm2, 16(%2)\n"
22391 + " movq 24(%1), %%mm3\n"
22392 + " movntq %%mm3, 24(%2)\n"
22393 + " movq 32(%1), %%mm4\n"
22394 + " movntq %%mm4, 32(%2)\n"
22395 + " movq 40(%1), %%mm5\n"
22396 + " movntq %%mm5, 40(%2)\n"
22397 + " movq 48(%1), %%mm6\n"
22398 + " movntq %%mm6, 48(%2)\n"
22399 + " movq 56(%1), %%mm7\n"
22400 + " movntq %%mm7, 56(%2)\n"
22401 ".section .fixup, \"ax\"\n"
22402 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22403 + "3:\n"
22404 +
22405 +#ifdef CONFIG_PAX_KERNEXEC
22406 + " movl %%cr0, %0\n"
22407 + " movl %0, %%eax\n"
22408 + " andl $0xFFFEFFFF, %%eax\n"
22409 + " movl %%eax, %%cr0\n"
22410 +#endif
22411 +
22412 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22413 +
22414 +#ifdef CONFIG_PAX_KERNEXEC
22415 + " movl %0, %%cr0\n"
22416 +#endif
22417 +
22418 " jmp 2b\n"
22419 ".previous\n"
22420 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22421 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22422
22423 from += 64;
22424 to += 64;
22425 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22426 static void fast_copy_page(void *to, void *from)
22427 {
22428 int i;
22429 + unsigned long cr0;
22430
22431 kernel_fpu_begin();
22432
22433 __asm__ __volatile__ (
22434 - "1: prefetch (%0)\n"
22435 - " prefetch 64(%0)\n"
22436 - " prefetch 128(%0)\n"
22437 - " prefetch 192(%0)\n"
22438 - " prefetch 256(%0)\n"
22439 + "1: prefetch (%1)\n"
22440 + " prefetch 64(%1)\n"
22441 + " prefetch 128(%1)\n"
22442 + " prefetch 192(%1)\n"
22443 + " prefetch 256(%1)\n"
22444 "2: \n"
22445 ".section .fixup, \"ax\"\n"
22446 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22447 + "3: \n"
22448 +
22449 +#ifdef CONFIG_PAX_KERNEXEC
22450 + " movl %%cr0, %0\n"
22451 + " movl %0, %%eax\n"
22452 + " andl $0xFFFEFFFF, %%eax\n"
22453 + " movl %%eax, %%cr0\n"
22454 +#endif
22455 +
22456 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22457 +
22458 +#ifdef CONFIG_PAX_KERNEXEC
22459 + " movl %0, %%cr0\n"
22460 +#endif
22461 +
22462 " jmp 2b\n"
22463 ".previous\n"
22464 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22465 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22466
22467 for (i = 0; i < 4096/64; i++) {
22468 __asm__ __volatile__ (
22469 - "1: prefetch 320(%0)\n"
22470 - "2: movq (%0), %%mm0\n"
22471 - " movq 8(%0), %%mm1\n"
22472 - " movq 16(%0), %%mm2\n"
22473 - " movq 24(%0), %%mm3\n"
22474 - " movq %%mm0, (%1)\n"
22475 - " movq %%mm1, 8(%1)\n"
22476 - " movq %%mm2, 16(%1)\n"
22477 - " movq %%mm3, 24(%1)\n"
22478 - " movq 32(%0), %%mm0\n"
22479 - " movq 40(%0), %%mm1\n"
22480 - " movq 48(%0), %%mm2\n"
22481 - " movq 56(%0), %%mm3\n"
22482 - " movq %%mm0, 32(%1)\n"
22483 - " movq %%mm1, 40(%1)\n"
22484 - " movq %%mm2, 48(%1)\n"
22485 - " movq %%mm3, 56(%1)\n"
22486 + "1: prefetch 320(%1)\n"
22487 + "2: movq (%1), %%mm0\n"
22488 + " movq 8(%1), %%mm1\n"
22489 + " movq 16(%1), %%mm2\n"
22490 + " movq 24(%1), %%mm3\n"
22491 + " movq %%mm0, (%2)\n"
22492 + " movq %%mm1, 8(%2)\n"
22493 + " movq %%mm2, 16(%2)\n"
22494 + " movq %%mm3, 24(%2)\n"
22495 + " movq 32(%1), %%mm0\n"
22496 + " movq 40(%1), %%mm1\n"
22497 + " movq 48(%1), %%mm2\n"
22498 + " movq 56(%1), %%mm3\n"
22499 + " movq %%mm0, 32(%2)\n"
22500 + " movq %%mm1, 40(%2)\n"
22501 + " movq %%mm2, 48(%2)\n"
22502 + " movq %%mm3, 56(%2)\n"
22503 ".section .fixup, \"ax\"\n"
22504 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22505 + "3:\n"
22506 +
22507 +#ifdef CONFIG_PAX_KERNEXEC
22508 + " movl %%cr0, %0\n"
22509 + " movl %0, %%eax\n"
22510 + " andl $0xFFFEFFFF, %%eax\n"
22511 + " movl %%eax, %%cr0\n"
22512 +#endif
22513 +
22514 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22515 +
22516 +#ifdef CONFIG_PAX_KERNEXEC
22517 + " movl %0, %%cr0\n"
22518 +#endif
22519 +
22520 " jmp 2b\n"
22521 ".previous\n"
22522 _ASM_EXTABLE(1b, 3b)
22523 - : : "r" (from), "r" (to) : "memory");
22524 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22525
22526 from += 64;
22527 to += 64;
22528 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22529 index 69fa106..adda88b 100644
22530 --- a/arch/x86/lib/msr-reg.S
22531 +++ b/arch/x86/lib/msr-reg.S
22532 @@ -3,6 +3,7 @@
22533 #include <asm/dwarf2.h>
22534 #include <asm/asm.h>
22535 #include <asm/msr.h>
22536 +#include <asm/alternative-asm.h>
22537
22538 #ifdef CONFIG_X86_64
22539 /*
22540 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22541 CFI_STARTPROC
22542 pushq_cfi %rbx
22543 pushq_cfi %rbp
22544 - movq %rdi, %r10 /* Save pointer */
22545 + movq %rdi, %r9 /* Save pointer */
22546 xorl %r11d, %r11d /* Return value */
22547 movl (%rdi), %eax
22548 movl 4(%rdi), %ecx
22549 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22550 movl 28(%rdi), %edi
22551 CFI_REMEMBER_STATE
22552 1: \op
22553 -2: movl %eax, (%r10)
22554 +2: movl %eax, (%r9)
22555 movl %r11d, %eax /* Return value */
22556 - movl %ecx, 4(%r10)
22557 - movl %edx, 8(%r10)
22558 - movl %ebx, 12(%r10)
22559 - movl %ebp, 20(%r10)
22560 - movl %esi, 24(%r10)
22561 - movl %edi, 28(%r10)
22562 + movl %ecx, 4(%r9)
22563 + movl %edx, 8(%r9)
22564 + movl %ebx, 12(%r9)
22565 + movl %ebp, 20(%r9)
22566 + movl %esi, 24(%r9)
22567 + movl %edi, 28(%r9)
22568 popq_cfi %rbp
22569 popq_cfi %rbx
22570 + pax_force_retaddr
22571 ret
22572 3:
22573 CFI_RESTORE_STATE
22574 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22575 index 7f951c8..ebd573a 100644
22576 --- a/arch/x86/lib/putuser.S
22577 +++ b/arch/x86/lib/putuser.S
22578 @@ -15,7 +15,9 @@
22579 #include <asm/thread_info.h>
22580 #include <asm/errno.h>
22581 #include <asm/asm.h>
22582 -
22583 +#include <asm/segment.h>
22584 +#include <asm/pgtable.h>
22585 +#include <asm/alternative-asm.h>
22586
22587 /*
22588 * __put_user_X
22589 @@ -29,52 +31,119 @@
22590 * as they get called from within inline assembly.
22591 */
22592
22593 -#define ENTER CFI_STARTPROC ; \
22594 - GET_THREAD_INFO(%_ASM_BX)
22595 -#define EXIT ret ; \
22596 +#define ENTER CFI_STARTPROC
22597 +#define EXIT pax_force_retaddr; ret ; \
22598 CFI_ENDPROC
22599
22600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22601 +#define _DEST %_ASM_CX,%_ASM_BX
22602 +#else
22603 +#define _DEST %_ASM_CX
22604 +#endif
22605 +
22606 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22607 +#define __copyuser_seg gs;
22608 +#else
22609 +#define __copyuser_seg
22610 +#endif
22611 +
22612 .text
22613 ENTRY(__put_user_1)
22614 ENTER
22615 +
22616 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22617 + GET_THREAD_INFO(%_ASM_BX)
22618 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22619 jae bad_put_user
22620 -1: movb %al,(%_ASM_CX)
22621 +
22622 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22623 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22624 + cmp %_ASM_BX,%_ASM_CX
22625 + jb 1234f
22626 + xor %ebx,%ebx
22627 +1234:
22628 +#endif
22629 +
22630 +#endif
22631 +
22632 +1: __copyuser_seg movb %al,(_DEST)
22633 xor %eax,%eax
22634 EXIT
22635 ENDPROC(__put_user_1)
22636
22637 ENTRY(__put_user_2)
22638 ENTER
22639 +
22640 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22641 + GET_THREAD_INFO(%_ASM_BX)
22642 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22643 sub $1,%_ASM_BX
22644 cmp %_ASM_BX,%_ASM_CX
22645 jae bad_put_user
22646 -2: movw %ax,(%_ASM_CX)
22647 +
22648 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22649 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22650 + cmp %_ASM_BX,%_ASM_CX
22651 + jb 1234f
22652 + xor %ebx,%ebx
22653 +1234:
22654 +#endif
22655 +
22656 +#endif
22657 +
22658 +2: __copyuser_seg movw %ax,(_DEST)
22659 xor %eax,%eax
22660 EXIT
22661 ENDPROC(__put_user_2)
22662
22663 ENTRY(__put_user_4)
22664 ENTER
22665 +
22666 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22667 + GET_THREAD_INFO(%_ASM_BX)
22668 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22669 sub $3,%_ASM_BX
22670 cmp %_ASM_BX,%_ASM_CX
22671 jae bad_put_user
22672 -3: movl %eax,(%_ASM_CX)
22673 +
22674 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22675 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22676 + cmp %_ASM_BX,%_ASM_CX
22677 + jb 1234f
22678 + xor %ebx,%ebx
22679 +1234:
22680 +#endif
22681 +
22682 +#endif
22683 +
22684 +3: __copyuser_seg movl %eax,(_DEST)
22685 xor %eax,%eax
22686 EXIT
22687 ENDPROC(__put_user_4)
22688
22689 ENTRY(__put_user_8)
22690 ENTER
22691 +
22692 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22693 + GET_THREAD_INFO(%_ASM_BX)
22694 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22695 sub $7,%_ASM_BX
22696 cmp %_ASM_BX,%_ASM_CX
22697 jae bad_put_user
22698 -4: mov %_ASM_AX,(%_ASM_CX)
22699 +
22700 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22701 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22702 + cmp %_ASM_BX,%_ASM_CX
22703 + jb 1234f
22704 + xor %ebx,%ebx
22705 +1234:
22706 +#endif
22707 +
22708 +#endif
22709 +
22710 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22711 #ifdef CONFIG_X86_32
22712 -5: movl %edx,4(%_ASM_CX)
22713 +5: __copyuser_seg movl %edx,4(_DEST)
22714 #endif
22715 xor %eax,%eax
22716 EXIT
22717 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22718 index 1cad221..de671ee 100644
22719 --- a/arch/x86/lib/rwlock.S
22720 +++ b/arch/x86/lib/rwlock.S
22721 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22722 FRAME
22723 0: LOCK_PREFIX
22724 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22725 +
22726 +#ifdef CONFIG_PAX_REFCOUNT
22727 + jno 1234f
22728 + LOCK_PREFIX
22729 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22730 + int $4
22731 +1234:
22732 + _ASM_EXTABLE(1234b, 1234b)
22733 +#endif
22734 +
22735 1: rep; nop
22736 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22737 jne 1b
22738 LOCK_PREFIX
22739 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22740 +
22741 +#ifdef CONFIG_PAX_REFCOUNT
22742 + jno 1234f
22743 + LOCK_PREFIX
22744 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22745 + int $4
22746 +1234:
22747 + _ASM_EXTABLE(1234b, 1234b)
22748 +#endif
22749 +
22750 jnz 0b
22751 ENDFRAME
22752 + pax_force_retaddr
22753 ret
22754 CFI_ENDPROC
22755 END(__write_lock_failed)
22756 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22757 FRAME
22758 0: LOCK_PREFIX
22759 READ_LOCK_SIZE(inc) (%__lock_ptr)
22760 +
22761 +#ifdef CONFIG_PAX_REFCOUNT
22762 + jno 1234f
22763 + LOCK_PREFIX
22764 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22765 + int $4
22766 +1234:
22767 + _ASM_EXTABLE(1234b, 1234b)
22768 +#endif
22769 +
22770 1: rep; nop
22771 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22772 js 1b
22773 LOCK_PREFIX
22774 READ_LOCK_SIZE(dec) (%__lock_ptr)
22775 +
22776 +#ifdef CONFIG_PAX_REFCOUNT
22777 + jno 1234f
22778 + LOCK_PREFIX
22779 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22780 + int $4
22781 +1234:
22782 + _ASM_EXTABLE(1234b, 1234b)
22783 +#endif
22784 +
22785 js 0b
22786 ENDFRAME
22787 + pax_force_retaddr
22788 ret
22789 CFI_ENDPROC
22790 END(__read_lock_failed)
22791 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22792 index 5dff5f0..cadebf4 100644
22793 --- a/arch/x86/lib/rwsem.S
22794 +++ b/arch/x86/lib/rwsem.S
22795 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22796 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22797 CFI_RESTORE __ASM_REG(dx)
22798 restore_common_regs
22799 + pax_force_retaddr
22800 ret
22801 CFI_ENDPROC
22802 ENDPROC(call_rwsem_down_read_failed)
22803 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22804 movq %rax,%rdi
22805 call rwsem_down_write_failed
22806 restore_common_regs
22807 + pax_force_retaddr
22808 ret
22809 CFI_ENDPROC
22810 ENDPROC(call_rwsem_down_write_failed)
22811 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22812 movq %rax,%rdi
22813 call rwsem_wake
22814 restore_common_regs
22815 -1: ret
22816 +1: pax_force_retaddr
22817 + ret
22818 CFI_ENDPROC
22819 ENDPROC(call_rwsem_wake)
22820
22821 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22822 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22823 CFI_RESTORE __ASM_REG(dx)
22824 restore_common_regs
22825 + pax_force_retaddr
22826 ret
22827 CFI_ENDPROC
22828 ENDPROC(call_rwsem_downgrade_wake)
22829 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22830 index a63efd6..ccecad8 100644
22831 --- a/arch/x86/lib/thunk_64.S
22832 +++ b/arch/x86/lib/thunk_64.S
22833 @@ -8,6 +8,7 @@
22834 #include <linux/linkage.h>
22835 #include <asm/dwarf2.h>
22836 #include <asm/calling.h>
22837 +#include <asm/alternative-asm.h>
22838
22839 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22840 .macro THUNK name, func, put_ret_addr_in_rdi=0
22841 @@ -41,5 +42,6 @@
22842 SAVE_ARGS
22843 restore:
22844 RESTORE_ARGS
22845 + pax_force_retaddr
22846 ret
22847 CFI_ENDPROC
22848 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22849 index 1781b2f..90368dd 100644
22850 --- a/arch/x86/lib/usercopy_32.c
22851 +++ b/arch/x86/lib/usercopy_32.c
22852 @@ -42,10 +42,12 @@ do { \
22853 int __d0; \
22854 might_fault(); \
22855 __asm__ __volatile__( \
22856 + __COPYUSER_SET_ES \
22857 "0: rep; stosl\n" \
22858 " movl %2,%0\n" \
22859 "1: rep; stosb\n" \
22860 "2:\n" \
22861 + __COPYUSER_RESTORE_ES \
22862 ".section .fixup,\"ax\"\n" \
22863 "3: lea 0(%2,%0,4),%0\n" \
22864 " jmp 2b\n" \
22865 @@ -97,7 +99,7 @@ EXPORT_SYMBOL(__clear_user);
22866
22867 #ifdef CONFIG_X86_INTEL_USERCOPY
22868 static unsigned long
22869 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22870 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22871 {
22872 int d0, d1;
22873 __asm__ __volatile__(
22874 @@ -109,36 +111,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22875 " .align 2,0x90\n"
22876 "3: movl 0(%4), %%eax\n"
22877 "4: movl 4(%4), %%edx\n"
22878 - "5: movl %%eax, 0(%3)\n"
22879 - "6: movl %%edx, 4(%3)\n"
22880 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22881 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22882 "7: movl 8(%4), %%eax\n"
22883 "8: movl 12(%4),%%edx\n"
22884 - "9: movl %%eax, 8(%3)\n"
22885 - "10: movl %%edx, 12(%3)\n"
22886 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22887 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22888 "11: movl 16(%4), %%eax\n"
22889 "12: movl 20(%4), %%edx\n"
22890 - "13: movl %%eax, 16(%3)\n"
22891 - "14: movl %%edx, 20(%3)\n"
22892 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22893 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22894 "15: movl 24(%4), %%eax\n"
22895 "16: movl 28(%4), %%edx\n"
22896 - "17: movl %%eax, 24(%3)\n"
22897 - "18: movl %%edx, 28(%3)\n"
22898 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22899 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22900 "19: movl 32(%4), %%eax\n"
22901 "20: movl 36(%4), %%edx\n"
22902 - "21: movl %%eax, 32(%3)\n"
22903 - "22: movl %%edx, 36(%3)\n"
22904 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22905 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22906 "23: movl 40(%4), %%eax\n"
22907 "24: movl 44(%4), %%edx\n"
22908 - "25: movl %%eax, 40(%3)\n"
22909 - "26: movl %%edx, 44(%3)\n"
22910 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22911 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22912 "27: movl 48(%4), %%eax\n"
22913 "28: movl 52(%4), %%edx\n"
22914 - "29: movl %%eax, 48(%3)\n"
22915 - "30: movl %%edx, 52(%3)\n"
22916 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22917 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22918 "31: movl 56(%4), %%eax\n"
22919 "32: movl 60(%4), %%edx\n"
22920 - "33: movl %%eax, 56(%3)\n"
22921 - "34: movl %%edx, 60(%3)\n"
22922 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22923 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22924 " addl $-64, %0\n"
22925 " addl $64, %4\n"
22926 " addl $64, %3\n"
22927 @@ -148,10 +150,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22928 " shrl $2, %0\n"
22929 " andl $3, %%eax\n"
22930 " cld\n"
22931 + __COPYUSER_SET_ES
22932 "99: rep; movsl\n"
22933 "36: movl %%eax, %0\n"
22934 "37: rep; movsb\n"
22935 "100:\n"
22936 + __COPYUSER_RESTORE_ES
22937 ".section .fixup,\"ax\"\n"
22938 "101: lea 0(%%eax,%0,4),%0\n"
22939 " jmp 100b\n"
22940 @@ -201,46 +205,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22941 }
22942
22943 static unsigned long
22944 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22945 +{
22946 + int d0, d1;
22947 + __asm__ __volatile__(
22948 + " .align 2,0x90\n"
22949 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22950 + " cmpl $67, %0\n"
22951 + " jbe 3f\n"
22952 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22953 + " .align 2,0x90\n"
22954 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22955 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22956 + "5: movl %%eax, 0(%3)\n"
22957 + "6: movl %%edx, 4(%3)\n"
22958 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22959 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22960 + "9: movl %%eax, 8(%3)\n"
22961 + "10: movl %%edx, 12(%3)\n"
22962 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22963 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22964 + "13: movl %%eax, 16(%3)\n"
22965 + "14: movl %%edx, 20(%3)\n"
22966 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22967 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22968 + "17: movl %%eax, 24(%3)\n"
22969 + "18: movl %%edx, 28(%3)\n"
22970 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22971 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22972 + "21: movl %%eax, 32(%3)\n"
22973 + "22: movl %%edx, 36(%3)\n"
22974 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22975 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22976 + "25: movl %%eax, 40(%3)\n"
22977 + "26: movl %%edx, 44(%3)\n"
22978 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22979 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22980 + "29: movl %%eax, 48(%3)\n"
22981 + "30: movl %%edx, 52(%3)\n"
22982 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22983 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22984 + "33: movl %%eax, 56(%3)\n"
22985 + "34: movl %%edx, 60(%3)\n"
22986 + " addl $-64, %0\n"
22987 + " addl $64, %4\n"
22988 + " addl $64, %3\n"
22989 + " cmpl $63, %0\n"
22990 + " ja 1b\n"
22991 + "35: movl %0, %%eax\n"
22992 + " shrl $2, %0\n"
22993 + " andl $3, %%eax\n"
22994 + " cld\n"
22995 + "99: rep; "__copyuser_seg" movsl\n"
22996 + "36: movl %%eax, %0\n"
22997 + "37: rep; "__copyuser_seg" movsb\n"
22998 + "100:\n"
22999 + ".section .fixup,\"ax\"\n"
23000 + "101: lea 0(%%eax,%0,4),%0\n"
23001 + " jmp 100b\n"
23002 + ".previous\n"
23003 + _ASM_EXTABLE(1b,100b)
23004 + _ASM_EXTABLE(2b,100b)
23005 + _ASM_EXTABLE(3b,100b)
23006 + _ASM_EXTABLE(4b,100b)
23007 + _ASM_EXTABLE(5b,100b)
23008 + _ASM_EXTABLE(6b,100b)
23009 + _ASM_EXTABLE(7b,100b)
23010 + _ASM_EXTABLE(8b,100b)
23011 + _ASM_EXTABLE(9b,100b)
23012 + _ASM_EXTABLE(10b,100b)
23013 + _ASM_EXTABLE(11b,100b)
23014 + _ASM_EXTABLE(12b,100b)
23015 + _ASM_EXTABLE(13b,100b)
23016 + _ASM_EXTABLE(14b,100b)
23017 + _ASM_EXTABLE(15b,100b)
23018 + _ASM_EXTABLE(16b,100b)
23019 + _ASM_EXTABLE(17b,100b)
23020 + _ASM_EXTABLE(18b,100b)
23021 + _ASM_EXTABLE(19b,100b)
23022 + _ASM_EXTABLE(20b,100b)
23023 + _ASM_EXTABLE(21b,100b)
23024 + _ASM_EXTABLE(22b,100b)
23025 + _ASM_EXTABLE(23b,100b)
23026 + _ASM_EXTABLE(24b,100b)
23027 + _ASM_EXTABLE(25b,100b)
23028 + _ASM_EXTABLE(26b,100b)
23029 + _ASM_EXTABLE(27b,100b)
23030 + _ASM_EXTABLE(28b,100b)
23031 + _ASM_EXTABLE(29b,100b)
23032 + _ASM_EXTABLE(30b,100b)
23033 + _ASM_EXTABLE(31b,100b)
23034 + _ASM_EXTABLE(32b,100b)
23035 + _ASM_EXTABLE(33b,100b)
23036 + _ASM_EXTABLE(34b,100b)
23037 + _ASM_EXTABLE(35b,100b)
23038 + _ASM_EXTABLE(36b,100b)
23039 + _ASM_EXTABLE(37b,100b)
23040 + _ASM_EXTABLE(99b,101b)
23041 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23042 + : "1"(to), "2"(from), "0"(size)
23043 + : "eax", "edx", "memory");
23044 + return size;
23045 +}
23046 +
23047 +static unsigned long __size_overflow(3)
23048 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23049 {
23050 int d0, d1;
23051 __asm__ __volatile__(
23052 " .align 2,0x90\n"
23053 - "0: movl 32(%4), %%eax\n"
23054 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23055 " cmpl $67, %0\n"
23056 " jbe 2f\n"
23057 - "1: movl 64(%4), %%eax\n"
23058 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23059 " .align 2,0x90\n"
23060 - "2: movl 0(%4), %%eax\n"
23061 - "21: movl 4(%4), %%edx\n"
23062 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23063 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23064 " movl %%eax, 0(%3)\n"
23065 " movl %%edx, 4(%3)\n"
23066 - "3: movl 8(%4), %%eax\n"
23067 - "31: movl 12(%4),%%edx\n"
23068 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23069 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23070 " movl %%eax, 8(%3)\n"
23071 " movl %%edx, 12(%3)\n"
23072 - "4: movl 16(%4), %%eax\n"
23073 - "41: movl 20(%4), %%edx\n"
23074 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23075 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23076 " movl %%eax, 16(%3)\n"
23077 " movl %%edx, 20(%3)\n"
23078 - "10: movl 24(%4), %%eax\n"
23079 - "51: movl 28(%4), %%edx\n"
23080 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23081 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23082 " movl %%eax, 24(%3)\n"
23083 " movl %%edx, 28(%3)\n"
23084 - "11: movl 32(%4), %%eax\n"
23085 - "61: movl 36(%4), %%edx\n"
23086 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23087 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23088 " movl %%eax, 32(%3)\n"
23089 " movl %%edx, 36(%3)\n"
23090 - "12: movl 40(%4), %%eax\n"
23091 - "71: movl 44(%4), %%edx\n"
23092 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23093 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23094 " movl %%eax, 40(%3)\n"
23095 " movl %%edx, 44(%3)\n"
23096 - "13: movl 48(%4), %%eax\n"
23097 - "81: movl 52(%4), %%edx\n"
23098 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23099 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23100 " movl %%eax, 48(%3)\n"
23101 " movl %%edx, 52(%3)\n"
23102 - "14: movl 56(%4), %%eax\n"
23103 - "91: movl 60(%4), %%edx\n"
23104 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23105 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23106 " movl %%eax, 56(%3)\n"
23107 " movl %%edx, 60(%3)\n"
23108 " addl $-64, %0\n"
23109 @@ -252,9 +360,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23110 " shrl $2, %0\n"
23111 " andl $3, %%eax\n"
23112 " cld\n"
23113 - "6: rep; movsl\n"
23114 + "6: rep; "__copyuser_seg" movsl\n"
23115 " movl %%eax,%0\n"
23116 - "7: rep; movsb\n"
23117 + "7: rep; "__copyuser_seg" movsb\n"
23118 "8:\n"
23119 ".section .fixup,\"ax\"\n"
23120 "9: lea 0(%%eax,%0,4),%0\n"
23121 @@ -297,48 +405,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23122 * hyoshiok@miraclelinux.com
23123 */
23124
23125 -static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23126 +static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
23127 const void __user *from, unsigned long size)
23128 {
23129 int d0, d1;
23130
23131 __asm__ __volatile__(
23132 " .align 2,0x90\n"
23133 - "0: movl 32(%4), %%eax\n"
23134 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23135 " cmpl $67, %0\n"
23136 " jbe 2f\n"
23137 - "1: movl 64(%4), %%eax\n"
23138 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23139 " .align 2,0x90\n"
23140 - "2: movl 0(%4), %%eax\n"
23141 - "21: movl 4(%4), %%edx\n"
23142 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23143 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23144 " movnti %%eax, 0(%3)\n"
23145 " movnti %%edx, 4(%3)\n"
23146 - "3: movl 8(%4), %%eax\n"
23147 - "31: movl 12(%4),%%edx\n"
23148 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23149 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23150 " movnti %%eax, 8(%3)\n"
23151 " movnti %%edx, 12(%3)\n"
23152 - "4: movl 16(%4), %%eax\n"
23153 - "41: movl 20(%4), %%edx\n"
23154 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23155 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23156 " movnti %%eax, 16(%3)\n"
23157 " movnti %%edx, 20(%3)\n"
23158 - "10: movl 24(%4), %%eax\n"
23159 - "51: movl 28(%4), %%edx\n"
23160 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23161 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23162 " movnti %%eax, 24(%3)\n"
23163 " movnti %%edx, 28(%3)\n"
23164 - "11: movl 32(%4), %%eax\n"
23165 - "61: movl 36(%4), %%edx\n"
23166 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23167 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23168 " movnti %%eax, 32(%3)\n"
23169 " movnti %%edx, 36(%3)\n"
23170 - "12: movl 40(%4), %%eax\n"
23171 - "71: movl 44(%4), %%edx\n"
23172 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23173 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23174 " movnti %%eax, 40(%3)\n"
23175 " movnti %%edx, 44(%3)\n"
23176 - "13: movl 48(%4), %%eax\n"
23177 - "81: movl 52(%4), %%edx\n"
23178 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23179 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23180 " movnti %%eax, 48(%3)\n"
23181 " movnti %%edx, 52(%3)\n"
23182 - "14: movl 56(%4), %%eax\n"
23183 - "91: movl 60(%4), %%edx\n"
23184 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23185 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23186 " movnti %%eax, 56(%3)\n"
23187 " movnti %%edx, 60(%3)\n"
23188 " addl $-64, %0\n"
23189 @@ -351,9 +459,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23190 " shrl $2, %0\n"
23191 " andl $3, %%eax\n"
23192 " cld\n"
23193 - "6: rep; movsl\n"
23194 + "6: rep; "__copyuser_seg" movsl\n"
23195 " movl %%eax,%0\n"
23196 - "7: rep; movsb\n"
23197 + "7: rep; "__copyuser_seg" movsb\n"
23198 "8:\n"
23199 ".section .fixup,\"ax\"\n"
23200 "9: lea 0(%%eax,%0,4),%0\n"
23201 @@ -391,48 +499,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23202 return size;
23203 }
23204
23205 -static unsigned long __copy_user_intel_nocache(void *to,
23206 +static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
23207 const void __user *from, unsigned long size)
23208 {
23209 int d0, d1;
23210
23211 __asm__ __volatile__(
23212 " .align 2,0x90\n"
23213 - "0: movl 32(%4), %%eax\n"
23214 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23215 " cmpl $67, %0\n"
23216 " jbe 2f\n"
23217 - "1: movl 64(%4), %%eax\n"
23218 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23219 " .align 2,0x90\n"
23220 - "2: movl 0(%4), %%eax\n"
23221 - "21: movl 4(%4), %%edx\n"
23222 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23223 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23224 " movnti %%eax, 0(%3)\n"
23225 " movnti %%edx, 4(%3)\n"
23226 - "3: movl 8(%4), %%eax\n"
23227 - "31: movl 12(%4),%%edx\n"
23228 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23229 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23230 " movnti %%eax, 8(%3)\n"
23231 " movnti %%edx, 12(%3)\n"
23232 - "4: movl 16(%4), %%eax\n"
23233 - "41: movl 20(%4), %%edx\n"
23234 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23235 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23236 " movnti %%eax, 16(%3)\n"
23237 " movnti %%edx, 20(%3)\n"
23238 - "10: movl 24(%4), %%eax\n"
23239 - "51: movl 28(%4), %%edx\n"
23240 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23241 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23242 " movnti %%eax, 24(%3)\n"
23243 " movnti %%edx, 28(%3)\n"
23244 - "11: movl 32(%4), %%eax\n"
23245 - "61: movl 36(%4), %%edx\n"
23246 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23247 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23248 " movnti %%eax, 32(%3)\n"
23249 " movnti %%edx, 36(%3)\n"
23250 - "12: movl 40(%4), %%eax\n"
23251 - "71: movl 44(%4), %%edx\n"
23252 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23253 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23254 " movnti %%eax, 40(%3)\n"
23255 " movnti %%edx, 44(%3)\n"
23256 - "13: movl 48(%4), %%eax\n"
23257 - "81: movl 52(%4), %%edx\n"
23258 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23259 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23260 " movnti %%eax, 48(%3)\n"
23261 " movnti %%edx, 52(%3)\n"
23262 - "14: movl 56(%4), %%eax\n"
23263 - "91: movl 60(%4), %%edx\n"
23264 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23265 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23266 " movnti %%eax, 56(%3)\n"
23267 " movnti %%edx, 60(%3)\n"
23268 " addl $-64, %0\n"
23269 @@ -445,9 +553,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23270 " shrl $2, %0\n"
23271 " andl $3, %%eax\n"
23272 " cld\n"
23273 - "6: rep; movsl\n"
23274 + "6: rep; "__copyuser_seg" movsl\n"
23275 " movl %%eax,%0\n"
23276 - "7: rep; movsb\n"
23277 + "7: rep; "__copyuser_seg" movsb\n"
23278 "8:\n"
23279 ".section .fixup,\"ax\"\n"
23280 "9: lea 0(%%eax,%0,4),%0\n"
23281 @@ -487,32 +595,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23282 */
23283 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23284 unsigned long size);
23285 -unsigned long __copy_user_intel(void __user *to, const void *from,
23286 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23287 + unsigned long size);
23288 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23289 unsigned long size);
23290 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23291 const void __user *from, unsigned long size);
23292 #endif /* CONFIG_X86_INTEL_USERCOPY */
23293
23294 /* Generic arbitrary sized copy. */
23295 -#define __copy_user(to, from, size) \
23296 +#define __copy_user(to, from, size, prefix, set, restore) \
23297 do { \
23298 int __d0, __d1, __d2; \
23299 __asm__ __volatile__( \
23300 + set \
23301 " cmp $7,%0\n" \
23302 " jbe 1f\n" \
23303 " movl %1,%0\n" \
23304 " negl %0\n" \
23305 " andl $7,%0\n" \
23306 " subl %0,%3\n" \
23307 - "4: rep; movsb\n" \
23308 + "4: rep; "prefix"movsb\n" \
23309 " movl %3,%0\n" \
23310 " shrl $2,%0\n" \
23311 " andl $3,%3\n" \
23312 " .align 2,0x90\n" \
23313 - "0: rep; movsl\n" \
23314 + "0: rep; "prefix"movsl\n" \
23315 " movl %3,%0\n" \
23316 - "1: rep; movsb\n" \
23317 + "1: rep; "prefix"movsb\n" \
23318 "2:\n" \
23319 + restore \
23320 ".section .fixup,\"ax\"\n" \
23321 "5: addl %3,%0\n" \
23322 " jmp 2b\n" \
23323 @@ -537,14 +649,14 @@ do { \
23324 " negl %0\n" \
23325 " andl $7,%0\n" \
23326 " subl %0,%3\n" \
23327 - "4: rep; movsb\n" \
23328 + "4: rep; "__copyuser_seg"movsb\n" \
23329 " movl %3,%0\n" \
23330 " shrl $2,%0\n" \
23331 " andl $3,%3\n" \
23332 " .align 2,0x90\n" \
23333 - "0: rep; movsl\n" \
23334 + "0: rep; "__copyuser_seg"movsl\n" \
23335 " movl %3,%0\n" \
23336 - "1: rep; movsb\n" \
23337 + "1: rep; "__copyuser_seg"movsb\n" \
23338 "2:\n" \
23339 ".section .fixup,\"ax\"\n" \
23340 "5: addl %3,%0\n" \
23341 @@ -627,9 +739,9 @@ survive:
23342 }
23343 #endif
23344 if (movsl_is_ok(to, from, n))
23345 - __copy_user(to, from, n);
23346 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23347 else
23348 - n = __copy_user_intel(to, from, n);
23349 + n = __generic_copy_to_user_intel(to, from, n);
23350 return n;
23351 }
23352 EXPORT_SYMBOL(__copy_to_user_ll);
23353 @@ -649,10 +761,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23354 unsigned long n)
23355 {
23356 if (movsl_is_ok(to, from, n))
23357 - __copy_user(to, from, n);
23358 + __copy_user(to, from, n, __copyuser_seg, "", "");
23359 else
23360 - n = __copy_user_intel((void __user *)to,
23361 - (const void *)from, n);
23362 + n = __generic_copy_from_user_intel(to, from, n);
23363 return n;
23364 }
23365 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23366 @@ -679,65 +790,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23367 if (n > 64 && cpu_has_xmm2)
23368 n = __copy_user_intel_nocache(to, from, n);
23369 else
23370 - __copy_user(to, from, n);
23371 + __copy_user(to, from, n, __copyuser_seg, "", "");
23372 #else
23373 - __copy_user(to, from, n);
23374 + __copy_user(to, from, n, __copyuser_seg, "", "");
23375 #endif
23376 return n;
23377 }
23378 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23379
23380 -/**
23381 - * copy_to_user: - Copy a block of data into user space.
23382 - * @to: Destination address, in user space.
23383 - * @from: Source address, in kernel space.
23384 - * @n: Number of bytes to copy.
23385 - *
23386 - * Context: User context only. This function may sleep.
23387 - *
23388 - * Copy data from kernel space to user space.
23389 - *
23390 - * Returns number of bytes that could not be copied.
23391 - * On success, this will be zero.
23392 - */
23393 -unsigned long
23394 -copy_to_user(void __user *to, const void *from, unsigned long n)
23395 -{
23396 - if (access_ok(VERIFY_WRITE, to, n))
23397 - n = __copy_to_user(to, from, n);
23398 - return n;
23399 -}
23400 -EXPORT_SYMBOL(copy_to_user);
23401 -
23402 -/**
23403 - * copy_from_user: - Copy a block of data from user space.
23404 - * @to: Destination address, in kernel space.
23405 - * @from: Source address, in user space.
23406 - * @n: Number of bytes to copy.
23407 - *
23408 - * Context: User context only. This function may sleep.
23409 - *
23410 - * Copy data from user space to kernel space.
23411 - *
23412 - * Returns number of bytes that could not be copied.
23413 - * On success, this will be zero.
23414 - *
23415 - * If some data could not be copied, this function will pad the copied
23416 - * data to the requested size using zero bytes.
23417 - */
23418 -unsigned long
23419 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23420 -{
23421 - if (access_ok(VERIFY_READ, from, n))
23422 - n = __copy_from_user(to, from, n);
23423 - else
23424 - memset(to, 0, n);
23425 - return n;
23426 -}
23427 -EXPORT_SYMBOL(_copy_from_user);
23428 -
23429 void copy_from_user_overflow(void)
23430 {
23431 WARN(1, "Buffer overflow detected!\n");
23432 }
23433 EXPORT_SYMBOL(copy_from_user_overflow);
23434 +
23435 +void copy_to_user_overflow(void)
23436 +{
23437 + WARN(1, "Buffer overflow detected!\n");
23438 +}
23439 +EXPORT_SYMBOL(copy_to_user_overflow);
23440 +
23441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23442 +void __set_fs(mm_segment_t x)
23443 +{
23444 + switch (x.seg) {
23445 + case 0:
23446 + loadsegment(gs, 0);
23447 + break;
23448 + case TASK_SIZE_MAX:
23449 + loadsegment(gs, __USER_DS);
23450 + break;
23451 + case -1UL:
23452 + loadsegment(gs, __KERNEL_DS);
23453 + break;
23454 + default:
23455 + BUG();
23456 + }
23457 + return;
23458 +}
23459 +EXPORT_SYMBOL(__set_fs);
23460 +
23461 +void set_fs(mm_segment_t x)
23462 +{
23463 + current_thread_info()->addr_limit = x;
23464 + __set_fs(x);
23465 +}
23466 +EXPORT_SYMBOL(set_fs);
23467 +#endif
23468 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23469 index e5b130b..6690d31 100644
23470 --- a/arch/x86/lib/usercopy_64.c
23471 +++ b/arch/x86/lib/usercopy_64.c
23472 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23473 {
23474 long __d0;
23475 might_fault();
23476 +
23477 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23478 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23479 + addr += PAX_USER_SHADOW_BASE;
23480 +#endif
23481 +
23482 /* no memory constraint because it doesn't change any memory gcc knows
23483 about */
23484 asm volatile(
23485 @@ -52,12 +58,20 @@ unsigned long clear_user(void __user *to, unsigned long n)
23486 }
23487 EXPORT_SYMBOL(clear_user);
23488
23489 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23490 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23491 {
23492 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23493 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23494 - }
23495 - return len;
23496 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23497 +
23498 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23499 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23500 + to += PAX_USER_SHADOW_BASE;
23501 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23502 + from += PAX_USER_SHADOW_BASE;
23503 +#endif
23504 +
23505 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23506 + }
23507 + return len;
23508 }
23509 EXPORT_SYMBOL(copy_in_user);
23510
23511 @@ -67,7 +81,7 @@ EXPORT_SYMBOL(copy_in_user);
23512 * it is not necessary to optimize tail handling.
23513 */
23514 unsigned long
23515 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23516 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23517 {
23518 char c;
23519 unsigned zero_len;
23520 @@ -84,3 +98,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23521 break;
23522 return len;
23523 }
23524 +
23525 +void copy_from_user_overflow(void)
23526 +{
23527 + WARN(1, "Buffer overflow detected!\n");
23528 +}
23529 +EXPORT_SYMBOL(copy_from_user_overflow);
23530 +
23531 +void copy_to_user_overflow(void)
23532 +{
23533 + WARN(1, "Buffer overflow detected!\n");
23534 +}
23535 +EXPORT_SYMBOL(copy_to_user_overflow);
23536 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23537 index 903ec1e..c4166b2 100644
23538 --- a/arch/x86/mm/extable.c
23539 +++ b/arch/x86/mm/extable.c
23540 @@ -6,12 +6,24 @@
23541 static inline unsigned long
23542 ex_insn_addr(const struct exception_table_entry *x)
23543 {
23544 - return (unsigned long)&x->insn + x->insn;
23545 + unsigned long reloc = 0;
23546 +
23547 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23548 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23549 +#endif
23550 +
23551 + return (unsigned long)&x->insn + x->insn + reloc;
23552 }
23553 static inline unsigned long
23554 ex_fixup_addr(const struct exception_table_entry *x)
23555 {
23556 - return (unsigned long)&x->fixup + x->fixup;
23557 + unsigned long reloc = 0;
23558 +
23559 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23560 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23561 +#endif
23562 +
23563 + return (unsigned long)&x->fixup + x->fixup + reloc;
23564 }
23565
23566 int fixup_exception(struct pt_regs *regs)
23567 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
23568 unsigned long new_ip;
23569
23570 #ifdef CONFIG_PNPBIOS
23571 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23572 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23573 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23574 extern u32 pnp_bios_is_utter_crap;
23575 pnp_bios_is_utter_crap = 1;
23576 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
23577 i += 4;
23578 p->fixup -= i;
23579 i += 4;
23580 +
23581 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23582 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
23583 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23584 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23585 +#endif
23586 +
23587 }
23588 }
23589
23590 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23591 index 76dcd9d..e9dffde 100644
23592 --- a/arch/x86/mm/fault.c
23593 +++ b/arch/x86/mm/fault.c
23594 @@ -13,11 +13,18 @@
23595 #include <linux/perf_event.h> /* perf_sw_event */
23596 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23597 #include <linux/prefetch.h> /* prefetchw */
23598 +#include <linux/unistd.h>
23599 +#include <linux/compiler.h>
23600
23601 #include <asm/traps.h> /* dotraplinkage, ... */
23602 #include <asm/pgalloc.h> /* pgd_*(), ... */
23603 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23604 #include <asm/fixmap.h> /* VSYSCALL_START */
23605 +#include <asm/tlbflush.h>
23606 +
23607 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23608 +#include <asm/stacktrace.h>
23609 +#endif
23610
23611 /*
23612 * Page fault error code bits:
23613 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23614 int ret = 0;
23615
23616 /* kprobe_running() needs smp_processor_id() */
23617 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23618 + if (kprobes_built_in() && !user_mode(regs)) {
23619 preempt_disable();
23620 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23621 ret = 1;
23622 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23623 return !instr_lo || (instr_lo>>1) == 1;
23624 case 0x00:
23625 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23626 - if (probe_kernel_address(instr, opcode))
23627 + if (user_mode(regs)) {
23628 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23629 + return 0;
23630 + } else if (probe_kernel_address(instr, opcode))
23631 return 0;
23632
23633 *prefetch = (instr_lo == 0xF) &&
23634 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23635 while (instr < max_instr) {
23636 unsigned char opcode;
23637
23638 - if (probe_kernel_address(instr, opcode))
23639 + if (user_mode(regs)) {
23640 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23641 + break;
23642 + } else if (probe_kernel_address(instr, opcode))
23643 break;
23644
23645 instr++;
23646 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23647 force_sig_info(si_signo, &info, tsk);
23648 }
23649
23650 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23651 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23652 +#endif
23653 +
23654 +#ifdef CONFIG_PAX_EMUTRAMP
23655 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23656 +#endif
23657 +
23658 +#ifdef CONFIG_PAX_PAGEEXEC
23659 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23660 +{
23661 + pgd_t *pgd;
23662 + pud_t *pud;
23663 + pmd_t *pmd;
23664 +
23665 + pgd = pgd_offset(mm, address);
23666 + if (!pgd_present(*pgd))
23667 + return NULL;
23668 + pud = pud_offset(pgd, address);
23669 + if (!pud_present(*pud))
23670 + return NULL;
23671 + pmd = pmd_offset(pud, address);
23672 + if (!pmd_present(*pmd))
23673 + return NULL;
23674 + return pmd;
23675 +}
23676 +#endif
23677 +
23678 DEFINE_SPINLOCK(pgd_lock);
23679 LIST_HEAD(pgd_list);
23680
23681 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23682 for (address = VMALLOC_START & PMD_MASK;
23683 address >= TASK_SIZE && address < FIXADDR_TOP;
23684 address += PMD_SIZE) {
23685 +
23686 +#ifdef CONFIG_PAX_PER_CPU_PGD
23687 + unsigned long cpu;
23688 +#else
23689 struct page *page;
23690 +#endif
23691
23692 spin_lock(&pgd_lock);
23693 +
23694 +#ifdef CONFIG_PAX_PER_CPU_PGD
23695 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23696 + pgd_t *pgd = get_cpu_pgd(cpu);
23697 + pmd_t *ret;
23698 +#else
23699 list_for_each_entry(page, &pgd_list, lru) {
23700 + pgd_t *pgd = page_address(page);
23701 spinlock_t *pgt_lock;
23702 pmd_t *ret;
23703
23704 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23705 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23706
23707 spin_lock(pgt_lock);
23708 - ret = vmalloc_sync_one(page_address(page), address);
23709 +#endif
23710 +
23711 + ret = vmalloc_sync_one(pgd, address);
23712 +
23713 +#ifndef CONFIG_PAX_PER_CPU_PGD
23714 spin_unlock(pgt_lock);
23715 +#endif
23716
23717 if (!ret)
23718 break;
23719 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23720 * an interrupt in the middle of a task switch..
23721 */
23722 pgd_paddr = read_cr3();
23723 +
23724 +#ifdef CONFIG_PAX_PER_CPU_PGD
23725 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23726 +#endif
23727 +
23728 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23729 if (!pmd_k)
23730 return -1;
23731 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23732 * happen within a race in page table update. In the later
23733 * case just flush:
23734 */
23735 +
23736 +#ifdef CONFIG_PAX_PER_CPU_PGD
23737 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23738 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23739 +#else
23740 pgd = pgd_offset(current->active_mm, address);
23741 +#endif
23742 +
23743 pgd_ref = pgd_offset_k(address);
23744 if (pgd_none(*pgd_ref))
23745 return -1;
23746 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23747 static int is_errata100(struct pt_regs *regs, unsigned long address)
23748 {
23749 #ifdef CONFIG_X86_64
23750 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23751 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23752 return 1;
23753 #endif
23754 return 0;
23755 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23756 }
23757
23758 static const char nx_warning[] = KERN_CRIT
23759 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23760 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23761
23762 static void
23763 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23764 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23765 if (!oops_may_print())
23766 return;
23767
23768 - if (error_code & PF_INSTR) {
23769 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23770 unsigned int level;
23771
23772 pte_t *pte = lookup_address(address, &level);
23773
23774 if (pte && pte_present(*pte) && !pte_exec(*pte))
23775 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
23776 + printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
23777 }
23778
23779 +#ifdef CONFIG_PAX_KERNEXEC
23780 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23781 + if (current->signal->curr_ip)
23782 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23783 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23784 + else
23785 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23786 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23787 + }
23788 +#endif
23789 +
23790 printk(KERN_ALERT "BUG: unable to handle kernel ");
23791 if (address < PAGE_SIZE)
23792 printk(KERN_CONT "NULL pointer dereference");
23793 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23794 }
23795 #endif
23796
23797 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23798 + if (pax_is_fetch_fault(regs, error_code, address)) {
23799 +
23800 +#ifdef CONFIG_PAX_EMUTRAMP
23801 + switch (pax_handle_fetch_fault(regs)) {
23802 + case 2:
23803 + return;
23804 + }
23805 +#endif
23806 +
23807 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23808 + do_group_exit(SIGKILL);
23809 + }
23810 +#endif
23811 +
23812 if (unlikely(show_unhandled_signals))
23813 show_signal_msg(regs, error_code, address, tsk);
23814
23815 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23816 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23817 printk(KERN_ERR
23818 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23819 - tsk->comm, tsk->pid, address);
23820 + tsk->comm, task_pid_nr(tsk), address);
23821 code = BUS_MCEERR_AR;
23822 }
23823 #endif
23824 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23825 return 1;
23826 }
23827
23828 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23829 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23830 +{
23831 + pte_t *pte;
23832 + pmd_t *pmd;
23833 + spinlock_t *ptl;
23834 + unsigned char pte_mask;
23835 +
23836 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23837 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23838 + return 0;
23839 +
23840 + /* PaX: it's our fault, let's handle it if we can */
23841 +
23842 + /* PaX: take a look at read faults before acquiring any locks */
23843 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23844 + /* instruction fetch attempt from a protected page in user mode */
23845 + up_read(&mm->mmap_sem);
23846 +
23847 +#ifdef CONFIG_PAX_EMUTRAMP
23848 + switch (pax_handle_fetch_fault(regs)) {
23849 + case 2:
23850 + return 1;
23851 + }
23852 +#endif
23853 +
23854 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23855 + do_group_exit(SIGKILL);
23856 + }
23857 +
23858 + pmd = pax_get_pmd(mm, address);
23859 + if (unlikely(!pmd))
23860 + return 0;
23861 +
23862 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23863 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23864 + pte_unmap_unlock(pte, ptl);
23865 + return 0;
23866 + }
23867 +
23868 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23869 + /* write attempt to a protected page in user mode */
23870 + pte_unmap_unlock(pte, ptl);
23871 + return 0;
23872 + }
23873 +
23874 +#ifdef CONFIG_SMP
23875 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23876 +#else
23877 + if (likely(address > get_limit(regs->cs)))
23878 +#endif
23879 + {
23880 + set_pte(pte, pte_mkread(*pte));
23881 + __flush_tlb_one(address);
23882 + pte_unmap_unlock(pte, ptl);
23883 + up_read(&mm->mmap_sem);
23884 + return 1;
23885 + }
23886 +
23887 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23888 +
23889 + /*
23890 + * PaX: fill DTLB with user rights and retry
23891 + */
23892 + __asm__ __volatile__ (
23893 + "orb %2,(%1)\n"
23894 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23895 +/*
23896 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23897 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23898 + * page fault when examined during a TLB load attempt. this is true not only
23899 + * for PTEs holding a non-present entry but also present entries that will
23900 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23901 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23902 + * for our target pages since their PTEs are simply not in the TLBs at all.
23903 +
23904 + * the best thing in omitting it is that we gain around 15-20% speed in the
23905 + * fast path of the page fault handler and can get rid of tracing since we
23906 + * can no longer flush unintended entries.
23907 + */
23908 + "invlpg (%0)\n"
23909 +#endif
23910 + __copyuser_seg"testb $0,(%0)\n"
23911 + "xorb %3,(%1)\n"
23912 + :
23913 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23914 + : "memory", "cc");
23915 + pte_unmap_unlock(pte, ptl);
23916 + up_read(&mm->mmap_sem);
23917 + return 1;
23918 +}
23919 +#endif
23920 +
23921 /*
23922 * Handle a spurious fault caused by a stale TLB entry.
23923 *
23924 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23925 static inline int
23926 access_error(unsigned long error_code, struct vm_area_struct *vma)
23927 {
23928 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23929 + return 1;
23930 +
23931 if (error_code & PF_WRITE) {
23932 /* write, present and write, not present: */
23933 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23934 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23935 {
23936 struct vm_area_struct *vma;
23937 struct task_struct *tsk;
23938 - unsigned long address;
23939 struct mm_struct *mm;
23940 int fault;
23941 int write = error_code & PF_WRITE;
23942 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23943 (write ? FAULT_FLAG_WRITE : 0);
23944
23945 - tsk = current;
23946 - mm = tsk->mm;
23947 -
23948 /* Get the faulting address: */
23949 - address = read_cr2();
23950 + unsigned long address = read_cr2();
23951 +
23952 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23953 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23954 + if (!search_exception_tables(regs->ip)) {
23955 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23956 + bad_area_nosemaphore(regs, error_code, address);
23957 + return;
23958 + }
23959 + if (address < PAX_USER_SHADOW_BASE) {
23960 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23961 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23962 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23963 + } else
23964 + address -= PAX_USER_SHADOW_BASE;
23965 + }
23966 +#endif
23967 +
23968 + tsk = current;
23969 + mm = tsk->mm;
23970
23971 /*
23972 * Detect and handle instructions that would cause a page fault for
23973 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23974 * User-mode registers count as a user access even for any
23975 * potential system fault or CPU buglet:
23976 */
23977 - if (user_mode_vm(regs)) {
23978 + if (user_mode(regs)) {
23979 local_irq_enable();
23980 error_code |= PF_USER;
23981 } else {
23982 @@ -1132,6 +1339,11 @@ retry:
23983 might_sleep();
23984 }
23985
23986 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23987 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23988 + return;
23989 +#endif
23990 +
23991 vma = find_vma(mm, address);
23992 if (unlikely(!vma)) {
23993 bad_area(regs, error_code, address);
23994 @@ -1143,18 +1355,24 @@ retry:
23995 bad_area(regs, error_code, address);
23996 return;
23997 }
23998 - if (error_code & PF_USER) {
23999 - /*
24000 - * Accessing the stack below %sp is always a bug.
24001 - * The large cushion allows instructions like enter
24002 - * and pusha to work. ("enter $65535, $31" pushes
24003 - * 32 pointers and then decrements %sp by 65535.)
24004 - */
24005 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24006 - bad_area(regs, error_code, address);
24007 - return;
24008 - }
24009 + /*
24010 + * Accessing the stack below %sp is always a bug.
24011 + * The large cushion allows instructions like enter
24012 + * and pusha to work. ("enter $65535, $31" pushes
24013 + * 32 pointers and then decrements %sp by 65535.)
24014 + */
24015 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24016 + bad_area(regs, error_code, address);
24017 + return;
24018 }
24019 +
24020 +#ifdef CONFIG_PAX_SEGMEXEC
24021 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24022 + bad_area(regs, error_code, address);
24023 + return;
24024 + }
24025 +#endif
24026 +
24027 if (unlikely(expand_stack(vma, address))) {
24028 bad_area(regs, error_code, address);
24029 return;
24030 @@ -1209,3 +1427,292 @@ good_area:
24031
24032 up_read(&mm->mmap_sem);
24033 }
24034 +
24035 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24036 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24037 +{
24038 + struct mm_struct *mm = current->mm;
24039 + unsigned long ip = regs->ip;
24040 +
24041 + if (v8086_mode(regs))
24042 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24043 +
24044 +#ifdef CONFIG_PAX_PAGEEXEC
24045 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24046 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24047 + return true;
24048 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24049 + return true;
24050 + return false;
24051 + }
24052 +#endif
24053 +
24054 +#ifdef CONFIG_PAX_SEGMEXEC
24055 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24056 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24057 + return true;
24058 + return false;
24059 + }
24060 +#endif
24061 +
24062 + return false;
24063 +}
24064 +#endif
24065 +
24066 +#ifdef CONFIG_PAX_EMUTRAMP
24067 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24068 +{
24069 + int err;
24070 +
24071 + do { /* PaX: libffi trampoline emulation */
24072 + unsigned char mov, jmp;
24073 + unsigned int addr1, addr2;
24074 +
24075 +#ifdef CONFIG_X86_64
24076 + if ((regs->ip + 9) >> 32)
24077 + break;
24078 +#endif
24079 +
24080 + err = get_user(mov, (unsigned char __user *)regs->ip);
24081 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24082 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24083 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24084 +
24085 + if (err)
24086 + break;
24087 +
24088 + if (mov == 0xB8 && jmp == 0xE9) {
24089 + regs->ax = addr1;
24090 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24091 + return 2;
24092 + }
24093 + } while (0);
24094 +
24095 + do { /* PaX: gcc trampoline emulation #1 */
24096 + unsigned char mov1, mov2;
24097 + unsigned short jmp;
24098 + unsigned int addr1, addr2;
24099 +
24100 +#ifdef CONFIG_X86_64
24101 + if ((regs->ip + 11) >> 32)
24102 + break;
24103 +#endif
24104 +
24105 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24106 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24107 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24108 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24109 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24110 +
24111 + if (err)
24112 + break;
24113 +
24114 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24115 + regs->cx = addr1;
24116 + regs->ax = addr2;
24117 + regs->ip = addr2;
24118 + return 2;
24119 + }
24120 + } while (0);
24121 +
24122 + do { /* PaX: gcc trampoline emulation #2 */
24123 + unsigned char mov, jmp;
24124 + unsigned int addr1, addr2;
24125 +
24126 +#ifdef CONFIG_X86_64
24127 + if ((regs->ip + 9) >> 32)
24128 + break;
24129 +#endif
24130 +
24131 + err = get_user(mov, (unsigned char __user *)regs->ip);
24132 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24133 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24134 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24135 +
24136 + if (err)
24137 + break;
24138 +
24139 + if (mov == 0xB9 && jmp == 0xE9) {
24140 + regs->cx = addr1;
24141 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24142 + return 2;
24143 + }
24144 + } while (0);
24145 +
24146 + return 1; /* PaX in action */
24147 +}
24148 +
24149 +#ifdef CONFIG_X86_64
24150 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24151 +{
24152 + int err;
24153 +
24154 + do { /* PaX: libffi trampoline emulation */
24155 + unsigned short mov1, mov2, jmp1;
24156 + unsigned char stcclc, jmp2;
24157 + unsigned long addr1, addr2;
24158 +
24159 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24160 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24161 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24162 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24163 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24164 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24165 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24166 +
24167 + if (err)
24168 + break;
24169 +
24170 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24171 + regs->r11 = addr1;
24172 + regs->r10 = addr2;
24173 + if (stcclc == 0xF8)
24174 + regs->flags &= ~X86_EFLAGS_CF;
24175 + else
24176 + regs->flags |= X86_EFLAGS_CF;
24177 + regs->ip = addr1;
24178 + return 2;
24179 + }
24180 + } while (0);
24181 +
24182 + do { /* PaX: gcc trampoline emulation #1 */
24183 + unsigned short mov1, mov2, jmp1;
24184 + unsigned char jmp2;
24185 + unsigned int addr1;
24186 + unsigned long addr2;
24187 +
24188 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24189 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24190 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24191 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24192 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24193 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24194 +
24195 + if (err)
24196 + break;
24197 +
24198 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24199 + regs->r11 = addr1;
24200 + regs->r10 = addr2;
24201 + regs->ip = addr1;
24202 + return 2;
24203 + }
24204 + } while (0);
24205 +
24206 + do { /* PaX: gcc trampoline emulation #2 */
24207 + unsigned short mov1, mov2, jmp1;
24208 + unsigned char jmp2;
24209 + unsigned long addr1, addr2;
24210 +
24211 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24212 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24213 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24214 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24215 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24216 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24217 +
24218 + if (err)
24219 + break;
24220 +
24221 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24222 + regs->r11 = addr1;
24223 + regs->r10 = addr2;
24224 + regs->ip = addr1;
24225 + return 2;
24226 + }
24227 + } while (0);
24228 +
24229 + return 1; /* PaX in action */
24230 +}
24231 +#endif
24232 +
24233 +/*
24234 + * PaX: decide what to do with offenders (regs->ip = fault address)
24235 + *
24236 + * returns 1 when task should be killed
24237 + * 2 when gcc trampoline was detected
24238 + */
24239 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24240 +{
24241 + if (v8086_mode(regs))
24242 + return 1;
24243 +
24244 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24245 + return 1;
24246 +
24247 +#ifdef CONFIG_X86_32
24248 + return pax_handle_fetch_fault_32(regs);
24249 +#else
24250 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24251 + return pax_handle_fetch_fault_32(regs);
24252 + else
24253 + return pax_handle_fetch_fault_64(regs);
24254 +#endif
24255 +}
24256 +#endif
24257 +
24258 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24259 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24260 +{
24261 + long i;
24262 +
24263 + printk(KERN_ERR "PAX: bytes at PC: ");
24264 + for (i = 0; i < 20; i++) {
24265 + unsigned char c;
24266 + if (get_user(c, (unsigned char __force_user *)pc+i))
24267 + printk(KERN_CONT "?? ");
24268 + else
24269 + printk(KERN_CONT "%02x ", c);
24270 + }
24271 + printk("\n");
24272 +
24273 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24274 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24275 + unsigned long c;
24276 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24277 +#ifdef CONFIG_X86_32
24278 + printk(KERN_CONT "???????? ");
24279 +#else
24280 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24281 + printk(KERN_CONT "???????? ???????? ");
24282 + else
24283 + printk(KERN_CONT "???????????????? ");
24284 +#endif
24285 + } else {
24286 +#ifdef CONFIG_X86_64
24287 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24288 + printk(KERN_CONT "%08x ", (unsigned int)c);
24289 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24290 + } else
24291 +#endif
24292 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24293 + }
24294 + }
24295 + printk("\n");
24296 +}
24297 +#endif
24298 +
24299 +/**
24300 + * probe_kernel_write(): safely attempt to write to a location
24301 + * @dst: address to write to
24302 + * @src: pointer to the data that shall be written
24303 + * @size: size of the data chunk
24304 + *
24305 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24306 + * happens, handle that and return -EFAULT.
24307 + */
24308 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24309 +{
24310 + long ret;
24311 + mm_segment_t old_fs = get_fs();
24312 +
24313 + set_fs(KERNEL_DS);
24314 + pagefault_disable();
24315 + pax_open_kernel();
24316 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24317 + pax_close_kernel();
24318 + pagefault_enable();
24319 + set_fs(old_fs);
24320 +
24321 + return ret ? -EFAULT : 0;
24322 +}
24323 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24324 index dd74e46..7d26398 100644
24325 --- a/arch/x86/mm/gup.c
24326 +++ b/arch/x86/mm/gup.c
24327 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24328 addr = start;
24329 len = (unsigned long) nr_pages << PAGE_SHIFT;
24330 end = start + len;
24331 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24332 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24333 (void __user *)start, len)))
24334 return 0;
24335
24336 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24337 index 6f31ee5..8ee4164 100644
24338 --- a/arch/x86/mm/highmem_32.c
24339 +++ b/arch/x86/mm/highmem_32.c
24340 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24341 idx = type + KM_TYPE_NR*smp_processor_id();
24342 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24343 BUG_ON(!pte_none(*(kmap_pte-idx)));
24344 +
24345 + pax_open_kernel();
24346 set_pte(kmap_pte-idx, mk_pte(page, prot));
24347 + pax_close_kernel();
24348 +
24349 arch_flush_lazy_mmu_mode();
24350
24351 return (void *)vaddr;
24352 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24353 index b91e485..d00e7c9 100644
24354 --- a/arch/x86/mm/hugetlbpage.c
24355 +++ b/arch/x86/mm/hugetlbpage.c
24356 @@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24357 struct hstate *h = hstate_file(file);
24358 struct mm_struct *mm = current->mm;
24359 struct vm_area_struct *vma;
24360 - unsigned long start_addr;
24361 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24362 +
24363 +#ifdef CONFIG_PAX_SEGMEXEC
24364 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24365 + pax_task_size = SEGMEXEC_TASK_SIZE;
24366 +#endif
24367 +
24368 + pax_task_size -= PAGE_SIZE;
24369
24370 if (len > mm->cached_hole_size) {
24371 - start_addr = mm->free_area_cache;
24372 + start_addr = mm->free_area_cache;
24373 } else {
24374 - start_addr = TASK_UNMAPPED_BASE;
24375 - mm->cached_hole_size = 0;
24376 + start_addr = mm->mmap_base;
24377 + mm->cached_hole_size = 0;
24378 }
24379
24380 full_search:
24381 @@ -291,26 +298,27 @@ full_search:
24382
24383 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24384 /* At this point: (!vma || addr < vma->vm_end). */
24385 - if (TASK_SIZE - len < addr) {
24386 + if (pax_task_size - len < addr) {
24387 /*
24388 * Start a new search - just in case we missed
24389 * some holes.
24390 */
24391 - if (start_addr != TASK_UNMAPPED_BASE) {
24392 - start_addr = TASK_UNMAPPED_BASE;
24393 + if (start_addr != mm->mmap_base) {
24394 + start_addr = mm->mmap_base;
24395 mm->cached_hole_size = 0;
24396 goto full_search;
24397 }
24398 return -ENOMEM;
24399 }
24400 - if (!vma || addr + len <= vma->vm_start) {
24401 - mm->free_area_cache = addr + len;
24402 - return addr;
24403 - }
24404 + if (check_heap_stack_gap(vma, addr, len))
24405 + break;
24406 if (addr + mm->cached_hole_size < vma->vm_start)
24407 mm->cached_hole_size = vma->vm_start - addr;
24408 addr = ALIGN(vma->vm_end, huge_page_size(h));
24409 }
24410 +
24411 + mm->free_area_cache = addr + len;
24412 + return addr;
24413 }
24414
24415 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24416 @@ -321,9 +329,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24417 struct mm_struct *mm = current->mm;
24418 struct vm_area_struct *vma;
24419 unsigned long base = mm->mmap_base;
24420 - unsigned long addr = addr0;
24421 + unsigned long addr;
24422 unsigned long largest_hole = mm->cached_hole_size;
24423 - unsigned long start_addr;
24424
24425 /* don't allow allocations above current base */
24426 if (mm->free_area_cache > base)
24427 @@ -333,16 +340,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24428 largest_hole = 0;
24429 mm->free_area_cache = base;
24430 }
24431 -try_again:
24432 - start_addr = mm->free_area_cache;
24433
24434 /* make sure it can fit in the remaining address space */
24435 if (mm->free_area_cache < len)
24436 goto fail;
24437
24438 /* either no address requested or can't fit in requested address hole */
24439 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24440 + addr = mm->free_area_cache - len;
24441 do {
24442 + addr &= huge_page_mask(h);
24443 /*
24444 * Lookup failure means no vma is above this address,
24445 * i.e. return with success:
24446 @@ -351,10 +357,10 @@ try_again:
24447 if (!vma)
24448 return addr;
24449
24450 - if (addr + len <= vma->vm_start) {
24451 + if (check_heap_stack_gap(vma, addr, len)) {
24452 /* remember the address as a hint for next time */
24453 - mm->cached_hole_size = largest_hole;
24454 - return (mm->free_area_cache = addr);
24455 + mm->cached_hole_size = largest_hole;
24456 + return (mm->free_area_cache = addr);
24457 } else if (mm->free_area_cache == vma->vm_end) {
24458 /* pull free_area_cache down to the first hole */
24459 mm->free_area_cache = vma->vm_start;
24460 @@ -363,29 +369,34 @@ try_again:
24461
24462 /* remember the largest hole we saw so far */
24463 if (addr + largest_hole < vma->vm_start)
24464 - largest_hole = vma->vm_start - addr;
24465 + largest_hole = vma->vm_start - addr;
24466
24467 /* try just below the current vma->vm_start */
24468 - addr = (vma->vm_start - len) & huge_page_mask(h);
24469 - } while (len <= vma->vm_start);
24470 + addr = skip_heap_stack_gap(vma, len);
24471 + } while (!IS_ERR_VALUE(addr));
24472
24473 fail:
24474 /*
24475 - * if hint left us with no space for the requested
24476 - * mapping then try again:
24477 - */
24478 - if (start_addr != base) {
24479 - mm->free_area_cache = base;
24480 - largest_hole = 0;
24481 - goto try_again;
24482 - }
24483 - /*
24484 * A failed mmap() very likely causes application failure,
24485 * so fall back to the bottom-up function here. This scenario
24486 * can happen with large stack limits and large mmap()
24487 * allocations.
24488 */
24489 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24490 +
24491 +#ifdef CONFIG_PAX_SEGMEXEC
24492 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24493 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24494 + else
24495 +#endif
24496 +
24497 + mm->mmap_base = TASK_UNMAPPED_BASE;
24498 +
24499 +#ifdef CONFIG_PAX_RANDMMAP
24500 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24501 + mm->mmap_base += mm->delta_mmap;
24502 +#endif
24503 +
24504 + mm->free_area_cache = mm->mmap_base;
24505 mm->cached_hole_size = ~0UL;
24506 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24507 len, pgoff, flags);
24508 @@ -393,6 +404,7 @@ fail:
24509 /*
24510 * Restore the topdown base:
24511 */
24512 + mm->mmap_base = base;
24513 mm->free_area_cache = base;
24514 mm->cached_hole_size = ~0UL;
24515
24516 @@ -406,10 +418,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24517 struct hstate *h = hstate_file(file);
24518 struct mm_struct *mm = current->mm;
24519 struct vm_area_struct *vma;
24520 + unsigned long pax_task_size = TASK_SIZE;
24521
24522 if (len & ~huge_page_mask(h))
24523 return -EINVAL;
24524 - if (len > TASK_SIZE)
24525 +
24526 +#ifdef CONFIG_PAX_SEGMEXEC
24527 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24528 + pax_task_size = SEGMEXEC_TASK_SIZE;
24529 +#endif
24530 +
24531 + pax_task_size -= PAGE_SIZE;
24532 +
24533 + if (len > pax_task_size)
24534 return -ENOMEM;
24535
24536 if (flags & MAP_FIXED) {
24537 @@ -421,8 +442,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24538 if (addr) {
24539 addr = ALIGN(addr, huge_page_size(h));
24540 vma = find_vma(mm, addr);
24541 - if (TASK_SIZE - len >= addr &&
24542 - (!vma || addr + len <= vma->vm_start))
24543 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24544 return addr;
24545 }
24546 if (mm->get_unmapped_area == arch_get_unmapped_area)
24547 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24548 index bc4e9d8..ca4c14b 100644
24549 --- a/arch/x86/mm/init.c
24550 +++ b/arch/x86/mm/init.c
24551 @@ -16,6 +16,8 @@
24552 #include <asm/tlb.h>
24553 #include <asm/proto.h>
24554 #include <asm/dma.h> /* for MAX_DMA_PFN */
24555 +#include <asm/desc.h>
24556 +#include <asm/bios_ebda.h>
24557
24558 unsigned long __initdata pgt_buf_start;
24559 unsigned long __meminitdata pgt_buf_end;
24560 @@ -38,7 +40,7 @@ struct map_range {
24561 static void __init find_early_table_space(struct map_range *mr, unsigned long end,
24562 int use_pse, int use_gbpages)
24563 {
24564 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24565 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24566 phys_addr_t base;
24567
24568 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24569 @@ -317,10 +319,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24570 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24571 * mmio resources as well as potential bios/acpi data regions.
24572 */
24573 +
24574 +#ifdef CONFIG_GRKERNSEC_KMEM
24575 +static unsigned int ebda_start __read_only;
24576 +static unsigned int ebda_end __read_only;
24577 +#endif
24578 +
24579 int devmem_is_allowed(unsigned long pagenr)
24580 {
24581 +#ifdef CONFIG_GRKERNSEC_KMEM
24582 + /* allow BDA */
24583 + if (!pagenr)
24584 + return 1;
24585 + /* allow EBDA */
24586 + if (pagenr >= ebda_start && pagenr < ebda_end)
24587 + return 1;
24588 +#else
24589 + if (!pagenr)
24590 + return 1;
24591 +#ifdef CONFIG_VM86
24592 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24593 + return 1;
24594 +#endif
24595 +#endif
24596 +
24597 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24598 + return 1;
24599 +#ifdef CONFIG_GRKERNSEC_KMEM
24600 + /* throw out everything else below 1MB */
24601 if (pagenr <= 256)
24602 - return 1;
24603 + return 0;
24604 +#endif
24605 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24606 return 0;
24607 if (!page_is_ram(pagenr))
24608 @@ -377,8 +406,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24609 #endif
24610 }
24611
24612 +#ifdef CONFIG_GRKERNSEC_KMEM
24613 +static inline void gr_init_ebda(void)
24614 +{
24615 + unsigned int ebda_addr;
24616 + unsigned int ebda_size = 0;
24617 +
24618 + ebda_addr = get_bios_ebda();
24619 + if (ebda_addr) {
24620 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24621 + ebda_size <<= 10;
24622 + }
24623 + if (ebda_addr && ebda_size) {
24624 + ebda_start = ebda_addr >> PAGE_SHIFT;
24625 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24626 + } else {
24627 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24628 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24629 + }
24630 +}
24631 +#else
24632 +static inline void gr_init_ebda(void) { }
24633 +#endif
24634 +
24635 void free_initmem(void)
24636 {
24637 +#ifdef CONFIG_PAX_KERNEXEC
24638 +#ifdef CONFIG_X86_32
24639 + /* PaX: limit KERNEL_CS to actual size */
24640 + unsigned long addr, limit;
24641 + struct desc_struct d;
24642 + int cpu;
24643 +#else
24644 + pgd_t *pgd;
24645 + pud_t *pud;
24646 + pmd_t *pmd;
24647 + unsigned long addr, end;
24648 +#endif
24649 +#endif
24650 +
24651 + gr_init_ebda();
24652 +
24653 +#ifdef CONFIG_PAX_KERNEXEC
24654 +#ifdef CONFIG_X86_32
24655 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24656 + limit = (limit - 1UL) >> PAGE_SHIFT;
24657 +
24658 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24659 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24660 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24661 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24662 + }
24663 +
24664 + /* PaX: make KERNEL_CS read-only */
24665 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24666 + if (!paravirt_enabled())
24667 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24668 +/*
24669 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24670 + pgd = pgd_offset_k(addr);
24671 + pud = pud_offset(pgd, addr);
24672 + pmd = pmd_offset(pud, addr);
24673 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24674 + }
24675 +*/
24676 +#ifdef CONFIG_X86_PAE
24677 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24678 +/*
24679 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24680 + pgd = pgd_offset_k(addr);
24681 + pud = pud_offset(pgd, addr);
24682 + pmd = pmd_offset(pud, addr);
24683 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24684 + }
24685 +*/
24686 +#endif
24687 +
24688 +#ifdef CONFIG_MODULES
24689 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24690 +#endif
24691 +
24692 +#else
24693 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24694 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24695 + pgd = pgd_offset_k(addr);
24696 + pud = pud_offset(pgd, addr);
24697 + pmd = pmd_offset(pud, addr);
24698 + if (!pmd_present(*pmd))
24699 + continue;
24700 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24701 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24702 + else
24703 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24704 + }
24705 +
24706 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24707 + end = addr + KERNEL_IMAGE_SIZE;
24708 + for (; addr < end; addr += PMD_SIZE) {
24709 + pgd = pgd_offset_k(addr);
24710 + pud = pud_offset(pgd, addr);
24711 + pmd = pmd_offset(pud, addr);
24712 + if (!pmd_present(*pmd))
24713 + continue;
24714 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24715 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24716 + }
24717 +#endif
24718 +
24719 + flush_tlb_all();
24720 +#endif
24721 +
24722 free_init_pages("unused kernel memory",
24723 (unsigned long)(&__init_begin),
24724 (unsigned long)(&__init_end));
24725 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24726 index 575d86f..4987469 100644
24727 --- a/arch/x86/mm/init_32.c
24728 +++ b/arch/x86/mm/init_32.c
24729 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24730 }
24731
24732 /*
24733 - * Creates a middle page table and puts a pointer to it in the
24734 - * given global directory entry. This only returns the gd entry
24735 - * in non-PAE compilation mode, since the middle layer is folded.
24736 - */
24737 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24738 -{
24739 - pud_t *pud;
24740 - pmd_t *pmd_table;
24741 -
24742 -#ifdef CONFIG_X86_PAE
24743 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24744 - if (after_bootmem)
24745 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24746 - else
24747 - pmd_table = (pmd_t *)alloc_low_page();
24748 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24749 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24750 - pud = pud_offset(pgd, 0);
24751 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24752 -
24753 - return pmd_table;
24754 - }
24755 -#endif
24756 - pud = pud_offset(pgd, 0);
24757 - pmd_table = pmd_offset(pud, 0);
24758 -
24759 - return pmd_table;
24760 -}
24761 -
24762 -/*
24763 * Create a page table and place a pointer to it in a middle page
24764 * directory entry:
24765 */
24766 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24767 page_table = (pte_t *)alloc_low_page();
24768
24769 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24770 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24771 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24772 +#else
24773 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24774 +#endif
24775 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24776 }
24777
24778 return pte_offset_kernel(pmd, 0);
24779 }
24780
24781 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24782 +{
24783 + pud_t *pud;
24784 + pmd_t *pmd_table;
24785 +
24786 + pud = pud_offset(pgd, 0);
24787 + pmd_table = pmd_offset(pud, 0);
24788 +
24789 + return pmd_table;
24790 +}
24791 +
24792 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24793 {
24794 int pgd_idx = pgd_index(vaddr);
24795 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24796 int pgd_idx, pmd_idx;
24797 unsigned long vaddr;
24798 pgd_t *pgd;
24799 + pud_t *pud;
24800 pmd_t *pmd;
24801 pte_t *pte = NULL;
24802
24803 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24804 pgd = pgd_base + pgd_idx;
24805
24806 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24807 - pmd = one_md_table_init(pgd);
24808 - pmd = pmd + pmd_index(vaddr);
24809 + pud = pud_offset(pgd, vaddr);
24810 + pmd = pmd_offset(pud, vaddr);
24811 +
24812 +#ifdef CONFIG_X86_PAE
24813 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24814 +#endif
24815 +
24816 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24817 pmd++, pmd_idx++) {
24818 pte = page_table_kmap_check(one_page_table_init(pmd),
24819 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24820 }
24821 }
24822
24823 -static inline int is_kernel_text(unsigned long addr)
24824 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24825 {
24826 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24827 - return 1;
24828 - return 0;
24829 + if ((start > ktla_ktva((unsigned long)_etext) ||
24830 + end <= ktla_ktva((unsigned long)_stext)) &&
24831 + (start > ktla_ktva((unsigned long)_einittext) ||
24832 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24833 +
24834 +#ifdef CONFIG_ACPI_SLEEP
24835 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24836 +#endif
24837 +
24838 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24839 + return 0;
24840 + return 1;
24841 }
24842
24843 /*
24844 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24845 unsigned long last_map_addr = end;
24846 unsigned long start_pfn, end_pfn;
24847 pgd_t *pgd_base = swapper_pg_dir;
24848 - int pgd_idx, pmd_idx, pte_ofs;
24849 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24850 unsigned long pfn;
24851 pgd_t *pgd;
24852 + pud_t *pud;
24853 pmd_t *pmd;
24854 pte_t *pte;
24855 unsigned pages_2m, pages_4k;
24856 @@ -280,8 +281,13 @@ repeat:
24857 pfn = start_pfn;
24858 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24859 pgd = pgd_base + pgd_idx;
24860 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24861 - pmd = one_md_table_init(pgd);
24862 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24863 + pud = pud_offset(pgd, 0);
24864 + pmd = pmd_offset(pud, 0);
24865 +
24866 +#ifdef CONFIG_X86_PAE
24867 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24868 +#endif
24869
24870 if (pfn >= end_pfn)
24871 continue;
24872 @@ -293,14 +299,13 @@ repeat:
24873 #endif
24874 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24875 pmd++, pmd_idx++) {
24876 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24877 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24878
24879 /*
24880 * Map with big pages if possible, otherwise
24881 * create normal page tables:
24882 */
24883 if (use_pse) {
24884 - unsigned int addr2;
24885 pgprot_t prot = PAGE_KERNEL_LARGE;
24886 /*
24887 * first pass will use the same initial
24888 @@ -310,11 +315,7 @@ repeat:
24889 __pgprot(PTE_IDENT_ATTR |
24890 _PAGE_PSE);
24891
24892 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24893 - PAGE_OFFSET + PAGE_SIZE-1;
24894 -
24895 - if (is_kernel_text(addr) ||
24896 - is_kernel_text(addr2))
24897 + if (is_kernel_text(address, address + PMD_SIZE))
24898 prot = PAGE_KERNEL_LARGE_EXEC;
24899
24900 pages_2m++;
24901 @@ -331,7 +332,7 @@ repeat:
24902 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24903 pte += pte_ofs;
24904 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24905 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24906 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24907 pgprot_t prot = PAGE_KERNEL;
24908 /*
24909 * first pass will use the same initial
24910 @@ -339,7 +340,7 @@ repeat:
24911 */
24912 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24913
24914 - if (is_kernel_text(addr))
24915 + if (is_kernel_text(address, address + PAGE_SIZE))
24916 prot = PAGE_KERNEL_EXEC;
24917
24918 pages_4k++;
24919 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24920
24921 pud = pud_offset(pgd, va);
24922 pmd = pmd_offset(pud, va);
24923 - if (!pmd_present(*pmd))
24924 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24925 break;
24926
24927 pte = pte_offset_kernel(pmd, va);
24928 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24929
24930 static void __init pagetable_init(void)
24931 {
24932 - pgd_t *pgd_base = swapper_pg_dir;
24933 -
24934 - permanent_kmaps_init(pgd_base);
24935 + permanent_kmaps_init(swapper_pg_dir);
24936 }
24937
24938 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24939 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24940 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24941
24942 /* user-defined highmem size */
24943 @@ -734,6 +733,12 @@ void __init mem_init(void)
24944
24945 pci_iommu_alloc();
24946
24947 +#ifdef CONFIG_PAX_PER_CPU_PGD
24948 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24949 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24950 + KERNEL_PGD_PTRS);
24951 +#endif
24952 +
24953 #ifdef CONFIG_FLATMEM
24954 BUG_ON(!mem_map);
24955 #endif
24956 @@ -760,7 +765,7 @@ void __init mem_init(void)
24957 reservedpages++;
24958
24959 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24960 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24961 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24962 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24963
24964 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24965 @@ -801,10 +806,10 @@ void __init mem_init(void)
24966 ((unsigned long)&__init_end -
24967 (unsigned long)&__init_begin) >> 10,
24968
24969 - (unsigned long)&_etext, (unsigned long)&_edata,
24970 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24971 + (unsigned long)&_sdata, (unsigned long)&_edata,
24972 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24973
24974 - (unsigned long)&_text, (unsigned long)&_etext,
24975 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24976 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24977
24978 /*
24979 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24980 if (!kernel_set_to_readonly)
24981 return;
24982
24983 + start = ktla_ktva(start);
24984 pr_debug("Set kernel text: %lx - %lx for read write\n",
24985 start, start+size);
24986
24987 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24988 if (!kernel_set_to_readonly)
24989 return;
24990
24991 + start = ktla_ktva(start);
24992 pr_debug("Set kernel text: %lx - %lx for read only\n",
24993 start, start+size);
24994
24995 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24996 unsigned long start = PFN_ALIGN(_text);
24997 unsigned long size = PFN_ALIGN(_etext) - start;
24998
24999 + start = ktla_ktva(start);
25000 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25001 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25002 size >> 10);
25003 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25004 index 2b6b4a3..c17210d 100644
25005 --- a/arch/x86/mm/init_64.c
25006 +++ b/arch/x86/mm/init_64.c
25007 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25008 * around without checking the pgd every time.
25009 */
25010
25011 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25012 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25013 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25014
25015 int force_personality32;
25016 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25017
25018 for (address = start; address <= end; address += PGDIR_SIZE) {
25019 const pgd_t *pgd_ref = pgd_offset_k(address);
25020 +
25021 +#ifdef CONFIG_PAX_PER_CPU_PGD
25022 + unsigned long cpu;
25023 +#else
25024 struct page *page;
25025 +#endif
25026
25027 if (pgd_none(*pgd_ref))
25028 continue;
25029
25030 spin_lock(&pgd_lock);
25031 +
25032 +#ifdef CONFIG_PAX_PER_CPU_PGD
25033 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25034 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25035 +#else
25036 list_for_each_entry(page, &pgd_list, lru) {
25037 pgd_t *pgd;
25038 spinlock_t *pgt_lock;
25039 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25040 /* the pgt_lock only for Xen */
25041 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25042 spin_lock(pgt_lock);
25043 +#endif
25044
25045 if (pgd_none(*pgd))
25046 set_pgd(pgd, *pgd_ref);
25047 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25048 BUG_ON(pgd_page_vaddr(*pgd)
25049 != pgd_page_vaddr(*pgd_ref));
25050
25051 +#ifndef CONFIG_PAX_PER_CPU_PGD
25052 spin_unlock(pgt_lock);
25053 +#endif
25054 +
25055 }
25056 spin_unlock(&pgd_lock);
25057 }
25058 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25059 {
25060 if (pgd_none(*pgd)) {
25061 pud_t *pud = (pud_t *)spp_getpage();
25062 - pgd_populate(&init_mm, pgd, pud);
25063 + pgd_populate_kernel(&init_mm, pgd, pud);
25064 if (pud != pud_offset(pgd, 0))
25065 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25066 pud, pud_offset(pgd, 0));
25067 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25068 {
25069 if (pud_none(*pud)) {
25070 pmd_t *pmd = (pmd_t *) spp_getpage();
25071 - pud_populate(&init_mm, pud, pmd);
25072 + pud_populate_kernel(&init_mm, pud, pmd);
25073 if (pmd != pmd_offset(pud, 0))
25074 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25075 pmd, pmd_offset(pud, 0));
25076 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25077 pmd = fill_pmd(pud, vaddr);
25078 pte = fill_pte(pmd, vaddr);
25079
25080 + pax_open_kernel();
25081 set_pte(pte, new_pte);
25082 + pax_close_kernel();
25083
25084 /*
25085 * It's enough to flush this one mapping.
25086 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25087 pgd = pgd_offset_k((unsigned long)__va(phys));
25088 if (pgd_none(*pgd)) {
25089 pud = (pud_t *) spp_getpage();
25090 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25091 - _PAGE_USER));
25092 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25093 }
25094 pud = pud_offset(pgd, (unsigned long)__va(phys));
25095 if (pud_none(*pud)) {
25096 pmd = (pmd_t *) spp_getpage();
25097 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25098 - _PAGE_USER));
25099 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25100 }
25101 pmd = pmd_offset(pud, phys);
25102 BUG_ON(!pmd_none(*pmd));
25103 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25104 if (pfn >= pgt_buf_top)
25105 panic("alloc_low_page: ran out of memory");
25106
25107 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25108 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25109 clear_page(adr);
25110 *phys = pfn * PAGE_SIZE;
25111 return adr;
25112 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25113
25114 phys = __pa(virt);
25115 left = phys & (PAGE_SIZE - 1);
25116 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25117 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25118 adr = (void *)(((unsigned long)adr) | left);
25119
25120 return adr;
25121 @@ -548,7 +562,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25122 unmap_low_page(pmd);
25123
25124 spin_lock(&init_mm.page_table_lock);
25125 - pud_populate(&init_mm, pud, __va(pmd_phys));
25126 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25127 spin_unlock(&init_mm.page_table_lock);
25128 }
25129 __flush_tlb_all();
25130 @@ -594,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
25131 unmap_low_page(pud);
25132
25133 spin_lock(&init_mm.page_table_lock);
25134 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25135 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25136 spin_unlock(&init_mm.page_table_lock);
25137 pgd_changed = true;
25138 }
25139 @@ -686,6 +700,12 @@ void __init mem_init(void)
25140
25141 pci_iommu_alloc();
25142
25143 +#ifdef CONFIG_PAX_PER_CPU_PGD
25144 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25145 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25146 + KERNEL_PGD_PTRS);
25147 +#endif
25148 +
25149 /* clear_bss() already clear the empty_zero_page */
25150
25151 reservedpages = 0;
25152 @@ -846,8 +866,8 @@ int kern_addr_valid(unsigned long addr)
25153 static struct vm_area_struct gate_vma = {
25154 .vm_start = VSYSCALL_START,
25155 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25156 - .vm_page_prot = PAGE_READONLY_EXEC,
25157 - .vm_flags = VM_READ | VM_EXEC
25158 + .vm_page_prot = PAGE_READONLY,
25159 + .vm_flags = VM_READ
25160 };
25161
25162 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25163 @@ -881,7 +901,7 @@ int in_gate_area_no_mm(unsigned long addr)
25164
25165 const char *arch_vma_name(struct vm_area_struct *vma)
25166 {
25167 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25168 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25169 return "[vdso]";
25170 if (vma == &gate_vma)
25171 return "[vsyscall]";
25172 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25173 index 7b179b4..6bd1777 100644
25174 --- a/arch/x86/mm/iomap_32.c
25175 +++ b/arch/x86/mm/iomap_32.c
25176 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25177 type = kmap_atomic_idx_push();
25178 idx = type + KM_TYPE_NR * smp_processor_id();
25179 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25180 +
25181 + pax_open_kernel();
25182 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25183 + pax_close_kernel();
25184 +
25185 arch_flush_lazy_mmu_mode();
25186
25187 return (void *)vaddr;
25188 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25189 index 78fe3f1..8293b6f 100644
25190 --- a/arch/x86/mm/ioremap.c
25191 +++ b/arch/x86/mm/ioremap.c
25192 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25193 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25194 int is_ram = page_is_ram(pfn);
25195
25196 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25197 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25198 return NULL;
25199 WARN_ON_ONCE(is_ram);
25200 }
25201 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25202
25203 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25204 if (page_is_ram(start >> PAGE_SHIFT))
25205 +#ifdef CONFIG_HIGHMEM
25206 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25207 +#endif
25208 return __va(phys);
25209
25210 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25211 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25212 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25213
25214 static __initdata int after_paging_init;
25215 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25216 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25217
25218 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25219 {
25220 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25221 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25222
25223 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25224 - memset(bm_pte, 0, sizeof(bm_pte));
25225 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25226 + pmd_populate_user(&init_mm, pmd, bm_pte);
25227
25228 /*
25229 * The boot-ioremap range spans multiple pmds, for which
25230 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25231 index d87dd6d..bf3fa66 100644
25232 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25233 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25234 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25235 * memory (e.g. tracked pages)? For now, we need this to avoid
25236 * invoking kmemcheck for PnP BIOS calls.
25237 */
25238 - if (regs->flags & X86_VM_MASK)
25239 + if (v8086_mode(regs))
25240 return false;
25241 - if (regs->cs != __KERNEL_CS)
25242 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25243 return false;
25244
25245 pte = kmemcheck_pte_lookup(address);
25246 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25247 index 845df68..1d8d29f 100644
25248 --- a/arch/x86/mm/mmap.c
25249 +++ b/arch/x86/mm/mmap.c
25250 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25251 * Leave an at least ~128 MB hole with possible stack randomization.
25252 */
25253 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25254 -#define MAX_GAP (TASK_SIZE/6*5)
25255 +#define MAX_GAP (pax_task_size/6*5)
25256
25257 static int mmap_is_legacy(void)
25258 {
25259 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25260 return rnd << PAGE_SHIFT;
25261 }
25262
25263 -static unsigned long mmap_base(void)
25264 +static unsigned long mmap_base(struct mm_struct *mm)
25265 {
25266 unsigned long gap = rlimit(RLIMIT_STACK);
25267 + unsigned long pax_task_size = TASK_SIZE;
25268 +
25269 +#ifdef CONFIG_PAX_SEGMEXEC
25270 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25271 + pax_task_size = SEGMEXEC_TASK_SIZE;
25272 +#endif
25273
25274 if (gap < MIN_GAP)
25275 gap = MIN_GAP;
25276 else if (gap > MAX_GAP)
25277 gap = MAX_GAP;
25278
25279 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25280 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25281 }
25282
25283 /*
25284 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25285 * does, but not when emulating X86_32
25286 */
25287 -static unsigned long mmap_legacy_base(void)
25288 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25289 {
25290 - if (mmap_is_ia32())
25291 + if (mmap_is_ia32()) {
25292 +
25293 +#ifdef CONFIG_PAX_SEGMEXEC
25294 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25295 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25296 + else
25297 +#endif
25298 +
25299 return TASK_UNMAPPED_BASE;
25300 - else
25301 + } else
25302 return TASK_UNMAPPED_BASE + mmap_rnd();
25303 }
25304
25305 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25306 void arch_pick_mmap_layout(struct mm_struct *mm)
25307 {
25308 if (mmap_is_legacy()) {
25309 - mm->mmap_base = mmap_legacy_base();
25310 + mm->mmap_base = mmap_legacy_base(mm);
25311 +
25312 +#ifdef CONFIG_PAX_RANDMMAP
25313 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25314 + mm->mmap_base += mm->delta_mmap;
25315 +#endif
25316 +
25317 mm->get_unmapped_area = arch_get_unmapped_area;
25318 mm->unmap_area = arch_unmap_area;
25319 } else {
25320 - mm->mmap_base = mmap_base();
25321 + mm->mmap_base = mmap_base(mm);
25322 +
25323 +#ifdef CONFIG_PAX_RANDMMAP
25324 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25325 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25326 +#endif
25327 +
25328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25329 mm->unmap_area = arch_unmap_area_topdown;
25330 }
25331 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25332 index dc0b727..dc9d71a 100644
25333 --- a/arch/x86/mm/mmio-mod.c
25334 +++ b/arch/x86/mm/mmio-mod.c
25335 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25336 break;
25337 default:
25338 {
25339 - unsigned char *ip = (unsigned char *)instptr;
25340 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25341 my_trace->opcode = MMIO_UNKNOWN_OP;
25342 my_trace->width = 0;
25343 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25344 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25345 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25346 void __iomem *addr)
25347 {
25348 - static atomic_t next_id;
25349 + static atomic_unchecked_t next_id;
25350 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25351 /* These are page-unaligned. */
25352 struct mmiotrace_map map = {
25353 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25354 .private = trace
25355 },
25356 .phys = offset,
25357 - .id = atomic_inc_return(&next_id)
25358 + .id = atomic_inc_return_unchecked(&next_id)
25359 };
25360 map.map_id = trace->id;
25361
25362 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25363 index b008656..773eac2 100644
25364 --- a/arch/x86/mm/pageattr-test.c
25365 +++ b/arch/x86/mm/pageattr-test.c
25366 @@ -36,7 +36,7 @@ enum {
25367
25368 static int pte_testbit(pte_t pte)
25369 {
25370 - return pte_flags(pte) & _PAGE_UNUSED1;
25371 + return pte_flags(pte) & _PAGE_CPA_TEST;
25372 }
25373
25374 struct split_state {
25375 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25376 index a718e0d..45efc32 100644
25377 --- a/arch/x86/mm/pageattr.c
25378 +++ b/arch/x86/mm/pageattr.c
25379 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25380 */
25381 #ifdef CONFIG_PCI_BIOS
25382 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25383 - pgprot_val(forbidden) |= _PAGE_NX;
25384 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25385 #endif
25386
25387 /*
25388 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25389 * Does not cover __inittext since that is gone later on. On
25390 * 64bit we do not enforce !NX on the low mapping
25391 */
25392 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25393 - pgprot_val(forbidden) |= _PAGE_NX;
25394 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25395 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25396
25397 +#ifdef CONFIG_DEBUG_RODATA
25398 /*
25399 * The .rodata section needs to be read-only. Using the pfn
25400 * catches all aliases.
25401 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25402 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25403 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25404 pgprot_val(forbidden) |= _PAGE_RW;
25405 +#endif
25406
25407 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25408 /*
25409 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25410 }
25411 #endif
25412
25413 +#ifdef CONFIG_PAX_KERNEXEC
25414 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25415 + pgprot_val(forbidden) |= _PAGE_RW;
25416 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25417 + }
25418 +#endif
25419 +
25420 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25421
25422 return prot;
25423 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25424 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25425 {
25426 /* change init_mm */
25427 + pax_open_kernel();
25428 set_pte_atomic(kpte, pte);
25429 +
25430 #ifdef CONFIG_X86_32
25431 if (!SHARED_KERNEL_PMD) {
25432 +
25433 +#ifdef CONFIG_PAX_PER_CPU_PGD
25434 + unsigned long cpu;
25435 +#else
25436 struct page *page;
25437 +#endif
25438
25439 +#ifdef CONFIG_PAX_PER_CPU_PGD
25440 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25441 + pgd_t *pgd = get_cpu_pgd(cpu);
25442 +#else
25443 list_for_each_entry(page, &pgd_list, lru) {
25444 - pgd_t *pgd;
25445 + pgd_t *pgd = (pgd_t *)page_address(page);
25446 +#endif
25447 +
25448 pud_t *pud;
25449 pmd_t *pmd;
25450
25451 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25452 + pgd += pgd_index(address);
25453 pud = pud_offset(pgd, address);
25454 pmd = pmd_offset(pud, address);
25455 set_pte_atomic((pte_t *)pmd, pte);
25456 }
25457 }
25458 #endif
25459 + pax_close_kernel();
25460 }
25461
25462 static int
25463 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25464 index 3d68ef6..7f69136 100644
25465 --- a/arch/x86/mm/pat.c
25466 +++ b/arch/x86/mm/pat.c
25467 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
25468
25469 if (!entry) {
25470 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
25471 - current->comm, current->pid, start, end - 1);
25472 + current->comm, task_pid_nr(current), start, end - 1);
25473 return -EINVAL;
25474 }
25475
25476 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25477
25478 while (cursor < to) {
25479 if (!devmem_is_allowed(pfn)) {
25480 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
25481 - current->comm, from, to - 1);
25482 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
25483 + current->comm, from, to - 1, cursor);
25484 return 0;
25485 }
25486 cursor += PAGE_SIZE;
25487 @@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25488 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
25489 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
25490 "for [mem %#010Lx-%#010Lx]\n",
25491 - current->comm, current->pid,
25492 + current->comm, task_pid_nr(current),
25493 cattr_name(flags),
25494 base, (unsigned long long)(base + size-1));
25495 return -EINVAL;
25496 @@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25497 flags = lookup_memtype(paddr);
25498 if (want_flags != flags) {
25499 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
25500 - current->comm, current->pid,
25501 + current->comm, task_pid_nr(current),
25502 cattr_name(want_flags),
25503 (unsigned long long)paddr,
25504 (unsigned long long)(paddr + size - 1),
25505 @@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25506 free_memtype(paddr, paddr + size);
25507 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25508 " for [mem %#010Lx-%#010Lx], got %s\n",
25509 - current->comm, current->pid,
25510 + current->comm, task_pid_nr(current),
25511 cattr_name(want_flags),
25512 (unsigned long long)paddr,
25513 (unsigned long long)(paddr + size - 1),
25514 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25515 index 9f0614d..92ae64a 100644
25516 --- a/arch/x86/mm/pf_in.c
25517 +++ b/arch/x86/mm/pf_in.c
25518 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25519 int i;
25520 enum reason_type rv = OTHERS;
25521
25522 - p = (unsigned char *)ins_addr;
25523 + p = (unsigned char *)ktla_ktva(ins_addr);
25524 p += skip_prefix(p, &prf);
25525 p += get_opcode(p, &opcode);
25526
25527 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25528 struct prefix_bits prf;
25529 int i;
25530
25531 - p = (unsigned char *)ins_addr;
25532 + p = (unsigned char *)ktla_ktva(ins_addr);
25533 p += skip_prefix(p, &prf);
25534 p += get_opcode(p, &opcode);
25535
25536 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25537 struct prefix_bits prf;
25538 int i;
25539
25540 - p = (unsigned char *)ins_addr;
25541 + p = (unsigned char *)ktla_ktva(ins_addr);
25542 p += skip_prefix(p, &prf);
25543 p += get_opcode(p, &opcode);
25544
25545 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25546 struct prefix_bits prf;
25547 int i;
25548
25549 - p = (unsigned char *)ins_addr;
25550 + p = (unsigned char *)ktla_ktva(ins_addr);
25551 p += skip_prefix(p, &prf);
25552 p += get_opcode(p, &opcode);
25553 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25554 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25555 struct prefix_bits prf;
25556 int i;
25557
25558 - p = (unsigned char *)ins_addr;
25559 + p = (unsigned char *)ktla_ktva(ins_addr);
25560 p += skip_prefix(p, &prf);
25561 p += get_opcode(p, &opcode);
25562 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25563 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25564 index 8573b83..4f3ed7e 100644
25565 --- a/arch/x86/mm/pgtable.c
25566 +++ b/arch/x86/mm/pgtable.c
25567 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25568 list_del(&page->lru);
25569 }
25570
25571 -#define UNSHARED_PTRS_PER_PGD \
25572 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25573 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25574 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25575
25576 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25577 +{
25578 + unsigned int count = USER_PGD_PTRS;
25579
25580 + while (count--)
25581 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25582 +}
25583 +#endif
25584 +
25585 +#ifdef CONFIG_PAX_PER_CPU_PGD
25586 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25587 +{
25588 + unsigned int count = USER_PGD_PTRS;
25589 +
25590 + while (count--) {
25591 + pgd_t pgd;
25592 +
25593 +#ifdef CONFIG_X86_64
25594 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25595 +#else
25596 + pgd = *src++;
25597 +#endif
25598 +
25599 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25600 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25601 +#endif
25602 +
25603 + *dst++ = pgd;
25604 + }
25605 +
25606 +}
25607 +#endif
25608 +
25609 +#ifdef CONFIG_X86_64
25610 +#define pxd_t pud_t
25611 +#define pyd_t pgd_t
25612 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25613 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25614 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25615 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25616 +#define PYD_SIZE PGDIR_SIZE
25617 +#else
25618 +#define pxd_t pmd_t
25619 +#define pyd_t pud_t
25620 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25621 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25622 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25623 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25624 +#define PYD_SIZE PUD_SIZE
25625 +#endif
25626 +
25627 +#ifdef CONFIG_PAX_PER_CPU_PGD
25628 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25629 +static inline void pgd_dtor(pgd_t *pgd) {}
25630 +#else
25631 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25632 {
25633 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25634 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25635 pgd_list_del(pgd);
25636 spin_unlock(&pgd_lock);
25637 }
25638 +#endif
25639
25640 /*
25641 * List of all pgd's needed for non-PAE so it can invalidate entries
25642 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25643 * -- wli
25644 */
25645
25646 -#ifdef CONFIG_X86_PAE
25647 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25648 /*
25649 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25650 * updating the top-level pagetable entries to guarantee the
25651 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25652 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25653 * and initialize the kernel pmds here.
25654 */
25655 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25656 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25657
25658 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25659 {
25660 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25661 */
25662 flush_tlb_mm(mm);
25663 }
25664 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25665 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25666 #else /* !CONFIG_X86_PAE */
25667
25668 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25669 -#define PREALLOCATED_PMDS 0
25670 +#define PREALLOCATED_PXDS 0
25671
25672 #endif /* CONFIG_X86_PAE */
25673
25674 -static void free_pmds(pmd_t *pmds[])
25675 +static void free_pxds(pxd_t *pxds[])
25676 {
25677 int i;
25678
25679 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25680 - if (pmds[i])
25681 - free_page((unsigned long)pmds[i]);
25682 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25683 + if (pxds[i])
25684 + free_page((unsigned long)pxds[i]);
25685 }
25686
25687 -static int preallocate_pmds(pmd_t *pmds[])
25688 +static int preallocate_pxds(pxd_t *pxds[])
25689 {
25690 int i;
25691 bool failed = false;
25692
25693 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25694 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25695 - if (pmd == NULL)
25696 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25697 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25698 + if (pxd == NULL)
25699 failed = true;
25700 - pmds[i] = pmd;
25701 + pxds[i] = pxd;
25702 }
25703
25704 if (failed) {
25705 - free_pmds(pmds);
25706 + free_pxds(pxds);
25707 return -ENOMEM;
25708 }
25709
25710 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25711 * preallocate which never got a corresponding vma will need to be
25712 * freed manually.
25713 */
25714 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25715 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25716 {
25717 int i;
25718
25719 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25720 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25721 pgd_t pgd = pgdp[i];
25722
25723 if (pgd_val(pgd) != 0) {
25724 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25725 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25726
25727 - pgdp[i] = native_make_pgd(0);
25728 + set_pgd(pgdp + i, native_make_pgd(0));
25729
25730 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25731 - pmd_free(mm, pmd);
25732 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25733 + pxd_free(mm, pxd);
25734 }
25735 }
25736 }
25737
25738 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25739 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25740 {
25741 - pud_t *pud;
25742 + pyd_t *pyd;
25743 unsigned long addr;
25744 int i;
25745
25746 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25747 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25748 return;
25749
25750 - pud = pud_offset(pgd, 0);
25751 +#ifdef CONFIG_X86_64
25752 + pyd = pyd_offset(mm, 0L);
25753 +#else
25754 + pyd = pyd_offset(pgd, 0L);
25755 +#endif
25756
25757 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25758 - i++, pud++, addr += PUD_SIZE) {
25759 - pmd_t *pmd = pmds[i];
25760 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25761 + i++, pyd++, addr += PYD_SIZE) {
25762 + pxd_t *pxd = pxds[i];
25763
25764 if (i >= KERNEL_PGD_BOUNDARY)
25765 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25766 - sizeof(pmd_t) * PTRS_PER_PMD);
25767 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25768 + sizeof(pxd_t) * PTRS_PER_PMD);
25769
25770 - pud_populate(mm, pud, pmd);
25771 + pyd_populate(mm, pyd, pxd);
25772 }
25773 }
25774
25775 pgd_t *pgd_alloc(struct mm_struct *mm)
25776 {
25777 pgd_t *pgd;
25778 - pmd_t *pmds[PREALLOCATED_PMDS];
25779 + pxd_t *pxds[PREALLOCATED_PXDS];
25780
25781 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25782
25783 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25784
25785 mm->pgd = pgd;
25786
25787 - if (preallocate_pmds(pmds) != 0)
25788 + if (preallocate_pxds(pxds) != 0)
25789 goto out_free_pgd;
25790
25791 if (paravirt_pgd_alloc(mm) != 0)
25792 - goto out_free_pmds;
25793 + goto out_free_pxds;
25794
25795 /*
25796 * Make sure that pre-populating the pmds is atomic with
25797 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25798 spin_lock(&pgd_lock);
25799
25800 pgd_ctor(mm, pgd);
25801 - pgd_prepopulate_pmd(mm, pgd, pmds);
25802 + pgd_prepopulate_pxd(mm, pgd, pxds);
25803
25804 spin_unlock(&pgd_lock);
25805
25806 return pgd;
25807
25808 -out_free_pmds:
25809 - free_pmds(pmds);
25810 +out_free_pxds:
25811 + free_pxds(pxds);
25812 out_free_pgd:
25813 free_page((unsigned long)pgd);
25814 out:
25815 @@ -295,7 +356,7 @@ out:
25816
25817 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25818 {
25819 - pgd_mop_up_pmds(mm, pgd);
25820 + pgd_mop_up_pxds(mm, pgd);
25821 pgd_dtor(pgd);
25822 paravirt_pgd_free(mm, pgd);
25823 free_page((unsigned long)pgd);
25824 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25825 index a69bcb8..19068ab 100644
25826 --- a/arch/x86/mm/pgtable_32.c
25827 +++ b/arch/x86/mm/pgtable_32.c
25828 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25829 return;
25830 }
25831 pte = pte_offset_kernel(pmd, vaddr);
25832 +
25833 + pax_open_kernel();
25834 if (pte_val(pteval))
25835 set_pte_at(&init_mm, vaddr, pte, pteval);
25836 else
25837 pte_clear(&init_mm, vaddr, pte);
25838 + pax_close_kernel();
25839
25840 /*
25841 * It's enough to flush this one mapping.
25842 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25843 index 410531d..0f16030 100644
25844 --- a/arch/x86/mm/setup_nx.c
25845 +++ b/arch/x86/mm/setup_nx.c
25846 @@ -5,8 +5,10 @@
25847 #include <asm/pgtable.h>
25848 #include <asm/proto.h>
25849
25850 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25851 static int disable_nx __cpuinitdata;
25852
25853 +#ifndef CONFIG_PAX_PAGEEXEC
25854 /*
25855 * noexec = on|off
25856 *
25857 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25858 return 0;
25859 }
25860 early_param("noexec", noexec_setup);
25861 +#endif
25862 +
25863 +#endif
25864
25865 void __cpuinit x86_configure_nx(void)
25866 {
25867 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25868 if (cpu_has_nx && !disable_nx)
25869 __supported_pte_mask |= _PAGE_NX;
25870 else
25871 +#endif
25872 __supported_pte_mask &= ~_PAGE_NX;
25873 }
25874
25875 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25876 index 5e57e11..64874249 100644
25877 --- a/arch/x86/mm/tlb.c
25878 +++ b/arch/x86/mm/tlb.c
25879 @@ -66,7 +66,11 @@ void leave_mm(int cpu)
25880 BUG();
25881 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
25882 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
25883 +
25884 +#ifndef CONFIG_PAX_PER_CPU_PGD
25885 load_cr3(swapper_pg_dir);
25886 +#endif
25887 +
25888 }
25889 }
25890 EXPORT_SYMBOL_GPL(leave_mm);
25891 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25892 index 877b9a1..a8ecf42 100644
25893 --- a/arch/x86/net/bpf_jit.S
25894 +++ b/arch/x86/net/bpf_jit.S
25895 @@ -9,6 +9,7 @@
25896 */
25897 #include <linux/linkage.h>
25898 #include <asm/dwarf2.h>
25899 +#include <asm/alternative-asm.h>
25900
25901 /*
25902 * Calling convention :
25903 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25904 jle bpf_slow_path_word
25905 mov (SKBDATA,%rsi),%eax
25906 bswap %eax /* ntohl() */
25907 + pax_force_retaddr
25908 ret
25909
25910 sk_load_half:
25911 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25912 jle bpf_slow_path_half
25913 movzwl (SKBDATA,%rsi),%eax
25914 rol $8,%ax # ntohs()
25915 + pax_force_retaddr
25916 ret
25917
25918 sk_load_byte:
25919 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25920 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25921 jle bpf_slow_path_byte
25922 movzbl (SKBDATA,%rsi),%eax
25923 + pax_force_retaddr
25924 ret
25925
25926 /**
25927 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25928 movzbl (SKBDATA,%rsi),%ebx
25929 and $15,%bl
25930 shl $2,%bl
25931 + pax_force_retaddr
25932 ret
25933
25934 /* rsi contains offset and can be scratched */
25935 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25936 js bpf_error
25937 mov -12(%rbp),%eax
25938 bswap %eax
25939 + pax_force_retaddr
25940 ret
25941
25942 bpf_slow_path_half:
25943 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25944 mov -12(%rbp),%ax
25945 rol $8,%ax
25946 movzwl %ax,%eax
25947 + pax_force_retaddr
25948 ret
25949
25950 bpf_slow_path_byte:
25951 bpf_slow_path_common(1)
25952 js bpf_error
25953 movzbl -12(%rbp),%eax
25954 + pax_force_retaddr
25955 ret
25956
25957 bpf_slow_path_byte_msh:
25958 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25959 and $15,%al
25960 shl $2,%al
25961 xchg %eax,%ebx
25962 + pax_force_retaddr
25963 ret
25964
25965 #define sk_negative_common(SIZE) \
25966 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25967 sk_negative_common(4)
25968 mov (%rax), %eax
25969 bswap %eax
25970 + pax_force_retaddr
25971 ret
25972
25973 bpf_slow_path_half_neg:
25974 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25975 mov (%rax),%ax
25976 rol $8,%ax
25977 movzwl %ax,%eax
25978 + pax_force_retaddr
25979 ret
25980
25981 bpf_slow_path_byte_neg:
25982 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25983 .globl sk_load_byte_negative_offset
25984 sk_negative_common(1)
25985 movzbl (%rax), %eax
25986 + pax_force_retaddr
25987 ret
25988
25989 bpf_slow_path_byte_msh_neg:
25990 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25991 and $15,%al
25992 shl $2,%al
25993 xchg %eax,%ebx
25994 + pax_force_retaddr
25995 ret
25996
25997 bpf_error:
25998 @@ -197,4 +210,5 @@ bpf_error:
25999 xor %eax,%eax
26000 mov -8(%rbp),%rbx
26001 leaveq
26002 + pax_force_retaddr
26003 ret
26004 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26005 index 0597f95..a12c36e 100644
26006 --- a/arch/x86/net/bpf_jit_comp.c
26007 +++ b/arch/x86/net/bpf_jit_comp.c
26008 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
26009 set_fs(old_fs);
26010 }
26011
26012 +struct bpf_jit_work {
26013 + struct work_struct work;
26014 + void *image;
26015 +};
26016 +
26017 #define CHOOSE_LOAD_FUNC(K, func) \
26018 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26019
26020 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26021 if (addrs == NULL)
26022 return;
26023
26024 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26025 + if (!fp->work)
26026 + goto out;
26027 +
26028 /* Before first pass, make a rough estimation of addrs[]
26029 * each bpf instruction is translated to less than 64 bytes
26030 */
26031 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26032 break;
26033 default:
26034 /* hmm, too complex filter, give up with jit compiler */
26035 - goto out;
26036 + goto error;
26037 }
26038 ilen = prog - temp;
26039 if (image) {
26040 if (unlikely(proglen + ilen > oldproglen)) {
26041 pr_err("bpb_jit_compile fatal error\n");
26042 - kfree(addrs);
26043 - module_free(NULL, image);
26044 - return;
26045 + module_free_exec(NULL, image);
26046 + goto error;
26047 }
26048 + pax_open_kernel();
26049 memcpy(image + proglen, temp, ilen);
26050 + pax_close_kernel();
26051 }
26052 proglen += ilen;
26053 addrs[i] = proglen;
26054 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26055 break;
26056 }
26057 if (proglen == oldproglen) {
26058 - image = module_alloc(max_t(unsigned int,
26059 - proglen,
26060 - sizeof(struct work_struct)));
26061 + image = module_alloc_exec(proglen);
26062 if (!image)
26063 - goto out;
26064 + goto error;
26065 }
26066 oldproglen = proglen;
26067 }
26068 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26069 bpf_flush_icache(image, image + proglen);
26070
26071 fp->bpf_func = (void *)image;
26072 - }
26073 + } else
26074 +error:
26075 + kfree(fp->work);
26076 +
26077 out:
26078 kfree(addrs);
26079 return;
26080 @@ -648,18 +659,20 @@ out:
26081
26082 static void jit_free_defer(struct work_struct *arg)
26083 {
26084 - module_free(NULL, arg);
26085 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26086 + kfree(arg);
26087 }
26088
26089 /* run from softirq, we must use a work_struct to call
26090 - * module_free() from process context
26091 + * module_free_exec() from process context
26092 */
26093 void bpf_jit_free(struct sk_filter *fp)
26094 {
26095 if (fp->bpf_func != sk_run_filter) {
26096 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26097 + struct work_struct *work = &fp->work->work;
26098
26099 INIT_WORK(work, jit_free_defer);
26100 + fp->work->image = fp->bpf_func;
26101 schedule_work(work);
26102 }
26103 }
26104 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26105 index d6aa6e8..266395a 100644
26106 --- a/arch/x86/oprofile/backtrace.c
26107 +++ b/arch/x86/oprofile/backtrace.c
26108 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26109 struct stack_frame_ia32 *fp;
26110 unsigned long bytes;
26111
26112 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26113 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26114 if (bytes != sizeof(bufhead))
26115 return NULL;
26116
26117 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26118 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26119
26120 oprofile_add_trace(bufhead[0].return_address);
26121
26122 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26123 struct stack_frame bufhead[2];
26124 unsigned long bytes;
26125
26126 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26127 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26128 if (bytes != sizeof(bufhead))
26129 return NULL;
26130
26131 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26132 {
26133 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26134
26135 - if (!user_mode_vm(regs)) {
26136 + if (!user_mode(regs)) {
26137 unsigned long stack = kernel_stack_pointer(regs);
26138 if (depth)
26139 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26140 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26141 index 140942f..8a5cc55 100644
26142 --- a/arch/x86/pci/mrst.c
26143 +++ b/arch/x86/pci/mrst.c
26144 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26145 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26146 pci_mmcfg_late_init();
26147 pcibios_enable_irq = mrst_pci_irq_enable;
26148 - pci_root_ops = pci_mrst_ops;
26149 + pax_open_kernel();
26150 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26151 + pax_close_kernel();
26152 pci_soc_mode = 1;
26153 /* Continue with standard init */
26154 return 1;
26155 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26156 index da8fe05..7ee6704 100644
26157 --- a/arch/x86/pci/pcbios.c
26158 +++ b/arch/x86/pci/pcbios.c
26159 @@ -79,50 +79,93 @@ union bios32 {
26160 static struct {
26161 unsigned long address;
26162 unsigned short segment;
26163 -} bios32_indirect = { 0, __KERNEL_CS };
26164 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26165
26166 /*
26167 * Returns the entry point for the given service, NULL on error
26168 */
26169
26170 -static unsigned long bios32_service(unsigned long service)
26171 +static unsigned long __devinit bios32_service(unsigned long service)
26172 {
26173 unsigned char return_code; /* %al */
26174 unsigned long address; /* %ebx */
26175 unsigned long length; /* %ecx */
26176 unsigned long entry; /* %edx */
26177 unsigned long flags;
26178 + struct desc_struct d, *gdt;
26179
26180 local_irq_save(flags);
26181 - __asm__("lcall *(%%edi); cld"
26182 +
26183 + gdt = get_cpu_gdt_table(smp_processor_id());
26184 +
26185 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26186 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26187 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26188 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26189 +
26190 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26191 : "=a" (return_code),
26192 "=b" (address),
26193 "=c" (length),
26194 "=d" (entry)
26195 : "0" (service),
26196 "1" (0),
26197 - "D" (&bios32_indirect));
26198 + "D" (&bios32_indirect),
26199 + "r"(__PCIBIOS_DS)
26200 + : "memory");
26201 +
26202 + pax_open_kernel();
26203 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26204 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26205 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26206 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26207 + pax_close_kernel();
26208 +
26209 local_irq_restore(flags);
26210
26211 switch (return_code) {
26212 - case 0:
26213 - return address + entry;
26214 - case 0x80: /* Not present */
26215 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26216 - return 0;
26217 - default: /* Shouldn't happen */
26218 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26219 - service, return_code);
26220 + case 0: {
26221 + int cpu;
26222 + unsigned char flags;
26223 +
26224 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26225 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26226 + printk(KERN_WARNING "bios32_service: not valid\n");
26227 return 0;
26228 + }
26229 + address = address + PAGE_OFFSET;
26230 + length += 16UL; /* some BIOSs underreport this... */
26231 + flags = 4;
26232 + if (length >= 64*1024*1024) {
26233 + length >>= PAGE_SHIFT;
26234 + flags |= 8;
26235 + }
26236 +
26237 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26238 + gdt = get_cpu_gdt_table(cpu);
26239 + pack_descriptor(&d, address, length, 0x9b, flags);
26240 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26241 + pack_descriptor(&d, address, length, 0x93, flags);
26242 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26243 + }
26244 + return entry;
26245 + }
26246 + case 0x80: /* Not present */
26247 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26248 + return 0;
26249 + default: /* Shouldn't happen */
26250 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26251 + service, return_code);
26252 + return 0;
26253 }
26254 }
26255
26256 static struct {
26257 unsigned long address;
26258 unsigned short segment;
26259 -} pci_indirect = { 0, __KERNEL_CS };
26260 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26261
26262 -static int pci_bios_present;
26263 +static int pci_bios_present __read_only;
26264
26265 static int __devinit check_pcibios(void)
26266 {
26267 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26268 unsigned long flags, pcibios_entry;
26269
26270 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26271 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26272 + pci_indirect.address = pcibios_entry;
26273
26274 local_irq_save(flags);
26275 - __asm__(
26276 - "lcall *(%%edi); cld\n\t"
26277 + __asm__("movw %w6, %%ds\n\t"
26278 + "lcall *%%ss:(%%edi); cld\n\t"
26279 + "push %%ss\n\t"
26280 + "pop %%ds\n\t"
26281 "jc 1f\n\t"
26282 "xor %%ah, %%ah\n"
26283 "1:"
26284 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26285 "=b" (ebx),
26286 "=c" (ecx)
26287 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26288 - "D" (&pci_indirect)
26289 + "D" (&pci_indirect),
26290 + "r" (__PCIBIOS_DS)
26291 : "memory");
26292 local_irq_restore(flags);
26293
26294 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26295
26296 switch (len) {
26297 case 1:
26298 - __asm__("lcall *(%%esi); cld\n\t"
26299 + __asm__("movw %w6, %%ds\n\t"
26300 + "lcall *%%ss:(%%esi); cld\n\t"
26301 + "push %%ss\n\t"
26302 + "pop %%ds\n\t"
26303 "jc 1f\n\t"
26304 "xor %%ah, %%ah\n"
26305 "1:"
26306 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26307 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26308 "b" (bx),
26309 "D" ((long)reg),
26310 - "S" (&pci_indirect));
26311 + "S" (&pci_indirect),
26312 + "r" (__PCIBIOS_DS));
26313 /*
26314 * Zero-extend the result beyond 8 bits, do not trust the
26315 * BIOS having done it:
26316 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26317 *value &= 0xff;
26318 break;
26319 case 2:
26320 - __asm__("lcall *(%%esi); cld\n\t"
26321 + __asm__("movw %w6, %%ds\n\t"
26322 + "lcall *%%ss:(%%esi); cld\n\t"
26323 + "push %%ss\n\t"
26324 + "pop %%ds\n\t"
26325 "jc 1f\n\t"
26326 "xor %%ah, %%ah\n"
26327 "1:"
26328 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26329 : "1" (PCIBIOS_READ_CONFIG_WORD),
26330 "b" (bx),
26331 "D" ((long)reg),
26332 - "S" (&pci_indirect));
26333 + "S" (&pci_indirect),
26334 + "r" (__PCIBIOS_DS));
26335 /*
26336 * Zero-extend the result beyond 16 bits, do not trust the
26337 * BIOS having done it:
26338 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26339 *value &= 0xffff;
26340 break;
26341 case 4:
26342 - __asm__("lcall *(%%esi); cld\n\t"
26343 + __asm__("movw %w6, %%ds\n\t"
26344 + "lcall *%%ss:(%%esi); cld\n\t"
26345 + "push %%ss\n\t"
26346 + "pop %%ds\n\t"
26347 "jc 1f\n\t"
26348 "xor %%ah, %%ah\n"
26349 "1:"
26350 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26351 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26352 "b" (bx),
26353 "D" ((long)reg),
26354 - "S" (&pci_indirect));
26355 + "S" (&pci_indirect),
26356 + "r" (__PCIBIOS_DS));
26357 break;
26358 }
26359
26360 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26361
26362 switch (len) {
26363 case 1:
26364 - __asm__("lcall *(%%esi); cld\n\t"
26365 + __asm__("movw %w6, %%ds\n\t"
26366 + "lcall *%%ss:(%%esi); cld\n\t"
26367 + "push %%ss\n\t"
26368 + "pop %%ds\n\t"
26369 "jc 1f\n\t"
26370 "xor %%ah, %%ah\n"
26371 "1:"
26372 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26373 "c" (value),
26374 "b" (bx),
26375 "D" ((long)reg),
26376 - "S" (&pci_indirect));
26377 + "S" (&pci_indirect),
26378 + "r" (__PCIBIOS_DS));
26379 break;
26380 case 2:
26381 - __asm__("lcall *(%%esi); cld\n\t"
26382 + __asm__("movw %w6, %%ds\n\t"
26383 + "lcall *%%ss:(%%esi); cld\n\t"
26384 + "push %%ss\n\t"
26385 + "pop %%ds\n\t"
26386 "jc 1f\n\t"
26387 "xor %%ah, %%ah\n"
26388 "1:"
26389 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26390 "c" (value),
26391 "b" (bx),
26392 "D" ((long)reg),
26393 - "S" (&pci_indirect));
26394 + "S" (&pci_indirect),
26395 + "r" (__PCIBIOS_DS));
26396 break;
26397 case 4:
26398 - __asm__("lcall *(%%esi); cld\n\t"
26399 + __asm__("movw %w6, %%ds\n\t"
26400 + "lcall *%%ss:(%%esi); cld\n\t"
26401 + "push %%ss\n\t"
26402 + "pop %%ds\n\t"
26403 "jc 1f\n\t"
26404 "xor %%ah, %%ah\n"
26405 "1:"
26406 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26407 "c" (value),
26408 "b" (bx),
26409 "D" ((long)reg),
26410 - "S" (&pci_indirect));
26411 + "S" (&pci_indirect),
26412 + "r" (__PCIBIOS_DS));
26413 break;
26414 }
26415
26416 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26417
26418 DBG("PCI: Fetching IRQ routing table... ");
26419 __asm__("push %%es\n\t"
26420 + "movw %w8, %%ds\n\t"
26421 "push %%ds\n\t"
26422 "pop %%es\n\t"
26423 - "lcall *(%%esi); cld\n\t"
26424 + "lcall *%%ss:(%%esi); cld\n\t"
26425 "pop %%es\n\t"
26426 + "push %%ss\n\t"
26427 + "pop %%ds\n"
26428 "jc 1f\n\t"
26429 "xor %%ah, %%ah\n"
26430 "1:"
26431 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26432 "1" (0),
26433 "D" ((long) &opt),
26434 "S" (&pci_indirect),
26435 - "m" (opt)
26436 + "m" (opt),
26437 + "r" (__PCIBIOS_DS)
26438 : "memory");
26439 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26440 if (ret & 0xff00)
26441 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26442 {
26443 int ret;
26444
26445 - __asm__("lcall *(%%esi); cld\n\t"
26446 + __asm__("movw %w5, %%ds\n\t"
26447 + "lcall *%%ss:(%%esi); cld\n\t"
26448 + "push %%ss\n\t"
26449 + "pop %%ds\n"
26450 "jc 1f\n\t"
26451 "xor %%ah, %%ah\n"
26452 "1:"
26453 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26454 : "0" (PCIBIOS_SET_PCI_HW_INT),
26455 "b" ((dev->bus->number << 8) | dev->devfn),
26456 "c" ((irq << 8) | (pin + 10)),
26457 - "S" (&pci_indirect));
26458 + "S" (&pci_indirect),
26459 + "r" (__PCIBIOS_DS));
26460 return !(ret & 0xff00);
26461 }
26462 EXPORT_SYMBOL(pcibios_set_irq_routing);
26463 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26464 index 40e4469..1ab536e 100644
26465 --- a/arch/x86/platform/efi/efi_32.c
26466 +++ b/arch/x86/platform/efi/efi_32.c
26467 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26468 {
26469 struct desc_ptr gdt_descr;
26470
26471 +#ifdef CONFIG_PAX_KERNEXEC
26472 + struct desc_struct d;
26473 +#endif
26474 +
26475 local_irq_save(efi_rt_eflags);
26476
26477 load_cr3(initial_page_table);
26478 __flush_tlb_all();
26479
26480 +#ifdef CONFIG_PAX_KERNEXEC
26481 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26482 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26483 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26484 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26485 +#endif
26486 +
26487 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26488 gdt_descr.size = GDT_SIZE - 1;
26489 load_gdt(&gdt_descr);
26490 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26491 {
26492 struct desc_ptr gdt_descr;
26493
26494 +#ifdef CONFIG_PAX_KERNEXEC
26495 + struct desc_struct d;
26496 +
26497 + memset(&d, 0, sizeof d);
26498 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26499 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26500 +#endif
26501 +
26502 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26503 gdt_descr.size = GDT_SIZE - 1;
26504 load_gdt(&gdt_descr);
26505 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26506 index fbe66e6..eae5e38 100644
26507 --- a/arch/x86/platform/efi/efi_stub_32.S
26508 +++ b/arch/x86/platform/efi/efi_stub_32.S
26509 @@ -6,7 +6,9 @@
26510 */
26511
26512 #include <linux/linkage.h>
26513 +#include <linux/init.h>
26514 #include <asm/page_types.h>
26515 +#include <asm/segment.h>
26516
26517 /*
26518 * efi_call_phys(void *, ...) is a function with variable parameters.
26519 @@ -20,7 +22,7 @@
26520 * service functions will comply with gcc calling convention, too.
26521 */
26522
26523 -.text
26524 +__INIT
26525 ENTRY(efi_call_phys)
26526 /*
26527 * 0. The function can only be called in Linux kernel. So CS has been
26528 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
26529 * The mapping of lower virtual memory has been created in prelog and
26530 * epilog.
26531 */
26532 - movl $1f, %edx
26533 - subl $__PAGE_OFFSET, %edx
26534 - jmp *%edx
26535 +#ifdef CONFIG_PAX_KERNEXEC
26536 + movl $(__KERNEXEC_EFI_DS), %edx
26537 + mov %edx, %ds
26538 + mov %edx, %es
26539 + mov %edx, %ss
26540 + addl $2f,(1f)
26541 + ljmp *(1f)
26542 +
26543 +__INITDATA
26544 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
26545 +.previous
26546 +
26547 +2:
26548 + subl $2b,(1b)
26549 +#else
26550 + jmp 1f-__PAGE_OFFSET
26551 1:
26552 +#endif
26553
26554 /*
26555 * 2. Now on the top of stack is the return
26556 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
26557 * parameter 2, ..., param n. To make things easy, we save the return
26558 * address of efi_call_phys in a global variable.
26559 */
26560 - popl %edx
26561 - movl %edx, saved_return_addr
26562 - /* get the function pointer into ECX*/
26563 - popl %ecx
26564 - movl %ecx, efi_rt_function_ptr
26565 - movl $2f, %edx
26566 - subl $__PAGE_OFFSET, %edx
26567 - pushl %edx
26568 + popl (saved_return_addr)
26569 + popl (efi_rt_function_ptr)
26570
26571 /*
26572 * 3. Clear PG bit in %CR0.
26573 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
26574 /*
26575 * 5. Call the physical function.
26576 */
26577 - jmp *%ecx
26578 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26579
26580 -2:
26581 /*
26582 * 6. After EFI runtime service returns, control will return to
26583 * following instruction. We'd better readjust stack pointer first.
26584 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
26585 movl %cr0, %edx
26586 orl $0x80000000, %edx
26587 movl %edx, %cr0
26588 - jmp 1f
26589 -1:
26590 +
26591 /*
26592 * 8. Now restore the virtual mode from flat mode by
26593 * adding EIP with PAGE_OFFSET.
26594 */
26595 - movl $1f, %edx
26596 - jmp *%edx
26597 +#ifdef CONFIG_PAX_KERNEXEC
26598 + movl $(__KERNEL_DS), %edx
26599 + mov %edx, %ds
26600 + mov %edx, %es
26601 + mov %edx, %ss
26602 + ljmp $(__KERNEL_CS),$1f
26603 +#else
26604 + jmp 1f+__PAGE_OFFSET
26605 +#endif
26606 1:
26607
26608 /*
26609 * 9. Balance the stack. And because EAX contain the return value,
26610 * we'd better not clobber it.
26611 */
26612 - leal efi_rt_function_ptr, %edx
26613 - movl (%edx), %ecx
26614 - pushl %ecx
26615 + pushl (efi_rt_function_ptr)
26616
26617 /*
26618 - * 10. Push the saved return address onto the stack and return.
26619 + * 10. Return to the saved return address.
26620 */
26621 - leal saved_return_addr, %edx
26622 - movl (%edx), %ecx
26623 - pushl %ecx
26624 - ret
26625 + jmpl *(saved_return_addr)
26626 ENDPROC(efi_call_phys)
26627 .previous
26628
26629 -.data
26630 +__INITDATA
26631 saved_return_addr:
26632 .long 0
26633 efi_rt_function_ptr:
26634 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26635 index 4c07cca..2c8427d 100644
26636 --- a/arch/x86/platform/efi/efi_stub_64.S
26637 +++ b/arch/x86/platform/efi/efi_stub_64.S
26638 @@ -7,6 +7,7 @@
26639 */
26640
26641 #include <linux/linkage.h>
26642 +#include <asm/alternative-asm.h>
26643
26644 #define SAVE_XMM \
26645 mov %rsp, %rax; \
26646 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26647 call *%rdi
26648 addq $32, %rsp
26649 RESTORE_XMM
26650 + pax_force_retaddr 0, 1
26651 ret
26652 ENDPROC(efi_call0)
26653
26654 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26655 call *%rdi
26656 addq $32, %rsp
26657 RESTORE_XMM
26658 + pax_force_retaddr 0, 1
26659 ret
26660 ENDPROC(efi_call1)
26661
26662 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26663 call *%rdi
26664 addq $32, %rsp
26665 RESTORE_XMM
26666 + pax_force_retaddr 0, 1
26667 ret
26668 ENDPROC(efi_call2)
26669
26670 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26671 call *%rdi
26672 addq $32, %rsp
26673 RESTORE_XMM
26674 + pax_force_retaddr 0, 1
26675 ret
26676 ENDPROC(efi_call3)
26677
26678 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26679 call *%rdi
26680 addq $32, %rsp
26681 RESTORE_XMM
26682 + pax_force_retaddr 0, 1
26683 ret
26684 ENDPROC(efi_call4)
26685
26686 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26687 call *%rdi
26688 addq $48, %rsp
26689 RESTORE_XMM
26690 + pax_force_retaddr 0, 1
26691 ret
26692 ENDPROC(efi_call5)
26693
26694 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26695 call *%rdi
26696 addq $48, %rsp
26697 RESTORE_XMM
26698 + pax_force_retaddr 0, 1
26699 ret
26700 ENDPROC(efi_call6)
26701 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26702 index fd41a92..9c33628 100644
26703 --- a/arch/x86/platform/mrst/mrst.c
26704 +++ b/arch/x86/platform/mrst/mrst.c
26705 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26706 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26707 int sfi_mrtc_num;
26708
26709 -static void mrst_power_off(void)
26710 +static __noreturn void mrst_power_off(void)
26711 {
26712 + BUG();
26713 }
26714
26715 -static void mrst_reboot(void)
26716 +static __noreturn void mrst_reboot(void)
26717 {
26718 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26719 + BUG();
26720 }
26721
26722 /* parse all the mtimer info to a static mtimer array */
26723 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26724 index 218cdb1..fd55c08 100644
26725 --- a/arch/x86/power/cpu.c
26726 +++ b/arch/x86/power/cpu.c
26727 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26728 static void fix_processor_context(void)
26729 {
26730 int cpu = smp_processor_id();
26731 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26732 + struct tss_struct *t = init_tss + cpu;
26733
26734 set_tss_desc(cpu, t); /*
26735 * This just modifies memory; should not be
26736 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26737 */
26738
26739 #ifdef CONFIG_X86_64
26740 + pax_open_kernel();
26741 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26742 + pax_close_kernel();
26743
26744 syscall_init(); /* This sets MSR_*STAR and related */
26745 #endif
26746 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
26747 index cbca565..bae7133 100644
26748 --- a/arch/x86/realmode/init.c
26749 +++ b/arch/x86/realmode/init.c
26750 @@ -62,7 +62,13 @@ void __init setup_real_mode(void)
26751 __va(real_mode_header->trampoline_header);
26752
26753 #ifdef CONFIG_X86_32
26754 - trampoline_header->start = __pa(startup_32_smp);
26755 + trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
26756 +
26757 +#ifdef CONFIG_PAX_KERNEXEC
26758 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
26759 +#endif
26760 +
26761 + trampoline_header->boot_cs = __BOOT_CS;
26762 trampoline_header->gdt_limit = __BOOT_DS + 7;
26763 trampoline_header->gdt_base = __pa(boot_gdt);
26764 #else
26765 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
26766 index 5b84a2d..be46798 100644
26767 --- a/arch/x86/realmode/rm/Makefile
26768 +++ b/arch/x86/realmode/rm/Makefile
26769 @@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
26770 $(call cc-option, -fno-unit-at-a-time)) \
26771 $(call cc-option, -fno-stack-protector) \
26772 $(call cc-option, -mpreferred-stack-boundary=2)
26773 +ifdef CONSTIFY_PLUGIN
26774 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
26775 +endif
26776 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
26777 GCOV_PROFILE := n
26778 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
26779 index c1b2791..f9e31c7 100644
26780 --- a/arch/x86/realmode/rm/trampoline_32.S
26781 +++ b/arch/x86/realmode/rm/trampoline_32.S
26782 @@ -25,6 +25,12 @@
26783 #include <asm/page_types.h>
26784 #include "realmode.h"
26785
26786 +#ifdef CONFIG_PAX_KERNEXEC
26787 +#define ta(X) (X)
26788 +#else
26789 +#define ta(X) (pa_ ## X)
26790 +#endif
26791 +
26792 .text
26793 .code16
26794
26795 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
26796
26797 cli # We should be safe anyway
26798
26799 - movl tr_start, %eax # where we need to go
26800 -
26801 movl $0xA5A5A5A5, trampoline_status
26802 # write marker for master knows we're running
26803
26804 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
26805 movw $1, %dx # protected mode (PE) bit
26806 lmsw %dx # into protected mode
26807
26808 - ljmpl $__BOOT_CS, $pa_startup_32
26809 + ljmpl *(trampoline_header)
26810
26811 .section ".text32","ax"
26812 .code32
26813 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
26814 .balign 8
26815 GLOBAL(trampoline_header)
26816 tr_start: .space 4
26817 - tr_gdt_pad: .space 2
26818 + tr_boot_cs: .space 2
26819 tr_gdt: .space 6
26820 END(trampoline_header)
26821
26822 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
26823 index bb360dc..3e5945f 100644
26824 --- a/arch/x86/realmode/rm/trampoline_64.S
26825 +++ b/arch/x86/realmode/rm/trampoline_64.S
26826 @@ -107,7 +107,7 @@ ENTRY(startup_32)
26827 wrmsr
26828
26829 # Enable paging and in turn activate Long Mode
26830 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
26831 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
26832 movl %eax, %cr0
26833
26834 /*
26835 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26836 index 5a1847d..deccb30 100644
26837 --- a/arch/x86/tools/relocs.c
26838 +++ b/arch/x86/tools/relocs.c
26839 @@ -12,10 +12,13 @@
26840 #include <regex.h>
26841 #include <tools/le_byteshift.h>
26842
26843 +#include "../../../include/generated/autoconf.h"
26844 +
26845 static void die(char *fmt, ...);
26846
26847 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26848 static Elf32_Ehdr ehdr;
26849 +static Elf32_Phdr *phdr;
26850 static unsigned long reloc_count, reloc_idx;
26851 static unsigned long *relocs;
26852 static unsigned long reloc16_count, reloc16_idx;
26853 @@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
26854 }
26855 }
26856
26857 +static void read_phdrs(FILE *fp)
26858 +{
26859 + unsigned int i;
26860 +
26861 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26862 + if (!phdr) {
26863 + die("Unable to allocate %d program headers\n",
26864 + ehdr.e_phnum);
26865 + }
26866 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26867 + die("Seek to %d failed: %s\n",
26868 + ehdr.e_phoff, strerror(errno));
26869 + }
26870 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26871 + die("Cannot read ELF program headers: %s\n",
26872 + strerror(errno));
26873 + }
26874 + for(i = 0; i < ehdr.e_phnum; i++) {
26875 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26876 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26877 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26878 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26879 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26880 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26881 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26882 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26883 + }
26884 +
26885 +}
26886 +
26887 static void read_shdrs(FILE *fp)
26888 {
26889 - int i;
26890 + unsigned int i;
26891 Elf32_Shdr shdr;
26892
26893 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26894 @@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
26895
26896 static void read_strtabs(FILE *fp)
26897 {
26898 - int i;
26899 + unsigned int i;
26900 for (i = 0; i < ehdr.e_shnum; i++) {
26901 struct section *sec = &secs[i];
26902 if (sec->shdr.sh_type != SHT_STRTAB) {
26903 @@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
26904
26905 static void read_symtabs(FILE *fp)
26906 {
26907 - int i,j;
26908 + unsigned int i,j;
26909 for (i = 0; i < ehdr.e_shnum; i++) {
26910 struct section *sec = &secs[i];
26911 if (sec->shdr.sh_type != SHT_SYMTAB) {
26912 @@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
26913 }
26914
26915
26916 -static void read_relocs(FILE *fp)
26917 +static void read_relocs(FILE *fp, int use_real_mode)
26918 {
26919 - int i,j;
26920 + unsigned int i,j;
26921 + uint32_t base;
26922 +
26923 for (i = 0; i < ehdr.e_shnum; i++) {
26924 struct section *sec = &secs[i];
26925 if (sec->shdr.sh_type != SHT_REL) {
26926 @@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
26927 die("Cannot read symbol table: %s\n",
26928 strerror(errno));
26929 }
26930 + base = 0;
26931 +
26932 +#ifdef CONFIG_X86_32
26933 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
26934 + if (phdr[j].p_type != PT_LOAD )
26935 + continue;
26936 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26937 + continue;
26938 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26939 + break;
26940 + }
26941 +#endif
26942 +
26943 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26944 Elf32_Rel *rel = &sec->reltab[j];
26945 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26946 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26947 rel->r_info = elf32_to_cpu(rel->r_info);
26948 }
26949 }
26950 @@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
26951
26952 static void print_absolute_symbols(void)
26953 {
26954 - int i;
26955 + unsigned int i;
26956 printf("Absolute symbols\n");
26957 printf(" Num: Value Size Type Bind Visibility Name\n");
26958 for (i = 0; i < ehdr.e_shnum; i++) {
26959 struct section *sec = &secs[i];
26960 char *sym_strtab;
26961 - int j;
26962 + unsigned int j;
26963
26964 if (sec->shdr.sh_type != SHT_SYMTAB) {
26965 continue;
26966 @@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
26967
26968 static void print_absolute_relocs(void)
26969 {
26970 - int i, printed = 0;
26971 + unsigned int i, printed = 0;
26972
26973 for (i = 0; i < ehdr.e_shnum; i++) {
26974 struct section *sec = &secs[i];
26975 struct section *sec_applies, *sec_symtab;
26976 char *sym_strtab;
26977 Elf32_Sym *sh_symtab;
26978 - int j;
26979 + unsigned int j;
26980 if (sec->shdr.sh_type != SHT_REL) {
26981 continue;
26982 }
26983 @@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
26984 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26985 int use_real_mode)
26986 {
26987 - int i;
26988 + unsigned int i;
26989 /* Walk through the relocations */
26990 for (i = 0; i < ehdr.e_shnum; i++) {
26991 char *sym_strtab;
26992 Elf32_Sym *sh_symtab;
26993 struct section *sec_applies, *sec_symtab;
26994 - int j;
26995 + unsigned int j;
26996 struct section *sec = &secs[i];
26997
26998 if (sec->shdr.sh_type != SHT_REL) {
26999 @@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
27000 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
27001 r_type = ELF32_R_TYPE(rel->r_info);
27002
27003 + if (!use_real_mode) {
27004 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
27005 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
27006 + continue;
27007 +
27008 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
27009 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
27010 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
27011 + continue;
27012 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
27013 + continue;
27014 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
27015 + continue;
27016 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
27017 + continue;
27018 +#endif
27019 + }
27020 +
27021 shn_abs = sym->st_shndx == SHN_ABS;
27022
27023 switch (r_type) {
27024 @@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
27025
27026 static void emit_relocs(int as_text, int use_real_mode)
27027 {
27028 - int i;
27029 + unsigned int i;
27030 /* Count how many relocations I have and allocate space for them. */
27031 reloc_count = 0;
27032 walk_relocs(count_reloc, use_real_mode);
27033 @@ -808,10 +874,11 @@ int main(int argc, char **argv)
27034 fname, strerror(errno));
27035 }
27036 read_ehdr(fp);
27037 + read_phdrs(fp);
27038 read_shdrs(fp);
27039 read_strtabs(fp);
27040 read_symtabs(fp);
27041 - read_relocs(fp);
27042 + read_relocs(fp, use_real_mode);
27043 if (show_absolute_syms) {
27044 print_absolute_symbols();
27045 return 0;
27046 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27047 index fd14be1..e3c79c0 100644
27048 --- a/arch/x86/vdso/Makefile
27049 +++ b/arch/x86/vdso/Makefile
27050 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
27051 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
27052 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
27053
27054 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27055 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27056 GCOV_PROFILE := n
27057
27058 #
27059 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27060 index 66e6d93..587f435 100644
27061 --- a/arch/x86/vdso/vdso32-setup.c
27062 +++ b/arch/x86/vdso/vdso32-setup.c
27063 @@ -25,6 +25,7 @@
27064 #include <asm/tlbflush.h>
27065 #include <asm/vdso.h>
27066 #include <asm/proto.h>
27067 +#include <asm/mman.h>
27068
27069 enum {
27070 VDSO_DISABLED = 0,
27071 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27072 void enable_sep_cpu(void)
27073 {
27074 int cpu = get_cpu();
27075 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
27076 + struct tss_struct *tss = init_tss + cpu;
27077
27078 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27079 put_cpu();
27080 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27081 gate_vma.vm_start = FIXADDR_USER_START;
27082 gate_vma.vm_end = FIXADDR_USER_END;
27083 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27084 - gate_vma.vm_page_prot = __P101;
27085 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27086
27087 return 0;
27088 }
27089 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27090 if (compat)
27091 addr = VDSO_HIGH_BASE;
27092 else {
27093 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27094 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27095 if (IS_ERR_VALUE(addr)) {
27096 ret = addr;
27097 goto up_fail;
27098 }
27099 }
27100
27101 - current->mm->context.vdso = (void *)addr;
27102 + current->mm->context.vdso = addr;
27103
27104 if (compat_uses_vma || !compat) {
27105 /*
27106 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27107 }
27108
27109 current_thread_info()->sysenter_return =
27110 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27111 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27112
27113 up_fail:
27114 if (ret)
27115 - current->mm->context.vdso = NULL;
27116 + current->mm->context.vdso = 0;
27117
27118 up_write(&mm->mmap_sem);
27119
27120 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27121
27122 const char *arch_vma_name(struct vm_area_struct *vma)
27123 {
27124 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27125 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27126 return "[vdso]";
27127 +
27128 +#ifdef CONFIG_PAX_SEGMEXEC
27129 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27130 + return "[vdso]";
27131 +#endif
27132 +
27133 return NULL;
27134 }
27135
27136 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27137 * Check to see if the corresponding task was created in compat vdso
27138 * mode.
27139 */
27140 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27141 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27142 return &gate_vma;
27143 return NULL;
27144 }
27145 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27146 index 00aaf04..4a26505 100644
27147 --- a/arch/x86/vdso/vma.c
27148 +++ b/arch/x86/vdso/vma.c
27149 @@ -16,8 +16,6 @@
27150 #include <asm/vdso.h>
27151 #include <asm/page.h>
27152
27153 -unsigned int __read_mostly vdso_enabled = 1;
27154 -
27155 extern char vdso_start[], vdso_end[];
27156 extern unsigned short vdso_sync_cpuid;
27157
27158 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27159 * unaligned here as a result of stack start randomization.
27160 */
27161 addr = PAGE_ALIGN(addr);
27162 - addr = align_addr(addr, NULL, ALIGN_VDSO);
27163
27164 return addr;
27165 }
27166 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27167 unsigned size)
27168 {
27169 struct mm_struct *mm = current->mm;
27170 - unsigned long addr;
27171 + unsigned long addr = 0;
27172 int ret;
27173
27174 - if (!vdso_enabled)
27175 - return 0;
27176 -
27177 down_write(&mm->mmap_sem);
27178 +
27179 +#ifdef CONFIG_PAX_RANDMMAP
27180 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27181 +#endif
27182 +
27183 addr = vdso_addr(mm->start_stack, size);
27184 + addr = align_addr(addr, NULL, ALIGN_VDSO);
27185 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27186 if (IS_ERR_VALUE(addr)) {
27187 ret = addr;
27188 goto up_fail;
27189 }
27190
27191 - current->mm->context.vdso = (void *)addr;
27192 + mm->context.vdso = addr;
27193
27194 ret = install_special_mapping(mm, addr, size,
27195 VM_READ|VM_EXEC|
27196 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27197 pages);
27198 - if (ret) {
27199 - current->mm->context.vdso = NULL;
27200 - goto up_fail;
27201 - }
27202 + if (ret)
27203 + mm->context.vdso = 0;
27204
27205 up_fail:
27206 up_write(&mm->mmap_sem);
27207 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27208 vdsox32_size);
27209 }
27210 #endif
27211 -
27212 -static __init int vdso_setup(char *s)
27213 -{
27214 - vdso_enabled = simple_strtoul(s, NULL, 0);
27215 - return 0;
27216 -}
27217 -__setup("vdso=", vdso_setup);
27218 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27219 index 405307f..7ecf7b0 100644
27220 --- a/arch/x86/xen/enlighten.c
27221 +++ b/arch/x86/xen/enlighten.c
27222 @@ -97,8 +97,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27223
27224 struct shared_info xen_dummy_shared_info;
27225
27226 -void *xen_initial_gdt;
27227 -
27228 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27229 __read_mostly int xen_have_vector_callback;
27230 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27231 @@ -885,21 +883,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
27232
27233 static void set_xen_basic_apic_ops(void)
27234 {
27235 - apic->read = xen_apic_read;
27236 - apic->write = xen_apic_write;
27237 - apic->icr_read = xen_apic_icr_read;
27238 - apic->icr_write = xen_apic_icr_write;
27239 - apic->wait_icr_idle = xen_apic_wait_icr_idle;
27240 - apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
27241 - apic->set_apic_id = xen_set_apic_id;
27242 - apic->get_apic_id = xen_get_apic_id;
27243 + *(void **)&apic->read = xen_apic_read;
27244 + *(void **)&apic->write = xen_apic_write;
27245 + *(void **)&apic->icr_read = xen_apic_icr_read;
27246 + *(void **)&apic->icr_write = xen_apic_icr_write;
27247 + *(void **)&apic->wait_icr_idle = xen_apic_wait_icr_idle;
27248 + *(void **)&apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
27249 + *(void **)&apic->set_apic_id = xen_set_apic_id;
27250 + *(void **)&apic->get_apic_id = xen_get_apic_id;
27251
27252 #ifdef CONFIG_SMP
27253 - apic->send_IPI_allbutself = xen_send_IPI_allbutself;
27254 - apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
27255 - apic->send_IPI_mask = xen_send_IPI_mask;
27256 - apic->send_IPI_all = xen_send_IPI_all;
27257 - apic->send_IPI_self = xen_send_IPI_self;
27258 + *(void **)&apic->send_IPI_allbutself = xen_send_IPI_allbutself;
27259 + *(void **)&apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
27260 + *(void **)&apic->send_IPI_mask = xen_send_IPI_mask;
27261 + *(void **)&apic->send_IPI_all = xen_send_IPI_all;
27262 + *(void **)&apic->send_IPI_self = xen_send_IPI_self;
27263 #endif
27264 }
27265
27266 @@ -1175,30 +1173,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27267 #endif
27268 };
27269
27270 -static void xen_reboot(int reason)
27271 +static __noreturn void xen_reboot(int reason)
27272 {
27273 struct sched_shutdown r = { .reason = reason };
27274
27275 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27276 - BUG();
27277 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27278 + BUG();
27279 }
27280
27281 -static void xen_restart(char *msg)
27282 +static __noreturn void xen_restart(char *msg)
27283 {
27284 xen_reboot(SHUTDOWN_reboot);
27285 }
27286
27287 -static void xen_emergency_restart(void)
27288 +static __noreturn void xen_emergency_restart(void)
27289 {
27290 xen_reboot(SHUTDOWN_reboot);
27291 }
27292
27293 -static void xen_machine_halt(void)
27294 +static __noreturn void xen_machine_halt(void)
27295 {
27296 xen_reboot(SHUTDOWN_poweroff);
27297 }
27298
27299 -static void xen_machine_power_off(void)
27300 +static __noreturn void xen_machine_power_off(void)
27301 {
27302 if (pm_power_off)
27303 pm_power_off();
27304 @@ -1301,7 +1299,17 @@ asmlinkage void __init xen_start_kernel(void)
27305 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27306
27307 /* Work out if we support NX */
27308 - x86_configure_nx();
27309 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27310 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27311 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27312 + unsigned l, h;
27313 +
27314 + __supported_pte_mask |= _PAGE_NX;
27315 + rdmsr(MSR_EFER, l, h);
27316 + l |= EFER_NX;
27317 + wrmsr(MSR_EFER, l, h);
27318 + }
27319 +#endif
27320
27321 xen_setup_features();
27322
27323 @@ -1332,13 +1340,6 @@ asmlinkage void __init xen_start_kernel(void)
27324
27325 machine_ops = xen_machine_ops;
27326
27327 - /*
27328 - * The only reliable way to retain the initial address of the
27329 - * percpu gdt_page is to remember it here, so we can go and
27330 - * mark it RW later, when the initial percpu area is freed.
27331 - */
27332 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27333 -
27334 xen_smp_init();
27335
27336 #ifdef CONFIG_ACPI_NUMA
27337 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27338 index 3a73785..0d30df2 100644
27339 --- a/arch/x86/xen/mmu.c
27340 +++ b/arch/x86/xen/mmu.c
27341 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27342 convert_pfn_mfn(init_level4_pgt);
27343 convert_pfn_mfn(level3_ident_pgt);
27344 convert_pfn_mfn(level3_kernel_pgt);
27345 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27346 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27347 + convert_pfn_mfn(level3_vmemmap_pgt);
27348
27349 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27350 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27351 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27352 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27353 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27354 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27355 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27356 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27357 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27358 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27359 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27360 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27361 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27362
27363 @@ -1940,6 +1947,7 @@ static void __init xen_post_allocator_init(void)
27364 pv_mmu_ops.set_pud = xen_set_pud;
27365 #if PAGETABLE_LEVELS == 4
27366 pv_mmu_ops.set_pgd = xen_set_pgd;
27367 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27368 #endif
27369
27370 /* This will work as long as patching hasn't happened yet
27371 @@ -2021,6 +2029,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27372 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27373 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27374 .set_pgd = xen_set_pgd_hyper,
27375 + .set_pgd_batched = xen_set_pgd_hyper,
27376
27377 .alloc_pud = xen_alloc_pmd_init,
27378 .release_pud = xen_release_pmd_init,
27379 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27380 index afb250d..627075f 100644
27381 --- a/arch/x86/xen/smp.c
27382 +++ b/arch/x86/xen/smp.c
27383 @@ -231,11 +231,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27384 {
27385 BUG_ON(smp_processor_id() != 0);
27386 native_smp_prepare_boot_cpu();
27387 -
27388 - /* We've switched to the "real" per-cpu gdt, so make sure the
27389 - old memory can be recycled */
27390 - make_lowmem_page_readwrite(xen_initial_gdt);
27391 -
27392 xen_filter_cpu_maps();
27393 xen_setup_vcpu_info_placement();
27394 }
27395 @@ -302,12 +297,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27396 gdt = get_cpu_gdt_table(cpu);
27397
27398 ctxt->flags = VGCF_IN_KERNEL;
27399 - ctxt->user_regs.ds = __USER_DS;
27400 - ctxt->user_regs.es = __USER_DS;
27401 + ctxt->user_regs.ds = __KERNEL_DS;
27402 + ctxt->user_regs.es = __KERNEL_DS;
27403 ctxt->user_regs.ss = __KERNEL_DS;
27404 #ifdef CONFIG_X86_32
27405 ctxt->user_regs.fs = __KERNEL_PERCPU;
27406 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27407 + savesegment(gs, ctxt->user_regs.gs);
27408 #else
27409 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27410 #endif
27411 @@ -357,13 +352,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
27412 int rc;
27413
27414 per_cpu(current_task, cpu) = idle;
27415 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27416 #ifdef CONFIG_X86_32
27417 irq_ctx_init(cpu);
27418 #else
27419 clear_tsk_thread_flag(idle, TIF_FORK);
27420 - per_cpu(kernel_stack, cpu) =
27421 - (unsigned long)task_stack_page(idle) -
27422 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27423 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27424 #endif
27425 xen_setup_runstate_info(cpu);
27426 xen_setup_timer(cpu);
27427 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27428 index f9643fc..602e8af 100644
27429 --- a/arch/x86/xen/xen-asm_32.S
27430 +++ b/arch/x86/xen/xen-asm_32.S
27431 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
27432 ESP_OFFSET=4 # bytes pushed onto stack
27433
27434 /*
27435 - * Store vcpu_info pointer for easy access. Do it this way to
27436 - * avoid having to reload %fs
27437 + * Store vcpu_info pointer for easy access.
27438 */
27439 #ifdef CONFIG_SMP
27440 - GET_THREAD_INFO(%eax)
27441 - movl TI_cpu(%eax), %eax
27442 - movl __per_cpu_offset(,%eax,4), %eax
27443 - mov xen_vcpu(%eax), %eax
27444 + push %fs
27445 + mov $(__KERNEL_PERCPU), %eax
27446 + mov %eax, %fs
27447 + mov PER_CPU_VAR(xen_vcpu), %eax
27448 + pop %fs
27449 #else
27450 movl xen_vcpu, %eax
27451 #endif
27452 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27453 index aaa7291..3f77960 100644
27454 --- a/arch/x86/xen/xen-head.S
27455 +++ b/arch/x86/xen/xen-head.S
27456 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27457 #ifdef CONFIG_X86_32
27458 mov %esi,xen_start_info
27459 mov $init_thread_union+THREAD_SIZE,%esp
27460 +#ifdef CONFIG_SMP
27461 + movl $cpu_gdt_table,%edi
27462 + movl $__per_cpu_load,%eax
27463 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27464 + rorl $16,%eax
27465 + movb %al,__KERNEL_PERCPU + 4(%edi)
27466 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27467 + movl $__per_cpu_end - 1,%eax
27468 + subl $__per_cpu_start,%eax
27469 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27470 +#endif
27471 #else
27472 mov %rsi,xen_start_info
27473 mov $init_thread_union+THREAD_SIZE,%rsp
27474 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27475 index 202d4c1..99b072a 100644
27476 --- a/arch/x86/xen/xen-ops.h
27477 +++ b/arch/x86/xen/xen-ops.h
27478 @@ -10,8 +10,6 @@
27479 extern const char xen_hypervisor_callback[];
27480 extern const char xen_failsafe_callback[];
27481
27482 -extern void *xen_initial_gdt;
27483 -
27484 struct trap_info;
27485 void xen_copy_trap_info(struct trap_info *traps);
27486
27487 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27488 index 525bd3d..ef888b1 100644
27489 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27490 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27491 @@ -119,9 +119,9 @@
27492 ----------------------------------------------------------------------*/
27493
27494 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27495 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27496 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27497 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27498 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27499
27500 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27501 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27502 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27503 index 2f33760..835e50a 100644
27504 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27505 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27506 @@ -11,6 +11,7 @@
27507 #ifndef _XTENSA_CORE_H
27508 #define _XTENSA_CORE_H
27509
27510 +#include <linux/const.h>
27511
27512 /****************************************************************************
27513 Parameters Useful for Any Code, USER or PRIVILEGED
27514 @@ -112,9 +113,9 @@
27515 ----------------------------------------------------------------------*/
27516
27517 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27518 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27519 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27520 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27521 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27522
27523 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27524 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27525 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27526 index af00795..2bb8105 100644
27527 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27528 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27529 @@ -11,6 +11,7 @@
27530 #ifndef _XTENSA_CORE_CONFIGURATION_H
27531 #define _XTENSA_CORE_CONFIGURATION_H
27532
27533 +#include <linux/const.h>
27534
27535 /****************************************************************************
27536 Parameters Useful for Any Code, USER or PRIVILEGED
27537 @@ -118,9 +119,9 @@
27538 ----------------------------------------------------------------------*/
27539
27540 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27541 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27542 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27543 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27544 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27545
27546 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27547 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27548 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27549 index 58916af..9cb880b 100644
27550 --- a/block/blk-iopoll.c
27551 +++ b/block/blk-iopoll.c
27552 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27553 }
27554 EXPORT_SYMBOL(blk_iopoll_complete);
27555
27556 -static void blk_iopoll_softirq(struct softirq_action *h)
27557 +static void blk_iopoll_softirq(void)
27558 {
27559 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27560 int rearm = 0, budget = blk_iopoll_budget;
27561 diff --git a/block/blk-map.c b/block/blk-map.c
27562 index 623e1cd..ca1e109 100644
27563 --- a/block/blk-map.c
27564 +++ b/block/blk-map.c
27565 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27566 if (!len || !kbuf)
27567 return -EINVAL;
27568
27569 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27570 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27571 if (do_copy)
27572 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27573 else
27574 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27575 index 467c8de..4bddc6d 100644
27576 --- a/block/blk-softirq.c
27577 +++ b/block/blk-softirq.c
27578 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27579 * Softirq action handler - move entries to local list and loop over them
27580 * while passing them to the queue registered handler.
27581 */
27582 -static void blk_done_softirq(struct softirq_action *h)
27583 +static void blk_done_softirq(void)
27584 {
27585 struct list_head *cpu_list, local_list;
27586
27587 diff --git a/block/bsg.c b/block/bsg.c
27588 index ff64ae3..593560c 100644
27589 --- a/block/bsg.c
27590 +++ b/block/bsg.c
27591 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27592 struct sg_io_v4 *hdr, struct bsg_device *bd,
27593 fmode_t has_write_perm)
27594 {
27595 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27596 + unsigned char *cmdptr;
27597 +
27598 if (hdr->request_len > BLK_MAX_CDB) {
27599 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27600 if (!rq->cmd)
27601 return -ENOMEM;
27602 - }
27603 + cmdptr = rq->cmd;
27604 + } else
27605 + cmdptr = tmpcmd;
27606
27607 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27608 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27609 hdr->request_len))
27610 return -EFAULT;
27611
27612 + if (cmdptr != rq->cmd)
27613 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27614 +
27615 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27616 if (blk_verify_command(rq->cmd, has_write_perm))
27617 return -EPERM;
27618 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27619 index 7c668c8..db3521c 100644
27620 --- a/block/compat_ioctl.c
27621 +++ b/block/compat_ioctl.c
27622 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27623 err |= __get_user(f->spec1, &uf->spec1);
27624 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27625 err |= __get_user(name, &uf->name);
27626 - f->name = compat_ptr(name);
27627 + f->name = (void __force_kernel *)compat_ptr(name);
27628 if (err) {
27629 err = -EFAULT;
27630 goto out;
27631 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27632 index 6296b40..417c00f 100644
27633 --- a/block/partitions/efi.c
27634 +++ b/block/partitions/efi.c
27635 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27636 if (!gpt)
27637 return NULL;
27638
27639 + if (!le32_to_cpu(gpt->num_partition_entries))
27640 + return NULL;
27641 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27642 + if (!pte)
27643 + return NULL;
27644 +
27645 count = le32_to_cpu(gpt->num_partition_entries) *
27646 le32_to_cpu(gpt->sizeof_partition_entry);
27647 - if (!count)
27648 - return NULL;
27649 - pte = kzalloc(count, GFP_KERNEL);
27650 - if (!pte)
27651 - return NULL;
27652 -
27653 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27654 (u8 *) pte,
27655 count) < count) {
27656 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27657 index 9a87daa..fb17486 100644
27658 --- a/block/scsi_ioctl.c
27659 +++ b/block/scsi_ioctl.c
27660 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27661 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27662 struct sg_io_hdr *hdr, fmode_t mode)
27663 {
27664 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27665 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27666 + unsigned char *cmdptr;
27667 +
27668 + if (rq->cmd != rq->__cmd)
27669 + cmdptr = rq->cmd;
27670 + else
27671 + cmdptr = tmpcmd;
27672 +
27673 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27674 return -EFAULT;
27675 +
27676 + if (cmdptr != rq->cmd)
27677 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27678 +
27679 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27680 return -EPERM;
27681
27682 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27683 int err;
27684 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27685 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27686 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27687 + unsigned char *cmdptr;
27688
27689 if (!sic)
27690 return -EINVAL;
27691 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27692 */
27693 err = -EFAULT;
27694 rq->cmd_len = cmdlen;
27695 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27696 +
27697 + if (rq->cmd != rq->__cmd)
27698 + cmdptr = rq->cmd;
27699 + else
27700 + cmdptr = tmpcmd;
27701 +
27702 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27703 goto error;
27704
27705 + if (rq->cmd != cmdptr)
27706 + memcpy(rq->cmd, cmdptr, cmdlen);
27707 +
27708 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27709 goto error;
27710
27711 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27712 index 671d4d6..5f24030 100644
27713 --- a/crypto/cryptd.c
27714 +++ b/crypto/cryptd.c
27715 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27716
27717 struct cryptd_blkcipher_request_ctx {
27718 crypto_completion_t complete;
27719 -};
27720 +} __no_const;
27721
27722 struct cryptd_hash_ctx {
27723 struct crypto_shash *child;
27724 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27725
27726 struct cryptd_aead_request_ctx {
27727 crypto_completion_t complete;
27728 -};
27729 +} __no_const;
27730
27731 static void cryptd_queue_worker(struct work_struct *work);
27732
27733 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27734 index e6defd8..c26a225 100644
27735 --- a/drivers/acpi/apei/cper.c
27736 +++ b/drivers/acpi/apei/cper.c
27737 @@ -38,12 +38,12 @@
27738 */
27739 u64 cper_next_record_id(void)
27740 {
27741 - static atomic64_t seq;
27742 + static atomic64_unchecked_t seq;
27743
27744 - if (!atomic64_read(&seq))
27745 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27746 + if (!atomic64_read_unchecked(&seq))
27747 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27748
27749 - return atomic64_inc_return(&seq);
27750 + return atomic64_inc_return_unchecked(&seq);
27751 }
27752 EXPORT_SYMBOL_GPL(cper_next_record_id);
27753
27754 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27755 index 7586544..636a2f0 100644
27756 --- a/drivers/acpi/ec_sys.c
27757 +++ b/drivers/acpi/ec_sys.c
27758 @@ -12,6 +12,7 @@
27759 #include <linux/acpi.h>
27760 #include <linux/debugfs.h>
27761 #include <linux/module.h>
27762 +#include <linux/uaccess.h>
27763 #include "internal.h"
27764
27765 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27766 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27767 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27768 */
27769 unsigned int size = EC_SPACE_SIZE;
27770 - u8 *data = (u8 *) buf;
27771 + u8 data;
27772 loff_t init_off = *off;
27773 int err = 0;
27774
27775 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27776 size = count;
27777
27778 while (size) {
27779 - err = ec_read(*off, &data[*off - init_off]);
27780 + err = ec_read(*off, &data);
27781 if (err)
27782 return err;
27783 + if (put_user(data, &buf[*off - init_off]))
27784 + return -EFAULT;
27785 *off += 1;
27786 size--;
27787 }
27788 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27789
27790 unsigned int size = count;
27791 loff_t init_off = *off;
27792 - u8 *data = (u8 *) buf;
27793 int err = 0;
27794
27795 if (*off >= EC_SPACE_SIZE)
27796 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27797 }
27798
27799 while (size) {
27800 - u8 byte_write = data[*off - init_off];
27801 + u8 byte_write;
27802 + if (get_user(byte_write, &buf[*off - init_off]))
27803 + return -EFAULT;
27804 err = ec_write(*off, byte_write);
27805 if (err)
27806 return err;
27807 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27808 index 251c7b62..000462d 100644
27809 --- a/drivers/acpi/proc.c
27810 +++ b/drivers/acpi/proc.c
27811 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27812 size_t count, loff_t * ppos)
27813 {
27814 struct list_head *node, *next;
27815 - char strbuf[5];
27816 - char str[5] = "";
27817 - unsigned int len = count;
27818 + char strbuf[5] = {0};
27819
27820 - if (len > 4)
27821 - len = 4;
27822 - if (len < 0)
27823 + if (count > 4)
27824 + count = 4;
27825 + if (copy_from_user(strbuf, buffer, count))
27826 return -EFAULT;
27827 -
27828 - if (copy_from_user(strbuf, buffer, len))
27829 - return -EFAULT;
27830 - strbuf[len] = '\0';
27831 - sscanf(strbuf, "%s", str);
27832 + strbuf[count] = '\0';
27833
27834 mutex_lock(&acpi_device_lock);
27835 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27836 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27837 if (!dev->wakeup.flags.valid)
27838 continue;
27839
27840 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27841 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27842 if (device_can_wakeup(&dev->dev)) {
27843 bool enable = !device_may_wakeup(&dev->dev);
27844 device_set_wakeup_enable(&dev->dev, enable);
27845 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27846 index bbac51e..4c094f9 100644
27847 --- a/drivers/acpi/processor_driver.c
27848 +++ b/drivers/acpi/processor_driver.c
27849 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27850 return 0;
27851 #endif
27852
27853 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27854 + BUG_ON(pr->id >= nr_cpu_ids);
27855
27856 /*
27857 * Buggy BIOS check
27858 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27859 index 71e8385..98d1e14 100644
27860 --- a/drivers/ata/libata-core.c
27861 +++ b/drivers/ata/libata-core.c
27862 @@ -4744,7 +4744,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27863 struct ata_port *ap;
27864 unsigned int tag;
27865
27866 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27867 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27868 ap = qc->ap;
27869
27870 qc->flags = 0;
27871 @@ -4760,7 +4760,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27872 struct ata_port *ap;
27873 struct ata_link *link;
27874
27875 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27876 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27877 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27878 ap = qc->ap;
27879 link = qc->dev->link;
27880 @@ -5824,6 +5824,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27881 return;
27882
27883 spin_lock(&lock);
27884 + pax_open_kernel();
27885
27886 for (cur = ops->inherits; cur; cur = cur->inherits) {
27887 void **inherit = (void **)cur;
27888 @@ -5837,8 +5838,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27889 if (IS_ERR(*pp))
27890 *pp = NULL;
27891
27892 - ops->inherits = NULL;
27893 + *(struct ata_port_operations **)&ops->inherits = NULL;
27894
27895 + pax_close_kernel();
27896 spin_unlock(&lock);
27897 }
27898
27899 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27900 index ac6a5be..c7176b1 100644
27901 --- a/drivers/ata/pata_arasan_cf.c
27902 +++ b/drivers/ata/pata_arasan_cf.c
27903 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27904 /* Handle platform specific quirks */
27905 if (pdata->quirk) {
27906 if (pdata->quirk & CF_BROKEN_PIO) {
27907 - ap->ops->set_piomode = NULL;
27908 + pax_open_kernel();
27909 + *(void **)&ap->ops->set_piomode = NULL;
27910 + pax_close_kernel();
27911 ap->pio_mask = 0;
27912 }
27913 if (pdata->quirk & CF_BROKEN_MWDMA)
27914 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27915 index f9b983a..887b9d8 100644
27916 --- a/drivers/atm/adummy.c
27917 +++ b/drivers/atm/adummy.c
27918 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27919 vcc->pop(vcc, skb);
27920 else
27921 dev_kfree_skb_any(skb);
27922 - atomic_inc(&vcc->stats->tx);
27923 + atomic_inc_unchecked(&vcc->stats->tx);
27924
27925 return 0;
27926 }
27927 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27928 index 89b30f3..7964211 100644
27929 --- a/drivers/atm/ambassador.c
27930 +++ b/drivers/atm/ambassador.c
27931 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27932 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27933
27934 // VC layer stats
27935 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27936 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27937
27938 // free the descriptor
27939 kfree (tx_descr);
27940 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27941 dump_skb ("<<<", vc, skb);
27942
27943 // VC layer stats
27944 - atomic_inc(&atm_vcc->stats->rx);
27945 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27946 __net_timestamp(skb);
27947 // end of our responsibility
27948 atm_vcc->push (atm_vcc, skb);
27949 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27950 } else {
27951 PRINTK (KERN_INFO, "dropped over-size frame");
27952 // should we count this?
27953 - atomic_inc(&atm_vcc->stats->rx_drop);
27954 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27955 }
27956
27957 } else {
27958 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27959 }
27960
27961 if (check_area (skb->data, skb->len)) {
27962 - atomic_inc(&atm_vcc->stats->tx_err);
27963 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27964 return -ENOMEM; // ?
27965 }
27966
27967 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27968 index b22d71c..d6e1049 100644
27969 --- a/drivers/atm/atmtcp.c
27970 +++ b/drivers/atm/atmtcp.c
27971 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27972 if (vcc->pop) vcc->pop(vcc,skb);
27973 else dev_kfree_skb(skb);
27974 if (dev_data) return 0;
27975 - atomic_inc(&vcc->stats->tx_err);
27976 + atomic_inc_unchecked(&vcc->stats->tx_err);
27977 return -ENOLINK;
27978 }
27979 size = skb->len+sizeof(struct atmtcp_hdr);
27980 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27981 if (!new_skb) {
27982 if (vcc->pop) vcc->pop(vcc,skb);
27983 else dev_kfree_skb(skb);
27984 - atomic_inc(&vcc->stats->tx_err);
27985 + atomic_inc_unchecked(&vcc->stats->tx_err);
27986 return -ENOBUFS;
27987 }
27988 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27989 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27990 if (vcc->pop) vcc->pop(vcc,skb);
27991 else dev_kfree_skb(skb);
27992 out_vcc->push(out_vcc,new_skb);
27993 - atomic_inc(&vcc->stats->tx);
27994 - atomic_inc(&out_vcc->stats->rx);
27995 + atomic_inc_unchecked(&vcc->stats->tx);
27996 + atomic_inc_unchecked(&out_vcc->stats->rx);
27997 return 0;
27998 }
27999
28000 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
28001 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
28002 read_unlock(&vcc_sklist_lock);
28003 if (!out_vcc) {
28004 - atomic_inc(&vcc->stats->tx_err);
28005 + atomic_inc_unchecked(&vcc->stats->tx_err);
28006 goto done;
28007 }
28008 skb_pull(skb,sizeof(struct atmtcp_hdr));
28009 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
28010 __net_timestamp(new_skb);
28011 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
28012 out_vcc->push(out_vcc,new_skb);
28013 - atomic_inc(&vcc->stats->tx);
28014 - atomic_inc(&out_vcc->stats->rx);
28015 + atomic_inc_unchecked(&vcc->stats->tx);
28016 + atomic_inc_unchecked(&out_vcc->stats->rx);
28017 done:
28018 if (vcc->pop) vcc->pop(vcc,skb);
28019 else dev_kfree_skb(skb);
28020 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
28021 index 2059ee4..faf51c7 100644
28022 --- a/drivers/atm/eni.c
28023 +++ b/drivers/atm/eni.c
28024 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
28025 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
28026 vcc->dev->number);
28027 length = 0;
28028 - atomic_inc(&vcc->stats->rx_err);
28029 + atomic_inc_unchecked(&vcc->stats->rx_err);
28030 }
28031 else {
28032 length = ATM_CELL_SIZE-1; /* no HEC */
28033 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28034 size);
28035 }
28036 eff = length = 0;
28037 - atomic_inc(&vcc->stats->rx_err);
28038 + atomic_inc_unchecked(&vcc->stats->rx_err);
28039 }
28040 else {
28041 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
28042 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28043 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
28044 vcc->dev->number,vcc->vci,length,size << 2,descr);
28045 length = eff = 0;
28046 - atomic_inc(&vcc->stats->rx_err);
28047 + atomic_inc_unchecked(&vcc->stats->rx_err);
28048 }
28049 }
28050 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
28051 @@ -767,7 +767,7 @@ rx_dequeued++;
28052 vcc->push(vcc,skb);
28053 pushed++;
28054 }
28055 - atomic_inc(&vcc->stats->rx);
28056 + atomic_inc_unchecked(&vcc->stats->rx);
28057 }
28058 wake_up(&eni_dev->rx_wait);
28059 }
28060 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
28061 PCI_DMA_TODEVICE);
28062 if (vcc->pop) vcc->pop(vcc,skb);
28063 else dev_kfree_skb_irq(skb);
28064 - atomic_inc(&vcc->stats->tx);
28065 + atomic_inc_unchecked(&vcc->stats->tx);
28066 wake_up(&eni_dev->tx_wait);
28067 dma_complete++;
28068 }
28069 @@ -1567,7 +1567,7 @@ tx_complete++;
28070 /*--------------------------------- entries ---------------------------------*/
28071
28072
28073 -static const char *media_name[] __devinitdata = {
28074 +static const char *media_name[] __devinitconst = {
28075 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
28076 "UTP", "05?", "06?", "07?", /* 4- 7 */
28077 "TAXI","09?", "10?", "11?", /* 8-11 */
28078 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
28079 index 86fed1b..6dc4721 100644
28080 --- a/drivers/atm/firestream.c
28081 +++ b/drivers/atm/firestream.c
28082 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
28083 }
28084 }
28085
28086 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28087 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28088
28089 fs_dprintk (FS_DEBUG_TXMEM, "i");
28090 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
28091 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28092 #endif
28093 skb_put (skb, qe->p1 & 0xffff);
28094 ATM_SKB(skb)->vcc = atm_vcc;
28095 - atomic_inc(&atm_vcc->stats->rx);
28096 + atomic_inc_unchecked(&atm_vcc->stats->rx);
28097 __net_timestamp(skb);
28098 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
28099 atm_vcc->push (atm_vcc, skb);
28100 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28101 kfree (pe);
28102 }
28103 if (atm_vcc)
28104 - atomic_inc(&atm_vcc->stats->rx_drop);
28105 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28106 break;
28107 case 0x1f: /* Reassembly abort: no buffers. */
28108 /* Silently increment error counter. */
28109 if (atm_vcc)
28110 - atomic_inc(&atm_vcc->stats->rx_drop);
28111 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28112 break;
28113 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28114 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
28115 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28116 index 361f5ae..7fc552d 100644
28117 --- a/drivers/atm/fore200e.c
28118 +++ b/drivers/atm/fore200e.c
28119 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
28120 #endif
28121 /* check error condition */
28122 if (*entry->status & STATUS_ERROR)
28123 - atomic_inc(&vcc->stats->tx_err);
28124 + atomic_inc_unchecked(&vcc->stats->tx_err);
28125 else
28126 - atomic_inc(&vcc->stats->tx);
28127 + atomic_inc_unchecked(&vcc->stats->tx);
28128 }
28129 }
28130
28131 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28132 if (skb == NULL) {
28133 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28134
28135 - atomic_inc(&vcc->stats->rx_drop);
28136 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28137 return -ENOMEM;
28138 }
28139
28140 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28141
28142 dev_kfree_skb_any(skb);
28143
28144 - atomic_inc(&vcc->stats->rx_drop);
28145 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28146 return -ENOMEM;
28147 }
28148
28149 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28150
28151 vcc->push(vcc, skb);
28152 - atomic_inc(&vcc->stats->rx);
28153 + atomic_inc_unchecked(&vcc->stats->rx);
28154
28155 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28156
28157 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28158 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28159 fore200e->atm_dev->number,
28160 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28161 - atomic_inc(&vcc->stats->rx_err);
28162 + atomic_inc_unchecked(&vcc->stats->rx_err);
28163 }
28164 }
28165
28166 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28167 goto retry_here;
28168 }
28169
28170 - atomic_inc(&vcc->stats->tx_err);
28171 + atomic_inc_unchecked(&vcc->stats->tx_err);
28172
28173 fore200e->tx_sat++;
28174 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28175 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28176 index b182c2f..1c6fa8a 100644
28177 --- a/drivers/atm/he.c
28178 +++ b/drivers/atm/he.c
28179 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28180
28181 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28182 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28183 - atomic_inc(&vcc->stats->rx_drop);
28184 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28185 goto return_host_buffers;
28186 }
28187
28188 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28189 RBRQ_LEN_ERR(he_dev->rbrq_head)
28190 ? "LEN_ERR" : "",
28191 vcc->vpi, vcc->vci);
28192 - atomic_inc(&vcc->stats->rx_err);
28193 + atomic_inc_unchecked(&vcc->stats->rx_err);
28194 goto return_host_buffers;
28195 }
28196
28197 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28198 vcc->push(vcc, skb);
28199 spin_lock(&he_dev->global_lock);
28200
28201 - atomic_inc(&vcc->stats->rx);
28202 + atomic_inc_unchecked(&vcc->stats->rx);
28203
28204 return_host_buffers:
28205 ++pdus_assembled;
28206 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28207 tpd->vcc->pop(tpd->vcc, tpd->skb);
28208 else
28209 dev_kfree_skb_any(tpd->skb);
28210 - atomic_inc(&tpd->vcc->stats->tx_err);
28211 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28212 }
28213 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28214 return;
28215 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28216 vcc->pop(vcc, skb);
28217 else
28218 dev_kfree_skb_any(skb);
28219 - atomic_inc(&vcc->stats->tx_err);
28220 + atomic_inc_unchecked(&vcc->stats->tx_err);
28221 return -EINVAL;
28222 }
28223
28224 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28225 vcc->pop(vcc, skb);
28226 else
28227 dev_kfree_skb_any(skb);
28228 - atomic_inc(&vcc->stats->tx_err);
28229 + atomic_inc_unchecked(&vcc->stats->tx_err);
28230 return -EINVAL;
28231 }
28232 #endif
28233 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28234 vcc->pop(vcc, skb);
28235 else
28236 dev_kfree_skb_any(skb);
28237 - atomic_inc(&vcc->stats->tx_err);
28238 + atomic_inc_unchecked(&vcc->stats->tx_err);
28239 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28240 return -ENOMEM;
28241 }
28242 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28243 vcc->pop(vcc, skb);
28244 else
28245 dev_kfree_skb_any(skb);
28246 - atomic_inc(&vcc->stats->tx_err);
28247 + atomic_inc_unchecked(&vcc->stats->tx_err);
28248 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28249 return -ENOMEM;
28250 }
28251 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28252 __enqueue_tpd(he_dev, tpd, cid);
28253 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28254
28255 - atomic_inc(&vcc->stats->tx);
28256 + atomic_inc_unchecked(&vcc->stats->tx);
28257
28258 return 0;
28259 }
28260 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28261 index 7d01c2a..4e3ac01 100644
28262 --- a/drivers/atm/horizon.c
28263 +++ b/drivers/atm/horizon.c
28264 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28265 {
28266 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28267 // VC layer stats
28268 - atomic_inc(&vcc->stats->rx);
28269 + atomic_inc_unchecked(&vcc->stats->rx);
28270 __net_timestamp(skb);
28271 // end of our responsibility
28272 vcc->push (vcc, skb);
28273 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28274 dev->tx_iovec = NULL;
28275
28276 // VC layer stats
28277 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28278 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28279
28280 // free the skb
28281 hrz_kfree_skb (skb);
28282 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28283 index 8974bd2..b856f85 100644
28284 --- a/drivers/atm/idt77252.c
28285 +++ b/drivers/atm/idt77252.c
28286 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28287 else
28288 dev_kfree_skb(skb);
28289
28290 - atomic_inc(&vcc->stats->tx);
28291 + atomic_inc_unchecked(&vcc->stats->tx);
28292 }
28293
28294 atomic_dec(&scq->used);
28295 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28296 if ((sb = dev_alloc_skb(64)) == NULL) {
28297 printk("%s: Can't allocate buffers for aal0.\n",
28298 card->name);
28299 - atomic_add(i, &vcc->stats->rx_drop);
28300 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28301 break;
28302 }
28303 if (!atm_charge(vcc, sb->truesize)) {
28304 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28305 card->name);
28306 - atomic_add(i - 1, &vcc->stats->rx_drop);
28307 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28308 dev_kfree_skb(sb);
28309 break;
28310 }
28311 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28312 ATM_SKB(sb)->vcc = vcc;
28313 __net_timestamp(sb);
28314 vcc->push(vcc, sb);
28315 - atomic_inc(&vcc->stats->rx);
28316 + atomic_inc_unchecked(&vcc->stats->rx);
28317
28318 cell += ATM_CELL_PAYLOAD;
28319 }
28320 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28321 "(CDC: %08x)\n",
28322 card->name, len, rpp->len, readl(SAR_REG_CDC));
28323 recycle_rx_pool_skb(card, rpp);
28324 - atomic_inc(&vcc->stats->rx_err);
28325 + atomic_inc_unchecked(&vcc->stats->rx_err);
28326 return;
28327 }
28328 if (stat & SAR_RSQE_CRC) {
28329 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28330 recycle_rx_pool_skb(card, rpp);
28331 - atomic_inc(&vcc->stats->rx_err);
28332 + atomic_inc_unchecked(&vcc->stats->rx_err);
28333 return;
28334 }
28335 if (skb_queue_len(&rpp->queue) > 1) {
28336 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28337 RXPRINTK("%s: Can't alloc RX skb.\n",
28338 card->name);
28339 recycle_rx_pool_skb(card, rpp);
28340 - atomic_inc(&vcc->stats->rx_err);
28341 + atomic_inc_unchecked(&vcc->stats->rx_err);
28342 return;
28343 }
28344 if (!atm_charge(vcc, skb->truesize)) {
28345 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28346 __net_timestamp(skb);
28347
28348 vcc->push(vcc, skb);
28349 - atomic_inc(&vcc->stats->rx);
28350 + atomic_inc_unchecked(&vcc->stats->rx);
28351
28352 return;
28353 }
28354 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28355 __net_timestamp(skb);
28356
28357 vcc->push(vcc, skb);
28358 - atomic_inc(&vcc->stats->rx);
28359 + atomic_inc_unchecked(&vcc->stats->rx);
28360
28361 if (skb->truesize > SAR_FB_SIZE_3)
28362 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28363 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28364 if (vcc->qos.aal != ATM_AAL0) {
28365 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28366 card->name, vpi, vci);
28367 - atomic_inc(&vcc->stats->rx_drop);
28368 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28369 goto drop;
28370 }
28371
28372 if ((sb = dev_alloc_skb(64)) == NULL) {
28373 printk("%s: Can't allocate buffers for AAL0.\n",
28374 card->name);
28375 - atomic_inc(&vcc->stats->rx_err);
28376 + atomic_inc_unchecked(&vcc->stats->rx_err);
28377 goto drop;
28378 }
28379
28380 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28381 ATM_SKB(sb)->vcc = vcc;
28382 __net_timestamp(sb);
28383 vcc->push(vcc, sb);
28384 - atomic_inc(&vcc->stats->rx);
28385 + atomic_inc_unchecked(&vcc->stats->rx);
28386
28387 drop:
28388 skb_pull(queue, 64);
28389 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28390
28391 if (vc == NULL) {
28392 printk("%s: NULL connection in send().\n", card->name);
28393 - atomic_inc(&vcc->stats->tx_err);
28394 + atomic_inc_unchecked(&vcc->stats->tx_err);
28395 dev_kfree_skb(skb);
28396 return -EINVAL;
28397 }
28398 if (!test_bit(VCF_TX, &vc->flags)) {
28399 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28400 - atomic_inc(&vcc->stats->tx_err);
28401 + atomic_inc_unchecked(&vcc->stats->tx_err);
28402 dev_kfree_skb(skb);
28403 return -EINVAL;
28404 }
28405 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28406 break;
28407 default:
28408 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28409 - atomic_inc(&vcc->stats->tx_err);
28410 + atomic_inc_unchecked(&vcc->stats->tx_err);
28411 dev_kfree_skb(skb);
28412 return -EINVAL;
28413 }
28414
28415 if (skb_shinfo(skb)->nr_frags != 0) {
28416 printk("%s: No scatter-gather yet.\n", card->name);
28417 - atomic_inc(&vcc->stats->tx_err);
28418 + atomic_inc_unchecked(&vcc->stats->tx_err);
28419 dev_kfree_skb(skb);
28420 return -EINVAL;
28421 }
28422 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28423
28424 err = queue_skb(card, vc, skb, oam);
28425 if (err) {
28426 - atomic_inc(&vcc->stats->tx_err);
28427 + atomic_inc_unchecked(&vcc->stats->tx_err);
28428 dev_kfree_skb(skb);
28429 return err;
28430 }
28431 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28432 skb = dev_alloc_skb(64);
28433 if (!skb) {
28434 printk("%s: Out of memory in send_oam().\n", card->name);
28435 - atomic_inc(&vcc->stats->tx_err);
28436 + atomic_inc_unchecked(&vcc->stats->tx_err);
28437 return -ENOMEM;
28438 }
28439 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28440 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28441 index d438601..8b98495 100644
28442 --- a/drivers/atm/iphase.c
28443 +++ b/drivers/atm/iphase.c
28444 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28445 status = (u_short) (buf_desc_ptr->desc_mode);
28446 if (status & (RX_CER | RX_PTE | RX_OFL))
28447 {
28448 - atomic_inc(&vcc->stats->rx_err);
28449 + atomic_inc_unchecked(&vcc->stats->rx_err);
28450 IF_ERR(printk("IA: bad packet, dropping it");)
28451 if (status & RX_CER) {
28452 IF_ERR(printk(" cause: packet CRC error\n");)
28453 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28454 len = dma_addr - buf_addr;
28455 if (len > iadev->rx_buf_sz) {
28456 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28457 - atomic_inc(&vcc->stats->rx_err);
28458 + atomic_inc_unchecked(&vcc->stats->rx_err);
28459 goto out_free_desc;
28460 }
28461
28462 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28463 ia_vcc = INPH_IA_VCC(vcc);
28464 if (ia_vcc == NULL)
28465 {
28466 - atomic_inc(&vcc->stats->rx_err);
28467 + atomic_inc_unchecked(&vcc->stats->rx_err);
28468 atm_return(vcc, skb->truesize);
28469 dev_kfree_skb_any(skb);
28470 goto INCR_DLE;
28471 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28472 if ((length > iadev->rx_buf_sz) || (length >
28473 (skb->len - sizeof(struct cpcs_trailer))))
28474 {
28475 - atomic_inc(&vcc->stats->rx_err);
28476 + atomic_inc_unchecked(&vcc->stats->rx_err);
28477 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28478 length, skb->len);)
28479 atm_return(vcc, skb->truesize);
28480 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28481
28482 IF_RX(printk("rx_dle_intr: skb push");)
28483 vcc->push(vcc,skb);
28484 - atomic_inc(&vcc->stats->rx);
28485 + atomic_inc_unchecked(&vcc->stats->rx);
28486 iadev->rx_pkt_cnt++;
28487 }
28488 INCR_DLE:
28489 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28490 {
28491 struct k_sonet_stats *stats;
28492 stats = &PRIV(_ia_dev[board])->sonet_stats;
28493 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28494 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28495 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28496 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28497 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28498 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28499 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28500 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28501 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28502 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28503 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28504 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28505 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28506 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28507 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28508 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28509 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28510 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28511 }
28512 ia_cmds.status = 0;
28513 break;
28514 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28515 if ((desc == 0) || (desc > iadev->num_tx_desc))
28516 {
28517 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28518 - atomic_inc(&vcc->stats->tx);
28519 + atomic_inc_unchecked(&vcc->stats->tx);
28520 if (vcc->pop)
28521 vcc->pop(vcc, skb);
28522 else
28523 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28524 ATM_DESC(skb) = vcc->vci;
28525 skb_queue_tail(&iadev->tx_dma_q, skb);
28526
28527 - atomic_inc(&vcc->stats->tx);
28528 + atomic_inc_unchecked(&vcc->stats->tx);
28529 iadev->tx_pkt_cnt++;
28530 /* Increment transaction counter */
28531 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28532
28533 #if 0
28534 /* add flow control logic */
28535 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28536 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28537 if (iavcc->vc_desc_cnt > 10) {
28538 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28539 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28540 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28541 index 68c7588..7036683 100644
28542 --- a/drivers/atm/lanai.c
28543 +++ b/drivers/atm/lanai.c
28544 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28545 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28546 lanai_endtx(lanai, lvcc);
28547 lanai_free_skb(lvcc->tx.atmvcc, skb);
28548 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28549 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28550 }
28551
28552 /* Try to fill the buffer - don't call unless there is backlog */
28553 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28554 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28555 __net_timestamp(skb);
28556 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28557 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28558 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28559 out:
28560 lvcc->rx.buf.ptr = end;
28561 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28562 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28563 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28564 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28565 lanai->stats.service_rxnotaal5++;
28566 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28567 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28568 return 0;
28569 }
28570 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28571 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28572 int bytes;
28573 read_unlock(&vcc_sklist_lock);
28574 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28575 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28576 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28577 lvcc->stats.x.aal5.service_trash++;
28578 bytes = (SERVICE_GET_END(s) * 16) -
28579 (((unsigned long) lvcc->rx.buf.ptr) -
28580 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28581 }
28582 if (s & SERVICE_STREAM) {
28583 read_unlock(&vcc_sklist_lock);
28584 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28585 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28586 lvcc->stats.x.aal5.service_stream++;
28587 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28588 "PDU on VCI %d!\n", lanai->number, vci);
28589 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28590 return 0;
28591 }
28592 DPRINTK("got rx crc error on vci %d\n", vci);
28593 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28594 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28595 lvcc->stats.x.aal5.service_rxcrc++;
28596 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28597 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28598 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28599 index 1c70c45..300718d 100644
28600 --- a/drivers/atm/nicstar.c
28601 +++ b/drivers/atm/nicstar.c
28602 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28603 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28604 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28605 card->index);
28606 - atomic_inc(&vcc->stats->tx_err);
28607 + atomic_inc_unchecked(&vcc->stats->tx_err);
28608 dev_kfree_skb_any(skb);
28609 return -EINVAL;
28610 }
28611 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28612 if (!vc->tx) {
28613 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28614 card->index);
28615 - atomic_inc(&vcc->stats->tx_err);
28616 + atomic_inc_unchecked(&vcc->stats->tx_err);
28617 dev_kfree_skb_any(skb);
28618 return -EINVAL;
28619 }
28620 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28621 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28622 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28623 card->index);
28624 - atomic_inc(&vcc->stats->tx_err);
28625 + atomic_inc_unchecked(&vcc->stats->tx_err);
28626 dev_kfree_skb_any(skb);
28627 return -EINVAL;
28628 }
28629
28630 if (skb_shinfo(skb)->nr_frags != 0) {
28631 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28632 - atomic_inc(&vcc->stats->tx_err);
28633 + atomic_inc_unchecked(&vcc->stats->tx_err);
28634 dev_kfree_skb_any(skb);
28635 return -EINVAL;
28636 }
28637 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28638 }
28639
28640 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28641 - atomic_inc(&vcc->stats->tx_err);
28642 + atomic_inc_unchecked(&vcc->stats->tx_err);
28643 dev_kfree_skb_any(skb);
28644 return -EIO;
28645 }
28646 - atomic_inc(&vcc->stats->tx);
28647 + atomic_inc_unchecked(&vcc->stats->tx);
28648
28649 return 0;
28650 }
28651 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28652 printk
28653 ("nicstar%d: Can't allocate buffers for aal0.\n",
28654 card->index);
28655 - atomic_add(i, &vcc->stats->rx_drop);
28656 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28657 break;
28658 }
28659 if (!atm_charge(vcc, sb->truesize)) {
28660 RXPRINTK
28661 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28662 card->index);
28663 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28664 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28665 dev_kfree_skb_any(sb);
28666 break;
28667 }
28668 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28669 ATM_SKB(sb)->vcc = vcc;
28670 __net_timestamp(sb);
28671 vcc->push(vcc, sb);
28672 - atomic_inc(&vcc->stats->rx);
28673 + atomic_inc_unchecked(&vcc->stats->rx);
28674 cell += ATM_CELL_PAYLOAD;
28675 }
28676
28677 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28678 if (iovb == NULL) {
28679 printk("nicstar%d: Out of iovec buffers.\n",
28680 card->index);
28681 - atomic_inc(&vcc->stats->rx_drop);
28682 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28683 recycle_rx_buf(card, skb);
28684 return;
28685 }
28686 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28687 small or large buffer itself. */
28688 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28689 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28690 - atomic_inc(&vcc->stats->rx_err);
28691 + atomic_inc_unchecked(&vcc->stats->rx_err);
28692 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28693 NS_MAX_IOVECS);
28694 NS_PRV_IOVCNT(iovb) = 0;
28695 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28696 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28697 card->index);
28698 which_list(card, skb);
28699 - atomic_inc(&vcc->stats->rx_err);
28700 + atomic_inc_unchecked(&vcc->stats->rx_err);
28701 recycle_rx_buf(card, skb);
28702 vc->rx_iov = NULL;
28703 recycle_iov_buf(card, iovb);
28704 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28705 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28706 card->index);
28707 which_list(card, skb);
28708 - atomic_inc(&vcc->stats->rx_err);
28709 + atomic_inc_unchecked(&vcc->stats->rx_err);
28710 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28711 NS_PRV_IOVCNT(iovb));
28712 vc->rx_iov = NULL;
28713 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28714 printk(" - PDU size mismatch.\n");
28715 else
28716 printk(".\n");
28717 - atomic_inc(&vcc->stats->rx_err);
28718 + atomic_inc_unchecked(&vcc->stats->rx_err);
28719 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28720 NS_PRV_IOVCNT(iovb));
28721 vc->rx_iov = NULL;
28722 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28723 /* skb points to a small buffer */
28724 if (!atm_charge(vcc, skb->truesize)) {
28725 push_rxbufs(card, skb);
28726 - atomic_inc(&vcc->stats->rx_drop);
28727 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28728 } else {
28729 skb_put(skb, len);
28730 dequeue_sm_buf(card, skb);
28731 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28732 ATM_SKB(skb)->vcc = vcc;
28733 __net_timestamp(skb);
28734 vcc->push(vcc, skb);
28735 - atomic_inc(&vcc->stats->rx);
28736 + atomic_inc_unchecked(&vcc->stats->rx);
28737 }
28738 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28739 struct sk_buff *sb;
28740 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28741 if (len <= NS_SMBUFSIZE) {
28742 if (!atm_charge(vcc, sb->truesize)) {
28743 push_rxbufs(card, sb);
28744 - atomic_inc(&vcc->stats->rx_drop);
28745 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28746 } else {
28747 skb_put(sb, len);
28748 dequeue_sm_buf(card, sb);
28749 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28750 ATM_SKB(sb)->vcc = vcc;
28751 __net_timestamp(sb);
28752 vcc->push(vcc, sb);
28753 - atomic_inc(&vcc->stats->rx);
28754 + atomic_inc_unchecked(&vcc->stats->rx);
28755 }
28756
28757 push_rxbufs(card, skb);
28758 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28759
28760 if (!atm_charge(vcc, skb->truesize)) {
28761 push_rxbufs(card, skb);
28762 - atomic_inc(&vcc->stats->rx_drop);
28763 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28764 } else {
28765 dequeue_lg_buf(card, skb);
28766 #ifdef NS_USE_DESTRUCTORS
28767 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28768 ATM_SKB(skb)->vcc = vcc;
28769 __net_timestamp(skb);
28770 vcc->push(vcc, skb);
28771 - atomic_inc(&vcc->stats->rx);
28772 + atomic_inc_unchecked(&vcc->stats->rx);
28773 }
28774
28775 push_rxbufs(card, sb);
28776 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28777 printk
28778 ("nicstar%d: Out of huge buffers.\n",
28779 card->index);
28780 - atomic_inc(&vcc->stats->rx_drop);
28781 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28782 recycle_iovec_rx_bufs(card,
28783 (struct iovec *)
28784 iovb->data,
28785 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28786 card->hbpool.count++;
28787 } else
28788 dev_kfree_skb_any(hb);
28789 - atomic_inc(&vcc->stats->rx_drop);
28790 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28791 } else {
28792 /* Copy the small buffer to the huge buffer */
28793 sb = (struct sk_buff *)iov->iov_base;
28794 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28795 #endif /* NS_USE_DESTRUCTORS */
28796 __net_timestamp(hb);
28797 vcc->push(vcc, hb);
28798 - atomic_inc(&vcc->stats->rx);
28799 + atomic_inc_unchecked(&vcc->stats->rx);
28800 }
28801 }
28802
28803 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28804 index 9851093..adb2b1e 100644
28805 --- a/drivers/atm/solos-pci.c
28806 +++ b/drivers/atm/solos-pci.c
28807 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28808 }
28809 atm_charge(vcc, skb->truesize);
28810 vcc->push(vcc, skb);
28811 - atomic_inc(&vcc->stats->rx);
28812 + atomic_inc_unchecked(&vcc->stats->rx);
28813 break;
28814
28815 case PKT_STATUS:
28816 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28817 vcc = SKB_CB(oldskb)->vcc;
28818
28819 if (vcc) {
28820 - atomic_inc(&vcc->stats->tx);
28821 + atomic_inc_unchecked(&vcc->stats->tx);
28822 solos_pop(vcc, oldskb);
28823 } else
28824 dev_kfree_skb_irq(oldskb);
28825 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28826 index 0215934..ce9f5b1 100644
28827 --- a/drivers/atm/suni.c
28828 +++ b/drivers/atm/suni.c
28829 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28830
28831
28832 #define ADD_LIMITED(s,v) \
28833 - atomic_add((v),&stats->s); \
28834 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28835 + atomic_add_unchecked((v),&stats->s); \
28836 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28837
28838
28839 static void suni_hz(unsigned long from_timer)
28840 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28841 index 5120a96..e2572bd 100644
28842 --- a/drivers/atm/uPD98402.c
28843 +++ b/drivers/atm/uPD98402.c
28844 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28845 struct sonet_stats tmp;
28846 int error = 0;
28847
28848 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28849 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28850 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28851 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28852 if (zero && !error) {
28853 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28854
28855
28856 #define ADD_LIMITED(s,v) \
28857 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28858 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28859 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28860 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28861 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28862 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28863
28864
28865 static void stat_event(struct atm_dev *dev)
28866 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28867 if (reason & uPD98402_INT_PFM) stat_event(dev);
28868 if (reason & uPD98402_INT_PCO) {
28869 (void) GET(PCOCR); /* clear interrupt cause */
28870 - atomic_add(GET(HECCT),
28871 + atomic_add_unchecked(GET(HECCT),
28872 &PRIV(dev)->sonet_stats.uncorr_hcs);
28873 }
28874 if ((reason & uPD98402_INT_RFO) &&
28875 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28876 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28877 uPD98402_INT_LOS),PIMR); /* enable them */
28878 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28879 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28880 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28881 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28882 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28883 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28884 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28885 return 0;
28886 }
28887
28888 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28889 index abe4e20..83c4727 100644
28890 --- a/drivers/atm/zatm.c
28891 +++ b/drivers/atm/zatm.c
28892 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28893 }
28894 if (!size) {
28895 dev_kfree_skb_irq(skb);
28896 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28897 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28898 continue;
28899 }
28900 if (!atm_charge(vcc,skb->truesize)) {
28901 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28902 skb->len = size;
28903 ATM_SKB(skb)->vcc = vcc;
28904 vcc->push(vcc,skb);
28905 - atomic_inc(&vcc->stats->rx);
28906 + atomic_inc_unchecked(&vcc->stats->rx);
28907 }
28908 zout(pos & 0xffff,MTA(mbx));
28909 #if 0 /* probably a stupid idea */
28910 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28911 skb_queue_head(&zatm_vcc->backlog,skb);
28912 break;
28913 }
28914 - atomic_inc(&vcc->stats->tx);
28915 + atomic_inc_unchecked(&vcc->stats->tx);
28916 wake_up(&zatm_vcc->tx_wait);
28917 }
28918
28919 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28920 index 765c3a2..771ace6 100644
28921 --- a/drivers/base/devtmpfs.c
28922 +++ b/drivers/base/devtmpfs.c
28923 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28924 if (!thread)
28925 return 0;
28926
28927 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28928 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28929 if (err)
28930 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28931 else
28932 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28933 index cbb463b..babe2cf 100644
28934 --- a/drivers/base/power/wakeup.c
28935 +++ b/drivers/base/power/wakeup.c
28936 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
28937 * They need to be modified together atomically, so it's better to use one
28938 * atomic variable to hold them both.
28939 */
28940 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28941 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28942
28943 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28944 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28945
28946 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28947 {
28948 - unsigned int comb = atomic_read(&combined_event_count);
28949 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28950
28951 *cnt = (comb >> IN_PROGRESS_BITS);
28952 *inpr = comb & MAX_IN_PROGRESS;
28953 @@ -385,7 +385,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28954 ws->start_prevent_time = ws->last_time;
28955
28956 /* Increment the counter of events in progress. */
28957 - cec = atomic_inc_return(&combined_event_count);
28958 + cec = atomic_inc_return_unchecked(&combined_event_count);
28959
28960 trace_wakeup_source_activate(ws->name, cec);
28961 }
28962 @@ -511,7 +511,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28963 * Increment the counter of registered wakeup events and decrement the
28964 * couter of wakeup events in progress simultaneously.
28965 */
28966 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
28967 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28968 trace_wakeup_source_deactivate(ws->name, cec);
28969
28970 split_counters(&cnt, &inpr);
28971 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28972 index b0f553b..77b928b 100644
28973 --- a/drivers/block/cciss.c
28974 +++ b/drivers/block/cciss.c
28975 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28976 int err;
28977 u32 cp;
28978
28979 + memset(&arg64, 0, sizeof(arg64));
28980 +
28981 err = 0;
28982 err |=
28983 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28984 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28985 while (!list_empty(&h->reqQ)) {
28986 c = list_entry(h->reqQ.next, CommandList_struct, list);
28987 /* can't do anything if fifo is full */
28988 - if ((h->access.fifo_full(h))) {
28989 + if ((h->access->fifo_full(h))) {
28990 dev_warn(&h->pdev->dev, "fifo full\n");
28991 break;
28992 }
28993 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28994 h->Qdepth--;
28995
28996 /* Tell the controller execute command */
28997 - h->access.submit_command(h, c);
28998 + h->access->submit_command(h, c);
28999
29000 /* Put job onto the completed Q */
29001 addQ(&h->cmpQ, c);
29002 @@ -3443,17 +3445,17 @@ startio:
29003
29004 static inline unsigned long get_next_completion(ctlr_info_t *h)
29005 {
29006 - return h->access.command_completed(h);
29007 + return h->access->command_completed(h);
29008 }
29009
29010 static inline int interrupt_pending(ctlr_info_t *h)
29011 {
29012 - return h->access.intr_pending(h);
29013 + return h->access->intr_pending(h);
29014 }
29015
29016 static inline long interrupt_not_for_us(ctlr_info_t *h)
29017 {
29018 - return ((h->access.intr_pending(h) == 0) ||
29019 + return ((h->access->intr_pending(h) == 0) ||
29020 (h->interrupts_enabled == 0));
29021 }
29022
29023 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
29024 u32 a;
29025
29026 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
29027 - return h->access.command_completed(h);
29028 + return h->access->command_completed(h);
29029
29030 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
29031 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
29032 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
29033 trans_support & CFGTBL_Trans_use_short_tags);
29034
29035 /* Change the access methods to the performant access methods */
29036 - h->access = SA5_performant_access;
29037 + h->access = &SA5_performant_access;
29038 h->transMethod = CFGTBL_Trans_Performant;
29039
29040 return;
29041 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
29042 if (prod_index < 0)
29043 return -ENODEV;
29044 h->product_name = products[prod_index].product_name;
29045 - h->access = *(products[prod_index].access);
29046 + h->access = products[prod_index].access;
29047
29048 if (cciss_board_disabled(h)) {
29049 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
29050 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
29051 }
29052
29053 /* make sure the board interrupts are off */
29054 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29055 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29056 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
29057 if (rc)
29058 goto clean2;
29059 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
29060 * fake ones to scoop up any residual completions.
29061 */
29062 spin_lock_irqsave(&h->lock, flags);
29063 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29064 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29065 spin_unlock_irqrestore(&h->lock, flags);
29066 free_irq(h->intr[h->intr_mode], h);
29067 rc = cciss_request_irq(h, cciss_msix_discard_completions,
29068 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
29069 dev_info(&h->pdev->dev, "Board READY.\n");
29070 dev_info(&h->pdev->dev,
29071 "Waiting for stale completions to drain.\n");
29072 - h->access.set_intr_mask(h, CCISS_INTR_ON);
29073 + h->access->set_intr_mask(h, CCISS_INTR_ON);
29074 msleep(10000);
29075 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29076 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29077
29078 rc = controller_reset_failed(h->cfgtable);
29079 if (rc)
29080 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
29081 cciss_scsi_setup(h);
29082
29083 /* Turn the interrupts on so we can service requests */
29084 - h->access.set_intr_mask(h, CCISS_INTR_ON);
29085 + h->access->set_intr_mask(h, CCISS_INTR_ON);
29086
29087 /* Get the firmware version */
29088 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
29089 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
29090 kfree(flush_buf);
29091 if (return_code != IO_OK)
29092 dev_warn(&h->pdev->dev, "Error flushing cache\n");
29093 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
29094 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
29095 free_irq(h->intr[h->intr_mode], h);
29096 }
29097
29098 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
29099 index 7fda30e..eb5dfe0 100644
29100 --- a/drivers/block/cciss.h
29101 +++ b/drivers/block/cciss.h
29102 @@ -101,7 +101,7 @@ struct ctlr_info
29103 /* information about each logical volume */
29104 drive_info_struct *drv[CISS_MAX_LUN];
29105
29106 - struct access_method access;
29107 + struct access_method *access;
29108
29109 /* queue and queue Info */
29110 struct list_head reqQ;
29111 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
29112 index 9125bbe..eede5c8 100644
29113 --- a/drivers/block/cpqarray.c
29114 +++ b/drivers/block/cpqarray.c
29115 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29116 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29117 goto Enomem4;
29118 }
29119 - hba[i]->access.set_intr_mask(hba[i], 0);
29120 + hba[i]->access->set_intr_mask(hba[i], 0);
29121 if (request_irq(hba[i]->intr, do_ida_intr,
29122 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29123 {
29124 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29125 add_timer(&hba[i]->timer);
29126
29127 /* Enable IRQ now that spinlock and rate limit timer are set up */
29128 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29129 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29130
29131 for(j=0; j<NWD; j++) {
29132 struct gendisk *disk = ida_gendisk[i][j];
29133 @@ -694,7 +694,7 @@ DBGINFO(
29134 for(i=0; i<NR_PRODUCTS; i++) {
29135 if (board_id == products[i].board_id) {
29136 c->product_name = products[i].product_name;
29137 - c->access = *(products[i].access);
29138 + c->access = products[i].access;
29139 break;
29140 }
29141 }
29142 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29143 hba[ctlr]->intr = intr;
29144 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29145 hba[ctlr]->product_name = products[j].product_name;
29146 - hba[ctlr]->access = *(products[j].access);
29147 + hba[ctlr]->access = products[j].access;
29148 hba[ctlr]->ctlr = ctlr;
29149 hba[ctlr]->board_id = board_id;
29150 hba[ctlr]->pci_dev = NULL; /* not PCI */
29151 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29152
29153 while((c = h->reqQ) != NULL) {
29154 /* Can't do anything if we're busy */
29155 - if (h->access.fifo_full(h) == 0)
29156 + if (h->access->fifo_full(h) == 0)
29157 return;
29158
29159 /* Get the first entry from the request Q */
29160 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29161 h->Qdepth--;
29162
29163 /* Tell the controller to do our bidding */
29164 - h->access.submit_command(h, c);
29165 + h->access->submit_command(h, c);
29166
29167 /* Get onto the completion Q */
29168 addQ(&h->cmpQ, c);
29169 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29170 unsigned long flags;
29171 __u32 a,a1;
29172
29173 - istat = h->access.intr_pending(h);
29174 + istat = h->access->intr_pending(h);
29175 /* Is this interrupt for us? */
29176 if (istat == 0)
29177 return IRQ_NONE;
29178 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29179 */
29180 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29181 if (istat & FIFO_NOT_EMPTY) {
29182 - while((a = h->access.command_completed(h))) {
29183 + while((a = h->access->command_completed(h))) {
29184 a1 = a; a &= ~3;
29185 if ((c = h->cmpQ) == NULL)
29186 {
29187 @@ -1449,11 +1449,11 @@ static int sendcmd(
29188 /*
29189 * Disable interrupt
29190 */
29191 - info_p->access.set_intr_mask(info_p, 0);
29192 + info_p->access->set_intr_mask(info_p, 0);
29193 /* Make sure there is room in the command FIFO */
29194 /* Actually it should be completely empty at this time. */
29195 for (i = 200000; i > 0; i--) {
29196 - temp = info_p->access.fifo_full(info_p);
29197 + temp = info_p->access->fifo_full(info_p);
29198 if (temp != 0) {
29199 break;
29200 }
29201 @@ -1466,7 +1466,7 @@ DBG(
29202 /*
29203 * Send the cmd
29204 */
29205 - info_p->access.submit_command(info_p, c);
29206 + info_p->access->submit_command(info_p, c);
29207 complete = pollcomplete(ctlr);
29208
29209 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29210 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29211 * we check the new geometry. Then turn interrupts back on when
29212 * we're done.
29213 */
29214 - host->access.set_intr_mask(host, 0);
29215 + host->access->set_intr_mask(host, 0);
29216 getgeometry(ctlr);
29217 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29218 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29219
29220 for(i=0; i<NWD; i++) {
29221 struct gendisk *disk = ida_gendisk[ctlr][i];
29222 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29223 /* Wait (up to 2 seconds) for a command to complete */
29224
29225 for (i = 200000; i > 0; i--) {
29226 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29227 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29228 if (done == 0) {
29229 udelay(10); /* a short fixed delay */
29230 } else
29231 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29232 index be73e9d..7fbf140 100644
29233 --- a/drivers/block/cpqarray.h
29234 +++ b/drivers/block/cpqarray.h
29235 @@ -99,7 +99,7 @@ struct ctlr_info {
29236 drv_info_t drv[NWD];
29237 struct proc_dir_entry *proc;
29238
29239 - struct access_method access;
29240 + struct access_method *access;
29241
29242 cmdlist_t *reqQ;
29243 cmdlist_t *cmpQ;
29244 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29245 index 02f013a..afeba24 100644
29246 --- a/drivers/block/drbd/drbd_int.h
29247 +++ b/drivers/block/drbd/drbd_int.h
29248 @@ -735,7 +735,7 @@ struct drbd_request;
29249 struct drbd_epoch {
29250 struct list_head list;
29251 unsigned int barrier_nr;
29252 - atomic_t epoch_size; /* increased on every request added. */
29253 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29254 atomic_t active; /* increased on every req. added, and dec on every finished. */
29255 unsigned long flags;
29256 };
29257 @@ -1110,7 +1110,7 @@ struct drbd_conf {
29258 void *int_dig_in;
29259 void *int_dig_vv;
29260 wait_queue_head_t seq_wait;
29261 - atomic_t packet_seq;
29262 + atomic_unchecked_t packet_seq;
29263 unsigned int peer_seq;
29264 spinlock_t peer_seq_lock;
29265 unsigned int minor;
29266 @@ -1651,30 +1651,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29267
29268 static inline void drbd_tcp_cork(struct socket *sock)
29269 {
29270 - int __user val = 1;
29271 + int val = 1;
29272 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29273 - (char __user *)&val, sizeof(val));
29274 + (char __force_user *)&val, sizeof(val));
29275 }
29276
29277 static inline void drbd_tcp_uncork(struct socket *sock)
29278 {
29279 - int __user val = 0;
29280 + int val = 0;
29281 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29282 - (char __user *)&val, sizeof(val));
29283 + (char __force_user *)&val, sizeof(val));
29284 }
29285
29286 static inline void drbd_tcp_nodelay(struct socket *sock)
29287 {
29288 - int __user val = 1;
29289 + int val = 1;
29290 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29291 - (char __user *)&val, sizeof(val));
29292 + (char __force_user *)&val, sizeof(val));
29293 }
29294
29295 static inline void drbd_tcp_quickack(struct socket *sock)
29296 {
29297 - int __user val = 2;
29298 + int val = 2;
29299 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29300 - (char __user *)&val, sizeof(val));
29301 + (char __force_user *)&val, sizeof(val));
29302 }
29303
29304 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29305 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29306 index 920ede2..cb827ba 100644
29307 --- a/drivers/block/drbd/drbd_main.c
29308 +++ b/drivers/block/drbd/drbd_main.c
29309 @@ -2555,7 +2555,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29310 p.sector = sector;
29311 p.block_id = block_id;
29312 p.blksize = blksize;
29313 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29314 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29315
29316 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29317 return false;
29318 @@ -2853,7 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29319
29320 p.sector = cpu_to_be64(req->sector);
29321 p.block_id = (unsigned long)req;
29322 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29323 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29324
29325 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29326
29327 @@ -3138,7 +3138,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29328 atomic_set(&mdev->unacked_cnt, 0);
29329 atomic_set(&mdev->local_cnt, 0);
29330 atomic_set(&mdev->net_cnt, 0);
29331 - atomic_set(&mdev->packet_seq, 0);
29332 + atomic_set_unchecked(&mdev->packet_seq, 0);
29333 atomic_set(&mdev->pp_in_use, 0);
29334 atomic_set(&mdev->pp_in_use_by_net, 0);
29335 atomic_set(&mdev->rs_sect_in, 0);
29336 @@ -3220,8 +3220,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29337 mdev->receiver.t_state);
29338
29339 /* no need to lock it, I'm the only thread alive */
29340 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29341 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29342 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29343 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29344 mdev->al_writ_cnt =
29345 mdev->bm_writ_cnt =
29346 mdev->read_cnt =
29347 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29348 index 6d4de6a..7b7ad4b 100644
29349 --- a/drivers/block/drbd/drbd_nl.c
29350 +++ b/drivers/block/drbd/drbd_nl.c
29351 @@ -2387,7 +2387,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29352 module_put(THIS_MODULE);
29353 }
29354
29355 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29356 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29357
29358 static unsigned short *
29359 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29360 @@ -2458,7 +2458,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29361 cn_reply->id.idx = CN_IDX_DRBD;
29362 cn_reply->id.val = CN_VAL_DRBD;
29363
29364 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29365 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29366 cn_reply->ack = 0; /* not used here. */
29367 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29368 (int)((char *)tl - (char *)reply->tag_list);
29369 @@ -2490,7 +2490,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29370 cn_reply->id.idx = CN_IDX_DRBD;
29371 cn_reply->id.val = CN_VAL_DRBD;
29372
29373 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29374 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29375 cn_reply->ack = 0; /* not used here. */
29376 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29377 (int)((char *)tl - (char *)reply->tag_list);
29378 @@ -2568,7 +2568,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29379 cn_reply->id.idx = CN_IDX_DRBD;
29380 cn_reply->id.val = CN_VAL_DRBD;
29381
29382 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29383 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29384 cn_reply->ack = 0; // not used here.
29385 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29386 (int)((char*)tl - (char*)reply->tag_list);
29387 @@ -2607,7 +2607,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29388 cn_reply->id.idx = CN_IDX_DRBD;
29389 cn_reply->id.val = CN_VAL_DRBD;
29390
29391 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29392 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29393 cn_reply->ack = 0; /* not used here. */
29394 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29395 (int)((char *)tl - (char *)reply->tag_list);
29396 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29397 index ea4836e..272d72a 100644
29398 --- a/drivers/block/drbd/drbd_receiver.c
29399 +++ b/drivers/block/drbd/drbd_receiver.c
29400 @@ -893,7 +893,7 @@ retry:
29401 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29402 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29403
29404 - atomic_set(&mdev->packet_seq, 0);
29405 + atomic_set_unchecked(&mdev->packet_seq, 0);
29406 mdev->peer_seq = 0;
29407
29408 if (drbd_send_protocol(mdev) == -1)
29409 @@ -994,7 +994,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29410 do {
29411 next_epoch = NULL;
29412
29413 - epoch_size = atomic_read(&epoch->epoch_size);
29414 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29415
29416 switch (ev & ~EV_CLEANUP) {
29417 case EV_PUT:
29418 @@ -1030,7 +1030,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29419 rv = FE_DESTROYED;
29420 } else {
29421 epoch->flags = 0;
29422 - atomic_set(&epoch->epoch_size, 0);
29423 + atomic_set_unchecked(&epoch->epoch_size, 0);
29424 /* atomic_set(&epoch->active, 0); is already zero */
29425 if (rv == FE_STILL_LIVE)
29426 rv = FE_RECYCLED;
29427 @@ -1205,14 +1205,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29428 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29429 drbd_flush(mdev);
29430
29431 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29432 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29433 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29434 if (epoch)
29435 break;
29436 }
29437
29438 epoch = mdev->current_epoch;
29439 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29440 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29441
29442 D_ASSERT(atomic_read(&epoch->active) == 0);
29443 D_ASSERT(epoch->flags == 0);
29444 @@ -1224,11 +1224,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29445 }
29446
29447 epoch->flags = 0;
29448 - atomic_set(&epoch->epoch_size, 0);
29449 + atomic_set_unchecked(&epoch->epoch_size, 0);
29450 atomic_set(&epoch->active, 0);
29451
29452 spin_lock(&mdev->epoch_lock);
29453 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29454 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29455 list_add(&epoch->list, &mdev->current_epoch->list);
29456 mdev->current_epoch = epoch;
29457 mdev->epochs++;
29458 @@ -1695,7 +1695,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29459 spin_unlock(&mdev->peer_seq_lock);
29460
29461 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29462 - atomic_inc(&mdev->current_epoch->epoch_size);
29463 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29464 return drbd_drain_block(mdev, data_size);
29465 }
29466
29467 @@ -1721,7 +1721,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29468
29469 spin_lock(&mdev->epoch_lock);
29470 e->epoch = mdev->current_epoch;
29471 - atomic_inc(&e->epoch->epoch_size);
29472 + atomic_inc_unchecked(&e->epoch->epoch_size);
29473 atomic_inc(&e->epoch->active);
29474 spin_unlock(&mdev->epoch_lock);
29475
29476 @@ -3936,7 +3936,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29477 D_ASSERT(list_empty(&mdev->done_ee));
29478
29479 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29480 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29481 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29482 D_ASSERT(list_empty(&mdev->current_epoch->list));
29483 }
29484
29485 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29486 index 3bba655..6151b66 100644
29487 --- a/drivers/block/loop.c
29488 +++ b/drivers/block/loop.c
29489 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29490 mm_segment_t old_fs = get_fs();
29491
29492 set_fs(get_ds());
29493 - bw = file->f_op->write(file, buf, len, &pos);
29494 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29495 set_fs(old_fs);
29496 if (likely(bw == len))
29497 return 0;
29498 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29499 index ea6f632..eafb192 100644
29500 --- a/drivers/char/Kconfig
29501 +++ b/drivers/char/Kconfig
29502 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29503
29504 config DEVKMEM
29505 bool "/dev/kmem virtual device support"
29506 - default y
29507 + default n
29508 + depends on !GRKERNSEC_KMEM
29509 help
29510 Say Y here if you want to support the /dev/kmem device. The
29511 /dev/kmem device is rarely used, but can be used for certain
29512 @@ -581,6 +582,7 @@ config DEVPORT
29513 bool
29514 depends on !M68K
29515 depends on ISA || PCI
29516 + depends on !GRKERNSEC_KMEM
29517 default y
29518
29519 source "drivers/s390/char/Kconfig"
29520 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29521 index 2e04433..22afc64 100644
29522 --- a/drivers/char/agp/frontend.c
29523 +++ b/drivers/char/agp/frontend.c
29524 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29525 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29526 return -EFAULT;
29527
29528 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29529 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29530 return -EFAULT;
29531
29532 client = agp_find_client_by_pid(reserve.pid);
29533 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29534 index 21cb980..f15107c 100644
29535 --- a/drivers/char/genrtc.c
29536 +++ b/drivers/char/genrtc.c
29537 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29538 switch (cmd) {
29539
29540 case RTC_PLL_GET:
29541 + memset(&pll, 0, sizeof(pll));
29542 if (get_rtc_pll(&pll))
29543 return -EINVAL;
29544 else
29545 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29546 index dfd7876..c0b0885 100644
29547 --- a/drivers/char/hpet.c
29548 +++ b/drivers/char/hpet.c
29549 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29550 }
29551
29552 static int
29553 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29554 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29555 struct hpet_info *info)
29556 {
29557 struct hpet_timer __iomem *timer;
29558 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29559 index 2c29942..604c5ba 100644
29560 --- a/drivers/char/ipmi/ipmi_msghandler.c
29561 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29562 @@ -420,7 +420,7 @@ struct ipmi_smi {
29563 struct proc_dir_entry *proc_dir;
29564 char proc_dir_name[10];
29565
29566 - atomic_t stats[IPMI_NUM_STATS];
29567 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29568
29569 /*
29570 * run_to_completion duplicate of smb_info, smi_info
29571 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29572
29573
29574 #define ipmi_inc_stat(intf, stat) \
29575 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29576 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29577 #define ipmi_get_stat(intf, stat) \
29578 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29579 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29580
29581 static int is_lan_addr(struct ipmi_addr *addr)
29582 {
29583 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29584 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29585 init_waitqueue_head(&intf->waitq);
29586 for (i = 0; i < IPMI_NUM_STATS; i++)
29587 - atomic_set(&intf->stats[i], 0);
29588 + atomic_set_unchecked(&intf->stats[i], 0);
29589
29590 intf->proc_dir = NULL;
29591
29592 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29593 index 1e638ff..a869ef5 100644
29594 --- a/drivers/char/ipmi/ipmi_si_intf.c
29595 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29596 @@ -275,7 +275,7 @@ struct smi_info {
29597 unsigned char slave_addr;
29598
29599 /* Counters and things for the proc filesystem. */
29600 - atomic_t stats[SI_NUM_STATS];
29601 + atomic_unchecked_t stats[SI_NUM_STATS];
29602
29603 struct task_struct *thread;
29604
29605 @@ -284,9 +284,9 @@ struct smi_info {
29606 };
29607
29608 #define smi_inc_stat(smi, stat) \
29609 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29610 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29611 #define smi_get_stat(smi, stat) \
29612 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29613 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29614
29615 #define SI_MAX_PARMS 4
29616
29617 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29618 atomic_set(&new_smi->req_events, 0);
29619 new_smi->run_to_completion = 0;
29620 for (i = 0; i < SI_NUM_STATS; i++)
29621 - atomic_set(&new_smi->stats[i], 0);
29622 + atomic_set_unchecked(&new_smi->stats[i], 0);
29623
29624 new_smi->interrupt_disabled = 1;
29625 atomic_set(&new_smi->stop_operation, 0);
29626 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29627 index 47ff7e4..0c7d340 100644
29628 --- a/drivers/char/mbcs.c
29629 +++ b/drivers/char/mbcs.c
29630 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29631 return 0;
29632 }
29633
29634 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29635 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29636 {
29637 .part_num = MBCS_PART_NUM,
29638 .mfg_num = MBCS_MFG_NUM,
29639 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29640 index 67c3371..ba8429d 100644
29641 --- a/drivers/char/mem.c
29642 +++ b/drivers/char/mem.c
29643 @@ -18,6 +18,7 @@
29644 #include <linux/raw.h>
29645 #include <linux/tty.h>
29646 #include <linux/capability.h>
29647 +#include <linux/security.h>
29648 #include <linux/ptrace.h>
29649 #include <linux/device.h>
29650 #include <linux/highmem.h>
29651 @@ -35,6 +36,10 @@
29652 # include <linux/efi.h>
29653 #endif
29654
29655 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29656 +extern const struct file_operations grsec_fops;
29657 +#endif
29658 +
29659 static inline unsigned long size_inside_page(unsigned long start,
29660 unsigned long size)
29661 {
29662 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29663
29664 while (cursor < to) {
29665 if (!devmem_is_allowed(pfn)) {
29666 +#ifdef CONFIG_GRKERNSEC_KMEM
29667 + gr_handle_mem_readwrite(from, to);
29668 +#else
29669 printk(KERN_INFO
29670 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29671 current->comm, from, to);
29672 +#endif
29673 return 0;
29674 }
29675 cursor += PAGE_SIZE;
29676 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29677 }
29678 return 1;
29679 }
29680 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29681 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29682 +{
29683 + return 0;
29684 +}
29685 #else
29686 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29687 {
29688 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29689
29690 while (count > 0) {
29691 unsigned long remaining;
29692 + char *temp;
29693
29694 sz = size_inside_page(p, count);
29695
29696 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29697 if (!ptr)
29698 return -EFAULT;
29699
29700 - remaining = copy_to_user(buf, ptr, sz);
29701 +#ifdef CONFIG_PAX_USERCOPY
29702 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29703 + if (!temp) {
29704 + unxlate_dev_mem_ptr(p, ptr);
29705 + return -ENOMEM;
29706 + }
29707 + memcpy(temp, ptr, sz);
29708 +#else
29709 + temp = ptr;
29710 +#endif
29711 +
29712 + remaining = copy_to_user(buf, temp, sz);
29713 +
29714 +#ifdef CONFIG_PAX_USERCOPY
29715 + kfree(temp);
29716 +#endif
29717 +
29718 unxlate_dev_mem_ptr(p, ptr);
29719 if (remaining)
29720 return -EFAULT;
29721 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29722 size_t count, loff_t *ppos)
29723 {
29724 unsigned long p = *ppos;
29725 - ssize_t low_count, read, sz;
29726 + ssize_t low_count, read, sz, err = 0;
29727 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29728 - int err = 0;
29729
29730 read = 0;
29731 if (p < (unsigned long) high_memory) {
29732 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29733 }
29734 #endif
29735 while (low_count > 0) {
29736 + char *temp;
29737 +
29738 sz = size_inside_page(p, low_count);
29739
29740 /*
29741 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29742 */
29743 kbuf = xlate_dev_kmem_ptr((char *)p);
29744
29745 - if (copy_to_user(buf, kbuf, sz))
29746 +#ifdef CONFIG_PAX_USERCOPY
29747 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29748 + if (!temp)
29749 + return -ENOMEM;
29750 + memcpy(temp, kbuf, sz);
29751 +#else
29752 + temp = kbuf;
29753 +#endif
29754 +
29755 + err = copy_to_user(buf, temp, sz);
29756 +
29757 +#ifdef CONFIG_PAX_USERCOPY
29758 + kfree(temp);
29759 +#endif
29760 +
29761 + if (err)
29762 return -EFAULT;
29763 buf += sz;
29764 p += sz;
29765 @@ -831,6 +878,9 @@ static const struct memdev {
29766 #ifdef CONFIG_CRASH_DUMP
29767 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29768 #endif
29769 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29770 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29771 +#endif
29772 };
29773
29774 static int memory_open(struct inode *inode, struct file *filp)
29775 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29776 index 9df78e2..01ba9ae 100644
29777 --- a/drivers/char/nvram.c
29778 +++ b/drivers/char/nvram.c
29779 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29780
29781 spin_unlock_irq(&rtc_lock);
29782
29783 - if (copy_to_user(buf, contents, tmp - contents))
29784 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29785 return -EFAULT;
29786
29787 *ppos = i;
29788 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
29789 index 0a484b4..f48ccd1 100644
29790 --- a/drivers/char/pcmcia/synclink_cs.c
29791 +++ b/drivers/char/pcmcia/synclink_cs.c
29792 @@ -2340,9 +2340,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29793
29794 if (debug_level >= DEBUG_LEVEL_INFO)
29795 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
29796 - __FILE__,__LINE__, info->device_name, port->count);
29797 + __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
29798
29799 - WARN_ON(!port->count);
29800 + WARN_ON(!atomic_read(&port->count));
29801
29802 if (tty_port_close_start(port, tty, filp) == 0)
29803 goto cleanup;
29804 @@ -2360,7 +2360,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29805 cleanup:
29806 if (debug_level >= DEBUG_LEVEL_INFO)
29807 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
29808 - tty->driver->name, port->count);
29809 + tty->driver->name, atomic_read(&port->count));
29810 }
29811
29812 /* Wait until the transmitter is empty.
29813 @@ -2502,7 +2502,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29814
29815 if (debug_level >= DEBUG_LEVEL_INFO)
29816 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
29817 - __FILE__,__LINE__,tty->driver->name, port->count);
29818 + __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
29819
29820 /* If port is closing, signal caller to try again */
29821 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
29822 @@ -2522,11 +2522,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29823 goto cleanup;
29824 }
29825 spin_lock(&port->lock);
29826 - port->count++;
29827 + atomic_inc(&port->count);
29828 spin_unlock(&port->lock);
29829 spin_unlock_irqrestore(&info->netlock, flags);
29830
29831 - if (port->count == 1) {
29832 + if (atomic_read(&port->count) == 1) {
29833 /* 1st open on this device, init hardware */
29834 retval = startup(info, tty);
29835 if (retval < 0)
29836 @@ -3891,7 +3891,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
29837 unsigned short new_crctype;
29838
29839 /* return error if TTY interface open */
29840 - if (info->port.count)
29841 + if (atomic_read(&info->port.count))
29842 return -EBUSY;
29843
29844 switch (encoding)
29845 @@ -3994,7 +3994,7 @@ static int hdlcdev_open(struct net_device *dev)
29846
29847 /* arbitrate between network and tty opens */
29848 spin_lock_irqsave(&info->netlock, flags);
29849 - if (info->port.count != 0 || info->netcount != 0) {
29850 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
29851 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
29852 spin_unlock_irqrestore(&info->netlock, flags);
29853 return -EBUSY;
29854 @@ -4083,7 +4083,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
29855 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
29856
29857 /* return error if TTY interface open */
29858 - if (info->port.count)
29859 + if (atomic_read(&info->port.count))
29860 return -EBUSY;
29861
29862 if (cmd != SIOCWANDEV)
29863 diff --git a/drivers/char/random.c b/drivers/char/random.c
29864 index d98b2a6..f0ceb97 100644
29865 --- a/drivers/char/random.c
29866 +++ b/drivers/char/random.c
29867 @@ -272,8 +272,13 @@
29868 /*
29869 * Configuration information
29870 */
29871 +#ifdef CONFIG_GRKERNSEC_RANDNET
29872 +#define INPUT_POOL_WORDS 512
29873 +#define OUTPUT_POOL_WORDS 128
29874 +#else
29875 #define INPUT_POOL_WORDS 128
29876 #define OUTPUT_POOL_WORDS 32
29877 +#endif
29878 #define SEC_XFER_SIZE 512
29879 #define EXTRACT_SIZE 10
29880
29881 @@ -313,10 +318,17 @@ static struct poolinfo {
29882 int poolwords;
29883 int tap1, tap2, tap3, tap4, tap5;
29884 } poolinfo_table[] = {
29885 +#ifdef CONFIG_GRKERNSEC_RANDNET
29886 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29887 + { 512, 411, 308, 208, 104, 1 },
29888 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29889 + { 128, 103, 76, 51, 25, 1 },
29890 +#else
29891 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29892 { 128, 103, 76, 51, 25, 1 },
29893 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29894 { 32, 26, 20, 14, 7, 1 },
29895 +#endif
29896 #if 0
29897 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29898 { 2048, 1638, 1231, 819, 411, 1 },
29899 @@ -527,8 +539,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
29900 input_rotate += i ? 7 : 14;
29901 }
29902
29903 - ACCESS_ONCE(r->input_rotate) = input_rotate;
29904 - ACCESS_ONCE(r->add_ptr) = i;
29905 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
29906 + ACCESS_ONCE_RW(r->add_ptr) = i;
29907 smp_wmb();
29908
29909 if (out)
29910 @@ -799,6 +811,17 @@ void add_disk_randomness(struct gendisk *disk)
29911 }
29912 #endif
29913
29914 +#ifdef CONFIG_PAX_LATENT_ENTROPY
29915 +u64 latent_entropy;
29916 +
29917 +__init void transfer_latent_entropy(void)
29918 +{
29919 + mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29920 + mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29921 +// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29922 +}
29923 +#endif
29924 +
29925 /*********************************************************************
29926 *
29927 * Entropy extraction routines
29928 @@ -1008,7 +1031,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29929
29930 extract_buf(r, tmp);
29931 i = min_t(int, nbytes, EXTRACT_SIZE);
29932 - if (copy_to_user(buf, tmp, i)) {
29933 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29934 ret = -EFAULT;
29935 break;
29936 }
29937 @@ -1342,7 +1365,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29938 #include <linux/sysctl.h>
29939
29940 static int min_read_thresh = 8, min_write_thresh;
29941 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29942 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29943 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29944 static char sysctl_bootid[16];
29945
29946 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29947 index 45713f0..8286d21 100644
29948 --- a/drivers/char/sonypi.c
29949 +++ b/drivers/char/sonypi.c
29950 @@ -54,6 +54,7 @@
29951
29952 #include <asm/uaccess.h>
29953 #include <asm/io.h>
29954 +#include <asm/local.h>
29955
29956 #include <linux/sonypi.h>
29957
29958 @@ -490,7 +491,7 @@ static struct sonypi_device {
29959 spinlock_t fifo_lock;
29960 wait_queue_head_t fifo_proc_list;
29961 struct fasync_struct *fifo_async;
29962 - int open_count;
29963 + local_t open_count;
29964 int model;
29965 struct input_dev *input_jog_dev;
29966 struct input_dev *input_key_dev;
29967 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29968 static int sonypi_misc_release(struct inode *inode, struct file *file)
29969 {
29970 mutex_lock(&sonypi_device.lock);
29971 - sonypi_device.open_count--;
29972 + local_dec(&sonypi_device.open_count);
29973 mutex_unlock(&sonypi_device.lock);
29974 return 0;
29975 }
29976 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29977 {
29978 mutex_lock(&sonypi_device.lock);
29979 /* Flush input queue on first open */
29980 - if (!sonypi_device.open_count)
29981 + if (!local_read(&sonypi_device.open_count))
29982 kfifo_reset(&sonypi_device.fifo);
29983 - sonypi_device.open_count++;
29984 + local_inc(&sonypi_device.open_count);
29985 mutex_unlock(&sonypi_device.lock);
29986
29987 return 0;
29988 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29989 index 08427ab..1ab10b7 100644
29990 --- a/drivers/char/tpm/tpm.c
29991 +++ b/drivers/char/tpm/tpm.c
29992 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29993 chip->vendor.req_complete_val)
29994 goto out_recv;
29995
29996 - if ((status == chip->vendor.req_canceled)) {
29997 + if (status == chip->vendor.req_canceled) {
29998 dev_err(chip->dev, "Operation Canceled\n");
29999 rc = -ECANCELED;
30000 goto out;
30001 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
30002 index 0636520..169c1d0 100644
30003 --- a/drivers/char/tpm/tpm_bios.c
30004 +++ b/drivers/char/tpm/tpm_bios.c
30005 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
30006 event = addr;
30007
30008 if ((event->event_type == 0 && event->event_size == 0) ||
30009 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
30010 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
30011 return NULL;
30012
30013 return addr;
30014 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
30015 return NULL;
30016
30017 if ((event->event_type == 0 && event->event_size == 0) ||
30018 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
30019 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
30020 return NULL;
30021
30022 (*pos)++;
30023 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
30024 int i;
30025
30026 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
30027 - seq_putc(m, data[i]);
30028 + if (!seq_putc(m, data[i]))
30029 + return -EFAULT;
30030
30031 return 0;
30032 }
30033 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
30034 log->bios_event_log_end = log->bios_event_log + len;
30035
30036 virt = acpi_os_map_memory(start, len);
30037 + if (!virt) {
30038 + kfree(log->bios_event_log);
30039 + log->bios_event_log = NULL;
30040 + return -EFAULT;
30041 + }
30042
30043 - memcpy(log->bios_event_log, virt, len);
30044 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
30045
30046 acpi_os_unmap_memory(virt, len);
30047 return 0;
30048 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
30049 index cdf2f54..e55c197 100644
30050 --- a/drivers/char/virtio_console.c
30051 +++ b/drivers/char/virtio_console.c
30052 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
30053 if (to_user) {
30054 ssize_t ret;
30055
30056 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
30057 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
30058 if (ret)
30059 return -EFAULT;
30060 } else {
30061 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
30062 if (!port_has_data(port) && !port->host_connected)
30063 return 0;
30064
30065 - return fill_readbuf(port, ubuf, count, true);
30066 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
30067 }
30068
30069 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
30070 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
30071 index 97f5064..202b6e6 100644
30072 --- a/drivers/edac/edac_pci_sysfs.c
30073 +++ b/drivers/edac/edac_pci_sysfs.c
30074 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
30075 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
30076 static int edac_pci_poll_msec = 1000; /* one second workq period */
30077
30078 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
30079 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30080 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
30081 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
30082
30083 static struct kobject *edac_pci_top_main_kobj;
30084 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
30085 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30086 edac_printk(KERN_CRIT, EDAC_PCI,
30087 "Signaled System Error on %s\n",
30088 pci_name(dev));
30089 - atomic_inc(&pci_nonparity_count);
30090 + atomic_inc_unchecked(&pci_nonparity_count);
30091 }
30092
30093 if (status & (PCI_STATUS_PARITY)) {
30094 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30095 "Master Data Parity Error on %s\n",
30096 pci_name(dev));
30097
30098 - atomic_inc(&pci_parity_count);
30099 + atomic_inc_unchecked(&pci_parity_count);
30100 }
30101
30102 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30103 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30104 "Detected Parity Error on %s\n",
30105 pci_name(dev));
30106
30107 - atomic_inc(&pci_parity_count);
30108 + atomic_inc_unchecked(&pci_parity_count);
30109 }
30110 }
30111
30112 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30113 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
30114 "Signaled System Error on %s\n",
30115 pci_name(dev));
30116 - atomic_inc(&pci_nonparity_count);
30117 + atomic_inc_unchecked(&pci_nonparity_count);
30118 }
30119
30120 if (status & (PCI_STATUS_PARITY)) {
30121 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30122 "Master Data Parity Error on "
30123 "%s\n", pci_name(dev));
30124
30125 - atomic_inc(&pci_parity_count);
30126 + atomic_inc_unchecked(&pci_parity_count);
30127 }
30128
30129 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30130 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30131 "Detected Parity Error on %s\n",
30132 pci_name(dev));
30133
30134 - atomic_inc(&pci_parity_count);
30135 + atomic_inc_unchecked(&pci_parity_count);
30136 }
30137 }
30138 }
30139 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
30140 if (!check_pci_errors)
30141 return;
30142
30143 - before_count = atomic_read(&pci_parity_count);
30144 + before_count = atomic_read_unchecked(&pci_parity_count);
30145
30146 /* scan all PCI devices looking for a Parity Error on devices and
30147 * bridges.
30148 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
30149 /* Only if operator has selected panic on PCI Error */
30150 if (edac_pci_get_panic_on_pe()) {
30151 /* If the count is different 'after' from 'before' */
30152 - if (before_count != atomic_read(&pci_parity_count))
30153 + if (before_count != atomic_read_unchecked(&pci_parity_count))
30154 panic("EDAC: PCI Parity Error");
30155 }
30156 }
30157 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30158 index 8c87a5e..a19cbd7 100644
30159 --- a/drivers/edac/mce_amd.h
30160 +++ b/drivers/edac/mce_amd.h
30161 @@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
30162 struct amd_decoder_ops {
30163 bool (*dc_mce)(u16, u8);
30164 bool (*ic_mce)(u16, u8);
30165 -};
30166 +} __no_const;
30167
30168 void amd_report_gart_errors(bool);
30169 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30170 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30171 index 57ea7f4..789e3c3 100644
30172 --- a/drivers/firewire/core-card.c
30173 +++ b/drivers/firewire/core-card.c
30174 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
30175
30176 void fw_core_remove_card(struct fw_card *card)
30177 {
30178 - struct fw_card_driver dummy_driver = dummy_driver_template;
30179 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
30180
30181 card->driver->update_phy_reg(card, 4,
30182 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30183 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30184 index 2783f69..9f4b0cc 100644
30185 --- a/drivers/firewire/core-cdev.c
30186 +++ b/drivers/firewire/core-cdev.c
30187 @@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
30188 int ret;
30189
30190 if ((request->channels == 0 && request->bandwidth == 0) ||
30191 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30192 - request->bandwidth < 0)
30193 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30194 return -EINVAL;
30195
30196 r = kmalloc(sizeof(*r), GFP_KERNEL);
30197 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30198 index 780708d..ad60a66 100644
30199 --- a/drivers/firewire/core-transaction.c
30200 +++ b/drivers/firewire/core-transaction.c
30201 @@ -37,6 +37,7 @@
30202 #include <linux/timer.h>
30203 #include <linux/types.h>
30204 #include <linux/workqueue.h>
30205 +#include <linux/sched.h>
30206
30207 #include <asm/byteorder.h>
30208
30209 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30210 index 515a42c..5ecf3ba 100644
30211 --- a/drivers/firewire/core.h
30212 +++ b/drivers/firewire/core.h
30213 @@ -111,6 +111,7 @@ struct fw_card_driver {
30214
30215 int (*stop_iso)(struct fw_iso_context *ctx);
30216 };
30217 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30218
30219 void fw_card_initialize(struct fw_card *card,
30220 const struct fw_card_driver *driver, struct device *device);
30221 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30222 index b298158..7ed8432 100644
30223 --- a/drivers/firmware/dmi_scan.c
30224 +++ b/drivers/firmware/dmi_scan.c
30225 @@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
30226 }
30227 }
30228 else {
30229 - /*
30230 - * no iounmap() for that ioremap(); it would be a no-op, but
30231 - * it's so early in setup that sucker gets confused into doing
30232 - * what it shouldn't if we actually call it.
30233 - */
30234 p = dmi_ioremap(0xF0000, 0x10000);
30235 if (p == NULL)
30236 goto error;
30237 @@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30238 if (buf == NULL)
30239 return -1;
30240
30241 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30242 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30243
30244 iounmap(buf);
30245 return 0;
30246 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30247 index 82d5c20..44a7177 100644
30248 --- a/drivers/gpio/gpio-vr41xx.c
30249 +++ b/drivers/gpio/gpio-vr41xx.c
30250 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30251 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30252 maskl, pendl, maskh, pendh);
30253
30254 - atomic_inc(&irq_err_count);
30255 + atomic_inc_unchecked(&irq_err_count);
30256
30257 return -EINVAL;
30258 }
30259 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30260 index 3252e70..b5314ace 100644
30261 --- a/drivers/gpu/drm/drm_crtc_helper.c
30262 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30263 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30264 struct drm_crtc *tmp;
30265 int crtc_mask = 1;
30266
30267 - WARN(!crtc, "checking null crtc?\n");
30268 + BUG_ON(!crtc);
30269
30270 dev = crtc->dev;
30271
30272 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30273 index 8a9d079..606cdd5 100644
30274 --- a/drivers/gpu/drm/drm_drv.c
30275 +++ b/drivers/gpu/drm/drm_drv.c
30276 @@ -318,7 +318,7 @@ module_exit(drm_core_exit);
30277 /**
30278 * Copy and IOCTL return string to user space
30279 */
30280 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30281 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30282 {
30283 int len;
30284
30285 @@ -401,7 +401,7 @@ long drm_ioctl(struct file *filp,
30286 return -ENODEV;
30287
30288 atomic_inc(&dev->ioctl_count);
30289 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30290 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30291 ++file_priv->ioctl_count;
30292
30293 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30294 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30295 index 123de28..43a0897 100644
30296 --- a/drivers/gpu/drm/drm_fops.c
30297 +++ b/drivers/gpu/drm/drm_fops.c
30298 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30299 }
30300
30301 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30302 - atomic_set(&dev->counts[i], 0);
30303 + atomic_set_unchecked(&dev->counts[i], 0);
30304
30305 dev->sigdata.lock = NULL;
30306
30307 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30308
30309 retcode = drm_open_helper(inode, filp, dev);
30310 if (!retcode) {
30311 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30312 - if (!dev->open_count++)
30313 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30314 + if (local_inc_return(&dev->open_count) == 1)
30315 retcode = drm_setup(dev);
30316 }
30317 if (!retcode) {
30318 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30319
30320 mutex_lock(&drm_global_mutex);
30321
30322 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30323 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30324
30325 if (dev->driver->preclose)
30326 dev->driver->preclose(dev, file_priv);
30327 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30328 * Begin inline drm_release
30329 */
30330
30331 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30332 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30333 task_pid_nr(current),
30334 (long)old_encode_dev(file_priv->minor->device),
30335 - dev->open_count);
30336 + local_read(&dev->open_count));
30337
30338 /* Release any auth tokens that might point to this file_priv,
30339 (do that under the drm_global_mutex) */
30340 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30341 * End inline drm_release
30342 */
30343
30344 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30345 - if (!--dev->open_count) {
30346 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30347 + if (local_dec_and_test(&dev->open_count)) {
30348 if (atomic_read(&dev->ioctl_count)) {
30349 DRM_ERROR("Device busy: %d\n",
30350 atomic_read(&dev->ioctl_count));
30351 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30352 index c87dc96..326055d 100644
30353 --- a/drivers/gpu/drm/drm_global.c
30354 +++ b/drivers/gpu/drm/drm_global.c
30355 @@ -36,7 +36,7 @@
30356 struct drm_global_item {
30357 struct mutex mutex;
30358 void *object;
30359 - int refcount;
30360 + atomic_t refcount;
30361 };
30362
30363 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30364 @@ -49,7 +49,7 @@ void drm_global_init(void)
30365 struct drm_global_item *item = &glob[i];
30366 mutex_init(&item->mutex);
30367 item->object = NULL;
30368 - item->refcount = 0;
30369 + atomic_set(&item->refcount, 0);
30370 }
30371 }
30372
30373 @@ -59,7 +59,7 @@ void drm_global_release(void)
30374 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30375 struct drm_global_item *item = &glob[i];
30376 BUG_ON(item->object != NULL);
30377 - BUG_ON(item->refcount != 0);
30378 + BUG_ON(atomic_read(&item->refcount) != 0);
30379 }
30380 }
30381
30382 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30383 void *object;
30384
30385 mutex_lock(&item->mutex);
30386 - if (item->refcount == 0) {
30387 + if (atomic_read(&item->refcount) == 0) {
30388 item->object = kzalloc(ref->size, GFP_KERNEL);
30389 if (unlikely(item->object == NULL)) {
30390 ret = -ENOMEM;
30391 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30392 goto out_err;
30393
30394 }
30395 - ++item->refcount;
30396 + atomic_inc(&item->refcount);
30397 ref->object = item->object;
30398 object = item->object;
30399 mutex_unlock(&item->mutex);
30400 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30401 struct drm_global_item *item = &glob[ref->global_type];
30402
30403 mutex_lock(&item->mutex);
30404 - BUG_ON(item->refcount == 0);
30405 + BUG_ON(atomic_read(&item->refcount) == 0);
30406 BUG_ON(ref->object != item->object);
30407 - if (--item->refcount == 0) {
30408 + if (atomic_dec_and_test(&item->refcount)) {
30409 ref->release(ref);
30410 item->object = NULL;
30411 }
30412 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30413 index ab1162d..42587b2 100644
30414 --- a/drivers/gpu/drm/drm_info.c
30415 +++ b/drivers/gpu/drm/drm_info.c
30416 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30417 struct drm_local_map *map;
30418 struct drm_map_list *r_list;
30419
30420 - /* Hardcoded from _DRM_FRAME_BUFFER,
30421 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30422 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30423 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30424 + static const char * const types[] = {
30425 + [_DRM_FRAME_BUFFER] = "FB",
30426 + [_DRM_REGISTERS] = "REG",
30427 + [_DRM_SHM] = "SHM",
30428 + [_DRM_AGP] = "AGP",
30429 + [_DRM_SCATTER_GATHER] = "SG",
30430 + [_DRM_CONSISTENT] = "PCI",
30431 + [_DRM_GEM] = "GEM" };
30432 const char *type;
30433 int i;
30434
30435 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30436 map = r_list->map;
30437 if (!map)
30438 continue;
30439 - if (map->type < 0 || map->type > 5)
30440 + if (map->type >= ARRAY_SIZE(types))
30441 type = "??";
30442 else
30443 type = types[map->type];
30444 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30445 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30446 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30447 vma->vm_flags & VM_IO ? 'i' : '-',
30448 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30449 + 0);
30450 +#else
30451 vma->vm_pgoff);
30452 +#endif
30453
30454 #if defined(__i386__)
30455 pgprot = pgprot_val(vma->vm_page_prot);
30456 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30457 index 637fcc3..e890b33 100644
30458 --- a/drivers/gpu/drm/drm_ioc32.c
30459 +++ b/drivers/gpu/drm/drm_ioc32.c
30460 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30461 request = compat_alloc_user_space(nbytes);
30462 if (!access_ok(VERIFY_WRITE, request, nbytes))
30463 return -EFAULT;
30464 - list = (struct drm_buf_desc *) (request + 1);
30465 + list = (struct drm_buf_desc __user *) (request + 1);
30466
30467 if (__put_user(count, &request->count)
30468 || __put_user(list, &request->list))
30469 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30470 request = compat_alloc_user_space(nbytes);
30471 if (!access_ok(VERIFY_WRITE, request, nbytes))
30472 return -EFAULT;
30473 - list = (struct drm_buf_pub *) (request + 1);
30474 + list = (struct drm_buf_pub __user *) (request + 1);
30475
30476 if (__put_user(count, &request->count)
30477 || __put_user(list, &request->list))
30478 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30479 index 64a62c6..ceab35e 100644
30480 --- a/drivers/gpu/drm/drm_ioctl.c
30481 +++ b/drivers/gpu/drm/drm_ioctl.c
30482 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30483 stats->data[i].value =
30484 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30485 else
30486 - stats->data[i].value = atomic_read(&dev->counts[i]);
30487 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30488 stats->data[i].type = dev->types[i];
30489 }
30490
30491 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30492 index 5211520..c744d85 100644
30493 --- a/drivers/gpu/drm/drm_lock.c
30494 +++ b/drivers/gpu/drm/drm_lock.c
30495 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30496 if (drm_lock_take(&master->lock, lock->context)) {
30497 master->lock.file_priv = file_priv;
30498 master->lock.lock_time = jiffies;
30499 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30500 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30501 break; /* Got lock */
30502 }
30503
30504 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30505 return -EINVAL;
30506 }
30507
30508 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30509 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30510
30511 if (drm_lock_free(&master->lock, lock->context)) {
30512 /* FIXME: Should really bail out here. */
30513 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30514 index 21bcd4a..8e074e0 100644
30515 --- a/drivers/gpu/drm/drm_stub.c
30516 +++ b/drivers/gpu/drm/drm_stub.c
30517 @@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
30518
30519 drm_device_set_unplugged(dev);
30520
30521 - if (dev->open_count == 0) {
30522 + if (local_read(&dev->open_count) == 0) {
30523 drm_put_dev(dev);
30524 }
30525 mutex_unlock(&drm_global_mutex);
30526 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30527 index fa94391..ed26ec8 100644
30528 --- a/drivers/gpu/drm/i810/i810_dma.c
30529 +++ b/drivers/gpu/drm/i810/i810_dma.c
30530 @@ -943,8 +943,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30531 dma->buflist[vertex->idx],
30532 vertex->discard, vertex->used);
30533
30534 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30535 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30536 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30537 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30538 sarea_priv->last_enqueue = dev_priv->counter - 1;
30539 sarea_priv->last_dispatch = (int)hw_status[5];
30540
30541 @@ -1104,8 +1104,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30542 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30543 mc->last_render);
30544
30545 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30546 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30547 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30548 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30549 sarea_priv->last_enqueue = dev_priv->counter - 1;
30550 sarea_priv->last_dispatch = (int)hw_status[5];
30551
30552 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30553 index c9339f4..f5e1b9d 100644
30554 --- a/drivers/gpu/drm/i810/i810_drv.h
30555 +++ b/drivers/gpu/drm/i810/i810_drv.h
30556 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30557 int page_flipping;
30558
30559 wait_queue_head_t irq_queue;
30560 - atomic_t irq_received;
30561 - atomic_t irq_emitted;
30562 + atomic_unchecked_t irq_received;
30563 + atomic_unchecked_t irq_emitted;
30564
30565 int front_offset;
30566 } drm_i810_private_t;
30567 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30568 index 5363e9c..59360d1 100644
30569 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30570 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30571 @@ -518,7 +518,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30572 I915_READ(GTIMR));
30573 }
30574 seq_printf(m, "Interrupts received: %d\n",
30575 - atomic_read(&dev_priv->irq_received));
30576 + atomic_read_unchecked(&dev_priv->irq_received));
30577 for (i = 0; i < I915_NUM_RINGS; i++) {
30578 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30579 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30580 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30581 index 36822b9..b725e1b 100644
30582 --- a/drivers/gpu/drm/i915/i915_dma.c
30583 +++ b/drivers/gpu/drm/i915/i915_dma.c
30584 @@ -1266,7 +1266,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30585 bool can_switch;
30586
30587 spin_lock(&dev->count_lock);
30588 - can_switch = (dev->open_count == 0);
30589 + can_switch = (local_read(&dev->open_count) == 0);
30590 spin_unlock(&dev->count_lock);
30591 return can_switch;
30592 }
30593 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30594 index b0b676a..d107105 100644
30595 --- a/drivers/gpu/drm/i915/i915_drv.h
30596 +++ b/drivers/gpu/drm/i915/i915_drv.h
30597 @@ -268,7 +268,7 @@ struct drm_i915_display_funcs {
30598 /* render clock increase/decrease */
30599 /* display clock increase/decrease */
30600 /* pll clock increase/decrease */
30601 -};
30602 +} __no_const;
30603
30604 struct intel_device_info {
30605 u8 gen;
30606 @@ -386,7 +386,7 @@ typedef struct drm_i915_private {
30607 int current_page;
30608 int page_flipping;
30609
30610 - atomic_t irq_received;
30611 + atomic_unchecked_t irq_received;
30612
30613 /* protects the irq masks */
30614 spinlock_t irq_lock;
30615 @@ -985,7 +985,7 @@ struct drm_i915_gem_object {
30616 * will be page flipped away on the next vblank. When it
30617 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30618 */
30619 - atomic_t pending_flip;
30620 + atomic_unchecked_t pending_flip;
30621 };
30622
30623 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30624 @@ -1434,7 +1434,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
30625 struct drm_i915_private *dev_priv, unsigned port);
30626 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30627 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30628 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30629 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30630 {
30631 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30632 }
30633 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30634 index 974a9f1..b3ebd45 100644
30635 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30636 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30637 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30638 i915_gem_clflush_object(obj);
30639
30640 if (obj->base.pending_write_domain)
30641 - cd->flips |= atomic_read(&obj->pending_flip);
30642 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30643
30644 /* The actual obj->write_domain will be updated with
30645 * pending_write_domain after we emit the accumulated flush for all
30646 @@ -916,9 +916,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30647
30648 static int
30649 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30650 - int count)
30651 + unsigned int count)
30652 {
30653 - int i;
30654 + unsigned int i;
30655
30656 for (i = 0; i < count; i++) {
30657 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30658 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30659 index acc91b1..f383084 100644
30660 --- a/drivers/gpu/drm/i915/i915_irq.c
30661 +++ b/drivers/gpu/drm/i915/i915_irq.c
30662 @@ -433,7 +433,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
30663 int vblank = 0;
30664 bool blc_event;
30665
30666 - atomic_inc(&dev_priv->irq_received);
30667 + atomic_inc_unchecked(&dev_priv->irq_received);
30668
30669 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
30670 PIPE_VBLANK_INTERRUPT_STATUS;
30671 @@ -586,7 +586,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30672 irqreturn_t ret = IRQ_NONE;
30673 int i;
30674
30675 - atomic_inc(&dev_priv->irq_received);
30676 + atomic_inc_unchecked(&dev_priv->irq_received);
30677
30678 /* disable master interrupt before clearing iir */
30679 de_ier = I915_READ(DEIER);
30680 @@ -661,7 +661,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30681 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30682 u32 hotplug_mask;
30683
30684 - atomic_inc(&dev_priv->irq_received);
30685 + atomic_inc_unchecked(&dev_priv->irq_received);
30686
30687 /* disable master interrupt before clearing iir */
30688 de_ier = I915_READ(DEIER);
30689 @@ -1646,7 +1646,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30690 {
30691 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30692
30693 - atomic_set(&dev_priv->irq_received, 0);
30694 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30695
30696
30697 I915_WRITE(HWSTAM, 0xeffe);
30698 @@ -1673,7 +1673,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
30699 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30700 int pipe;
30701
30702 - atomic_set(&dev_priv->irq_received, 0);
30703 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30704
30705 /* VLV magic */
30706 I915_WRITE(VLV_IMR, 0);
30707 @@ -1969,7 +1969,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
30708 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30709 int pipe;
30710
30711 - atomic_set(&dev_priv->irq_received, 0);
30712 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30713
30714 for_each_pipe(pipe)
30715 I915_WRITE(PIPESTAT(pipe), 0);
30716 @@ -2020,7 +2020,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
30717 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
30718 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
30719
30720 - atomic_inc(&dev_priv->irq_received);
30721 + atomic_inc_unchecked(&dev_priv->irq_received);
30722
30723 iir = I915_READ16(IIR);
30724 if (iir == 0)
30725 @@ -2105,7 +2105,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
30726 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30727 int pipe;
30728
30729 - atomic_set(&dev_priv->irq_received, 0);
30730 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30731
30732 if (I915_HAS_HOTPLUG(dev)) {
30733 I915_WRITE(PORT_HOTPLUG_EN, 0);
30734 @@ -2200,7 +2200,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
30735 };
30736 int pipe, ret = IRQ_NONE;
30737
30738 - atomic_inc(&dev_priv->irq_received);
30739 + atomic_inc_unchecked(&dev_priv->irq_received);
30740
30741 iir = I915_READ(IIR);
30742 do {
30743 @@ -2326,7 +2326,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
30744 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30745 int pipe;
30746
30747 - atomic_set(&dev_priv->irq_received, 0);
30748 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30749
30750 if (I915_HAS_HOTPLUG(dev)) {
30751 I915_WRITE(PORT_HOTPLUG_EN, 0);
30752 @@ -2436,7 +2436,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
30753 int irq_received;
30754 int ret = IRQ_NONE, pipe;
30755
30756 - atomic_inc(&dev_priv->irq_received);
30757 + atomic_inc_unchecked(&dev_priv->irq_received);
30758
30759 iir = I915_READ(IIR);
30760
30761 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30762 index 2f22bea..bc9e330 100644
30763 --- a/drivers/gpu/drm/i915/intel_display.c
30764 +++ b/drivers/gpu/drm/i915/intel_display.c
30765 @@ -2000,7 +2000,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30766
30767 wait_event(dev_priv->pending_flip_queue,
30768 atomic_read(&dev_priv->mm.wedged) ||
30769 - atomic_read(&obj->pending_flip) == 0);
30770 + atomic_read_unchecked(&obj->pending_flip) == 0);
30771
30772 /* Big Hammer, we also need to ensure that any pending
30773 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30774 @@ -5914,9 +5914,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30775
30776 obj = work->old_fb_obj;
30777
30778 - atomic_clear_mask(1 << intel_crtc->plane,
30779 - &obj->pending_flip.counter);
30780 - if (atomic_read(&obj->pending_flip) == 0)
30781 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30782 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30783 wake_up(&dev_priv->pending_flip_queue);
30784
30785 schedule_work(&work->work);
30786 @@ -6253,7 +6252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30787 /* Block clients from rendering to the new back buffer until
30788 * the flip occurs and the object is no longer visible.
30789 */
30790 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30791 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30792
30793 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30794 if (ret)
30795 @@ -6268,7 +6267,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30796 return 0;
30797
30798 cleanup_pending:
30799 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30800 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30801 drm_gem_object_unreference(&work->old_fb_obj->base);
30802 drm_gem_object_unreference(&obj->base);
30803 mutex_unlock(&dev->struct_mutex);
30804 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30805 index 54558a0..2d97005 100644
30806 --- a/drivers/gpu/drm/mga/mga_drv.h
30807 +++ b/drivers/gpu/drm/mga/mga_drv.h
30808 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30809 u32 clear_cmd;
30810 u32 maccess;
30811
30812 - atomic_t vbl_received; /**< Number of vblanks received. */
30813 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30814 wait_queue_head_t fence_queue;
30815 - atomic_t last_fence_retired;
30816 + atomic_unchecked_t last_fence_retired;
30817 u32 next_fence_to_post;
30818
30819 unsigned int fb_cpp;
30820 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30821 index 2581202..f230a8d9 100644
30822 --- a/drivers/gpu/drm/mga/mga_irq.c
30823 +++ b/drivers/gpu/drm/mga/mga_irq.c
30824 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30825 if (crtc != 0)
30826 return 0;
30827
30828 - return atomic_read(&dev_priv->vbl_received);
30829 + return atomic_read_unchecked(&dev_priv->vbl_received);
30830 }
30831
30832
30833 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30834 /* VBLANK interrupt */
30835 if (status & MGA_VLINEPEN) {
30836 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30837 - atomic_inc(&dev_priv->vbl_received);
30838 + atomic_inc_unchecked(&dev_priv->vbl_received);
30839 drm_handle_vblank(dev, 0);
30840 handled = 1;
30841 }
30842 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30843 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30844 MGA_WRITE(MGA_PRIMEND, prim_end);
30845
30846 - atomic_inc(&dev_priv->last_fence_retired);
30847 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30848 DRM_WAKEUP(&dev_priv->fence_queue);
30849 handled = 1;
30850 }
30851 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30852 * using fences.
30853 */
30854 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30855 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30856 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30857 - *sequence) <= (1 << 23)));
30858
30859 *sequence = cur_fence;
30860 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30861 index 2f11e16..191267e 100644
30862 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30863 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30864 @@ -5340,7 +5340,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30865 struct bit_table {
30866 const char id;
30867 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30868 -};
30869 +} __no_const;
30870
30871 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30872
30873 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30874 index b863a3a..c55e0dc 100644
30875 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30876 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30877 @@ -302,7 +302,7 @@ struct nouveau_exec_engine {
30878 u32 handle, u16 class);
30879 void (*set_tile_region)(struct drm_device *dev, int i);
30880 void (*tlb_flush)(struct drm_device *, int engine);
30881 -};
30882 +} __no_const;
30883
30884 struct nouveau_instmem_engine {
30885 void *priv;
30886 @@ -324,13 +324,13 @@ struct nouveau_instmem_engine {
30887 struct nouveau_mc_engine {
30888 int (*init)(struct drm_device *dev);
30889 void (*takedown)(struct drm_device *dev);
30890 -};
30891 +} __no_const;
30892
30893 struct nouveau_timer_engine {
30894 int (*init)(struct drm_device *dev);
30895 void (*takedown)(struct drm_device *dev);
30896 uint64_t (*read)(struct drm_device *dev);
30897 -};
30898 +} __no_const;
30899
30900 struct nouveau_fb_engine {
30901 int num_tiles;
30902 @@ -547,7 +547,7 @@ struct nouveau_vram_engine {
30903 void (*put)(struct drm_device *, struct nouveau_mem **);
30904
30905 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30906 -};
30907 +} __no_const;
30908
30909 struct nouveau_engine {
30910 struct nouveau_instmem_engine instmem;
30911 @@ -693,7 +693,7 @@ struct drm_nouveau_private {
30912 struct drm_global_reference mem_global_ref;
30913 struct ttm_bo_global_ref bo_global_ref;
30914 struct ttm_bo_device bdev;
30915 - atomic_t validate_sequence;
30916 + atomic_unchecked_t validate_sequence;
30917 int (*move)(struct nouveau_channel *,
30918 struct ttm_buffer_object *,
30919 struct ttm_mem_reg *, struct ttm_mem_reg *);
30920 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30921 index 30f5423..abca136 100644
30922 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30923 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30924 @@ -319,7 +319,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30925 int trycnt = 0;
30926 int ret, i;
30927
30928 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30929 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30930 retry:
30931 if (++trycnt > 100000) {
30932 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30933 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30934 index 22a90a0..8ccea014 100644
30935 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30936 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30937 @@ -490,7 +490,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30938 bool can_switch;
30939
30940 spin_lock(&dev->count_lock);
30941 - can_switch = (dev->open_count == 0);
30942 + can_switch = (local_read(&dev->open_count) == 0);
30943 spin_unlock(&dev->count_lock);
30944 return can_switch;
30945 }
30946 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30947 index a9514ea..369d511 100644
30948 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30949 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30950 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30951 }
30952
30953 if (nv_encoder->dcb->type == OUTPUT_DP) {
30954 - struct dp_train_func func = {
30955 + static struct dp_train_func func = {
30956 .link_set = nv50_sor_dp_link_set,
30957 .train_set = nv50_sor_dp_train_set,
30958 .train_adj = nv50_sor_dp_train_adj
30959 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30960 index c50b075..6b07dfc 100644
30961 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30962 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30963 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30964 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30965
30966 if (nv_encoder->dcb->type == OUTPUT_DP) {
30967 - struct dp_train_func func = {
30968 + static struct dp_train_func func = {
30969 .link_set = nvd0_sor_dp_link_set,
30970 .train_set = nvd0_sor_dp_train_set,
30971 .train_adj = nvd0_sor_dp_train_adj
30972 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30973 index bcac90b..53bfc76 100644
30974 --- a/drivers/gpu/drm/r128/r128_cce.c
30975 +++ b/drivers/gpu/drm/r128/r128_cce.c
30976 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30977
30978 /* GH: Simple idle check.
30979 */
30980 - atomic_set(&dev_priv->idle_count, 0);
30981 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30982
30983 /* We don't support anything other than bus-mastering ring mode,
30984 * but the ring can be in either AGP or PCI space for the ring
30985 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30986 index 930c71b..499aded 100644
30987 --- a/drivers/gpu/drm/r128/r128_drv.h
30988 +++ b/drivers/gpu/drm/r128/r128_drv.h
30989 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30990 int is_pci;
30991 unsigned long cce_buffers_offset;
30992
30993 - atomic_t idle_count;
30994 + atomic_unchecked_t idle_count;
30995
30996 int page_flipping;
30997 int current_page;
30998 u32 crtc_offset;
30999 u32 crtc_offset_cntl;
31000
31001 - atomic_t vbl_received;
31002 + atomic_unchecked_t vbl_received;
31003
31004 u32 color_fmt;
31005 unsigned int front_offset;
31006 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
31007 index 429d5a0..7e899ed 100644
31008 --- a/drivers/gpu/drm/r128/r128_irq.c
31009 +++ b/drivers/gpu/drm/r128/r128_irq.c
31010 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
31011 if (crtc != 0)
31012 return 0;
31013
31014 - return atomic_read(&dev_priv->vbl_received);
31015 + return atomic_read_unchecked(&dev_priv->vbl_received);
31016 }
31017
31018 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31019 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31020 /* VBLANK interrupt */
31021 if (status & R128_CRTC_VBLANK_INT) {
31022 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
31023 - atomic_inc(&dev_priv->vbl_received);
31024 + atomic_inc_unchecked(&dev_priv->vbl_received);
31025 drm_handle_vblank(dev, 0);
31026 return IRQ_HANDLED;
31027 }
31028 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
31029 index a9e33ce..09edd4b 100644
31030 --- a/drivers/gpu/drm/r128/r128_state.c
31031 +++ b/drivers/gpu/drm/r128/r128_state.c
31032 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
31033
31034 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
31035 {
31036 - if (atomic_read(&dev_priv->idle_count) == 0)
31037 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
31038 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
31039 else
31040 - atomic_set(&dev_priv->idle_count, 0);
31041 + atomic_set_unchecked(&dev_priv->idle_count, 0);
31042 }
31043
31044 #endif
31045 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31046 index 5a82b6b..9e69c73 100644
31047 --- a/drivers/gpu/drm/radeon/mkregtable.c
31048 +++ b/drivers/gpu/drm/radeon/mkregtable.c
31049 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
31050 regex_t mask_rex;
31051 regmatch_t match[4];
31052 char buf[1024];
31053 - size_t end;
31054 + long end;
31055 int len;
31056 int done = 0;
31057 int r;
31058 unsigned o;
31059 struct offset *offset;
31060 char last_reg_s[10];
31061 - int last_reg;
31062 + unsigned long last_reg;
31063
31064 if (regcomp
31065 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31066 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31067 index 5c8a0bf..b7a311c 100644
31068 --- a/drivers/gpu/drm/radeon/radeon.h
31069 +++ b/drivers/gpu/drm/radeon/radeon.h
31070 @@ -729,7 +729,7 @@ struct r600_blit_cp_primitives {
31071 int x2, int y2);
31072 void (*draw_auto)(struct radeon_device *rdev);
31073 void (*set_default_state)(struct radeon_device *rdev);
31074 -};
31075 +} __no_const;
31076
31077 struct r600_blit {
31078 struct radeon_bo *shader_obj;
31079 @@ -1230,7 +1230,7 @@ struct radeon_asic {
31080 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31081 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31082 } pflip;
31083 -};
31084 +} __no_const;
31085
31086 /*
31087 * Asic structures
31088 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31089 index 8867400..72761bc 100644
31090 --- a/drivers/gpu/drm/radeon/radeon_device.c
31091 +++ b/drivers/gpu/drm/radeon/radeon_device.c
31092 @@ -692,7 +692,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31093 bool can_switch;
31094
31095 spin_lock(&dev->count_lock);
31096 - can_switch = (dev->open_count == 0);
31097 + can_switch = (local_read(&dev->open_count) == 0);
31098 spin_unlock(&dev->count_lock);
31099 return can_switch;
31100 }
31101 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31102 index a1b59ca..86f2d44 100644
31103 --- a/drivers/gpu/drm/radeon/radeon_drv.h
31104 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
31105 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31106
31107 /* SW interrupt */
31108 wait_queue_head_t swi_queue;
31109 - atomic_t swi_emitted;
31110 + atomic_unchecked_t swi_emitted;
31111 int vblank_crtc;
31112 uint32_t irq_enable_reg;
31113 uint32_t r500_disp_irq_reg;
31114 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31115 index 48b7cea..342236f 100644
31116 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31117 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31118 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31119 request = compat_alloc_user_space(sizeof(*request));
31120 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31121 || __put_user(req32.param, &request->param)
31122 - || __put_user((void __user *)(unsigned long)req32.value,
31123 + || __put_user((unsigned long)req32.value,
31124 &request->value))
31125 return -EFAULT;
31126
31127 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31128 index 00da384..32f972d 100644
31129 --- a/drivers/gpu/drm/radeon/radeon_irq.c
31130 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
31131 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31132 unsigned int ret;
31133 RING_LOCALS;
31134
31135 - atomic_inc(&dev_priv->swi_emitted);
31136 - ret = atomic_read(&dev_priv->swi_emitted);
31137 + atomic_inc_unchecked(&dev_priv->swi_emitted);
31138 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31139
31140 BEGIN_RING(4);
31141 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31142 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31143 drm_radeon_private_t *dev_priv =
31144 (drm_radeon_private_t *) dev->dev_private;
31145
31146 - atomic_set(&dev_priv->swi_emitted, 0);
31147 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31148 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31149
31150 dev->max_vblank_count = 0x001fffff;
31151 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31152 index e8422ae..d22d4a8 100644
31153 --- a/drivers/gpu/drm/radeon/radeon_state.c
31154 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31155 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31156 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31157 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31158
31159 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31160 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31161 sarea_priv->nbox * sizeof(depth_boxes[0])))
31162 return -EFAULT;
31163
31164 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31165 {
31166 drm_radeon_private_t *dev_priv = dev->dev_private;
31167 drm_radeon_getparam_t *param = data;
31168 - int value;
31169 + int value = 0;
31170
31171 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31172
31173 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31174 index c94a225..5795d34 100644
31175 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31176 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31177 @@ -852,8 +852,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31178 }
31179 if (unlikely(ttm_vm_ops == NULL)) {
31180 ttm_vm_ops = vma->vm_ops;
31181 - radeon_ttm_vm_ops = *ttm_vm_ops;
31182 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31183 + pax_open_kernel();
31184 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31185 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31186 + pax_close_kernel();
31187 }
31188 vma->vm_ops = &radeon_ttm_vm_ops;
31189 return 0;
31190 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31191 index 159b6a4..fa82487 100644
31192 --- a/drivers/gpu/drm/radeon/rs690.c
31193 +++ b/drivers/gpu/drm/radeon/rs690.c
31194 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31195 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31196 rdev->pm.sideport_bandwidth.full)
31197 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31198 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31199 + read_delay_latency.full = dfixed_const(800 * 1000);
31200 read_delay_latency.full = dfixed_div(read_delay_latency,
31201 rdev->pm.igp_sideport_mclk);
31202 + a.full = dfixed_const(370);
31203 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31204 } else {
31205 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31206 rdev->pm.k8_bandwidth.full)
31207 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31208 index ebc6fac..a8313ed 100644
31209 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31210 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31211 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31212 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31213 struct shrink_control *sc)
31214 {
31215 - static atomic_t start_pool = ATOMIC_INIT(0);
31216 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31217 unsigned i;
31218 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31219 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31220 struct ttm_page_pool *pool;
31221 int shrink_pages = sc->nr_to_scan;
31222
31223 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31224 index 88edacc..1e5412b 100644
31225 --- a/drivers/gpu/drm/via/via_drv.h
31226 +++ b/drivers/gpu/drm/via/via_drv.h
31227 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31228 typedef uint32_t maskarray_t[5];
31229
31230 typedef struct drm_via_irq {
31231 - atomic_t irq_received;
31232 + atomic_unchecked_t irq_received;
31233 uint32_t pending_mask;
31234 uint32_t enable_mask;
31235 wait_queue_head_t irq_queue;
31236 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31237 struct timeval last_vblank;
31238 int last_vblank_valid;
31239 unsigned usec_per_vblank;
31240 - atomic_t vbl_received;
31241 + atomic_unchecked_t vbl_received;
31242 drm_via_state_t hc_state;
31243 char pci_buf[VIA_PCI_BUF_SIZE];
31244 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31245 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31246 index d391f48..10c8ca3 100644
31247 --- a/drivers/gpu/drm/via/via_irq.c
31248 +++ b/drivers/gpu/drm/via/via_irq.c
31249 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31250 if (crtc != 0)
31251 return 0;
31252
31253 - return atomic_read(&dev_priv->vbl_received);
31254 + return atomic_read_unchecked(&dev_priv->vbl_received);
31255 }
31256
31257 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31258 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31259
31260 status = VIA_READ(VIA_REG_INTERRUPT);
31261 if (status & VIA_IRQ_VBLANK_PENDING) {
31262 - atomic_inc(&dev_priv->vbl_received);
31263 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31264 + atomic_inc_unchecked(&dev_priv->vbl_received);
31265 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31266 do_gettimeofday(&cur_vblank);
31267 if (dev_priv->last_vblank_valid) {
31268 dev_priv->usec_per_vblank =
31269 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31270 dev_priv->last_vblank = cur_vblank;
31271 dev_priv->last_vblank_valid = 1;
31272 }
31273 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31274 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31275 DRM_DEBUG("US per vblank is: %u\n",
31276 dev_priv->usec_per_vblank);
31277 }
31278 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31279
31280 for (i = 0; i < dev_priv->num_irqs; ++i) {
31281 if (status & cur_irq->pending_mask) {
31282 - atomic_inc(&cur_irq->irq_received);
31283 + atomic_inc_unchecked(&cur_irq->irq_received);
31284 DRM_WAKEUP(&cur_irq->irq_queue);
31285 handled = 1;
31286 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31287 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31288 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31289 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31290 masks[irq][4]));
31291 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31292 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31293 } else {
31294 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31295 (((cur_irq_sequence =
31296 - atomic_read(&cur_irq->irq_received)) -
31297 + atomic_read_unchecked(&cur_irq->irq_received)) -
31298 *sequence) <= (1 << 23)));
31299 }
31300 *sequence = cur_irq_sequence;
31301 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31302 }
31303
31304 for (i = 0; i < dev_priv->num_irqs; ++i) {
31305 - atomic_set(&cur_irq->irq_received, 0);
31306 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31307 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31308 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31309 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31310 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31311 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31312 case VIA_IRQ_RELATIVE:
31313 irqwait->request.sequence +=
31314 - atomic_read(&cur_irq->irq_received);
31315 + atomic_read_unchecked(&cur_irq->irq_received);
31316 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31317 case VIA_IRQ_ABSOLUTE:
31318 break;
31319 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31320 index 29c984f..4084f1a 100644
31321 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31322 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31323 @@ -263,7 +263,7 @@ struct vmw_private {
31324 * Fencing and IRQs.
31325 */
31326
31327 - atomic_t marker_seq;
31328 + atomic_unchecked_t marker_seq;
31329 wait_queue_head_t fence_queue;
31330 wait_queue_head_t fifo_queue;
31331 int fence_queue_waiters; /* Protected by hw_mutex */
31332 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31333 index a0c2f12..68ae6cb 100644
31334 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31335 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31336 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31337 (unsigned int) min,
31338 (unsigned int) fifo->capabilities);
31339
31340 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31341 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31342 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31343 vmw_marker_queue_init(&fifo->marker_queue);
31344 return vmw_fifo_send_fence(dev_priv, &dummy);
31345 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31346 if (reserveable)
31347 iowrite32(bytes, fifo_mem +
31348 SVGA_FIFO_RESERVED);
31349 - return fifo_mem + (next_cmd >> 2);
31350 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31351 } else {
31352 need_bounce = true;
31353 }
31354 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31355
31356 fm = vmw_fifo_reserve(dev_priv, bytes);
31357 if (unlikely(fm == NULL)) {
31358 - *seqno = atomic_read(&dev_priv->marker_seq);
31359 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31360 ret = -ENOMEM;
31361 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31362 false, 3*HZ);
31363 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31364 }
31365
31366 do {
31367 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31368 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31369 } while (*seqno == 0);
31370
31371 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31372 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31373 index cabc95f..14b3d77 100644
31374 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31375 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31376 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31377 * emitted. Then the fence is stale and signaled.
31378 */
31379
31380 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31381 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31382 > VMW_FENCE_WRAP);
31383
31384 return ret;
31385 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31386
31387 if (fifo_idle)
31388 down_read(&fifo_state->rwsem);
31389 - signal_seq = atomic_read(&dev_priv->marker_seq);
31390 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31391 ret = 0;
31392
31393 for (;;) {
31394 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31395 index 8a8725c..afed796 100644
31396 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31397 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31398 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31399 while (!vmw_lag_lt(queue, us)) {
31400 spin_lock(&queue->lock);
31401 if (list_empty(&queue->head))
31402 - seqno = atomic_read(&dev_priv->marker_seq);
31403 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31404 else {
31405 marker = list_first_entry(&queue->head,
31406 struct vmw_marker, head);
31407 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31408 index 1f6957c..b579481 100644
31409 --- a/drivers/hid/hid-core.c
31410 +++ b/drivers/hid/hid-core.c
31411 @@ -2153,7 +2153,7 @@ static bool hid_ignore(struct hid_device *hdev)
31412
31413 int hid_add_device(struct hid_device *hdev)
31414 {
31415 - static atomic_t id = ATOMIC_INIT(0);
31416 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31417 int ret;
31418
31419 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31420 @@ -2188,7 +2188,7 @@ int hid_add_device(struct hid_device *hdev)
31421 /* XXX hack, any other cleaner solution after the driver core
31422 * is converted to allow more than 20 bytes as the device name? */
31423 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31424 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31425 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31426
31427 hid_debug_register(hdev, dev_name(&hdev->dev));
31428 ret = device_add(&hdev->dev);
31429 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31430 index eec3291..8ed706b 100644
31431 --- a/drivers/hid/hid-wiimote-debug.c
31432 +++ b/drivers/hid/hid-wiimote-debug.c
31433 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31434 else if (size == 0)
31435 return -EIO;
31436
31437 - if (copy_to_user(u, buf, size))
31438 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31439 return -EFAULT;
31440
31441 *off += size;
31442 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31443 index 14599e2..711c965 100644
31444 --- a/drivers/hid/usbhid/hiddev.c
31445 +++ b/drivers/hid/usbhid/hiddev.c
31446 @@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31447 break;
31448
31449 case HIDIOCAPPLICATION:
31450 - if (arg < 0 || arg >= hid->maxapplication)
31451 + if (arg >= hid->maxapplication)
31452 break;
31453
31454 for (i = 0; i < hid->maxcollection; i++)
31455 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31456 index 4065374..10ed7dc 100644
31457 --- a/drivers/hv/channel.c
31458 +++ b/drivers/hv/channel.c
31459 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31460 int ret = 0;
31461 int t;
31462
31463 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31464 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31465 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31466 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31467
31468 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31469 if (ret)
31470 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31471 index 86f8885..ab9cb2b 100644
31472 --- a/drivers/hv/hv.c
31473 +++ b/drivers/hv/hv.c
31474 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31475 u64 output_address = (output) ? virt_to_phys(output) : 0;
31476 u32 output_address_hi = output_address >> 32;
31477 u32 output_address_lo = output_address & 0xFFFFFFFF;
31478 - void *hypercall_page = hv_context.hypercall_page;
31479 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31480
31481 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31482 "=a"(hv_status_lo) : "d" (control_hi),
31483 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31484 index b9426a6..677ce34 100644
31485 --- a/drivers/hv/hyperv_vmbus.h
31486 +++ b/drivers/hv/hyperv_vmbus.h
31487 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31488 struct vmbus_connection {
31489 enum vmbus_connect_state conn_state;
31490
31491 - atomic_t next_gpadl_handle;
31492 + atomic_unchecked_t next_gpadl_handle;
31493
31494 /*
31495 * Represents channel interrupts. Each bit position represents a
31496 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31497 index a220e57..428f54d 100644
31498 --- a/drivers/hv/vmbus_drv.c
31499 +++ b/drivers/hv/vmbus_drv.c
31500 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31501 {
31502 int ret = 0;
31503
31504 - static atomic_t device_num = ATOMIC_INIT(0);
31505 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31506
31507 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31508 - atomic_inc_return(&device_num));
31509 + atomic_inc_return_unchecked(&device_num));
31510
31511 child_device_obj->device.bus = &hv_bus;
31512 child_device_obj->device.parent = &hv_acpi_dev->dev;
31513 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31514 index 34ad5a2..e2b0ae8 100644
31515 --- a/drivers/hwmon/acpi_power_meter.c
31516 +++ b/drivers/hwmon/acpi_power_meter.c
31517 @@ -308,8 +308,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31518 return res;
31519
31520 temp /= 1000;
31521 - if (temp < 0)
31522 - return -EINVAL;
31523
31524 mutex_lock(&resource->lock);
31525 resource->trip[attr->index - 7] = temp;
31526 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31527 index 8b011d0..3de24a1 100644
31528 --- a/drivers/hwmon/sht15.c
31529 +++ b/drivers/hwmon/sht15.c
31530 @@ -166,7 +166,7 @@ struct sht15_data {
31531 int supply_uV;
31532 bool supply_uV_valid;
31533 struct work_struct update_supply_work;
31534 - atomic_t interrupt_handled;
31535 + atomic_unchecked_t interrupt_handled;
31536 };
31537
31538 /**
31539 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31540 return ret;
31541
31542 gpio_direction_input(data->pdata->gpio_data);
31543 - atomic_set(&data->interrupt_handled, 0);
31544 + atomic_set_unchecked(&data->interrupt_handled, 0);
31545
31546 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31547 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31548 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31549 /* Only relevant if the interrupt hasn't occurred. */
31550 - if (!atomic_read(&data->interrupt_handled))
31551 + if (!atomic_read_unchecked(&data->interrupt_handled))
31552 schedule_work(&data->read_work);
31553 }
31554 ret = wait_event_timeout(data->wait_queue,
31555 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31556
31557 /* First disable the interrupt */
31558 disable_irq_nosync(irq);
31559 - atomic_inc(&data->interrupt_handled);
31560 + atomic_inc_unchecked(&data->interrupt_handled);
31561 /* Then schedule a reading work struct */
31562 if (data->state != SHT15_READING_NOTHING)
31563 schedule_work(&data->read_work);
31564 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31565 * If not, then start the interrupt again - care here as could
31566 * have gone low in meantime so verify it hasn't!
31567 */
31568 - atomic_set(&data->interrupt_handled, 0);
31569 + atomic_set_unchecked(&data->interrupt_handled, 0);
31570 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31571 /* If still not occurred or another handler was scheduled */
31572 if (gpio_get_value(data->pdata->gpio_data)
31573 - || atomic_read(&data->interrupt_handled))
31574 + || atomic_read_unchecked(&data->interrupt_handled))
31575 return;
31576 }
31577
31578 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31579 index 378fcb5..5e91fa8 100644
31580 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31581 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31582 @@ -43,7 +43,7 @@
31583 extern struct i2c_adapter amd756_smbus;
31584
31585 static struct i2c_adapter *s4882_adapter;
31586 -static struct i2c_algorithm *s4882_algo;
31587 +static i2c_algorithm_no_const *s4882_algo;
31588
31589 /* Wrapper access functions for multiplexed SMBus */
31590 static DEFINE_MUTEX(amd756_lock);
31591 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31592 index 29015eb..af2d8e9 100644
31593 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31594 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31595 @@ -41,7 +41,7 @@
31596 extern struct i2c_adapter *nforce2_smbus;
31597
31598 static struct i2c_adapter *s4985_adapter;
31599 -static struct i2c_algorithm *s4985_algo;
31600 +static i2c_algorithm_no_const *s4985_algo;
31601
31602 /* Wrapper access functions for multiplexed SMBus */
31603 static DEFINE_MUTEX(nforce2_lock);
31604 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31605 index 1038c38..eb92f51 100644
31606 --- a/drivers/i2c/i2c-mux.c
31607 +++ b/drivers/i2c/i2c-mux.c
31608 @@ -30,7 +30,7 @@
31609 /* multiplexer per channel data */
31610 struct i2c_mux_priv {
31611 struct i2c_adapter adap;
31612 - struct i2c_algorithm algo;
31613 + i2c_algorithm_no_const algo;
31614
31615 struct i2c_adapter *parent;
31616 void *mux_priv; /* the mux chip/device */
31617 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31618 index 57d00ca..0145194 100644
31619 --- a/drivers/ide/aec62xx.c
31620 +++ b/drivers/ide/aec62xx.c
31621 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31622 .cable_detect = atp86x_cable_detect,
31623 };
31624
31625 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31626 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31627 { /* 0: AEC6210 */
31628 .name = DRV_NAME,
31629 .init_chipset = init_chipset_aec62xx,
31630 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31631 index 2c8016a..911a27c 100644
31632 --- a/drivers/ide/alim15x3.c
31633 +++ b/drivers/ide/alim15x3.c
31634 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31635 .dma_sff_read_status = ide_dma_sff_read_status,
31636 };
31637
31638 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31639 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31640 .name = DRV_NAME,
31641 .init_chipset = init_chipset_ali15x3,
31642 .init_hwif = init_hwif_ali15x3,
31643 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31644 index 3747b25..56fc995 100644
31645 --- a/drivers/ide/amd74xx.c
31646 +++ b/drivers/ide/amd74xx.c
31647 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31648 .udma_mask = udma, \
31649 }
31650
31651 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31652 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31653 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31654 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31655 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31656 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31657 index 15f0ead..cb43480 100644
31658 --- a/drivers/ide/atiixp.c
31659 +++ b/drivers/ide/atiixp.c
31660 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31661 .cable_detect = atiixp_cable_detect,
31662 };
31663
31664 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31665 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31666 { /* 0: IXP200/300/400/700 */
31667 .name = DRV_NAME,
31668 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31669 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31670 index 5f80312..d1fc438 100644
31671 --- a/drivers/ide/cmd64x.c
31672 +++ b/drivers/ide/cmd64x.c
31673 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31674 .dma_sff_read_status = ide_dma_sff_read_status,
31675 };
31676
31677 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31678 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31679 { /* 0: CMD643 */
31680 .name = DRV_NAME,
31681 .init_chipset = init_chipset_cmd64x,
31682 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31683 index 2c1e5f7..1444762 100644
31684 --- a/drivers/ide/cs5520.c
31685 +++ b/drivers/ide/cs5520.c
31686 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31687 .set_dma_mode = cs5520_set_dma_mode,
31688 };
31689
31690 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31691 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31692 .name = DRV_NAME,
31693 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31694 .port_ops = &cs5520_port_ops,
31695 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31696 index 4dc4eb9..49b40ad 100644
31697 --- a/drivers/ide/cs5530.c
31698 +++ b/drivers/ide/cs5530.c
31699 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31700 .udma_filter = cs5530_udma_filter,
31701 };
31702
31703 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31704 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31705 .name = DRV_NAME,
31706 .init_chipset = init_chipset_cs5530,
31707 .init_hwif = init_hwif_cs5530,
31708 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31709 index 5059faf..18d4c85 100644
31710 --- a/drivers/ide/cs5535.c
31711 +++ b/drivers/ide/cs5535.c
31712 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31713 .cable_detect = cs5535_cable_detect,
31714 };
31715
31716 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31717 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31718 .name = DRV_NAME,
31719 .port_ops = &cs5535_port_ops,
31720 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31721 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31722 index 847553f..3ffb49d 100644
31723 --- a/drivers/ide/cy82c693.c
31724 +++ b/drivers/ide/cy82c693.c
31725 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31726 .set_dma_mode = cy82c693_set_dma_mode,
31727 };
31728
31729 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31730 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31731 .name = DRV_NAME,
31732 .init_iops = init_iops_cy82c693,
31733 .port_ops = &cy82c693_port_ops,
31734 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31735 index 58c51cd..4aec3b8 100644
31736 --- a/drivers/ide/hpt366.c
31737 +++ b/drivers/ide/hpt366.c
31738 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31739 }
31740 };
31741
31742 -static const struct hpt_info hpt36x __devinitdata = {
31743 +static const struct hpt_info hpt36x __devinitconst = {
31744 .chip_name = "HPT36x",
31745 .chip_type = HPT36x,
31746 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31747 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31748 .timings = &hpt36x_timings
31749 };
31750
31751 -static const struct hpt_info hpt370 __devinitdata = {
31752 +static const struct hpt_info hpt370 __devinitconst = {
31753 .chip_name = "HPT370",
31754 .chip_type = HPT370,
31755 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31756 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31757 .timings = &hpt37x_timings
31758 };
31759
31760 -static const struct hpt_info hpt370a __devinitdata = {
31761 +static const struct hpt_info hpt370a __devinitconst = {
31762 .chip_name = "HPT370A",
31763 .chip_type = HPT370A,
31764 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31765 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31766 .timings = &hpt37x_timings
31767 };
31768
31769 -static const struct hpt_info hpt374 __devinitdata = {
31770 +static const struct hpt_info hpt374 __devinitconst = {
31771 .chip_name = "HPT374",
31772 .chip_type = HPT374,
31773 .udma_mask = ATA_UDMA5,
31774 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31775 .timings = &hpt37x_timings
31776 };
31777
31778 -static const struct hpt_info hpt372 __devinitdata = {
31779 +static const struct hpt_info hpt372 __devinitconst = {
31780 .chip_name = "HPT372",
31781 .chip_type = HPT372,
31782 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31783 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31784 .timings = &hpt37x_timings
31785 };
31786
31787 -static const struct hpt_info hpt372a __devinitdata = {
31788 +static const struct hpt_info hpt372a __devinitconst = {
31789 .chip_name = "HPT372A",
31790 .chip_type = HPT372A,
31791 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31792 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31793 .timings = &hpt37x_timings
31794 };
31795
31796 -static const struct hpt_info hpt302 __devinitdata = {
31797 +static const struct hpt_info hpt302 __devinitconst = {
31798 .chip_name = "HPT302",
31799 .chip_type = HPT302,
31800 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31801 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31802 .timings = &hpt37x_timings
31803 };
31804
31805 -static const struct hpt_info hpt371 __devinitdata = {
31806 +static const struct hpt_info hpt371 __devinitconst = {
31807 .chip_name = "HPT371",
31808 .chip_type = HPT371,
31809 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31810 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31811 .timings = &hpt37x_timings
31812 };
31813
31814 -static const struct hpt_info hpt372n __devinitdata = {
31815 +static const struct hpt_info hpt372n __devinitconst = {
31816 .chip_name = "HPT372N",
31817 .chip_type = HPT372N,
31818 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31819 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31820 .timings = &hpt37x_timings
31821 };
31822
31823 -static const struct hpt_info hpt302n __devinitdata = {
31824 +static const struct hpt_info hpt302n __devinitconst = {
31825 .chip_name = "HPT302N",
31826 .chip_type = HPT302N,
31827 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31828 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31829 .timings = &hpt37x_timings
31830 };
31831
31832 -static const struct hpt_info hpt371n __devinitdata = {
31833 +static const struct hpt_info hpt371n __devinitconst = {
31834 .chip_name = "HPT371N",
31835 .chip_type = HPT371N,
31836 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31837 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31838 .dma_sff_read_status = ide_dma_sff_read_status,
31839 };
31840
31841 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31842 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31843 { /* 0: HPT36x */
31844 .name = DRV_NAME,
31845 .init_chipset = init_chipset_hpt366,
31846 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31847 index 8126824..55a2798 100644
31848 --- a/drivers/ide/ide-cd.c
31849 +++ b/drivers/ide/ide-cd.c
31850 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31851 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31852 if ((unsigned long)buf & alignment
31853 || blk_rq_bytes(rq) & q->dma_pad_mask
31854 - || object_is_on_stack(buf))
31855 + || object_starts_on_stack(buf))
31856 drive->dma = 0;
31857 }
31858 }
31859 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31860 index 7f56b73..dab5b67 100644
31861 --- a/drivers/ide/ide-pci-generic.c
31862 +++ b/drivers/ide/ide-pci-generic.c
31863 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31864 .udma_mask = ATA_UDMA6, \
31865 }
31866
31867 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31868 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31869 /* 0: Unknown */
31870 DECLARE_GENERIC_PCI_DEV(0),
31871
31872 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31873 index 560e66d..d5dd180 100644
31874 --- a/drivers/ide/it8172.c
31875 +++ b/drivers/ide/it8172.c
31876 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31877 .set_dma_mode = it8172_set_dma_mode,
31878 };
31879
31880 -static const struct ide_port_info it8172_port_info __devinitdata = {
31881 +static const struct ide_port_info it8172_port_info __devinitconst = {
31882 .name = DRV_NAME,
31883 .port_ops = &it8172_port_ops,
31884 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31885 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31886 index 46816ba..1847aeb 100644
31887 --- a/drivers/ide/it8213.c
31888 +++ b/drivers/ide/it8213.c
31889 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31890 .cable_detect = it8213_cable_detect,
31891 };
31892
31893 -static const struct ide_port_info it8213_chipset __devinitdata = {
31894 +static const struct ide_port_info it8213_chipset __devinitconst = {
31895 .name = DRV_NAME,
31896 .enablebits = { {0x41, 0x80, 0x80} },
31897 .port_ops = &it8213_port_ops,
31898 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31899 index 2e3169f..c5611db 100644
31900 --- a/drivers/ide/it821x.c
31901 +++ b/drivers/ide/it821x.c
31902 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31903 .cable_detect = it821x_cable_detect,
31904 };
31905
31906 -static const struct ide_port_info it821x_chipset __devinitdata = {
31907 +static const struct ide_port_info it821x_chipset __devinitconst = {
31908 .name = DRV_NAME,
31909 .init_chipset = init_chipset_it821x,
31910 .init_hwif = init_hwif_it821x,
31911 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31912 index 74c2c4a..efddd7d 100644
31913 --- a/drivers/ide/jmicron.c
31914 +++ b/drivers/ide/jmicron.c
31915 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31916 .cable_detect = jmicron_cable_detect,
31917 };
31918
31919 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31920 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31921 .name = DRV_NAME,
31922 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31923 .port_ops = &jmicron_port_ops,
31924 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31925 index 95327a2..73f78d8 100644
31926 --- a/drivers/ide/ns87415.c
31927 +++ b/drivers/ide/ns87415.c
31928 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31929 .dma_sff_read_status = superio_dma_sff_read_status,
31930 };
31931
31932 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31933 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31934 .name = DRV_NAME,
31935 .init_hwif = init_hwif_ns87415,
31936 .tp_ops = &ns87415_tp_ops,
31937 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31938 index 1a53a4c..39edc66 100644
31939 --- a/drivers/ide/opti621.c
31940 +++ b/drivers/ide/opti621.c
31941 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31942 .set_pio_mode = opti621_set_pio_mode,
31943 };
31944
31945 -static const struct ide_port_info opti621_chipset __devinitdata = {
31946 +static const struct ide_port_info opti621_chipset __devinitconst = {
31947 .name = DRV_NAME,
31948 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31949 .port_ops = &opti621_port_ops,
31950 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31951 index 9546fe2..2e5ceb6 100644
31952 --- a/drivers/ide/pdc202xx_new.c
31953 +++ b/drivers/ide/pdc202xx_new.c
31954 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31955 .udma_mask = udma, \
31956 }
31957
31958 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31959 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31960 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31961 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31962 };
31963 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31964 index 3a35ec6..5634510 100644
31965 --- a/drivers/ide/pdc202xx_old.c
31966 +++ b/drivers/ide/pdc202xx_old.c
31967 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31968 .max_sectors = sectors, \
31969 }
31970
31971 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31972 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31973 { /* 0: PDC20246 */
31974 .name = DRV_NAME,
31975 .init_chipset = init_chipset_pdc202xx,
31976 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31977 index 1892e81..fe0fd60 100644
31978 --- a/drivers/ide/piix.c
31979 +++ b/drivers/ide/piix.c
31980 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31981 .udma_mask = udma, \
31982 }
31983
31984 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31985 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31986 /* 0: MPIIX */
31987 { /*
31988 * MPIIX actually has only a single IDE channel mapped to
31989 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31990 index a6414a8..c04173e 100644
31991 --- a/drivers/ide/rz1000.c
31992 +++ b/drivers/ide/rz1000.c
31993 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31994 }
31995 }
31996
31997 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31998 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31999 .name = DRV_NAME,
32000 .host_flags = IDE_HFLAG_NO_DMA,
32001 };
32002 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
32003 index 356b9b5..d4758eb 100644
32004 --- a/drivers/ide/sc1200.c
32005 +++ b/drivers/ide/sc1200.c
32006 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
32007 .dma_sff_read_status = ide_dma_sff_read_status,
32008 };
32009
32010 -static const struct ide_port_info sc1200_chipset __devinitdata = {
32011 +static const struct ide_port_info sc1200_chipset __devinitconst = {
32012 .name = DRV_NAME,
32013 .port_ops = &sc1200_port_ops,
32014 .dma_ops = &sc1200_dma_ops,
32015 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
32016 index b7f5b0c..9701038 100644
32017 --- a/drivers/ide/scc_pata.c
32018 +++ b/drivers/ide/scc_pata.c
32019 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
32020 .dma_sff_read_status = scc_dma_sff_read_status,
32021 };
32022
32023 -static const struct ide_port_info scc_chipset __devinitdata = {
32024 +static const struct ide_port_info scc_chipset __devinitconst = {
32025 .name = "sccIDE",
32026 .init_iops = init_iops_scc,
32027 .init_dma = scc_init_dma,
32028 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32029 index 35fb8da..24d72ef 100644
32030 --- a/drivers/ide/serverworks.c
32031 +++ b/drivers/ide/serverworks.c
32032 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
32033 .cable_detect = svwks_cable_detect,
32034 };
32035
32036 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32037 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32038 { /* 0: OSB4 */
32039 .name = DRV_NAME,
32040 .init_chipset = init_chipset_svwks,
32041 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32042 index ddeda44..46f7e30 100644
32043 --- a/drivers/ide/siimage.c
32044 +++ b/drivers/ide/siimage.c
32045 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
32046 .udma_mask = ATA_UDMA6, \
32047 }
32048
32049 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32050 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32051 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32052 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32053 };
32054 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32055 index 4a00225..09e61b4 100644
32056 --- a/drivers/ide/sis5513.c
32057 +++ b/drivers/ide/sis5513.c
32058 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
32059 .cable_detect = sis_cable_detect,
32060 };
32061
32062 -static const struct ide_port_info sis5513_chipset __devinitdata = {
32063 +static const struct ide_port_info sis5513_chipset __devinitconst = {
32064 .name = DRV_NAME,
32065 .init_chipset = init_chipset_sis5513,
32066 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32067 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32068 index f21dc2a..d051cd2 100644
32069 --- a/drivers/ide/sl82c105.c
32070 +++ b/drivers/ide/sl82c105.c
32071 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32072 .dma_sff_read_status = ide_dma_sff_read_status,
32073 };
32074
32075 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
32076 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
32077 .name = DRV_NAME,
32078 .init_chipset = init_chipset_sl82c105,
32079 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32080 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32081 index 864ffe0..863a5e9 100644
32082 --- a/drivers/ide/slc90e66.c
32083 +++ b/drivers/ide/slc90e66.c
32084 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32085 .cable_detect = slc90e66_cable_detect,
32086 };
32087
32088 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
32089 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
32090 .name = DRV_NAME,
32091 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32092 .port_ops = &slc90e66_port_ops,
32093 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32094 index 4799d5c..1794678 100644
32095 --- a/drivers/ide/tc86c001.c
32096 +++ b/drivers/ide/tc86c001.c
32097 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32098 .dma_sff_read_status = ide_dma_sff_read_status,
32099 };
32100
32101 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
32102 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
32103 .name = DRV_NAME,
32104 .init_hwif = init_hwif_tc86c001,
32105 .port_ops = &tc86c001_port_ops,
32106 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32107 index 281c914..55ce1b8 100644
32108 --- a/drivers/ide/triflex.c
32109 +++ b/drivers/ide/triflex.c
32110 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32111 .set_dma_mode = triflex_set_mode,
32112 };
32113
32114 -static const struct ide_port_info triflex_device __devinitdata = {
32115 +static const struct ide_port_info triflex_device __devinitconst = {
32116 .name = DRV_NAME,
32117 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32118 .port_ops = &triflex_port_ops,
32119 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32120 index 4b42ca0..e494a98 100644
32121 --- a/drivers/ide/trm290.c
32122 +++ b/drivers/ide/trm290.c
32123 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32124 .dma_check = trm290_dma_check,
32125 };
32126
32127 -static const struct ide_port_info trm290_chipset __devinitdata = {
32128 +static const struct ide_port_info trm290_chipset __devinitconst = {
32129 .name = DRV_NAME,
32130 .init_hwif = init_hwif_trm290,
32131 .tp_ops = &trm290_tp_ops,
32132 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32133 index f46f49c..eb77678 100644
32134 --- a/drivers/ide/via82cxxx.c
32135 +++ b/drivers/ide/via82cxxx.c
32136 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32137 .cable_detect = via82cxxx_cable_detect,
32138 };
32139
32140 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32141 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32142 .name = DRV_NAME,
32143 .init_chipset = init_chipset_via82cxxx,
32144 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32145 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32146 index 73d4531..c90cd2d 100644
32147 --- a/drivers/ieee802154/fakehard.c
32148 +++ b/drivers/ieee802154/fakehard.c
32149 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32150 phy->transmit_power = 0xbf;
32151
32152 dev->netdev_ops = &fake_ops;
32153 - dev->ml_priv = &fake_mlme;
32154 + dev->ml_priv = (void *)&fake_mlme;
32155
32156 priv = netdev_priv(dev);
32157 priv->phy = phy;
32158 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32159 index c889aae..6cf5aa7 100644
32160 --- a/drivers/infiniband/core/cm.c
32161 +++ b/drivers/infiniband/core/cm.c
32162 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32163
32164 struct cm_counter_group {
32165 struct kobject obj;
32166 - atomic_long_t counter[CM_ATTR_COUNT];
32167 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32168 };
32169
32170 struct cm_counter_attribute {
32171 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32172 struct ib_mad_send_buf *msg = NULL;
32173 int ret;
32174
32175 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32176 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32177 counter[CM_REQ_COUNTER]);
32178
32179 /* Quick state check to discard duplicate REQs. */
32180 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32181 if (!cm_id_priv)
32182 return;
32183
32184 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32185 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32186 counter[CM_REP_COUNTER]);
32187 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32188 if (ret)
32189 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32190 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32191 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32192 spin_unlock_irq(&cm_id_priv->lock);
32193 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32194 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32195 counter[CM_RTU_COUNTER]);
32196 goto out;
32197 }
32198 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32199 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32200 dreq_msg->local_comm_id);
32201 if (!cm_id_priv) {
32202 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32203 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32204 counter[CM_DREQ_COUNTER]);
32205 cm_issue_drep(work->port, work->mad_recv_wc);
32206 return -EINVAL;
32207 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32208 case IB_CM_MRA_REP_RCVD:
32209 break;
32210 case IB_CM_TIMEWAIT:
32211 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32212 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32213 counter[CM_DREQ_COUNTER]);
32214 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32215 goto unlock;
32216 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32217 cm_free_msg(msg);
32218 goto deref;
32219 case IB_CM_DREQ_RCVD:
32220 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32221 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32222 counter[CM_DREQ_COUNTER]);
32223 goto unlock;
32224 default:
32225 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32226 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32227 cm_id_priv->msg, timeout)) {
32228 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32229 - atomic_long_inc(&work->port->
32230 + atomic_long_inc_unchecked(&work->port->
32231 counter_group[CM_RECV_DUPLICATES].
32232 counter[CM_MRA_COUNTER]);
32233 goto out;
32234 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32235 break;
32236 case IB_CM_MRA_REQ_RCVD:
32237 case IB_CM_MRA_REP_RCVD:
32238 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32239 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32240 counter[CM_MRA_COUNTER]);
32241 /* fall through */
32242 default:
32243 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32244 case IB_CM_LAP_IDLE:
32245 break;
32246 case IB_CM_MRA_LAP_SENT:
32247 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32248 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32249 counter[CM_LAP_COUNTER]);
32250 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32251 goto unlock;
32252 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32253 cm_free_msg(msg);
32254 goto deref;
32255 case IB_CM_LAP_RCVD:
32256 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32257 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32258 counter[CM_LAP_COUNTER]);
32259 goto unlock;
32260 default:
32261 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32262 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32263 if (cur_cm_id_priv) {
32264 spin_unlock_irq(&cm.lock);
32265 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32266 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32267 counter[CM_SIDR_REQ_COUNTER]);
32268 goto out; /* Duplicate message. */
32269 }
32270 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32271 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32272 msg->retries = 1;
32273
32274 - atomic_long_add(1 + msg->retries,
32275 + atomic_long_add_unchecked(1 + msg->retries,
32276 &port->counter_group[CM_XMIT].counter[attr_index]);
32277 if (msg->retries)
32278 - atomic_long_add(msg->retries,
32279 + atomic_long_add_unchecked(msg->retries,
32280 &port->counter_group[CM_XMIT_RETRIES].
32281 counter[attr_index]);
32282
32283 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32284 }
32285
32286 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32287 - atomic_long_inc(&port->counter_group[CM_RECV].
32288 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32289 counter[attr_id - CM_ATTR_ID_OFFSET]);
32290
32291 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32292 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32293 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32294
32295 return sprintf(buf, "%ld\n",
32296 - atomic_long_read(&group->counter[cm_attr->index]));
32297 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32298 }
32299
32300 static const struct sysfs_ops cm_counter_ops = {
32301 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32302 index 176c8f9..2627b62 100644
32303 --- a/drivers/infiniband/core/fmr_pool.c
32304 +++ b/drivers/infiniband/core/fmr_pool.c
32305 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32306
32307 struct task_struct *thread;
32308
32309 - atomic_t req_ser;
32310 - atomic_t flush_ser;
32311 + atomic_unchecked_t req_ser;
32312 + atomic_unchecked_t flush_ser;
32313
32314 wait_queue_head_t force_wait;
32315 };
32316 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32317 struct ib_fmr_pool *pool = pool_ptr;
32318
32319 do {
32320 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32321 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32322 ib_fmr_batch_release(pool);
32323
32324 - atomic_inc(&pool->flush_ser);
32325 + atomic_inc_unchecked(&pool->flush_ser);
32326 wake_up_interruptible(&pool->force_wait);
32327
32328 if (pool->flush_function)
32329 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32330 }
32331
32332 set_current_state(TASK_INTERRUPTIBLE);
32333 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32334 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32335 !kthread_should_stop())
32336 schedule();
32337 __set_current_state(TASK_RUNNING);
32338 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32339 pool->dirty_watermark = params->dirty_watermark;
32340 pool->dirty_len = 0;
32341 spin_lock_init(&pool->pool_lock);
32342 - atomic_set(&pool->req_ser, 0);
32343 - atomic_set(&pool->flush_ser, 0);
32344 + atomic_set_unchecked(&pool->req_ser, 0);
32345 + atomic_set_unchecked(&pool->flush_ser, 0);
32346 init_waitqueue_head(&pool->force_wait);
32347
32348 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32349 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32350 }
32351 spin_unlock_irq(&pool->pool_lock);
32352
32353 - serial = atomic_inc_return(&pool->req_ser);
32354 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32355 wake_up_process(pool->thread);
32356
32357 if (wait_event_interruptible(pool->force_wait,
32358 - atomic_read(&pool->flush_ser) - serial >= 0))
32359 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32360 return -EINTR;
32361
32362 return 0;
32363 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32364 } else {
32365 list_add_tail(&fmr->list, &pool->dirty_list);
32366 if (++pool->dirty_len >= pool->dirty_watermark) {
32367 - atomic_inc(&pool->req_ser);
32368 + atomic_inc_unchecked(&pool->req_ser);
32369 wake_up_process(pool->thread);
32370 }
32371 }
32372 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32373 index 57e07c6..56d09d4 100644
32374 --- a/drivers/infiniband/hw/cxgb4/mem.c
32375 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32376 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32377 int err;
32378 struct fw_ri_tpte tpt;
32379 u32 stag_idx;
32380 - static atomic_t key;
32381 + static atomic_unchecked_t key;
32382
32383 if (c4iw_fatal_error(rdev))
32384 return -EIO;
32385 @@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32386 if (rdev->stats.stag.cur > rdev->stats.stag.max)
32387 rdev->stats.stag.max = rdev->stats.stag.cur;
32388 mutex_unlock(&rdev->stats.lock);
32389 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32390 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32391 }
32392 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32393 __func__, stag_state, type, pdid, stag_idx);
32394 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32395 index 79b3dbc..96e5fcc 100644
32396 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32397 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32398 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32399 struct ib_atomic_eth *ateth;
32400 struct ipath_ack_entry *e;
32401 u64 vaddr;
32402 - atomic64_t *maddr;
32403 + atomic64_unchecked_t *maddr;
32404 u64 sdata;
32405 u32 rkey;
32406 u8 next;
32407 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32408 IB_ACCESS_REMOTE_ATOMIC)))
32409 goto nack_acc_unlck;
32410 /* Perform atomic OP and save result. */
32411 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32412 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32413 sdata = be64_to_cpu(ateth->swap_data);
32414 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32415 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32416 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32417 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32418 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32419 be64_to_cpu(ateth->compare_data),
32420 sdata);
32421 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32422 index 1f95bba..9530f87 100644
32423 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32424 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32425 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32426 unsigned long flags;
32427 struct ib_wc wc;
32428 u64 sdata;
32429 - atomic64_t *maddr;
32430 + atomic64_unchecked_t *maddr;
32431 enum ib_wc_status send_status;
32432
32433 /*
32434 @@ -382,11 +382,11 @@ again:
32435 IB_ACCESS_REMOTE_ATOMIC)))
32436 goto acc_err;
32437 /* Perform atomic OP and save result. */
32438 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32439 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32440 sdata = wqe->wr.wr.atomic.compare_add;
32441 *(u64 *) sqp->s_sge.sge.vaddr =
32442 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32443 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32444 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32445 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32446 sdata, wqe->wr.wr.atomic.swap);
32447 goto send_comp;
32448 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32449 index 7140199..da60063 100644
32450 --- a/drivers/infiniband/hw/nes/nes.c
32451 +++ b/drivers/infiniband/hw/nes/nes.c
32452 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32453 LIST_HEAD(nes_adapter_list);
32454 static LIST_HEAD(nes_dev_list);
32455
32456 -atomic_t qps_destroyed;
32457 +atomic_unchecked_t qps_destroyed;
32458
32459 static unsigned int ee_flsh_adapter;
32460 static unsigned int sysfs_nonidx_addr;
32461 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32462 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32463 struct nes_adapter *nesadapter = nesdev->nesadapter;
32464
32465 - atomic_inc(&qps_destroyed);
32466 + atomic_inc_unchecked(&qps_destroyed);
32467
32468 /* Free the control structures */
32469
32470 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32471 index c438e46..ca30356 100644
32472 --- a/drivers/infiniband/hw/nes/nes.h
32473 +++ b/drivers/infiniband/hw/nes/nes.h
32474 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32475 extern unsigned int wqm_quanta;
32476 extern struct list_head nes_adapter_list;
32477
32478 -extern atomic_t cm_connects;
32479 -extern atomic_t cm_accepts;
32480 -extern atomic_t cm_disconnects;
32481 -extern atomic_t cm_closes;
32482 -extern atomic_t cm_connecteds;
32483 -extern atomic_t cm_connect_reqs;
32484 -extern atomic_t cm_rejects;
32485 -extern atomic_t mod_qp_timouts;
32486 -extern atomic_t qps_created;
32487 -extern atomic_t qps_destroyed;
32488 -extern atomic_t sw_qps_destroyed;
32489 +extern atomic_unchecked_t cm_connects;
32490 +extern atomic_unchecked_t cm_accepts;
32491 +extern atomic_unchecked_t cm_disconnects;
32492 +extern atomic_unchecked_t cm_closes;
32493 +extern atomic_unchecked_t cm_connecteds;
32494 +extern atomic_unchecked_t cm_connect_reqs;
32495 +extern atomic_unchecked_t cm_rejects;
32496 +extern atomic_unchecked_t mod_qp_timouts;
32497 +extern atomic_unchecked_t qps_created;
32498 +extern atomic_unchecked_t qps_destroyed;
32499 +extern atomic_unchecked_t sw_qps_destroyed;
32500 extern u32 mh_detected;
32501 extern u32 mh_pauses_sent;
32502 extern u32 cm_packets_sent;
32503 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32504 extern u32 cm_packets_received;
32505 extern u32 cm_packets_dropped;
32506 extern u32 cm_packets_retrans;
32507 -extern atomic_t cm_listens_created;
32508 -extern atomic_t cm_listens_destroyed;
32509 +extern atomic_unchecked_t cm_listens_created;
32510 +extern atomic_unchecked_t cm_listens_destroyed;
32511 extern u32 cm_backlog_drops;
32512 -extern atomic_t cm_loopbacks;
32513 -extern atomic_t cm_nodes_created;
32514 -extern atomic_t cm_nodes_destroyed;
32515 -extern atomic_t cm_accel_dropped_pkts;
32516 -extern atomic_t cm_resets_recvd;
32517 -extern atomic_t pau_qps_created;
32518 -extern atomic_t pau_qps_destroyed;
32519 +extern atomic_unchecked_t cm_loopbacks;
32520 +extern atomic_unchecked_t cm_nodes_created;
32521 +extern atomic_unchecked_t cm_nodes_destroyed;
32522 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32523 +extern atomic_unchecked_t cm_resets_recvd;
32524 +extern atomic_unchecked_t pau_qps_created;
32525 +extern atomic_unchecked_t pau_qps_destroyed;
32526
32527 extern u32 int_mod_timer_init;
32528 extern u32 int_mod_cq_depth_256;
32529 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32530 index 020e95c..fbb3450 100644
32531 --- a/drivers/infiniband/hw/nes/nes_cm.c
32532 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32533 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32534 u32 cm_packets_retrans;
32535 u32 cm_packets_created;
32536 u32 cm_packets_received;
32537 -atomic_t cm_listens_created;
32538 -atomic_t cm_listens_destroyed;
32539 +atomic_unchecked_t cm_listens_created;
32540 +atomic_unchecked_t cm_listens_destroyed;
32541 u32 cm_backlog_drops;
32542 -atomic_t cm_loopbacks;
32543 -atomic_t cm_nodes_created;
32544 -atomic_t cm_nodes_destroyed;
32545 -atomic_t cm_accel_dropped_pkts;
32546 -atomic_t cm_resets_recvd;
32547 +atomic_unchecked_t cm_loopbacks;
32548 +atomic_unchecked_t cm_nodes_created;
32549 +atomic_unchecked_t cm_nodes_destroyed;
32550 +atomic_unchecked_t cm_accel_dropped_pkts;
32551 +atomic_unchecked_t cm_resets_recvd;
32552
32553 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32554 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32555 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32556
32557 static struct nes_cm_core *g_cm_core;
32558
32559 -atomic_t cm_connects;
32560 -atomic_t cm_accepts;
32561 -atomic_t cm_disconnects;
32562 -atomic_t cm_closes;
32563 -atomic_t cm_connecteds;
32564 -atomic_t cm_connect_reqs;
32565 -atomic_t cm_rejects;
32566 +atomic_unchecked_t cm_connects;
32567 +atomic_unchecked_t cm_accepts;
32568 +atomic_unchecked_t cm_disconnects;
32569 +atomic_unchecked_t cm_closes;
32570 +atomic_unchecked_t cm_connecteds;
32571 +atomic_unchecked_t cm_connect_reqs;
32572 +atomic_unchecked_t cm_rejects;
32573
32574 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32575 {
32576 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32577 kfree(listener);
32578 listener = NULL;
32579 ret = 0;
32580 - atomic_inc(&cm_listens_destroyed);
32581 + atomic_inc_unchecked(&cm_listens_destroyed);
32582 } else {
32583 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32584 }
32585 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32586 cm_node->rem_mac);
32587
32588 add_hte_node(cm_core, cm_node);
32589 - atomic_inc(&cm_nodes_created);
32590 + atomic_inc_unchecked(&cm_nodes_created);
32591
32592 return cm_node;
32593 }
32594 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32595 }
32596
32597 atomic_dec(&cm_core->node_cnt);
32598 - atomic_inc(&cm_nodes_destroyed);
32599 + atomic_inc_unchecked(&cm_nodes_destroyed);
32600 nesqp = cm_node->nesqp;
32601 if (nesqp) {
32602 nesqp->cm_node = NULL;
32603 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32604
32605 static void drop_packet(struct sk_buff *skb)
32606 {
32607 - atomic_inc(&cm_accel_dropped_pkts);
32608 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32609 dev_kfree_skb_any(skb);
32610 }
32611
32612 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32613 {
32614
32615 int reset = 0; /* whether to send reset in case of err.. */
32616 - atomic_inc(&cm_resets_recvd);
32617 + atomic_inc_unchecked(&cm_resets_recvd);
32618 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32619 " refcnt=%d\n", cm_node, cm_node->state,
32620 atomic_read(&cm_node->ref_count));
32621 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32622 rem_ref_cm_node(cm_node->cm_core, cm_node);
32623 return NULL;
32624 }
32625 - atomic_inc(&cm_loopbacks);
32626 + atomic_inc_unchecked(&cm_loopbacks);
32627 loopbackremotenode->loopbackpartner = cm_node;
32628 loopbackremotenode->tcp_cntxt.rcv_wscale =
32629 NES_CM_DEFAULT_RCV_WND_SCALE;
32630 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32631 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32632 else {
32633 rem_ref_cm_node(cm_core, cm_node);
32634 - atomic_inc(&cm_accel_dropped_pkts);
32635 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32636 dev_kfree_skb_any(skb);
32637 }
32638 break;
32639 @@ -2891,7 +2891,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32640
32641 if ((cm_id) && (cm_id->event_handler)) {
32642 if (issue_disconn) {
32643 - atomic_inc(&cm_disconnects);
32644 + atomic_inc_unchecked(&cm_disconnects);
32645 cm_event.event = IW_CM_EVENT_DISCONNECT;
32646 cm_event.status = disconn_status;
32647 cm_event.local_addr = cm_id->local_addr;
32648 @@ -2913,7 +2913,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32649 }
32650
32651 if (issue_close) {
32652 - atomic_inc(&cm_closes);
32653 + atomic_inc_unchecked(&cm_closes);
32654 nes_disconnect(nesqp, 1);
32655
32656 cm_id->provider_data = nesqp;
32657 @@ -3049,7 +3049,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32658
32659 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32660 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32661 - atomic_inc(&cm_accepts);
32662 + atomic_inc_unchecked(&cm_accepts);
32663
32664 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32665 netdev_refcnt_read(nesvnic->netdev));
32666 @@ -3251,7 +3251,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32667 struct nes_cm_core *cm_core;
32668 u8 *start_buff;
32669
32670 - atomic_inc(&cm_rejects);
32671 + atomic_inc_unchecked(&cm_rejects);
32672 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32673 loopback = cm_node->loopbackpartner;
32674 cm_core = cm_node->cm_core;
32675 @@ -3311,7 +3311,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32676 ntohl(cm_id->local_addr.sin_addr.s_addr),
32677 ntohs(cm_id->local_addr.sin_port));
32678
32679 - atomic_inc(&cm_connects);
32680 + atomic_inc_unchecked(&cm_connects);
32681 nesqp->active_conn = 1;
32682
32683 /* cache the cm_id in the qp */
32684 @@ -3421,7 +3421,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32685 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32686 return err;
32687 }
32688 - atomic_inc(&cm_listens_created);
32689 + atomic_inc_unchecked(&cm_listens_created);
32690 }
32691
32692 cm_id->add_ref(cm_id);
32693 @@ -3522,7 +3522,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32694
32695 if (nesqp->destroyed)
32696 return;
32697 - atomic_inc(&cm_connecteds);
32698 + atomic_inc_unchecked(&cm_connecteds);
32699 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32700 " local port 0x%04X. jiffies = %lu.\n",
32701 nesqp->hwqp.qp_id,
32702 @@ -3709,7 +3709,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32703
32704 cm_id->add_ref(cm_id);
32705 ret = cm_id->event_handler(cm_id, &cm_event);
32706 - atomic_inc(&cm_closes);
32707 + atomic_inc_unchecked(&cm_closes);
32708 cm_event.event = IW_CM_EVENT_CLOSE;
32709 cm_event.status = 0;
32710 cm_event.provider_data = cm_id->provider_data;
32711 @@ -3745,7 +3745,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32712 return;
32713 cm_id = cm_node->cm_id;
32714
32715 - atomic_inc(&cm_connect_reqs);
32716 + atomic_inc_unchecked(&cm_connect_reqs);
32717 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32718 cm_node, cm_id, jiffies);
32719
32720 @@ -3785,7 +3785,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32721 return;
32722 cm_id = cm_node->cm_id;
32723
32724 - atomic_inc(&cm_connect_reqs);
32725 + atomic_inc_unchecked(&cm_connect_reqs);
32726 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32727 cm_node, cm_id, jiffies);
32728
32729 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32730 index 3ba7be3..c81f6ff 100644
32731 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32732 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32733 @@ -40,8 +40,8 @@
32734 #include "nes.h"
32735 #include "nes_mgt.h"
32736
32737 -atomic_t pau_qps_created;
32738 -atomic_t pau_qps_destroyed;
32739 +atomic_unchecked_t pau_qps_created;
32740 +atomic_unchecked_t pau_qps_destroyed;
32741
32742 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32743 {
32744 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32745 {
32746 struct sk_buff *skb;
32747 unsigned long flags;
32748 - atomic_inc(&pau_qps_destroyed);
32749 + atomic_inc_unchecked(&pau_qps_destroyed);
32750
32751 /* Free packets that have not yet been forwarded */
32752 /* Lock is acquired by skb_dequeue when removing the skb */
32753 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32754 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32755 skb_queue_head_init(&nesqp->pau_list);
32756 spin_lock_init(&nesqp->pau_lock);
32757 - atomic_inc(&pau_qps_created);
32758 + atomic_inc_unchecked(&pau_qps_created);
32759 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32760 }
32761
32762 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32763 index f3a3ecf..57d311d 100644
32764 --- a/drivers/infiniband/hw/nes/nes_nic.c
32765 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32766 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32767 target_stat_values[++index] = mh_detected;
32768 target_stat_values[++index] = mh_pauses_sent;
32769 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32770 - target_stat_values[++index] = atomic_read(&cm_connects);
32771 - target_stat_values[++index] = atomic_read(&cm_accepts);
32772 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32773 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32774 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32775 - target_stat_values[++index] = atomic_read(&cm_rejects);
32776 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32777 - target_stat_values[++index] = atomic_read(&qps_created);
32778 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32779 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32780 - target_stat_values[++index] = atomic_read(&cm_closes);
32781 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32782 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32783 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32784 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32785 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32786 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32787 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32788 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32789 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32790 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32791 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32792 target_stat_values[++index] = cm_packets_sent;
32793 target_stat_values[++index] = cm_packets_bounced;
32794 target_stat_values[++index] = cm_packets_created;
32795 target_stat_values[++index] = cm_packets_received;
32796 target_stat_values[++index] = cm_packets_dropped;
32797 target_stat_values[++index] = cm_packets_retrans;
32798 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32799 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32800 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32801 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32802 target_stat_values[++index] = cm_backlog_drops;
32803 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32804 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32805 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32806 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32807 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32808 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32809 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32810 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32811 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32812 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32813 target_stat_values[++index] = nesadapter->free_4kpbl;
32814 target_stat_values[++index] = nesadapter->free_256pbl;
32815 target_stat_values[++index] = int_mod_timer_init;
32816 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32817 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32818 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32819 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32820 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32821 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32822 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32823 }
32824
32825 /**
32826 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32827 index 8b8812d..a5e1133 100644
32828 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32829 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32830 @@ -46,9 +46,9 @@
32831
32832 #include <rdma/ib_umem.h>
32833
32834 -atomic_t mod_qp_timouts;
32835 -atomic_t qps_created;
32836 -atomic_t sw_qps_destroyed;
32837 +atomic_unchecked_t mod_qp_timouts;
32838 +atomic_unchecked_t qps_created;
32839 +atomic_unchecked_t sw_qps_destroyed;
32840
32841 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32842
32843 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32844 if (init_attr->create_flags)
32845 return ERR_PTR(-EINVAL);
32846
32847 - atomic_inc(&qps_created);
32848 + atomic_inc_unchecked(&qps_created);
32849 switch (init_attr->qp_type) {
32850 case IB_QPT_RC:
32851 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32852 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32853 struct iw_cm_event cm_event;
32854 int ret = 0;
32855
32856 - atomic_inc(&sw_qps_destroyed);
32857 + atomic_inc_unchecked(&sw_qps_destroyed);
32858 nesqp->destroyed = 1;
32859
32860 /* Blow away the connection if it exists. */
32861 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32862 index 7e62f41..4c2b8e2 100644
32863 --- a/drivers/infiniband/hw/qib/qib.h
32864 +++ b/drivers/infiniband/hw/qib/qib.h
32865 @@ -51,6 +51,7 @@
32866 #include <linux/completion.h>
32867 #include <linux/kref.h>
32868 #include <linux/sched.h>
32869 +#include <linux/slab.h>
32870
32871 #include "qib_common.h"
32872 #include "qib_verbs.h"
32873 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32874 index da739d9..da1c7f4 100644
32875 --- a/drivers/input/gameport/gameport.c
32876 +++ b/drivers/input/gameport/gameport.c
32877 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32878 */
32879 static void gameport_init_port(struct gameport *gameport)
32880 {
32881 - static atomic_t gameport_no = ATOMIC_INIT(0);
32882 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32883
32884 __module_get(THIS_MODULE);
32885
32886 mutex_init(&gameport->drv_mutex);
32887 device_initialize(&gameport->dev);
32888 dev_set_name(&gameport->dev, "gameport%lu",
32889 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32890 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32891 gameport->dev.bus = &gameport_bus;
32892 gameport->dev.release = gameport_release_port;
32893 if (gameport->parent)
32894 diff --git a/drivers/input/input.c b/drivers/input/input.c
32895 index 8921c61..f5cd63d 100644
32896 --- a/drivers/input/input.c
32897 +++ b/drivers/input/input.c
32898 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32899 */
32900 int input_register_device(struct input_dev *dev)
32901 {
32902 - static atomic_t input_no = ATOMIC_INIT(0);
32903 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32904 struct input_handler *handler;
32905 const char *path;
32906 int error;
32907 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32908 dev->setkeycode = input_default_setkeycode;
32909
32910 dev_set_name(&dev->dev, "input%ld",
32911 - (unsigned long) atomic_inc_return(&input_no) - 1);
32912 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32913
32914 error = device_add(&dev->dev);
32915 if (error)
32916 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32917 index 04c69af..5f92d00 100644
32918 --- a/drivers/input/joystick/sidewinder.c
32919 +++ b/drivers/input/joystick/sidewinder.c
32920 @@ -30,6 +30,7 @@
32921 #include <linux/kernel.h>
32922 #include <linux/module.h>
32923 #include <linux/slab.h>
32924 +#include <linux/sched.h>
32925 #include <linux/init.h>
32926 #include <linux/input.h>
32927 #include <linux/gameport.h>
32928 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32929 index 83811e4..0822b90 100644
32930 --- a/drivers/input/joystick/xpad.c
32931 +++ b/drivers/input/joystick/xpad.c
32932 @@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32933
32934 static int xpad_led_probe(struct usb_xpad *xpad)
32935 {
32936 - static atomic_t led_seq = ATOMIC_INIT(0);
32937 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32938 long led_no;
32939 struct xpad_led *led;
32940 struct led_classdev *led_cdev;
32941 @@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32942 if (!led)
32943 return -ENOMEM;
32944
32945 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32946 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32947
32948 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32949 led->xpad = xpad;
32950 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32951 index 0110b5a..d3ad144 100644
32952 --- a/drivers/input/mousedev.c
32953 +++ b/drivers/input/mousedev.c
32954 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32955
32956 spin_unlock_irq(&client->packet_lock);
32957
32958 - if (copy_to_user(buffer, data, count))
32959 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32960 return -EFAULT;
32961
32962 return count;
32963 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32964 index d0f7533..fb8215b 100644
32965 --- a/drivers/input/serio/serio.c
32966 +++ b/drivers/input/serio/serio.c
32967 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32968 */
32969 static void serio_init_port(struct serio *serio)
32970 {
32971 - static atomic_t serio_no = ATOMIC_INIT(0);
32972 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32973
32974 __module_get(THIS_MODULE);
32975
32976 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32977 mutex_init(&serio->drv_mutex);
32978 device_initialize(&serio->dev);
32979 dev_set_name(&serio->dev, "serio%ld",
32980 - (long)atomic_inc_return(&serio_no) - 1);
32981 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32982 serio->dev.bus = &serio_bus;
32983 serio->dev.release = serio_release_port;
32984 serio->dev.groups = serio_device_attr_groups;
32985 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32986 index 38c4bd8..58965d9 100644
32987 --- a/drivers/isdn/capi/capi.c
32988 +++ b/drivers/isdn/capi/capi.c
32989 @@ -83,8 +83,8 @@ struct capiminor {
32990
32991 struct capi20_appl *ap;
32992 u32 ncci;
32993 - atomic_t datahandle;
32994 - atomic_t msgid;
32995 + atomic_unchecked_t datahandle;
32996 + atomic_unchecked_t msgid;
32997
32998 struct tty_port port;
32999 int ttyinstop;
33000 @@ -392,7 +392,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
33001 capimsg_setu16(s, 2, mp->ap->applid);
33002 capimsg_setu8 (s, 4, CAPI_DATA_B3);
33003 capimsg_setu8 (s, 5, CAPI_RESP);
33004 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
33005 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
33006 capimsg_setu32(s, 8, mp->ncci);
33007 capimsg_setu16(s, 12, datahandle);
33008 }
33009 @@ -513,14 +513,14 @@ static void handle_minor_send(struct capiminor *mp)
33010 mp->outbytes -= len;
33011 spin_unlock_bh(&mp->outlock);
33012
33013 - datahandle = atomic_inc_return(&mp->datahandle);
33014 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
33015 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
33016 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33017 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33018 capimsg_setu16(skb->data, 2, mp->ap->applid);
33019 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33020 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33021 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33022 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33023 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33024 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33025 capimsg_setu16(skb->data, 16, len); /* Data length */
33026 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
33027 index a6d9fd2..afdb8a3 100644
33028 --- a/drivers/isdn/gigaset/interface.c
33029 +++ b/drivers/isdn/gigaset/interface.c
33030 @@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
33031 }
33032 tty->driver_data = cs;
33033
33034 - ++cs->port.count;
33035 + atomic_inc(&cs->port.count);
33036
33037 - if (cs->port.count == 1) {
33038 + if (atomic_read(&cs->port.count) == 1) {
33039 tty_port_tty_set(&cs->port, tty);
33040 tty->low_latency = 1;
33041 }
33042 @@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33043
33044 if (!cs->connected)
33045 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33046 - else if (!cs->port.count)
33047 + else if (!atomic_read(&cs->port.count))
33048 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33049 - else if (!--cs->port.count)
33050 + else if (!atomic_dec_return(&cs->port.count))
33051 tty_port_tty_set(&cs->port, NULL);
33052
33053 mutex_unlock(&cs->mutex);
33054 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33055 index 821f7ac..28d4030 100644
33056 --- a/drivers/isdn/hardware/avm/b1.c
33057 +++ b/drivers/isdn/hardware/avm/b1.c
33058 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
33059 }
33060 if (left) {
33061 if (t4file->user) {
33062 - if (copy_from_user(buf, dp, left))
33063 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33064 return -EFAULT;
33065 } else {
33066 memcpy(buf, dp, left);
33067 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
33068 }
33069 if (left) {
33070 if (config->user) {
33071 - if (copy_from_user(buf, dp, left))
33072 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33073 return -EFAULT;
33074 } else {
33075 memcpy(buf, dp, left);
33076 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33077 index dd6b53a..19d9ee6 100644
33078 --- a/drivers/isdn/hardware/eicon/divasync.h
33079 +++ b/drivers/isdn/hardware/eicon/divasync.h
33080 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33081 } diva_didd_add_adapter_t;
33082 typedef struct _diva_didd_remove_adapter {
33083 IDI_CALL p_request;
33084 -} diva_didd_remove_adapter_t;
33085 +} __no_const diva_didd_remove_adapter_t;
33086 typedef struct _diva_didd_read_adapter_array {
33087 void *buffer;
33088 dword length;
33089 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33090 index d303e65..28bcb7b 100644
33091 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33092 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33093 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33094 typedef struct _diva_os_idi_adapter_interface {
33095 diva_init_card_proc_t cleanup_adapter_proc;
33096 diva_cmd_card_proc_t cmd_proc;
33097 -} diva_os_idi_adapter_interface_t;
33098 +} __no_const diva_os_idi_adapter_interface_t;
33099
33100 typedef struct _diva_os_xdi_adapter {
33101 struct list_head link;
33102 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
33103 index 7bc5067..fd36232 100644
33104 --- a/drivers/isdn/i4l/isdn_tty.c
33105 +++ b/drivers/isdn/i4l/isdn_tty.c
33106 @@ -1505,9 +1505,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
33107 port = &info->port;
33108 #ifdef ISDN_DEBUG_MODEM_OPEN
33109 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
33110 - port->count);
33111 + atomic_read(&port->count))
33112 #endif
33113 - port->count++;
33114 + atomic_inc(&port->count);
33115 tty->driver_data = info;
33116 port->tty = tty;
33117 tty->port = port;
33118 @@ -1553,7 +1553,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33119 #endif
33120 return;
33121 }
33122 - if ((tty->count == 1) && (port->count != 1)) {
33123 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
33124 /*
33125 * Uh, oh. tty->count is 1, which means that the tty
33126 * structure will be freed. Info->count should always
33127 @@ -1562,15 +1562,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33128 * serial port won't be shutdown.
33129 */
33130 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
33131 - "info->count is %d\n", port->count);
33132 - port->count = 1;
33133 + "info->count is %d\n", atomic_read(&port->count));
33134 + atomic_set(&port->count, 1);
33135 }
33136 - if (--port->count < 0) {
33137 + if (atomic_dec_return(&port->count) < 0) {
33138 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
33139 - info->line, port->count);
33140 - port->count = 0;
33141 + info->line, atomic_read(&port->count));
33142 + atomic_set(&port->count, 0);
33143 }
33144 - if (port->count) {
33145 + if (atomic_read(&port->count)) {
33146 #ifdef ISDN_DEBUG_MODEM_OPEN
33147 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
33148 #endif
33149 @@ -1624,7 +1624,7 @@ isdn_tty_hangup(struct tty_struct *tty)
33150 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
33151 return;
33152 isdn_tty_shutdown(info);
33153 - port->count = 0;
33154 + atomic_set(&port->count, 0);
33155 port->flags &= ~ASYNC_NORMAL_ACTIVE;
33156 port->tty = NULL;
33157 wake_up_interruptible(&port->open_wait);
33158 @@ -1964,7 +1964,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
33159 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
33160 modem_info *info = &dev->mdm.info[i];
33161
33162 - if (info->port.count == 0)
33163 + if (atomic_read(&info->port.count) == 0)
33164 continue;
33165 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
33166 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
33167 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33168 index e74df7c..03a03ba 100644
33169 --- a/drivers/isdn/icn/icn.c
33170 +++ b/drivers/isdn/icn/icn.c
33171 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
33172 if (count > len)
33173 count = len;
33174 if (user) {
33175 - if (copy_from_user(msg, buf, count))
33176 + if (count > sizeof msg || copy_from_user(msg, buf, count))
33177 return -EFAULT;
33178 } else
33179 memcpy(msg, buf, count);
33180 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33181 index b5fdcb7..5b6c59f 100644
33182 --- a/drivers/lguest/core.c
33183 +++ b/drivers/lguest/core.c
33184 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
33185 * it's worked so far. The end address needs +1 because __get_vm_area
33186 * allocates an extra guard page, so we need space for that.
33187 */
33188 +
33189 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33190 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33191 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33192 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33193 +#else
33194 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33195 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33196 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33197 +#endif
33198 +
33199 if (!switcher_vma) {
33200 err = -ENOMEM;
33201 printk("lguest: could not map switcher pages high\n");
33202 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
33203 * Now the Switcher is mapped at the right address, we can't fail!
33204 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33205 */
33206 - memcpy(switcher_vma->addr, start_switcher_text,
33207 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33208 end_switcher_text - start_switcher_text);
33209
33210 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33211 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33212 index 39809035..ce25c5e 100644
33213 --- a/drivers/lguest/x86/core.c
33214 +++ b/drivers/lguest/x86/core.c
33215 @@ -59,7 +59,7 @@ static struct {
33216 /* Offset from where switcher.S was compiled to where we've copied it */
33217 static unsigned long switcher_offset(void)
33218 {
33219 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33220 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33221 }
33222
33223 /* This cpu's struct lguest_pages. */
33224 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33225 * These copies are pretty cheap, so we do them unconditionally: */
33226 /* Save the current Host top-level page directory.
33227 */
33228 +
33229 +#ifdef CONFIG_PAX_PER_CPU_PGD
33230 + pages->state.host_cr3 = read_cr3();
33231 +#else
33232 pages->state.host_cr3 = __pa(current->mm->pgd);
33233 +#endif
33234 +
33235 /*
33236 * Set up the Guest's page tables to see this CPU's pages (and no
33237 * other CPU's pages).
33238 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33239 * compiled-in switcher code and the high-mapped copy we just made.
33240 */
33241 for (i = 0; i < IDT_ENTRIES; i++)
33242 - default_idt_entries[i] += switcher_offset();
33243 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33244
33245 /*
33246 * Set up the Switcher's per-cpu areas.
33247 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33248 * it will be undisturbed when we switch. To change %cs and jump we
33249 * need this structure to feed to Intel's "lcall" instruction.
33250 */
33251 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33252 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33253 lguest_entry.segment = LGUEST_CS;
33254
33255 /*
33256 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33257 index 40634b0..4f5855e 100644
33258 --- a/drivers/lguest/x86/switcher_32.S
33259 +++ b/drivers/lguest/x86/switcher_32.S
33260 @@ -87,6 +87,7 @@
33261 #include <asm/page.h>
33262 #include <asm/segment.h>
33263 #include <asm/lguest.h>
33264 +#include <asm/processor-flags.h>
33265
33266 // We mark the start of the code to copy
33267 // It's placed in .text tho it's never run here
33268 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33269 // Changes type when we load it: damn Intel!
33270 // For after we switch over our page tables
33271 // That entry will be read-only: we'd crash.
33272 +
33273 +#ifdef CONFIG_PAX_KERNEXEC
33274 + mov %cr0, %edx
33275 + xor $X86_CR0_WP, %edx
33276 + mov %edx, %cr0
33277 +#endif
33278 +
33279 movl $(GDT_ENTRY_TSS*8), %edx
33280 ltr %dx
33281
33282 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33283 // Let's clear it again for our return.
33284 // The GDT descriptor of the Host
33285 // Points to the table after two "size" bytes
33286 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33287 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33288 // Clear "used" from type field (byte 5, bit 2)
33289 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33290 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33291 +
33292 +#ifdef CONFIG_PAX_KERNEXEC
33293 + mov %cr0, %eax
33294 + xor $X86_CR0_WP, %eax
33295 + mov %eax, %cr0
33296 +#endif
33297
33298 // Once our page table's switched, the Guest is live!
33299 // The Host fades as we run this final step.
33300 @@ -295,13 +309,12 @@ deliver_to_host:
33301 // I consulted gcc, and it gave
33302 // These instructions, which I gladly credit:
33303 leal (%edx,%ebx,8), %eax
33304 - movzwl (%eax),%edx
33305 - movl 4(%eax), %eax
33306 - xorw %ax, %ax
33307 - orl %eax, %edx
33308 + movl 4(%eax), %edx
33309 + movw (%eax), %dx
33310 // Now the address of the handler's in %edx
33311 // We call it now: its "iret" drops us home.
33312 - jmp *%edx
33313 + ljmp $__KERNEL_CS, $1f
33314 +1: jmp *%edx
33315
33316 // Every interrupt can come to us here
33317 // But we must truly tell each apart.
33318 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33319 index 20e5c2c..9e849a9 100644
33320 --- a/drivers/macintosh/macio_asic.c
33321 +++ b/drivers/macintosh/macio_asic.c
33322 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33323 * MacIO is matched against any Apple ID, it's probe() function
33324 * will then decide wether it applies or not
33325 */
33326 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33327 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33328 .vendor = PCI_VENDOR_ID_APPLE,
33329 .device = PCI_ANY_ID,
33330 .subvendor = PCI_ANY_ID,
33331 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33332 index 15dbe03..743fc65 100644
33333 --- a/drivers/md/bitmap.c
33334 +++ b/drivers/md/bitmap.c
33335 @@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33336 chunk_kb ? "KB" : "B");
33337 if (bitmap->storage.file) {
33338 seq_printf(seq, ", file: ");
33339 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
33340 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
33341 }
33342
33343 seq_printf(seq, "\n");
33344 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33345 index a1a3e6d..1918bfc 100644
33346 --- a/drivers/md/dm-ioctl.c
33347 +++ b/drivers/md/dm-ioctl.c
33348 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33349 cmd == DM_LIST_VERSIONS_CMD)
33350 return 0;
33351
33352 - if ((cmd == DM_DEV_CREATE_CMD)) {
33353 + if (cmd == DM_DEV_CREATE_CMD) {
33354 if (!*param->name) {
33355 DMWARN("name not supplied when creating device");
33356 return -EINVAL;
33357 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33358 index b58b7a3..8018b19 100644
33359 --- a/drivers/md/dm-raid1.c
33360 +++ b/drivers/md/dm-raid1.c
33361 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33362
33363 struct mirror {
33364 struct mirror_set *ms;
33365 - atomic_t error_count;
33366 + atomic_unchecked_t error_count;
33367 unsigned long error_type;
33368 struct dm_dev *dev;
33369 sector_t offset;
33370 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33371 struct mirror *m;
33372
33373 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33374 - if (!atomic_read(&m->error_count))
33375 + if (!atomic_read_unchecked(&m->error_count))
33376 return m;
33377
33378 return NULL;
33379 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33380 * simple way to tell if a device has encountered
33381 * errors.
33382 */
33383 - atomic_inc(&m->error_count);
33384 + atomic_inc_unchecked(&m->error_count);
33385
33386 if (test_and_set_bit(error_type, &m->error_type))
33387 return;
33388 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33389 struct mirror *m = get_default_mirror(ms);
33390
33391 do {
33392 - if (likely(!atomic_read(&m->error_count)))
33393 + if (likely(!atomic_read_unchecked(&m->error_count)))
33394 return m;
33395
33396 if (m-- == ms->mirror)
33397 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33398 {
33399 struct mirror *default_mirror = get_default_mirror(m->ms);
33400
33401 - return !atomic_read(&default_mirror->error_count);
33402 + return !atomic_read_unchecked(&default_mirror->error_count);
33403 }
33404
33405 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33406 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33407 */
33408 if (likely(region_in_sync(ms, region, 1)))
33409 m = choose_mirror(ms, bio->bi_sector);
33410 - else if (m && atomic_read(&m->error_count))
33411 + else if (m && atomic_read_unchecked(&m->error_count))
33412 m = NULL;
33413
33414 if (likely(m))
33415 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33416 }
33417
33418 ms->mirror[mirror].ms = ms;
33419 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33420 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33421 ms->mirror[mirror].error_type = 0;
33422 ms->mirror[mirror].offset = offset;
33423
33424 @@ -1352,7 +1352,7 @@ static void mirror_resume(struct dm_target *ti)
33425 */
33426 static char device_status_char(struct mirror *m)
33427 {
33428 - if (!atomic_read(&(m->error_count)))
33429 + if (!atomic_read_unchecked(&(m->error_count)))
33430 return 'A';
33431
33432 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33433 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33434 index 35c94ff..20d4c17 100644
33435 --- a/drivers/md/dm-stripe.c
33436 +++ b/drivers/md/dm-stripe.c
33437 @@ -20,7 +20,7 @@ struct stripe {
33438 struct dm_dev *dev;
33439 sector_t physical_start;
33440
33441 - atomic_t error_count;
33442 + atomic_unchecked_t error_count;
33443 };
33444
33445 struct stripe_c {
33446 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33447 kfree(sc);
33448 return r;
33449 }
33450 - atomic_set(&(sc->stripe[i].error_count), 0);
33451 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33452 }
33453
33454 ti->private = sc;
33455 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33456 DMEMIT("%d ", sc->stripes);
33457 for (i = 0; i < sc->stripes; i++) {
33458 DMEMIT("%s ", sc->stripe[i].dev->name);
33459 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33460 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33461 'D' : 'A';
33462 }
33463 buffer[i] = '\0';
33464 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33465 */
33466 for (i = 0; i < sc->stripes; i++)
33467 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33468 - atomic_inc(&(sc->stripe[i].error_count));
33469 - if (atomic_read(&(sc->stripe[i].error_count)) <
33470 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33471 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33472 DM_IO_ERROR_THRESHOLD)
33473 schedule_work(&sc->trigger_event);
33474 }
33475 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33476 index 2e227fb..44ead1f 100644
33477 --- a/drivers/md/dm-table.c
33478 +++ b/drivers/md/dm-table.c
33479 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33480 if (!dev_size)
33481 return 0;
33482
33483 - if ((start >= dev_size) || (start + len > dev_size)) {
33484 + if ((start >= dev_size) || (len > dev_size - start)) {
33485 DMWARN("%s: %s too small for target: "
33486 "start=%llu, len=%llu, dev_size=%llu",
33487 dm_device_name(ti->table->md), bdevname(bdev, b),
33488 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33489 index 3e2907f..c28851a 100644
33490 --- a/drivers/md/dm-thin-metadata.c
33491 +++ b/drivers/md/dm-thin-metadata.c
33492 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33493
33494 pmd->info.tm = tm;
33495 pmd->info.levels = 2;
33496 - pmd->info.value_type.context = pmd->data_sm;
33497 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33498 pmd->info.value_type.size = sizeof(__le64);
33499 pmd->info.value_type.inc = data_block_inc;
33500 pmd->info.value_type.dec = data_block_dec;
33501 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33502
33503 pmd->bl_info.tm = tm;
33504 pmd->bl_info.levels = 1;
33505 - pmd->bl_info.value_type.context = pmd->data_sm;
33506 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33507 pmd->bl_info.value_type.size = sizeof(__le64);
33508 pmd->bl_info.value_type.inc = data_block_inc;
33509 pmd->bl_info.value_type.dec = data_block_dec;
33510 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33511 index e24143c..ce2f21a1 100644
33512 --- a/drivers/md/dm.c
33513 +++ b/drivers/md/dm.c
33514 @@ -176,9 +176,9 @@ struct mapped_device {
33515 /*
33516 * Event handling.
33517 */
33518 - atomic_t event_nr;
33519 + atomic_unchecked_t event_nr;
33520 wait_queue_head_t eventq;
33521 - atomic_t uevent_seq;
33522 + atomic_unchecked_t uevent_seq;
33523 struct list_head uevent_list;
33524 spinlock_t uevent_lock; /* Protect access to uevent_list */
33525
33526 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33527 rwlock_init(&md->map_lock);
33528 atomic_set(&md->holders, 1);
33529 atomic_set(&md->open_count, 0);
33530 - atomic_set(&md->event_nr, 0);
33531 - atomic_set(&md->uevent_seq, 0);
33532 + atomic_set_unchecked(&md->event_nr, 0);
33533 + atomic_set_unchecked(&md->uevent_seq, 0);
33534 INIT_LIST_HEAD(&md->uevent_list);
33535 spin_lock_init(&md->uevent_lock);
33536
33537 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33538
33539 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33540
33541 - atomic_inc(&md->event_nr);
33542 + atomic_inc_unchecked(&md->event_nr);
33543 wake_up(&md->eventq);
33544 }
33545
33546 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33547
33548 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33549 {
33550 - return atomic_add_return(1, &md->uevent_seq);
33551 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33552 }
33553
33554 uint32_t dm_get_event_nr(struct mapped_device *md)
33555 {
33556 - return atomic_read(&md->event_nr);
33557 + return atomic_read_unchecked(&md->event_nr);
33558 }
33559
33560 int dm_wait_event(struct mapped_device *md, int event_nr)
33561 {
33562 return wait_event_interruptible(md->eventq,
33563 - (event_nr != atomic_read(&md->event_nr)));
33564 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33565 }
33566
33567 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33568 diff --git a/drivers/md/md.c b/drivers/md/md.c
33569 index 6fc0c26..0c864b6 100644
33570 --- a/drivers/md/md.c
33571 +++ b/drivers/md/md.c
33572 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33573 * start build, activate spare
33574 */
33575 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33576 -static atomic_t md_event_count;
33577 +static atomic_unchecked_t md_event_count;
33578 void md_new_event(struct mddev *mddev)
33579 {
33580 - atomic_inc(&md_event_count);
33581 + atomic_inc_unchecked(&md_event_count);
33582 wake_up(&md_event_waiters);
33583 }
33584 EXPORT_SYMBOL_GPL(md_new_event);
33585 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33586 */
33587 static void md_new_event_inintr(struct mddev *mddev)
33588 {
33589 - atomic_inc(&md_event_count);
33590 + atomic_inc_unchecked(&md_event_count);
33591 wake_up(&md_event_waiters);
33592 }
33593
33594 @@ -1568,7 +1568,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33595 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
33596 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
33597 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
33598 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33599 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33600
33601 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33602 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33603 @@ -1812,7 +1812,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33604 else
33605 sb->resync_offset = cpu_to_le64(0);
33606
33607 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33608 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33609
33610 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33611 sb->size = cpu_to_le64(mddev->dev_sectors);
33612 @@ -2806,7 +2806,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33613 static ssize_t
33614 errors_show(struct md_rdev *rdev, char *page)
33615 {
33616 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33617 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33618 }
33619
33620 static ssize_t
33621 @@ -2815,7 +2815,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33622 char *e;
33623 unsigned long n = simple_strtoul(buf, &e, 10);
33624 if (*buf && (*e == 0 || *e == '\n')) {
33625 - atomic_set(&rdev->corrected_errors, n);
33626 + atomic_set_unchecked(&rdev->corrected_errors, n);
33627 return len;
33628 }
33629 return -EINVAL;
33630 @@ -3262,8 +3262,8 @@ int md_rdev_init(struct md_rdev *rdev)
33631 rdev->sb_loaded = 0;
33632 rdev->bb_page = NULL;
33633 atomic_set(&rdev->nr_pending, 0);
33634 - atomic_set(&rdev->read_errors, 0);
33635 - atomic_set(&rdev->corrected_errors, 0);
33636 + atomic_set_unchecked(&rdev->read_errors, 0);
33637 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33638
33639 INIT_LIST_HEAD(&rdev->same_set);
33640 init_waitqueue_head(&rdev->blocked_wait);
33641 @@ -7000,7 +7000,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33642
33643 spin_unlock(&pers_lock);
33644 seq_printf(seq, "\n");
33645 - seq->poll_event = atomic_read(&md_event_count);
33646 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33647 return 0;
33648 }
33649 if (v == (void*)2) {
33650 @@ -7103,7 +7103,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33651 return error;
33652
33653 seq = file->private_data;
33654 - seq->poll_event = atomic_read(&md_event_count);
33655 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33656 return error;
33657 }
33658
33659 @@ -7117,7 +7117,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33660 /* always allow read */
33661 mask = POLLIN | POLLRDNORM;
33662
33663 - if (seq->poll_event != atomic_read(&md_event_count))
33664 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33665 mask |= POLLERR | POLLPRI;
33666 return mask;
33667 }
33668 @@ -7161,7 +7161,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33669 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33670 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33671 (int)part_stat_read(&disk->part0, sectors[1]) -
33672 - atomic_read(&disk->sync_io);
33673 + atomic_read_unchecked(&disk->sync_io);
33674 /* sync IO will cause sync_io to increase before the disk_stats
33675 * as sync_io is counted when a request starts, and
33676 * disk_stats is counted when it completes.
33677 diff --git a/drivers/md/md.h b/drivers/md/md.h
33678 index 7b4a3c3..994ad4f 100644
33679 --- a/drivers/md/md.h
33680 +++ b/drivers/md/md.h
33681 @@ -94,13 +94,13 @@ struct md_rdev {
33682 * only maintained for arrays that
33683 * support hot removal
33684 */
33685 - atomic_t read_errors; /* number of consecutive read errors that
33686 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33687 * we have tried to ignore.
33688 */
33689 struct timespec last_read_error; /* monotonic time since our
33690 * last read error
33691 */
33692 - atomic_t corrected_errors; /* number of corrected read errors,
33693 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33694 * for reporting to userspace and storing
33695 * in superblock.
33696 */
33697 @@ -435,7 +435,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33698
33699 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33700 {
33701 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33702 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33703 }
33704
33705 struct md_personality
33706 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33707 index fc90c11..c8cd9a9 100644
33708 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33709 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33710 @@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33711 /*----------------------------------------------------------------*/
33712
33713 struct sm_checker {
33714 - struct dm_space_map sm;
33715 + dm_space_map_no_const sm;
33716
33717 struct count_array old_counts;
33718 struct count_array counts;
33719 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33720 index 3d0ed53..35dc592 100644
33721 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33722 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33723 @@ -23,7 +23,7 @@
33724 * Space map interface.
33725 */
33726 struct sm_disk {
33727 - struct dm_space_map sm;
33728 + dm_space_map_no_const sm;
33729
33730 struct ll_disk ll;
33731 struct ll_disk old_ll;
33732 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33733 index e89ae5e..062e4c2 100644
33734 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33735 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33736 @@ -43,7 +43,7 @@ struct block_op {
33737 };
33738
33739 struct sm_metadata {
33740 - struct dm_space_map sm;
33741 + dm_space_map_no_const sm;
33742
33743 struct ll_disk ll;
33744 struct ll_disk old_ll;
33745 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33746 index 1cbfc6b..56e1dbb 100644
33747 --- a/drivers/md/persistent-data/dm-space-map.h
33748 +++ b/drivers/md/persistent-data/dm-space-map.h
33749 @@ -60,6 +60,7 @@ struct dm_space_map {
33750 int (*root_size)(struct dm_space_map *sm, size_t *result);
33751 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33752 };
33753 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33754
33755 /*----------------------------------------------------------------*/
33756
33757 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33758 index 53aec45..250851c 100644
33759 --- a/drivers/md/raid1.c
33760 +++ b/drivers/md/raid1.c
33761 @@ -1685,7 +1685,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33762 if (r1_sync_page_io(rdev, sect, s,
33763 bio->bi_io_vec[idx].bv_page,
33764 READ) != 0)
33765 - atomic_add(s, &rdev->corrected_errors);
33766 + atomic_add_unchecked(s, &rdev->corrected_errors);
33767 }
33768 sectors -= s;
33769 sect += s;
33770 @@ -1907,7 +1907,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33771 test_bit(In_sync, &rdev->flags)) {
33772 if (r1_sync_page_io(rdev, sect, s,
33773 conf->tmppage, READ)) {
33774 - atomic_add(s, &rdev->corrected_errors);
33775 + atomic_add_unchecked(s, &rdev->corrected_errors);
33776 printk(KERN_INFO
33777 "md/raid1:%s: read error corrected "
33778 "(%d sectors at %llu on %s)\n",
33779 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33780 index e987da4..83649e4 100644
33781 --- a/drivers/md/raid10.c
33782 +++ b/drivers/md/raid10.c
33783 @@ -1790,7 +1790,7 @@ static void end_sync_read(struct bio *bio, int error)
33784 /* The write handler will notice the lack of
33785 * R10BIO_Uptodate and record any errors etc
33786 */
33787 - atomic_add(r10_bio->sectors,
33788 + atomic_add_unchecked(r10_bio->sectors,
33789 &conf->mirrors[d].rdev->corrected_errors);
33790
33791 /* for reconstruct, we always reschedule after a read.
33792 @@ -2139,7 +2139,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33793 {
33794 struct timespec cur_time_mon;
33795 unsigned long hours_since_last;
33796 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33797 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33798
33799 ktime_get_ts(&cur_time_mon);
33800
33801 @@ -2161,9 +2161,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33802 * overflowing the shift of read_errors by hours_since_last.
33803 */
33804 if (hours_since_last >= 8 * sizeof(read_errors))
33805 - atomic_set(&rdev->read_errors, 0);
33806 + atomic_set_unchecked(&rdev->read_errors, 0);
33807 else
33808 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33809 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33810 }
33811
33812 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33813 @@ -2217,8 +2217,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33814 return;
33815
33816 check_decay_read_errors(mddev, rdev);
33817 - atomic_inc(&rdev->read_errors);
33818 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33819 + atomic_inc_unchecked(&rdev->read_errors);
33820 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33821 char b[BDEVNAME_SIZE];
33822 bdevname(rdev->bdev, b);
33823
33824 @@ -2226,7 +2226,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33825 "md/raid10:%s: %s: Raid device exceeded "
33826 "read_error threshold [cur %d:max %d]\n",
33827 mdname(mddev), b,
33828 - atomic_read(&rdev->read_errors), max_read_errors);
33829 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33830 printk(KERN_NOTICE
33831 "md/raid10:%s: %s: Failing raid device\n",
33832 mdname(mddev), b);
33833 @@ -2381,7 +2381,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33834 sect +
33835 choose_data_offset(r10_bio, rdev)),
33836 bdevname(rdev->bdev, b));
33837 - atomic_add(s, &rdev->corrected_errors);
33838 + atomic_add_unchecked(s, &rdev->corrected_errors);
33839 }
33840
33841 rdev_dec_pending(rdev, mddev);
33842 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33843 index bcd096b..c24f08f 100644
33844 --- a/drivers/md/raid5.c
33845 +++ b/drivers/md/raid5.c
33846 @@ -1740,19 +1740,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
33847 mdname(conf->mddev), STRIPE_SECTORS,
33848 (unsigned long long)s,
33849 bdevname(rdev->bdev, b));
33850 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33851 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33852 clear_bit(R5_ReadError, &sh->dev[i].flags);
33853 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33854 }
33855 - if (atomic_read(&rdev->read_errors))
33856 - atomic_set(&rdev->read_errors, 0);
33857 + if (atomic_read_unchecked(&rdev->read_errors))
33858 + atomic_set_unchecked(&rdev->read_errors, 0);
33859 } else {
33860 const char *bdn = bdevname(rdev->bdev, b);
33861 int retry = 0;
33862 int set_bad = 0;
33863
33864 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33865 - atomic_inc(&rdev->read_errors);
33866 + atomic_inc_unchecked(&rdev->read_errors);
33867 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33868 printk_ratelimited(
33869 KERN_WARNING
33870 @@ -1780,7 +1780,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33871 mdname(conf->mddev),
33872 (unsigned long long)s,
33873 bdn);
33874 - } else if (atomic_read(&rdev->read_errors)
33875 + } else if (atomic_read_unchecked(&rdev->read_errors)
33876 > conf->max_nr_stripes)
33877 printk(KERN_WARNING
33878 "md/raid:%s: Too many read errors, failing device %s.\n",
33879 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33880 index 131b938..8572ed1 100644
33881 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33882 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33883 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33884 .subvendor = _subvend, .subdevice = _subdev, \
33885 .driver_data = (unsigned long)&_driverdata }
33886
33887 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33888 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33889 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33890 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33891 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33892 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33893 index fa7188a..04a045e 100644
33894 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33895 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33896 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33897 union {
33898 dmx_ts_cb ts;
33899 dmx_section_cb sec;
33900 - } cb;
33901 + } __no_const cb;
33902
33903 struct dvb_demux *demux;
33904 void *priv;
33905 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33906 index 39eab73..60033e7 100644
33907 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33908 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33909 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33910 const struct dvb_device *template, void *priv, int type)
33911 {
33912 struct dvb_device *dvbdev;
33913 - struct file_operations *dvbdevfops;
33914 + file_operations_no_const *dvbdevfops;
33915 struct device *clsdev;
33916 int minor;
33917 int id;
33918 diff --git a/drivers/media/dvb/dvb-usb/az6007.c b/drivers/media/dvb/dvb-usb/az6007.c
33919 index 4008b9c..ce714f5 100644
33920 --- a/drivers/media/dvb/dvb-usb/az6007.c
33921 +++ b/drivers/media/dvb/dvb-usb/az6007.c
33922 @@ -590,7 +590,7 @@ static int az6007_read_mac_addr(struct dvb_usb_device *d, u8 mac[6])
33923 int ret;
33924
33925 ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6);
33926 - memcpy(mac, st->data, sizeof(mac));
33927 + memcpy(mac, st->data, 6);
33928
33929 if (ret > 0)
33930 deb_info("%s: mac is %02x:%02x:%02x:%02x:%02x:%02x\n",
33931 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33932 index 3940bb0..fb3952a 100644
33933 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33934 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33935 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33936
33937 struct dib0700_adapter_state {
33938 int (*set_param_save) (struct dvb_frontend *);
33939 -};
33940 +} __no_const;
33941
33942 static int dib7070_set_param_override(struct dvb_frontend *fe)
33943 {
33944 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33945 index 9382895..ac8093c 100644
33946 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33947 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33948 @@ -95,7 +95,7 @@ struct su3000_state {
33949
33950 struct s6x0_state {
33951 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33952 -};
33953 +} __no_const;
33954
33955 /* debug */
33956 static int dvb_usb_dw2102_debug;
33957 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33958 index 404f63a..4796533 100644
33959 --- a/drivers/media/dvb/frontends/dib3000.h
33960 +++ b/drivers/media/dvb/frontends/dib3000.h
33961 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33962 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33963 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33964 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33965 -};
33966 +} __no_const;
33967
33968 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33969 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33970 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33971 index 7539a5d..06531a6 100644
33972 --- a/drivers/media/dvb/ngene/ngene-cards.c
33973 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33974 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33975
33976 /****************************************************************************/
33977
33978 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33979 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33980 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33981 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33982 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33983 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33984 index 16a089f..1661b11 100644
33985 --- a/drivers/media/radio/radio-cadet.c
33986 +++ b/drivers/media/radio/radio-cadet.c
33987 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33988 unsigned char readbuf[RDS_BUFFER];
33989 int i = 0;
33990
33991 + if (count > RDS_BUFFER)
33992 + return -EFAULT;
33993 mutex_lock(&dev->lock);
33994 if (dev->rdsstat == 0) {
33995 dev->rdsstat = 1;
33996 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33997 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33998 mutex_unlock(&dev->lock);
33999
34000 - if (copy_to_user(data, readbuf, i))
34001 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
34002 return -EFAULT;
34003 return i;
34004 }
34005 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
34006 index 9cde353..8c6a1c3 100644
34007 --- a/drivers/media/video/au0828/au0828.h
34008 +++ b/drivers/media/video/au0828/au0828.h
34009 @@ -191,7 +191,7 @@ struct au0828_dev {
34010
34011 /* I2C */
34012 struct i2c_adapter i2c_adap;
34013 - struct i2c_algorithm i2c_algo;
34014 + i2c_algorithm_no_const i2c_algo;
34015 struct i2c_client i2c_client;
34016 u32 i2c_rc;
34017
34018 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
34019 index 04bf662..e0ac026 100644
34020 --- a/drivers/media/video/cx88/cx88-alsa.c
34021 +++ b/drivers/media/video/cx88/cx88-alsa.c
34022 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34023 * Only boards with eeprom and byte 1 at eeprom=1 have it
34024 */
34025
34026 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34027 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34028 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34029 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34030 {0, }
34031 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
34032 index 88cf9d9..bbc4b2c 100644
34033 --- a/drivers/media/video/omap/omap_vout.c
34034 +++ b/drivers/media/video/omap/omap_vout.c
34035 @@ -64,7 +64,6 @@ enum omap_vout_channels {
34036 OMAP_VIDEO2,
34037 };
34038
34039 -static struct videobuf_queue_ops video_vbq_ops;
34040 /* Variables configurable through module params*/
34041 static u32 video1_numbuffers = 3;
34042 static u32 video2_numbuffers = 3;
34043 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
34044 {
34045 struct videobuf_queue *q;
34046 struct omap_vout_device *vout = NULL;
34047 + static struct videobuf_queue_ops video_vbq_ops = {
34048 + .buf_setup = omap_vout_buffer_setup,
34049 + .buf_prepare = omap_vout_buffer_prepare,
34050 + .buf_release = omap_vout_buffer_release,
34051 + .buf_queue = omap_vout_buffer_queue,
34052 + };
34053
34054 vout = video_drvdata(file);
34055 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34056 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
34057 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34058
34059 q = &vout->vbq;
34060 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34061 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34062 - video_vbq_ops.buf_release = omap_vout_buffer_release;
34063 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34064 spin_lock_init(&vout->vbq_lock);
34065
34066 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
34067 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34068 index 036952f..80d356d 100644
34069 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34070 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34071 @@ -196,7 +196,7 @@ struct pvr2_hdw {
34072
34073 /* I2C stuff */
34074 struct i2c_adapter i2c_adap;
34075 - struct i2c_algorithm i2c_algo;
34076 + i2c_algorithm_no_const i2c_algo;
34077 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34078 int i2c_cx25840_hack_state;
34079 int i2c_linked;
34080 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34081 index 02194c0..091733b 100644
34082 --- a/drivers/media/video/timblogiw.c
34083 +++ b/drivers/media/video/timblogiw.c
34084 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34085
34086 /* Platform device functions */
34087
34088 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34089 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34090 .vidioc_querycap = timblogiw_querycap,
34091 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34092 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34093 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34094 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34095 };
34096
34097 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34098 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34099 .owner = THIS_MODULE,
34100 .open = timblogiw_open,
34101 .release = timblogiw_close,
34102 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34103 index d99db56..a16b959 100644
34104 --- a/drivers/message/fusion/mptbase.c
34105 +++ b/drivers/message/fusion/mptbase.c
34106 @@ -6751,8 +6751,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34107 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34108 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34109
34110 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34111 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34112 +#else
34113 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34114 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34115 +#endif
34116 +
34117 /*
34118 * Rounding UP to nearest 4-kB boundary here...
34119 */
34120 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34121 index 551262e..7551198 100644
34122 --- a/drivers/message/fusion/mptsas.c
34123 +++ b/drivers/message/fusion/mptsas.c
34124 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34125 return 0;
34126 }
34127
34128 +static inline void
34129 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34130 +{
34131 + if (phy_info->port_details) {
34132 + phy_info->port_details->rphy = rphy;
34133 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34134 + ioc->name, rphy));
34135 + }
34136 +
34137 + if (rphy) {
34138 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34139 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34140 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34141 + ioc->name, rphy, rphy->dev.release));
34142 + }
34143 +}
34144 +
34145 /* no mutex */
34146 static void
34147 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34148 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34149 return NULL;
34150 }
34151
34152 -static inline void
34153 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34154 -{
34155 - if (phy_info->port_details) {
34156 - phy_info->port_details->rphy = rphy;
34157 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34158 - ioc->name, rphy));
34159 - }
34160 -
34161 - if (rphy) {
34162 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34163 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34164 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34165 - ioc->name, rphy, rphy->dev.release));
34166 - }
34167 -}
34168 -
34169 static inline struct sas_port *
34170 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34171 {
34172 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34173 index 0c3ced7..1fe34ec 100644
34174 --- a/drivers/message/fusion/mptscsih.c
34175 +++ b/drivers/message/fusion/mptscsih.c
34176 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34177
34178 h = shost_priv(SChost);
34179
34180 - if (h) {
34181 - if (h->info_kbuf == NULL)
34182 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34183 - return h->info_kbuf;
34184 - h->info_kbuf[0] = '\0';
34185 + if (!h)
34186 + return NULL;
34187
34188 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34189 - h->info_kbuf[size-1] = '\0';
34190 - }
34191 + if (h->info_kbuf == NULL)
34192 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34193 + return h->info_kbuf;
34194 + h->info_kbuf[0] = '\0';
34195 +
34196 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34197 + h->info_kbuf[size-1] = '\0';
34198
34199 return h->info_kbuf;
34200 }
34201 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34202 index 506c36f..b137580 100644
34203 --- a/drivers/message/i2o/i2o_proc.c
34204 +++ b/drivers/message/i2o/i2o_proc.c
34205 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34206 "Array Controller Device"
34207 };
34208
34209 -static char *chtostr(u8 * chars, int n)
34210 -{
34211 - char tmp[256];
34212 - tmp[0] = 0;
34213 - return strncat(tmp, (char *)chars, n);
34214 -}
34215 -
34216 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34217 char *group)
34218 {
34219 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34220
34221 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34222 seq_printf(seq, "%-#8x", ddm_table.module_id);
34223 - seq_printf(seq, "%-29s",
34224 - chtostr(ddm_table.module_name_version, 28));
34225 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34226 seq_printf(seq, "%9d ", ddm_table.data_size);
34227 seq_printf(seq, "%8d", ddm_table.code_size);
34228
34229 @@ -927,8 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34230
34231 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34232 seq_printf(seq, "%-#8x", dst->module_id);
34233 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34234 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34235 + seq_printf(seq, "%-.28s", dst->module_name_version);
34236 + seq_printf(seq, "%-.8s", dst->date);
34237 seq_printf(seq, "%8d ", dst->module_size);
34238 seq_printf(seq, "%8d ", dst->mpb_size);
34239 seq_printf(seq, "0x%04x", dst->module_flags);
34240 @@ -1259,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34241 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34242 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34243 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34244 - seq_printf(seq, "Vendor info : %s\n",
34245 - chtostr((u8 *) (work32 + 2), 16));
34246 - seq_printf(seq, "Product info : %s\n",
34247 - chtostr((u8 *) (work32 + 6), 16));
34248 - seq_printf(seq, "Description : %s\n",
34249 - chtostr((u8 *) (work32 + 10), 16));
34250 - seq_printf(seq, "Product rev. : %s\n",
34251 - chtostr((u8 *) (work32 + 14), 8));
34252 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34253 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34254 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34255 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34256
34257 seq_printf(seq, "Serial number : ");
34258 print_serial_number(seq, (u8 *) (work32 + 16),
34259 @@ -1311,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34260 }
34261
34262 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34263 - seq_printf(seq, "Module name : %s\n",
34264 - chtostr(result.module_name, 24));
34265 - seq_printf(seq, "Module revision : %s\n",
34266 - chtostr(result.module_rev, 8));
34267 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34268 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34269
34270 seq_printf(seq, "Serial number : ");
34271 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34272 @@ -1345,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34273 return 0;
34274 }
34275
34276 - seq_printf(seq, "Device name : %s\n",
34277 - chtostr(result.device_name, 64));
34278 - seq_printf(seq, "Service name : %s\n",
34279 - chtostr(result.service_name, 64));
34280 - seq_printf(seq, "Physical name : %s\n",
34281 - chtostr(result.physical_location, 64));
34282 - seq_printf(seq, "Instance number : %s\n",
34283 - chtostr(result.instance_number, 4));
34284 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34285 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34286 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34287 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34288
34289 return 0;
34290 }
34291 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34292 index a8c08f3..155fe3d 100644
34293 --- a/drivers/message/i2o/iop.c
34294 +++ b/drivers/message/i2o/iop.c
34295 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34296
34297 spin_lock_irqsave(&c->context_list_lock, flags);
34298
34299 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34300 - atomic_inc(&c->context_list_counter);
34301 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34302 + atomic_inc_unchecked(&c->context_list_counter);
34303
34304 - entry->context = atomic_read(&c->context_list_counter);
34305 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34306
34307 list_add(&entry->list, &c->context_list);
34308
34309 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34310
34311 #if BITS_PER_LONG == 64
34312 spin_lock_init(&c->context_list_lock);
34313 - atomic_set(&c->context_list_counter, 0);
34314 + atomic_set_unchecked(&c->context_list_counter, 0);
34315 INIT_LIST_HEAD(&c->context_list);
34316 #endif
34317
34318 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34319 index 7ce65f4..e66e9bc 100644
34320 --- a/drivers/mfd/abx500-core.c
34321 +++ b/drivers/mfd/abx500-core.c
34322 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34323
34324 struct abx500_device_entry {
34325 struct list_head list;
34326 - struct abx500_ops ops;
34327 + abx500_ops_no_const ops;
34328 struct device *dev;
34329 };
34330
34331 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34332 index 2ea9998..51dabee 100644
34333 --- a/drivers/mfd/janz-cmodio.c
34334 +++ b/drivers/mfd/janz-cmodio.c
34335 @@ -13,6 +13,7 @@
34336
34337 #include <linux/kernel.h>
34338 #include <linux/module.h>
34339 +#include <linux/slab.h>
34340 #include <linux/init.h>
34341 #include <linux/pci.h>
34342 #include <linux/interrupt.h>
34343 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34344 index a981e2a..5ca0c8b 100644
34345 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34346 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34347 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34348 * the lid is closed. This leads to interrupts as soon as a little move
34349 * is done.
34350 */
34351 - atomic_inc(&lis3->count);
34352 + atomic_inc_unchecked(&lis3->count);
34353
34354 wake_up_interruptible(&lis3->misc_wait);
34355 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34356 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34357 if (lis3->pm_dev)
34358 pm_runtime_get_sync(lis3->pm_dev);
34359
34360 - atomic_set(&lis3->count, 0);
34361 + atomic_set_unchecked(&lis3->count, 0);
34362 return 0;
34363 }
34364
34365 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34366 add_wait_queue(&lis3->misc_wait, &wait);
34367 while (true) {
34368 set_current_state(TASK_INTERRUPTIBLE);
34369 - data = atomic_xchg(&lis3->count, 0);
34370 + data = atomic_xchg_unchecked(&lis3->count, 0);
34371 if (data)
34372 break;
34373
34374 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34375 struct lis3lv02d, miscdev);
34376
34377 poll_wait(file, &lis3->misc_wait, wait);
34378 - if (atomic_read(&lis3->count))
34379 + if (atomic_read_unchecked(&lis3->count))
34380 return POLLIN | POLLRDNORM;
34381 return 0;
34382 }
34383 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34384 index 2b1482a..5d33616 100644
34385 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34386 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34387 @@ -266,7 +266,7 @@ struct lis3lv02d {
34388 struct input_polled_dev *idev; /* input device */
34389 struct platform_device *pdev; /* platform device */
34390 struct regulator_bulk_data regulators[2];
34391 - atomic_t count; /* interrupt count after last read */
34392 + atomic_unchecked_t count; /* interrupt count after last read */
34393 union axis_conversion ac; /* hw -> logical axis */
34394 int mapped_btns[3];
34395
34396 diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
34397 index 28adefe..08aad69 100644
34398 --- a/drivers/misc/lkdtm.c
34399 +++ b/drivers/misc/lkdtm.c
34400 @@ -477,6 +477,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
34401 int i, n, out;
34402
34403 buf = (char *)__get_free_page(GFP_KERNEL);
34404 + if (buf == NULL)
34405 + return -ENOMEM;
34406
34407 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
34408 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
34409 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34410 index 2f30bad..c4c13d0 100644
34411 --- a/drivers/misc/sgi-gru/gruhandles.c
34412 +++ b/drivers/misc/sgi-gru/gruhandles.c
34413 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34414 unsigned long nsec;
34415
34416 nsec = CLKS2NSEC(clks);
34417 - atomic_long_inc(&mcs_op_statistics[op].count);
34418 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34419 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34420 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34421 if (mcs_op_statistics[op].max < nsec)
34422 mcs_op_statistics[op].max = nsec;
34423 }
34424 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34425 index 950dbe9..eeef0f8 100644
34426 --- a/drivers/misc/sgi-gru/gruprocfs.c
34427 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34428 @@ -32,9 +32,9 @@
34429
34430 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34431
34432 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34433 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34434 {
34435 - unsigned long val = atomic_long_read(v);
34436 + unsigned long val = atomic_long_read_unchecked(v);
34437
34438 seq_printf(s, "%16lu %s\n", val, id);
34439 }
34440 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34441
34442 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34443 for (op = 0; op < mcsop_last; op++) {
34444 - count = atomic_long_read(&mcs_op_statistics[op].count);
34445 - total = atomic_long_read(&mcs_op_statistics[op].total);
34446 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34447 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34448 max = mcs_op_statistics[op].max;
34449 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34450 count ? total / count : 0, max);
34451 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34452 index 5c3ce24..4915ccb 100644
34453 --- a/drivers/misc/sgi-gru/grutables.h
34454 +++ b/drivers/misc/sgi-gru/grutables.h
34455 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34456 * GRU statistics.
34457 */
34458 struct gru_stats_s {
34459 - atomic_long_t vdata_alloc;
34460 - atomic_long_t vdata_free;
34461 - atomic_long_t gts_alloc;
34462 - atomic_long_t gts_free;
34463 - atomic_long_t gms_alloc;
34464 - atomic_long_t gms_free;
34465 - atomic_long_t gts_double_allocate;
34466 - atomic_long_t assign_context;
34467 - atomic_long_t assign_context_failed;
34468 - atomic_long_t free_context;
34469 - atomic_long_t load_user_context;
34470 - atomic_long_t load_kernel_context;
34471 - atomic_long_t lock_kernel_context;
34472 - atomic_long_t unlock_kernel_context;
34473 - atomic_long_t steal_user_context;
34474 - atomic_long_t steal_kernel_context;
34475 - atomic_long_t steal_context_failed;
34476 - atomic_long_t nopfn;
34477 - atomic_long_t asid_new;
34478 - atomic_long_t asid_next;
34479 - atomic_long_t asid_wrap;
34480 - atomic_long_t asid_reuse;
34481 - atomic_long_t intr;
34482 - atomic_long_t intr_cbr;
34483 - atomic_long_t intr_tfh;
34484 - atomic_long_t intr_spurious;
34485 - atomic_long_t intr_mm_lock_failed;
34486 - atomic_long_t call_os;
34487 - atomic_long_t call_os_wait_queue;
34488 - atomic_long_t user_flush_tlb;
34489 - atomic_long_t user_unload_context;
34490 - atomic_long_t user_exception;
34491 - atomic_long_t set_context_option;
34492 - atomic_long_t check_context_retarget_intr;
34493 - atomic_long_t check_context_unload;
34494 - atomic_long_t tlb_dropin;
34495 - atomic_long_t tlb_preload_page;
34496 - atomic_long_t tlb_dropin_fail_no_asid;
34497 - atomic_long_t tlb_dropin_fail_upm;
34498 - atomic_long_t tlb_dropin_fail_invalid;
34499 - atomic_long_t tlb_dropin_fail_range_active;
34500 - atomic_long_t tlb_dropin_fail_idle;
34501 - atomic_long_t tlb_dropin_fail_fmm;
34502 - atomic_long_t tlb_dropin_fail_no_exception;
34503 - atomic_long_t tfh_stale_on_fault;
34504 - atomic_long_t mmu_invalidate_range;
34505 - atomic_long_t mmu_invalidate_page;
34506 - atomic_long_t flush_tlb;
34507 - atomic_long_t flush_tlb_gru;
34508 - atomic_long_t flush_tlb_gru_tgh;
34509 - atomic_long_t flush_tlb_gru_zero_asid;
34510 + atomic_long_unchecked_t vdata_alloc;
34511 + atomic_long_unchecked_t vdata_free;
34512 + atomic_long_unchecked_t gts_alloc;
34513 + atomic_long_unchecked_t gts_free;
34514 + atomic_long_unchecked_t gms_alloc;
34515 + atomic_long_unchecked_t gms_free;
34516 + atomic_long_unchecked_t gts_double_allocate;
34517 + atomic_long_unchecked_t assign_context;
34518 + atomic_long_unchecked_t assign_context_failed;
34519 + atomic_long_unchecked_t free_context;
34520 + atomic_long_unchecked_t load_user_context;
34521 + atomic_long_unchecked_t load_kernel_context;
34522 + atomic_long_unchecked_t lock_kernel_context;
34523 + atomic_long_unchecked_t unlock_kernel_context;
34524 + atomic_long_unchecked_t steal_user_context;
34525 + atomic_long_unchecked_t steal_kernel_context;
34526 + atomic_long_unchecked_t steal_context_failed;
34527 + atomic_long_unchecked_t nopfn;
34528 + atomic_long_unchecked_t asid_new;
34529 + atomic_long_unchecked_t asid_next;
34530 + atomic_long_unchecked_t asid_wrap;
34531 + atomic_long_unchecked_t asid_reuse;
34532 + atomic_long_unchecked_t intr;
34533 + atomic_long_unchecked_t intr_cbr;
34534 + atomic_long_unchecked_t intr_tfh;
34535 + atomic_long_unchecked_t intr_spurious;
34536 + atomic_long_unchecked_t intr_mm_lock_failed;
34537 + atomic_long_unchecked_t call_os;
34538 + atomic_long_unchecked_t call_os_wait_queue;
34539 + atomic_long_unchecked_t user_flush_tlb;
34540 + atomic_long_unchecked_t user_unload_context;
34541 + atomic_long_unchecked_t user_exception;
34542 + atomic_long_unchecked_t set_context_option;
34543 + atomic_long_unchecked_t check_context_retarget_intr;
34544 + atomic_long_unchecked_t check_context_unload;
34545 + atomic_long_unchecked_t tlb_dropin;
34546 + atomic_long_unchecked_t tlb_preload_page;
34547 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34548 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34549 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34550 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34551 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34552 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34553 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34554 + atomic_long_unchecked_t tfh_stale_on_fault;
34555 + atomic_long_unchecked_t mmu_invalidate_range;
34556 + atomic_long_unchecked_t mmu_invalidate_page;
34557 + atomic_long_unchecked_t flush_tlb;
34558 + atomic_long_unchecked_t flush_tlb_gru;
34559 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34560 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34561
34562 - atomic_long_t copy_gpa;
34563 - atomic_long_t read_gpa;
34564 + atomic_long_unchecked_t copy_gpa;
34565 + atomic_long_unchecked_t read_gpa;
34566
34567 - atomic_long_t mesq_receive;
34568 - atomic_long_t mesq_receive_none;
34569 - atomic_long_t mesq_send;
34570 - atomic_long_t mesq_send_failed;
34571 - atomic_long_t mesq_noop;
34572 - atomic_long_t mesq_send_unexpected_error;
34573 - atomic_long_t mesq_send_lb_overflow;
34574 - atomic_long_t mesq_send_qlimit_reached;
34575 - atomic_long_t mesq_send_amo_nacked;
34576 - atomic_long_t mesq_send_put_nacked;
34577 - atomic_long_t mesq_page_overflow;
34578 - atomic_long_t mesq_qf_locked;
34579 - atomic_long_t mesq_qf_noop_not_full;
34580 - atomic_long_t mesq_qf_switch_head_failed;
34581 - atomic_long_t mesq_qf_unexpected_error;
34582 - atomic_long_t mesq_noop_unexpected_error;
34583 - atomic_long_t mesq_noop_lb_overflow;
34584 - atomic_long_t mesq_noop_qlimit_reached;
34585 - atomic_long_t mesq_noop_amo_nacked;
34586 - atomic_long_t mesq_noop_put_nacked;
34587 - atomic_long_t mesq_noop_page_overflow;
34588 + atomic_long_unchecked_t mesq_receive;
34589 + atomic_long_unchecked_t mesq_receive_none;
34590 + atomic_long_unchecked_t mesq_send;
34591 + atomic_long_unchecked_t mesq_send_failed;
34592 + atomic_long_unchecked_t mesq_noop;
34593 + atomic_long_unchecked_t mesq_send_unexpected_error;
34594 + atomic_long_unchecked_t mesq_send_lb_overflow;
34595 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34596 + atomic_long_unchecked_t mesq_send_amo_nacked;
34597 + atomic_long_unchecked_t mesq_send_put_nacked;
34598 + atomic_long_unchecked_t mesq_page_overflow;
34599 + atomic_long_unchecked_t mesq_qf_locked;
34600 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34601 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34602 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34603 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34604 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34605 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34606 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34607 + atomic_long_unchecked_t mesq_noop_put_nacked;
34608 + atomic_long_unchecked_t mesq_noop_page_overflow;
34609
34610 };
34611
34612 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34613 tghop_invalidate, mcsop_last};
34614
34615 struct mcs_op_statistic {
34616 - atomic_long_t count;
34617 - atomic_long_t total;
34618 + atomic_long_unchecked_t count;
34619 + atomic_long_unchecked_t total;
34620 unsigned long max;
34621 };
34622
34623 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34624
34625 #define STAT(id) do { \
34626 if (gru_options & OPT_STATS) \
34627 - atomic_long_inc(&gru_stats.id); \
34628 + atomic_long_inc_unchecked(&gru_stats.id); \
34629 } while (0)
34630
34631 #ifdef CONFIG_SGI_GRU_DEBUG
34632 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34633 index c862cd4..0d176fe 100644
34634 --- a/drivers/misc/sgi-xp/xp.h
34635 +++ b/drivers/misc/sgi-xp/xp.h
34636 @@ -288,7 +288,7 @@ struct xpc_interface {
34637 xpc_notify_func, void *);
34638 void (*received) (short, int, void *);
34639 enum xp_retval (*partid_to_nasids) (short, void *);
34640 -};
34641 +} __no_const;
34642
34643 extern struct xpc_interface xpc_interface;
34644
34645 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34646 index b94d5f7..7f494c5 100644
34647 --- a/drivers/misc/sgi-xp/xpc.h
34648 +++ b/drivers/misc/sgi-xp/xpc.h
34649 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34650 void (*received_payload) (struct xpc_channel *, void *);
34651 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34652 };
34653 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34654
34655 /* struct xpc_partition act_state values (for XPC HB) */
34656
34657 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34658 /* found in xpc_main.c */
34659 extern struct device *xpc_part;
34660 extern struct device *xpc_chan;
34661 -extern struct xpc_arch_operations xpc_arch_ops;
34662 +extern xpc_arch_operations_no_const xpc_arch_ops;
34663 extern int xpc_disengage_timelimit;
34664 extern int xpc_disengage_timedout;
34665 extern int xpc_activate_IRQ_rcvd;
34666 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34667 index 8d082b4..aa749ae 100644
34668 --- a/drivers/misc/sgi-xp/xpc_main.c
34669 +++ b/drivers/misc/sgi-xp/xpc_main.c
34670 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34671 .notifier_call = xpc_system_die,
34672 };
34673
34674 -struct xpc_arch_operations xpc_arch_ops;
34675 +xpc_arch_operations_no_const xpc_arch_ops;
34676
34677 /*
34678 * Timer function to enforce the timelimit on the partition disengage.
34679 diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
34680 index 2b62232..acfaeeb 100644
34681 --- a/drivers/misc/ti-st/st_core.c
34682 +++ b/drivers/misc/ti-st/st_core.c
34683 @@ -349,6 +349,11 @@ void st_int_recv(void *disc_data,
34684 st_gdata->rx_skb = alloc_skb(
34685 st_gdata->list[type]->max_frame_size,
34686 GFP_ATOMIC);
34687 + if (st_gdata->rx_skb == NULL) {
34688 + pr_err("out of memory: dropping\n");
34689 + goto done;
34690 + }
34691 +
34692 skb_reserve(st_gdata->rx_skb,
34693 st_gdata->list[type]->reserve);
34694 /* next 2 required for BT only */
34695 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34696 index 504da71..9722d43 100644
34697 --- a/drivers/mmc/host/sdhci-pci.c
34698 +++ b/drivers/mmc/host/sdhci-pci.c
34699 @@ -653,7 +653,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34700 .probe = via_probe,
34701 };
34702
34703 -static const struct pci_device_id pci_ids[] __devinitdata = {
34704 +static const struct pci_device_id pci_ids[] __devinitconst = {
34705 {
34706 .vendor = PCI_VENDOR_ID_RICOH,
34707 .device = PCI_DEVICE_ID_RICOH_R5C822,
34708 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34709 index a4eb8b5..8c0628f 100644
34710 --- a/drivers/mtd/devices/doc2000.c
34711 +++ b/drivers/mtd/devices/doc2000.c
34712 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34713
34714 /* The ECC will not be calculated correctly if less than 512 is written */
34715 /* DBB-
34716 - if (len != 0x200 && eccbuf)
34717 + if (len != 0x200)
34718 printk(KERN_WARNING
34719 "ECC needs a full sector write (adr: %lx size %lx)\n",
34720 (long) to, (long) len);
34721 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
34722 index f2f482b..a6e7451 100644
34723 --- a/drivers/mtd/mtdchar.c
34724 +++ b/drivers/mtd/mtdchar.c
34725 @@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
34726 }
34727 #endif
34728
34729 +static inline unsigned long get_vm_size(struct vm_area_struct *vma)
34730 +{
34731 + return vma->vm_end - vma->vm_start;
34732 +}
34733 +
34734 +static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
34735 +{
34736 + return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
34737 +}
34738 +
34739 +/*
34740 + * Set a new vm offset.
34741 + *
34742 + * Verify that the incoming offset really works as a page offset,
34743 + * and that the offset and size fit in a resource_size_t.
34744 + */
34745 +static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
34746 +{
34747 + pgoff_t pgoff = off >> PAGE_SHIFT;
34748 + if (off != (resource_size_t) pgoff << PAGE_SHIFT)
34749 + return -EINVAL;
34750 + if (off + get_vm_size(vma) - 1 < off)
34751 + return -EINVAL;
34752 + vma->vm_pgoff = pgoff;
34753 + return 0;
34754 +}
34755 +
34756 /*
34757 * set up a mapping for shared memory segments
34758 */
34759 @@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
34760 struct mtd_file_info *mfi = file->private_data;
34761 struct mtd_info *mtd = mfi->mtd;
34762 struct map_info *map = mtd->priv;
34763 - unsigned long start;
34764 - unsigned long off;
34765 - u32 len;
34766 + resource_size_t start, off;
34767 + unsigned long len, vma_len;
34768
34769 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
34770 - off = vma->vm_pgoff << PAGE_SHIFT;
34771 + off = get_vm_offset(vma);
34772 start = map->phys;
34773 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
34774 start &= PAGE_MASK;
34775 - if ((vma->vm_end - vma->vm_start + off) > len)
34776 + vma_len = get_vm_size(vma);
34777 +
34778 + /* Overflow in off+len? */
34779 + if (vma_len + off < off)
34780 + return -EINVAL;
34781 + /* Does it fit in the mapping? */
34782 + if (vma_len + off > len)
34783 return -EINVAL;
34784
34785 off += start;
34786 - vma->vm_pgoff = off >> PAGE_SHIFT;
34787 + /* Did that overflow? */
34788 + if (off < start)
34789 + return -EINVAL;
34790 + if (set_vm_offset(vma, off) < 0)
34791 + return -EINVAL;
34792 vma->vm_flags |= VM_IO | VM_RESERVED;
34793
34794 #ifdef pgprot_noncached
34795 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34796 index 0650aaf..7718762 100644
34797 --- a/drivers/mtd/nand/denali.c
34798 +++ b/drivers/mtd/nand/denali.c
34799 @@ -26,6 +26,7 @@
34800 #include <linux/pci.h>
34801 #include <linux/mtd/mtd.h>
34802 #include <linux/module.h>
34803 +#include <linux/slab.h>
34804
34805 #include "denali.h"
34806
34807 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34808 index 51b9d6a..52af9a7 100644
34809 --- a/drivers/mtd/nftlmount.c
34810 +++ b/drivers/mtd/nftlmount.c
34811 @@ -24,6 +24,7 @@
34812 #include <asm/errno.h>
34813 #include <linux/delay.h>
34814 #include <linux/slab.h>
34815 +#include <linux/sched.h>
34816 #include <linux/mtd/mtd.h>
34817 #include <linux/mtd/nand.h>
34818 #include <linux/mtd/nftl.h>
34819 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34820 index 6762dc4..9956862 100644
34821 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34822 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34823 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34824 */
34825
34826 #define ATL2_PARAM(X, desc) \
34827 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34828 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34829 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34830 MODULE_PARM_DESC(X, desc);
34831 #else
34832 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34833 index efd80bd..21fcff0 100644
34834 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34835 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34836 @@ -487,7 +487,7 @@ struct bnx2x_rx_mode_obj {
34837
34838 int (*wait_comp)(struct bnx2x *bp,
34839 struct bnx2x_rx_mode_ramrod_params *p);
34840 -};
34841 +} __no_const;
34842
34843 /********************** Set multicast group ***********************************/
34844
34845 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34846 index 93865f8..5448741 100644
34847 --- a/drivers/net/ethernet/broadcom/tg3.h
34848 +++ b/drivers/net/ethernet/broadcom/tg3.h
34849 @@ -140,6 +140,7 @@
34850 #define CHIPREV_ID_5750_A0 0x4000
34851 #define CHIPREV_ID_5750_A1 0x4001
34852 #define CHIPREV_ID_5750_A3 0x4003
34853 +#define CHIPREV_ID_5750_C1 0x4201
34854 #define CHIPREV_ID_5750_C2 0x4202
34855 #define CHIPREV_ID_5752_A0_HW 0x5000
34856 #define CHIPREV_ID_5752_A0 0x6000
34857 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34858 index c4e8643..0979484 100644
34859 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34860 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34861 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34862 */
34863 struct l2t_skb_cb {
34864 arp_failure_handler_func arp_failure_handler;
34865 -};
34866 +} __no_const;
34867
34868 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34869
34870 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34871 index d3cd489..0fd52dd 100644
34872 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34873 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34874 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34875 for (i=0; i<ETH_ALEN; i++) {
34876 tmp.addr[i] = dev->dev_addr[i];
34877 }
34878 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34879 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34880 break;
34881
34882 case DE4X5_SET_HWADDR: /* Set the hardware address */
34883 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34884 spin_lock_irqsave(&lp->lock, flags);
34885 memcpy(&statbuf, &lp->pktStats, ioc->len);
34886 spin_unlock_irqrestore(&lp->lock, flags);
34887 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34888 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34889 return -EFAULT;
34890 break;
34891 }
34892 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34893 index ed7d1dc..d426748 100644
34894 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34895 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34896 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34897 {NULL}};
34898
34899
34900 -static const char *block_name[] __devinitdata = {
34901 +static const char *block_name[] __devinitconst = {
34902 "21140 non-MII",
34903 "21140 MII PHY",
34904 "21142 Serial PHY",
34905 diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
34906 index 75d45f8..3d9c55b 100644
34907 --- a/drivers/net/ethernet/dec/tulip/uli526x.c
34908 +++ b/drivers/net/ethernet/dec/tulip/uli526x.c
34909 @@ -129,7 +129,7 @@ struct uli526x_board_info {
34910 struct uli_phy_ops {
34911 void (*write)(struct uli526x_board_info *, u8, u8, u16);
34912 u16 (*read)(struct uli526x_board_info *, u8, u8);
34913 - } phy;
34914 + } __no_const phy;
34915 struct net_device *next_dev; /* next device */
34916 struct pci_dev *pdev; /* PCI device */
34917 spinlock_t lock;
34918 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34919 index 4d1ffca..7c1ec4d 100644
34920 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34921 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34922 @@ -236,7 +236,7 @@ struct pci_id_info {
34923 int drv_flags; /* Driver use, intended as capability flags. */
34924 };
34925
34926 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34927 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34928 { /* Sometime a Level-One switch card. */
34929 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34930 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34931 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34932 index d7bb52a..3b83588 100644
34933 --- a/drivers/net/ethernet/dlink/sundance.c
34934 +++ b/drivers/net/ethernet/dlink/sundance.c
34935 @@ -218,7 +218,7 @@ enum {
34936 struct pci_id_info {
34937 const char *name;
34938 };
34939 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34940 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34941 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34942 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34943 {"D-Link DFE-580TX 4 port Server Adapter"},
34944 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34945 index bd5cf7e..c165651 100644
34946 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34947 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34948 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34949
34950 if (wrapped)
34951 newacc += 65536;
34952 - ACCESS_ONCE(*acc) = newacc;
34953 + ACCESS_ONCE_RW(*acc) = newacc;
34954 }
34955
34956 void be_parse_stats(struct be_adapter *adapter)
34957 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34958 index 16b0704..d2c07d7 100644
34959 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34960 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34961 @@ -31,6 +31,8 @@
34962 #include <linux/netdevice.h>
34963 #include <linux/phy.h>
34964 #include <linux/platform_device.h>
34965 +#include <linux/interrupt.h>
34966 +#include <linux/irqreturn.h>
34967 #include <net/ip.h>
34968
34969 #include "ftgmac100.h"
34970 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34971 index 829b109..4ae5f6a 100644
34972 --- a/drivers/net/ethernet/faraday/ftmac100.c
34973 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34974 @@ -31,6 +31,8 @@
34975 #include <linux/module.h>
34976 #include <linux/netdevice.h>
34977 #include <linux/platform_device.h>
34978 +#include <linux/interrupt.h>
34979 +#include <linux/irqreturn.h>
34980
34981 #include "ftmac100.h"
34982
34983 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34984 index 9d71c9c..0e4a0ac 100644
34985 --- a/drivers/net/ethernet/fealnx.c
34986 +++ b/drivers/net/ethernet/fealnx.c
34987 @@ -150,7 +150,7 @@ struct chip_info {
34988 int flags;
34989 };
34990
34991 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34992 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34993 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34994 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34995 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34996 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34997 index fa47b85..246edeb 100644
34998 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
34999 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
35000 @@ -181,7 +181,7 @@ struct e1000_info;
35001 #define E1000_TXDCTL_DMA_BURST_ENABLE \
35002 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
35003 E1000_TXDCTL_COUNT_DESC | \
35004 - (5 << 16) | /* wthresh must be +1 more than desired */\
35005 + (1 << 16) | /* wthresh must be +1 more than desired */\
35006 (1 << 8) | /* hthresh */ \
35007 0x1f) /* pthresh */
35008
35009 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
35010 index ed5b409..ec37828 100644
35011 --- a/drivers/net/ethernet/intel/e1000e/hw.h
35012 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
35013 @@ -797,6 +797,7 @@ struct e1000_mac_operations {
35014 void (*rar_set)(struct e1000_hw *, u8 *, u32);
35015 s32 (*read_mac_addr)(struct e1000_hw *);
35016 };
35017 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35018
35019 /*
35020 * When to use various PHY register access functions:
35021 @@ -837,6 +838,7 @@ struct e1000_phy_operations {
35022 void (*power_up)(struct e1000_hw *);
35023 void (*power_down)(struct e1000_hw *);
35024 };
35025 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35026
35027 /* Function pointers for the NVM. */
35028 struct e1000_nvm_operations {
35029 @@ -849,9 +851,10 @@ struct e1000_nvm_operations {
35030 s32 (*validate)(struct e1000_hw *);
35031 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
35032 };
35033 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35034
35035 struct e1000_mac_info {
35036 - struct e1000_mac_operations ops;
35037 + e1000_mac_operations_no_const ops;
35038 u8 addr[ETH_ALEN];
35039 u8 perm_addr[ETH_ALEN];
35040
35041 @@ -892,7 +895,7 @@ struct e1000_mac_info {
35042 };
35043
35044 struct e1000_phy_info {
35045 - struct e1000_phy_operations ops;
35046 + e1000_phy_operations_no_const ops;
35047
35048 enum e1000_phy_type type;
35049
35050 @@ -926,7 +929,7 @@ struct e1000_phy_info {
35051 };
35052
35053 struct e1000_nvm_info {
35054 - struct e1000_nvm_operations ops;
35055 + e1000_nvm_operations_no_const ops;
35056
35057 enum e1000_nvm_type type;
35058 enum e1000_nvm_override override;
35059 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
35060 index c2a51dc..c2bd262 100644
35061 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
35062 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
35063 @@ -327,6 +327,7 @@ struct e1000_mac_operations {
35064 void (*release_swfw_sync)(struct e1000_hw *, u16);
35065
35066 };
35067 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35068
35069 struct e1000_phy_operations {
35070 s32 (*acquire)(struct e1000_hw *);
35071 @@ -343,6 +344,7 @@ struct e1000_phy_operations {
35072 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
35073 s32 (*write_reg)(struct e1000_hw *, u32, u16);
35074 };
35075 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35076
35077 struct e1000_nvm_operations {
35078 s32 (*acquire)(struct e1000_hw *);
35079 @@ -353,6 +355,7 @@ struct e1000_nvm_operations {
35080 s32 (*validate)(struct e1000_hw *);
35081 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
35082 };
35083 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35084
35085 struct e1000_info {
35086 s32 (*get_invariants)(struct e1000_hw *);
35087 @@ -364,7 +367,7 @@ struct e1000_info {
35088 extern const struct e1000_info e1000_82575_info;
35089
35090 struct e1000_mac_info {
35091 - struct e1000_mac_operations ops;
35092 + e1000_mac_operations_no_const ops;
35093
35094 u8 addr[6];
35095 u8 perm_addr[6];
35096 @@ -402,7 +405,7 @@ struct e1000_mac_info {
35097 };
35098
35099 struct e1000_phy_info {
35100 - struct e1000_phy_operations ops;
35101 + e1000_phy_operations_no_const ops;
35102
35103 enum e1000_phy_type type;
35104
35105 @@ -437,7 +440,7 @@ struct e1000_phy_info {
35106 };
35107
35108 struct e1000_nvm_info {
35109 - struct e1000_nvm_operations ops;
35110 + e1000_nvm_operations_no_const ops;
35111 enum e1000_nvm_type type;
35112 enum e1000_nvm_override override;
35113
35114 @@ -482,6 +485,7 @@ struct e1000_mbx_operations {
35115 s32 (*check_for_ack)(struct e1000_hw *, u16);
35116 s32 (*check_for_rst)(struct e1000_hw *, u16);
35117 };
35118 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35119
35120 struct e1000_mbx_stats {
35121 u32 msgs_tx;
35122 @@ -493,7 +497,7 @@ struct e1000_mbx_stats {
35123 };
35124
35125 struct e1000_mbx_info {
35126 - struct e1000_mbx_operations ops;
35127 + e1000_mbx_operations_no_const ops;
35128 struct e1000_mbx_stats stats;
35129 u32 timeout;
35130 u32 usec_delay;
35131 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
35132 index 57db3c6..aa825fc 100644
35133 --- a/drivers/net/ethernet/intel/igbvf/vf.h
35134 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
35135 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
35136 s32 (*read_mac_addr)(struct e1000_hw *);
35137 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35138 };
35139 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35140
35141 struct e1000_mac_info {
35142 - struct e1000_mac_operations ops;
35143 + e1000_mac_operations_no_const ops;
35144 u8 addr[6];
35145 u8 perm_addr[6];
35146
35147 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35148 s32 (*check_for_ack)(struct e1000_hw *);
35149 s32 (*check_for_rst)(struct e1000_hw *);
35150 };
35151 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35152
35153 struct e1000_mbx_stats {
35154 u32 msgs_tx;
35155 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35156 };
35157
35158 struct e1000_mbx_info {
35159 - struct e1000_mbx_operations ops;
35160 + e1000_mbx_operations_no_const ops;
35161 struct e1000_mbx_stats stats;
35162 u32 timeout;
35163 u32 usec_delay;
35164 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35165 index dcebd12..c1fe8be 100644
35166 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35167 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35168 @@ -805,7 +805,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
35169 /* store the new cycle speed */
35170 adapter->cycle_speed = cycle_speed;
35171
35172 - ACCESS_ONCE(adapter->base_incval) = incval;
35173 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
35174 smp_mb();
35175
35176 /* grab the ptp lock */
35177 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35178 index 204848d..d8aeaec 100644
35179 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35180 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35181 @@ -2791,6 +2791,7 @@ struct ixgbe_eeprom_operations {
35182 s32 (*update_checksum)(struct ixgbe_hw *);
35183 u16 (*calc_checksum)(struct ixgbe_hw *);
35184 };
35185 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35186
35187 struct ixgbe_mac_operations {
35188 s32 (*init_hw)(struct ixgbe_hw *);
35189 @@ -2856,6 +2857,7 @@ struct ixgbe_mac_operations {
35190 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
35191 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
35192 };
35193 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35194
35195 struct ixgbe_phy_operations {
35196 s32 (*identify)(struct ixgbe_hw *);
35197 @@ -2875,9 +2877,10 @@ struct ixgbe_phy_operations {
35198 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35199 s32 (*check_overtemp)(struct ixgbe_hw *);
35200 };
35201 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35202
35203 struct ixgbe_eeprom_info {
35204 - struct ixgbe_eeprom_operations ops;
35205 + ixgbe_eeprom_operations_no_const ops;
35206 enum ixgbe_eeprom_type type;
35207 u32 semaphore_delay;
35208 u16 word_size;
35209 @@ -2887,7 +2890,7 @@ struct ixgbe_eeprom_info {
35210
35211 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35212 struct ixgbe_mac_info {
35213 - struct ixgbe_mac_operations ops;
35214 + ixgbe_mac_operations_no_const ops;
35215 enum ixgbe_mac_type type;
35216 u8 addr[ETH_ALEN];
35217 u8 perm_addr[ETH_ALEN];
35218 @@ -2916,7 +2919,7 @@ struct ixgbe_mac_info {
35219 };
35220
35221 struct ixgbe_phy_info {
35222 - struct ixgbe_phy_operations ops;
35223 + ixgbe_phy_operations_no_const ops;
35224 struct mdio_if_info mdio;
35225 enum ixgbe_phy_type type;
35226 u32 id;
35227 @@ -2944,6 +2947,7 @@ struct ixgbe_mbx_operations {
35228 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35229 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35230 };
35231 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35232
35233 struct ixgbe_mbx_stats {
35234 u32 msgs_tx;
35235 @@ -2955,7 +2959,7 @@ struct ixgbe_mbx_stats {
35236 };
35237
35238 struct ixgbe_mbx_info {
35239 - struct ixgbe_mbx_operations ops;
35240 + ixgbe_mbx_operations_no_const ops;
35241 struct ixgbe_mbx_stats stats;
35242 u32 timeout;
35243 u32 usec_delay;
35244 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35245 index 25c951d..cc7cf33 100644
35246 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35247 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35248 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35249 s32 (*clear_vfta)(struct ixgbe_hw *);
35250 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35251 };
35252 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35253
35254 enum ixgbe_mac_type {
35255 ixgbe_mac_unknown = 0,
35256 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35257 };
35258
35259 struct ixgbe_mac_info {
35260 - struct ixgbe_mac_operations ops;
35261 + ixgbe_mac_operations_no_const ops;
35262 u8 addr[6];
35263 u8 perm_addr[6];
35264
35265 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35266 s32 (*check_for_ack)(struct ixgbe_hw *);
35267 s32 (*check_for_rst)(struct ixgbe_hw *);
35268 };
35269 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35270
35271 struct ixgbe_mbx_stats {
35272 u32 msgs_tx;
35273 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35274 };
35275
35276 struct ixgbe_mbx_info {
35277 - struct ixgbe_mbx_operations ops;
35278 + ixgbe_mbx_operations_no_const ops;
35279 struct ixgbe_mbx_stats stats;
35280 u32 timeout;
35281 u32 udelay;
35282 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35283 index a0313de..e83a572 100644
35284 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35285 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35286 @@ -41,6 +41,7 @@
35287 #include <linux/slab.h>
35288 #include <linux/io-mapping.h>
35289 #include <linux/delay.h>
35290 +#include <linux/sched.h>
35291
35292 #include <linux/mlx4/device.h>
35293 #include <linux/mlx4/doorbell.h>
35294 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35295 index 5046a64..71ca936 100644
35296 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35297 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35298 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35299 void (*link_down)(struct __vxge_hw_device *devh);
35300 void (*crit_err)(struct __vxge_hw_device *devh,
35301 enum vxge_hw_event type, u64 ext_data);
35302 -};
35303 +} __no_const;
35304
35305 /*
35306 * struct __vxge_hw_blockpool_entry - Block private data structure
35307 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35308 index 4a518a3..936b334 100644
35309 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35310 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35311 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35312 struct vxge_hw_mempool_dma *dma_object,
35313 u32 index,
35314 u32 is_last);
35315 -};
35316 +} __no_const;
35317
35318 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35319 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35320 diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
35321 index cd827ff..389795e 100644
35322 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c
35323 +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
35324 @@ -683,10 +683,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
35325 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
35326 PHY_INTERFACE_MODE_MII);
35327
35328 - if (IS_ERR(p->phydev)) {
35329 - p->phydev = NULL;
35330 + if (!p->phydev)
35331 return -1;
35332 - }
35333
35334 phy_start_aneg(p->phydev);
35335
35336 diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
35337 index e559dfa..6fa74d5 100644
35338 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c
35339 +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
35340 @@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
35341 phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
35342 PHY_INTERFACE_MODE_SGMII);
35343
35344 - if (IS_ERR(phydev)) {
35345 + if (!phydev) {
35346 printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
35347 - return PTR_ERR(phydev);
35348 + return -ENODEV;
35349 }
35350
35351 mac->phydev = phydev;
35352 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35353 index eb81da4..1592b62 100644
35354 --- a/drivers/net/ethernet/realtek/r8169.c
35355 +++ b/drivers/net/ethernet/realtek/r8169.c
35356 @@ -723,22 +723,22 @@ struct rtl8169_private {
35357 struct mdio_ops {
35358 void (*write)(void __iomem *, int, int);
35359 int (*read)(void __iomem *, int);
35360 - } mdio_ops;
35361 + } __no_const mdio_ops;
35362
35363 struct pll_power_ops {
35364 void (*down)(struct rtl8169_private *);
35365 void (*up)(struct rtl8169_private *);
35366 - } pll_power_ops;
35367 + } __no_const pll_power_ops;
35368
35369 struct jumbo_ops {
35370 void (*enable)(struct rtl8169_private *);
35371 void (*disable)(struct rtl8169_private *);
35372 - } jumbo_ops;
35373 + } __no_const jumbo_ops;
35374
35375 struct csi_ops {
35376 void (*write)(void __iomem *, int, int);
35377 u32 (*read)(void __iomem *, int);
35378 - } csi_ops;
35379 + } __no_const csi_ops;
35380
35381 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35382 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35383 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35384 index 4613591..d816601 100644
35385 --- a/drivers/net/ethernet/sis/sis190.c
35386 +++ b/drivers/net/ethernet/sis/sis190.c
35387 @@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35388 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35389 struct net_device *dev)
35390 {
35391 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35392 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35393 struct sis190_private *tp = netdev_priv(dev);
35394 struct pci_dev *isa_bridge;
35395 u8 reg, tmp8;
35396 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35397 index c07cfe9..81cbf7e 100644
35398 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35399 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35400 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35401
35402 writel(value, ioaddr + MMC_CNTRL);
35403
35404 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35405 - MMC_CNTRL, value);
35406 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35407 +// MMC_CNTRL, value);
35408 }
35409
35410 /* To mask all all interrupts.*/
35411 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35412 index 2857ab0..9a1f9b0 100644
35413 --- a/drivers/net/hyperv/hyperv_net.h
35414 +++ b/drivers/net/hyperv/hyperv_net.h
35415 @@ -99,7 +99,7 @@ struct rndis_device {
35416
35417 enum rndis_device_state state;
35418 bool link_state;
35419 - atomic_t new_req_id;
35420 + atomic_unchecked_t new_req_id;
35421
35422 spinlock_t request_lock;
35423 struct list_head req_list;
35424 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35425 index 981ebb1..b34959b 100644
35426 --- a/drivers/net/hyperv/rndis_filter.c
35427 +++ b/drivers/net/hyperv/rndis_filter.c
35428 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35429 * template
35430 */
35431 set = &rndis_msg->msg.set_req;
35432 - set->req_id = atomic_inc_return(&dev->new_req_id);
35433 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35434
35435 /* Add to the request list */
35436 spin_lock_irqsave(&dev->request_lock, flags);
35437 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35438
35439 /* Setup the rndis set */
35440 halt = &request->request_msg.msg.halt_req;
35441 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35442 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35443
35444 /* Ignore return since this msg is optional. */
35445 rndis_filter_send_request(dev, request);
35446 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35447 index 5c05572..389610b 100644
35448 --- a/drivers/net/ppp/ppp_generic.c
35449 +++ b/drivers/net/ppp/ppp_generic.c
35450 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35451 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35452 struct ppp_stats stats;
35453 struct ppp_comp_stats cstats;
35454 - char *vers;
35455
35456 switch (cmd) {
35457 case SIOCGPPPSTATS:
35458 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35459 break;
35460
35461 case SIOCGPPPVER:
35462 - vers = PPP_VERSION;
35463 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35464 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35465 break;
35466 err = 0;
35467 break;
35468 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
35469 index c61ae35..7758801 100644
35470 --- a/drivers/net/team/team.c
35471 +++ b/drivers/net/team/team.c
35472 @@ -1410,8 +1410,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
35473
35474 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
35475 &team_nl_family, 0, TEAM_CMD_NOOP);
35476 - if (IS_ERR(hdr)) {
35477 - err = PTR_ERR(hdr);
35478 + if (!hdr) {
35479 + err = -EMSGSIZE;
35480 goto err_msg_put;
35481 }
35482
35483 @@ -1493,8 +1493,8 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
35484
35485 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
35486 TEAM_CMD_OPTIONS_GET);
35487 - if (IS_ERR(hdr))
35488 - return PTR_ERR(hdr);
35489 + if (!hdr)
35490 + return -EMSGSIZE;
35491
35492 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
35493 goto nla_put_failure;
35494 @@ -1739,8 +1739,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
35495
35496 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
35497 TEAM_CMD_PORT_LIST_GET);
35498 - if (IS_ERR(hdr))
35499 - return PTR_ERR(hdr);
35500 + if (!hdr)
35501 + return -EMSGSIZE;
35502
35503 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
35504 goto nla_put_failure;
35505 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
35506 index 5fb59ae..989715a 100644
35507 --- a/drivers/net/tun.c
35508 +++ b/drivers/net/tun.c
35509 @@ -1243,7 +1243,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
35510 }
35511
35512 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
35513 - unsigned long arg, int ifreq_len)
35514 + unsigned long arg, size_t ifreq_len)
35515 {
35516 struct tun_file *tfile = file->private_data;
35517 struct tun_struct *tun;
35518 @@ -1254,6 +1254,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
35519 int vnet_hdr_sz;
35520 int ret;
35521
35522 + if (ifreq_len > sizeof ifr)
35523 + return -EFAULT;
35524 +
35525 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
35526 if (copy_from_user(&ifr, argp, ifreq_len))
35527 return -EFAULT;
35528 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35529 index 62f30b4..ff99dfd 100644
35530 --- a/drivers/net/usb/hso.c
35531 +++ b/drivers/net/usb/hso.c
35532 @@ -71,7 +71,7 @@
35533 #include <asm/byteorder.h>
35534 #include <linux/serial_core.h>
35535 #include <linux/serial.h>
35536 -
35537 +#include <asm/local.h>
35538
35539 #define MOD_AUTHOR "Option Wireless"
35540 #define MOD_DESCRIPTION "USB High Speed Option driver"
35541 @@ -1182,7 +1182,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35542 struct urb *urb;
35543
35544 urb = serial->rx_urb[0];
35545 - if (serial->port.count > 0) {
35546 + if (atomic_read(&serial->port.count) > 0) {
35547 count = put_rxbuf_data(urb, serial);
35548 if (count == -1)
35549 return;
35550 @@ -1218,7 +1218,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35551 DUMP1(urb->transfer_buffer, urb->actual_length);
35552
35553 /* Anyone listening? */
35554 - if (serial->port.count == 0)
35555 + if (atomic_read(&serial->port.count) == 0)
35556 return;
35557
35558 if (status == 0) {
35559 @@ -1300,8 +1300,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35560 tty_port_tty_set(&serial->port, tty);
35561
35562 /* check for port already opened, if not set the termios */
35563 - serial->port.count++;
35564 - if (serial->port.count == 1) {
35565 + if (atomic_inc_return(&serial->port.count) == 1) {
35566 serial->rx_state = RX_IDLE;
35567 /* Force default termio settings */
35568 _hso_serial_set_termios(tty, NULL);
35569 @@ -1313,7 +1312,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35570 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35571 if (result) {
35572 hso_stop_serial_device(serial->parent);
35573 - serial->port.count--;
35574 + atomic_dec(&serial->port.count);
35575 kref_put(&serial->parent->ref, hso_serial_ref_free);
35576 }
35577 } else {
35578 @@ -1350,10 +1349,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35579
35580 /* reset the rts and dtr */
35581 /* do the actual close */
35582 - serial->port.count--;
35583 + atomic_dec(&serial->port.count);
35584
35585 - if (serial->port.count <= 0) {
35586 - serial->port.count = 0;
35587 + if (atomic_read(&serial->port.count) <= 0) {
35588 + atomic_set(&serial->port.count, 0);
35589 tty_port_tty_set(&serial->port, NULL);
35590 if (!usb_gone)
35591 hso_stop_serial_device(serial->parent);
35592 @@ -1429,7 +1428,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35593
35594 /* the actual setup */
35595 spin_lock_irqsave(&serial->serial_lock, flags);
35596 - if (serial->port.count)
35597 + if (atomic_read(&serial->port.count))
35598 _hso_serial_set_termios(tty, old);
35599 else
35600 tty->termios = old;
35601 @@ -1888,7 +1887,7 @@ static void intr_callback(struct urb *urb)
35602 D1("Pending read interrupt on port %d\n", i);
35603 spin_lock(&serial->serial_lock);
35604 if (serial->rx_state == RX_IDLE &&
35605 - serial->port.count > 0) {
35606 + atomic_read(&serial->port.count) > 0) {
35607 /* Setup and send a ctrl req read on
35608 * port i */
35609 if (!serial->rx_urb_filled[0]) {
35610 @@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
35611 /* Start all serial ports */
35612 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35613 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35614 - if (dev2ser(serial_table[i])->port.count) {
35615 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
35616 result =
35617 hso_start_serial_device(serial_table[i], GFP_NOIO);
35618 hso_kick_transmit(dev2ser(serial_table[i]));
35619 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35620 index 420d69b..74f90a2 100644
35621 --- a/drivers/net/wireless/ath/ath.h
35622 +++ b/drivers/net/wireless/ath/ath.h
35623 @@ -119,6 +119,7 @@ struct ath_ops {
35624 void (*write_flush) (void *);
35625 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35626 };
35627 +typedef struct ath_ops __no_const ath_ops_no_const;
35628
35629 struct ath_common;
35630 struct ath_bus_ops;
35631 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35632 index 8d78253..bebbb68 100644
35633 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35634 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35635 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35636 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35637 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35638
35639 - ACCESS_ONCE(ads->ds_link) = i->link;
35640 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35641 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35642 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35643
35644 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35645 ctl6 = SM(i->keytype, AR_EncrType);
35646 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35647
35648 if ((i->is_first || i->is_last) &&
35649 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35650 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35651 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35652 | set11nTries(i->rates, 1)
35653 | set11nTries(i->rates, 2)
35654 | set11nTries(i->rates, 3)
35655 | (i->dur_update ? AR_DurUpdateEna : 0)
35656 | SM(0, AR_BurstDur);
35657
35658 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35659 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35660 | set11nRate(i->rates, 1)
35661 | set11nRate(i->rates, 2)
35662 | set11nRate(i->rates, 3);
35663 } else {
35664 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35665 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35666 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35667 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35668 }
35669
35670 if (!i->is_first) {
35671 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35672 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35673 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35674 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35675 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35676 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35677 return;
35678 }
35679
35680 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35681 break;
35682 }
35683
35684 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35685 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35686 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35687 | SM(i->txpower, AR_XmitPower)
35688 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35689 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35690 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35691 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35692
35693 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35694 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35695 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35696 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35697
35698 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35699 return;
35700
35701 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35702 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35703 | set11nPktDurRTSCTS(i->rates, 1);
35704
35705 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35706 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35707 | set11nPktDurRTSCTS(i->rates, 3);
35708
35709 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35710 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35711 | set11nRateFlags(i->rates, 1)
35712 | set11nRateFlags(i->rates, 2)
35713 | set11nRateFlags(i->rates, 3)
35714 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35715 index d9e0824..1a874e7 100644
35716 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35717 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35718 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35719 (i->qcu << AR_TxQcuNum_S) | desc_len;
35720
35721 checksum += val;
35722 - ACCESS_ONCE(ads->info) = val;
35723 + ACCESS_ONCE_RW(ads->info) = val;
35724
35725 checksum += i->link;
35726 - ACCESS_ONCE(ads->link) = i->link;
35727 + ACCESS_ONCE_RW(ads->link) = i->link;
35728
35729 checksum += i->buf_addr[0];
35730 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35731 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35732 checksum += i->buf_addr[1];
35733 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35734 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35735 checksum += i->buf_addr[2];
35736 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35737 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35738 checksum += i->buf_addr[3];
35739 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35740 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35741
35742 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35743 - ACCESS_ONCE(ads->ctl3) = val;
35744 + ACCESS_ONCE_RW(ads->ctl3) = val;
35745 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35746 - ACCESS_ONCE(ads->ctl5) = val;
35747 + ACCESS_ONCE_RW(ads->ctl5) = val;
35748 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35749 - ACCESS_ONCE(ads->ctl7) = val;
35750 + ACCESS_ONCE_RW(ads->ctl7) = val;
35751 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35752 - ACCESS_ONCE(ads->ctl9) = val;
35753 + ACCESS_ONCE_RW(ads->ctl9) = val;
35754
35755 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35756 - ACCESS_ONCE(ads->ctl10) = checksum;
35757 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35758
35759 if (i->is_first || i->is_last) {
35760 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35761 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35762 | set11nTries(i->rates, 1)
35763 | set11nTries(i->rates, 2)
35764 | set11nTries(i->rates, 3)
35765 | (i->dur_update ? AR_DurUpdateEna : 0)
35766 | SM(0, AR_BurstDur);
35767
35768 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35769 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35770 | set11nRate(i->rates, 1)
35771 | set11nRate(i->rates, 2)
35772 | set11nRate(i->rates, 3);
35773 } else {
35774 - ACCESS_ONCE(ads->ctl13) = 0;
35775 - ACCESS_ONCE(ads->ctl14) = 0;
35776 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35777 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35778 }
35779
35780 ads->ctl20 = 0;
35781 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35782
35783 ctl17 = SM(i->keytype, AR_EncrType);
35784 if (!i->is_first) {
35785 - ACCESS_ONCE(ads->ctl11) = 0;
35786 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35787 - ACCESS_ONCE(ads->ctl15) = 0;
35788 - ACCESS_ONCE(ads->ctl16) = 0;
35789 - ACCESS_ONCE(ads->ctl17) = ctl17;
35790 - ACCESS_ONCE(ads->ctl18) = 0;
35791 - ACCESS_ONCE(ads->ctl19) = 0;
35792 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35793 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35794 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35795 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35796 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35797 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35798 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35799 return;
35800 }
35801
35802 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35803 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35804 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35805 | SM(i->txpower, AR_XmitPower)
35806 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35807 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35808 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35809 ctl12 |= SM(val, AR_PAPRDChainMask);
35810
35811 - ACCESS_ONCE(ads->ctl12) = ctl12;
35812 - ACCESS_ONCE(ads->ctl17) = ctl17;
35813 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35814 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35815
35816 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35817 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35818 | set11nPktDurRTSCTS(i->rates, 1);
35819
35820 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35821 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35822 | set11nPktDurRTSCTS(i->rates, 3);
35823
35824 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35825 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35826 | set11nRateFlags(i->rates, 1)
35827 | set11nRateFlags(i->rates, 2)
35828 | set11nRateFlags(i->rates, 3)
35829 | SM(i->rtscts_rate, AR_RTSCTSRate);
35830
35831 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35832 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35833 }
35834
35835 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35836 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35837 index 02f5007..bd0bd8f 100644
35838 --- a/drivers/net/wireless/ath/ath9k/hw.h
35839 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35840 @@ -610,7 +610,7 @@ struct ath_hw_private_ops {
35841
35842 /* ANI */
35843 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35844 -};
35845 +} __no_const;
35846
35847 /**
35848 * struct ath_hw_ops - callbacks used by hardware code and driver code
35849 @@ -640,7 +640,7 @@ struct ath_hw_ops {
35850 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35851 struct ath_hw_antcomb_conf *antconf);
35852
35853 -};
35854 +} __no_const;
35855
35856 struct ath_nf_limits {
35857 s16 max;
35858 @@ -660,7 +660,7 @@ enum ath_cal_list {
35859 #define AH_FASTCC 0x4
35860
35861 struct ath_hw {
35862 - struct ath_ops reg_ops;
35863 + ath_ops_no_const reg_ops;
35864
35865 struct ieee80211_hw *hw;
35866 struct ath_common common;
35867 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35868 index af00e2c..ab04d34 100644
35869 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35870 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35871 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35872 void (*carrsuppr)(struct brcms_phy *);
35873 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35874 void (*detach)(struct brcms_phy *);
35875 -};
35876 +} __no_const;
35877
35878 struct brcms_phy {
35879 struct brcms_phy_pub pubpi_ro;
35880 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35881 index faec404..a5277f1 100644
35882 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35883 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35884 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35885 */
35886 if (il3945_mod_params.disable_hw_scan) {
35887 D_INFO("Disabling hw_scan\n");
35888 - il3945_mac_ops.hw_scan = NULL;
35889 + pax_open_kernel();
35890 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35891 + pax_close_kernel();
35892 }
35893
35894 D_INFO("*** LOAD DRIVER ***\n");
35895 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35896 index 5000690..a67f98a 100644
35897 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35898 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35899 @@ -207,7 +207,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
35900 {
35901 struct iwl_priv *priv = file->private_data;
35902 char buf[64];
35903 - int buf_size;
35904 + size_t buf_size;
35905 u32 offset, len;
35906
35907 memset(buf, 0, sizeof(buf));
35908 @@ -484,7 +484,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
35909 struct iwl_priv *priv = file->private_data;
35910
35911 char buf[8];
35912 - int buf_size;
35913 + size_t buf_size;
35914 u32 reset_flag;
35915
35916 memset(buf, 0, sizeof(buf));
35917 @@ -565,7 +565,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
35918 {
35919 struct iwl_priv *priv = file->private_data;
35920 char buf[8];
35921 - int buf_size;
35922 + size_t buf_size;
35923 int ht40;
35924
35925 memset(buf, 0, sizeof(buf));
35926 @@ -617,7 +617,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
35927 {
35928 struct iwl_priv *priv = file->private_data;
35929 char buf[8];
35930 - int buf_size;
35931 + size_t buf_size;
35932 int value;
35933
35934 memset(buf, 0, sizeof(buf));
35935 @@ -1882,7 +1882,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
35936 {
35937 struct iwl_priv *priv = file->private_data;
35938 char buf[8];
35939 - int buf_size;
35940 + size_t buf_size;
35941 int clear;
35942
35943 memset(buf, 0, sizeof(buf));
35944 @@ -1927,7 +1927,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
35945 {
35946 struct iwl_priv *priv = file->private_data;
35947 char buf[8];
35948 - int buf_size;
35949 + size_t buf_size;
35950 int trace;
35951
35952 memset(buf, 0, sizeof(buf));
35953 @@ -1998,7 +1998,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
35954 {
35955 struct iwl_priv *priv = file->private_data;
35956 char buf[8];
35957 - int buf_size;
35958 + size_t buf_size;
35959 int missed;
35960
35961 memset(buf, 0, sizeof(buf));
35962 @@ -2039,7 +2039,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
35963
35964 struct iwl_priv *priv = file->private_data;
35965 char buf[8];
35966 - int buf_size;
35967 + size_t buf_size;
35968 int plcp;
35969
35970 memset(buf, 0, sizeof(buf));
35971 @@ -2099,7 +2099,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
35972
35973 struct iwl_priv *priv = file->private_data;
35974 char buf[8];
35975 - int buf_size;
35976 + size_t buf_size;
35977 int flush;
35978
35979 memset(buf, 0, sizeof(buf));
35980 @@ -2189,7 +2189,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
35981
35982 struct iwl_priv *priv = file->private_data;
35983 char buf[8];
35984 - int buf_size;
35985 + size_t buf_size;
35986 int rts;
35987
35988 if (!priv->cfg->ht_params)
35989 @@ -2231,7 +2231,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
35990 {
35991 struct iwl_priv *priv = file->private_data;
35992 char buf[8];
35993 - int buf_size;
35994 + size_t buf_size;
35995
35996 memset(buf, 0, sizeof(buf));
35997 buf_size = min(count, sizeof(buf) - 1);
35998 @@ -2267,7 +2267,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
35999 struct iwl_priv *priv = file->private_data;
36000 u32 event_log_flag;
36001 char buf[8];
36002 - int buf_size;
36003 + size_t buf_size;
36004
36005 memset(buf, 0, sizeof(buf));
36006 buf_size = min(count, sizeof(buf) - 1);
36007 @@ -2317,7 +2317,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
36008 struct iwl_priv *priv = file->private_data;
36009 char buf[8];
36010 u32 calib_disabled;
36011 - int buf_size;
36012 + size_t buf_size;
36013
36014 memset(buf, 0, sizeof(buf));
36015 buf_size = min(count, sizeof(buf) - 1);
36016 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
36017 index a1fb025..378e8a5 100644
36018 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
36019 +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
36020 @@ -1951,7 +1951,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
36021 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
36022
36023 char buf[8];
36024 - int buf_size;
36025 + size_t buf_size;
36026 u32 reset_flag;
36027
36028 memset(buf, 0, sizeof(buf));
36029 @@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
36030 {
36031 struct iwl_trans *trans = file->private_data;
36032 char buf[8];
36033 - int buf_size;
36034 + size_t buf_size;
36035 int csr;
36036
36037 memset(buf, 0, sizeof(buf));
36038 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
36039 index a0b7cfd..20b49f7 100644
36040 --- a/drivers/net/wireless/mac80211_hwsim.c
36041 +++ b/drivers/net/wireless/mac80211_hwsim.c
36042 @@ -1752,9 +1752,11 @@ static int __init init_mac80211_hwsim(void)
36043 return -EINVAL;
36044
36045 if (fake_hw_scan) {
36046 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36047 - mac80211_hwsim_ops.sw_scan_start = NULL;
36048 - mac80211_hwsim_ops.sw_scan_complete = NULL;
36049 + pax_open_kernel();
36050 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36051 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
36052 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
36053 + pax_close_kernel();
36054 }
36055
36056 spin_lock_init(&hwsim_radio_lock);
36057 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
36058 index bd3b0bf..f9db92a 100644
36059 --- a/drivers/net/wireless/mwifiex/main.h
36060 +++ b/drivers/net/wireless/mwifiex/main.h
36061 @@ -567,7 +567,7 @@ struct mwifiex_if_ops {
36062 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
36063 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
36064 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
36065 -};
36066 +} __no_const;
36067
36068 struct mwifiex_adapter {
36069 u8 iface_type;
36070 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
36071 index dfcd02a..a42a59d 100644
36072 --- a/drivers/net/wireless/rndis_wlan.c
36073 +++ b/drivers/net/wireless/rndis_wlan.c
36074 @@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
36075
36076 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
36077
36078 - if (rts_threshold < 0 || rts_threshold > 2347)
36079 + if (rts_threshold > 2347)
36080 rts_threshold = 2347;
36081
36082 tmp = cpu_to_le32(rts_threshold);
36083 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
36084 index 8f75402..eed109d 100644
36085 --- a/drivers/net/wireless/rt2x00/rt2x00.h
36086 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
36087 @@ -396,7 +396,7 @@ struct rt2x00_intf {
36088 * for hardware which doesn't support hardware
36089 * sequence counting.
36090 */
36091 - atomic_t seqno;
36092 + atomic_unchecked_t seqno;
36093 };
36094
36095 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
36096 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
36097 index 2fd8301..9767e8c 100644
36098 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
36099 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
36100 @@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
36101 * sequence counter given by mac80211.
36102 */
36103 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
36104 - seqno = atomic_add_return(0x10, &intf->seqno);
36105 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
36106 else
36107 - seqno = atomic_read(&intf->seqno);
36108 + seqno = atomic_read_unchecked(&intf->seqno);
36109
36110 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
36111 hdr->seq_ctrl |= cpu_to_le16(seqno);
36112 diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
36113 index 9d8f581..0f6589e 100644
36114 --- a/drivers/net/wireless/ti/wl1251/wl1251.h
36115 +++ b/drivers/net/wireless/ti/wl1251/wl1251.h
36116 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
36117 void (*reset)(struct wl1251 *wl);
36118 void (*enable_irq)(struct wl1251 *wl);
36119 void (*disable_irq)(struct wl1251 *wl);
36120 -};
36121 +} __no_const;
36122
36123 struct wl1251 {
36124 struct ieee80211_hw *hw;
36125 diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
36126 index 0b3f0b5..62f68bd 100644
36127 --- a/drivers/net/wireless/ti/wlcore/wlcore.h
36128 +++ b/drivers/net/wireless/ti/wlcore/wlcore.h
36129 @@ -61,7 +61,7 @@ struct wlcore_ops {
36130 struct wl12xx_vif *wlvif);
36131 s8 (*get_pg_ver)(struct wl1271 *wl);
36132 void (*get_mac)(struct wl1271 *wl);
36133 -};
36134 +} __no_const;
36135
36136 enum wlcore_partitions {
36137 PART_DOWN,
36138 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
36139 index f34b5b2..b5abb9f 100644
36140 --- a/drivers/oprofile/buffer_sync.c
36141 +++ b/drivers/oprofile/buffer_sync.c
36142 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
36143 if (cookie == NO_COOKIE)
36144 offset = pc;
36145 if (cookie == INVALID_COOKIE) {
36146 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36147 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36148 offset = pc;
36149 }
36150 if (cookie != last_cookie) {
36151 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
36152 /* add userspace sample */
36153
36154 if (!mm) {
36155 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
36156 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36157 return 0;
36158 }
36159
36160 cookie = lookup_dcookie(mm, s->eip, &offset);
36161
36162 if (cookie == INVALID_COOKIE) {
36163 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36164 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36165 return 0;
36166 }
36167
36168 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
36169 /* ignore backtraces if failed to add a sample */
36170 if (state == sb_bt_start) {
36171 state = sb_bt_ignore;
36172 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36173 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36174 }
36175 }
36176 release_mm(mm);
36177 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
36178 index c0cc4e7..44d4e54 100644
36179 --- a/drivers/oprofile/event_buffer.c
36180 +++ b/drivers/oprofile/event_buffer.c
36181 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
36182 }
36183
36184 if (buffer_pos == buffer_size) {
36185 - atomic_inc(&oprofile_stats.event_lost_overflow);
36186 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36187 return;
36188 }
36189
36190 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
36191 index ed2c3ec..deda85a 100644
36192 --- a/drivers/oprofile/oprof.c
36193 +++ b/drivers/oprofile/oprof.c
36194 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
36195 if (oprofile_ops.switch_events())
36196 return;
36197
36198 - atomic_inc(&oprofile_stats.multiplex_counter);
36199 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36200 start_switch_worker();
36201 }
36202
36203 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
36204 index 917d28e..d62d981 100644
36205 --- a/drivers/oprofile/oprofile_stats.c
36206 +++ b/drivers/oprofile/oprofile_stats.c
36207 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36208 cpu_buf->sample_invalid_eip = 0;
36209 }
36210
36211 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36212 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36213 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
36214 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36215 - atomic_set(&oprofile_stats.multiplex_counter, 0);
36216 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36217 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36218 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36219 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36220 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36221 }
36222
36223
36224 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
36225 index 38b6fc0..b5cbfce 100644
36226 --- a/drivers/oprofile/oprofile_stats.h
36227 +++ b/drivers/oprofile/oprofile_stats.h
36228 @@ -13,11 +13,11 @@
36229 #include <linux/atomic.h>
36230
36231 struct oprofile_stat_struct {
36232 - atomic_t sample_lost_no_mm;
36233 - atomic_t sample_lost_no_mapping;
36234 - atomic_t bt_lost_no_mapping;
36235 - atomic_t event_lost_overflow;
36236 - atomic_t multiplex_counter;
36237 + atomic_unchecked_t sample_lost_no_mm;
36238 + atomic_unchecked_t sample_lost_no_mapping;
36239 + atomic_unchecked_t bt_lost_no_mapping;
36240 + atomic_unchecked_t event_lost_overflow;
36241 + atomic_unchecked_t multiplex_counter;
36242 };
36243
36244 extern struct oprofile_stat_struct oprofile_stats;
36245 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
36246 index 849357c..b83c1e0 100644
36247 --- a/drivers/oprofile/oprofilefs.c
36248 +++ b/drivers/oprofile/oprofilefs.c
36249 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
36250
36251
36252 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36253 - char const *name, atomic_t *val)
36254 + char const *name, atomic_unchecked_t *val)
36255 {
36256 return __oprofilefs_create_file(sb, root, name,
36257 &atomic_ro_fops, 0444, val);
36258 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36259 index 3f56bc0..707d642 100644
36260 --- a/drivers/parport/procfs.c
36261 +++ b/drivers/parport/procfs.c
36262 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36263
36264 *ppos += len;
36265
36266 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36267 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36268 }
36269
36270 #ifdef CONFIG_PARPORT_1284
36271 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36272
36273 *ppos += len;
36274
36275 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36276 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36277 }
36278 #endif /* IEEE1284.3 support. */
36279
36280 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36281 index 9fff878..ad0ad53 100644
36282 --- a/drivers/pci/hotplug/cpci_hotplug.h
36283 +++ b/drivers/pci/hotplug/cpci_hotplug.h
36284 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36285 int (*hardware_test) (struct slot* slot, u32 value);
36286 u8 (*get_power) (struct slot* slot);
36287 int (*set_power) (struct slot* slot, int value);
36288 -};
36289 +} __no_const;
36290
36291 struct cpci_hp_controller {
36292 unsigned int irq;
36293 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36294 index 76ba8a1..20ca857 100644
36295 --- a/drivers/pci/hotplug/cpqphp_nvram.c
36296 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
36297 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36298
36299 void compaq_nvram_init (void __iomem *rom_start)
36300 {
36301 +
36302 +#ifndef CONFIG_PAX_KERNEXEC
36303 if (rom_start) {
36304 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36305 }
36306 +#endif
36307 +
36308 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36309
36310 /* initialize our int15 lock */
36311 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36312 index b500840..d7159d3 100644
36313 --- a/drivers/pci/pcie/aspm.c
36314 +++ b/drivers/pci/pcie/aspm.c
36315 @@ -27,9 +27,9 @@
36316 #define MODULE_PARAM_PREFIX "pcie_aspm."
36317
36318 /* Note: those are not register definitions */
36319 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36320 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36321 -#define ASPM_STATE_L1 (4) /* L1 state */
36322 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36323 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36324 +#define ASPM_STATE_L1 (4U) /* L1 state */
36325 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36326 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36327
36328 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36329 index 658ac97..05e1b90 100644
36330 --- a/drivers/pci/probe.c
36331 +++ b/drivers/pci/probe.c
36332 @@ -137,7 +137,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36333 u16 orig_cmd;
36334 struct pci_bus_region region;
36335
36336 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36337 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36338
36339 if (!dev->mmio_always_on) {
36340 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36341 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36342 index 27911b5..5b6db88 100644
36343 --- a/drivers/pci/proc.c
36344 +++ b/drivers/pci/proc.c
36345 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36346 static int __init pci_proc_init(void)
36347 {
36348 struct pci_dev *dev = NULL;
36349 +
36350 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36351 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36352 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36353 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36354 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36355 +#endif
36356 +#else
36357 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36358 +#endif
36359 proc_create("devices", 0, proc_bus_pci_dir,
36360 &proc_bus_pci_dev_operations);
36361 proc_initialized = 1;
36362 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36363 index 8b5610d..a4c22bb 100644
36364 --- a/drivers/platform/x86/thinkpad_acpi.c
36365 +++ b/drivers/platform/x86/thinkpad_acpi.c
36366 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36367 return 0;
36368 }
36369
36370 -void static hotkey_mask_warn_incomplete_mask(void)
36371 +static void hotkey_mask_warn_incomplete_mask(void)
36372 {
36373 /* log only what the user can fix... */
36374 const u32 wantedmask = hotkey_driver_mask &
36375 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36376 }
36377 }
36378
36379 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36380 - struct tp_nvram_state *newn,
36381 - const u32 event_mask)
36382 -{
36383 -
36384 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36385 do { \
36386 if ((event_mask & (1 << __scancode)) && \
36387 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36388 tpacpi_hotkey_send_key(__scancode); \
36389 } while (0)
36390
36391 - void issue_volchange(const unsigned int oldvol,
36392 - const unsigned int newvol)
36393 - {
36394 - unsigned int i = oldvol;
36395 +static void issue_volchange(const unsigned int oldvol,
36396 + const unsigned int newvol,
36397 + const u32 event_mask)
36398 +{
36399 + unsigned int i = oldvol;
36400
36401 - while (i > newvol) {
36402 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36403 - i--;
36404 - }
36405 - while (i < newvol) {
36406 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36407 - i++;
36408 - }
36409 + while (i > newvol) {
36410 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36411 + i--;
36412 }
36413 + while (i < newvol) {
36414 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36415 + i++;
36416 + }
36417 +}
36418
36419 - void issue_brightnesschange(const unsigned int oldbrt,
36420 - const unsigned int newbrt)
36421 - {
36422 - unsigned int i = oldbrt;
36423 +static void issue_brightnesschange(const unsigned int oldbrt,
36424 + const unsigned int newbrt,
36425 + const u32 event_mask)
36426 +{
36427 + unsigned int i = oldbrt;
36428
36429 - while (i > newbrt) {
36430 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36431 - i--;
36432 - }
36433 - while (i < newbrt) {
36434 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36435 - i++;
36436 - }
36437 + while (i > newbrt) {
36438 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36439 + i--;
36440 + }
36441 + while (i < newbrt) {
36442 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36443 + i++;
36444 }
36445 +}
36446
36447 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36448 + struct tp_nvram_state *newn,
36449 + const u32 event_mask)
36450 +{
36451 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36452 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36453 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36454 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36455 oldn->volume_level != newn->volume_level) {
36456 /* recently muted, or repeated mute keypress, or
36457 * multiple presses ending in mute */
36458 - issue_volchange(oldn->volume_level, newn->volume_level);
36459 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36460 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36461 }
36462 } else {
36463 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36464 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36465 }
36466 if (oldn->volume_level != newn->volume_level) {
36467 - issue_volchange(oldn->volume_level, newn->volume_level);
36468 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36469 } else if (oldn->volume_toggle != newn->volume_toggle) {
36470 /* repeated vol up/down keypress at end of scale ? */
36471 if (newn->volume_level == 0)
36472 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36473 /* handle brightness */
36474 if (oldn->brightness_level != newn->brightness_level) {
36475 issue_brightnesschange(oldn->brightness_level,
36476 - newn->brightness_level);
36477 + newn->brightness_level,
36478 + event_mask);
36479 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36480 /* repeated key presses that didn't change state */
36481 if (newn->brightness_level == 0)
36482 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36483 && !tp_features.bright_unkfw)
36484 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36485 }
36486 +}
36487
36488 #undef TPACPI_COMPARE_KEY
36489 #undef TPACPI_MAY_SEND_KEY
36490 -}
36491
36492 /*
36493 * Polling driver
36494 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36495 index 769d265..a3a05ca 100644
36496 --- a/drivers/pnp/pnpbios/bioscalls.c
36497 +++ b/drivers/pnp/pnpbios/bioscalls.c
36498 @@ -58,7 +58,7 @@ do { \
36499 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36500 } while(0)
36501
36502 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36503 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36504 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36505
36506 /*
36507 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36508
36509 cpu = get_cpu();
36510 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36511 +
36512 + pax_open_kernel();
36513 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36514 + pax_close_kernel();
36515
36516 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36517 spin_lock_irqsave(&pnp_bios_lock, flags);
36518 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36519 :"memory");
36520 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36521
36522 + pax_open_kernel();
36523 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36524 + pax_close_kernel();
36525 +
36526 put_cpu();
36527
36528 /* If we get here and this is set then the PnP BIOS faulted on us. */
36529 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36530 return status;
36531 }
36532
36533 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36534 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36535 {
36536 int i;
36537
36538 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36539 pnp_bios_callpoint.offset = header->fields.pm16offset;
36540 pnp_bios_callpoint.segment = PNP_CS16;
36541
36542 + pax_open_kernel();
36543 +
36544 for_each_possible_cpu(i) {
36545 struct desc_struct *gdt = get_cpu_gdt_table(i);
36546 if (!gdt)
36547 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36548 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36549 (unsigned long)__va(header->fields.pm16dseg));
36550 }
36551 +
36552 + pax_close_kernel();
36553 }
36554 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36555 index b0ecacb..7c9da2e 100644
36556 --- a/drivers/pnp/resource.c
36557 +++ b/drivers/pnp/resource.c
36558 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36559 return 1;
36560
36561 /* check if the resource is valid */
36562 - if (*irq < 0 || *irq > 15)
36563 + if (*irq > 15)
36564 return 0;
36565
36566 /* check if the resource is reserved */
36567 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36568 return 1;
36569
36570 /* check if the resource is valid */
36571 - if (*dma < 0 || *dma == 4 || *dma > 7)
36572 + if (*dma == 4 || *dma > 7)
36573 return 0;
36574
36575 /* check if the resource is reserved */
36576 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36577 index f5d6d37..739f6a9 100644
36578 --- a/drivers/power/bq27x00_battery.c
36579 +++ b/drivers/power/bq27x00_battery.c
36580 @@ -72,7 +72,7 @@
36581 struct bq27x00_device_info;
36582 struct bq27x00_access_methods {
36583 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36584 -};
36585 +} __no_const;
36586
36587 enum bq27x00_chip { BQ27000, BQ27500 };
36588
36589 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36590 index 8d53174..04c65de 100644
36591 --- a/drivers/regulator/max8660.c
36592 +++ b/drivers/regulator/max8660.c
36593 @@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36594 max8660->shadow_regs[MAX8660_OVER1] = 5;
36595 } else {
36596 /* Otherwise devices can be toggled via software */
36597 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36598 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36599 + pax_open_kernel();
36600 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36601 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36602 + pax_close_kernel();
36603 }
36604
36605 /*
36606 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36607 index 970a233..ee1f241 100644
36608 --- a/drivers/regulator/mc13892-regulator.c
36609 +++ b/drivers/regulator/mc13892-regulator.c
36610 @@ -566,10 +566,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36611 }
36612 mc13xxx_unlock(mc13892);
36613
36614 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36615 + pax_open_kernel();
36616 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36617 = mc13892_vcam_set_mode;
36618 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36619 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36620 = mc13892_vcam_get_mode;
36621 + pax_close_kernel();
36622
36623 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36624 ARRAY_SIZE(mc13892_regulators));
36625 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36626 index cace6d3..f623fda 100644
36627 --- a/drivers/rtc/rtc-dev.c
36628 +++ b/drivers/rtc/rtc-dev.c
36629 @@ -14,6 +14,7 @@
36630 #include <linux/module.h>
36631 #include <linux/rtc.h>
36632 #include <linux/sched.h>
36633 +#include <linux/grsecurity.h>
36634 #include "rtc-core.h"
36635
36636 static dev_t rtc_devt;
36637 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36638 if (copy_from_user(&tm, uarg, sizeof(tm)))
36639 return -EFAULT;
36640
36641 + gr_log_timechange();
36642 +
36643 return rtc_set_time(rtc, &tm);
36644
36645 case RTC_PIE_ON:
36646 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36647 index 3fcf627..f334910 100644
36648 --- a/drivers/scsi/aacraid/aacraid.h
36649 +++ b/drivers/scsi/aacraid/aacraid.h
36650 @@ -492,7 +492,7 @@ struct adapter_ops
36651 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36652 /* Administrative operations */
36653 int (*adapter_comm)(struct aac_dev * dev, int comm);
36654 -};
36655 +} __no_const;
36656
36657 /*
36658 * Define which interrupt handler needs to be installed
36659 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36660 index 0d279c44..3d25a97 100644
36661 --- a/drivers/scsi/aacraid/linit.c
36662 +++ b/drivers/scsi/aacraid/linit.c
36663 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36664 #elif defined(__devinitconst)
36665 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36666 #else
36667 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36668 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36669 #endif
36670 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36671 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36672 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36673 index ff80552..1c4120c 100644
36674 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36675 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36676 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36677 .lldd_ata_set_dmamode = asd_set_dmamode,
36678 };
36679
36680 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36681 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36682 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36683 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36684 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36685 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36686 index 4ad7e36..d004679 100644
36687 --- a/drivers/scsi/bfa/bfa.h
36688 +++ b/drivers/scsi/bfa/bfa.h
36689 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36690 u32 *end);
36691 int cpe_vec_q0;
36692 int rme_vec_q0;
36693 -};
36694 +} __no_const;
36695 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36696
36697 struct bfa_faa_cbfn_s {
36698 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36699 index f0f80e2..8ec946b 100644
36700 --- a/drivers/scsi/bfa/bfa_fcpim.c
36701 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36702 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36703
36704 bfa_iotag_attach(fcp);
36705
36706 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36707 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36708 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36709 (fcp->num_itns * sizeof(struct bfa_itn_s));
36710 memset(fcp->itn_arr, 0,
36711 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36712 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36713 {
36714 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36715 - struct bfa_itn_s *itn;
36716 + bfa_itn_s_no_const *itn;
36717
36718 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36719 itn->isr = isr;
36720 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36721 index 36f26da..38a34a8 100644
36722 --- a/drivers/scsi/bfa/bfa_fcpim.h
36723 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36724 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36725 struct bfa_itn_s {
36726 bfa_isr_func_t isr;
36727 };
36728 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36729
36730 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36731 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36732 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36733 struct list_head iotag_tio_free_q; /* free IO resources */
36734 struct list_head iotag_unused_q; /* unused IO resources*/
36735 struct bfa_iotag_s *iotag_arr;
36736 - struct bfa_itn_s *itn_arr;
36737 + bfa_itn_s_no_const *itn_arr;
36738 int num_ioim_reqs;
36739 int num_fwtio_reqs;
36740 int num_itns;
36741 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36742 index 1a99d4b..e85d64b 100644
36743 --- a/drivers/scsi/bfa/bfa_ioc.h
36744 +++ b/drivers/scsi/bfa/bfa_ioc.h
36745 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36746 bfa_ioc_disable_cbfn_t disable_cbfn;
36747 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36748 bfa_ioc_reset_cbfn_t reset_cbfn;
36749 -};
36750 +} __no_const;
36751
36752 /*
36753 * IOC event notification mechanism.
36754 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36755 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36756 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36757 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36758 -};
36759 +} __no_const;
36760
36761 /*
36762 * Queue element to wait for room in request queue. FIFO order is
36763 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36764 index b48c24f..dac0fbc 100644
36765 --- a/drivers/scsi/hosts.c
36766 +++ b/drivers/scsi/hosts.c
36767 @@ -42,7 +42,7 @@
36768 #include "scsi_logging.h"
36769
36770
36771 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36772 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36773
36774
36775 static void scsi_host_cls_release(struct device *dev)
36776 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36777 * subtract one because we increment first then return, but we need to
36778 * know what the next host number was before increment
36779 */
36780 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36781 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36782 shost->dma_channel = 0xff;
36783
36784 /* These three are default values which can be overridden */
36785 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36786 index 2b4261c..e01516c 100644
36787 --- a/drivers/scsi/hpsa.c
36788 +++ b/drivers/scsi/hpsa.c
36789 @@ -536,7 +536,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
36790 unsigned long flags;
36791
36792 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36793 - return h->access.command_completed(h, q);
36794 + return h->access->command_completed(h, q);
36795
36796 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
36797 a = rq->head[rq->current_entry];
36798 @@ -3355,7 +3355,7 @@ static void start_io(struct ctlr_info *h)
36799 while (!list_empty(&h->reqQ)) {
36800 c = list_entry(h->reqQ.next, struct CommandList, list);
36801 /* can't do anything if fifo is full */
36802 - if ((h->access.fifo_full(h))) {
36803 + if ((h->access->fifo_full(h))) {
36804 dev_warn(&h->pdev->dev, "fifo full\n");
36805 break;
36806 }
36807 @@ -3377,7 +3377,7 @@ static void start_io(struct ctlr_info *h)
36808
36809 /* Tell the controller execute command */
36810 spin_unlock_irqrestore(&h->lock, flags);
36811 - h->access.submit_command(h, c);
36812 + h->access->submit_command(h, c);
36813 spin_lock_irqsave(&h->lock, flags);
36814 }
36815 spin_unlock_irqrestore(&h->lock, flags);
36816 @@ -3385,17 +3385,17 @@ static void start_io(struct ctlr_info *h)
36817
36818 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
36819 {
36820 - return h->access.command_completed(h, q);
36821 + return h->access->command_completed(h, q);
36822 }
36823
36824 static inline bool interrupt_pending(struct ctlr_info *h)
36825 {
36826 - return h->access.intr_pending(h);
36827 + return h->access->intr_pending(h);
36828 }
36829
36830 static inline long interrupt_not_for_us(struct ctlr_info *h)
36831 {
36832 - return (h->access.intr_pending(h) == 0) ||
36833 + return (h->access->intr_pending(h) == 0) ||
36834 (h->interrupts_enabled == 0);
36835 }
36836
36837 @@ -4299,7 +4299,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36838 if (prod_index < 0)
36839 return -ENODEV;
36840 h->product_name = products[prod_index].product_name;
36841 - h->access = *(products[prod_index].access);
36842 + h->access = products[prod_index].access;
36843
36844 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
36845 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
36846 @@ -4581,7 +4581,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36847
36848 assert_spin_locked(&lockup_detector_lock);
36849 remove_ctlr_from_lockup_detector_list(h);
36850 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36851 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36852 spin_lock_irqsave(&h->lock, flags);
36853 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36854 spin_unlock_irqrestore(&h->lock, flags);
36855 @@ -4759,7 +4759,7 @@ reinit_after_soft_reset:
36856 }
36857
36858 /* make sure the board interrupts are off */
36859 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36860 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36861
36862 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36863 goto clean2;
36864 @@ -4793,7 +4793,7 @@ reinit_after_soft_reset:
36865 * fake ones to scoop up any residual completions.
36866 */
36867 spin_lock_irqsave(&h->lock, flags);
36868 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36869 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36870 spin_unlock_irqrestore(&h->lock, flags);
36871 free_irqs(h);
36872 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36873 @@ -4812,9 +4812,9 @@ reinit_after_soft_reset:
36874 dev_info(&h->pdev->dev, "Board READY.\n");
36875 dev_info(&h->pdev->dev,
36876 "Waiting for stale completions to drain.\n");
36877 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36878 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36879 msleep(10000);
36880 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36881 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36882
36883 rc = controller_reset_failed(h->cfgtable);
36884 if (rc)
36885 @@ -4835,7 +4835,7 @@ reinit_after_soft_reset:
36886 }
36887
36888 /* Turn the interrupts on so we can service requests */
36889 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36890 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36891
36892 hpsa_hba_inquiry(h);
36893 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36894 @@ -4887,7 +4887,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36895 * To write all data in the battery backed cache to disks
36896 */
36897 hpsa_flush_cache(h);
36898 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36899 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36900 hpsa_free_irqs_and_disable_msix(h);
36901 }
36902
36903 @@ -5056,7 +5056,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36904 return;
36905 }
36906 /* Change the access methods to the performant access methods */
36907 - h->access = SA5_performant_access;
36908 + h->access = &SA5_performant_access;
36909 h->transMethod = CFGTBL_Trans_Performant;
36910 }
36911
36912 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36913 index 9816479..c5d4e97 100644
36914 --- a/drivers/scsi/hpsa.h
36915 +++ b/drivers/scsi/hpsa.h
36916 @@ -79,7 +79,7 @@ struct ctlr_info {
36917 unsigned int msix_vector;
36918 unsigned int msi_vector;
36919 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36920 - struct access_method access;
36921 + struct access_method *access;
36922
36923 /* queue and queue Info */
36924 struct list_head reqQ;
36925 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36926 index f2df059..a3a9930 100644
36927 --- a/drivers/scsi/ips.h
36928 +++ b/drivers/scsi/ips.h
36929 @@ -1027,7 +1027,7 @@ typedef struct {
36930 int (*intr)(struct ips_ha *);
36931 void (*enableint)(struct ips_ha *);
36932 uint32_t (*statupd)(struct ips_ha *);
36933 -} ips_hw_func_t;
36934 +} __no_const ips_hw_func_t;
36935
36936 typedef struct ips_ha {
36937 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36938 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36939 index aceffad..c35c08d 100644
36940 --- a/drivers/scsi/libfc/fc_exch.c
36941 +++ b/drivers/scsi/libfc/fc_exch.c
36942 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36943 * all together if not used XXX
36944 */
36945 struct {
36946 - atomic_t no_free_exch;
36947 - atomic_t no_free_exch_xid;
36948 - atomic_t xid_not_found;
36949 - atomic_t xid_busy;
36950 - atomic_t seq_not_found;
36951 - atomic_t non_bls_resp;
36952 + atomic_unchecked_t no_free_exch;
36953 + atomic_unchecked_t no_free_exch_xid;
36954 + atomic_unchecked_t xid_not_found;
36955 + atomic_unchecked_t xid_busy;
36956 + atomic_unchecked_t seq_not_found;
36957 + atomic_unchecked_t non_bls_resp;
36958 } stats;
36959 };
36960
36961 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36962 /* allocate memory for exchange */
36963 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36964 if (!ep) {
36965 - atomic_inc(&mp->stats.no_free_exch);
36966 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36967 goto out;
36968 }
36969 memset(ep, 0, sizeof(*ep));
36970 @@ -780,7 +780,7 @@ out:
36971 return ep;
36972 err:
36973 spin_unlock_bh(&pool->lock);
36974 - atomic_inc(&mp->stats.no_free_exch_xid);
36975 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36976 mempool_free(ep, mp->ep_pool);
36977 return NULL;
36978 }
36979 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36980 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36981 ep = fc_exch_find(mp, xid);
36982 if (!ep) {
36983 - atomic_inc(&mp->stats.xid_not_found);
36984 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36985 reject = FC_RJT_OX_ID;
36986 goto out;
36987 }
36988 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36989 ep = fc_exch_find(mp, xid);
36990 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36991 if (ep) {
36992 - atomic_inc(&mp->stats.xid_busy);
36993 + atomic_inc_unchecked(&mp->stats.xid_busy);
36994 reject = FC_RJT_RX_ID;
36995 goto rel;
36996 }
36997 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36998 }
36999 xid = ep->xid; /* get our XID */
37000 } else if (!ep) {
37001 - atomic_inc(&mp->stats.xid_not_found);
37002 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37003 reject = FC_RJT_RX_ID; /* XID not found */
37004 goto out;
37005 }
37006 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37007 } else {
37008 sp = &ep->seq;
37009 if (sp->id != fh->fh_seq_id) {
37010 - atomic_inc(&mp->stats.seq_not_found);
37011 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37012 if (f_ctl & FC_FC_END_SEQ) {
37013 /*
37014 * Update sequence_id based on incoming last
37015 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37016
37017 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37018 if (!ep) {
37019 - atomic_inc(&mp->stats.xid_not_found);
37020 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37021 goto out;
37022 }
37023 if (ep->esb_stat & ESB_ST_COMPLETE) {
37024 - atomic_inc(&mp->stats.xid_not_found);
37025 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37026 goto rel;
37027 }
37028 if (ep->rxid == FC_XID_UNKNOWN)
37029 ep->rxid = ntohs(fh->fh_rx_id);
37030 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37031 - atomic_inc(&mp->stats.xid_not_found);
37032 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37033 goto rel;
37034 }
37035 if (ep->did != ntoh24(fh->fh_s_id) &&
37036 ep->did != FC_FID_FLOGI) {
37037 - atomic_inc(&mp->stats.xid_not_found);
37038 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37039 goto rel;
37040 }
37041 sof = fr_sof(fp);
37042 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37043 sp->ssb_stat |= SSB_ST_RESP;
37044 sp->id = fh->fh_seq_id;
37045 } else if (sp->id != fh->fh_seq_id) {
37046 - atomic_inc(&mp->stats.seq_not_found);
37047 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37048 goto rel;
37049 }
37050
37051 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37052 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37053
37054 if (!sp)
37055 - atomic_inc(&mp->stats.xid_not_found);
37056 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37057 else
37058 - atomic_inc(&mp->stats.non_bls_resp);
37059 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
37060
37061 fc_frame_free(fp);
37062 }
37063 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
37064 index d109cc3..09f4e7d 100644
37065 --- a/drivers/scsi/libsas/sas_ata.c
37066 +++ b/drivers/scsi/libsas/sas_ata.c
37067 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
37068 .postreset = ata_std_postreset,
37069 .error_handler = ata_std_error_handler,
37070 .post_internal_cmd = sas_ata_post_internal,
37071 - .qc_defer = ata_std_qc_defer,
37072 + .qc_defer = ata_std_qc_defer,
37073 .qc_prep = ata_noop_qc_prep,
37074 .qc_issue = sas_ata_qc_issue,
37075 .qc_fill_rtf = sas_ata_qc_fill_rtf,
37076 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
37077 index e5da6da..c888d48 100644
37078 --- a/drivers/scsi/lpfc/lpfc.h
37079 +++ b/drivers/scsi/lpfc/lpfc.h
37080 @@ -416,7 +416,7 @@ struct lpfc_vport {
37081 struct dentry *debug_nodelist;
37082 struct dentry *vport_debugfs_root;
37083 struct lpfc_debugfs_trc *disc_trc;
37084 - atomic_t disc_trc_cnt;
37085 + atomic_unchecked_t disc_trc_cnt;
37086 #endif
37087 uint8_t stat_data_enabled;
37088 uint8_t stat_data_blocked;
37089 @@ -830,8 +830,8 @@ struct lpfc_hba {
37090 struct timer_list fabric_block_timer;
37091 unsigned long bit_flags;
37092 #define FABRIC_COMANDS_BLOCKED 0
37093 - atomic_t num_rsrc_err;
37094 - atomic_t num_cmd_success;
37095 + atomic_unchecked_t num_rsrc_err;
37096 + atomic_unchecked_t num_cmd_success;
37097 unsigned long last_rsrc_error_time;
37098 unsigned long last_ramp_down_time;
37099 unsigned long last_ramp_up_time;
37100 @@ -867,7 +867,7 @@ struct lpfc_hba {
37101
37102 struct dentry *debug_slow_ring_trc;
37103 struct lpfc_debugfs_trc *slow_ring_trc;
37104 - atomic_t slow_ring_trc_cnt;
37105 + atomic_unchecked_t slow_ring_trc_cnt;
37106 /* iDiag debugfs sub-directory */
37107 struct dentry *idiag_root;
37108 struct dentry *idiag_pci_cfg;
37109 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
37110 index 3217d63..c417981 100644
37111 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
37112 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
37113 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
37114
37115 #include <linux/debugfs.h>
37116
37117 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37118 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37119 static unsigned long lpfc_debugfs_start_time = 0L;
37120
37121 /* iDiag */
37122 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
37123 lpfc_debugfs_enable = 0;
37124
37125 len = 0;
37126 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37127 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37128 (lpfc_debugfs_max_disc_trc - 1);
37129 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37130 dtp = vport->disc_trc + i;
37131 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
37132 lpfc_debugfs_enable = 0;
37133
37134 len = 0;
37135 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37136 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37137 (lpfc_debugfs_max_slow_ring_trc - 1);
37138 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37139 dtp = phba->slow_ring_trc + i;
37140 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
37141 !vport || !vport->disc_trc)
37142 return;
37143
37144 - index = atomic_inc_return(&vport->disc_trc_cnt) &
37145 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37146 (lpfc_debugfs_max_disc_trc - 1);
37147 dtp = vport->disc_trc + index;
37148 dtp->fmt = fmt;
37149 dtp->data1 = data1;
37150 dtp->data2 = data2;
37151 dtp->data3 = data3;
37152 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37153 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37154 dtp->jif = jiffies;
37155 #endif
37156 return;
37157 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
37158 !phba || !phba->slow_ring_trc)
37159 return;
37160
37161 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37162 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37163 (lpfc_debugfs_max_slow_ring_trc - 1);
37164 dtp = phba->slow_ring_trc + index;
37165 dtp->fmt = fmt;
37166 dtp->data1 = data1;
37167 dtp->data2 = data2;
37168 dtp->data3 = data3;
37169 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37170 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37171 dtp->jif = jiffies;
37172 #endif
37173 return;
37174 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37175 "slow_ring buffer\n");
37176 goto debug_failed;
37177 }
37178 - atomic_set(&phba->slow_ring_trc_cnt, 0);
37179 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37180 memset(phba->slow_ring_trc, 0,
37181 (sizeof(struct lpfc_debugfs_trc) *
37182 lpfc_debugfs_max_slow_ring_trc));
37183 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37184 "buffer\n");
37185 goto debug_failed;
37186 }
37187 - atomic_set(&vport->disc_trc_cnt, 0);
37188 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37189
37190 snprintf(name, sizeof(name), "discovery_trace");
37191 vport->debug_disc_trc =
37192 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
37193 index 411ed48..967f553 100644
37194 --- a/drivers/scsi/lpfc/lpfc_init.c
37195 +++ b/drivers/scsi/lpfc/lpfc_init.c
37196 @@ -10341,8 +10341,10 @@ lpfc_init(void)
37197 "misc_register returned with status %d", error);
37198
37199 if (lpfc_enable_npiv) {
37200 - lpfc_transport_functions.vport_create = lpfc_vport_create;
37201 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37202 + pax_open_kernel();
37203 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37204 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37205 + pax_close_kernel();
37206 }
37207 lpfc_transport_template =
37208 fc_attach_transport(&lpfc_transport_functions);
37209 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
37210 index 66e0906..1620281 100644
37211 --- a/drivers/scsi/lpfc/lpfc_scsi.c
37212 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
37213 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
37214 uint32_t evt_posted;
37215
37216 spin_lock_irqsave(&phba->hbalock, flags);
37217 - atomic_inc(&phba->num_rsrc_err);
37218 + atomic_inc_unchecked(&phba->num_rsrc_err);
37219 phba->last_rsrc_error_time = jiffies;
37220
37221 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37222 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
37223 unsigned long flags;
37224 struct lpfc_hba *phba = vport->phba;
37225 uint32_t evt_posted;
37226 - atomic_inc(&phba->num_cmd_success);
37227 + atomic_inc_unchecked(&phba->num_cmd_success);
37228
37229 if (vport->cfg_lun_queue_depth <= queue_depth)
37230 return;
37231 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37232 unsigned long num_rsrc_err, num_cmd_success;
37233 int i;
37234
37235 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37236 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37237 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37238 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37239
37240 /*
37241 * The error and success command counters are global per
37242 @@ -425,8 +425,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37243 }
37244 }
37245 lpfc_destroy_vport_work_array(phba, vports);
37246 - atomic_set(&phba->num_rsrc_err, 0);
37247 - atomic_set(&phba->num_cmd_success, 0);
37248 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37249 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37250 }
37251
37252 /**
37253 @@ -460,8 +460,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37254 }
37255 }
37256 lpfc_destroy_vport_work_array(phba, vports);
37257 - atomic_set(&phba->num_rsrc_err, 0);
37258 - atomic_set(&phba->num_cmd_success, 0);
37259 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37260 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37261 }
37262
37263 /**
37264 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37265 index ea8a0b4..812a124 100644
37266 --- a/drivers/scsi/pmcraid.c
37267 +++ b/drivers/scsi/pmcraid.c
37268 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37269 res->scsi_dev = scsi_dev;
37270 scsi_dev->hostdata = res;
37271 res->change_detected = 0;
37272 - atomic_set(&res->read_failures, 0);
37273 - atomic_set(&res->write_failures, 0);
37274 + atomic_set_unchecked(&res->read_failures, 0);
37275 + atomic_set_unchecked(&res->write_failures, 0);
37276 rc = 0;
37277 }
37278 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37279 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37280
37281 /* If this was a SCSI read/write command keep count of errors */
37282 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37283 - atomic_inc(&res->read_failures);
37284 + atomic_inc_unchecked(&res->read_failures);
37285 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37286 - atomic_inc(&res->write_failures);
37287 + atomic_inc_unchecked(&res->write_failures);
37288
37289 if (!RES_IS_GSCSI(res->cfg_entry) &&
37290 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37291 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37292 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37293 * hrrq_id assigned here in queuecommand
37294 */
37295 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37296 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37297 pinstance->num_hrrq;
37298 cmd->cmd_done = pmcraid_io_done;
37299
37300 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37301 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37302 * hrrq_id assigned here in queuecommand
37303 */
37304 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37305 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37306 pinstance->num_hrrq;
37307
37308 if (request_size) {
37309 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37310
37311 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37312 /* add resources only after host is added into system */
37313 - if (!atomic_read(&pinstance->expose_resources))
37314 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37315 return;
37316
37317 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37318 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37319 init_waitqueue_head(&pinstance->reset_wait_q);
37320
37321 atomic_set(&pinstance->outstanding_cmds, 0);
37322 - atomic_set(&pinstance->last_message_id, 0);
37323 - atomic_set(&pinstance->expose_resources, 0);
37324 + atomic_set_unchecked(&pinstance->last_message_id, 0);
37325 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37326
37327 INIT_LIST_HEAD(&pinstance->free_res_q);
37328 INIT_LIST_HEAD(&pinstance->used_res_q);
37329 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37330 /* Schedule worker thread to handle CCN and take care of adding and
37331 * removing devices to OS
37332 */
37333 - atomic_set(&pinstance->expose_resources, 1);
37334 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37335 schedule_work(&pinstance->worker_q);
37336 return rc;
37337
37338 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37339 index e1d150f..6c6df44 100644
37340 --- a/drivers/scsi/pmcraid.h
37341 +++ b/drivers/scsi/pmcraid.h
37342 @@ -748,7 +748,7 @@ struct pmcraid_instance {
37343 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37344
37345 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37346 - atomic_t last_message_id;
37347 + atomic_unchecked_t last_message_id;
37348
37349 /* configuration table */
37350 struct pmcraid_config_table *cfg_table;
37351 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37352 atomic_t outstanding_cmds;
37353
37354 /* should add/delete resources to mid-layer now ?*/
37355 - atomic_t expose_resources;
37356 + atomic_unchecked_t expose_resources;
37357
37358
37359
37360 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37361 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37362 };
37363 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37364 - atomic_t read_failures; /* count of failed READ commands */
37365 - atomic_t write_failures; /* count of failed WRITE commands */
37366 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37367 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37368
37369 /* To indicate add/delete/modify during CCN */
37370 u8 change_detected;
37371 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
37372 index 5ab9530..2dd80f7 100644
37373 --- a/drivers/scsi/qla2xxx/qla_attr.c
37374 +++ b/drivers/scsi/qla2xxx/qla_attr.c
37375 @@ -1855,7 +1855,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
37376 return 0;
37377 }
37378
37379 -struct fc_function_template qla2xxx_transport_functions = {
37380 +fc_function_template_no_const qla2xxx_transport_functions = {
37381
37382 .show_host_node_name = 1,
37383 .show_host_port_name = 1,
37384 @@ -1902,7 +1902,7 @@ struct fc_function_template qla2xxx_transport_functions = {
37385 .bsg_timeout = qla24xx_bsg_timeout,
37386 };
37387
37388 -struct fc_function_template qla2xxx_transport_vport_functions = {
37389 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
37390
37391 .show_host_node_name = 1,
37392 .show_host_port_name = 1,
37393 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37394 index 39007f5..7fafc64 100644
37395 --- a/drivers/scsi/qla2xxx/qla_def.h
37396 +++ b/drivers/scsi/qla2xxx/qla_def.h
37397 @@ -2284,7 +2284,7 @@ struct isp_operations {
37398 int (*start_scsi) (srb_t *);
37399 int (*abort_isp) (struct scsi_qla_host *);
37400 int (*iospace_config)(struct qla_hw_data*);
37401 -};
37402 +} __no_const;
37403
37404 /* MSI-X Support *************************************************************/
37405
37406 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
37407 index 9eacd2d..d79629c 100644
37408 --- a/drivers/scsi/qla2xxx/qla_gbl.h
37409 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
37410 @@ -484,8 +484,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
37411 struct device_attribute;
37412 extern struct device_attribute *qla2x00_host_attrs[];
37413 struct fc_function_template;
37414 -extern struct fc_function_template qla2xxx_transport_functions;
37415 -extern struct fc_function_template qla2xxx_transport_vport_functions;
37416 +extern fc_function_template_no_const qla2xxx_transport_functions;
37417 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
37418 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
37419 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
37420 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
37421 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37422 index 96a5616..eeb185a 100644
37423 --- a/drivers/scsi/qla4xxx/ql4_def.h
37424 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37425 @@ -268,7 +268,7 @@ struct ddb_entry {
37426 * (4000 only) */
37427 atomic_t relogin_timer; /* Max Time to wait for
37428 * relogin to complete */
37429 - atomic_t relogin_retry_count; /* Num of times relogin has been
37430 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37431 * retried */
37432 uint32_t default_time2wait; /* Default Min time between
37433 * relogins (+aens) */
37434 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37435 index cd15678..f7e6846 100644
37436 --- a/drivers/scsi/qla4xxx/ql4_os.c
37437 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37438 @@ -2615,12 +2615,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37439 */
37440 if (!iscsi_is_session_online(cls_sess)) {
37441 /* Reset retry relogin timer */
37442 - atomic_inc(&ddb_entry->relogin_retry_count);
37443 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37444 DEBUG2(ql4_printk(KERN_INFO, ha,
37445 "%s: index[%d] relogin timed out-retrying"
37446 " relogin (%d), retry (%d)\n", __func__,
37447 ddb_entry->fw_ddb_index,
37448 - atomic_read(&ddb_entry->relogin_retry_count),
37449 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37450 ddb_entry->default_time2wait + 4));
37451 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37452 atomic_set(&ddb_entry->retry_relogin_timer,
37453 @@ -4517,7 +4517,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37454
37455 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37456 atomic_set(&ddb_entry->relogin_timer, 0);
37457 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37458 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37459 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37460 ddb_entry->default_relogin_timeout =
37461 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37462 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37463 index bbbc9c9..ce22f77 100644
37464 --- a/drivers/scsi/scsi.c
37465 +++ b/drivers/scsi/scsi.c
37466 @@ -659,7 +659,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37467 unsigned long timeout;
37468 int rtn = 0;
37469
37470 - atomic_inc(&cmd->device->iorequest_cnt);
37471 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37472
37473 /* check if the device is still usable */
37474 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37475 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37476 index dae3873..bb4bee6 100644
37477 --- a/drivers/scsi/scsi_lib.c
37478 +++ b/drivers/scsi/scsi_lib.c
37479 @@ -1425,7 +1425,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37480 shost = sdev->host;
37481 scsi_init_cmd_errh(cmd);
37482 cmd->result = DID_NO_CONNECT << 16;
37483 - atomic_inc(&cmd->device->iorequest_cnt);
37484 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37485
37486 /*
37487 * SCSI request completion path will do scsi_device_unbusy(),
37488 @@ -1451,9 +1451,9 @@ static void scsi_softirq_done(struct request *rq)
37489
37490 INIT_LIST_HEAD(&cmd->eh_entry);
37491
37492 - atomic_inc(&cmd->device->iodone_cnt);
37493 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37494 if (cmd->result)
37495 - atomic_inc(&cmd->device->ioerr_cnt);
37496 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37497
37498 disposition = scsi_decide_disposition(cmd);
37499 if (disposition != SUCCESS &&
37500 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37501 index bb7c482..7551a95 100644
37502 --- a/drivers/scsi/scsi_sysfs.c
37503 +++ b/drivers/scsi/scsi_sysfs.c
37504 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37505 char *buf) \
37506 { \
37507 struct scsi_device *sdev = to_scsi_device(dev); \
37508 - unsigned long long count = atomic_read(&sdev->field); \
37509 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37510 return snprintf(buf, 20, "0x%llx\n", count); \
37511 } \
37512 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37513 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37514 index 84a1fdf..693b0d6 100644
37515 --- a/drivers/scsi/scsi_tgt_lib.c
37516 +++ b/drivers/scsi/scsi_tgt_lib.c
37517 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37518 int err;
37519
37520 dprintk("%lx %u\n", uaddr, len);
37521 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37522 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37523 if (err) {
37524 /*
37525 * TODO: need to fixup sg_tablesize, max_segment_size,
37526 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37527 index 5797604..289a5b5 100644
37528 --- a/drivers/scsi/scsi_transport_fc.c
37529 +++ b/drivers/scsi/scsi_transport_fc.c
37530 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37531 * Netlink Infrastructure
37532 */
37533
37534 -static atomic_t fc_event_seq;
37535 +static atomic_unchecked_t fc_event_seq;
37536
37537 /**
37538 * fc_get_event_number - Obtain the next sequential FC event number
37539 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37540 u32
37541 fc_get_event_number(void)
37542 {
37543 - return atomic_add_return(1, &fc_event_seq);
37544 + return atomic_add_return_unchecked(1, &fc_event_seq);
37545 }
37546 EXPORT_SYMBOL(fc_get_event_number);
37547
37548 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37549 {
37550 int error;
37551
37552 - atomic_set(&fc_event_seq, 0);
37553 + atomic_set_unchecked(&fc_event_seq, 0);
37554
37555 error = transport_class_register(&fc_host_class);
37556 if (error)
37557 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37558 char *cp;
37559
37560 *val = simple_strtoul(buf, &cp, 0);
37561 - if ((*cp && (*cp != '\n')) || (*val < 0))
37562 + if (*cp && (*cp != '\n'))
37563 return -EINVAL;
37564 /*
37565 * Check for overflow; dev_loss_tmo is u32
37566 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37567 index 1cf640e..78e9014 100644
37568 --- a/drivers/scsi/scsi_transport_iscsi.c
37569 +++ b/drivers/scsi/scsi_transport_iscsi.c
37570 @@ -79,7 +79,7 @@ struct iscsi_internal {
37571 struct transport_container session_cont;
37572 };
37573
37574 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37575 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37576 static struct workqueue_struct *iscsi_eh_timer_workq;
37577
37578 static DEFINE_IDA(iscsi_sess_ida);
37579 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37580 int err;
37581
37582 ihost = shost->shost_data;
37583 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37584 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37585
37586 if (target_id == ISCSI_MAX_TARGET) {
37587 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37588 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37589 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37590 ISCSI_TRANSPORT_VERSION);
37591
37592 - atomic_set(&iscsi_session_nr, 0);
37593 + atomic_set_unchecked(&iscsi_session_nr, 0);
37594
37595 err = class_register(&iscsi_transport_class);
37596 if (err)
37597 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37598 index 21a045e..ec89e03 100644
37599 --- a/drivers/scsi/scsi_transport_srp.c
37600 +++ b/drivers/scsi/scsi_transport_srp.c
37601 @@ -33,7 +33,7 @@
37602 #include "scsi_transport_srp_internal.h"
37603
37604 struct srp_host_attrs {
37605 - atomic_t next_port_id;
37606 + atomic_unchecked_t next_port_id;
37607 };
37608 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37609
37610 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37611 struct Scsi_Host *shost = dev_to_shost(dev);
37612 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37613
37614 - atomic_set(&srp_host->next_port_id, 0);
37615 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37616 return 0;
37617 }
37618
37619 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37620 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37621 rport->roles = ids->roles;
37622
37623 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37624 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37625 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37626
37627 transport_setup_device(&rport->dev);
37628 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37629 index 9c5c5f2..8414557 100644
37630 --- a/drivers/scsi/sg.c
37631 +++ b/drivers/scsi/sg.c
37632 @@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37633 sdp->disk->disk_name,
37634 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37635 NULL,
37636 - (char *)arg);
37637 + (char __user *)arg);
37638 case BLKTRACESTART:
37639 return blk_trace_startstop(sdp->device->request_queue, 1);
37640 case BLKTRACESTOP:
37641 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37642 index 1041cb8..4a946fa 100644
37643 --- a/drivers/spi/spi.c
37644 +++ b/drivers/spi/spi.c
37645 @@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
37646 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37647
37648 /* portable code must never pass more than 32 bytes */
37649 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37650 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37651
37652 static u8 *buf;
37653
37654 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37655 index 34afc16..ffe44dd 100644
37656 --- a/drivers/staging/octeon/ethernet-rx.c
37657 +++ b/drivers/staging/octeon/ethernet-rx.c
37658 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37659 /* Increment RX stats for virtual ports */
37660 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37661 #ifdef CONFIG_64BIT
37662 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37663 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37664 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37665 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37666 #else
37667 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37668 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37669 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37670 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37671 #endif
37672 }
37673 netif_receive_skb(skb);
37674 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37675 dev->name);
37676 */
37677 #ifdef CONFIG_64BIT
37678 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37679 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37680 #else
37681 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37682 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37683 #endif
37684 dev_kfree_skb_irq(skb);
37685 }
37686 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37687 index 18f7a79..cc3bc24 100644
37688 --- a/drivers/staging/octeon/ethernet.c
37689 +++ b/drivers/staging/octeon/ethernet.c
37690 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37691 * since the RX tasklet also increments it.
37692 */
37693 #ifdef CONFIG_64BIT
37694 - atomic64_add(rx_status.dropped_packets,
37695 - (atomic64_t *)&priv->stats.rx_dropped);
37696 + atomic64_add_unchecked(rx_status.dropped_packets,
37697 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37698 #else
37699 - atomic_add(rx_status.dropped_packets,
37700 - (atomic_t *)&priv->stats.rx_dropped);
37701 + atomic_add_unchecked(rx_status.dropped_packets,
37702 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37703 #endif
37704 }
37705
37706 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37707 index dc23395..cf7e9b1 100644
37708 --- a/drivers/staging/rtl8712/rtl871x_io.h
37709 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37710 @@ -108,7 +108,7 @@ struct _io_ops {
37711 u8 *pmem);
37712 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37713 u8 *pmem);
37714 -};
37715 +} __no_const;
37716
37717 struct io_req {
37718 struct list_head list;
37719 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37720 index c7b5e8b..783d6cb 100644
37721 --- a/drivers/staging/sbe-2t3e3/netdev.c
37722 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37723 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37724 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37725
37726 if (rlen)
37727 - if (copy_to_user(data, &resp, rlen))
37728 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37729 return -EFAULT;
37730
37731 return 0;
37732 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37733 index 42cdafe..2769103 100644
37734 --- a/drivers/staging/speakup/speakup_soft.c
37735 +++ b/drivers/staging/speakup/speakup_soft.c
37736 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37737 break;
37738 } else if (!initialized) {
37739 if (*init) {
37740 - ch = *init;
37741 init++;
37742 } else {
37743 initialized = 1;
37744 }
37745 + ch = *init;
37746 } else {
37747 ch = synth_buffer_getc();
37748 }
37749 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37750 index 5d89c0f..9261317 100644
37751 --- a/drivers/staging/usbip/usbip_common.h
37752 +++ b/drivers/staging/usbip/usbip_common.h
37753 @@ -289,7 +289,7 @@ struct usbip_device {
37754 void (*shutdown)(struct usbip_device *);
37755 void (*reset)(struct usbip_device *);
37756 void (*unusable)(struct usbip_device *);
37757 - } eh_ops;
37758 + } __no_const eh_ops;
37759 };
37760
37761 #define kthread_get_run(threadfn, data, namefmt, ...) \
37762 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37763 index 88b3298..3783eee 100644
37764 --- a/drivers/staging/usbip/vhci.h
37765 +++ b/drivers/staging/usbip/vhci.h
37766 @@ -88,7 +88,7 @@ struct vhci_hcd {
37767 unsigned resuming:1;
37768 unsigned long re_timeout;
37769
37770 - atomic_t seqnum;
37771 + atomic_unchecked_t seqnum;
37772
37773 /*
37774 * NOTE:
37775 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37776 index f708cba..2de6d72 100644
37777 --- a/drivers/staging/usbip/vhci_hcd.c
37778 +++ b/drivers/staging/usbip/vhci_hcd.c
37779 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37780 return;
37781 }
37782
37783 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37784 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37785 if (priv->seqnum == 0xffff)
37786 dev_info(&urb->dev->dev, "seqnum max\n");
37787
37788 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37789 return -ENOMEM;
37790 }
37791
37792 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37793 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37794 if (unlink->seqnum == 0xffff)
37795 pr_info("seqnum max\n");
37796
37797 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37798 vdev->rhport = rhport;
37799 }
37800
37801 - atomic_set(&vhci->seqnum, 0);
37802 + atomic_set_unchecked(&vhci->seqnum, 0);
37803 spin_lock_init(&vhci->lock);
37804
37805 hcd->power_budget = 0; /* no limit */
37806 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37807 index f0eaf04..5a82e06 100644
37808 --- a/drivers/staging/usbip/vhci_rx.c
37809 +++ b/drivers/staging/usbip/vhci_rx.c
37810 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37811 if (!urb) {
37812 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37813 pr_info("max seqnum %d\n",
37814 - atomic_read(&the_controller->seqnum));
37815 + atomic_read_unchecked(&the_controller->seqnum));
37816 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37817 return;
37818 }
37819 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37820 index 7735027..30eed13 100644
37821 --- a/drivers/staging/vt6655/hostap.c
37822 +++ b/drivers/staging/vt6655/hostap.c
37823 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37824 *
37825 */
37826
37827 +static net_device_ops_no_const apdev_netdev_ops;
37828 +
37829 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37830 {
37831 PSDevice apdev_priv;
37832 struct net_device *dev = pDevice->dev;
37833 int ret;
37834 - const struct net_device_ops apdev_netdev_ops = {
37835 - .ndo_start_xmit = pDevice->tx_80211,
37836 - };
37837
37838 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37839
37840 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37841 *apdev_priv = *pDevice;
37842 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37843
37844 + /* only half broken now */
37845 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37846 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37847
37848 pDevice->apdev->type = ARPHRD_IEEE80211;
37849 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37850 index 51b5adf..098e320 100644
37851 --- a/drivers/staging/vt6656/hostap.c
37852 +++ b/drivers/staging/vt6656/hostap.c
37853 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37854 *
37855 */
37856
37857 +static net_device_ops_no_const apdev_netdev_ops;
37858 +
37859 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37860 {
37861 PSDevice apdev_priv;
37862 struct net_device *dev = pDevice->dev;
37863 int ret;
37864 - const struct net_device_ops apdev_netdev_ops = {
37865 - .ndo_start_xmit = pDevice->tx_80211,
37866 - };
37867
37868 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37869
37870 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37871 *apdev_priv = *pDevice;
37872 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37873
37874 + /* only half broken now */
37875 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37876 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37877
37878 pDevice->apdev->type = ARPHRD_IEEE80211;
37879 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37880 index 7843dfd..3db105f 100644
37881 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37882 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37883 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37884
37885 struct usbctlx_completor {
37886 int (*complete) (struct usbctlx_completor *);
37887 -};
37888 +} __no_const;
37889
37890 static int
37891 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37892 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37893 index 1ca66ea..76f1343 100644
37894 --- a/drivers/staging/zcache/tmem.c
37895 +++ b/drivers/staging/zcache/tmem.c
37896 @@ -39,7 +39,7 @@
37897 * A tmem host implementation must use this function to register callbacks
37898 * for memory allocation.
37899 */
37900 -static struct tmem_hostops tmem_hostops;
37901 +static tmem_hostops_no_const tmem_hostops;
37902
37903 static void tmem_objnode_tree_init(void);
37904
37905 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37906 * A tmem host implementation must use this function to register
37907 * callbacks for a page-accessible memory (PAM) implementation
37908 */
37909 -static struct tmem_pamops tmem_pamops;
37910 +static tmem_pamops_no_const tmem_pamops;
37911
37912 void tmem_register_pamops(struct tmem_pamops *m)
37913 {
37914 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37915 index 0d4aa82..f7832d4 100644
37916 --- a/drivers/staging/zcache/tmem.h
37917 +++ b/drivers/staging/zcache/tmem.h
37918 @@ -180,6 +180,7 @@ struct tmem_pamops {
37919 void (*new_obj)(struct tmem_obj *);
37920 int (*replace_in_obj)(void *, struct tmem_obj *);
37921 };
37922 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37923 extern void tmem_register_pamops(struct tmem_pamops *m);
37924
37925 /* memory allocation methods provided by the host implementation */
37926 @@ -189,6 +190,7 @@ struct tmem_hostops {
37927 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37928 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37929 };
37930 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37931 extern void tmem_register_hostops(struct tmem_hostops *m);
37932
37933 /* core tmem accessor functions */
37934 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37935 index 0981707..fa256ac 100644
37936 --- a/drivers/target/target_core_transport.c
37937 +++ b/drivers/target/target_core_transport.c
37938 @@ -1233,7 +1233,7 @@ struct se_device *transport_add_device_to_core_hba(
37939 spin_lock_init(&dev->se_port_lock);
37940 spin_lock_init(&dev->se_tmr_lock);
37941 spin_lock_init(&dev->qf_cmd_lock);
37942 - atomic_set(&dev->dev_ordered_id, 0);
37943 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37944
37945 se_dev_set_default_attribs(dev, dev_limits);
37946
37947 @@ -1402,7 +1402,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37948 * Used to determine when ORDERED commands should go from
37949 * Dormant to Active status.
37950 */
37951 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37952 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37953 smp_mb__after_atomic_inc();
37954 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37955 cmd->se_ordered_id, cmd->sam_task_attr,
37956 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
37957 index e61cabd..7617d26 100644
37958 --- a/drivers/tty/cyclades.c
37959 +++ b/drivers/tty/cyclades.c
37960 @@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
37961 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
37962 info->port.count);
37963 #endif
37964 - info->port.count++;
37965 + atomic_inc(&info->port.count);
37966 #ifdef CY_DEBUG_COUNT
37967 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
37968 - current->pid, info->port.count);
37969 + current->pid, atomic_read(&info->port.count));
37970 #endif
37971
37972 /*
37973 @@ -3987,7 +3987,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
37974 for (j = 0; j < cy_card[i].nports; j++) {
37975 info = &cy_card[i].ports[j];
37976
37977 - if (info->port.count) {
37978 + if (atomic_read(&info->port.count)) {
37979 /* XXX is the ldisc num worth this? */
37980 struct tty_struct *tty;
37981 struct tty_ldisc *ld;
37982 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
37983 index 2d691eb..be02ebd 100644
37984 --- a/drivers/tty/hvc/hvc_console.c
37985 +++ b/drivers/tty/hvc/hvc_console.c
37986 @@ -315,7 +315,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
37987
37988 spin_lock_irqsave(&hp->port.lock, flags);
37989 /* Check and then increment for fast path open. */
37990 - if (hp->port.count++ > 0) {
37991 + if (atomic_inc_return(&hp->port.count) > 1) {
37992 spin_unlock_irqrestore(&hp->port.lock, flags);
37993 hvc_kick();
37994 return 0;
37995 @@ -366,7 +366,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
37996
37997 spin_lock_irqsave(&hp->port.lock, flags);
37998
37999 - if (--hp->port.count == 0) {
38000 + if (atomic_dec_return(&hp->port.count) == 0) {
38001 spin_unlock_irqrestore(&hp->port.lock, flags);
38002 /* We are done with the tty pointer now. */
38003 tty_port_tty_set(&hp->port, NULL);
38004 @@ -384,9 +384,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
38005 */
38006 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
38007 } else {
38008 - if (hp->port.count < 0)
38009 + if (atomic_read(&hp->port.count) < 0)
38010 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
38011 - hp->vtermno, hp->port.count);
38012 + hp->vtermno, atomic_read(&hp->port.count));
38013 spin_unlock_irqrestore(&hp->port.lock, flags);
38014 }
38015
38016 @@ -412,13 +412,13 @@ static void hvc_hangup(struct tty_struct *tty)
38017 * open->hangup case this can be called after the final close so prevent
38018 * that from happening for now.
38019 */
38020 - if (hp->port.count <= 0) {
38021 + if (atomic_read(&hp->port.count) <= 0) {
38022 spin_unlock_irqrestore(&hp->port.lock, flags);
38023 return;
38024 }
38025
38026 - temp_open_count = hp->port.count;
38027 - hp->port.count = 0;
38028 + temp_open_count = atomic_read(&hp->port.count);
38029 + atomic_set(&hp->port.count, 0);
38030 spin_unlock_irqrestore(&hp->port.lock, flags);
38031 tty_port_tty_set(&hp->port, NULL);
38032
38033 @@ -471,7 +471,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
38034 return -EPIPE;
38035
38036 /* FIXME what's this (unprotected) check for? */
38037 - if (hp->port.count <= 0)
38038 + if (atomic_read(&hp->port.count) <= 0)
38039 return -EIO;
38040
38041 spin_lock_irqsave(&hp->lock, flags);
38042 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
38043 index d56788c..12d8f85 100644
38044 --- a/drivers/tty/hvc/hvcs.c
38045 +++ b/drivers/tty/hvc/hvcs.c
38046 @@ -83,6 +83,7 @@
38047 #include <asm/hvcserver.h>
38048 #include <asm/uaccess.h>
38049 #include <asm/vio.h>
38050 +#include <asm/local.h>
38051
38052 /*
38053 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
38054 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
38055
38056 spin_lock_irqsave(&hvcsd->lock, flags);
38057
38058 - if (hvcsd->port.count > 0) {
38059 + if (atomic_read(&hvcsd->port.count) > 0) {
38060 spin_unlock_irqrestore(&hvcsd->lock, flags);
38061 printk(KERN_INFO "HVCS: vterm state unchanged. "
38062 "The hvcs device node is still in use.\n");
38063 @@ -1134,7 +1135,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
38064 if ((retval = hvcs_partner_connect(hvcsd)))
38065 goto error_release;
38066
38067 - hvcsd->port.count = 1;
38068 + atomic_set(&hvcsd->port.count, 1);
38069 hvcsd->port.tty = tty;
38070 tty->driver_data = hvcsd;
38071
38072 @@ -1168,7 +1169,7 @@ fast_open:
38073
38074 spin_lock_irqsave(&hvcsd->lock, flags);
38075 tty_port_get(&hvcsd->port);
38076 - hvcsd->port.count++;
38077 + atomic_inc(&hvcsd->port.count);
38078 hvcsd->todo_mask |= HVCS_SCHED_READ;
38079 spin_unlock_irqrestore(&hvcsd->lock, flags);
38080
38081 @@ -1212,7 +1213,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38082 hvcsd = tty->driver_data;
38083
38084 spin_lock_irqsave(&hvcsd->lock, flags);
38085 - if (--hvcsd->port.count == 0) {
38086 + if (atomic_dec_and_test(&hvcsd->port.count)) {
38087
38088 vio_disable_interrupts(hvcsd->vdev);
38089
38090 @@ -1238,10 +1239,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38091 free_irq(irq, hvcsd);
38092 tty_port_put(&hvcsd->port);
38093 return;
38094 - } else if (hvcsd->port.count < 0) {
38095 + } else if (atomic_read(&hvcsd->port.count) < 0) {
38096 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
38097 " is missmanaged.\n",
38098 - hvcsd->vdev->unit_address, hvcsd->port.count);
38099 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
38100 }
38101
38102 spin_unlock_irqrestore(&hvcsd->lock, flags);
38103 @@ -1257,7 +1258,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38104
38105 spin_lock_irqsave(&hvcsd->lock, flags);
38106 /* Preserve this so that we know how many kref refs to put */
38107 - temp_open_count = hvcsd->port.count;
38108 + temp_open_count = atomic_read(&hvcsd->port.count);
38109
38110 /*
38111 * Don't kref put inside the spinlock because the destruction
38112 @@ -1272,7 +1273,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38113 tty->driver_data = NULL;
38114 hvcsd->port.tty = NULL;
38115
38116 - hvcsd->port.count = 0;
38117 + atomic_set(&hvcsd->port.count, 0);
38118
38119 /* This will drop any buffered data on the floor which is OK in a hangup
38120 * scenario. */
38121 @@ -1343,7 +1344,7 @@ static int hvcs_write(struct tty_struct *tty,
38122 * the middle of a write operation? This is a crummy place to do this
38123 * but we want to keep it all in the spinlock.
38124 */
38125 - if (hvcsd->port.count <= 0) {
38126 + if (atomic_read(&hvcsd->port.count) <= 0) {
38127 spin_unlock_irqrestore(&hvcsd->lock, flags);
38128 return -ENODEV;
38129 }
38130 @@ -1417,7 +1418,7 @@ static int hvcs_write_room(struct tty_struct *tty)
38131 {
38132 struct hvcs_struct *hvcsd = tty->driver_data;
38133
38134 - if (!hvcsd || hvcsd->port.count <= 0)
38135 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
38136 return 0;
38137
38138 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
38139 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
38140 index f8b5fa0..4ba9f89 100644
38141 --- a/drivers/tty/ipwireless/tty.c
38142 +++ b/drivers/tty/ipwireless/tty.c
38143 @@ -29,6 +29,7 @@
38144 #include <linux/tty_driver.h>
38145 #include <linux/tty_flip.h>
38146 #include <linux/uaccess.h>
38147 +#include <asm/local.h>
38148
38149 #include "tty.h"
38150 #include "network.h"
38151 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38152 mutex_unlock(&tty->ipw_tty_mutex);
38153 return -ENODEV;
38154 }
38155 - if (tty->port.count == 0)
38156 + if (atomic_read(&tty->port.count) == 0)
38157 tty->tx_bytes_queued = 0;
38158
38159 - tty->port.count++;
38160 + atomic_inc(&tty->port.count);
38161
38162 tty->port.tty = linux_tty;
38163 linux_tty->driver_data = tty;
38164 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38165
38166 static void do_ipw_close(struct ipw_tty *tty)
38167 {
38168 - tty->port.count--;
38169 -
38170 - if (tty->port.count == 0) {
38171 + if (atomic_dec_return(&tty->port.count) == 0) {
38172 struct tty_struct *linux_tty = tty->port.tty;
38173
38174 if (linux_tty != NULL) {
38175 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
38176 return;
38177
38178 mutex_lock(&tty->ipw_tty_mutex);
38179 - if (tty->port.count == 0) {
38180 + if (atomic_read(&tty->port.count) == 0) {
38181 mutex_unlock(&tty->ipw_tty_mutex);
38182 return;
38183 }
38184 @@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38185 return;
38186 }
38187
38188 - if (!tty->port.count) {
38189 + if (!atomic_read(&tty->port.count)) {
38190 mutex_unlock(&tty->ipw_tty_mutex);
38191 return;
38192 }
38193 @@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38194 return -ENODEV;
38195
38196 mutex_lock(&tty->ipw_tty_mutex);
38197 - if (!tty->port.count) {
38198 + if (!atomic_read(&tty->port.count)) {
38199 mutex_unlock(&tty->ipw_tty_mutex);
38200 return -EINVAL;
38201 }
38202 @@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38203 if (!tty)
38204 return -ENODEV;
38205
38206 - if (!tty->port.count)
38207 + if (!atomic_read(&tty->port.count))
38208 return -EINVAL;
38209
38210 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38211 @@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38212 if (!tty)
38213 return 0;
38214
38215 - if (!tty->port.count)
38216 + if (!atomic_read(&tty->port.count))
38217 return 0;
38218
38219 return tty->tx_bytes_queued;
38220 @@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38221 if (!tty)
38222 return -ENODEV;
38223
38224 - if (!tty->port.count)
38225 + if (!atomic_read(&tty->port.count))
38226 return -EINVAL;
38227
38228 return get_control_lines(tty);
38229 @@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38230 if (!tty)
38231 return -ENODEV;
38232
38233 - if (!tty->port.count)
38234 + if (!atomic_read(&tty->port.count))
38235 return -EINVAL;
38236
38237 return set_control_lines(tty, set, clear);
38238 @@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38239 if (!tty)
38240 return -ENODEV;
38241
38242 - if (!tty->port.count)
38243 + if (!atomic_read(&tty->port.count))
38244 return -EINVAL;
38245
38246 /* FIXME: Exactly how is the tty object locked here .. */
38247 @@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38248 * are gone */
38249 mutex_lock(&ttyj->ipw_tty_mutex);
38250 }
38251 - while (ttyj->port.count)
38252 + while (atomic_read(&ttyj->port.count))
38253 do_ipw_close(ttyj);
38254 ipwireless_disassociate_network_ttys(network,
38255 ttyj->channel_idx);
38256 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
38257 index 324467d..504cc25 100644
38258 --- a/drivers/tty/moxa.c
38259 +++ b/drivers/tty/moxa.c
38260 @@ -1172,7 +1172,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
38261 }
38262
38263 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
38264 - ch->port.count++;
38265 + atomic_inc(&ch->port.count);
38266 tty->driver_data = ch;
38267 tty_port_tty_set(&ch->port, tty);
38268 mutex_lock(&ch->port.mutex);
38269 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38270 index c43b683..4dab83e 100644
38271 --- a/drivers/tty/n_gsm.c
38272 +++ b/drivers/tty/n_gsm.c
38273 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38274 kref_init(&dlci->ref);
38275 mutex_init(&dlci->mutex);
38276 dlci->fifo = &dlci->_fifo;
38277 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38278 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38279 kfree(dlci);
38280 return NULL;
38281 }
38282 @@ -2895,7 +2895,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
38283 if (dlci == NULL)
38284 return -ENOMEM;
38285 port = &dlci->port;
38286 - port->count++;
38287 + atomic_inc(&port->count);
38288 tty->driver_data = dlci;
38289 dlci_get(dlci);
38290 dlci_get(dlci->gsm->dlci[0]);
38291 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38292 index ee1c268..0e97caf 100644
38293 --- a/drivers/tty/n_tty.c
38294 +++ b/drivers/tty/n_tty.c
38295 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38296 {
38297 *ops = tty_ldisc_N_TTY;
38298 ops->owner = NULL;
38299 - ops->refcount = ops->flags = 0;
38300 + atomic_set(&ops->refcount, 0);
38301 + ops->flags = 0;
38302 }
38303 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38304 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38305 index 5505ffc..7affff9 100644
38306 --- a/drivers/tty/pty.c
38307 +++ b/drivers/tty/pty.c
38308 @@ -718,8 +718,10 @@ static void __init unix98_pty_init(void)
38309 panic("Couldn't register Unix98 pts driver");
38310
38311 /* Now create the /dev/ptmx special device */
38312 + pax_open_kernel();
38313 tty_default_fops(&ptmx_fops);
38314 - ptmx_fops.open = ptmx_open;
38315 + *(void **)&ptmx_fops.open = ptmx_open;
38316 + pax_close_kernel();
38317
38318 cdev_init(&ptmx_cdev, &ptmx_fops);
38319 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38320 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
38321 index 777d5f9..56d67ca 100644
38322 --- a/drivers/tty/rocket.c
38323 +++ b/drivers/tty/rocket.c
38324 @@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
38325 tty->driver_data = info;
38326 tty_port_tty_set(port, tty);
38327
38328 - if (port->count++ == 0) {
38329 + if (atomic_inc_return(&port->count) == 1) {
38330 atomic_inc(&rp_num_ports_open);
38331
38332 #ifdef ROCKET_DEBUG_OPEN
38333 @@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
38334 #endif
38335 }
38336 #ifdef ROCKET_DEBUG_OPEN
38337 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
38338 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
38339 #endif
38340
38341 /*
38342 @@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
38343 spin_unlock_irqrestore(&info->port.lock, flags);
38344 return;
38345 }
38346 - if (info->port.count)
38347 + if (atomic_read(&info->port.count))
38348 atomic_dec(&rp_num_ports_open);
38349 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
38350 spin_unlock_irqrestore(&info->port.lock, flags);
38351 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38352 index 2b42a01..32a2ed3 100644
38353 --- a/drivers/tty/serial/kgdboc.c
38354 +++ b/drivers/tty/serial/kgdboc.c
38355 @@ -24,8 +24,9 @@
38356 #define MAX_CONFIG_LEN 40
38357
38358 static struct kgdb_io kgdboc_io_ops;
38359 +static struct kgdb_io kgdboc_io_ops_console;
38360
38361 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38362 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38363 static int configured = -1;
38364
38365 static char config[MAX_CONFIG_LEN];
38366 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38367 kgdboc_unregister_kbd();
38368 if (configured == 1)
38369 kgdb_unregister_io_module(&kgdboc_io_ops);
38370 + else if (configured == 2)
38371 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
38372 }
38373
38374 static int configure_kgdboc(void)
38375 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38376 int err;
38377 char *cptr = config;
38378 struct console *cons;
38379 + int is_console = 0;
38380
38381 err = kgdboc_option_setup(config);
38382 if (err || !strlen(config) || isspace(config[0]))
38383 goto noconfig;
38384
38385 err = -ENODEV;
38386 - kgdboc_io_ops.is_console = 0;
38387 kgdb_tty_driver = NULL;
38388
38389 kgdboc_use_kms = 0;
38390 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38391 int idx;
38392 if (cons->device && cons->device(cons, &idx) == p &&
38393 idx == tty_line) {
38394 - kgdboc_io_ops.is_console = 1;
38395 + is_console = 1;
38396 break;
38397 }
38398 cons = cons->next;
38399 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38400 kgdb_tty_line = tty_line;
38401
38402 do_register:
38403 - err = kgdb_register_io_module(&kgdboc_io_ops);
38404 + if (is_console) {
38405 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38406 + configured = 2;
38407 + } else {
38408 + err = kgdb_register_io_module(&kgdboc_io_ops);
38409 + configured = 1;
38410 + }
38411 if (err)
38412 goto noconfig;
38413
38414 - configured = 1;
38415 -
38416 return 0;
38417
38418 noconfig:
38419 @@ -213,7 +220,7 @@ noconfig:
38420 static int __init init_kgdboc(void)
38421 {
38422 /* Already configured? */
38423 - if (configured == 1)
38424 + if (configured >= 1)
38425 return 0;
38426
38427 return configure_kgdboc();
38428 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38429 if (config[len - 1] == '\n')
38430 config[len - 1] = '\0';
38431
38432 - if (configured == 1)
38433 + if (configured >= 1)
38434 cleanup_kgdboc();
38435
38436 /* Go and configure with the new params. */
38437 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38438 .post_exception = kgdboc_post_exp_handler,
38439 };
38440
38441 +static struct kgdb_io kgdboc_io_ops_console = {
38442 + .name = "kgdboc",
38443 + .read_char = kgdboc_get_char,
38444 + .write_char = kgdboc_put_char,
38445 + .pre_exception = kgdboc_pre_exp_handler,
38446 + .post_exception = kgdboc_post_exp_handler,
38447 + .is_console = 1
38448 +};
38449 +
38450 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38451 /* This is only available if kgdboc is a built in for early debugging */
38452 static int __init kgdboc_early_init(char *opt)
38453 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
38454 index 246b823..9e0db76 100644
38455 --- a/drivers/tty/serial/serial_core.c
38456 +++ b/drivers/tty/serial/serial_core.c
38457 @@ -1392,7 +1392,7 @@ static void uart_hangup(struct tty_struct *tty)
38458 uart_flush_buffer(tty);
38459 uart_shutdown(tty, state);
38460 spin_lock_irqsave(&port->lock, flags);
38461 - port->count = 0;
38462 + atomic_set(&port->count, 0);
38463 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
38464 spin_unlock_irqrestore(&port->lock, flags);
38465 tty_port_tty_set(port, NULL);
38466 @@ -1488,7 +1488,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38467 goto end;
38468 }
38469
38470 - port->count++;
38471 + atomic_inc(&port->count);
38472 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
38473 retval = -ENXIO;
38474 goto err_dec_count;
38475 @@ -1515,7 +1515,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38476 /*
38477 * Make sure the device is in D0 state.
38478 */
38479 - if (port->count == 1)
38480 + if (atomic_read(&port->count) == 1)
38481 uart_change_pm(state, 0);
38482
38483 /*
38484 @@ -1533,7 +1533,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38485 end:
38486 return retval;
38487 err_dec_count:
38488 - port->count--;
38489 + atomic_dec(&port->count);
38490 mutex_unlock(&port->mutex);
38491 goto end;
38492 }
38493 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
38494 index 593d40a..bdc61f3 100644
38495 --- a/drivers/tty/synclink.c
38496 +++ b/drivers/tty/synclink.c
38497 @@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38498
38499 if (debug_level >= DEBUG_LEVEL_INFO)
38500 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
38501 - __FILE__,__LINE__, info->device_name, info->port.count);
38502 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38503
38504 if (tty_port_close_start(&info->port, tty, filp) == 0)
38505 goto cleanup;
38506 @@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38507 cleanup:
38508 if (debug_level >= DEBUG_LEVEL_INFO)
38509 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
38510 - tty->driver->name, info->port.count);
38511 + tty->driver->name, atomic_read(&info->port.count));
38512
38513 } /* end of mgsl_close() */
38514
38515 @@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
38516
38517 mgsl_flush_buffer(tty);
38518 shutdown(info);
38519 -
38520 - info->port.count = 0;
38521 +
38522 + atomic_set(&info->port.count, 0);
38523 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38524 info->port.tty = NULL;
38525
38526 @@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38527
38528 if (debug_level >= DEBUG_LEVEL_INFO)
38529 printk("%s(%d):block_til_ready before block on %s count=%d\n",
38530 - __FILE__,__LINE__, tty->driver->name, port->count );
38531 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38532
38533 spin_lock_irqsave(&info->irq_spinlock, flags);
38534 if (!tty_hung_up_p(filp)) {
38535 extra_count = true;
38536 - port->count--;
38537 + atomic_dec(&port->count);
38538 }
38539 spin_unlock_irqrestore(&info->irq_spinlock, flags);
38540 port->blocked_open++;
38541 @@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38542
38543 if (debug_level >= DEBUG_LEVEL_INFO)
38544 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
38545 - __FILE__,__LINE__, tty->driver->name, port->count );
38546 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38547
38548 tty_unlock();
38549 schedule();
38550 @@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38551
38552 /* FIXME: Racy on hangup during close wait */
38553 if (extra_count)
38554 - port->count++;
38555 + atomic_inc(&port->count);
38556 port->blocked_open--;
38557
38558 if (debug_level >= DEBUG_LEVEL_INFO)
38559 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
38560 - __FILE__,__LINE__, tty->driver->name, port->count );
38561 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38562
38563 if (!retval)
38564 port->flags |= ASYNC_NORMAL_ACTIVE;
38565 @@ -3398,7 +3398,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38566
38567 if (debug_level >= DEBUG_LEVEL_INFO)
38568 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
38569 - __FILE__,__LINE__,tty->driver->name, info->port.count);
38570 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38571
38572 /* If port is closing, signal caller to try again */
38573 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38574 @@ -3417,10 +3417,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38575 spin_unlock_irqrestore(&info->netlock, flags);
38576 goto cleanup;
38577 }
38578 - info->port.count++;
38579 + atomic_inc(&info->port.count);
38580 spin_unlock_irqrestore(&info->netlock, flags);
38581
38582 - if (info->port.count == 1) {
38583 + if (atomic_read(&info->port.count) == 1) {
38584 /* 1st open on this device, init hardware */
38585 retval = startup(info);
38586 if (retval < 0)
38587 @@ -3444,8 +3444,8 @@ cleanup:
38588 if (retval) {
38589 if (tty->count == 1)
38590 info->port.tty = NULL; /* tty layer will release tty struct */
38591 - if(info->port.count)
38592 - info->port.count--;
38593 + if (atomic_read(&info->port.count))
38594 + atomic_dec(&info->port.count);
38595 }
38596
38597 return retval;
38598 @@ -7653,7 +7653,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38599 unsigned short new_crctype;
38600
38601 /* return error if TTY interface open */
38602 - if (info->port.count)
38603 + if (atomic_read(&info->port.count))
38604 return -EBUSY;
38605
38606 switch (encoding)
38607 @@ -7748,7 +7748,7 @@ static int hdlcdev_open(struct net_device *dev)
38608
38609 /* arbitrate between network and tty opens */
38610 spin_lock_irqsave(&info->netlock, flags);
38611 - if (info->port.count != 0 || info->netcount != 0) {
38612 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38613 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38614 spin_unlock_irqrestore(&info->netlock, flags);
38615 return -EBUSY;
38616 @@ -7834,7 +7834,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38617 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38618
38619 /* return error if TTY interface open */
38620 - if (info->port.count)
38621 + if (atomic_read(&info->port.count))
38622 return -EBUSY;
38623
38624 if (cmd != SIOCWANDEV)
38625 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
38626 index aa1debf..9297a16 100644
38627 --- a/drivers/tty/synclink_gt.c
38628 +++ b/drivers/tty/synclink_gt.c
38629 @@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38630 tty->driver_data = info;
38631 info->port.tty = tty;
38632
38633 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
38634 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
38635
38636 /* If port is closing, signal caller to try again */
38637 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38638 @@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38639 mutex_unlock(&info->port.mutex);
38640 goto cleanup;
38641 }
38642 - info->port.count++;
38643 + atomic_inc(&info->port.count);
38644 spin_unlock_irqrestore(&info->netlock, flags);
38645
38646 - if (info->port.count == 1) {
38647 + if (atomic_read(&info->port.count) == 1) {
38648 /* 1st open on this device, init hardware */
38649 retval = startup(info);
38650 if (retval < 0) {
38651 @@ -716,8 +716,8 @@ cleanup:
38652 if (retval) {
38653 if (tty->count == 1)
38654 info->port.tty = NULL; /* tty layer will release tty struct */
38655 - if(info->port.count)
38656 - info->port.count--;
38657 + if(atomic_read(&info->port.count))
38658 + atomic_dec(&info->port.count);
38659 }
38660
38661 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
38662 @@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38663
38664 if (sanity_check(info, tty->name, "close"))
38665 return;
38666 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
38667 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
38668
38669 if (tty_port_close_start(&info->port, tty, filp) == 0)
38670 goto cleanup;
38671 @@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38672 tty_port_close_end(&info->port, tty);
38673 info->port.tty = NULL;
38674 cleanup:
38675 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
38676 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
38677 }
38678
38679 static void hangup(struct tty_struct *tty)
38680 @@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
38681 shutdown(info);
38682
38683 spin_lock_irqsave(&info->port.lock, flags);
38684 - info->port.count = 0;
38685 + atomic_set(&info->port.count, 0);
38686 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38687 info->port.tty = NULL;
38688 spin_unlock_irqrestore(&info->port.lock, flags);
38689 @@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38690 unsigned short new_crctype;
38691
38692 /* return error if TTY interface open */
38693 - if (info->port.count)
38694 + if (atomic_read(&info->port.count))
38695 return -EBUSY;
38696
38697 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
38698 @@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
38699
38700 /* arbitrate between network and tty opens */
38701 spin_lock_irqsave(&info->netlock, flags);
38702 - if (info->port.count != 0 || info->netcount != 0) {
38703 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38704 DBGINFO(("%s hdlc_open busy\n", dev->name));
38705 spin_unlock_irqrestore(&info->netlock, flags);
38706 return -EBUSY;
38707 @@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38708 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
38709
38710 /* return error if TTY interface open */
38711 - if (info->port.count)
38712 + if (atomic_read(&info->port.count))
38713 return -EBUSY;
38714
38715 if (cmd != SIOCWANDEV)
38716 @@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
38717 if (port == NULL)
38718 continue;
38719 spin_lock(&port->lock);
38720 - if ((port->port.count || port->netcount) &&
38721 + if ((atomic_read(&port->port.count) || port->netcount) &&
38722 port->pending_bh && !port->bh_running &&
38723 !port->bh_requested) {
38724 DBGISR(("%s bh queued\n", port->device_name));
38725 @@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38726 spin_lock_irqsave(&info->lock, flags);
38727 if (!tty_hung_up_p(filp)) {
38728 extra_count = true;
38729 - port->count--;
38730 + atomic_dec(&port->count);
38731 }
38732 spin_unlock_irqrestore(&info->lock, flags);
38733 port->blocked_open++;
38734 @@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38735 remove_wait_queue(&port->open_wait, &wait);
38736
38737 if (extra_count)
38738 - port->count++;
38739 + atomic_inc(&port->count);
38740 port->blocked_open--;
38741
38742 if (!retval)
38743 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
38744 index a3dddc1..8905ab2 100644
38745 --- a/drivers/tty/synclinkmp.c
38746 +++ b/drivers/tty/synclinkmp.c
38747 @@ -742,7 +742,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38748
38749 if (debug_level >= DEBUG_LEVEL_INFO)
38750 printk("%s(%d):%s open(), old ref count = %d\n",
38751 - __FILE__,__LINE__,tty->driver->name, info->port.count);
38752 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38753
38754 /* If port is closing, signal caller to try again */
38755 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38756 @@ -761,10 +761,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38757 spin_unlock_irqrestore(&info->netlock, flags);
38758 goto cleanup;
38759 }
38760 - info->port.count++;
38761 + atomic_inc(&info->port.count);
38762 spin_unlock_irqrestore(&info->netlock, flags);
38763
38764 - if (info->port.count == 1) {
38765 + if (atomic_read(&info->port.count) == 1) {
38766 /* 1st open on this device, init hardware */
38767 retval = startup(info);
38768 if (retval < 0)
38769 @@ -788,8 +788,8 @@ cleanup:
38770 if (retval) {
38771 if (tty->count == 1)
38772 info->port.tty = NULL; /* tty layer will release tty struct */
38773 - if(info->port.count)
38774 - info->port.count--;
38775 + if(atomic_read(&info->port.count))
38776 + atomic_dec(&info->port.count);
38777 }
38778
38779 return retval;
38780 @@ -807,7 +807,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38781
38782 if (debug_level >= DEBUG_LEVEL_INFO)
38783 printk("%s(%d):%s close() entry, count=%d\n",
38784 - __FILE__,__LINE__, info->device_name, info->port.count);
38785 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38786
38787 if (tty_port_close_start(&info->port, tty, filp) == 0)
38788 goto cleanup;
38789 @@ -826,7 +826,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38790 cleanup:
38791 if (debug_level >= DEBUG_LEVEL_INFO)
38792 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
38793 - tty->driver->name, info->port.count);
38794 + tty->driver->name, atomic_read(&info->port.count));
38795 }
38796
38797 /* Called by tty_hangup() when a hangup is signaled.
38798 @@ -849,7 +849,7 @@ static void hangup(struct tty_struct *tty)
38799 shutdown(info);
38800
38801 spin_lock_irqsave(&info->port.lock, flags);
38802 - info->port.count = 0;
38803 + atomic_set(&info->port.count, 0);
38804 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38805 info->port.tty = NULL;
38806 spin_unlock_irqrestore(&info->port.lock, flags);
38807 @@ -1557,7 +1557,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38808 unsigned short new_crctype;
38809
38810 /* return error if TTY interface open */
38811 - if (info->port.count)
38812 + if (atomic_read(&info->port.count))
38813 return -EBUSY;
38814
38815 switch (encoding)
38816 @@ -1652,7 +1652,7 @@ static int hdlcdev_open(struct net_device *dev)
38817
38818 /* arbitrate between network and tty opens */
38819 spin_lock_irqsave(&info->netlock, flags);
38820 - if (info->port.count != 0 || info->netcount != 0) {
38821 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38822 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38823 spin_unlock_irqrestore(&info->netlock, flags);
38824 return -EBUSY;
38825 @@ -1738,7 +1738,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38826 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38827
38828 /* return error if TTY interface open */
38829 - if (info->port.count)
38830 + if (atomic_read(&info->port.count))
38831 return -EBUSY;
38832
38833 if (cmd != SIOCWANDEV)
38834 @@ -2623,7 +2623,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
38835 * do not request bottom half processing if the
38836 * device is not open in a normal mode.
38837 */
38838 - if ( port && (port->port.count || port->netcount) &&
38839 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
38840 port->pending_bh && !port->bh_running &&
38841 !port->bh_requested ) {
38842 if ( debug_level >= DEBUG_LEVEL_ISR )
38843 @@ -3321,12 +3321,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38844
38845 if (debug_level >= DEBUG_LEVEL_INFO)
38846 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
38847 - __FILE__,__LINE__, tty->driver->name, port->count );
38848 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38849
38850 spin_lock_irqsave(&info->lock, flags);
38851 if (!tty_hung_up_p(filp)) {
38852 extra_count = true;
38853 - port->count--;
38854 + atomic_dec(&port->count);
38855 }
38856 spin_unlock_irqrestore(&info->lock, flags);
38857 port->blocked_open++;
38858 @@ -3355,7 +3355,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38859
38860 if (debug_level >= DEBUG_LEVEL_INFO)
38861 printk("%s(%d):%s block_til_ready() count=%d\n",
38862 - __FILE__,__LINE__, tty->driver->name, port->count );
38863 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38864
38865 tty_unlock();
38866 schedule();
38867 @@ -3366,12 +3366,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38868 remove_wait_queue(&port->open_wait, &wait);
38869
38870 if (extra_count)
38871 - port->count++;
38872 + atomic_inc(&port->count);
38873 port->blocked_open--;
38874
38875 if (debug_level >= DEBUG_LEVEL_INFO)
38876 printk("%s(%d):%s block_til_ready() after, count=%d\n",
38877 - __FILE__,__LINE__, tty->driver->name, port->count );
38878 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38879
38880 if (!retval)
38881 port->flags |= ASYNC_NORMAL_ACTIVE;
38882 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38883 index 05728894..b9d44c6 100644
38884 --- a/drivers/tty/sysrq.c
38885 +++ b/drivers/tty/sysrq.c
38886 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38887 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38888 size_t count, loff_t *ppos)
38889 {
38890 - if (count) {
38891 + if (count && capable(CAP_SYS_ADMIN)) {
38892 char c;
38893
38894 if (get_user(c, buf))
38895 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38896 index b425c79..08a3f06 100644
38897 --- a/drivers/tty/tty_io.c
38898 +++ b/drivers/tty/tty_io.c
38899 @@ -3283,7 +3283,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38900
38901 void tty_default_fops(struct file_operations *fops)
38902 {
38903 - *fops = tty_fops;
38904 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38905 }
38906
38907 /*
38908 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38909 index 9911eb6..5abe0e1 100644
38910 --- a/drivers/tty/tty_ldisc.c
38911 +++ b/drivers/tty/tty_ldisc.c
38912 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38913 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38914 struct tty_ldisc_ops *ldo = ld->ops;
38915
38916 - ldo->refcount--;
38917 + atomic_dec(&ldo->refcount);
38918 module_put(ldo->owner);
38919 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38920
38921 @@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38922 spin_lock_irqsave(&tty_ldisc_lock, flags);
38923 tty_ldiscs[disc] = new_ldisc;
38924 new_ldisc->num = disc;
38925 - new_ldisc->refcount = 0;
38926 + atomic_set(&new_ldisc->refcount, 0);
38927 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38928
38929 return ret;
38930 @@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
38931 return -EINVAL;
38932
38933 spin_lock_irqsave(&tty_ldisc_lock, flags);
38934 - if (tty_ldiscs[disc]->refcount)
38935 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38936 ret = -EBUSY;
38937 else
38938 tty_ldiscs[disc] = NULL;
38939 @@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38940 if (ldops) {
38941 ret = ERR_PTR(-EAGAIN);
38942 if (try_module_get(ldops->owner)) {
38943 - ldops->refcount++;
38944 + atomic_inc(&ldops->refcount);
38945 ret = ldops;
38946 }
38947 }
38948 @@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38949 unsigned long flags;
38950
38951 spin_lock_irqsave(&tty_ldisc_lock, flags);
38952 - ldops->refcount--;
38953 + atomic_dec(&ldops->refcount);
38954 module_put(ldops->owner);
38955 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38956 }
38957 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
38958 index bf6e238..d401c04 100644
38959 --- a/drivers/tty/tty_port.c
38960 +++ b/drivers/tty/tty_port.c
38961 @@ -138,7 +138,7 @@ void tty_port_hangup(struct tty_port *port)
38962 unsigned long flags;
38963
38964 spin_lock_irqsave(&port->lock, flags);
38965 - port->count = 0;
38966 + atomic_set(&port->count, 0);
38967 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38968 if (port->tty) {
38969 set_bit(TTY_IO_ERROR, &port->tty->flags);
38970 @@ -264,7 +264,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38971 /* The port lock protects the port counts */
38972 spin_lock_irqsave(&port->lock, flags);
38973 if (!tty_hung_up_p(filp))
38974 - port->count--;
38975 + atomic_dec(&port->count);
38976 port->blocked_open++;
38977 spin_unlock_irqrestore(&port->lock, flags);
38978
38979 @@ -306,7 +306,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38980 we must not mess that up further */
38981 spin_lock_irqsave(&port->lock, flags);
38982 if (!tty_hung_up_p(filp))
38983 - port->count++;
38984 + atomic_inc(&port->count);
38985 port->blocked_open--;
38986 if (retval == 0)
38987 port->flags |= ASYNC_NORMAL_ACTIVE;
38988 @@ -326,19 +326,19 @@ int tty_port_close_start(struct tty_port *port,
38989 return 0;
38990 }
38991
38992 - if (tty->count == 1 && port->count != 1) {
38993 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
38994 printk(KERN_WARNING
38995 "tty_port_close_start: tty->count = 1 port count = %d.\n",
38996 - port->count);
38997 - port->count = 1;
38998 + atomic_read(&port->count));
38999 + atomic_set(&port->count, 1);
39000 }
39001 - if (--port->count < 0) {
39002 + if (atomic_dec_return(&port->count) < 0) {
39003 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
39004 - port->count);
39005 - port->count = 0;
39006 + atomic_read(&port->count));
39007 + atomic_set(&port->count, 0);
39008 }
39009
39010 - if (port->count) {
39011 + if (atomic_read(&port->count)) {
39012 spin_unlock_irqrestore(&port->lock, flags);
39013 if (port->ops->drop)
39014 port->ops->drop(port);
39015 @@ -418,7 +418,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
39016 {
39017 spin_lock_irq(&port->lock);
39018 if (!tty_hung_up_p(filp))
39019 - ++port->count;
39020 + atomic_inc(&port->count);
39021 spin_unlock_irq(&port->lock);
39022 tty_port_tty_set(port, tty);
39023
39024 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
39025 index 48cc6f2..85584dd 100644
39026 --- a/drivers/tty/vt/keyboard.c
39027 +++ b/drivers/tty/vt/keyboard.c
39028 @@ -659,6 +659,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
39029 kbd->kbdmode == VC_OFF) &&
39030 value != KVAL(K_SAK))
39031 return; /* SAK is allowed even in raw mode */
39032 +
39033 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39034 + {
39035 + void *func = fn_handler[value];
39036 + if (func == fn_show_state || func == fn_show_ptregs ||
39037 + func == fn_show_mem)
39038 + return;
39039 + }
39040 +#endif
39041 +
39042 fn_handler[value](vc);
39043 }
39044
39045 @@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
39046 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
39047 return -EFAULT;
39048
39049 - if (!capable(CAP_SYS_TTY_CONFIG))
39050 - perm = 0;
39051 -
39052 switch (cmd) {
39053 case KDGKBENT:
39054 /* Ensure another thread doesn't free it under us */
39055 @@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
39056 spin_unlock_irqrestore(&kbd_event_lock, flags);
39057 return put_user(val, &user_kbe->kb_value);
39058 case KDSKBENT:
39059 + if (!capable(CAP_SYS_TTY_CONFIG))
39060 + perm = 0;
39061 +
39062 if (!perm)
39063 return -EPERM;
39064 if (!i && v == K_NOSUCHMAP) {
39065 @@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
39066 int i, j, k;
39067 int ret;
39068
39069 - if (!capable(CAP_SYS_TTY_CONFIG))
39070 - perm = 0;
39071 -
39072 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
39073 if (!kbs) {
39074 ret = -ENOMEM;
39075 @@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
39076 kfree(kbs);
39077 return ((p && *p) ? -EOVERFLOW : 0);
39078 case KDSKBSENT:
39079 + if (!capable(CAP_SYS_TTY_CONFIG))
39080 + perm = 0;
39081 +
39082 if (!perm) {
39083 ret = -EPERM;
39084 goto reterr;
39085 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
39086 index a783d53..cb30d94 100644
39087 --- a/drivers/uio/uio.c
39088 +++ b/drivers/uio/uio.c
39089 @@ -25,6 +25,7 @@
39090 #include <linux/kobject.h>
39091 #include <linux/cdev.h>
39092 #include <linux/uio_driver.h>
39093 +#include <asm/local.h>
39094
39095 #define UIO_MAX_DEVICES (1U << MINORBITS)
39096
39097 @@ -32,10 +33,10 @@ struct uio_device {
39098 struct module *owner;
39099 struct device *dev;
39100 int minor;
39101 - atomic_t event;
39102 + atomic_unchecked_t event;
39103 struct fasync_struct *async_queue;
39104 wait_queue_head_t wait;
39105 - int vma_count;
39106 + local_t vma_count;
39107 struct uio_info *info;
39108 struct kobject *map_dir;
39109 struct kobject *portio_dir;
39110 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
39111 struct device_attribute *attr, char *buf)
39112 {
39113 struct uio_device *idev = dev_get_drvdata(dev);
39114 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
39115 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
39116 }
39117
39118 static struct device_attribute uio_class_attributes[] = {
39119 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
39120 {
39121 struct uio_device *idev = info->uio_dev;
39122
39123 - atomic_inc(&idev->event);
39124 + atomic_inc_unchecked(&idev->event);
39125 wake_up_interruptible(&idev->wait);
39126 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
39127 }
39128 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
39129 }
39130
39131 listener->dev = idev;
39132 - listener->event_count = atomic_read(&idev->event);
39133 + listener->event_count = atomic_read_unchecked(&idev->event);
39134 filep->private_data = listener;
39135
39136 if (idev->info->open) {
39137 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
39138 return -EIO;
39139
39140 poll_wait(filep, &idev->wait, wait);
39141 - if (listener->event_count != atomic_read(&idev->event))
39142 + if (listener->event_count != atomic_read_unchecked(&idev->event))
39143 return POLLIN | POLLRDNORM;
39144 return 0;
39145 }
39146 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
39147 do {
39148 set_current_state(TASK_INTERRUPTIBLE);
39149
39150 - event_count = atomic_read(&idev->event);
39151 + event_count = atomic_read_unchecked(&idev->event);
39152 if (event_count != listener->event_count) {
39153 if (copy_to_user(buf, &event_count, count))
39154 retval = -EFAULT;
39155 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
39156 static void uio_vma_open(struct vm_area_struct *vma)
39157 {
39158 struct uio_device *idev = vma->vm_private_data;
39159 - idev->vma_count++;
39160 + local_inc(&idev->vma_count);
39161 }
39162
39163 static void uio_vma_close(struct vm_area_struct *vma)
39164 {
39165 struct uio_device *idev = vma->vm_private_data;
39166 - idev->vma_count--;
39167 + local_dec(&idev->vma_count);
39168 }
39169
39170 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39171 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
39172 idev->owner = owner;
39173 idev->info = info;
39174 init_waitqueue_head(&idev->wait);
39175 - atomic_set(&idev->event, 0);
39176 + atomic_set_unchecked(&idev->event, 0);
39177
39178 ret = uio_get_minor(idev);
39179 if (ret)
39180 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
39181 index b7eb86a..36d28af 100644
39182 --- a/drivers/usb/atm/cxacru.c
39183 +++ b/drivers/usb/atm/cxacru.c
39184 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
39185 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
39186 if (ret < 2)
39187 return -EINVAL;
39188 - if (index < 0 || index > 0x7f)
39189 + if (index > 0x7f)
39190 return -EINVAL;
39191 pos += tmp;
39192
39193 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
39194 index ee62b35..b663594 100644
39195 --- a/drivers/usb/atm/usbatm.c
39196 +++ b/drivers/usb/atm/usbatm.c
39197 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39198 if (printk_ratelimit())
39199 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
39200 __func__, vpi, vci);
39201 - atomic_inc(&vcc->stats->rx_err);
39202 + atomic_inc_unchecked(&vcc->stats->rx_err);
39203 return;
39204 }
39205
39206 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39207 if (length > ATM_MAX_AAL5_PDU) {
39208 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
39209 __func__, length, vcc);
39210 - atomic_inc(&vcc->stats->rx_err);
39211 + atomic_inc_unchecked(&vcc->stats->rx_err);
39212 goto out;
39213 }
39214
39215 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39216 if (sarb->len < pdu_length) {
39217 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
39218 __func__, pdu_length, sarb->len, vcc);
39219 - atomic_inc(&vcc->stats->rx_err);
39220 + atomic_inc_unchecked(&vcc->stats->rx_err);
39221 goto out;
39222 }
39223
39224 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
39225 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
39226 __func__, vcc);
39227 - atomic_inc(&vcc->stats->rx_err);
39228 + atomic_inc_unchecked(&vcc->stats->rx_err);
39229 goto out;
39230 }
39231
39232 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39233 if (printk_ratelimit())
39234 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
39235 __func__, length);
39236 - atomic_inc(&vcc->stats->rx_drop);
39237 + atomic_inc_unchecked(&vcc->stats->rx_drop);
39238 goto out;
39239 }
39240
39241 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39242
39243 vcc->push(vcc, skb);
39244
39245 - atomic_inc(&vcc->stats->rx);
39246 + atomic_inc_unchecked(&vcc->stats->rx);
39247 out:
39248 skb_trim(sarb, 0);
39249 }
39250 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
39251 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
39252
39253 usbatm_pop(vcc, skb);
39254 - atomic_inc(&vcc->stats->tx);
39255 + atomic_inc_unchecked(&vcc->stats->tx);
39256
39257 skb = skb_dequeue(&instance->sndqueue);
39258 }
39259 @@ -770,11 +770,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
39260 if (!left--)
39261 return sprintf(page,
39262 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
39263 - atomic_read(&atm_dev->stats.aal5.tx),
39264 - atomic_read(&atm_dev->stats.aal5.tx_err),
39265 - atomic_read(&atm_dev->stats.aal5.rx),
39266 - atomic_read(&atm_dev->stats.aal5.rx_err),
39267 - atomic_read(&atm_dev->stats.aal5.rx_drop));
39268 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
39269 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
39270 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
39271 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
39272 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
39273
39274 if (!left--) {
39275 if (instance->disconnected)
39276 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
39277 index 3440812..2a4ef1f 100644
39278 --- a/drivers/usb/core/devices.c
39279 +++ b/drivers/usb/core/devices.c
39280 @@ -126,7 +126,7 @@ static const char format_endpt[] =
39281 * time it gets called.
39282 */
39283 static struct device_connect_event {
39284 - atomic_t count;
39285 + atomic_unchecked_t count;
39286 wait_queue_head_t wait;
39287 } device_event = {
39288 .count = ATOMIC_INIT(1),
39289 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
39290
39291 void usbfs_conn_disc_event(void)
39292 {
39293 - atomic_add(2, &device_event.count);
39294 + atomic_add_unchecked(2, &device_event.count);
39295 wake_up(&device_event.wait);
39296 }
39297
39298 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
39299
39300 poll_wait(file, &device_event.wait, wait);
39301
39302 - event_count = atomic_read(&device_event.count);
39303 + event_count = atomic_read_unchecked(&device_event.count);
39304 if (file->f_version != event_count) {
39305 file->f_version = event_count;
39306 return POLLIN | POLLRDNORM;
39307 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
39308 index 347bb05..63e1b73 100644
39309 --- a/drivers/usb/early/ehci-dbgp.c
39310 +++ b/drivers/usb/early/ehci-dbgp.c
39311 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
39312
39313 #ifdef CONFIG_KGDB
39314 static struct kgdb_io kgdbdbgp_io_ops;
39315 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
39316 +static struct kgdb_io kgdbdbgp_io_ops_console;
39317 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
39318 #else
39319 #define dbgp_kgdb_mode (0)
39320 #endif
39321 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
39322 .write_char = kgdbdbgp_write_char,
39323 };
39324
39325 +static struct kgdb_io kgdbdbgp_io_ops_console = {
39326 + .name = "kgdbdbgp",
39327 + .read_char = kgdbdbgp_read_char,
39328 + .write_char = kgdbdbgp_write_char,
39329 + .is_console = 1
39330 +};
39331 +
39332 static int kgdbdbgp_wait_time;
39333
39334 static int __init kgdbdbgp_parse_config(char *str)
39335 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
39336 ptr++;
39337 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
39338 }
39339 - kgdb_register_io_module(&kgdbdbgp_io_ops);
39340 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
39341 + if (early_dbgp_console.index != -1)
39342 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
39343 + else
39344 + kgdb_register_io_module(&kgdbdbgp_io_ops);
39345
39346 return 0;
39347 }
39348 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
39349 index 5b3f5ff..6e00893 100644
39350 --- a/drivers/usb/gadget/u_serial.c
39351 +++ b/drivers/usb/gadget/u_serial.c
39352 @@ -731,9 +731,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
39353 spin_lock_irq(&port->port_lock);
39354
39355 /* already open? Great. */
39356 - if (port->port.count) {
39357 + if (atomic_read(&port->port.count)) {
39358 status = 0;
39359 - port->port.count++;
39360 + atomic_inc(&port->port.count);
39361
39362 /* currently opening/closing? wait ... */
39363 } else if (port->openclose) {
39364 @@ -792,7 +792,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
39365 tty->driver_data = port;
39366 port->port.tty = tty;
39367
39368 - port->port.count = 1;
39369 + atomic_set(&port->port.count, 1);
39370 port->openclose = false;
39371
39372 /* if connected, start the I/O stream */
39373 @@ -834,11 +834,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39374
39375 spin_lock_irq(&port->port_lock);
39376
39377 - if (port->port.count != 1) {
39378 - if (port->port.count == 0)
39379 + if (atomic_read(&port->port.count) != 1) {
39380 + if (atomic_read(&port->port.count) == 0)
39381 WARN_ON(1);
39382 else
39383 - --port->port.count;
39384 + atomic_dec(&port->port.count);
39385 goto exit;
39386 }
39387
39388 @@ -848,7 +848,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39389 * and sleep if necessary
39390 */
39391 port->openclose = true;
39392 - port->port.count = 0;
39393 + atomic_set(&port->port.count, 0);
39394
39395 gser = port->port_usb;
39396 if (gser && gser->disconnect)
39397 @@ -1152,7 +1152,7 @@ static int gs_closed(struct gs_port *port)
39398 int cond;
39399
39400 spin_lock_irq(&port->port_lock);
39401 - cond = (port->port.count == 0) && !port->openclose;
39402 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
39403 spin_unlock_irq(&port->port_lock);
39404 return cond;
39405 }
39406 @@ -1265,7 +1265,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
39407 /* if it's already open, start I/O ... and notify the serial
39408 * protocol about open/close status (connect/disconnect).
39409 */
39410 - if (port->port.count) {
39411 + if (atomic_read(&port->port.count)) {
39412 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
39413 gs_start_io(port);
39414 if (gser->connect)
39415 @@ -1312,7 +1312,7 @@ void gserial_disconnect(struct gserial *gser)
39416
39417 port->port_usb = NULL;
39418 gser->ioport = NULL;
39419 - if (port->port.count > 0 || port->openclose) {
39420 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
39421 wake_up_interruptible(&port->drain_wait);
39422 if (port->port.tty)
39423 tty_hangup(port->port.tty);
39424 @@ -1328,7 +1328,7 @@ void gserial_disconnect(struct gserial *gser)
39425
39426 /* finally, free any unused/unusable I/O buffers */
39427 spin_lock_irqsave(&port->port_lock, flags);
39428 - if (port->port.count == 0 && !port->openclose)
39429 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
39430 gs_buf_free(&port->port_write_buf);
39431 gs_free_requests(gser->out, &port->read_pool, NULL);
39432 gs_free_requests(gser->out, &port->read_queue, NULL);
39433 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
39434 index b9cca6d..75c75df 100644
39435 --- a/drivers/usb/serial/console.c
39436 +++ b/drivers/usb/serial/console.c
39437 @@ -127,7 +127,7 @@ static int usb_console_setup(struct console *co, char *options)
39438
39439 info->port = port;
39440
39441 - ++port->port.count;
39442 + atomic_inc(&port->port.count);
39443 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
39444 if (serial->type->set_termios) {
39445 /*
39446 @@ -177,7 +177,7 @@ static int usb_console_setup(struct console *co, char *options)
39447 }
39448 /* Now that any required fake tty operations are completed restore
39449 * the tty port count */
39450 - --port->port.count;
39451 + atomic_dec(&port->port.count);
39452 /* The console is special in terms of closing the device so
39453 * indicate this port is now acting as a system console. */
39454 port->port.console = 1;
39455 @@ -190,7 +190,7 @@ static int usb_console_setup(struct console *co, char *options)
39456 free_tty:
39457 kfree(tty);
39458 reset_open_count:
39459 - port->port.count = 0;
39460 + atomic_set(&port->port.count, 0);
39461 usb_autopm_put_interface(serial->interface);
39462 error_get_interface:
39463 usb_serial_put(serial);
39464 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39465 index d6bea3e..60b250e 100644
39466 --- a/drivers/usb/wusbcore/wa-hc.h
39467 +++ b/drivers/usb/wusbcore/wa-hc.h
39468 @@ -192,7 +192,7 @@ struct wahc {
39469 struct list_head xfer_delayed_list;
39470 spinlock_t xfer_list_lock;
39471 struct work_struct xfer_work;
39472 - atomic_t xfer_id_count;
39473 + atomic_unchecked_t xfer_id_count;
39474 };
39475
39476
39477 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
39478 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39479 spin_lock_init(&wa->xfer_list_lock);
39480 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39481 - atomic_set(&wa->xfer_id_count, 1);
39482 + atomic_set_unchecked(&wa->xfer_id_count, 1);
39483 }
39484
39485 /**
39486 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
39487 index 57c01ab..8a05959 100644
39488 --- a/drivers/usb/wusbcore/wa-xfer.c
39489 +++ b/drivers/usb/wusbcore/wa-xfer.c
39490 @@ -296,7 +296,7 @@ out:
39491 */
39492 static void wa_xfer_id_init(struct wa_xfer *xfer)
39493 {
39494 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39495 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39496 }
39497
39498 /*
39499 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39500 index 112156f..eb81154 100644
39501 --- a/drivers/vhost/vhost.c
39502 +++ b/drivers/vhost/vhost.c
39503 @@ -635,7 +635,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
39504 return 0;
39505 }
39506
39507 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39508 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39509 {
39510 struct file *eventfp, *filep = NULL,
39511 *pollstart = NULL, *pollstop = NULL;
39512 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39513 index b0b2ac3..89a4399 100644
39514 --- a/drivers/video/aty/aty128fb.c
39515 +++ b/drivers/video/aty/aty128fb.c
39516 @@ -148,7 +148,7 @@ enum {
39517 };
39518
39519 /* Must match above enum */
39520 -static const char *r128_family[] __devinitdata = {
39521 +static const char *r128_family[] __devinitconst = {
39522 "AGP",
39523 "PCI",
39524 "PRO AGP",
39525 diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
39526 index 88e9204..fdefa8f 100644
39527 --- a/drivers/video/console/fbcon.c
39528 +++ b/drivers/video/console/fbcon.c
39529 @@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
39530
39531 while ((options = strsep(&this_opt, ",")) != NULL) {
39532 if (!strncmp(options, "font:", 5))
39533 - strcpy(fontname, options + 5);
39534 + strlcpy(fontname, options + 5, sizeof(fontname));
39535
39536 if (!strncmp(options, "scrollback:", 11)) {
39537 options += 11;
39538 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39539 index 5c3960d..15cf8fc 100644
39540 --- a/drivers/video/fbcmap.c
39541 +++ b/drivers/video/fbcmap.c
39542 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
39543 rc = -ENODEV;
39544 goto out;
39545 }
39546 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39547 - !info->fbops->fb_setcmap)) {
39548 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39549 rc = -EINVAL;
39550 goto out1;
39551 }
39552 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
39553 index 0dff12a..2ef47b3 100644
39554 --- a/drivers/video/fbmem.c
39555 +++ b/drivers/video/fbmem.c
39556 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39557 image->dx += image->width + 8;
39558 }
39559 } else if (rotate == FB_ROTATE_UD) {
39560 - for (x = 0; x < num && image->dx >= 0; x++) {
39561 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39562 info->fbops->fb_imageblit(info, image);
39563 image->dx -= image->width + 8;
39564 }
39565 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39566 image->dy += image->height + 8;
39567 }
39568 } else if (rotate == FB_ROTATE_CCW) {
39569 - for (x = 0; x < num && image->dy >= 0; x++) {
39570 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39571 info->fbops->fb_imageblit(info, image);
39572 image->dy -= image->height + 8;
39573 }
39574 @@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
39575 return -EFAULT;
39576 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39577 return -EINVAL;
39578 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39579 + if (con2fb.framebuffer >= FB_MAX)
39580 return -EINVAL;
39581 if (!registered_fb[con2fb.framebuffer])
39582 request_module("fb%d", con2fb.framebuffer);
39583 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39584 index 5a5d092..265c5ed 100644
39585 --- a/drivers/video/geode/gx1fb_core.c
39586 +++ b/drivers/video/geode/gx1fb_core.c
39587 @@ -29,7 +29,7 @@ static int crt_option = 1;
39588 static char panel_option[32] = "";
39589
39590 /* Modes relevant to the GX1 (taken from modedb.c) */
39591 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
39592 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
39593 /* 640x480-60 VESA */
39594 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39595 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
39596 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
39597 index 0fad23f..0e9afa4 100644
39598 --- a/drivers/video/gxt4500.c
39599 +++ b/drivers/video/gxt4500.c
39600 @@ -156,7 +156,7 @@ struct gxt4500_par {
39601 static char *mode_option;
39602
39603 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39604 -static const struct fb_videomode defaultmode __devinitdata = {
39605 +static const struct fb_videomode defaultmode __devinitconst = {
39606 .refresh = 60,
39607 .xres = 1280,
39608 .yres = 1024,
39609 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
39610 return 0;
39611 }
39612
39613 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39614 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39615 .id = "IBM GXT4500P",
39616 .type = FB_TYPE_PACKED_PIXELS,
39617 .visual = FB_VISUAL_PSEUDOCOLOR,
39618 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39619 index 7672d2e..b56437f 100644
39620 --- a/drivers/video/i810/i810_accel.c
39621 +++ b/drivers/video/i810/i810_accel.c
39622 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
39623 }
39624 }
39625 printk("ringbuffer lockup!!!\n");
39626 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39627 i810_report_error(mmio);
39628 par->dev_flags |= LOCKUP;
39629 info->pixmap.scan_align = 1;
39630 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
39631 index b83f361..2b05a91 100644
39632 --- a/drivers/video/i810/i810_main.c
39633 +++ b/drivers/video/i810/i810_main.c
39634 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
39635 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39636
39637 /* PCI */
39638 -static const char *i810_pci_list[] __devinitdata = {
39639 +static const char *i810_pci_list[] __devinitconst = {
39640 "Intel(R) 810 Framebuffer Device" ,
39641 "Intel(R) 810-DC100 Framebuffer Device" ,
39642 "Intel(R) 810E Framebuffer Device" ,
39643 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39644 index de36693..3c63fc2 100644
39645 --- a/drivers/video/jz4740_fb.c
39646 +++ b/drivers/video/jz4740_fb.c
39647 @@ -136,7 +136,7 @@ struct jzfb {
39648 uint32_t pseudo_palette[16];
39649 };
39650
39651 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39652 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39653 .id = "JZ4740 FB",
39654 .type = FB_TYPE_PACKED_PIXELS,
39655 .visual = FB_VISUAL_TRUECOLOR,
39656 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39657 index 3c14e43..eafa544 100644
39658 --- a/drivers/video/logo/logo_linux_clut224.ppm
39659 +++ b/drivers/video/logo/logo_linux_clut224.ppm
39660 @@ -1,1604 +1,1123 @@
39661 P3
39662 -# Standard 224-color Linux logo
39663 80 80
39664 255
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 0 0 0 0 0 0
39671 - 0 0 0 0 0 0 0 0 0 0 0 0
39672 - 0 0 0 0 0 0 0 0 0 0 0 0
39673 - 0 0 0 0 0 0 0 0 0 0 0 0
39674 - 6 6 6 6 6 6 10 10 10 10 10 10
39675 - 10 10 10 6 6 6 6 6 6 6 6 6
39676 - 0 0 0 0 0 0 0 0 0 0 0 0
39677 - 0 0 0 0 0 0 0 0 0 0 0 0
39678 - 0 0 0 0 0 0 0 0 0 0 0 0
39679 - 0 0 0 0 0 0 0 0 0 0 0 0
39680 - 0 0 0 0 0 0 0 0 0 0 0 0
39681 - 0 0 0 0 0 0 0 0 0 0 0 0
39682 - 0 0 0 0 0 0 0 0 0 0 0 0
39683 - 0 0 0 0 0 0 0 0 0 0 0 0
39684 - 0 0 0 0 0 0 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 0 0 0 0 0 0
39691 - 0 0 0 0 0 0 0 0 0 0 0 0
39692 - 0 0 0 0 0 0 0 0 0 0 0 0
39693 - 0 0 0 6 6 6 10 10 10 14 14 14
39694 - 22 22 22 26 26 26 30 30 30 34 34 34
39695 - 30 30 30 30 30 30 26 26 26 18 18 18
39696 - 14 14 14 10 10 10 6 6 6 0 0 0
39697 - 0 0 0 0 0 0 0 0 0 0 0 0
39698 - 0 0 0 0 0 0 0 0 0 0 0 0
39699 - 0 0 0 0 0 0 0 0 0 0 0 0
39700 - 0 0 0 0 0 0 0 0 0 0 0 0
39701 - 0 0 0 0 0 0 0 0 0 0 0 0
39702 - 0 0 0 0 0 0 0 0 0 0 0 0
39703 - 0 0 0 0 0 0 0 0 0 0 0 0
39704 - 0 0 0 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 1 0 0 1 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 0 0 0 0 0 0
39711 - 0 0 0 0 0 0 0 0 0 0 0 0
39712 - 0 0 0 0 0 0 0 0 0 0 0 0
39713 - 6 6 6 14 14 14 26 26 26 42 42 42
39714 - 54 54 54 66 66 66 78 78 78 78 78 78
39715 - 78 78 78 74 74 74 66 66 66 54 54 54
39716 - 42 42 42 26 26 26 18 18 18 10 10 10
39717 - 6 6 6 0 0 0 0 0 0 0 0 0
39718 - 0 0 0 0 0 0 0 0 0 0 0 0
39719 - 0 0 0 0 0 0 0 0 0 0 0 0
39720 - 0 0 0 0 0 0 0 0 0 0 0 0
39721 - 0 0 0 0 0 0 0 0 0 0 0 0
39722 - 0 0 0 0 0 0 0 0 0 0 0 0
39723 - 0 0 0 0 0 0 0 0 0 0 0 0
39724 - 0 0 0 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 1 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 0 0 0 0 0 0
39731 - 0 0 0 0 0 0 0 0 0 0 0 0
39732 - 0 0 0 0 0 0 0 0 0 10 10 10
39733 - 22 22 22 42 42 42 66 66 66 86 86 86
39734 - 66 66 66 38 38 38 38 38 38 22 22 22
39735 - 26 26 26 34 34 34 54 54 54 66 66 66
39736 - 86 86 86 70 70 70 46 46 46 26 26 26
39737 - 14 14 14 6 6 6 0 0 0 0 0 0
39738 - 0 0 0 0 0 0 0 0 0 0 0 0
39739 - 0 0 0 0 0 0 0 0 0 0 0 0
39740 - 0 0 0 0 0 0 0 0 0 0 0 0
39741 - 0 0 0 0 0 0 0 0 0 0 0 0
39742 - 0 0 0 0 0 0 0 0 0 0 0 0
39743 - 0 0 0 0 0 0 0 0 0 0 0 0
39744 - 0 0 0 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 1 0 0 1 0 0 1 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 0 0 0
39750 - 0 0 0 0 0 0 0 0 0 0 0 0
39751 - 0 0 0 0 0 0 0 0 0 0 0 0
39752 - 0 0 0 0 0 0 10 10 10 26 26 26
39753 - 50 50 50 82 82 82 58 58 58 6 6 6
39754 - 2 2 6 2 2 6 2 2 6 2 2 6
39755 - 2 2 6 2 2 6 2 2 6 2 2 6
39756 - 6 6 6 54 54 54 86 86 86 66 66 66
39757 - 38 38 38 18 18 18 6 6 6 0 0 0
39758 - 0 0 0 0 0 0 0 0 0 0 0 0
39759 - 0 0 0 0 0 0 0 0 0 0 0 0
39760 - 0 0 0 0 0 0 0 0 0 0 0 0
39761 - 0 0 0 0 0 0 0 0 0 0 0 0
39762 - 0 0 0 0 0 0 0 0 0 0 0 0
39763 - 0 0 0 0 0 0 0 0 0 0 0 0
39764 - 0 0 0 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 0 0 0 0 0 0 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 0 0 0 0 0 0 0 0 0
39772 - 0 0 0 6 6 6 22 22 22 50 50 50
39773 - 78 78 78 34 34 34 2 2 6 2 2 6
39774 - 2 2 6 2 2 6 2 2 6 2 2 6
39775 - 2 2 6 2 2 6 2 2 6 2 2 6
39776 - 2 2 6 2 2 6 6 6 6 70 70 70
39777 - 78 78 78 46 46 46 22 22 22 6 6 6
39778 - 0 0 0 0 0 0 0 0 0 0 0 0
39779 - 0 0 0 0 0 0 0 0 0 0 0 0
39780 - 0 0 0 0 0 0 0 0 0 0 0 0
39781 - 0 0 0 0 0 0 0 0 0 0 0 0
39782 - 0 0 0 0 0 0 0 0 0 0 0 0
39783 - 0 0 0 0 0 0 0 0 0 0 0 0
39784 - 0 0 0 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 1 0 0 1 0 0 1 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 0 0 0 0 0 0 0 0 0 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 0 0 0 0 0 0
39792 - 6 6 6 18 18 18 42 42 42 82 82 82
39793 - 26 26 26 2 2 6 2 2 6 2 2 6
39794 - 2 2 6 2 2 6 2 2 6 2 2 6
39795 - 2 2 6 2 2 6 2 2 6 14 14 14
39796 - 46 46 46 34 34 34 6 6 6 2 2 6
39797 - 42 42 42 78 78 78 42 42 42 18 18 18
39798 - 6 6 6 0 0 0 0 0 0 0 0 0
39799 - 0 0 0 0 0 0 0 0 0 0 0 0
39800 - 0 0 0 0 0 0 0 0 0 0 0 0
39801 - 0 0 0 0 0 0 0 0 0 0 0 0
39802 - 0 0 0 0 0 0 0 0 0 0 0 0
39803 - 0 0 0 0 0 0 0 0 0 0 0 0
39804 - 0 0 0 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 1 0 0 0 0 0 1 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 0 0 0 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 10 10 10 30 30 30 66 66 66 58 58 58
39813 - 2 2 6 2 2 6 2 2 6 2 2 6
39814 - 2 2 6 2 2 6 2 2 6 2 2 6
39815 - 2 2 6 2 2 6 2 2 6 26 26 26
39816 - 86 86 86 101 101 101 46 46 46 10 10 10
39817 - 2 2 6 58 58 58 70 70 70 34 34 34
39818 - 10 10 10 0 0 0 0 0 0 0 0 0
39819 - 0 0 0 0 0 0 0 0 0 0 0 0
39820 - 0 0 0 0 0 0 0 0 0 0 0 0
39821 - 0 0 0 0 0 0 0 0 0 0 0 0
39822 - 0 0 0 0 0 0 0 0 0 0 0 0
39823 - 0 0 0 0 0 0 0 0 0 0 0 0
39824 - 0 0 0 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 1 0 0 1 0 0 1 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 0 0 0 0 0 0 0 0 0 0 0 0
39829 - 0 0 0 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 0 0 0 0 0 0 0 0 0
39832 - 14 14 14 42 42 42 86 86 86 10 10 10
39833 - 2 2 6 2 2 6 2 2 6 2 2 6
39834 - 2 2 6 2 2 6 2 2 6 2 2 6
39835 - 2 2 6 2 2 6 2 2 6 30 30 30
39836 - 94 94 94 94 94 94 58 58 58 26 26 26
39837 - 2 2 6 6 6 6 78 78 78 54 54 54
39838 - 22 22 22 6 6 6 0 0 0 0 0 0
39839 - 0 0 0 0 0 0 0 0 0 0 0 0
39840 - 0 0 0 0 0 0 0 0 0 0 0 0
39841 - 0 0 0 0 0 0 0 0 0 0 0 0
39842 - 0 0 0 0 0 0 0 0 0 0 0 0
39843 - 0 0 0 0 0 0 0 0 0 0 0 0
39844 - 0 0 0 0 0 0 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 0 0 0
39847 - 0 0 0 0 0 0 0 0 0 0 0 0
39848 - 0 0 0 0 0 0 0 0 0 0 0 0
39849 - 0 0 0 0 0 0 0 0 0 0 0 0
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 0 0 0 0 0 0 0 0 0 6 6 6
39852 - 22 22 22 62 62 62 62 62 62 2 2 6
39853 - 2 2 6 2 2 6 2 2 6 2 2 6
39854 - 2 2 6 2 2 6 2 2 6 2 2 6
39855 - 2 2 6 2 2 6 2 2 6 26 26 26
39856 - 54 54 54 38 38 38 18 18 18 10 10 10
39857 - 2 2 6 2 2 6 34 34 34 82 82 82
39858 - 38 38 38 14 14 14 0 0 0 0 0 0
39859 - 0 0 0 0 0 0 0 0 0 0 0 0
39860 - 0 0 0 0 0 0 0 0 0 0 0 0
39861 - 0 0 0 0 0 0 0 0 0 0 0 0
39862 - 0 0 0 0 0 0 0 0 0 0 0 0
39863 - 0 0 0 0 0 0 0 0 0 0 0 0
39864 - 0 0 0 0 0 0 0 0 0 0 0 0
39865 - 0 0 0 0 0 0 0 0 0 0 0 0
39866 - 0 0 0 0 0 1 0 0 1 0 0 0
39867 - 0 0 0 0 0 0 0 0 0 0 0 0
39868 - 0 0 0 0 0 0 0 0 0 0 0 0
39869 - 0 0 0 0 0 0 0 0 0 0 0 0
39870 - 0 0 0 0 0 0 0 0 0 0 0 0
39871 - 0 0 0 0 0 0 0 0 0 6 6 6
39872 - 30 30 30 78 78 78 30 30 30 2 2 6
39873 - 2 2 6 2 2 6 2 2 6 2 2 6
39874 - 2 2 6 2 2 6 2 2 6 2 2 6
39875 - 2 2 6 2 2 6 2 2 6 10 10 10
39876 - 10 10 10 2 2 6 2 2 6 2 2 6
39877 - 2 2 6 2 2 6 2 2 6 78 78 78
39878 - 50 50 50 18 18 18 6 6 6 0 0 0
39879 - 0 0 0 0 0 0 0 0 0 0 0 0
39880 - 0 0 0 0 0 0 0 0 0 0 0 0
39881 - 0 0 0 0 0 0 0 0 0 0 0 0
39882 - 0 0 0 0 0 0 0 0 0 0 0 0
39883 - 0 0 0 0 0 0 0 0 0 0 0 0
39884 - 0 0 0 0 0 0 0 0 0 0 0 0
39885 - 0 0 0 0 0 0 0 0 0 0 0 0
39886 - 0 0 1 0 0 0 0 0 0 0 0 0
39887 - 0 0 0 0 0 0 0 0 0 0 0 0
39888 - 0 0 0 0 0 0 0 0 0 0 0 0
39889 - 0 0 0 0 0 0 0 0 0 0 0 0
39890 - 0 0 0 0 0 0 0 0 0 0 0 0
39891 - 0 0 0 0 0 0 0 0 0 10 10 10
39892 - 38 38 38 86 86 86 14 14 14 2 2 6
39893 - 2 2 6 2 2 6 2 2 6 2 2 6
39894 - 2 2 6 2 2 6 2 2 6 2 2 6
39895 - 2 2 6 2 2 6 2 2 6 2 2 6
39896 - 2 2 6 2 2 6 2 2 6 2 2 6
39897 - 2 2 6 2 2 6 2 2 6 54 54 54
39898 - 66 66 66 26 26 26 6 6 6 0 0 0
39899 - 0 0 0 0 0 0 0 0 0 0 0 0
39900 - 0 0 0 0 0 0 0 0 0 0 0 0
39901 - 0 0 0 0 0 0 0 0 0 0 0 0
39902 - 0 0 0 0 0 0 0 0 0 0 0 0
39903 - 0 0 0 0 0 0 0 0 0 0 0 0
39904 - 0 0 0 0 0 0 0 0 0 0 0 0
39905 - 0 0 0 0 0 0 0 0 0 0 0 0
39906 - 0 0 0 0 0 1 0 0 1 0 0 0
39907 - 0 0 0 0 0 0 0 0 0 0 0 0
39908 - 0 0 0 0 0 0 0 0 0 0 0 0
39909 - 0 0 0 0 0 0 0 0 0 0 0 0
39910 - 0 0 0 0 0 0 0 0 0 0 0 0
39911 - 0 0 0 0 0 0 0 0 0 14 14 14
39912 - 42 42 42 82 82 82 2 2 6 2 2 6
39913 - 2 2 6 6 6 6 10 10 10 2 2 6
39914 - 2 2 6 2 2 6 2 2 6 2 2 6
39915 - 2 2 6 2 2 6 2 2 6 6 6 6
39916 - 14 14 14 10 10 10 2 2 6 2 2 6
39917 - 2 2 6 2 2 6 2 2 6 18 18 18
39918 - 82 82 82 34 34 34 10 10 10 0 0 0
39919 - 0 0 0 0 0 0 0 0 0 0 0 0
39920 - 0 0 0 0 0 0 0 0 0 0 0 0
39921 - 0 0 0 0 0 0 0 0 0 0 0 0
39922 - 0 0 0 0 0 0 0 0 0 0 0 0
39923 - 0 0 0 0 0 0 0 0 0 0 0 0
39924 - 0 0 0 0 0 0 0 0 0 0 0 0
39925 - 0 0 0 0 0 0 0 0 0 0 0 0
39926 - 0 0 1 0 0 0 0 0 0 0 0 0
39927 - 0 0 0 0 0 0 0 0 0 0 0 0
39928 - 0 0 0 0 0 0 0 0 0 0 0 0
39929 - 0 0 0 0 0 0 0 0 0 0 0 0
39930 - 0 0 0 0 0 0 0 0 0 0 0 0
39931 - 0 0 0 0 0 0 0 0 0 14 14 14
39932 - 46 46 46 86 86 86 2 2 6 2 2 6
39933 - 6 6 6 6 6 6 22 22 22 34 34 34
39934 - 6 6 6 2 2 6 2 2 6 2 2 6
39935 - 2 2 6 2 2 6 18 18 18 34 34 34
39936 - 10 10 10 50 50 50 22 22 22 2 2 6
39937 - 2 2 6 2 2 6 2 2 6 10 10 10
39938 - 86 86 86 42 42 42 14 14 14 0 0 0
39939 - 0 0 0 0 0 0 0 0 0 0 0 0
39940 - 0 0 0 0 0 0 0 0 0 0 0 0
39941 - 0 0 0 0 0 0 0 0 0 0 0 0
39942 - 0 0 0 0 0 0 0 0 0 0 0 0
39943 - 0 0 0 0 0 0 0 0 0 0 0 0
39944 - 0 0 0 0 0 0 0 0 0 0 0 0
39945 - 0 0 0 0 0 0 0 0 0 0 0 0
39946 - 0 0 1 0 0 1 0 0 1 0 0 0
39947 - 0 0 0 0 0 0 0 0 0 0 0 0
39948 - 0 0 0 0 0 0 0 0 0 0 0 0
39949 - 0 0 0 0 0 0 0 0 0 0 0 0
39950 - 0 0 0 0 0 0 0 0 0 0 0 0
39951 - 0 0 0 0 0 0 0 0 0 14 14 14
39952 - 46 46 46 86 86 86 2 2 6 2 2 6
39953 - 38 38 38 116 116 116 94 94 94 22 22 22
39954 - 22 22 22 2 2 6 2 2 6 2 2 6
39955 - 14 14 14 86 86 86 138 138 138 162 162 162
39956 -154 154 154 38 38 38 26 26 26 6 6 6
39957 - 2 2 6 2 2 6 2 2 6 2 2 6
39958 - 86 86 86 46 46 46 14 14 14 0 0 0
39959 - 0 0 0 0 0 0 0 0 0 0 0 0
39960 - 0 0 0 0 0 0 0 0 0 0 0 0
39961 - 0 0 0 0 0 0 0 0 0 0 0 0
39962 - 0 0 0 0 0 0 0 0 0 0 0 0
39963 - 0 0 0 0 0 0 0 0 0 0 0 0
39964 - 0 0 0 0 0 0 0 0 0 0 0 0
39965 - 0 0 0 0 0 0 0 0 0 0 0 0
39966 - 0 0 0 0 0 0 0 0 0 0 0 0
39967 - 0 0 0 0 0 0 0 0 0 0 0 0
39968 - 0 0 0 0 0 0 0 0 0 0 0 0
39969 - 0 0 0 0 0 0 0 0 0 0 0 0
39970 - 0 0 0 0 0 0 0 0 0 0 0 0
39971 - 0 0 0 0 0 0 0 0 0 14 14 14
39972 - 46 46 46 86 86 86 2 2 6 14 14 14
39973 -134 134 134 198 198 198 195 195 195 116 116 116
39974 - 10 10 10 2 2 6 2 2 6 6 6 6
39975 -101 98 89 187 187 187 210 210 210 218 218 218
39976 -214 214 214 134 134 134 14 14 14 6 6 6
39977 - 2 2 6 2 2 6 2 2 6 2 2 6
39978 - 86 86 86 50 50 50 18 18 18 6 6 6
39979 - 0 0 0 0 0 0 0 0 0 0 0 0
39980 - 0 0 0 0 0 0 0 0 0 0 0 0
39981 - 0 0 0 0 0 0 0 0 0 0 0 0
39982 - 0 0 0 0 0 0 0 0 0 0 0 0
39983 - 0 0 0 0 0 0 0 0 0 0 0 0
39984 - 0 0 0 0 0 0 0 0 0 0 0 0
39985 - 0 0 0 0 0 0 0 0 1 0 0 0
39986 - 0 0 1 0 0 1 0 0 1 0 0 0
39987 - 0 0 0 0 0 0 0 0 0 0 0 0
39988 - 0 0 0 0 0 0 0 0 0 0 0 0
39989 - 0 0 0 0 0 0 0 0 0 0 0 0
39990 - 0 0 0 0 0 0 0 0 0 0 0 0
39991 - 0 0 0 0 0 0 0 0 0 14 14 14
39992 - 46 46 46 86 86 86 2 2 6 54 54 54
39993 -218 218 218 195 195 195 226 226 226 246 246 246
39994 - 58 58 58 2 2 6 2 2 6 30 30 30
39995 -210 210 210 253 253 253 174 174 174 123 123 123
39996 -221 221 221 234 234 234 74 74 74 2 2 6
39997 - 2 2 6 2 2 6 2 2 6 2 2 6
39998 - 70 70 70 58 58 58 22 22 22 6 6 6
39999 - 0 0 0 0 0 0 0 0 0 0 0 0
40000 - 0 0 0 0 0 0 0 0 0 0 0 0
40001 - 0 0 0 0 0 0 0 0 0 0 0 0
40002 - 0 0 0 0 0 0 0 0 0 0 0 0
40003 - 0 0 0 0 0 0 0 0 0 0 0 0
40004 - 0 0 0 0 0 0 0 0 0 0 0 0
40005 - 0 0 0 0 0 0 0 0 0 0 0 0
40006 - 0 0 0 0 0 0 0 0 0 0 0 0
40007 - 0 0 0 0 0 0 0 0 0 0 0 0
40008 - 0 0 0 0 0 0 0 0 0 0 0 0
40009 - 0 0 0 0 0 0 0 0 0 0 0 0
40010 - 0 0 0 0 0 0 0 0 0 0 0 0
40011 - 0 0 0 0 0 0 0 0 0 14 14 14
40012 - 46 46 46 82 82 82 2 2 6 106 106 106
40013 -170 170 170 26 26 26 86 86 86 226 226 226
40014 -123 123 123 10 10 10 14 14 14 46 46 46
40015 -231 231 231 190 190 190 6 6 6 70 70 70
40016 - 90 90 90 238 238 238 158 158 158 2 2 6
40017 - 2 2 6 2 2 6 2 2 6 2 2 6
40018 - 70 70 70 58 58 58 22 22 22 6 6 6
40019 - 0 0 0 0 0 0 0 0 0 0 0 0
40020 - 0 0 0 0 0 0 0 0 0 0 0 0
40021 - 0 0 0 0 0 0 0 0 0 0 0 0
40022 - 0 0 0 0 0 0 0 0 0 0 0 0
40023 - 0 0 0 0 0 0 0 0 0 0 0 0
40024 - 0 0 0 0 0 0 0 0 0 0 0 0
40025 - 0 0 0 0 0 0 0 0 1 0 0 0
40026 - 0 0 1 0 0 1 0 0 1 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 0 0 0
40028 - 0 0 0 0 0 0 0 0 0 0 0 0
40029 - 0 0 0 0 0 0 0 0 0 0 0 0
40030 - 0 0 0 0 0 0 0 0 0 0 0 0
40031 - 0 0 0 0 0 0 0 0 0 14 14 14
40032 - 42 42 42 86 86 86 6 6 6 116 116 116
40033 -106 106 106 6 6 6 70 70 70 149 149 149
40034 -128 128 128 18 18 18 38 38 38 54 54 54
40035 -221 221 221 106 106 106 2 2 6 14 14 14
40036 - 46 46 46 190 190 190 198 198 198 2 2 6
40037 - 2 2 6 2 2 6 2 2 6 2 2 6
40038 - 74 74 74 62 62 62 22 22 22 6 6 6
40039 - 0 0 0 0 0 0 0 0 0 0 0 0
40040 - 0 0 0 0 0 0 0 0 0 0 0 0
40041 - 0 0 0 0 0 0 0 0 0 0 0 0
40042 - 0 0 0 0 0 0 0 0 0 0 0 0
40043 - 0 0 0 0 0 0 0 0 0 0 0 0
40044 - 0 0 0 0 0 0 0 0 0 0 0 0
40045 - 0 0 0 0 0 0 0 0 1 0 0 0
40046 - 0 0 1 0 0 0 0 0 1 0 0 0
40047 - 0 0 0 0 0 0 0 0 0 0 0 0
40048 - 0 0 0 0 0 0 0 0 0 0 0 0
40049 - 0 0 0 0 0 0 0 0 0 0 0 0
40050 - 0 0 0 0 0 0 0 0 0 0 0 0
40051 - 0 0 0 0 0 0 0 0 0 14 14 14
40052 - 42 42 42 94 94 94 14 14 14 101 101 101
40053 -128 128 128 2 2 6 18 18 18 116 116 116
40054 -118 98 46 121 92 8 121 92 8 98 78 10
40055 -162 162 162 106 106 106 2 2 6 2 2 6
40056 - 2 2 6 195 195 195 195 195 195 6 6 6
40057 - 2 2 6 2 2 6 2 2 6 2 2 6
40058 - 74 74 74 62 62 62 22 22 22 6 6 6
40059 - 0 0 0 0 0 0 0 0 0 0 0 0
40060 - 0 0 0 0 0 0 0 0 0 0 0 0
40061 - 0 0 0 0 0 0 0 0 0 0 0 0
40062 - 0 0 0 0 0 0 0 0 0 0 0 0
40063 - 0 0 0 0 0 0 0 0 0 0 0 0
40064 - 0 0 0 0 0 0 0 0 0 0 0 0
40065 - 0 0 0 0 0 0 0 0 1 0 0 1
40066 - 0 0 1 0 0 0 0 0 1 0 0 0
40067 - 0 0 0 0 0 0 0 0 0 0 0 0
40068 - 0 0 0 0 0 0 0 0 0 0 0 0
40069 - 0 0 0 0 0 0 0 0 0 0 0 0
40070 - 0 0 0 0 0 0 0 0 0 0 0 0
40071 - 0 0 0 0 0 0 0 0 0 10 10 10
40072 - 38 38 38 90 90 90 14 14 14 58 58 58
40073 -210 210 210 26 26 26 54 38 6 154 114 10
40074 -226 170 11 236 186 11 225 175 15 184 144 12
40075 -215 174 15 175 146 61 37 26 9 2 2 6
40076 - 70 70 70 246 246 246 138 138 138 2 2 6
40077 - 2 2 6 2 2 6 2 2 6 2 2 6
40078 - 70 70 70 66 66 66 26 26 26 6 6 6
40079 - 0 0 0 0 0 0 0 0 0 0 0 0
40080 - 0 0 0 0 0 0 0 0 0 0 0 0
40081 - 0 0 0 0 0 0 0 0 0 0 0 0
40082 - 0 0 0 0 0 0 0 0 0 0 0 0
40083 - 0 0 0 0 0 0 0 0 0 0 0 0
40084 - 0 0 0 0 0 0 0 0 0 0 0 0
40085 - 0 0 0 0 0 0 0 0 0 0 0 0
40086 - 0 0 0 0 0 0 0 0 0 0 0 0
40087 - 0 0 0 0 0 0 0 0 0 0 0 0
40088 - 0 0 0 0 0 0 0 0 0 0 0 0
40089 - 0 0 0 0 0 0 0 0 0 0 0 0
40090 - 0 0 0 0 0 0 0 0 0 0 0 0
40091 - 0 0 0 0 0 0 0 0 0 10 10 10
40092 - 38 38 38 86 86 86 14 14 14 10 10 10
40093 -195 195 195 188 164 115 192 133 9 225 175 15
40094 -239 182 13 234 190 10 232 195 16 232 200 30
40095 -245 207 45 241 208 19 232 195 16 184 144 12
40096 -218 194 134 211 206 186 42 42 42 2 2 6
40097 - 2 2 6 2 2 6 2 2 6 2 2 6
40098 - 50 50 50 74 74 74 30 30 30 6 6 6
40099 - 0 0 0 0 0 0 0 0 0 0 0 0
40100 - 0 0 0 0 0 0 0 0 0 0 0 0
40101 - 0 0 0 0 0 0 0 0 0 0 0 0
40102 - 0 0 0 0 0 0 0 0 0 0 0 0
40103 - 0 0 0 0 0 0 0 0 0 0 0 0
40104 - 0 0 0 0 0 0 0 0 0 0 0 0
40105 - 0 0 0 0 0 0 0 0 0 0 0 0
40106 - 0 0 0 0 0 0 0 0 0 0 0 0
40107 - 0 0 0 0 0 0 0 0 0 0 0 0
40108 - 0 0 0 0 0 0 0 0 0 0 0 0
40109 - 0 0 0 0 0 0 0 0 0 0 0 0
40110 - 0 0 0 0 0 0 0 0 0 0 0 0
40111 - 0 0 0 0 0 0 0 0 0 10 10 10
40112 - 34 34 34 86 86 86 14 14 14 2 2 6
40113 -121 87 25 192 133 9 219 162 10 239 182 13
40114 -236 186 11 232 195 16 241 208 19 244 214 54
40115 -246 218 60 246 218 38 246 215 20 241 208 19
40116 -241 208 19 226 184 13 121 87 25 2 2 6
40117 - 2 2 6 2 2 6 2 2 6 2 2 6
40118 - 50 50 50 82 82 82 34 34 34 10 10 10
40119 - 0 0 0 0 0 0 0 0 0 0 0 0
40120 - 0 0 0 0 0 0 0 0 0 0 0 0
40121 - 0 0 0 0 0 0 0 0 0 0 0 0
40122 - 0 0 0 0 0 0 0 0 0 0 0 0
40123 - 0 0 0 0 0 0 0 0 0 0 0 0
40124 - 0 0 0 0 0 0 0 0 0 0 0 0
40125 - 0 0 0 0 0 0 0 0 0 0 0 0
40126 - 0 0 0 0 0 0 0 0 0 0 0 0
40127 - 0 0 0 0 0 0 0 0 0 0 0 0
40128 - 0 0 0 0 0 0 0 0 0 0 0 0
40129 - 0 0 0 0 0 0 0 0 0 0 0 0
40130 - 0 0 0 0 0 0 0 0 0 0 0 0
40131 - 0 0 0 0 0 0 0 0 0 10 10 10
40132 - 34 34 34 82 82 82 30 30 30 61 42 6
40133 -180 123 7 206 145 10 230 174 11 239 182 13
40134 -234 190 10 238 202 15 241 208 19 246 218 74
40135 -246 218 38 246 215 20 246 215 20 246 215 20
40136 -226 184 13 215 174 15 184 144 12 6 6 6
40137 - 2 2 6 2 2 6 2 2 6 2 2 6
40138 - 26 26 26 94 94 94 42 42 42 14 14 14
40139 - 0 0 0 0 0 0 0 0 0 0 0 0
40140 - 0 0 0 0 0 0 0 0 0 0 0 0
40141 - 0 0 0 0 0 0 0 0 0 0 0 0
40142 - 0 0 0 0 0 0 0 0 0 0 0 0
40143 - 0 0 0 0 0 0 0 0 0 0 0 0
40144 - 0 0 0 0 0 0 0 0 0 0 0 0
40145 - 0 0 0 0 0 0 0 0 0 0 0 0
40146 - 0 0 0 0 0 0 0 0 0 0 0 0
40147 - 0 0 0 0 0 0 0 0 0 0 0 0
40148 - 0 0 0 0 0 0 0 0 0 0 0 0
40149 - 0 0 0 0 0 0 0 0 0 0 0 0
40150 - 0 0 0 0 0 0 0 0 0 0 0 0
40151 - 0 0 0 0 0 0 0 0 0 10 10 10
40152 - 30 30 30 78 78 78 50 50 50 104 69 6
40153 -192 133 9 216 158 10 236 178 12 236 186 11
40154 -232 195 16 241 208 19 244 214 54 245 215 43
40155 -246 215 20 246 215 20 241 208 19 198 155 10
40156 -200 144 11 216 158 10 156 118 10 2 2 6
40157 - 2 2 6 2 2 6 2 2 6 2 2 6
40158 - 6 6 6 90 90 90 54 54 54 18 18 18
40159 - 6 6 6 0 0 0 0 0 0 0 0 0
40160 - 0 0 0 0 0 0 0 0 0 0 0 0
40161 - 0 0 0 0 0 0 0 0 0 0 0 0
40162 - 0 0 0 0 0 0 0 0 0 0 0 0
40163 - 0 0 0 0 0 0 0 0 0 0 0 0
40164 - 0 0 0 0 0 0 0 0 0 0 0 0
40165 - 0 0 0 0 0 0 0 0 0 0 0 0
40166 - 0 0 0 0 0 0 0 0 0 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 0 0 0 0 0 0 0 0 0 0 0 0
40169 - 0 0 0 0 0 0 0 0 0 0 0 0
40170 - 0 0 0 0 0 0 0 0 0 0 0 0
40171 - 0 0 0 0 0 0 0 0 0 10 10 10
40172 - 30 30 30 78 78 78 46 46 46 22 22 22
40173 -137 92 6 210 162 10 239 182 13 238 190 10
40174 -238 202 15 241 208 19 246 215 20 246 215 20
40175 -241 208 19 203 166 17 185 133 11 210 150 10
40176 -216 158 10 210 150 10 102 78 10 2 2 6
40177 - 6 6 6 54 54 54 14 14 14 2 2 6
40178 - 2 2 6 62 62 62 74 74 74 30 30 30
40179 - 10 10 10 0 0 0 0 0 0 0 0 0
40180 - 0 0 0 0 0 0 0 0 0 0 0 0
40181 - 0 0 0 0 0 0 0 0 0 0 0 0
40182 - 0 0 0 0 0 0 0 0 0 0 0 0
40183 - 0 0 0 0 0 0 0 0 0 0 0 0
40184 - 0 0 0 0 0 0 0 0 0 0 0 0
40185 - 0 0 0 0 0 0 0 0 0 0 0 0
40186 - 0 0 0 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 0 0 0 0 0 0
40189 - 0 0 0 0 0 0 0 0 0 0 0 0
40190 - 0 0 0 0 0 0 0 0 0 0 0 0
40191 - 0 0 0 0 0 0 0 0 0 10 10 10
40192 - 34 34 34 78 78 78 50 50 50 6 6 6
40193 - 94 70 30 139 102 15 190 146 13 226 184 13
40194 -232 200 30 232 195 16 215 174 15 190 146 13
40195 -168 122 10 192 133 9 210 150 10 213 154 11
40196 -202 150 34 182 157 106 101 98 89 2 2 6
40197 - 2 2 6 78 78 78 116 116 116 58 58 58
40198 - 2 2 6 22 22 22 90 90 90 46 46 46
40199 - 18 18 18 6 6 6 0 0 0 0 0 0
40200 - 0 0 0 0 0 0 0 0 0 0 0 0
40201 - 0 0 0 0 0 0 0 0 0 0 0 0
40202 - 0 0 0 0 0 0 0 0 0 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 0 0 0
40204 - 0 0 0 0 0 0 0 0 0 0 0 0
40205 - 0 0 0 0 0 0 0 0 0 0 0 0
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 0 0 0
40209 - 0 0 0 0 0 0 0 0 0 0 0 0
40210 - 0 0 0 0 0 0 0 0 0 0 0 0
40211 - 0 0 0 0 0 0 0 0 0 10 10 10
40212 - 38 38 38 86 86 86 50 50 50 6 6 6
40213 -128 128 128 174 154 114 156 107 11 168 122 10
40214 -198 155 10 184 144 12 197 138 11 200 144 11
40215 -206 145 10 206 145 10 197 138 11 188 164 115
40216 -195 195 195 198 198 198 174 174 174 14 14 14
40217 - 2 2 6 22 22 22 116 116 116 116 116 116
40218 - 22 22 22 2 2 6 74 74 74 70 70 70
40219 - 30 30 30 10 10 10 0 0 0 0 0 0
40220 - 0 0 0 0 0 0 0 0 0 0 0 0
40221 - 0 0 0 0 0 0 0 0 0 0 0 0
40222 - 0 0 0 0 0 0 0 0 0 0 0 0
40223 - 0 0 0 0 0 0 0 0 0 0 0 0
40224 - 0 0 0 0 0 0 0 0 0 0 0 0
40225 - 0 0 0 0 0 0 0 0 0 0 0 0
40226 - 0 0 0 0 0 0 0 0 0 0 0 0
40227 - 0 0 0 0 0 0 0 0 0 0 0 0
40228 - 0 0 0 0 0 0 0 0 0 0 0 0
40229 - 0 0 0 0 0 0 0 0 0 0 0 0
40230 - 0 0 0 0 0 0 0 0 0 0 0 0
40231 - 0 0 0 0 0 0 6 6 6 18 18 18
40232 - 50 50 50 101 101 101 26 26 26 10 10 10
40233 -138 138 138 190 190 190 174 154 114 156 107 11
40234 -197 138 11 200 144 11 197 138 11 192 133 9
40235 -180 123 7 190 142 34 190 178 144 187 187 187
40236 -202 202 202 221 221 221 214 214 214 66 66 66
40237 - 2 2 6 2 2 6 50 50 50 62 62 62
40238 - 6 6 6 2 2 6 10 10 10 90 90 90
40239 - 50 50 50 18 18 18 6 6 6 0 0 0
40240 - 0 0 0 0 0 0 0 0 0 0 0 0
40241 - 0 0 0 0 0 0 0 0 0 0 0 0
40242 - 0 0 0 0 0 0 0 0 0 0 0 0
40243 - 0 0 0 0 0 0 0 0 0 0 0 0
40244 - 0 0 0 0 0 0 0 0 0 0 0 0
40245 - 0 0 0 0 0 0 0 0 0 0 0 0
40246 - 0 0 0 0 0 0 0 0 0 0 0 0
40247 - 0 0 0 0 0 0 0 0 0 0 0 0
40248 - 0 0 0 0 0 0 0 0 0 0 0 0
40249 - 0 0 0 0 0 0 0 0 0 0 0 0
40250 - 0 0 0 0 0 0 0 0 0 0 0 0
40251 - 0 0 0 0 0 0 10 10 10 34 34 34
40252 - 74 74 74 74 74 74 2 2 6 6 6 6
40253 -144 144 144 198 198 198 190 190 190 178 166 146
40254 -154 121 60 156 107 11 156 107 11 168 124 44
40255 -174 154 114 187 187 187 190 190 190 210 210 210
40256 -246 246 246 253 253 253 253 253 253 182 182 182
40257 - 6 6 6 2 2 6 2 2 6 2 2 6
40258 - 2 2 6 2 2 6 2 2 6 62 62 62
40259 - 74 74 74 34 34 34 14 14 14 0 0 0
40260 - 0 0 0 0 0 0 0 0 0 0 0 0
40261 - 0 0 0 0 0 0 0 0 0 0 0 0
40262 - 0 0 0 0 0 0 0 0 0 0 0 0
40263 - 0 0 0 0 0 0 0 0 0 0 0 0
40264 - 0 0 0 0 0 0 0 0 0 0 0 0
40265 - 0 0 0 0 0 0 0 0 0 0 0 0
40266 - 0 0 0 0 0 0 0 0 0 0 0 0
40267 - 0 0 0 0 0 0 0 0 0 0 0 0
40268 - 0 0 0 0 0 0 0 0 0 0 0 0
40269 - 0 0 0 0 0 0 0 0 0 0 0 0
40270 - 0 0 0 0 0 0 0 0 0 0 0 0
40271 - 0 0 0 10 10 10 22 22 22 54 54 54
40272 - 94 94 94 18 18 18 2 2 6 46 46 46
40273 -234 234 234 221 221 221 190 190 190 190 190 190
40274 -190 190 190 187 187 187 187 187 187 190 190 190
40275 -190 190 190 195 195 195 214 214 214 242 242 242
40276 -253 253 253 253 253 253 253 253 253 253 253 253
40277 - 82 82 82 2 2 6 2 2 6 2 2 6
40278 - 2 2 6 2 2 6 2 2 6 14 14 14
40279 - 86 86 86 54 54 54 22 22 22 6 6 6
40280 - 0 0 0 0 0 0 0 0 0 0 0 0
40281 - 0 0 0 0 0 0 0 0 0 0 0 0
40282 - 0 0 0 0 0 0 0 0 0 0 0 0
40283 - 0 0 0 0 0 0 0 0 0 0 0 0
40284 - 0 0 0 0 0 0 0 0 0 0 0 0
40285 - 0 0 0 0 0 0 0 0 0 0 0 0
40286 - 0 0 0 0 0 0 0 0 0 0 0 0
40287 - 0 0 0 0 0 0 0 0 0 0 0 0
40288 - 0 0 0 0 0 0 0 0 0 0 0 0
40289 - 0 0 0 0 0 0 0 0 0 0 0 0
40290 - 0 0 0 0 0 0 0 0 0 0 0 0
40291 - 6 6 6 18 18 18 46 46 46 90 90 90
40292 - 46 46 46 18 18 18 6 6 6 182 182 182
40293 -253 253 253 246 246 246 206 206 206 190 190 190
40294 -190 190 190 190 190 190 190 190 190 190 190 190
40295 -206 206 206 231 231 231 250 250 250 253 253 253
40296 -253 253 253 253 253 253 253 253 253 253 253 253
40297 -202 202 202 14 14 14 2 2 6 2 2 6
40298 - 2 2 6 2 2 6 2 2 6 2 2 6
40299 - 42 42 42 86 86 86 42 42 42 18 18 18
40300 - 6 6 6 0 0 0 0 0 0 0 0 0
40301 - 0 0 0 0 0 0 0 0 0 0 0 0
40302 - 0 0 0 0 0 0 0 0 0 0 0 0
40303 - 0 0 0 0 0 0 0 0 0 0 0 0
40304 - 0 0 0 0 0 0 0 0 0 0 0 0
40305 - 0 0 0 0 0 0 0 0 0 0 0 0
40306 - 0 0 0 0 0 0 0 0 0 0 0 0
40307 - 0 0 0 0 0 0 0 0 0 0 0 0
40308 - 0 0 0 0 0 0 0 0 0 0 0 0
40309 - 0 0 0 0 0 0 0 0 0 0 0 0
40310 - 0 0 0 0 0 0 0 0 0 6 6 6
40311 - 14 14 14 38 38 38 74 74 74 66 66 66
40312 - 2 2 6 6 6 6 90 90 90 250 250 250
40313 -253 253 253 253 253 253 238 238 238 198 198 198
40314 -190 190 190 190 190 190 195 195 195 221 221 221
40315 -246 246 246 253 253 253 253 253 253 253 253 253
40316 -253 253 253 253 253 253 253 253 253 253 253 253
40317 -253 253 253 82 82 82 2 2 6 2 2 6
40318 - 2 2 6 2 2 6 2 2 6 2 2 6
40319 - 2 2 6 78 78 78 70 70 70 34 34 34
40320 - 14 14 14 6 6 6 0 0 0 0 0 0
40321 - 0 0 0 0 0 0 0 0 0 0 0 0
40322 - 0 0 0 0 0 0 0 0 0 0 0 0
40323 - 0 0 0 0 0 0 0 0 0 0 0 0
40324 - 0 0 0 0 0 0 0 0 0 0 0 0
40325 - 0 0 0 0 0 0 0 0 0 0 0 0
40326 - 0 0 0 0 0 0 0 0 0 0 0 0
40327 - 0 0 0 0 0 0 0 0 0 0 0 0
40328 - 0 0 0 0 0 0 0 0 0 0 0 0
40329 - 0 0 0 0 0 0 0 0 0 0 0 0
40330 - 0 0 0 0 0 0 0 0 0 14 14 14
40331 - 34 34 34 66 66 66 78 78 78 6 6 6
40332 - 2 2 6 18 18 18 218 218 218 253 253 253
40333 -253 253 253 253 253 253 253 253 253 246 246 246
40334 -226 226 226 231 231 231 246 246 246 253 253 253
40335 -253 253 253 253 253 253 253 253 253 253 253 253
40336 -253 253 253 253 253 253 253 253 253 253 253 253
40337 -253 253 253 178 178 178 2 2 6 2 2 6
40338 - 2 2 6 2 2 6 2 2 6 2 2 6
40339 - 2 2 6 18 18 18 90 90 90 62 62 62
40340 - 30 30 30 10 10 10 0 0 0 0 0 0
40341 - 0 0 0 0 0 0 0 0 0 0 0 0
40342 - 0 0 0 0 0 0 0 0 0 0 0 0
40343 - 0 0 0 0 0 0 0 0 0 0 0 0
40344 - 0 0 0 0 0 0 0 0 0 0 0 0
40345 - 0 0 0 0 0 0 0 0 0 0 0 0
40346 - 0 0 0 0 0 0 0 0 0 0 0 0
40347 - 0 0 0 0 0 0 0 0 0 0 0 0
40348 - 0 0 0 0 0 0 0 0 0 0 0 0
40349 - 0 0 0 0 0 0 0 0 0 0 0 0
40350 - 0 0 0 0 0 0 10 10 10 26 26 26
40351 - 58 58 58 90 90 90 18 18 18 2 2 6
40352 - 2 2 6 110 110 110 253 253 253 253 253 253
40353 -253 253 253 253 253 253 253 253 253 253 253 253
40354 -250 250 250 253 253 253 253 253 253 253 253 253
40355 -253 253 253 253 253 253 253 253 253 253 253 253
40356 -253 253 253 253 253 253 253 253 253 253 253 253
40357 -253 253 253 231 231 231 18 18 18 2 2 6
40358 - 2 2 6 2 2 6 2 2 6 2 2 6
40359 - 2 2 6 2 2 6 18 18 18 94 94 94
40360 - 54 54 54 26 26 26 10 10 10 0 0 0
40361 - 0 0 0 0 0 0 0 0 0 0 0 0
40362 - 0 0 0 0 0 0 0 0 0 0 0 0
40363 - 0 0 0 0 0 0 0 0 0 0 0 0
40364 - 0 0 0 0 0 0 0 0 0 0 0 0
40365 - 0 0 0 0 0 0 0 0 0 0 0 0
40366 - 0 0 0 0 0 0 0 0 0 0 0 0
40367 - 0 0 0 0 0 0 0 0 0 0 0 0
40368 - 0 0 0 0 0 0 0 0 0 0 0 0
40369 - 0 0 0 0 0 0 0 0 0 0 0 0
40370 - 0 0 0 6 6 6 22 22 22 50 50 50
40371 - 90 90 90 26 26 26 2 2 6 2 2 6
40372 - 14 14 14 195 195 195 250 250 250 253 253 253
40373 -253 253 253 253 253 253 253 253 253 253 253 253
40374 -253 253 253 253 253 253 253 253 253 253 253 253
40375 -253 253 253 253 253 253 253 253 253 253 253 253
40376 -253 253 253 253 253 253 253 253 253 253 253 253
40377 -250 250 250 242 242 242 54 54 54 2 2 6
40378 - 2 2 6 2 2 6 2 2 6 2 2 6
40379 - 2 2 6 2 2 6 2 2 6 38 38 38
40380 - 86 86 86 50 50 50 22 22 22 6 6 6
40381 - 0 0 0 0 0 0 0 0 0 0 0 0
40382 - 0 0 0 0 0 0 0 0 0 0 0 0
40383 - 0 0 0 0 0 0 0 0 0 0 0 0
40384 - 0 0 0 0 0 0 0 0 0 0 0 0
40385 - 0 0 0 0 0 0 0 0 0 0 0 0
40386 - 0 0 0 0 0 0 0 0 0 0 0 0
40387 - 0 0 0 0 0 0 0 0 0 0 0 0
40388 - 0 0 0 0 0 0 0 0 0 0 0 0
40389 - 0 0 0 0 0 0 0 0 0 0 0 0
40390 - 6 6 6 14 14 14 38 38 38 82 82 82
40391 - 34 34 34 2 2 6 2 2 6 2 2 6
40392 - 42 42 42 195 195 195 246 246 246 253 253 253
40393 -253 253 253 253 253 253 253 253 253 250 250 250
40394 -242 242 242 242 242 242 250 250 250 253 253 253
40395 -253 253 253 253 253 253 253 253 253 253 253 253
40396 -253 253 253 250 250 250 246 246 246 238 238 238
40397 -226 226 226 231 231 231 101 101 101 6 6 6
40398 - 2 2 6 2 2 6 2 2 6 2 2 6
40399 - 2 2 6 2 2 6 2 2 6 2 2 6
40400 - 38 38 38 82 82 82 42 42 42 14 14 14
40401 - 6 6 6 0 0 0 0 0 0 0 0 0
40402 - 0 0 0 0 0 0 0 0 0 0 0 0
40403 - 0 0 0 0 0 0 0 0 0 0 0 0
40404 - 0 0 0 0 0 0 0 0 0 0 0 0
40405 - 0 0 0 0 0 0 0 0 0 0 0 0
40406 - 0 0 0 0 0 0 0 0 0 0 0 0
40407 - 0 0 0 0 0 0 0 0 0 0 0 0
40408 - 0 0 0 0 0 0 0 0 0 0 0 0
40409 - 0 0 0 0 0 0 0 0 0 0 0 0
40410 - 10 10 10 26 26 26 62 62 62 66 66 66
40411 - 2 2 6 2 2 6 2 2 6 6 6 6
40412 - 70 70 70 170 170 170 206 206 206 234 234 234
40413 -246 246 246 250 250 250 250 250 250 238 238 238
40414 -226 226 226 231 231 231 238 238 238 250 250 250
40415 -250 250 250 250 250 250 246 246 246 231 231 231
40416 -214 214 214 206 206 206 202 202 202 202 202 202
40417 -198 198 198 202 202 202 182 182 182 18 18 18
40418 - 2 2 6 2 2 6 2 2 6 2 2 6
40419 - 2 2 6 2 2 6 2 2 6 2 2 6
40420 - 2 2 6 62 62 62 66 66 66 30 30 30
40421 - 10 10 10 0 0 0 0 0 0 0 0 0
40422 - 0 0 0 0 0 0 0 0 0 0 0 0
40423 - 0 0 0 0 0 0 0 0 0 0 0 0
40424 - 0 0 0 0 0 0 0 0 0 0 0 0
40425 - 0 0 0 0 0 0 0 0 0 0 0 0
40426 - 0 0 0 0 0 0 0 0 0 0 0 0
40427 - 0 0 0 0 0 0 0 0 0 0 0 0
40428 - 0 0 0 0 0 0 0 0 0 0 0 0
40429 - 0 0 0 0 0 0 0 0 0 0 0 0
40430 - 14 14 14 42 42 42 82 82 82 18 18 18
40431 - 2 2 6 2 2 6 2 2 6 10 10 10
40432 - 94 94 94 182 182 182 218 218 218 242 242 242
40433 -250 250 250 253 253 253 253 253 253 250 250 250
40434 -234 234 234 253 253 253 253 253 253 253 253 253
40435 -253 253 253 253 253 253 253 253 253 246 246 246
40436 -238 238 238 226 226 226 210 210 210 202 202 202
40437 -195 195 195 195 195 195 210 210 210 158 158 158
40438 - 6 6 6 14 14 14 50 50 50 14 14 14
40439 - 2 2 6 2 2 6 2 2 6 2 2 6
40440 - 2 2 6 6 6 6 86 86 86 46 46 46
40441 - 18 18 18 6 6 6 0 0 0 0 0 0
40442 - 0 0 0 0 0 0 0 0 0 0 0 0
40443 - 0 0 0 0 0 0 0 0 0 0 0 0
40444 - 0 0 0 0 0 0 0 0 0 0 0 0
40445 - 0 0 0 0 0 0 0 0 0 0 0 0
40446 - 0 0 0 0 0 0 0 0 0 0 0 0
40447 - 0 0 0 0 0 0 0 0 0 0 0 0
40448 - 0 0 0 0 0 0 0 0 0 0 0 0
40449 - 0 0 0 0 0 0 0 0 0 6 6 6
40450 - 22 22 22 54 54 54 70 70 70 2 2 6
40451 - 2 2 6 10 10 10 2 2 6 22 22 22
40452 -166 166 166 231 231 231 250 250 250 253 253 253
40453 -253 253 253 253 253 253 253 253 253 250 250 250
40454 -242 242 242 253 253 253 253 253 253 253 253 253
40455 -253 253 253 253 253 253 253 253 253 253 253 253
40456 -253 253 253 253 253 253 253 253 253 246 246 246
40457 -231 231 231 206 206 206 198 198 198 226 226 226
40458 - 94 94 94 2 2 6 6 6 6 38 38 38
40459 - 30 30 30 2 2 6 2 2 6 2 2 6
40460 - 2 2 6 2 2 6 62 62 62 66 66 66
40461 - 26 26 26 10 10 10 0 0 0 0 0 0
40462 - 0 0 0 0 0 0 0 0 0 0 0 0
40463 - 0 0 0 0 0 0 0 0 0 0 0 0
40464 - 0 0 0 0 0 0 0 0 0 0 0 0
40465 - 0 0 0 0 0 0 0 0 0 0 0 0
40466 - 0 0 0 0 0 0 0 0 0 0 0 0
40467 - 0 0 0 0 0 0 0 0 0 0 0 0
40468 - 0 0 0 0 0 0 0 0 0 0 0 0
40469 - 0 0 0 0 0 0 0 0 0 10 10 10
40470 - 30 30 30 74 74 74 50 50 50 2 2 6
40471 - 26 26 26 26 26 26 2 2 6 106 106 106
40472 -238 238 238 253 253 253 253 253 253 253 253 253
40473 -253 253 253 253 253 253 253 253 253 253 253 253
40474 -253 253 253 253 253 253 253 253 253 253 253 253
40475 -253 253 253 253 253 253 253 253 253 253 253 253
40476 -253 253 253 253 253 253 253 253 253 253 253 253
40477 -253 253 253 246 246 246 218 218 218 202 202 202
40478 -210 210 210 14 14 14 2 2 6 2 2 6
40479 - 30 30 30 22 22 22 2 2 6 2 2 6
40480 - 2 2 6 2 2 6 18 18 18 86 86 86
40481 - 42 42 42 14 14 14 0 0 0 0 0 0
40482 - 0 0 0 0 0 0 0 0 0 0 0 0
40483 - 0 0 0 0 0 0 0 0 0 0 0 0
40484 - 0 0 0 0 0 0 0 0 0 0 0 0
40485 - 0 0 0 0 0 0 0 0 0 0 0 0
40486 - 0 0 0 0 0 0 0 0 0 0 0 0
40487 - 0 0 0 0 0 0 0 0 0 0 0 0
40488 - 0 0 0 0 0 0 0 0 0 0 0 0
40489 - 0 0 0 0 0 0 0 0 0 14 14 14
40490 - 42 42 42 90 90 90 22 22 22 2 2 6
40491 - 42 42 42 2 2 6 18 18 18 218 218 218
40492 -253 253 253 253 253 253 253 253 253 253 253 253
40493 -253 253 253 253 253 253 253 253 253 253 253 253
40494 -253 253 253 253 253 253 253 253 253 253 253 253
40495 -253 253 253 253 253 253 253 253 253 253 253 253
40496 -253 253 253 253 253 253 253 253 253 253 253 253
40497 -253 253 253 253 253 253 250 250 250 221 221 221
40498 -218 218 218 101 101 101 2 2 6 14 14 14
40499 - 18 18 18 38 38 38 10 10 10 2 2 6
40500 - 2 2 6 2 2 6 2 2 6 78 78 78
40501 - 58 58 58 22 22 22 6 6 6 0 0 0
40502 - 0 0 0 0 0 0 0 0 0 0 0 0
40503 - 0 0 0 0 0 0 0 0 0 0 0 0
40504 - 0 0 0 0 0 0 0 0 0 0 0 0
40505 - 0 0 0 0 0 0 0 0 0 0 0 0
40506 - 0 0 0 0 0 0 0 0 0 0 0 0
40507 - 0 0 0 0 0 0 0 0 0 0 0 0
40508 - 0 0 0 0 0 0 0 0 0 0 0 0
40509 - 0 0 0 0 0 0 6 6 6 18 18 18
40510 - 54 54 54 82 82 82 2 2 6 26 26 26
40511 - 22 22 22 2 2 6 123 123 123 253 253 253
40512 -253 253 253 253 253 253 253 253 253 253 253 253
40513 -253 253 253 253 253 253 253 253 253 253 253 253
40514 -253 253 253 253 253 253 253 253 253 253 253 253
40515 -253 253 253 253 253 253 253 253 253 253 253 253
40516 -253 253 253 253 253 253 253 253 253 253 253 253
40517 -253 253 253 253 253 253 253 253 253 250 250 250
40518 -238 238 238 198 198 198 6 6 6 38 38 38
40519 - 58 58 58 26 26 26 38 38 38 2 2 6
40520 - 2 2 6 2 2 6 2 2 6 46 46 46
40521 - 78 78 78 30 30 30 10 10 10 0 0 0
40522 - 0 0 0 0 0 0 0 0 0 0 0 0
40523 - 0 0 0 0 0 0 0 0 0 0 0 0
40524 - 0 0 0 0 0 0 0 0 0 0 0 0
40525 - 0 0 0 0 0 0 0 0 0 0 0 0
40526 - 0 0 0 0 0 0 0 0 0 0 0 0
40527 - 0 0 0 0 0 0 0 0 0 0 0 0
40528 - 0 0 0 0 0 0 0 0 0 0 0 0
40529 - 0 0 0 0 0 0 10 10 10 30 30 30
40530 - 74 74 74 58 58 58 2 2 6 42 42 42
40531 - 2 2 6 22 22 22 231 231 231 253 253 253
40532 -253 253 253 253 253 253 253 253 253 253 253 253
40533 -253 253 253 253 253 253 253 253 253 250 250 250
40534 -253 253 253 253 253 253 253 253 253 253 253 253
40535 -253 253 253 253 253 253 253 253 253 253 253 253
40536 -253 253 253 253 253 253 253 253 253 253 253 253
40537 -253 253 253 253 253 253 253 253 253 253 253 253
40538 -253 253 253 246 246 246 46 46 46 38 38 38
40539 - 42 42 42 14 14 14 38 38 38 14 14 14
40540 - 2 2 6 2 2 6 2 2 6 6 6 6
40541 - 86 86 86 46 46 46 14 14 14 0 0 0
40542 - 0 0 0 0 0 0 0 0 0 0 0 0
40543 - 0 0 0 0 0 0 0 0 0 0 0 0
40544 - 0 0 0 0 0 0 0 0 0 0 0 0
40545 - 0 0 0 0 0 0 0 0 0 0 0 0
40546 - 0 0 0 0 0 0 0 0 0 0 0 0
40547 - 0 0 0 0 0 0 0 0 0 0 0 0
40548 - 0 0 0 0 0 0 0 0 0 0 0 0
40549 - 0 0 0 6 6 6 14 14 14 42 42 42
40550 - 90 90 90 18 18 18 18 18 18 26 26 26
40551 - 2 2 6 116 116 116 253 253 253 253 253 253
40552 -253 253 253 253 253 253 253 253 253 253 253 253
40553 -253 253 253 253 253 253 250 250 250 238 238 238
40554 -253 253 253 253 253 253 253 253 253 253 253 253
40555 -253 253 253 253 253 253 253 253 253 253 253 253
40556 -253 253 253 253 253 253 253 253 253 253 253 253
40557 -253 253 253 253 253 253 253 253 253 253 253 253
40558 -253 253 253 253 253 253 94 94 94 6 6 6
40559 - 2 2 6 2 2 6 10 10 10 34 34 34
40560 - 2 2 6 2 2 6 2 2 6 2 2 6
40561 - 74 74 74 58 58 58 22 22 22 6 6 6
40562 - 0 0 0 0 0 0 0 0 0 0 0 0
40563 - 0 0 0 0 0 0 0 0 0 0 0 0
40564 - 0 0 0 0 0 0 0 0 0 0 0 0
40565 - 0 0 0 0 0 0 0 0 0 0 0 0
40566 - 0 0 0 0 0 0 0 0 0 0 0 0
40567 - 0 0 0 0 0 0 0 0 0 0 0 0
40568 - 0 0 0 0 0 0 0 0 0 0 0 0
40569 - 0 0 0 10 10 10 26 26 26 66 66 66
40570 - 82 82 82 2 2 6 38 38 38 6 6 6
40571 - 14 14 14 210 210 210 253 253 253 253 253 253
40572 -253 253 253 253 253 253 253 253 253 253 253 253
40573 -253 253 253 253 253 253 246 246 246 242 242 242
40574 -253 253 253 253 253 253 253 253 253 253 253 253
40575 -253 253 253 253 253 253 253 253 253 253 253 253
40576 -253 253 253 253 253 253 253 253 253 253 253 253
40577 -253 253 253 253 253 253 253 253 253 253 253 253
40578 -253 253 253 253 253 253 144 144 144 2 2 6
40579 - 2 2 6 2 2 6 2 2 6 46 46 46
40580 - 2 2 6 2 2 6 2 2 6 2 2 6
40581 - 42 42 42 74 74 74 30 30 30 10 10 10
40582 - 0 0 0 0 0 0 0 0 0 0 0 0
40583 - 0 0 0 0 0 0 0 0 0 0 0 0
40584 - 0 0 0 0 0 0 0 0 0 0 0 0
40585 - 0 0 0 0 0 0 0 0 0 0 0 0
40586 - 0 0 0 0 0 0 0 0 0 0 0 0
40587 - 0 0 0 0 0 0 0 0 0 0 0 0
40588 - 0 0 0 0 0 0 0 0 0 0 0 0
40589 - 6 6 6 14 14 14 42 42 42 90 90 90
40590 - 26 26 26 6 6 6 42 42 42 2 2 6
40591 - 74 74 74 250 250 250 253 253 253 253 253 253
40592 -253 253 253 253 253 253 253 253 253 253 253 253
40593 -253 253 253 253 253 253 242 242 242 242 242 242
40594 -253 253 253 253 253 253 253 253 253 253 253 253
40595 -253 253 253 253 253 253 253 253 253 253 253 253
40596 -253 253 253 253 253 253 253 253 253 253 253 253
40597 -253 253 253 253 253 253 253 253 253 253 253 253
40598 -253 253 253 253 253 253 182 182 182 2 2 6
40599 - 2 2 6 2 2 6 2 2 6 46 46 46
40600 - 2 2 6 2 2 6 2 2 6 2 2 6
40601 - 10 10 10 86 86 86 38 38 38 10 10 10
40602 - 0 0 0 0 0 0 0 0 0 0 0 0
40603 - 0 0 0 0 0 0 0 0 0 0 0 0
40604 - 0 0 0 0 0 0 0 0 0 0 0 0
40605 - 0 0 0 0 0 0 0 0 0 0 0 0
40606 - 0 0 0 0 0 0 0 0 0 0 0 0
40607 - 0 0 0 0 0 0 0 0 0 0 0 0
40608 - 0 0 0 0 0 0 0 0 0 0 0 0
40609 - 10 10 10 26 26 26 66 66 66 82 82 82
40610 - 2 2 6 22 22 22 18 18 18 2 2 6
40611 -149 149 149 253 253 253 253 253 253 253 253 253
40612 -253 253 253 253 253 253 253 253 253 253 253 253
40613 -253 253 253 253 253 253 234 234 234 242 242 242
40614 -253 253 253 253 253 253 253 253 253 253 253 253
40615 -253 253 253 253 253 253 253 253 253 253 253 253
40616 -253 253 253 253 253 253 253 253 253 253 253 253
40617 -253 253 253 253 253 253 253 253 253 253 253 253
40618 -253 253 253 253 253 253 206 206 206 2 2 6
40619 - 2 2 6 2 2 6 2 2 6 38 38 38
40620 - 2 2 6 2 2 6 2 2 6 2 2 6
40621 - 6 6 6 86 86 86 46 46 46 14 14 14
40622 - 0 0 0 0 0 0 0 0 0 0 0 0
40623 - 0 0 0 0 0 0 0 0 0 0 0 0
40624 - 0 0 0 0 0 0 0 0 0 0 0 0
40625 - 0 0 0 0 0 0 0 0 0 0 0 0
40626 - 0 0 0 0 0 0 0 0 0 0 0 0
40627 - 0 0 0 0 0 0 0 0 0 0 0 0
40628 - 0 0 0 0 0 0 0 0 0 6 6 6
40629 - 18 18 18 46 46 46 86 86 86 18 18 18
40630 - 2 2 6 34 34 34 10 10 10 6 6 6
40631 -210 210 210 253 253 253 253 253 253 253 253 253
40632 -253 253 253 253 253 253 253 253 253 253 253 253
40633 -253 253 253 253 253 253 234 234 234 242 242 242
40634 -253 253 253 253 253 253 253 253 253 253 253 253
40635 -253 253 253 253 253 253 253 253 253 253 253 253
40636 -253 253 253 253 253 253 253 253 253 253 253 253
40637 -253 253 253 253 253 253 253 253 253 253 253 253
40638 -253 253 253 253 253 253 221 221 221 6 6 6
40639 - 2 2 6 2 2 6 6 6 6 30 30 30
40640 - 2 2 6 2 2 6 2 2 6 2 2 6
40641 - 2 2 6 82 82 82 54 54 54 18 18 18
40642 - 6 6 6 0 0 0 0 0 0 0 0 0
40643 - 0 0 0 0 0 0 0 0 0 0 0 0
40644 - 0 0 0 0 0 0 0 0 0 0 0 0
40645 - 0 0 0 0 0 0 0 0 0 0 0 0
40646 - 0 0 0 0 0 0 0 0 0 0 0 0
40647 - 0 0 0 0 0 0 0 0 0 0 0 0
40648 - 0 0 0 0 0 0 0 0 0 10 10 10
40649 - 26 26 26 66 66 66 62 62 62 2 2 6
40650 - 2 2 6 38 38 38 10 10 10 26 26 26
40651 -238 238 238 253 253 253 253 253 253 253 253 253
40652 -253 253 253 253 253 253 253 253 253 253 253 253
40653 -253 253 253 253 253 253 231 231 231 238 238 238
40654 -253 253 253 253 253 253 253 253 253 253 253 253
40655 -253 253 253 253 253 253 253 253 253 253 253 253
40656 -253 253 253 253 253 253 253 253 253 253 253 253
40657 -253 253 253 253 253 253 253 253 253 253 253 253
40658 -253 253 253 253 253 253 231 231 231 6 6 6
40659 - 2 2 6 2 2 6 10 10 10 30 30 30
40660 - 2 2 6 2 2 6 2 2 6 2 2 6
40661 - 2 2 6 66 66 66 58 58 58 22 22 22
40662 - 6 6 6 0 0 0 0 0 0 0 0 0
40663 - 0 0 0 0 0 0 0 0 0 0 0 0
40664 - 0 0 0 0 0 0 0 0 0 0 0 0
40665 - 0 0 0 0 0 0 0 0 0 0 0 0
40666 - 0 0 0 0 0 0 0 0 0 0 0 0
40667 - 0 0 0 0 0 0 0 0 0 0 0 0
40668 - 0 0 0 0 0 0 0 0 0 10 10 10
40669 - 38 38 38 78 78 78 6 6 6 2 2 6
40670 - 2 2 6 46 46 46 14 14 14 42 42 42
40671 -246 246 246 253 253 253 253 253 253 253 253 253
40672 -253 253 253 253 253 253 253 253 253 253 253 253
40673 -253 253 253 253 253 253 231 231 231 242 242 242
40674 -253 253 253 253 253 253 253 253 253 253 253 253
40675 -253 253 253 253 253 253 253 253 253 253 253 253
40676 -253 253 253 253 253 253 253 253 253 253 253 253
40677 -253 253 253 253 253 253 253 253 253 253 253 253
40678 -253 253 253 253 253 253 234 234 234 10 10 10
40679 - 2 2 6 2 2 6 22 22 22 14 14 14
40680 - 2 2 6 2 2 6 2 2 6 2 2 6
40681 - 2 2 6 66 66 66 62 62 62 22 22 22
40682 - 6 6 6 0 0 0 0 0 0 0 0 0
40683 - 0 0 0 0 0 0 0 0 0 0 0 0
40684 - 0 0 0 0 0 0 0 0 0 0 0 0
40685 - 0 0 0 0 0 0 0 0 0 0 0 0
40686 - 0 0 0 0 0 0 0 0 0 0 0 0
40687 - 0 0 0 0 0 0 0 0 0 0 0 0
40688 - 0 0 0 0 0 0 6 6 6 18 18 18
40689 - 50 50 50 74 74 74 2 2 6 2 2 6
40690 - 14 14 14 70 70 70 34 34 34 62 62 62
40691 -250 250 250 253 253 253 253 253 253 253 253 253
40692 -253 253 253 253 253 253 253 253 253 253 253 253
40693 -253 253 253 253 253 253 231 231 231 246 246 246
40694 -253 253 253 253 253 253 253 253 253 253 253 253
40695 -253 253 253 253 253 253 253 253 253 253 253 253
40696 -253 253 253 253 253 253 253 253 253 253 253 253
40697 -253 253 253 253 253 253 253 253 253 253 253 253
40698 -253 253 253 253 253 253 234 234 234 14 14 14
40699 - 2 2 6 2 2 6 30 30 30 2 2 6
40700 - 2 2 6 2 2 6 2 2 6 2 2 6
40701 - 2 2 6 66 66 66 62 62 62 22 22 22
40702 - 6 6 6 0 0 0 0 0 0 0 0 0
40703 - 0 0 0 0 0 0 0 0 0 0 0 0
40704 - 0 0 0 0 0 0 0 0 0 0 0 0
40705 - 0 0 0 0 0 0 0 0 0 0 0 0
40706 - 0 0 0 0 0 0 0 0 0 0 0 0
40707 - 0 0 0 0 0 0 0 0 0 0 0 0
40708 - 0 0 0 0 0 0 6 6 6 18 18 18
40709 - 54 54 54 62 62 62 2 2 6 2 2 6
40710 - 2 2 6 30 30 30 46 46 46 70 70 70
40711 -250 250 250 253 253 253 253 253 253 253 253 253
40712 -253 253 253 253 253 253 253 253 253 253 253 253
40713 -253 253 253 253 253 253 231 231 231 246 246 246
40714 -253 253 253 253 253 253 253 253 253 253 253 253
40715 -253 253 253 253 253 253 253 253 253 253 253 253
40716 -253 253 253 253 253 253 253 253 253 253 253 253
40717 -253 253 253 253 253 253 253 253 253 253 253 253
40718 -253 253 253 253 253 253 226 226 226 10 10 10
40719 - 2 2 6 6 6 6 30 30 30 2 2 6
40720 - 2 2 6 2 2 6 2 2 6 2 2 6
40721 - 2 2 6 66 66 66 58 58 58 22 22 22
40722 - 6 6 6 0 0 0 0 0 0 0 0 0
40723 - 0 0 0 0 0 0 0 0 0 0 0 0
40724 - 0 0 0 0 0 0 0 0 0 0 0 0
40725 - 0 0 0 0 0 0 0 0 0 0 0 0
40726 - 0 0 0 0 0 0 0 0 0 0 0 0
40727 - 0 0 0 0 0 0 0 0 0 0 0 0
40728 - 0 0 0 0 0 0 6 6 6 22 22 22
40729 - 58 58 58 62 62 62 2 2 6 2 2 6
40730 - 2 2 6 2 2 6 30 30 30 78 78 78
40731 -250 250 250 253 253 253 253 253 253 253 253 253
40732 -253 253 253 253 253 253 253 253 253 253 253 253
40733 -253 253 253 253 253 253 231 231 231 246 246 246
40734 -253 253 253 253 253 253 253 253 253 253 253 253
40735 -253 253 253 253 253 253 253 253 253 253 253 253
40736 -253 253 253 253 253 253 253 253 253 253 253 253
40737 -253 253 253 253 253 253 253 253 253 253 253 253
40738 -253 253 253 253 253 253 206 206 206 2 2 6
40739 - 22 22 22 34 34 34 18 14 6 22 22 22
40740 - 26 26 26 18 18 18 6 6 6 2 2 6
40741 - 2 2 6 82 82 82 54 54 54 18 18 18
40742 - 6 6 6 0 0 0 0 0 0 0 0 0
40743 - 0 0 0 0 0 0 0 0 0 0 0 0
40744 - 0 0 0 0 0 0 0 0 0 0 0 0
40745 - 0 0 0 0 0 0 0 0 0 0 0 0
40746 - 0 0 0 0 0 0 0 0 0 0 0 0
40747 - 0 0 0 0 0 0 0 0 0 0 0 0
40748 - 0 0 0 0 0 0 6 6 6 26 26 26
40749 - 62 62 62 106 106 106 74 54 14 185 133 11
40750 -210 162 10 121 92 8 6 6 6 62 62 62
40751 -238 238 238 253 253 253 253 253 253 253 253 253
40752 -253 253 253 253 253 253 253 253 253 253 253 253
40753 -253 253 253 253 253 253 231 231 231 246 246 246
40754 -253 253 253 253 253 253 253 253 253 253 253 253
40755 -253 253 253 253 253 253 253 253 253 253 253 253
40756 -253 253 253 253 253 253 253 253 253 253 253 253
40757 -253 253 253 253 253 253 253 253 253 253 253 253
40758 -253 253 253 253 253 253 158 158 158 18 18 18
40759 - 14 14 14 2 2 6 2 2 6 2 2 6
40760 - 6 6 6 18 18 18 66 66 66 38 38 38
40761 - 6 6 6 94 94 94 50 50 50 18 18 18
40762 - 6 6 6 0 0 0 0 0 0 0 0 0
40763 - 0 0 0 0 0 0 0 0 0 0 0 0
40764 - 0 0 0 0 0 0 0 0 0 0 0 0
40765 - 0 0 0 0 0 0 0 0 0 0 0 0
40766 - 0 0 0 0 0 0 0 0 0 0 0 0
40767 - 0 0 0 0 0 0 0 0 0 6 6 6
40768 - 10 10 10 10 10 10 18 18 18 38 38 38
40769 - 78 78 78 142 134 106 216 158 10 242 186 14
40770 -246 190 14 246 190 14 156 118 10 10 10 10
40771 - 90 90 90 238 238 238 253 253 253 253 253 253
40772 -253 253 253 253 253 253 253 253 253 253 253 253
40773 -253 253 253 253 253 253 231 231 231 250 250 250
40774 -253 253 253 253 253 253 253 253 253 253 253 253
40775 -253 253 253 253 253 253 253 253 253 253 253 253
40776 -253 253 253 253 253 253 253 253 253 253 253 253
40777 -253 253 253 253 253 253 253 253 253 246 230 190
40778 -238 204 91 238 204 91 181 142 44 37 26 9
40779 - 2 2 6 2 2 6 2 2 6 2 2 6
40780 - 2 2 6 2 2 6 38 38 38 46 46 46
40781 - 26 26 26 106 106 106 54 54 54 18 18 18
40782 - 6 6 6 0 0 0 0 0 0 0 0 0
40783 - 0 0 0 0 0 0 0 0 0 0 0 0
40784 - 0 0 0 0 0 0 0 0 0 0 0 0
40785 - 0 0 0 0 0 0 0 0 0 0 0 0
40786 - 0 0 0 0 0 0 0 0 0 0 0 0
40787 - 0 0 0 6 6 6 14 14 14 22 22 22
40788 - 30 30 30 38 38 38 50 50 50 70 70 70
40789 -106 106 106 190 142 34 226 170 11 242 186 14
40790 -246 190 14 246 190 14 246 190 14 154 114 10
40791 - 6 6 6 74 74 74 226 226 226 253 253 253
40792 -253 253 253 253 253 253 253 253 253 253 253 253
40793 -253 253 253 253 253 253 231 231 231 250 250 250
40794 -253 253 253 253 253 253 253 253 253 253 253 253
40795 -253 253 253 253 253 253 253 253 253 253 253 253
40796 -253 253 253 253 253 253 253 253 253 253 253 253
40797 -253 253 253 253 253 253 253 253 253 228 184 62
40798 -241 196 14 241 208 19 232 195 16 38 30 10
40799 - 2 2 6 2 2 6 2 2 6 2 2 6
40800 - 2 2 6 6 6 6 30 30 30 26 26 26
40801 -203 166 17 154 142 90 66 66 66 26 26 26
40802 - 6 6 6 0 0 0 0 0 0 0 0 0
40803 - 0 0 0 0 0 0 0 0 0 0 0 0
40804 - 0 0 0 0 0 0 0 0 0 0 0 0
40805 - 0 0 0 0 0 0 0 0 0 0 0 0
40806 - 0 0 0 0 0 0 0 0 0 0 0 0
40807 - 6 6 6 18 18 18 38 38 38 58 58 58
40808 - 78 78 78 86 86 86 101 101 101 123 123 123
40809 -175 146 61 210 150 10 234 174 13 246 186 14
40810 -246 190 14 246 190 14 246 190 14 238 190 10
40811 -102 78 10 2 2 6 46 46 46 198 198 198
40812 -253 253 253 253 253 253 253 253 253 253 253 253
40813 -253 253 253 253 253 253 234 234 234 242 242 242
40814 -253 253 253 253 253 253 253 253 253 253 253 253
40815 -253 253 253 253 253 253 253 253 253 253 253 253
40816 -253 253 253 253 253 253 253 253 253 253 253 253
40817 -253 253 253 253 253 253 253 253 253 224 178 62
40818 -242 186 14 241 196 14 210 166 10 22 18 6
40819 - 2 2 6 2 2 6 2 2 6 2 2 6
40820 - 2 2 6 2 2 6 6 6 6 121 92 8
40821 -238 202 15 232 195 16 82 82 82 34 34 34
40822 - 10 10 10 0 0 0 0 0 0 0 0 0
40823 - 0 0 0 0 0 0 0 0 0 0 0 0
40824 - 0 0 0 0 0 0 0 0 0 0 0 0
40825 - 0 0 0 0 0 0 0 0 0 0 0 0
40826 - 0 0 0 0 0 0 0 0 0 0 0 0
40827 - 14 14 14 38 38 38 70 70 70 154 122 46
40828 -190 142 34 200 144 11 197 138 11 197 138 11
40829 -213 154 11 226 170 11 242 186 14 246 190 14
40830 -246 190 14 246 190 14 246 190 14 246 190 14
40831 -225 175 15 46 32 6 2 2 6 22 22 22
40832 -158 158 158 250 250 250 253 253 253 253 253 253
40833 -253 253 253 253 253 253 253 253 253 253 253 253
40834 -253 253 253 253 253 253 253 253 253 253 253 253
40835 -253 253 253 253 253 253 253 253 253 253 253 253
40836 -253 253 253 253 253 253 253 253 253 253 253 253
40837 -253 253 253 250 250 250 242 242 242 224 178 62
40838 -239 182 13 236 186 11 213 154 11 46 32 6
40839 - 2 2 6 2 2 6 2 2 6 2 2 6
40840 - 2 2 6 2 2 6 61 42 6 225 175 15
40841 -238 190 10 236 186 11 112 100 78 42 42 42
40842 - 14 14 14 0 0 0 0 0 0 0 0 0
40843 - 0 0 0 0 0 0 0 0 0 0 0 0
40844 - 0 0 0 0 0 0 0 0 0 0 0 0
40845 - 0 0 0 0 0 0 0 0 0 0 0 0
40846 - 0 0 0 0 0 0 0 0 0 6 6 6
40847 - 22 22 22 54 54 54 154 122 46 213 154 11
40848 -226 170 11 230 174 11 226 170 11 226 170 11
40849 -236 178 12 242 186 14 246 190 14 246 190 14
40850 -246 190 14 246 190 14 246 190 14 246 190 14
40851 -241 196 14 184 144 12 10 10 10 2 2 6
40852 - 6 6 6 116 116 116 242 242 242 253 253 253
40853 -253 253 253 253 253 253 253 253 253 253 253 253
40854 -253 253 253 253 253 253 253 253 253 253 253 253
40855 -253 253 253 253 253 253 253 253 253 253 253 253
40856 -253 253 253 253 253 253 253 253 253 253 253 253
40857 -253 253 253 231 231 231 198 198 198 214 170 54
40858 -236 178 12 236 178 12 210 150 10 137 92 6
40859 - 18 14 6 2 2 6 2 2 6 2 2 6
40860 - 6 6 6 70 47 6 200 144 11 236 178 12
40861 -239 182 13 239 182 13 124 112 88 58 58 58
40862 - 22 22 22 6 6 6 0 0 0 0 0 0
40863 - 0 0 0 0 0 0 0 0 0 0 0 0
40864 - 0 0 0 0 0 0 0 0 0 0 0 0
40865 - 0 0 0 0 0 0 0 0 0 0 0 0
40866 - 0 0 0 0 0 0 0 0 0 10 10 10
40867 - 30 30 30 70 70 70 180 133 36 226 170 11
40868 -239 182 13 242 186 14 242 186 14 246 186 14
40869 -246 190 14 246 190 14 246 190 14 246 190 14
40870 -246 190 14 246 190 14 246 190 14 246 190 14
40871 -246 190 14 232 195 16 98 70 6 2 2 6
40872 - 2 2 6 2 2 6 66 66 66 221 221 221
40873 -253 253 253 253 253 253 253 253 253 253 253 253
40874 -253 253 253 253 253 253 253 253 253 253 253 253
40875 -253 253 253 253 253 253 253 253 253 253 253 253
40876 -253 253 253 253 253 253 253 253 253 253 253 253
40877 -253 253 253 206 206 206 198 198 198 214 166 58
40878 -230 174 11 230 174 11 216 158 10 192 133 9
40879 -163 110 8 116 81 8 102 78 10 116 81 8
40880 -167 114 7 197 138 11 226 170 11 239 182 13
40881 -242 186 14 242 186 14 162 146 94 78 78 78
40882 - 34 34 34 14 14 14 6 6 6 0 0 0
40883 - 0 0 0 0 0 0 0 0 0 0 0 0
40884 - 0 0 0 0 0 0 0 0 0 0 0 0
40885 - 0 0 0 0 0 0 0 0 0 0 0 0
40886 - 0 0 0 0 0 0 0 0 0 6 6 6
40887 - 30 30 30 78 78 78 190 142 34 226 170 11
40888 -239 182 13 246 190 14 246 190 14 246 190 14
40889 -246 190 14 246 190 14 246 190 14 246 190 14
40890 -246 190 14 246 190 14 246 190 14 246 190 14
40891 -246 190 14 241 196 14 203 166 17 22 18 6
40892 - 2 2 6 2 2 6 2 2 6 38 38 38
40893 -218 218 218 253 253 253 253 253 253 253 253 253
40894 -253 253 253 253 253 253 253 253 253 253 253 253
40895 -253 253 253 253 253 253 253 253 253 253 253 253
40896 -253 253 253 253 253 253 253 253 253 253 253 253
40897 -250 250 250 206 206 206 198 198 198 202 162 69
40898 -226 170 11 236 178 12 224 166 10 210 150 10
40899 -200 144 11 197 138 11 192 133 9 197 138 11
40900 -210 150 10 226 170 11 242 186 14 246 190 14
40901 -246 190 14 246 186 14 225 175 15 124 112 88
40902 - 62 62 62 30 30 30 14 14 14 6 6 6
40903 - 0 0 0 0 0 0 0 0 0 0 0 0
40904 - 0 0 0 0 0 0 0 0 0 0 0 0
40905 - 0 0 0 0 0 0 0 0 0 0 0 0
40906 - 0 0 0 0 0 0 0 0 0 10 10 10
40907 - 30 30 30 78 78 78 174 135 50 224 166 10
40908 -239 182 13 246 190 14 246 190 14 246 190 14
40909 -246 190 14 246 190 14 246 190 14 246 190 14
40910 -246 190 14 246 190 14 246 190 14 246 190 14
40911 -246 190 14 246 190 14 241 196 14 139 102 15
40912 - 2 2 6 2 2 6 2 2 6 2 2 6
40913 - 78 78 78 250 250 250 253 253 253 253 253 253
40914 -253 253 253 253 253 253 253 253 253 253 253 253
40915 -253 253 253 253 253 253 253 253 253 253 253 253
40916 -253 253 253 253 253 253 253 253 253 253 253 253
40917 -250 250 250 214 214 214 198 198 198 190 150 46
40918 -219 162 10 236 178 12 234 174 13 224 166 10
40919 -216 158 10 213 154 11 213 154 11 216 158 10
40920 -226 170 11 239 182 13 246 190 14 246 190 14
40921 -246 190 14 246 190 14 242 186 14 206 162 42
40922 -101 101 101 58 58 58 30 30 30 14 14 14
40923 - 6 6 6 0 0 0 0 0 0 0 0 0
40924 - 0 0 0 0 0 0 0 0 0 0 0 0
40925 - 0 0 0 0 0 0 0 0 0 0 0 0
40926 - 0 0 0 0 0 0 0 0 0 10 10 10
40927 - 30 30 30 74 74 74 174 135 50 216 158 10
40928 -236 178 12 246 190 14 246 190 14 246 190 14
40929 -246 190 14 246 190 14 246 190 14 246 190 14
40930 -246 190 14 246 190 14 246 190 14 246 190 14
40931 -246 190 14 246 190 14 241 196 14 226 184 13
40932 - 61 42 6 2 2 6 2 2 6 2 2 6
40933 - 22 22 22 238 238 238 253 253 253 253 253 253
40934 -253 253 253 253 253 253 253 253 253 253 253 253
40935 -253 253 253 253 253 253 253 253 253 253 253 253
40936 -253 253 253 253 253 253 253 253 253 253 253 253
40937 -253 253 253 226 226 226 187 187 187 180 133 36
40938 -216 158 10 236 178 12 239 182 13 236 178 12
40939 -230 174 11 226 170 11 226 170 11 230 174 11
40940 -236 178 12 242 186 14 246 190 14 246 190 14
40941 -246 190 14 246 190 14 246 186 14 239 182 13
40942 -206 162 42 106 106 106 66 66 66 34 34 34
40943 - 14 14 14 6 6 6 0 0 0 0 0 0
40944 - 0 0 0 0 0 0 0 0 0 0 0 0
40945 - 0 0 0 0 0 0 0 0 0 0 0 0
40946 - 0 0 0 0 0 0 0 0 0 6 6 6
40947 - 26 26 26 70 70 70 163 133 67 213 154 11
40948 -236 178 12 246 190 14 246 190 14 246 190 14
40949 -246 190 14 246 190 14 246 190 14 246 190 14
40950 -246 190 14 246 190 14 246 190 14 246 190 14
40951 -246 190 14 246 190 14 246 190 14 241 196 14
40952 -190 146 13 18 14 6 2 2 6 2 2 6
40953 - 46 46 46 246 246 246 253 253 253 253 253 253
40954 -253 253 253 253 253 253 253 253 253 253 253 253
40955 -253 253 253 253 253 253 253 253 253 253 253 253
40956 -253 253 253 253 253 253 253 253 253 253 253 253
40957 -253 253 253 221 221 221 86 86 86 156 107 11
40958 -216 158 10 236 178 12 242 186 14 246 186 14
40959 -242 186 14 239 182 13 239 182 13 242 186 14
40960 -242 186 14 246 186 14 246 190 14 246 190 14
40961 -246 190 14 246 190 14 246 190 14 246 190 14
40962 -242 186 14 225 175 15 142 122 72 66 66 66
40963 - 30 30 30 10 10 10 0 0 0 0 0 0
40964 - 0 0 0 0 0 0 0 0 0 0 0 0
40965 - 0 0 0 0 0 0 0 0 0 0 0 0
40966 - 0 0 0 0 0 0 0 0 0 6 6 6
40967 - 26 26 26 70 70 70 163 133 67 210 150 10
40968 -236 178 12 246 190 14 246 190 14 246 190 14
40969 -246 190 14 246 190 14 246 190 14 246 190 14
40970 -246 190 14 246 190 14 246 190 14 246 190 14
40971 -246 190 14 246 190 14 246 190 14 246 190 14
40972 -232 195 16 121 92 8 34 34 34 106 106 106
40973 -221 221 221 253 253 253 253 253 253 253 253 253
40974 -253 253 253 253 253 253 253 253 253 253 253 253
40975 -253 253 253 253 253 253 253 253 253 253 253 253
40976 -253 253 253 253 253 253 253 253 253 253 253 253
40977 -242 242 242 82 82 82 18 14 6 163 110 8
40978 -216 158 10 236 178 12 242 186 14 246 190 14
40979 -246 190 14 246 190 14 246 190 14 246 190 14
40980 -246 190 14 246 190 14 246 190 14 246 190 14
40981 -246 190 14 246 190 14 246 190 14 246 190 14
40982 -246 190 14 246 190 14 242 186 14 163 133 67
40983 - 46 46 46 18 18 18 6 6 6 0 0 0
40984 - 0 0 0 0 0 0 0 0 0 0 0 0
40985 - 0 0 0 0 0 0 0 0 0 0 0 0
40986 - 0 0 0 0 0 0 0 0 0 10 10 10
40987 - 30 30 30 78 78 78 163 133 67 210 150 10
40988 -236 178 12 246 186 14 246 190 14 246 190 14
40989 -246 190 14 246 190 14 246 190 14 246 190 14
40990 -246 190 14 246 190 14 246 190 14 246 190 14
40991 -246 190 14 246 190 14 246 190 14 246 190 14
40992 -241 196 14 215 174 15 190 178 144 253 253 253
40993 -253 253 253 253 253 253 253 253 253 253 253 253
40994 -253 253 253 253 253 253 253 253 253 253 253 253
40995 -253 253 253 253 253 253 253 253 253 253 253 253
40996 -253 253 253 253 253 253 253 253 253 218 218 218
40997 - 58 58 58 2 2 6 22 18 6 167 114 7
40998 -216 158 10 236 178 12 246 186 14 246 190 14
40999 -246 190 14 246 190 14 246 190 14 246 190 14
41000 -246 190 14 246 190 14 246 190 14 246 190 14
41001 -246 190 14 246 190 14 246 190 14 246 190 14
41002 -246 190 14 246 186 14 242 186 14 190 150 46
41003 - 54 54 54 22 22 22 6 6 6 0 0 0
41004 - 0 0 0 0 0 0 0 0 0 0 0 0
41005 - 0 0 0 0 0 0 0 0 0 0 0 0
41006 - 0 0 0 0 0 0 0 0 0 14 14 14
41007 - 38 38 38 86 86 86 180 133 36 213 154 11
41008 -236 178 12 246 186 14 246 190 14 246 190 14
41009 -246 190 14 246 190 14 246 190 14 246 190 14
41010 -246 190 14 246 190 14 246 190 14 246 190 14
41011 -246 190 14 246 190 14 246 190 14 246 190 14
41012 -246 190 14 232 195 16 190 146 13 214 214 214
41013 -253 253 253 253 253 253 253 253 253 253 253 253
41014 -253 253 253 253 253 253 253 253 253 253 253 253
41015 -253 253 253 253 253 253 253 253 253 253 253 253
41016 -253 253 253 250 250 250 170 170 170 26 26 26
41017 - 2 2 6 2 2 6 37 26 9 163 110 8
41018 -219 162 10 239 182 13 246 186 14 246 190 14
41019 -246 190 14 246 190 14 246 190 14 246 190 14
41020 -246 190 14 246 190 14 246 190 14 246 190 14
41021 -246 190 14 246 190 14 246 190 14 246 190 14
41022 -246 186 14 236 178 12 224 166 10 142 122 72
41023 - 46 46 46 18 18 18 6 6 6 0 0 0
41024 - 0 0 0 0 0 0 0 0 0 0 0 0
41025 - 0 0 0 0 0 0 0 0 0 0 0 0
41026 - 0 0 0 0 0 0 6 6 6 18 18 18
41027 - 50 50 50 109 106 95 192 133 9 224 166 10
41028 -242 186 14 246 190 14 246 190 14 246 190 14
41029 -246 190 14 246 190 14 246 190 14 246 190 14
41030 -246 190 14 246 190 14 246 190 14 246 190 14
41031 -246 190 14 246 190 14 246 190 14 246 190 14
41032 -242 186 14 226 184 13 210 162 10 142 110 46
41033 -226 226 226 253 253 253 253 253 253 253 253 253
41034 -253 253 253 253 253 253 253 253 253 253 253 253
41035 -253 253 253 253 253 253 253 253 253 253 253 253
41036 -198 198 198 66 66 66 2 2 6 2 2 6
41037 - 2 2 6 2 2 6 50 34 6 156 107 11
41038 -219 162 10 239 182 13 246 186 14 246 190 14
41039 -246 190 14 246 190 14 246 190 14 246 190 14
41040 -246 190 14 246 190 14 246 190 14 246 190 14
41041 -246 190 14 246 190 14 246 190 14 242 186 14
41042 -234 174 13 213 154 11 154 122 46 66 66 66
41043 - 30 30 30 10 10 10 0 0 0 0 0 0
41044 - 0 0 0 0 0 0 0 0 0 0 0 0
41045 - 0 0 0 0 0 0 0 0 0 0 0 0
41046 - 0 0 0 0 0 0 6 6 6 22 22 22
41047 - 58 58 58 154 121 60 206 145 10 234 174 13
41048 -242 186 14 246 186 14 246 190 14 246 190 14
41049 -246 190 14 246 190 14 246 190 14 246 190 14
41050 -246 190 14 246 190 14 246 190 14 246 190 14
41051 -246 190 14 246 190 14 246 190 14 246 190 14
41052 -246 186 14 236 178 12 210 162 10 163 110 8
41053 - 61 42 6 138 138 138 218 218 218 250 250 250
41054 -253 253 253 253 253 253 253 253 253 250 250 250
41055 -242 242 242 210 210 210 144 144 144 66 66 66
41056 - 6 6 6 2 2 6 2 2 6 2 2 6
41057 - 2 2 6 2 2 6 61 42 6 163 110 8
41058 -216 158 10 236 178 12 246 190 14 246 190 14
41059 -246 190 14 246 190 14 246 190 14 246 190 14
41060 -246 190 14 246 190 14 246 190 14 246 190 14
41061 -246 190 14 239 182 13 230 174 11 216 158 10
41062 -190 142 34 124 112 88 70 70 70 38 38 38
41063 - 18 18 18 6 6 6 0 0 0 0 0 0
41064 - 0 0 0 0 0 0 0 0 0 0 0 0
41065 - 0 0 0 0 0 0 0 0 0 0 0 0
41066 - 0 0 0 0 0 0 6 6 6 22 22 22
41067 - 62 62 62 168 124 44 206 145 10 224 166 10
41068 -236 178 12 239 182 13 242 186 14 242 186 14
41069 -246 186 14 246 190 14 246 190 14 246 190 14
41070 -246 190 14 246 190 14 246 190 14 246 190 14
41071 -246 190 14 246 190 14 246 190 14 246 190 14
41072 -246 190 14 236 178 12 216 158 10 175 118 6
41073 - 80 54 7 2 2 6 6 6 6 30 30 30
41074 - 54 54 54 62 62 62 50 50 50 38 38 38
41075 - 14 14 14 2 2 6 2 2 6 2 2 6
41076 - 2 2 6 2 2 6 2 2 6 2 2 6
41077 - 2 2 6 6 6 6 80 54 7 167 114 7
41078 -213 154 11 236 178 12 246 190 14 246 190 14
41079 -246 190 14 246 190 14 246 190 14 246 190 14
41080 -246 190 14 242 186 14 239 182 13 239 182 13
41081 -230 174 11 210 150 10 174 135 50 124 112 88
41082 - 82 82 82 54 54 54 34 34 34 18 18 18
41083 - 6 6 6 0 0 0 0 0 0 0 0 0
41084 - 0 0 0 0 0 0 0 0 0 0 0 0
41085 - 0 0 0 0 0 0 0 0 0 0 0 0
41086 - 0 0 0 0 0 0 6 6 6 18 18 18
41087 - 50 50 50 158 118 36 192 133 9 200 144 11
41088 -216 158 10 219 162 10 224 166 10 226 170 11
41089 -230 174 11 236 178 12 239 182 13 239 182 13
41090 -242 186 14 246 186 14 246 190 14 246 190 14
41091 -246 190 14 246 190 14 246 190 14 246 190 14
41092 -246 186 14 230 174 11 210 150 10 163 110 8
41093 -104 69 6 10 10 10 2 2 6 2 2 6
41094 - 2 2 6 2 2 6 2 2 6 2 2 6
41095 - 2 2 6 2 2 6 2 2 6 2 2 6
41096 - 2 2 6 2 2 6 2 2 6 2 2 6
41097 - 2 2 6 6 6 6 91 60 6 167 114 7
41098 -206 145 10 230 174 11 242 186 14 246 190 14
41099 -246 190 14 246 190 14 246 186 14 242 186 14
41100 -239 182 13 230 174 11 224 166 10 213 154 11
41101 -180 133 36 124 112 88 86 86 86 58 58 58
41102 - 38 38 38 22 22 22 10 10 10 6 6 6
41103 - 0 0 0 0 0 0 0 0 0 0 0 0
41104 - 0 0 0 0 0 0 0 0 0 0 0 0
41105 - 0 0 0 0 0 0 0 0 0 0 0 0
41106 - 0 0 0 0 0 0 0 0 0 14 14 14
41107 - 34 34 34 70 70 70 138 110 50 158 118 36
41108 -167 114 7 180 123 7 192 133 9 197 138 11
41109 -200 144 11 206 145 10 213 154 11 219 162 10
41110 -224 166 10 230 174 11 239 182 13 242 186 14
41111 -246 186 14 246 186 14 246 186 14 246 186 14
41112 -239 182 13 216 158 10 185 133 11 152 99 6
41113 -104 69 6 18 14 6 2 2 6 2 2 6
41114 - 2 2 6 2 2 6 2 2 6 2 2 6
41115 - 2 2 6 2 2 6 2 2 6 2 2 6
41116 - 2 2 6 2 2 6 2 2 6 2 2 6
41117 - 2 2 6 6 6 6 80 54 7 152 99 6
41118 -192 133 9 219 162 10 236 178 12 239 182 13
41119 -246 186 14 242 186 14 239 182 13 236 178 12
41120 -224 166 10 206 145 10 192 133 9 154 121 60
41121 - 94 94 94 62 62 62 42 42 42 22 22 22
41122 - 14 14 14 6 6 6 0 0 0 0 0 0
41123 - 0 0 0 0 0 0 0 0 0 0 0 0
41124 - 0 0 0 0 0 0 0 0 0 0 0 0
41125 - 0 0 0 0 0 0 0 0 0 0 0 0
41126 - 0 0 0 0 0 0 0 0 0 6 6 6
41127 - 18 18 18 34 34 34 58 58 58 78 78 78
41128 -101 98 89 124 112 88 142 110 46 156 107 11
41129 -163 110 8 167 114 7 175 118 6 180 123 7
41130 -185 133 11 197 138 11 210 150 10 219 162 10
41131 -226 170 11 236 178 12 236 178 12 234 174 13
41132 -219 162 10 197 138 11 163 110 8 130 83 6
41133 - 91 60 6 10 10 10 2 2 6 2 2 6
41134 - 18 18 18 38 38 38 38 38 38 38 38 38
41135 - 38 38 38 38 38 38 38 38 38 38 38 38
41136 - 38 38 38 38 38 38 26 26 26 2 2 6
41137 - 2 2 6 6 6 6 70 47 6 137 92 6
41138 -175 118 6 200 144 11 219 162 10 230 174 11
41139 -234 174 13 230 174 11 219 162 10 210 150 10
41140 -192 133 9 163 110 8 124 112 88 82 82 82
41141 - 50 50 50 30 30 30 14 14 14 6 6 6
41142 - 0 0 0 0 0 0 0 0 0 0 0 0
41143 - 0 0 0 0 0 0 0 0 0 0 0 0
41144 - 0 0 0 0 0 0 0 0 0 0 0 0
41145 - 0 0 0 0 0 0 0 0 0 0 0 0
41146 - 0 0 0 0 0 0 0 0 0 0 0 0
41147 - 6 6 6 14 14 14 22 22 22 34 34 34
41148 - 42 42 42 58 58 58 74 74 74 86 86 86
41149 -101 98 89 122 102 70 130 98 46 121 87 25
41150 -137 92 6 152 99 6 163 110 8 180 123 7
41151 -185 133 11 197 138 11 206 145 10 200 144 11
41152 -180 123 7 156 107 11 130 83 6 104 69 6
41153 - 50 34 6 54 54 54 110 110 110 101 98 89
41154 - 86 86 86 82 82 82 78 78 78 78 78 78
41155 - 78 78 78 78 78 78 78 78 78 78 78 78
41156 - 78 78 78 82 82 82 86 86 86 94 94 94
41157 -106 106 106 101 101 101 86 66 34 124 80 6
41158 -156 107 11 180 123 7 192 133 9 200 144 11
41159 -206 145 10 200 144 11 192 133 9 175 118 6
41160 -139 102 15 109 106 95 70 70 70 42 42 42
41161 - 22 22 22 10 10 10 0 0 0 0 0 0
41162 - 0 0 0 0 0 0 0 0 0 0 0 0
41163 - 0 0 0 0 0 0 0 0 0 0 0 0
41164 - 0 0 0 0 0 0 0 0 0 0 0 0
41165 - 0 0 0 0 0 0 0 0 0 0 0 0
41166 - 0 0 0 0 0 0 0 0 0 0 0 0
41167 - 0 0 0 0 0 0 6 6 6 10 10 10
41168 - 14 14 14 22 22 22 30 30 30 38 38 38
41169 - 50 50 50 62 62 62 74 74 74 90 90 90
41170 -101 98 89 112 100 78 121 87 25 124 80 6
41171 -137 92 6 152 99 6 152 99 6 152 99 6
41172 -138 86 6 124 80 6 98 70 6 86 66 30
41173 -101 98 89 82 82 82 58 58 58 46 46 46
41174 - 38 38 38 34 34 34 34 34 34 34 34 34
41175 - 34 34 34 34 34 34 34 34 34 34 34 34
41176 - 34 34 34 34 34 34 38 38 38 42 42 42
41177 - 54 54 54 82 82 82 94 86 76 91 60 6
41178 -134 86 6 156 107 11 167 114 7 175 118 6
41179 -175 118 6 167 114 7 152 99 6 121 87 25
41180 -101 98 89 62 62 62 34 34 34 18 18 18
41181 - 6 6 6 0 0 0 0 0 0 0 0 0
41182 - 0 0 0 0 0 0 0 0 0 0 0 0
41183 - 0 0 0 0 0 0 0 0 0 0 0 0
41184 - 0 0 0 0 0 0 0 0 0 0 0 0
41185 - 0 0 0 0 0 0 0 0 0 0 0 0
41186 - 0 0 0 0 0 0 0 0 0 0 0 0
41187 - 0 0 0 0 0 0 0 0 0 0 0 0
41188 - 0 0 0 6 6 6 6 6 6 10 10 10
41189 - 18 18 18 22 22 22 30 30 30 42 42 42
41190 - 50 50 50 66 66 66 86 86 86 101 98 89
41191 -106 86 58 98 70 6 104 69 6 104 69 6
41192 -104 69 6 91 60 6 82 62 34 90 90 90
41193 - 62 62 62 38 38 38 22 22 22 14 14 14
41194 - 10 10 10 10 10 10 10 10 10 10 10 10
41195 - 10 10 10 10 10 10 6 6 6 10 10 10
41196 - 10 10 10 10 10 10 10 10 10 14 14 14
41197 - 22 22 22 42 42 42 70 70 70 89 81 66
41198 - 80 54 7 104 69 6 124 80 6 137 92 6
41199 -134 86 6 116 81 8 100 82 52 86 86 86
41200 - 58 58 58 30 30 30 14 14 14 6 6 6
41201 - 0 0 0 0 0 0 0 0 0 0 0 0
41202 - 0 0 0 0 0 0 0 0 0 0 0 0
41203 - 0 0 0 0 0 0 0 0 0 0 0 0
41204 - 0 0 0 0 0 0 0 0 0 0 0 0
41205 - 0 0 0 0 0 0 0 0 0 0 0 0
41206 - 0 0 0 0 0 0 0 0 0 0 0 0
41207 - 0 0 0 0 0 0 0 0 0 0 0 0
41208 - 0 0 0 0 0 0 0 0 0 0 0 0
41209 - 0 0 0 6 6 6 10 10 10 14 14 14
41210 - 18 18 18 26 26 26 38 38 38 54 54 54
41211 - 70 70 70 86 86 86 94 86 76 89 81 66
41212 - 89 81 66 86 86 86 74 74 74 50 50 50
41213 - 30 30 30 14 14 14 6 6 6 0 0 0
41214 - 0 0 0 0 0 0 0 0 0 0 0 0
41215 - 0 0 0 0 0 0 0 0 0 0 0 0
41216 - 0 0 0 0 0 0 0 0 0 0 0 0
41217 - 6 6 6 18 18 18 34 34 34 58 58 58
41218 - 82 82 82 89 81 66 89 81 66 89 81 66
41219 - 94 86 66 94 86 76 74 74 74 50 50 50
41220 - 26 26 26 14 14 14 6 6 6 0 0 0
41221 - 0 0 0 0 0 0 0 0 0 0 0 0
41222 - 0 0 0 0 0 0 0 0 0 0 0 0
41223 - 0 0 0 0 0 0 0 0 0 0 0 0
41224 - 0 0 0 0 0 0 0 0 0 0 0 0
41225 - 0 0 0 0 0 0 0 0 0 0 0 0
41226 - 0 0 0 0 0 0 0 0 0 0 0 0
41227 - 0 0 0 0 0 0 0 0 0 0 0 0
41228 - 0 0 0 0 0 0 0 0 0 0 0 0
41229 - 0 0 0 0 0 0 0 0 0 0 0 0
41230 - 6 6 6 6 6 6 14 14 14 18 18 18
41231 - 30 30 30 38 38 38 46 46 46 54 54 54
41232 - 50 50 50 42 42 42 30 30 30 18 18 18
41233 - 10 10 10 0 0 0 0 0 0 0 0 0
41234 - 0 0 0 0 0 0 0 0 0 0 0 0
41235 - 0 0 0 0 0 0 0 0 0 0 0 0
41236 - 0 0 0 0 0 0 0 0 0 0 0 0
41237 - 0 0 0 6 6 6 14 14 14 26 26 26
41238 - 38 38 38 50 50 50 58 58 58 58 58 58
41239 - 54 54 54 42 42 42 30 30 30 18 18 18
41240 - 10 10 10 0 0 0 0 0 0 0 0 0
41241 - 0 0 0 0 0 0 0 0 0 0 0 0
41242 - 0 0 0 0 0 0 0 0 0 0 0 0
41243 - 0 0 0 0 0 0 0 0 0 0 0 0
41244 - 0 0 0 0 0 0 0 0 0 0 0 0
41245 - 0 0 0 0 0 0 0 0 0 0 0 0
41246 - 0 0 0 0 0 0 0 0 0 0 0 0
41247 - 0 0 0 0 0 0 0 0 0 0 0 0
41248 - 0 0 0 0 0 0 0 0 0 0 0 0
41249 - 0 0 0 0 0 0 0 0 0 0 0 0
41250 - 0 0 0 0 0 0 0 0 0 6 6 6
41251 - 6 6 6 10 10 10 14 14 14 18 18 18
41252 - 18 18 18 14 14 14 10 10 10 6 6 6
41253 - 0 0 0 0 0 0 0 0 0 0 0 0
41254 - 0 0 0 0 0 0 0 0 0 0 0 0
41255 - 0 0 0 0 0 0 0 0 0 0 0 0
41256 - 0 0 0 0 0 0 0 0 0 0 0 0
41257 - 0 0 0 0 0 0 0 0 0 6 6 6
41258 - 14 14 14 18 18 18 22 22 22 22 22 22
41259 - 18 18 18 14 14 14 10 10 10 6 6 6
41260 - 0 0 0 0 0 0 0 0 0 0 0 0
41261 - 0 0 0 0 0 0 0 0 0 0 0 0
41262 - 0 0 0 0 0 0 0 0 0 0 0 0
41263 - 0 0 0 0 0 0 0 0 0 0 0 0
41264 - 0 0 0 0 0 0 0 0 0 0 0 0
41265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4
41279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4
41293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4
41307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4
41321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4
41335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4
41349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41353 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
41354 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
41355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41358 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41359 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4
41363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41367 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41368 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41369 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41372 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41373 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41374 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41375 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41376 +4 4 4 4 4 4
41377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41381 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41382 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41383 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41386 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41387 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41388 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41389 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41390 +4 4 4 4 4 4
41391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41395 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41396 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41397 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41399 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41400 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41401 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41402 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41403 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41404 +4 4 4 4 4 4
41405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41408 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41409 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41410 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41411 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41412 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41413 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41414 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41415 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41416 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41417 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41418 +4 4 4 4 4 4
41419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41422 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41423 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41424 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41425 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41426 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41427 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41428 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41429 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41430 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41431 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41432 +4 4 4 4 4 4
41433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41435 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41436 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41437 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41438 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41439 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41440 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41441 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41442 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41443 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41444 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41445 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41446 +4 4 4 4 4 4
41447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41449 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41450 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41451 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41452 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41453 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41454 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41455 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41456 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41457 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41458 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41459 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41460 +4 4 4 4 4 4
41461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41463 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41464 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41465 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41466 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41467 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41468 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41469 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41470 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41471 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41472 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41473 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41474 +4 4 4 4 4 4
41475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41477 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41478 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41479 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41480 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41481 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41482 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41483 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41484 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41485 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41486 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41487 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41488 +4 4 4 4 4 4
41489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41490 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41491 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41492 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41493 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41494 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41495 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41496 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41497 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41498 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41499 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41500 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41501 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41502 +4 4 4 4 4 4
41503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41504 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41505 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41506 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41507 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41508 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41509 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41510 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41511 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41512 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41513 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41514 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41515 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41516 +0 0 0 4 4 4
41517 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41518 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41519 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41520 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41521 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41522 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41523 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41524 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41525 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41526 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41527 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41528 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41529 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41530 +2 0 0 0 0 0
41531 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41532 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41533 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41534 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41535 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41536 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41537 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41538 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41539 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41540 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41541 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41542 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41543 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41544 +37 38 37 0 0 0
41545 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41546 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41547 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41548 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41549 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41550 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41551 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41552 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41553 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41554 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41555 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41556 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41557 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41558 +85 115 134 4 0 0
41559 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41560 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41561 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41562 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41563 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41564 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41565 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41566 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41567 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41568 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41569 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41570 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41571 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41572 +60 73 81 4 0 0
41573 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41574 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41575 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41576 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41577 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41578 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41579 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41580 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41581 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41582 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41583 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41584 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41585 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41586 +16 19 21 4 0 0
41587 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41588 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41589 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41590 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41591 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41592 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41593 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41594 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41595 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41596 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41597 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41598 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41599 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41600 +4 0 0 4 3 3
41601 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41602 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41603 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41605 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41606 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41607 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41608 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41609 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41610 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41611 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41612 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41613 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41614 +3 2 2 4 4 4
41615 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41616 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41617 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41618 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41619 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41620 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41621 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41622 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41623 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41624 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41625 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41626 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41627 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41628 +4 4 4 4 4 4
41629 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41630 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41631 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41632 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41633 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41634 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41635 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41636 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41637 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41638 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41639 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41640 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41641 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41642 +4 4 4 4 4 4
41643 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41644 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41645 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41646 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41647 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41648 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41649 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41650 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41651 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41652 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41653 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41654 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41655 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41656 +5 5 5 5 5 5
41657 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41658 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41659 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41660 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41661 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41662 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41663 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41664 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41665 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41666 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41667 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41668 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41669 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41670 +5 5 5 4 4 4
41671 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41672 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41673 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41674 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41675 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41676 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41677 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41678 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41679 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41680 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41681 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41682 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41684 +4 4 4 4 4 4
41685 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41686 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41687 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41688 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41689 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41690 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41691 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41692 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41693 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41694 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41695 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41696 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41698 +4 4 4 4 4 4
41699 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41700 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41701 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41702 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41703 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41704 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41705 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41706 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41707 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41708 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41709 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41712 +4 4 4 4 4 4
41713 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41714 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41715 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41716 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41717 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41718 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41719 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41720 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41721 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41722 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41723 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41726 +4 4 4 4 4 4
41727 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41728 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41729 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41730 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41731 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41732 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41733 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41734 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41735 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41736 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41737 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41740 +4 4 4 4 4 4
41741 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41742 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41743 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41744 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41745 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41746 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41747 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41748 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41749 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41750 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41751 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41754 +4 4 4 4 4 4
41755 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41756 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41757 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41758 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41759 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41760 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41761 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41762 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41763 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41764 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41765 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41768 +4 4 4 4 4 4
41769 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41770 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41771 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41772 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41773 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41774 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41775 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41776 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41777 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41778 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41779 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41782 +4 4 4 4 4 4
41783 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41784 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41785 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41786 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41787 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41788 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41789 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41790 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41791 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41792 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41793 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41796 +4 4 4 4 4 4
41797 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41798 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41799 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41800 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41801 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41802 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41803 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41804 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41805 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41806 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41807 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41810 +4 4 4 4 4 4
41811 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41812 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41813 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41814 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41815 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41816 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41817 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41818 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41819 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41820 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41821 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41824 +4 4 4 4 4 4
41825 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41826 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41827 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41828 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41829 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41830 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41831 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41832 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41833 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41834 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41835 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41838 +4 4 4 4 4 4
41839 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41840 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41841 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41842 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41843 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41844 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41845 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41846 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41847 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41848 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41849 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41852 +4 4 4 4 4 4
41853 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41854 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41855 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41856 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41857 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41858 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41859 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41860 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41861 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41862 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41863 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41866 +4 4 4 4 4 4
41867 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41868 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41869 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41870 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41871 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41872 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41873 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41874 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41875 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41876 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41877 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41880 +4 4 4 4 4 4
41881 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41882 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41883 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41884 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41885 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41886 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41887 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41888 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41889 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41890 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41891 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41894 +4 4 4 4 4 4
41895 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41896 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41897 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41898 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41899 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41900 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41901 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41902 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41903 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41904 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41905 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41908 +4 4 4 4 4 4
41909 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41910 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41911 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41912 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41913 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41914 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41915 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41916 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41917 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41918 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41919 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41922 +4 4 4 4 4 4
41923 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41924 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41925 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41926 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41927 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41928 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41929 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41930 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41931 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41932 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41933 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41936 +4 4 4 4 4 4
41937 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41938 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41939 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41940 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41941 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41942 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41943 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41944 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41945 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41946 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41947 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41950 +4 4 4 4 4 4
41951 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41952 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41953 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41954 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41955 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41956 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41957 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41958 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41959 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41960 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41961 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41964 +4 4 4 4 4 4
41965 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41966 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41967 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41968 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41969 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41970 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41971 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41972 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41973 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41974 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41975 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41978 +4 4 4 4 4 4
41979 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41980 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41981 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41982 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41983 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41984 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41985 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41986 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41987 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41988 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41989 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41992 +4 4 4 4 4 4
41993 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41994 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41995 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41996 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41997 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41998 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41999 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
42000 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
42001 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
42002 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
42003 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42006 +4 4 4 4 4 4
42007 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
42008 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
42009 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
42010 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
42011 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
42012 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
42013 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
42014 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
42015 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
42016 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
42017 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42020 +4 4 4 4 4 4
42021 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
42022 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
42023 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
42024 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
42025 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
42026 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
42027 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
42028 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
42029 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
42030 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
42031 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42034 +4 4 4 4 4 4
42035 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
42036 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
42037 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
42038 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
42039 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
42040 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
42041 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
42042 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
42043 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
42044 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
42045 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42048 +4 4 4 4 4 4
42049 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
42050 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
42051 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
42052 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
42053 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
42054 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
42055 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42056 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
42057 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
42058 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
42059 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42062 +4 4 4 4 4 4
42063 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
42064 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
42065 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
42066 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
42067 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
42068 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
42069 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
42070 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
42071 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
42072 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
42073 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42076 +4 4 4 4 4 4
42077 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
42078 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
42079 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
42080 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
42081 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
42082 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
42083 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
42084 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
42085 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
42086 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
42087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42090 +4 4 4 4 4 4
42091 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42092 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
42093 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
42094 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
42095 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
42096 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
42097 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
42098 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
42099 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
42100 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
42101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42104 +4 4 4 4 4 4
42105 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
42106 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
42107 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
42108 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
42109 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
42110 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
42111 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
42112 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
42113 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
42114 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
42115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42118 +4 4 4 4 4 4
42119 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
42120 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
42121 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
42122 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
42123 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
42124 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
42125 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
42126 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
42127 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
42128 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42132 +4 4 4 4 4 4
42133 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
42134 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42135 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
42136 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
42137 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
42138 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
42139 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
42140 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
42141 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
42142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42146 +4 4 4 4 4 4
42147 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
42148 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
42149 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
42150 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
42151 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
42152 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
42153 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
42154 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
42155 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
42156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42160 +4 4 4 4 4 4
42161 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42162 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
42163 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
42164 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
42165 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
42166 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
42167 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
42168 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
42169 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42174 +4 4 4 4 4 4
42175 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
42176 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
42177 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42178 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
42179 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
42180 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
42181 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
42182 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
42183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42188 +4 4 4 4 4 4
42189 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42190 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
42191 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
42192 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
42193 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
42194 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
42195 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
42196 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
42197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42202 +4 4 4 4 4 4
42203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42204 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
42205 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42206 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
42207 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
42208 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
42209 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
42210 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
42211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42216 +4 4 4 4 4 4
42217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42218 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
42219 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
42220 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
42221 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
42222 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
42223 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
42224 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
42225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42230 +4 4 4 4 4 4
42231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42232 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
42233 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
42234 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42235 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
42236 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
42237 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
42238 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42244 +4 4 4 4 4 4
42245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42247 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42248 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
42249 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
42250 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
42251 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
42252 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42258 +4 4 4 4 4 4
42259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42262 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42263 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
42264 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
42265 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
42266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42272 +4 4 4 4 4 4
42273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42276 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42277 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42278 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
42279 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
42280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42286 +4 4 4 4 4 4
42287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42290 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42291 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42292 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42293 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
42294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42300 +4 4 4 4 4 4
42301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42304 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
42305 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
42306 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
42307 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
42308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42314 +4 4 4 4 4 4
42315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42319 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
42320 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42321 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42328 +4 4 4 4 4 4
42329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42333 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
42334 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
42335 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42342 +4 4 4 4 4 4
42343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42347 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
42348 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
42349 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42356 +4 4 4 4 4 4
42357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42361 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
42362 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
42363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42370 +4 4 4 4 4 4
42371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42375 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42376 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
42377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42383 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42384 +4 4 4 4 4 4
42385 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
42386 index 8af6414..658c030 100644
42387 --- a/drivers/video/udlfb.c
42388 +++ b/drivers/video/udlfb.c
42389 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
42390 dlfb_urb_completion(urb);
42391
42392 error:
42393 - atomic_add(bytes_sent, &dev->bytes_sent);
42394 - atomic_add(bytes_identical, &dev->bytes_identical);
42395 - atomic_add(width*height*2, &dev->bytes_rendered);
42396 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42397 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42398 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
42399 end_cycles = get_cycles();
42400 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
42401 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42402 >> 10)), /* Kcycles */
42403 &dev->cpu_kcycles_used);
42404
42405 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
42406 dlfb_urb_completion(urb);
42407
42408 error:
42409 - atomic_add(bytes_sent, &dev->bytes_sent);
42410 - atomic_add(bytes_identical, &dev->bytes_identical);
42411 - atomic_add(bytes_rendered, &dev->bytes_rendered);
42412 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42413 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42414 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
42415 end_cycles = get_cycles();
42416 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
42417 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42418 >> 10)), /* Kcycles */
42419 &dev->cpu_kcycles_used);
42420 }
42421 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
42422 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42423 struct dlfb_data *dev = fb_info->par;
42424 return snprintf(buf, PAGE_SIZE, "%u\n",
42425 - atomic_read(&dev->bytes_rendered));
42426 + atomic_read_unchecked(&dev->bytes_rendered));
42427 }
42428
42429 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42430 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42431 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42432 struct dlfb_data *dev = fb_info->par;
42433 return snprintf(buf, PAGE_SIZE, "%u\n",
42434 - atomic_read(&dev->bytes_identical));
42435 + atomic_read_unchecked(&dev->bytes_identical));
42436 }
42437
42438 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42439 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42440 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42441 struct dlfb_data *dev = fb_info->par;
42442 return snprintf(buf, PAGE_SIZE, "%u\n",
42443 - atomic_read(&dev->bytes_sent));
42444 + atomic_read_unchecked(&dev->bytes_sent));
42445 }
42446
42447 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42448 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42449 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42450 struct dlfb_data *dev = fb_info->par;
42451 return snprintf(buf, PAGE_SIZE, "%u\n",
42452 - atomic_read(&dev->cpu_kcycles_used));
42453 + atomic_read_unchecked(&dev->cpu_kcycles_used));
42454 }
42455
42456 static ssize_t edid_show(
42457 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
42458 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42459 struct dlfb_data *dev = fb_info->par;
42460
42461 - atomic_set(&dev->bytes_rendered, 0);
42462 - atomic_set(&dev->bytes_identical, 0);
42463 - atomic_set(&dev->bytes_sent, 0);
42464 - atomic_set(&dev->cpu_kcycles_used, 0);
42465 + atomic_set_unchecked(&dev->bytes_rendered, 0);
42466 + atomic_set_unchecked(&dev->bytes_identical, 0);
42467 + atomic_set_unchecked(&dev->bytes_sent, 0);
42468 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42469
42470 return count;
42471 }
42472 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
42473 index b0e2a42..e2df3ad 100644
42474 --- a/drivers/video/uvesafb.c
42475 +++ b/drivers/video/uvesafb.c
42476 @@ -19,6 +19,7 @@
42477 #include <linux/io.h>
42478 #include <linux/mutex.h>
42479 #include <linux/slab.h>
42480 +#include <linux/moduleloader.h>
42481 #include <video/edid.h>
42482 #include <video/uvesafb.h>
42483 #ifdef CONFIG_X86
42484 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
42485 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42486 par->pmi_setpal = par->ypan = 0;
42487 } else {
42488 +
42489 +#ifdef CONFIG_PAX_KERNEXEC
42490 +#ifdef CONFIG_MODULES
42491 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42492 +#endif
42493 + if (!par->pmi_code) {
42494 + par->pmi_setpal = par->ypan = 0;
42495 + return 0;
42496 + }
42497 +#endif
42498 +
42499 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42500 + task->t.regs.edi);
42501 +
42502 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42503 + pax_open_kernel();
42504 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42505 + pax_close_kernel();
42506 +
42507 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42508 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42509 +#else
42510 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42511 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42512 +#endif
42513 +
42514 printk(KERN_INFO "uvesafb: protected mode interface info at "
42515 "%04x:%04x\n",
42516 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42517 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42518 par->ypan = ypan;
42519
42520 if (par->pmi_setpal || par->ypan) {
42521 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42522 if (__supported_pte_mask & _PAGE_NX) {
42523 par->pmi_setpal = par->ypan = 0;
42524 printk(KERN_WARNING "uvesafb: NX protection is actively."
42525 "We have better not to use the PMI.\n");
42526 - } else {
42527 + } else
42528 +#endif
42529 uvesafb_vbe_getpmi(task, par);
42530 - }
42531 }
42532 #else
42533 /* The protected mode interface is not available on non-x86. */
42534 @@ -1836,6 +1860,11 @@ out:
42535 if (par->vbe_modes)
42536 kfree(par->vbe_modes);
42537
42538 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42539 + if (par->pmi_code)
42540 + module_free_exec(NULL, par->pmi_code);
42541 +#endif
42542 +
42543 framebuffer_release(info);
42544 return err;
42545 }
42546 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
42547 kfree(par->vbe_state_orig);
42548 if (par->vbe_state_saved)
42549 kfree(par->vbe_state_saved);
42550 +
42551 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42552 + if (par->pmi_code)
42553 + module_free_exec(NULL, par->pmi_code);
42554 +#endif
42555 +
42556 }
42557
42558 framebuffer_release(info);
42559 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42560 index 501b340..86bd4cf 100644
42561 --- a/drivers/video/vesafb.c
42562 +++ b/drivers/video/vesafb.c
42563 @@ -9,6 +9,7 @@
42564 */
42565
42566 #include <linux/module.h>
42567 +#include <linux/moduleloader.h>
42568 #include <linux/kernel.h>
42569 #include <linux/errno.h>
42570 #include <linux/string.h>
42571 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
42572 static int vram_total __initdata; /* Set total amount of memory */
42573 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42574 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42575 -static void (*pmi_start)(void) __read_mostly;
42576 -static void (*pmi_pal) (void) __read_mostly;
42577 +static void (*pmi_start)(void) __read_only;
42578 +static void (*pmi_pal) (void) __read_only;
42579 static int depth __read_mostly;
42580 static int vga_compat __read_mostly;
42581 /* --------------------------------------------------------------------- */
42582 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
42583 unsigned int size_vmode;
42584 unsigned int size_remap;
42585 unsigned int size_total;
42586 + void *pmi_code = NULL;
42587
42588 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42589 return -ENODEV;
42590 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
42591 size_remap = size_total;
42592 vesafb_fix.smem_len = size_remap;
42593
42594 -#ifndef __i386__
42595 - screen_info.vesapm_seg = 0;
42596 -#endif
42597 -
42598 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42599 printk(KERN_WARNING
42600 "vesafb: cannot reserve video memory at 0x%lx\n",
42601 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
42602 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42603 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42604
42605 +#ifdef __i386__
42606 +
42607 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42608 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
42609 + if (!pmi_code)
42610 +#elif !defined(CONFIG_PAX_KERNEXEC)
42611 + if (0)
42612 +#endif
42613 +
42614 +#endif
42615 + screen_info.vesapm_seg = 0;
42616 +
42617 if (screen_info.vesapm_seg) {
42618 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42619 - screen_info.vesapm_seg,screen_info.vesapm_off);
42620 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42621 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42622 }
42623
42624 if (screen_info.vesapm_seg < 0xc000)
42625 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
42626
42627 if (ypan || pmi_setpal) {
42628 unsigned short *pmi_base;
42629 +
42630 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42631 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42632 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42633 +
42634 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42635 + pax_open_kernel();
42636 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42637 +#else
42638 + pmi_code = pmi_base;
42639 +#endif
42640 +
42641 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42642 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42643 +
42644 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42645 + pmi_start = ktva_ktla(pmi_start);
42646 + pmi_pal = ktva_ktla(pmi_pal);
42647 + pax_close_kernel();
42648 +#endif
42649 +
42650 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42651 if (pmi_base[3]) {
42652 printk(KERN_INFO "vesafb: pmi: ports = ");
42653 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
42654 info->node, info->fix.id);
42655 return 0;
42656 err:
42657 +
42658 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42659 + module_free_exec(NULL, pmi_code);
42660 +#endif
42661 +
42662 if (info->screen_base)
42663 iounmap(info->screen_base);
42664 framebuffer_release(info);
42665 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42666 index 88714ae..16c2e11 100644
42667 --- a/drivers/video/via/via_clock.h
42668 +++ b/drivers/video/via/via_clock.h
42669 @@ -56,7 +56,7 @@ struct via_clock {
42670
42671 void (*set_engine_pll_state)(u8 state);
42672 void (*set_engine_pll)(struct via_pll_config config);
42673 -};
42674 +} __no_const;
42675
42676
42677 static inline u32 get_pll_internal_frequency(u32 ref_freq,
42678 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42679 index e56c934..fc22f4b 100644
42680 --- a/drivers/xen/xen-pciback/conf_space.h
42681 +++ b/drivers/xen/xen-pciback/conf_space.h
42682 @@ -44,15 +44,15 @@ struct config_field {
42683 struct {
42684 conf_dword_write write;
42685 conf_dword_read read;
42686 - } dw;
42687 + } __no_const dw;
42688 struct {
42689 conf_word_write write;
42690 conf_word_read read;
42691 - } w;
42692 + } __no_const w;
42693 struct {
42694 conf_byte_write write;
42695 conf_byte_read read;
42696 - } b;
42697 + } __no_const b;
42698 } u;
42699 struct list_head list;
42700 };
42701 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42702 index 57ccb75..f6d05f8 100644
42703 --- a/fs/9p/vfs_inode.c
42704 +++ b/fs/9p/vfs_inode.c
42705 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42706 void
42707 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42708 {
42709 - char *s = nd_get_link(nd);
42710 + const char *s = nd_get_link(nd);
42711
42712 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42713 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
42714 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42715 index 0225742..1cd4732 100644
42716 --- a/fs/Kconfig.binfmt
42717 +++ b/fs/Kconfig.binfmt
42718 @@ -89,7 +89,7 @@ config HAVE_AOUT
42719
42720 config BINFMT_AOUT
42721 tristate "Kernel support for a.out and ECOFF binaries"
42722 - depends on HAVE_AOUT
42723 + depends on HAVE_AOUT && BROKEN
42724 ---help---
42725 A.out (Assembler.OUTput) is a set of formats for libraries and
42726 executables used in the earliest versions of UNIX. Linux used
42727 diff --git a/fs/aio.c b/fs/aio.c
42728 index 55c4c76..11aee6f 100644
42729 --- a/fs/aio.c
42730 +++ b/fs/aio.c
42731 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42732 size += sizeof(struct io_event) * nr_events;
42733 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42734
42735 - if (nr_pages < 0)
42736 + if (nr_pages <= 0)
42737 return -EINVAL;
42738
42739 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42740 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42741 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42742 {
42743 ssize_t ret;
42744 + struct iovec iovstack;
42745
42746 #ifdef CONFIG_COMPAT
42747 if (compat)
42748 ret = compat_rw_copy_check_uvector(type,
42749 (struct compat_iovec __user *)kiocb->ki_buf,
42750 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42751 + kiocb->ki_nbytes, 1, &iovstack,
42752 &kiocb->ki_iovec);
42753 else
42754 #endif
42755 ret = rw_copy_check_uvector(type,
42756 (struct iovec __user *)kiocb->ki_buf,
42757 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42758 + kiocb->ki_nbytes, 1, &iovstack,
42759 &kiocb->ki_iovec);
42760 if (ret < 0)
42761 goto out;
42762 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42763 if (ret < 0)
42764 goto out;
42765
42766 + if (kiocb->ki_iovec == &iovstack) {
42767 + kiocb->ki_inline_vec = iovstack;
42768 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
42769 + }
42770 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42771 kiocb->ki_cur_seg = 0;
42772 /* ki_nbytes/left now reflect bytes instead of segs */
42773 diff --git a/fs/attr.c b/fs/attr.c
42774 index 0da9095..1386693 100644
42775 --- a/fs/attr.c
42776 +++ b/fs/attr.c
42777 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42778 unsigned long limit;
42779
42780 limit = rlimit(RLIMIT_FSIZE);
42781 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42782 if (limit != RLIM_INFINITY && offset > limit)
42783 goto out_sig;
42784 if (offset > inode->i_sb->s_maxbytes)
42785 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42786 index da8876d..4456166 100644
42787 --- a/fs/autofs4/waitq.c
42788 +++ b/fs/autofs4/waitq.c
42789 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
42790 {
42791 unsigned long sigpipe, flags;
42792 mm_segment_t fs;
42793 - const char *data = (const char *)addr;
42794 + const char __user *data = (const char __force_user *)addr;
42795 ssize_t wr = 0;
42796
42797 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
42798 @@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
42799 return 1;
42800 }
42801
42802 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42803 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
42804 +#endif
42805 +
42806 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42807 enum autofs_notify notify)
42808 {
42809 @@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42810
42811 /* If this is a direct mount request create a dummy name */
42812 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
42813 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42814 + /* this name does get written to userland via autofs4_write() */
42815 + qstr.len = sprintf(name, "%08lx", atomic_inc_return_unchecked(&autofs_dummy_name_id));
42816 +#else
42817 qstr.len = sprintf(name, "%p", dentry);
42818 +#endif
42819 else {
42820 qstr.len = autofs4_getpath(sbi, dentry, &name);
42821 if (!qstr.len) {
42822 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42823 index e18da23..affc30e 100644
42824 --- a/fs/befs/linuxvfs.c
42825 +++ b/fs/befs/linuxvfs.c
42826 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42827 {
42828 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42829 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42830 - char *link = nd_get_link(nd);
42831 + const char *link = nd_get_link(nd);
42832 if (!IS_ERR(link))
42833 kfree(link);
42834 }
42835 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42836 index d146e18..12d1bd1 100644
42837 --- a/fs/binfmt_aout.c
42838 +++ b/fs/binfmt_aout.c
42839 @@ -16,6 +16,7 @@
42840 #include <linux/string.h>
42841 #include <linux/fs.h>
42842 #include <linux/file.h>
42843 +#include <linux/security.h>
42844 #include <linux/stat.h>
42845 #include <linux/fcntl.h>
42846 #include <linux/ptrace.h>
42847 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42848 #endif
42849 # define START_STACK(u) ((void __user *)u.start_stack)
42850
42851 + memset(&dump, 0, sizeof(dump));
42852 +
42853 fs = get_fs();
42854 set_fs(KERNEL_DS);
42855 has_dumped = 1;
42856 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42857
42858 /* If the size of the dump file exceeds the rlimit, then see what would happen
42859 if we wrote the stack, but not the data area. */
42860 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42861 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42862 dump.u_dsize = 0;
42863
42864 /* Make sure we have enough room to write the stack and data areas. */
42865 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42866 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42867 dump.u_ssize = 0;
42868
42869 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42870 rlim = rlimit(RLIMIT_DATA);
42871 if (rlim >= RLIM_INFINITY)
42872 rlim = ~0;
42873 +
42874 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42875 if (ex.a_data + ex.a_bss > rlim)
42876 return -ENOMEM;
42877
42878 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42879
42880 install_exec_creds(bprm);
42881
42882 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42883 + current->mm->pax_flags = 0UL;
42884 +#endif
42885 +
42886 +#ifdef CONFIG_PAX_PAGEEXEC
42887 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42888 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42889 +
42890 +#ifdef CONFIG_PAX_EMUTRAMP
42891 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42892 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42893 +#endif
42894 +
42895 +#ifdef CONFIG_PAX_MPROTECT
42896 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42897 + current->mm->pax_flags |= MF_PAX_MPROTECT;
42898 +#endif
42899 +
42900 + }
42901 +#endif
42902 +
42903 if (N_MAGIC(ex) == OMAGIC) {
42904 unsigned long text_addr, map_size;
42905 loff_t pos;
42906 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42907 }
42908
42909 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42910 - PROT_READ | PROT_WRITE | PROT_EXEC,
42911 + PROT_READ | PROT_WRITE,
42912 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42913 fd_offset + ex.a_text);
42914 if (error != N_DATADDR(ex)) {
42915 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42916 index 1b52956..725eca7 100644
42917 --- a/fs/binfmt_elf.c
42918 +++ b/fs/binfmt_elf.c
42919 @@ -32,6 +32,7 @@
42920 #include <linux/elf.h>
42921 #include <linux/utsname.h>
42922 #include <linux/coredump.h>
42923 +#include <linux/xattr.h>
42924 #include <asm/uaccess.h>
42925 #include <asm/param.h>
42926 #include <asm/page.h>
42927 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42928 #define elf_core_dump NULL
42929 #endif
42930
42931 +#ifdef CONFIG_PAX_MPROTECT
42932 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42933 +#endif
42934 +
42935 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42936 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42937 #else
42938 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
42939 .load_binary = load_elf_binary,
42940 .load_shlib = load_elf_library,
42941 .core_dump = elf_core_dump,
42942 +
42943 +#ifdef CONFIG_PAX_MPROTECT
42944 + .handle_mprotect= elf_handle_mprotect,
42945 +#endif
42946 +
42947 .min_coredump = ELF_EXEC_PAGESIZE,
42948 };
42949
42950 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
42951
42952 static int set_brk(unsigned long start, unsigned long end)
42953 {
42954 + unsigned long e = end;
42955 +
42956 start = ELF_PAGEALIGN(start);
42957 end = ELF_PAGEALIGN(end);
42958 if (end > start) {
42959 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
42960 if (BAD_ADDR(addr))
42961 return addr;
42962 }
42963 - current->mm->start_brk = current->mm->brk = end;
42964 + current->mm->start_brk = current->mm->brk = e;
42965 return 0;
42966 }
42967
42968 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42969 elf_addr_t __user *u_rand_bytes;
42970 const char *k_platform = ELF_PLATFORM;
42971 const char *k_base_platform = ELF_BASE_PLATFORM;
42972 - unsigned char k_rand_bytes[16];
42973 + u32 k_rand_bytes[4];
42974 int items;
42975 elf_addr_t *elf_info;
42976 int ei_index = 0;
42977 const struct cred *cred = current_cred();
42978 struct vm_area_struct *vma;
42979 + unsigned long saved_auxv[AT_VECTOR_SIZE];
42980
42981 /*
42982 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42983 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42984 * Generate 16 random bytes for userspace PRNG seeding.
42985 */
42986 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42987 - u_rand_bytes = (elf_addr_t __user *)
42988 - STACK_ALLOC(p, sizeof(k_rand_bytes));
42989 + srandom32(k_rand_bytes[0] ^ random32());
42990 + srandom32(k_rand_bytes[1] ^ random32());
42991 + srandom32(k_rand_bytes[2] ^ random32());
42992 + srandom32(k_rand_bytes[3] ^ random32());
42993 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42994 + u_rand_bytes = (elf_addr_t __user *) p;
42995 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42996 return -EFAULT;
42997
42998 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42999 return -EFAULT;
43000 current->mm->env_end = p;
43001
43002 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
43003 +
43004 /* Put the elf_info on the stack in the right place. */
43005 sp = (elf_addr_t __user *)envp + 1;
43006 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
43007 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
43008 return -EFAULT;
43009 return 0;
43010 }
43011 @@ -378,10 +397,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
43012 {
43013 struct elf_phdr *elf_phdata;
43014 struct elf_phdr *eppnt;
43015 - unsigned long load_addr = 0;
43016 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
43017 int load_addr_set = 0;
43018 unsigned long last_bss = 0, elf_bss = 0;
43019 - unsigned long error = ~0UL;
43020 + unsigned long error = -EINVAL;
43021 unsigned long total_size;
43022 int retval, i, size;
43023
43024 @@ -427,6 +446,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
43025 goto out_close;
43026 }
43027
43028 +#ifdef CONFIG_PAX_SEGMEXEC
43029 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
43030 + pax_task_size = SEGMEXEC_TASK_SIZE;
43031 +#endif
43032 +
43033 eppnt = elf_phdata;
43034 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
43035 if (eppnt->p_type == PT_LOAD) {
43036 @@ -470,8 +494,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
43037 k = load_addr + eppnt->p_vaddr;
43038 if (BAD_ADDR(k) ||
43039 eppnt->p_filesz > eppnt->p_memsz ||
43040 - eppnt->p_memsz > TASK_SIZE ||
43041 - TASK_SIZE - eppnt->p_memsz < k) {
43042 + eppnt->p_memsz > pax_task_size ||
43043 + pax_task_size - eppnt->p_memsz < k) {
43044 error = -ENOMEM;
43045 goto out_close;
43046 }
43047 @@ -523,6 +547,311 @@ out:
43048 return error;
43049 }
43050
43051 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
43052 +#ifdef CONFIG_PAX_SOFTMODE
43053 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
43054 +{
43055 + unsigned long pax_flags = 0UL;
43056 +
43057 +#ifdef CONFIG_PAX_PAGEEXEC
43058 + if (elf_phdata->p_flags & PF_PAGEEXEC)
43059 + pax_flags |= MF_PAX_PAGEEXEC;
43060 +#endif
43061 +
43062 +#ifdef CONFIG_PAX_SEGMEXEC
43063 + if (elf_phdata->p_flags & PF_SEGMEXEC)
43064 + pax_flags |= MF_PAX_SEGMEXEC;
43065 +#endif
43066 +
43067 +#ifdef CONFIG_PAX_EMUTRAMP
43068 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
43069 + pax_flags |= MF_PAX_EMUTRAMP;
43070 +#endif
43071 +
43072 +#ifdef CONFIG_PAX_MPROTECT
43073 + if (elf_phdata->p_flags & PF_MPROTECT)
43074 + pax_flags |= MF_PAX_MPROTECT;
43075 +#endif
43076 +
43077 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43078 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
43079 + pax_flags |= MF_PAX_RANDMMAP;
43080 +#endif
43081 +
43082 + return pax_flags;
43083 +}
43084 +#endif
43085 +
43086 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
43087 +{
43088 + unsigned long pax_flags = 0UL;
43089 +
43090 +#ifdef CONFIG_PAX_PAGEEXEC
43091 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
43092 + pax_flags |= MF_PAX_PAGEEXEC;
43093 +#endif
43094 +
43095 +#ifdef CONFIG_PAX_SEGMEXEC
43096 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
43097 + pax_flags |= MF_PAX_SEGMEXEC;
43098 +#endif
43099 +
43100 +#ifdef CONFIG_PAX_EMUTRAMP
43101 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
43102 + pax_flags |= MF_PAX_EMUTRAMP;
43103 +#endif
43104 +
43105 +#ifdef CONFIG_PAX_MPROTECT
43106 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
43107 + pax_flags |= MF_PAX_MPROTECT;
43108 +#endif
43109 +
43110 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43111 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
43112 + pax_flags |= MF_PAX_RANDMMAP;
43113 +#endif
43114 +
43115 + return pax_flags;
43116 +}
43117 +#endif
43118 +
43119 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43120 +#ifdef CONFIG_PAX_SOFTMODE
43121 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
43122 +{
43123 + unsigned long pax_flags = 0UL;
43124 +
43125 +#ifdef CONFIG_PAX_PAGEEXEC
43126 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
43127 + pax_flags |= MF_PAX_PAGEEXEC;
43128 +#endif
43129 +
43130 +#ifdef CONFIG_PAX_SEGMEXEC
43131 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
43132 + pax_flags |= MF_PAX_SEGMEXEC;
43133 +#endif
43134 +
43135 +#ifdef CONFIG_PAX_EMUTRAMP
43136 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
43137 + pax_flags |= MF_PAX_EMUTRAMP;
43138 +#endif
43139 +
43140 +#ifdef CONFIG_PAX_MPROTECT
43141 + if (pax_flags_softmode & MF_PAX_MPROTECT)
43142 + pax_flags |= MF_PAX_MPROTECT;
43143 +#endif
43144 +
43145 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43146 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
43147 + pax_flags |= MF_PAX_RANDMMAP;
43148 +#endif
43149 +
43150 + return pax_flags;
43151 +}
43152 +#endif
43153 +
43154 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
43155 +{
43156 + unsigned long pax_flags = 0UL;
43157 +
43158 +#ifdef CONFIG_PAX_PAGEEXEC
43159 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
43160 + pax_flags |= MF_PAX_PAGEEXEC;
43161 +#endif
43162 +
43163 +#ifdef CONFIG_PAX_SEGMEXEC
43164 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
43165 + pax_flags |= MF_PAX_SEGMEXEC;
43166 +#endif
43167 +
43168 +#ifdef CONFIG_PAX_EMUTRAMP
43169 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
43170 + pax_flags |= MF_PAX_EMUTRAMP;
43171 +#endif
43172 +
43173 +#ifdef CONFIG_PAX_MPROTECT
43174 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
43175 + pax_flags |= MF_PAX_MPROTECT;
43176 +#endif
43177 +
43178 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43179 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
43180 + pax_flags |= MF_PAX_RANDMMAP;
43181 +#endif
43182 +
43183 + return pax_flags;
43184 +}
43185 +#endif
43186 +
43187 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43188 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
43189 +{
43190 + unsigned long pax_flags = 0UL;
43191 +
43192 +#ifdef CONFIG_PAX_EI_PAX
43193 +
43194 +#ifdef CONFIG_PAX_PAGEEXEC
43195 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
43196 + pax_flags |= MF_PAX_PAGEEXEC;
43197 +#endif
43198 +
43199 +#ifdef CONFIG_PAX_SEGMEXEC
43200 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
43201 + pax_flags |= MF_PAX_SEGMEXEC;
43202 +#endif
43203 +
43204 +#ifdef CONFIG_PAX_EMUTRAMP
43205 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
43206 + pax_flags |= MF_PAX_EMUTRAMP;
43207 +#endif
43208 +
43209 +#ifdef CONFIG_PAX_MPROTECT
43210 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
43211 + pax_flags |= MF_PAX_MPROTECT;
43212 +#endif
43213 +
43214 +#ifdef CONFIG_PAX_ASLR
43215 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
43216 + pax_flags |= MF_PAX_RANDMMAP;
43217 +#endif
43218 +
43219 +#else
43220 +
43221 +#ifdef CONFIG_PAX_PAGEEXEC
43222 + pax_flags |= MF_PAX_PAGEEXEC;
43223 +#endif
43224 +
43225 +#ifdef CONFIG_PAX_SEGMEXEC
43226 + pax_flags |= MF_PAX_SEGMEXEC;
43227 +#endif
43228 +
43229 +#ifdef CONFIG_PAX_MPROTECT
43230 + pax_flags |= MF_PAX_MPROTECT;
43231 +#endif
43232 +
43233 +#ifdef CONFIG_PAX_RANDMMAP
43234 + if (randomize_va_space)
43235 + pax_flags |= MF_PAX_RANDMMAP;
43236 +#endif
43237 +
43238 +#endif
43239 +
43240 + return pax_flags;
43241 +}
43242 +
43243 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
43244 +{
43245 +
43246 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
43247 + unsigned long i;
43248 +
43249 + for (i = 0UL; i < elf_ex->e_phnum; i++)
43250 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
43251 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
43252 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
43253 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
43254 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
43255 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
43256 + return ~0UL;
43257 +
43258 +#ifdef CONFIG_PAX_SOFTMODE
43259 + if (pax_softmode)
43260 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
43261 + else
43262 +#endif
43263 +
43264 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
43265 + break;
43266 + }
43267 +#endif
43268 +
43269 + return ~0UL;
43270 +}
43271 +
43272 +static unsigned long pax_parse_xattr_pax(struct file * const file)
43273 +{
43274 +
43275 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43276 + ssize_t xattr_size, i;
43277 + unsigned char xattr_value[5];
43278 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
43279 +
43280 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
43281 + if (xattr_size <= 0)
43282 + return ~0UL;
43283 +
43284 + for (i = 0; i < xattr_size; i++)
43285 + switch (xattr_value[i]) {
43286 + default:
43287 + return ~0UL;
43288 +
43289 +#define parse_flag(option1, option2, flag) \
43290 + case option1: \
43291 + pax_flags_hardmode |= MF_PAX_##flag; \
43292 + break; \
43293 + case option2: \
43294 + pax_flags_softmode |= MF_PAX_##flag; \
43295 + break;
43296 +
43297 + parse_flag('p', 'P', PAGEEXEC);
43298 + parse_flag('e', 'E', EMUTRAMP);
43299 + parse_flag('m', 'M', MPROTECT);
43300 + parse_flag('r', 'R', RANDMMAP);
43301 + parse_flag('s', 'S', SEGMEXEC);
43302 +
43303 +#undef parse_flag
43304 + }
43305 +
43306 + if (pax_flags_hardmode & pax_flags_softmode)
43307 + return ~0UL;
43308 +
43309 +#ifdef CONFIG_PAX_SOFTMODE
43310 + if (pax_softmode)
43311 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
43312 + else
43313 +#endif
43314 +
43315 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
43316 +#else
43317 + return ~0UL;
43318 +#endif
43319 +
43320 +}
43321 +
43322 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
43323 +{
43324 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
43325 +
43326 + pax_flags = pax_parse_ei_pax(elf_ex);
43327 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
43328 + xattr_pax_flags = pax_parse_xattr_pax(file);
43329 +
43330 + if (pt_pax_flags == ~0UL)
43331 + pt_pax_flags = xattr_pax_flags;
43332 + else if (xattr_pax_flags == ~0UL)
43333 + xattr_pax_flags = pt_pax_flags;
43334 + if (pt_pax_flags != xattr_pax_flags)
43335 + return -EINVAL;
43336 + if (pt_pax_flags != ~0UL)
43337 + pax_flags = pt_pax_flags;
43338 +
43339 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43340 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43341 + if ((__supported_pte_mask & _PAGE_NX))
43342 + pax_flags &= ~MF_PAX_SEGMEXEC;
43343 + else
43344 + pax_flags &= ~MF_PAX_PAGEEXEC;
43345 + }
43346 +#endif
43347 +
43348 + if (0 > pax_check_flags(&pax_flags))
43349 + return -EINVAL;
43350 +
43351 + current->mm->pax_flags = pax_flags;
43352 + return 0;
43353 +}
43354 +#endif
43355 +
43356 /*
43357 * These are the functions used to load ELF style executables and shared
43358 * libraries. There is no binary dependent code anywhere else.
43359 @@ -539,6 +868,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
43360 {
43361 unsigned int random_variable = 0;
43362
43363 +#ifdef CONFIG_PAX_RANDUSTACK
43364 + if (randomize_va_space)
43365 + return stack_top - current->mm->delta_stack;
43366 +#endif
43367 +
43368 if ((current->flags & PF_RANDOMIZE) &&
43369 !(current->personality & ADDR_NO_RANDOMIZE)) {
43370 random_variable = get_random_int() & STACK_RND_MASK;
43371 @@ -557,7 +891,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43372 unsigned long load_addr = 0, load_bias = 0;
43373 int load_addr_set = 0;
43374 char * elf_interpreter = NULL;
43375 - unsigned long error;
43376 + unsigned long error = 0;
43377 struct elf_phdr *elf_ppnt, *elf_phdata;
43378 unsigned long elf_bss, elf_brk;
43379 int retval, i;
43380 @@ -567,11 +901,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43381 unsigned long start_code, end_code, start_data, end_data;
43382 unsigned long reloc_func_desc __maybe_unused = 0;
43383 int executable_stack = EXSTACK_DEFAULT;
43384 - unsigned long def_flags = 0;
43385 struct {
43386 struct elfhdr elf_ex;
43387 struct elfhdr interp_elf_ex;
43388 } *loc;
43389 + unsigned long pax_task_size = TASK_SIZE;
43390
43391 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
43392 if (!loc) {
43393 @@ -707,11 +1041,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43394 goto out_free_dentry;
43395
43396 /* OK, This is the point of no return */
43397 - current->mm->def_flags = def_flags;
43398 +
43399 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43400 + current->mm->pax_flags = 0UL;
43401 +#endif
43402 +
43403 +#ifdef CONFIG_PAX_DLRESOLVE
43404 + current->mm->call_dl_resolve = 0UL;
43405 +#endif
43406 +
43407 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
43408 + current->mm->call_syscall = 0UL;
43409 +#endif
43410 +
43411 +#ifdef CONFIG_PAX_ASLR
43412 + current->mm->delta_mmap = 0UL;
43413 + current->mm->delta_stack = 0UL;
43414 +#endif
43415 +
43416 + current->mm->def_flags = 0;
43417 +
43418 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43419 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
43420 + send_sig(SIGKILL, current, 0);
43421 + goto out_free_dentry;
43422 + }
43423 +#endif
43424 +
43425 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43426 + pax_set_initial_flags(bprm);
43427 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43428 + if (pax_set_initial_flags_func)
43429 + (pax_set_initial_flags_func)(bprm);
43430 +#endif
43431 +
43432 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43433 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
43434 + current->mm->context.user_cs_limit = PAGE_SIZE;
43435 + current->mm->def_flags |= VM_PAGEEXEC;
43436 + }
43437 +#endif
43438 +
43439 +#ifdef CONFIG_PAX_SEGMEXEC
43440 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43441 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43442 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43443 + pax_task_size = SEGMEXEC_TASK_SIZE;
43444 + current->mm->def_flags |= VM_NOHUGEPAGE;
43445 + }
43446 +#endif
43447 +
43448 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43449 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43450 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43451 + put_cpu();
43452 + }
43453 +#endif
43454
43455 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43456 may depend on the personality. */
43457 SET_PERSONALITY(loc->elf_ex);
43458 +
43459 +#ifdef CONFIG_PAX_ASLR
43460 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43461 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43462 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43463 + }
43464 +#endif
43465 +
43466 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43467 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43468 + executable_stack = EXSTACK_DISABLE_X;
43469 + current->personality &= ~READ_IMPLIES_EXEC;
43470 + } else
43471 +#endif
43472 +
43473 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43474 current->personality |= READ_IMPLIES_EXEC;
43475
43476 @@ -802,6 +1206,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43477 #else
43478 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43479 #endif
43480 +
43481 +#ifdef CONFIG_PAX_RANDMMAP
43482 + /* PaX: randomize base address at the default exe base if requested */
43483 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43484 +#ifdef CONFIG_SPARC64
43485 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43486 +#else
43487 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43488 +#endif
43489 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43490 + elf_flags |= MAP_FIXED;
43491 + }
43492 +#endif
43493 +
43494 }
43495
43496 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43497 @@ -834,9 +1252,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43498 * allowed task size. Note that p_filesz must always be
43499 * <= p_memsz so it is only necessary to check p_memsz.
43500 */
43501 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43502 - elf_ppnt->p_memsz > TASK_SIZE ||
43503 - TASK_SIZE - elf_ppnt->p_memsz < k) {
43504 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43505 + elf_ppnt->p_memsz > pax_task_size ||
43506 + pax_task_size - elf_ppnt->p_memsz < k) {
43507 /* set_brk can never work. Avoid overflows. */
43508 send_sig(SIGKILL, current, 0);
43509 retval = -EINVAL;
43510 @@ -875,11 +1293,41 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43511 goto out_free_dentry;
43512 }
43513 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43514 - send_sig(SIGSEGV, current, 0);
43515 - retval = -EFAULT; /* Nobody gets to see this, but.. */
43516 - goto out_free_dentry;
43517 + /*
43518 + * This bss-zeroing can fail if the ELF
43519 + * file specifies odd protections. So
43520 + * we don't check the return value
43521 + */
43522 }
43523
43524 +#ifdef CONFIG_PAX_RANDMMAP
43525 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43526 + unsigned long start, size;
43527 +
43528 + start = ELF_PAGEALIGN(elf_brk);
43529 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43530 + down_read(&current->mm->mmap_sem);
43531 + retval = -ENOMEM;
43532 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43533 + unsigned long prot = PROT_NONE;
43534 +
43535 + up_read(&current->mm->mmap_sem);
43536 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43537 +// if (current->personality & ADDR_NO_RANDOMIZE)
43538 +// prot = PROT_READ;
43539 + start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43540 + retval = IS_ERR_VALUE(start) ? start : 0;
43541 + } else
43542 + up_read(&current->mm->mmap_sem);
43543 + if (retval == 0)
43544 + retval = set_brk(start + size, start + size + PAGE_SIZE);
43545 + if (retval < 0) {
43546 + send_sig(SIGKILL, current, 0);
43547 + goto out_free_dentry;
43548 + }
43549 + }
43550 +#endif
43551 +
43552 if (elf_interpreter) {
43553 unsigned long uninitialized_var(interp_map_addr);
43554
43555 @@ -1107,7 +1555,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
43556 * Decide what to dump of a segment, part, all or none.
43557 */
43558 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43559 - unsigned long mm_flags)
43560 + unsigned long mm_flags, long signr)
43561 {
43562 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43563
43564 @@ -1144,7 +1592,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
43565 if (vma->vm_file == NULL)
43566 return 0;
43567
43568 - if (FILTER(MAPPED_PRIVATE))
43569 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43570 goto whole;
43571
43572 /*
43573 @@ -1366,9 +1814,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
43574 {
43575 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43576 int i = 0;
43577 - do
43578 + do {
43579 i += 2;
43580 - while (auxv[i - 2] != AT_NULL);
43581 + } while (auxv[i - 2] != AT_NULL);
43582 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43583 }
43584
43585 @@ -1890,14 +2338,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
43586 }
43587
43588 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43589 - unsigned long mm_flags)
43590 + struct coredump_params *cprm)
43591 {
43592 struct vm_area_struct *vma;
43593 size_t size = 0;
43594
43595 for (vma = first_vma(current, gate_vma); vma != NULL;
43596 vma = next_vma(vma, gate_vma))
43597 - size += vma_dump_size(vma, mm_flags);
43598 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43599 return size;
43600 }
43601
43602 @@ -1991,7 +2439,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43603
43604 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43605
43606 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43607 + offset += elf_core_vma_data_size(gate_vma, cprm);
43608 offset += elf_core_extra_data_size();
43609 e_shoff = offset;
43610
43611 @@ -2005,10 +2453,12 @@ static int elf_core_dump(struct coredump_params *cprm)
43612 offset = dataoff;
43613
43614 size += sizeof(*elf);
43615 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43616 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43617 goto end_coredump;
43618
43619 size += sizeof(*phdr4note);
43620 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43621 if (size > cprm->limit
43622 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43623 goto end_coredump;
43624 @@ -2022,7 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43625 phdr.p_offset = offset;
43626 phdr.p_vaddr = vma->vm_start;
43627 phdr.p_paddr = 0;
43628 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43629 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43630 phdr.p_memsz = vma->vm_end - vma->vm_start;
43631 offset += phdr.p_filesz;
43632 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43633 @@ -2033,6 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43634 phdr.p_align = ELF_EXEC_PAGESIZE;
43635
43636 size += sizeof(phdr);
43637 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43638 if (size > cprm->limit
43639 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43640 goto end_coredump;
43641 @@ -2057,7 +2508,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43642 unsigned long addr;
43643 unsigned long end;
43644
43645 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43646 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43647
43648 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43649 struct page *page;
43650 @@ -2066,6 +2517,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43651 page = get_dump_page(addr);
43652 if (page) {
43653 void *kaddr = kmap(page);
43654 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43655 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43656 !dump_write(cprm->file, kaddr,
43657 PAGE_SIZE);
43658 @@ -2083,6 +2535,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43659
43660 if (e_phnum == PN_XNUM) {
43661 size += sizeof(*shdr4extnum);
43662 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43663 if (size > cprm->limit
43664 || !dump_write(cprm->file, shdr4extnum,
43665 sizeof(*shdr4extnum)))
43666 @@ -2103,6 +2556,97 @@ out:
43667
43668 #endif /* CONFIG_ELF_CORE */
43669
43670 +#ifdef CONFIG_PAX_MPROTECT
43671 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
43672 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43673 + * we'll remove VM_MAYWRITE for good on RELRO segments.
43674 + *
43675 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43676 + * basis because we want to allow the common case and not the special ones.
43677 + */
43678 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43679 +{
43680 + struct elfhdr elf_h;
43681 + struct elf_phdr elf_p;
43682 + unsigned long i;
43683 + unsigned long oldflags;
43684 + bool is_textrel_rw, is_textrel_rx, is_relro;
43685 +
43686 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43687 + return;
43688 +
43689 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43690 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43691 +
43692 +#ifdef CONFIG_PAX_ELFRELOCS
43693 + /* possible TEXTREL */
43694 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43695 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43696 +#else
43697 + is_textrel_rw = false;
43698 + is_textrel_rx = false;
43699 +#endif
43700 +
43701 + /* possible RELRO */
43702 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43703 +
43704 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43705 + return;
43706 +
43707 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43708 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43709 +
43710 +#ifdef CONFIG_PAX_ETEXECRELOCS
43711 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43712 +#else
43713 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43714 +#endif
43715 +
43716 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43717 + !elf_check_arch(&elf_h) ||
43718 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43719 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43720 + return;
43721 +
43722 + for (i = 0UL; i < elf_h.e_phnum; i++) {
43723 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43724 + return;
43725 + switch (elf_p.p_type) {
43726 + case PT_DYNAMIC:
43727 + if (!is_textrel_rw && !is_textrel_rx)
43728 + continue;
43729 + i = 0UL;
43730 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43731 + elf_dyn dyn;
43732 +
43733 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43734 + return;
43735 + if (dyn.d_tag == DT_NULL)
43736 + return;
43737 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43738 + gr_log_textrel(vma);
43739 + if (is_textrel_rw)
43740 + vma->vm_flags |= VM_MAYWRITE;
43741 + else
43742 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43743 + vma->vm_flags &= ~VM_MAYWRITE;
43744 + return;
43745 + }
43746 + i++;
43747 + }
43748 + return;
43749 +
43750 + case PT_GNU_RELRO:
43751 + if (!is_relro)
43752 + continue;
43753 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43754 + vma->vm_flags &= ~VM_MAYWRITE;
43755 + return;
43756 + }
43757 + }
43758 +}
43759 +#endif
43760 +
43761 static int __init init_elf_binfmt(void)
43762 {
43763 register_binfmt(&elf_format);
43764 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43765 index 178cb70..8972997 100644
43766 --- a/fs/binfmt_flat.c
43767 +++ b/fs/binfmt_flat.c
43768 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43769 realdatastart = (unsigned long) -ENOMEM;
43770 printk("Unable to allocate RAM for process data, errno %d\n",
43771 (int)-realdatastart);
43772 + down_write(&current->mm->mmap_sem);
43773 vm_munmap(textpos, text_len);
43774 + up_write(&current->mm->mmap_sem);
43775 ret = realdatastart;
43776 goto err;
43777 }
43778 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43779 }
43780 if (IS_ERR_VALUE(result)) {
43781 printk("Unable to read data+bss, errno %d\n", (int)-result);
43782 + down_write(&current->mm->mmap_sem);
43783 vm_munmap(textpos, text_len);
43784 vm_munmap(realdatastart, len);
43785 + up_write(&current->mm->mmap_sem);
43786 ret = result;
43787 goto err;
43788 }
43789 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43790 }
43791 if (IS_ERR_VALUE(result)) {
43792 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43793 + down_write(&current->mm->mmap_sem);
43794 vm_munmap(textpos, text_len + data_len + extra +
43795 MAX_SHARED_LIBS * sizeof(unsigned long));
43796 + up_write(&current->mm->mmap_sem);
43797 ret = result;
43798 goto err;
43799 }
43800 diff --git a/fs/bio.c b/fs/bio.c
43801 index 73922ab..16642dd 100644
43802 --- a/fs/bio.c
43803 +++ b/fs/bio.c
43804 @@ -841,7 +841,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43805 /*
43806 * Overflow, abort
43807 */
43808 - if (end < start)
43809 + if (end < start || end - start > INT_MAX - nr_pages)
43810 return ERR_PTR(-EINVAL);
43811
43812 nr_pages += end - start;
43813 @@ -975,7 +975,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
43814 /*
43815 * Overflow, abort
43816 */
43817 - if (end < start)
43818 + if (end < start || end - start > INT_MAX - nr_pages)
43819 return ERR_PTR(-EINVAL);
43820
43821 nr_pages += end - start;
43822 @@ -1237,7 +1237,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43823 const int read = bio_data_dir(bio) == READ;
43824 struct bio_map_data *bmd = bio->bi_private;
43825 int i;
43826 - char *p = bmd->sgvecs[0].iov_base;
43827 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43828
43829 __bio_for_each_segment(bvec, bio, i, 0) {
43830 char *addr = page_address(bvec->bv_page);
43831 diff --git a/fs/block_dev.c b/fs/block_dev.c
43832 index c2bbe1f..9dfbc23 100644
43833 --- a/fs/block_dev.c
43834 +++ b/fs/block_dev.c
43835 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43836 else if (bdev->bd_contains == bdev)
43837 return true; /* is a whole device which isn't held */
43838
43839 - else if (whole->bd_holder == bd_may_claim)
43840 + else if (whole->bd_holder == (void *)bd_may_claim)
43841 return true; /* is a partition of a device that is being partitioned */
43842 else if (whole->bd_holder != NULL)
43843 return false; /* is a partition of a held device */
43844 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43845 index da6e936..1598dd0 100644
43846 --- a/fs/btrfs/check-integrity.c
43847 +++ b/fs/btrfs/check-integrity.c
43848 @@ -155,7 +155,7 @@ struct btrfsic_block {
43849 union {
43850 bio_end_io_t *bio;
43851 bh_end_io_t *bh;
43852 - } orig_bio_bh_end_io;
43853 + } __no_const orig_bio_bh_end_io;
43854 int submit_bio_bh_rw;
43855 u64 flush_gen; /* only valid if !never_written */
43856 };
43857 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43858 index 8206b39..06d5654 100644
43859 --- a/fs/btrfs/ctree.c
43860 +++ b/fs/btrfs/ctree.c
43861 @@ -973,9 +973,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43862 free_extent_buffer(buf);
43863 add_root_to_dirty_list(root);
43864 } else {
43865 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43866 - parent_start = parent->start;
43867 - else
43868 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43869 + if (parent)
43870 + parent_start = parent->start;
43871 + else
43872 + parent_start = 0;
43873 + } else
43874 parent_start = 0;
43875
43876 WARN_ON(trans->transid != btrfs_header_generation(parent));
43877 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43878 index a7d1921..a32dba2 100644
43879 --- a/fs/btrfs/inode.c
43880 +++ b/fs/btrfs/inode.c
43881 @@ -7111,7 +7111,7 @@ fail:
43882 return -ENOMEM;
43883 }
43884
43885 -static int btrfs_getattr(struct vfsmount *mnt,
43886 +int btrfs_getattr(struct vfsmount *mnt,
43887 struct dentry *dentry, struct kstat *stat)
43888 {
43889 struct inode *inode = dentry->d_inode;
43890 @@ -7125,6 +7125,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43891 return 0;
43892 }
43893
43894 +EXPORT_SYMBOL(btrfs_getattr);
43895 +
43896 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
43897 +{
43898 + return BTRFS_I(inode)->root->anon_dev;
43899 +}
43900 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43901 +
43902 /*
43903 * If a file is moved, it will inherit the cow and compression flags of the new
43904 * directory.
43905 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43906 index 0e92e57..8b560de 100644
43907 --- a/fs/btrfs/ioctl.c
43908 +++ b/fs/btrfs/ioctl.c
43909 @@ -2902,9 +2902,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43910 for (i = 0; i < num_types; i++) {
43911 struct btrfs_space_info *tmp;
43912
43913 + /* Don't copy in more than we allocated */
43914 if (!slot_count)
43915 break;
43916
43917 + slot_count--;
43918 +
43919 info = NULL;
43920 rcu_read_lock();
43921 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43922 @@ -2926,10 +2929,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43923 memcpy(dest, &space, sizeof(space));
43924 dest++;
43925 space_args.total_spaces++;
43926 - slot_count--;
43927 }
43928 - if (!slot_count)
43929 - break;
43930 }
43931 up_read(&info->groups_sem);
43932 }
43933 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43934 index 646ee21..f020f87 100644
43935 --- a/fs/btrfs/relocation.c
43936 +++ b/fs/btrfs/relocation.c
43937 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43938 }
43939 spin_unlock(&rc->reloc_root_tree.lock);
43940
43941 - BUG_ON((struct btrfs_root *)node->data != root);
43942 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
43943
43944 if (!del) {
43945 spin_lock(&rc->reloc_root_tree.lock);
43946 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43947 index 622f469..e8d2d55 100644
43948 --- a/fs/cachefiles/bind.c
43949 +++ b/fs/cachefiles/bind.c
43950 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43951 args);
43952
43953 /* start by checking things over */
43954 - ASSERT(cache->fstop_percent >= 0 &&
43955 - cache->fstop_percent < cache->fcull_percent &&
43956 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
43957 cache->fcull_percent < cache->frun_percent &&
43958 cache->frun_percent < 100);
43959
43960 - ASSERT(cache->bstop_percent >= 0 &&
43961 - cache->bstop_percent < cache->bcull_percent &&
43962 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
43963 cache->bcull_percent < cache->brun_percent &&
43964 cache->brun_percent < 100);
43965
43966 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43967 index 0a1467b..6a53245 100644
43968 --- a/fs/cachefiles/daemon.c
43969 +++ b/fs/cachefiles/daemon.c
43970 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43971 if (n > buflen)
43972 return -EMSGSIZE;
43973
43974 - if (copy_to_user(_buffer, buffer, n) != 0)
43975 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43976 return -EFAULT;
43977
43978 return n;
43979 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43980 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43981 return -EIO;
43982
43983 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
43984 + if (datalen > PAGE_SIZE - 1)
43985 return -EOPNOTSUPP;
43986
43987 /* drag the command string into the kernel so we can parse it */
43988 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43989 if (args[0] != '%' || args[1] != '\0')
43990 return -EINVAL;
43991
43992 - if (fstop < 0 || fstop >= cache->fcull_percent)
43993 + if (fstop >= cache->fcull_percent)
43994 return cachefiles_daemon_range_error(cache, args);
43995
43996 cache->fstop_percent = fstop;
43997 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43998 if (args[0] != '%' || args[1] != '\0')
43999 return -EINVAL;
44000
44001 - if (bstop < 0 || bstop >= cache->bcull_percent)
44002 + if (bstop >= cache->bcull_percent)
44003 return cachefiles_daemon_range_error(cache, args);
44004
44005 cache->bstop_percent = bstop;
44006 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
44007 index bd6bc1b..b627b53 100644
44008 --- a/fs/cachefiles/internal.h
44009 +++ b/fs/cachefiles/internal.h
44010 @@ -57,7 +57,7 @@ struct cachefiles_cache {
44011 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
44012 struct rb_root active_nodes; /* active nodes (can't be culled) */
44013 rwlock_t active_lock; /* lock for active_nodes */
44014 - atomic_t gravecounter; /* graveyard uniquifier */
44015 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
44016 unsigned frun_percent; /* when to stop culling (% files) */
44017 unsigned fcull_percent; /* when to start culling (% files) */
44018 unsigned fstop_percent; /* when to stop allocating (% files) */
44019 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
44020 * proc.c
44021 */
44022 #ifdef CONFIG_CACHEFILES_HISTOGRAM
44023 -extern atomic_t cachefiles_lookup_histogram[HZ];
44024 -extern atomic_t cachefiles_mkdir_histogram[HZ];
44025 -extern atomic_t cachefiles_create_histogram[HZ];
44026 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
44027 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
44028 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
44029
44030 extern int __init cachefiles_proc_init(void);
44031 extern void cachefiles_proc_cleanup(void);
44032 static inline
44033 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
44034 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
44035 {
44036 unsigned long jif = jiffies - start_jif;
44037 if (jif >= HZ)
44038 jif = HZ - 1;
44039 - atomic_inc(&histogram[jif]);
44040 + atomic_inc_unchecked(&histogram[jif]);
44041 }
44042
44043 #else
44044 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
44045 index 7f0771d..87d4f36 100644
44046 --- a/fs/cachefiles/namei.c
44047 +++ b/fs/cachefiles/namei.c
44048 @@ -318,7 +318,7 @@ try_again:
44049 /* first step is to make up a grave dentry in the graveyard */
44050 sprintf(nbuffer, "%08x%08x",
44051 (uint32_t) get_seconds(),
44052 - (uint32_t) atomic_inc_return(&cache->gravecounter));
44053 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
44054
44055 /* do the multiway lock magic */
44056 trap = lock_rename(cache->graveyard, dir);
44057 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
44058 index eccd339..4c1d995 100644
44059 --- a/fs/cachefiles/proc.c
44060 +++ b/fs/cachefiles/proc.c
44061 @@ -14,9 +14,9 @@
44062 #include <linux/seq_file.h>
44063 #include "internal.h"
44064
44065 -atomic_t cachefiles_lookup_histogram[HZ];
44066 -atomic_t cachefiles_mkdir_histogram[HZ];
44067 -atomic_t cachefiles_create_histogram[HZ];
44068 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
44069 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
44070 +atomic_unchecked_t cachefiles_create_histogram[HZ];
44071
44072 /*
44073 * display the latency histogram
44074 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
44075 return 0;
44076 default:
44077 index = (unsigned long) v - 3;
44078 - x = atomic_read(&cachefiles_lookup_histogram[index]);
44079 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
44080 - z = atomic_read(&cachefiles_create_histogram[index]);
44081 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
44082 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
44083 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
44084 if (x == 0 && y == 0 && z == 0)
44085 return 0;
44086
44087 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
44088 index 0e3c092..818480e 100644
44089 --- a/fs/cachefiles/rdwr.c
44090 +++ b/fs/cachefiles/rdwr.c
44091 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
44092 old_fs = get_fs();
44093 set_fs(KERNEL_DS);
44094 ret = file->f_op->write(
44095 - file, (const void __user *) data, len, &pos);
44096 + file, (const void __force_user *) data, len, &pos);
44097 set_fs(old_fs);
44098 kunmap(page);
44099 if (ret != len)
44100 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
44101 index 3e8094b..cb3ff3d 100644
44102 --- a/fs/ceph/dir.c
44103 +++ b/fs/ceph/dir.c
44104 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
44105 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
44106 struct ceph_mds_client *mdsc = fsc->mdsc;
44107 unsigned frag = fpos_frag(filp->f_pos);
44108 - int off = fpos_off(filp->f_pos);
44109 + unsigned int off = fpos_off(filp->f_pos);
44110 int err;
44111 u32 ftype;
44112 struct ceph_mds_reply_info_parsed *rinfo;
44113 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
44114 if (nd &&
44115 (nd->flags & LOOKUP_OPEN) &&
44116 !(nd->intent.open.flags & O_CREAT)) {
44117 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
44118 + int mode = nd->intent.open.create_mode & ~current_umask();
44119 return ceph_lookup_open(dir, dentry, nd, mode, 1);
44120 }
44121
44122 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
44123 index e814052..28dcdf7 100644
44124 --- a/fs/cifs/cifs_debug.c
44125 +++ b/fs/cifs/cifs_debug.c
44126 @@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
44127
44128 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
44129 #ifdef CONFIG_CIFS_STATS2
44130 - atomic_set(&totBufAllocCount, 0);
44131 - atomic_set(&totSmBufAllocCount, 0);
44132 + atomic_set_unchecked(&totBufAllocCount, 0);
44133 + atomic_set_unchecked(&totSmBufAllocCount, 0);
44134 #endif /* CONFIG_CIFS_STATS2 */
44135 spin_lock(&cifs_tcp_ses_lock);
44136 list_for_each(tmp1, &cifs_tcp_ses_list) {
44137 @@ -281,25 +281,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
44138 tcon = list_entry(tmp3,
44139 struct cifs_tcon,
44140 tcon_list);
44141 - atomic_set(&tcon->num_smbs_sent, 0);
44142 - atomic_set(&tcon->num_writes, 0);
44143 - atomic_set(&tcon->num_reads, 0);
44144 - atomic_set(&tcon->num_oplock_brks, 0);
44145 - atomic_set(&tcon->num_opens, 0);
44146 - atomic_set(&tcon->num_posixopens, 0);
44147 - atomic_set(&tcon->num_posixmkdirs, 0);
44148 - atomic_set(&tcon->num_closes, 0);
44149 - atomic_set(&tcon->num_deletes, 0);
44150 - atomic_set(&tcon->num_mkdirs, 0);
44151 - atomic_set(&tcon->num_rmdirs, 0);
44152 - atomic_set(&tcon->num_renames, 0);
44153 - atomic_set(&tcon->num_t2renames, 0);
44154 - atomic_set(&tcon->num_ffirst, 0);
44155 - atomic_set(&tcon->num_fnext, 0);
44156 - atomic_set(&tcon->num_fclose, 0);
44157 - atomic_set(&tcon->num_hardlinks, 0);
44158 - atomic_set(&tcon->num_symlinks, 0);
44159 - atomic_set(&tcon->num_locks, 0);
44160 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
44161 + atomic_set_unchecked(&tcon->num_writes, 0);
44162 + atomic_set_unchecked(&tcon->num_reads, 0);
44163 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
44164 + atomic_set_unchecked(&tcon->num_opens, 0);
44165 + atomic_set_unchecked(&tcon->num_posixopens, 0);
44166 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
44167 + atomic_set_unchecked(&tcon->num_closes, 0);
44168 + atomic_set_unchecked(&tcon->num_deletes, 0);
44169 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
44170 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
44171 + atomic_set_unchecked(&tcon->num_renames, 0);
44172 + atomic_set_unchecked(&tcon->num_t2renames, 0);
44173 + atomic_set_unchecked(&tcon->num_ffirst, 0);
44174 + atomic_set_unchecked(&tcon->num_fnext, 0);
44175 + atomic_set_unchecked(&tcon->num_fclose, 0);
44176 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
44177 + atomic_set_unchecked(&tcon->num_symlinks, 0);
44178 + atomic_set_unchecked(&tcon->num_locks, 0);
44179 }
44180 }
44181 }
44182 @@ -329,8 +329,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44183 smBufAllocCount.counter, cifs_min_small);
44184 #ifdef CONFIG_CIFS_STATS2
44185 seq_printf(m, "Total Large %d Small %d Allocations\n",
44186 - atomic_read(&totBufAllocCount),
44187 - atomic_read(&totSmBufAllocCount));
44188 + atomic_read_unchecked(&totBufAllocCount),
44189 + atomic_read_unchecked(&totSmBufAllocCount));
44190 #endif /* CONFIG_CIFS_STATS2 */
44191
44192 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
44193 @@ -359,41 +359,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44194 if (tcon->need_reconnect)
44195 seq_puts(m, "\tDISCONNECTED ");
44196 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
44197 - atomic_read(&tcon->num_smbs_sent),
44198 - atomic_read(&tcon->num_oplock_brks));
44199 + atomic_read_unchecked(&tcon->num_smbs_sent),
44200 + atomic_read_unchecked(&tcon->num_oplock_brks));
44201 seq_printf(m, "\nReads: %d Bytes: %lld",
44202 - atomic_read(&tcon->num_reads),
44203 + atomic_read_unchecked(&tcon->num_reads),
44204 (long long)(tcon->bytes_read));
44205 seq_printf(m, "\nWrites: %d Bytes: %lld",
44206 - atomic_read(&tcon->num_writes),
44207 + atomic_read_unchecked(&tcon->num_writes),
44208 (long long)(tcon->bytes_written));
44209 seq_printf(m, "\nFlushes: %d",
44210 - atomic_read(&tcon->num_flushes));
44211 + atomic_read_unchecked(&tcon->num_flushes));
44212 seq_printf(m, "\nLocks: %d HardLinks: %d "
44213 "Symlinks: %d",
44214 - atomic_read(&tcon->num_locks),
44215 - atomic_read(&tcon->num_hardlinks),
44216 - atomic_read(&tcon->num_symlinks));
44217 + atomic_read_unchecked(&tcon->num_locks),
44218 + atomic_read_unchecked(&tcon->num_hardlinks),
44219 + atomic_read_unchecked(&tcon->num_symlinks));
44220 seq_printf(m, "\nOpens: %d Closes: %d "
44221 "Deletes: %d",
44222 - atomic_read(&tcon->num_opens),
44223 - atomic_read(&tcon->num_closes),
44224 - atomic_read(&tcon->num_deletes));
44225 + atomic_read_unchecked(&tcon->num_opens),
44226 + atomic_read_unchecked(&tcon->num_closes),
44227 + atomic_read_unchecked(&tcon->num_deletes));
44228 seq_printf(m, "\nPosix Opens: %d "
44229 "Posix Mkdirs: %d",
44230 - atomic_read(&tcon->num_posixopens),
44231 - atomic_read(&tcon->num_posixmkdirs));
44232 + atomic_read_unchecked(&tcon->num_posixopens),
44233 + atomic_read_unchecked(&tcon->num_posixmkdirs));
44234 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
44235 - atomic_read(&tcon->num_mkdirs),
44236 - atomic_read(&tcon->num_rmdirs));
44237 + atomic_read_unchecked(&tcon->num_mkdirs),
44238 + atomic_read_unchecked(&tcon->num_rmdirs));
44239 seq_printf(m, "\nRenames: %d T2 Renames %d",
44240 - atomic_read(&tcon->num_renames),
44241 - atomic_read(&tcon->num_t2renames));
44242 + atomic_read_unchecked(&tcon->num_renames),
44243 + atomic_read_unchecked(&tcon->num_t2renames));
44244 seq_printf(m, "\nFindFirst: %d FNext %d "
44245 "FClose %d",
44246 - atomic_read(&tcon->num_ffirst),
44247 - atomic_read(&tcon->num_fnext),
44248 - atomic_read(&tcon->num_fclose));
44249 + atomic_read_unchecked(&tcon->num_ffirst),
44250 + atomic_read_unchecked(&tcon->num_fnext),
44251 + atomic_read_unchecked(&tcon->num_fclose));
44252 }
44253 }
44254 }
44255 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
44256 index 8b6e344..303a662 100644
44257 --- a/fs/cifs/cifsfs.c
44258 +++ b/fs/cifs/cifsfs.c
44259 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
44260 cifs_req_cachep = kmem_cache_create("cifs_request",
44261 CIFSMaxBufSize +
44262 MAX_CIFS_HDR_SIZE, 0,
44263 - SLAB_HWCACHE_ALIGN, NULL);
44264 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
44265 if (cifs_req_cachep == NULL)
44266 return -ENOMEM;
44267
44268 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
44269 efficient to alloc 1 per page off the slab compared to 17K (5page)
44270 alloc of large cifs buffers even when page debugging is on */
44271 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
44272 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
44273 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
44274 NULL);
44275 if (cifs_sm_req_cachep == NULL) {
44276 mempool_destroy(cifs_req_poolp);
44277 @@ -1106,8 +1106,8 @@ init_cifs(void)
44278 atomic_set(&bufAllocCount, 0);
44279 atomic_set(&smBufAllocCount, 0);
44280 #ifdef CONFIG_CIFS_STATS2
44281 - atomic_set(&totBufAllocCount, 0);
44282 - atomic_set(&totSmBufAllocCount, 0);
44283 + atomic_set_unchecked(&totBufAllocCount, 0);
44284 + atomic_set_unchecked(&totSmBufAllocCount, 0);
44285 #endif /* CONFIG_CIFS_STATS2 */
44286
44287 atomic_set(&midCount, 0);
44288 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
44289 index d86ba9f..e80049d 100644
44290 --- a/fs/cifs/cifsglob.h
44291 +++ b/fs/cifs/cifsglob.h
44292 @@ -491,28 +491,28 @@ struct cifs_tcon {
44293 __u16 Flags; /* optional support bits */
44294 enum statusEnum tidStatus;
44295 #ifdef CONFIG_CIFS_STATS
44296 - atomic_t num_smbs_sent;
44297 - atomic_t num_writes;
44298 - atomic_t num_reads;
44299 - atomic_t num_flushes;
44300 - atomic_t num_oplock_brks;
44301 - atomic_t num_opens;
44302 - atomic_t num_closes;
44303 - atomic_t num_deletes;
44304 - atomic_t num_mkdirs;
44305 - atomic_t num_posixopens;
44306 - atomic_t num_posixmkdirs;
44307 - atomic_t num_rmdirs;
44308 - atomic_t num_renames;
44309 - atomic_t num_t2renames;
44310 - atomic_t num_ffirst;
44311 - atomic_t num_fnext;
44312 - atomic_t num_fclose;
44313 - atomic_t num_hardlinks;
44314 - atomic_t num_symlinks;
44315 - atomic_t num_locks;
44316 - atomic_t num_acl_get;
44317 - atomic_t num_acl_set;
44318 + atomic_unchecked_t num_smbs_sent;
44319 + atomic_unchecked_t num_writes;
44320 + atomic_unchecked_t num_reads;
44321 + atomic_unchecked_t num_flushes;
44322 + atomic_unchecked_t num_oplock_brks;
44323 + atomic_unchecked_t num_opens;
44324 + atomic_unchecked_t num_closes;
44325 + atomic_unchecked_t num_deletes;
44326 + atomic_unchecked_t num_mkdirs;
44327 + atomic_unchecked_t num_posixopens;
44328 + atomic_unchecked_t num_posixmkdirs;
44329 + atomic_unchecked_t num_rmdirs;
44330 + atomic_unchecked_t num_renames;
44331 + atomic_unchecked_t num_t2renames;
44332 + atomic_unchecked_t num_ffirst;
44333 + atomic_unchecked_t num_fnext;
44334 + atomic_unchecked_t num_fclose;
44335 + atomic_unchecked_t num_hardlinks;
44336 + atomic_unchecked_t num_symlinks;
44337 + atomic_unchecked_t num_locks;
44338 + atomic_unchecked_t num_acl_get;
44339 + atomic_unchecked_t num_acl_set;
44340 #ifdef CONFIG_CIFS_STATS2
44341 unsigned long long time_writes;
44342 unsigned long long time_reads;
44343 @@ -735,7 +735,7 @@ convert_delimiter(char *path, char delim)
44344 }
44345
44346 #ifdef CONFIG_CIFS_STATS
44347 -#define cifs_stats_inc atomic_inc
44348 +#define cifs_stats_inc atomic_inc_unchecked
44349
44350 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
44351 unsigned int bytes)
44352 @@ -1093,8 +1093,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
44353 /* Various Debug counters */
44354 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
44355 #ifdef CONFIG_CIFS_STATS2
44356 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
44357 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
44358 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
44359 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
44360 #endif
44361 GLOBAL_EXTERN atomic_t smBufAllocCount;
44362 GLOBAL_EXTERN atomic_t midCount;
44363 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
44364 index 6b0e064..94e6c3c 100644
44365 --- a/fs/cifs/link.c
44366 +++ b/fs/cifs/link.c
44367 @@ -600,7 +600,7 @@ symlink_exit:
44368
44369 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
44370 {
44371 - char *p = nd_get_link(nd);
44372 + const char *p = nd_get_link(nd);
44373 if (!IS_ERR(p))
44374 kfree(p);
44375 }
44376 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
44377 index 557506a..2fd3816 100644
44378 --- a/fs/cifs/misc.c
44379 +++ b/fs/cifs/misc.c
44380 @@ -156,7 +156,7 @@ cifs_buf_get(void)
44381 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
44382 atomic_inc(&bufAllocCount);
44383 #ifdef CONFIG_CIFS_STATS2
44384 - atomic_inc(&totBufAllocCount);
44385 + atomic_inc_unchecked(&totBufAllocCount);
44386 #endif /* CONFIG_CIFS_STATS2 */
44387 }
44388
44389 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
44390 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
44391 atomic_inc(&smBufAllocCount);
44392 #ifdef CONFIG_CIFS_STATS2
44393 - atomic_inc(&totSmBufAllocCount);
44394 + atomic_inc_unchecked(&totSmBufAllocCount);
44395 #endif /* CONFIG_CIFS_STATS2 */
44396
44397 }
44398 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
44399 index 6901578..d402eb5 100644
44400 --- a/fs/coda/cache.c
44401 +++ b/fs/coda/cache.c
44402 @@ -24,7 +24,7 @@
44403 #include "coda_linux.h"
44404 #include "coda_cache.h"
44405
44406 -static atomic_t permission_epoch = ATOMIC_INIT(0);
44407 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44408
44409 /* replace or extend an acl cache hit */
44410 void coda_cache_enter(struct inode *inode, int mask)
44411 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
44412 struct coda_inode_info *cii = ITOC(inode);
44413
44414 spin_lock(&cii->c_lock);
44415 - cii->c_cached_epoch = atomic_read(&permission_epoch);
44416 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44417 if (cii->c_uid != current_fsuid()) {
44418 cii->c_uid = current_fsuid();
44419 cii->c_cached_perm = mask;
44420 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
44421 {
44422 struct coda_inode_info *cii = ITOC(inode);
44423 spin_lock(&cii->c_lock);
44424 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44425 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44426 spin_unlock(&cii->c_lock);
44427 }
44428
44429 /* remove all acl caches */
44430 void coda_cache_clear_all(struct super_block *sb)
44431 {
44432 - atomic_inc(&permission_epoch);
44433 + atomic_inc_unchecked(&permission_epoch);
44434 }
44435
44436
44437 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
44438 spin_lock(&cii->c_lock);
44439 hit = (mask & cii->c_cached_perm) == mask &&
44440 cii->c_uid == current_fsuid() &&
44441 - cii->c_cached_epoch == atomic_read(&permission_epoch);
44442 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44443 spin_unlock(&cii->c_lock);
44444
44445 return hit;
44446 diff --git a/fs/compat.c b/fs/compat.c
44447 index 1bdb350..9f28287 100644
44448 --- a/fs/compat.c
44449 +++ b/fs/compat.c
44450 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
44451
44452 set_fs(KERNEL_DS);
44453 /* The __user pointer cast is valid because of the set_fs() */
44454 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44455 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44456 set_fs(oldfs);
44457 /* truncating is ok because it's a user address */
44458 if (!ret)
44459 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
44460 goto out;
44461
44462 ret = -EINVAL;
44463 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44464 + if (nr_segs > UIO_MAXIOV)
44465 goto out;
44466 if (nr_segs > fast_segs) {
44467 ret = -ENOMEM;
44468 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
44469
44470 struct compat_readdir_callback {
44471 struct compat_old_linux_dirent __user *dirent;
44472 + struct file * file;
44473 int result;
44474 };
44475
44476 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
44477 buf->result = -EOVERFLOW;
44478 return -EOVERFLOW;
44479 }
44480 +
44481 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44482 + return 0;
44483 +
44484 buf->result++;
44485 dirent = buf->dirent;
44486 if (!access_ok(VERIFY_WRITE, dirent,
44487 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
44488
44489 buf.result = 0;
44490 buf.dirent = dirent;
44491 + buf.file = file;
44492
44493 error = vfs_readdir(file, compat_fillonedir, &buf);
44494 if (buf.result)
44495 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
44496 struct compat_getdents_callback {
44497 struct compat_linux_dirent __user *current_dir;
44498 struct compat_linux_dirent __user *previous;
44499 + struct file * file;
44500 int count;
44501 int error;
44502 };
44503 @@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
44504 buf->error = -EOVERFLOW;
44505 return -EOVERFLOW;
44506 }
44507 +
44508 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44509 + return 0;
44510 +
44511 dirent = buf->previous;
44512 if (dirent) {
44513 if (__put_user(offset, &dirent->d_off))
44514 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44515 buf.previous = NULL;
44516 buf.count = count;
44517 buf.error = 0;
44518 + buf.file = file;
44519
44520 error = vfs_readdir(file, compat_filldir, &buf);
44521 if (error >= 0)
44522 @@ -986,6 +998,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44523 struct compat_getdents_callback64 {
44524 struct linux_dirent64 __user *current_dir;
44525 struct linux_dirent64 __user *previous;
44526 + struct file * file;
44527 int count;
44528 int error;
44529 };
44530 @@ -1002,6 +1015,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44531 buf->error = -EINVAL; /* only used if we fail.. */
44532 if (reclen > buf->count)
44533 return -EINVAL;
44534 +
44535 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44536 + return 0;
44537 +
44538 dirent = buf->previous;
44539
44540 if (dirent) {
44541 @@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44542 buf.previous = NULL;
44543 buf.count = count;
44544 buf.error = 0;
44545 + buf.file = file;
44546
44547 error = vfs_readdir(file, compat_filldir64, &buf);
44548 if (error >= 0)
44549 error = buf.error;
44550 lastdirent = buf.previous;
44551 if (lastdirent) {
44552 - typeof(lastdirent->d_off) d_off = file->f_pos;
44553 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44554 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44555 error = -EFAULT;
44556 else
44557 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44558 index 112e45a..b59845b 100644
44559 --- a/fs/compat_binfmt_elf.c
44560 +++ b/fs/compat_binfmt_elf.c
44561 @@ -30,11 +30,13 @@
44562 #undef elf_phdr
44563 #undef elf_shdr
44564 #undef elf_note
44565 +#undef elf_dyn
44566 #undef elf_addr_t
44567 #define elfhdr elf32_hdr
44568 #define elf_phdr elf32_phdr
44569 #define elf_shdr elf32_shdr
44570 #define elf_note elf32_note
44571 +#define elf_dyn Elf32_Dyn
44572 #define elf_addr_t Elf32_Addr
44573
44574 /*
44575 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
44576 index debdfe0..75d31d4 100644
44577 --- a/fs/compat_ioctl.c
44578 +++ b/fs/compat_ioctl.c
44579 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44580
44581 err = get_user(palp, &up->palette);
44582 err |= get_user(length, &up->length);
44583 + if (err)
44584 + return -EFAULT;
44585
44586 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44587 err = put_user(compat_ptr(palp), &up_native->palette);
44588 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44589 return -EFAULT;
44590 if (__get_user(udata, &ss32->iomem_base))
44591 return -EFAULT;
44592 - ss.iomem_base = compat_ptr(udata);
44593 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44594 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44595 __get_user(ss.port_high, &ss32->port_high))
44596 return -EFAULT;
44597 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
44598 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44599 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44600 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44601 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44602 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44603 return -EFAULT;
44604
44605 return ioctl_preallocate(file, p);
44606 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44607 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44608 {
44609 unsigned int a, b;
44610 - a = *(unsigned int *)p;
44611 - b = *(unsigned int *)q;
44612 + a = *(const unsigned int *)p;
44613 + b = *(const unsigned int *)q;
44614 if (a > b)
44615 return 1;
44616 if (a < b)
44617 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
44618 index 7e6c52d..94bc756 100644
44619 --- a/fs/configfs/dir.c
44620 +++ b/fs/configfs/dir.c
44621 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44622 }
44623 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44624 struct configfs_dirent *next;
44625 - const char * name;
44626 + const unsigned char * name;
44627 + char d_name[sizeof(next->s_dentry->d_iname)];
44628 int len;
44629 struct inode *inode = NULL;
44630
44631 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44632 continue;
44633
44634 name = configfs_get_name(next);
44635 - len = strlen(name);
44636 + if (next->s_dentry && name == next->s_dentry->d_iname) {
44637 + len = next->s_dentry->d_name.len;
44638 + memcpy(d_name, name, len);
44639 + name = d_name;
44640 + } else
44641 + len = strlen(name);
44642
44643 /*
44644 * We'll have a dentry and an inode for
44645 diff --git a/fs/dcache.c b/fs/dcache.c
44646 index 4123b92..5381f16 100644
44647 --- a/fs/dcache.c
44648 +++ b/fs/dcache.c
44649 @@ -1132,6 +1132,8 @@ positive:
44650 return 1;
44651
44652 rename_retry:
44653 + if (locked)
44654 + goto again;
44655 locked = 1;
44656 write_seqlock(&rename_lock);
44657 goto again;
44658 @@ -1234,6 +1236,8 @@ out:
44659 rename_retry:
44660 if (found)
44661 return found;
44662 + if (locked)
44663 + goto again;
44664 locked = 1;
44665 write_seqlock(&rename_lock);
44666 goto again;
44667 @@ -3031,6 +3035,8 @@ resume:
44668 return;
44669
44670 rename_retry:
44671 + if (locked)
44672 + goto again;
44673 locked = 1;
44674 write_seqlock(&rename_lock);
44675 goto again;
44676 @@ -3154,7 +3160,7 @@ void __init vfs_caches_init(unsigned long mempages)
44677 mempages -= reserve;
44678
44679 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44680 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44681 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44682
44683 dcache_init();
44684 inode_init();
44685 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
44686 index b80bc84..0d46d1a 100644
44687 --- a/fs/debugfs/inode.c
44688 +++ b/fs/debugfs/inode.c
44689 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44690 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44691 {
44692 return debugfs_create_file(name,
44693 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44694 + S_IFDIR | S_IRWXU,
44695 +#else
44696 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44697 +#endif
44698 parent, NULL, NULL);
44699 }
44700 EXPORT_SYMBOL_GPL(debugfs_create_dir);
44701 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
44702 index 02e2fec..5c47fa2 100644
44703 --- a/fs/ecryptfs/inode.c
44704 +++ b/fs/ecryptfs/inode.c
44705 @@ -676,7 +676,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44706 old_fs = get_fs();
44707 set_fs(get_ds());
44708 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44709 - (char __user *)lower_buf,
44710 + (char __force_user *)lower_buf,
44711 PATH_MAX);
44712 set_fs(old_fs);
44713 if (rc < 0)
44714 @@ -708,7 +708,7 @@ out:
44715 static void
44716 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44717 {
44718 - char *buf = nd_get_link(nd);
44719 + const char *buf = nd_get_link(nd);
44720 if (!IS_ERR(buf)) {
44721 /* Free the char* */
44722 kfree(buf);
44723 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
44724 index c0038f6..47ab347 100644
44725 --- a/fs/ecryptfs/miscdev.c
44726 +++ b/fs/ecryptfs/miscdev.c
44727 @@ -355,7 +355,7 @@ check_list:
44728 goto out_unlock_msg_ctx;
44729 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44730 if (msg_ctx->msg) {
44731 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
44732 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44733 goto out_unlock_msg_ctx;
44734 i += packet_length_size;
44735 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
44736 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
44737 index b2a34a1..162fa69 100644
44738 --- a/fs/ecryptfs/read_write.c
44739 +++ b/fs/ecryptfs/read_write.c
44740 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44741 return -EIO;
44742 fs_save = get_fs();
44743 set_fs(get_ds());
44744 - rc = vfs_write(lower_file, data, size, &offset);
44745 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44746 set_fs(fs_save);
44747 mark_inode_dirty_sync(ecryptfs_inode);
44748 return rc;
44749 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44750 return -EIO;
44751 fs_save = get_fs();
44752 set_fs(get_ds());
44753 - rc = vfs_read(lower_file, data, size, &offset);
44754 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44755 set_fs(fs_save);
44756 return rc;
44757 }
44758 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
44759 index 1c8b556..eedec84 100644
44760 --- a/fs/eventpoll.c
44761 +++ b/fs/eventpoll.c
44762 @@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
44763 error = PTR_ERR(file);
44764 goto out_free_fd;
44765 }
44766 - fd_install(fd, file);
44767 ep->file = file;
44768 + fd_install(fd, file);
44769 return fd;
44770
44771 out_free_fd:
44772 diff --git a/fs/exec.c b/fs/exec.c
44773 index e95aeed..a943469 100644
44774 --- a/fs/exec.c
44775 +++ b/fs/exec.c
44776 @@ -55,6 +55,15 @@
44777 #include <linux/pipe_fs_i.h>
44778 #include <linux/oom.h>
44779 #include <linux/compat.h>
44780 +#include <linux/random.h>
44781 +#include <linux/seq_file.h>
44782 +
44783 +#ifdef CONFIG_PAX_REFCOUNT
44784 +#include <linux/kallsyms.h>
44785 +#include <linux/kdebug.h>
44786 +#endif
44787 +
44788 +#include <trace/events/fs.h>
44789
44790 #include <asm/uaccess.h>
44791 #include <asm/mmu_context.h>
44792 @@ -66,6 +75,18 @@
44793
44794 #include <trace/events/sched.h>
44795
44796 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44797 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
44798 +{
44799 + WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
44800 +}
44801 +#endif
44802 +
44803 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44804 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44805 +EXPORT_SYMBOL(pax_set_initial_flags_func);
44806 +#endif
44807 +
44808 int core_uses_pid;
44809 char core_pattern[CORENAME_MAX_SIZE] = "core";
44810 unsigned int core_pipe_limit;
44811 @@ -75,7 +96,7 @@ struct core_name {
44812 char *corename;
44813 int used, size;
44814 };
44815 -static atomic_t call_count = ATOMIC_INIT(1);
44816 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44817
44818 /* The maximal length of core_pattern is also specified in sysctl.c */
44819
44820 @@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44821 int write)
44822 {
44823 struct page *page;
44824 - int ret;
44825
44826 -#ifdef CONFIG_STACK_GROWSUP
44827 - if (write) {
44828 - ret = expand_downwards(bprm->vma, pos);
44829 - if (ret < 0)
44830 - return NULL;
44831 - }
44832 -#endif
44833 - ret = get_user_pages(current, bprm->mm, pos,
44834 - 1, write, 1, &page, NULL);
44835 - if (ret <= 0)
44836 + if (0 > expand_downwards(bprm->vma, pos))
44837 + return NULL;
44838 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44839 return NULL;
44840
44841 if (write) {
44842 @@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44843 if (size <= ARG_MAX)
44844 return page;
44845
44846 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44847 + // only allow 512KB for argv+env on suid/sgid binaries
44848 + // to prevent easy ASLR exhaustion
44849 + if (((bprm->cred->euid != current_euid()) ||
44850 + (bprm->cred->egid != current_egid())) &&
44851 + (size > (512 * 1024))) {
44852 + put_page(page);
44853 + return NULL;
44854 + }
44855 +#endif
44856 +
44857 /*
44858 * Limit to 1/4-th the stack size for the argv+env strings.
44859 * This ensures that:
44860 @@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44861 vma->vm_end = STACK_TOP_MAX;
44862 vma->vm_start = vma->vm_end - PAGE_SIZE;
44863 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44864 +
44865 +#ifdef CONFIG_PAX_SEGMEXEC
44866 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44867 +#endif
44868 +
44869 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44870 INIT_LIST_HEAD(&vma->anon_vma_chain);
44871
44872 @@ -287,6 +316,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44873 mm->stack_vm = mm->total_vm = 1;
44874 up_write(&mm->mmap_sem);
44875 bprm->p = vma->vm_end - sizeof(void *);
44876 +
44877 +#ifdef CONFIG_PAX_RANDUSTACK
44878 + if (randomize_va_space)
44879 + bprm->p ^= random32() & ~PAGE_MASK;
44880 +#endif
44881 +
44882 return 0;
44883 err:
44884 up_write(&mm->mmap_sem);
44885 @@ -395,19 +430,7 @@ err:
44886 return err;
44887 }
44888
44889 -struct user_arg_ptr {
44890 -#ifdef CONFIG_COMPAT
44891 - bool is_compat;
44892 -#endif
44893 - union {
44894 - const char __user *const __user *native;
44895 -#ifdef CONFIG_COMPAT
44896 - compat_uptr_t __user *compat;
44897 -#endif
44898 - } ptr;
44899 -};
44900 -
44901 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44902 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44903 {
44904 const char __user *native;
44905
44906 @@ -416,14 +439,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44907 compat_uptr_t compat;
44908
44909 if (get_user(compat, argv.ptr.compat + nr))
44910 - return ERR_PTR(-EFAULT);
44911 + return (const char __force_user *)ERR_PTR(-EFAULT);
44912
44913 return compat_ptr(compat);
44914 }
44915 #endif
44916
44917 if (get_user(native, argv.ptr.native + nr))
44918 - return ERR_PTR(-EFAULT);
44919 + return (const char __force_user *)ERR_PTR(-EFAULT);
44920
44921 return native;
44922 }
44923 @@ -442,7 +465,7 @@ static int count(struct user_arg_ptr argv, int max)
44924 if (!p)
44925 break;
44926
44927 - if (IS_ERR(p))
44928 + if (IS_ERR((const char __force_kernel *)p))
44929 return -EFAULT;
44930
44931 if (i++ >= max)
44932 @@ -476,7 +499,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44933
44934 ret = -EFAULT;
44935 str = get_user_arg_ptr(argv, argc);
44936 - if (IS_ERR(str))
44937 + if (IS_ERR((const char __force_kernel *)str))
44938 goto out;
44939
44940 len = strnlen_user(str, MAX_ARG_STRLEN);
44941 @@ -558,7 +581,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44942 int r;
44943 mm_segment_t oldfs = get_fs();
44944 struct user_arg_ptr argv = {
44945 - .ptr.native = (const char __user *const __user *)__argv,
44946 + .ptr.native = (const char __force_user *const __force_user *)__argv,
44947 };
44948
44949 set_fs(KERNEL_DS);
44950 @@ -593,7 +616,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44951 unsigned long new_end = old_end - shift;
44952 struct mmu_gather tlb;
44953
44954 - BUG_ON(new_start > new_end);
44955 + if (new_start >= new_end || new_start < mmap_min_addr)
44956 + return -ENOMEM;
44957
44958 /*
44959 * ensure there are no vmas between where we want to go
44960 @@ -602,6 +626,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44961 if (vma != find_vma(mm, new_start))
44962 return -EFAULT;
44963
44964 +#ifdef CONFIG_PAX_SEGMEXEC
44965 + BUG_ON(pax_find_mirror_vma(vma));
44966 +#endif
44967 +
44968 /*
44969 * cover the whole range: [new_start, old_end)
44970 */
44971 @@ -682,10 +710,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44972 stack_top = arch_align_stack(stack_top);
44973 stack_top = PAGE_ALIGN(stack_top);
44974
44975 - if (unlikely(stack_top < mmap_min_addr) ||
44976 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44977 - return -ENOMEM;
44978 -
44979 stack_shift = vma->vm_end - stack_top;
44980
44981 bprm->p -= stack_shift;
44982 @@ -697,8 +721,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44983 bprm->exec -= stack_shift;
44984
44985 down_write(&mm->mmap_sem);
44986 +
44987 + /* Move stack pages down in memory. */
44988 + if (stack_shift) {
44989 + ret = shift_arg_pages(vma, stack_shift);
44990 + if (ret)
44991 + goto out_unlock;
44992 + }
44993 +
44994 vm_flags = VM_STACK_FLAGS;
44995
44996 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44997 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44998 + vm_flags &= ~VM_EXEC;
44999 +
45000 +#ifdef CONFIG_PAX_MPROTECT
45001 + if (mm->pax_flags & MF_PAX_MPROTECT)
45002 + vm_flags &= ~VM_MAYEXEC;
45003 +#endif
45004 +
45005 + }
45006 +#endif
45007 +
45008 /*
45009 * Adjust stack execute permissions; explicitly enable for
45010 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
45011 @@ -717,13 +761,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
45012 goto out_unlock;
45013 BUG_ON(prev != vma);
45014
45015 - /* Move stack pages down in memory. */
45016 - if (stack_shift) {
45017 - ret = shift_arg_pages(vma, stack_shift);
45018 - if (ret)
45019 - goto out_unlock;
45020 - }
45021 -
45022 /* mprotect_fixup is overkill to remove the temporary stack flags */
45023 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
45024
45025 @@ -781,6 +818,8 @@ struct file *open_exec(const char *name)
45026
45027 fsnotify_open(file);
45028
45029 + trace_open_exec(name);
45030 +
45031 err = deny_write_access(file);
45032 if (err)
45033 goto exit;
45034 @@ -804,7 +843,7 @@ int kernel_read(struct file *file, loff_t offset,
45035 old_fs = get_fs();
45036 set_fs(get_ds());
45037 /* The cast to a user pointer is valid due to the set_fs() */
45038 - result = vfs_read(file, (void __user *)addr, count, &pos);
45039 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
45040 set_fs(old_fs);
45041 return result;
45042 }
45043 @@ -1257,7 +1296,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
45044 }
45045 rcu_read_unlock();
45046
45047 - if (p->fs->users > n_fs) {
45048 + if (atomic_read(&p->fs->users) > n_fs) {
45049 bprm->unsafe |= LSM_UNSAFE_SHARE;
45050 } else {
45051 res = -EAGAIN;
45052 @@ -1460,6 +1499,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
45053
45054 EXPORT_SYMBOL(search_binary_handler);
45055
45056 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45057 +static DEFINE_PER_CPU(u64, exec_counter);
45058 +static int __init init_exec_counters(void)
45059 +{
45060 + unsigned int cpu;
45061 +
45062 + for_each_possible_cpu(cpu) {
45063 + per_cpu(exec_counter, cpu) = (u64)cpu;
45064 + }
45065 +
45066 + return 0;
45067 +}
45068 +early_initcall(init_exec_counters);
45069 +static inline void increment_exec_counter(void)
45070 +{
45071 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
45072 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
45073 +}
45074 +#else
45075 +static inline void increment_exec_counter(void) {}
45076 +#endif
45077 +
45078 /*
45079 * sys_execve() executes a new program.
45080 */
45081 @@ -1468,6 +1529,11 @@ static int do_execve_common(const char *filename,
45082 struct user_arg_ptr envp,
45083 struct pt_regs *regs)
45084 {
45085 +#ifdef CONFIG_GRKERNSEC
45086 + struct file *old_exec_file;
45087 + struct acl_subject_label *old_acl;
45088 + struct rlimit old_rlim[RLIM_NLIMITS];
45089 +#endif
45090 struct linux_binprm *bprm;
45091 struct file *file;
45092 struct files_struct *displaced;
45093 @@ -1475,6 +1541,8 @@ static int do_execve_common(const char *filename,
45094 int retval;
45095 const struct cred *cred = current_cred();
45096
45097 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
45098 +
45099 /*
45100 * We move the actual failure in case of RLIMIT_NPROC excess from
45101 * set*uid() to execve() because too many poorly written programs
45102 @@ -1515,12 +1583,27 @@ static int do_execve_common(const char *filename,
45103 if (IS_ERR(file))
45104 goto out_unmark;
45105
45106 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
45107 + retval = -EPERM;
45108 + goto out_file;
45109 + }
45110 +
45111 sched_exec();
45112
45113 bprm->file = file;
45114 bprm->filename = filename;
45115 bprm->interp = filename;
45116
45117 + if (gr_process_user_ban()) {
45118 + retval = -EPERM;
45119 + goto out_file;
45120 + }
45121 +
45122 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
45123 + retval = -EACCES;
45124 + goto out_file;
45125 + }
45126 +
45127 retval = bprm_mm_init(bprm);
45128 if (retval)
45129 goto out_file;
45130 @@ -1537,24 +1620,65 @@ static int do_execve_common(const char *filename,
45131 if (retval < 0)
45132 goto out;
45133
45134 +#ifdef CONFIG_GRKERNSEC
45135 + old_acl = current->acl;
45136 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
45137 + old_exec_file = current->exec_file;
45138 + get_file(file);
45139 + current->exec_file = file;
45140 +#endif
45141 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45142 + /* limit suid stack to 8MB
45143 + we saved the old limits above and will restore them if this exec fails
45144 + */
45145 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
45146 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
45147 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
45148 +#endif
45149 +
45150 + if (!gr_tpe_allow(file)) {
45151 + retval = -EACCES;
45152 + goto out_fail;
45153 + }
45154 +
45155 + if (gr_check_crash_exec(file)) {
45156 + retval = -EACCES;
45157 + goto out_fail;
45158 + }
45159 +
45160 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
45161 + bprm->unsafe);
45162 + if (retval < 0)
45163 + goto out_fail;
45164 +
45165 retval = copy_strings_kernel(1, &bprm->filename, bprm);
45166 if (retval < 0)
45167 - goto out;
45168 + goto out_fail;
45169
45170 bprm->exec = bprm->p;
45171 retval = copy_strings(bprm->envc, envp, bprm);
45172 if (retval < 0)
45173 - goto out;
45174 + goto out_fail;
45175
45176 retval = copy_strings(bprm->argc, argv, bprm);
45177 if (retval < 0)
45178 - goto out;
45179 + goto out_fail;
45180 +
45181 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
45182 +
45183 + gr_handle_exec_args(bprm, argv);
45184
45185 retval = search_binary_handler(bprm,regs);
45186 if (retval < 0)
45187 - goto out;
45188 + goto out_fail;
45189 +#ifdef CONFIG_GRKERNSEC
45190 + if (old_exec_file)
45191 + fput(old_exec_file);
45192 +#endif
45193
45194 /* execve succeeded */
45195 +
45196 + increment_exec_counter();
45197 current->fs->in_exec = 0;
45198 current->in_execve = 0;
45199 acct_update_integrals(current);
45200 @@ -1563,6 +1687,14 @@ static int do_execve_common(const char *filename,
45201 put_files_struct(displaced);
45202 return retval;
45203
45204 +out_fail:
45205 +#ifdef CONFIG_GRKERNSEC
45206 + current->acl = old_acl;
45207 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
45208 + fput(current->exec_file);
45209 + current->exec_file = old_exec_file;
45210 +#endif
45211 +
45212 out:
45213 if (bprm->mm) {
45214 acct_arg_size(bprm, 0);
45215 @@ -1636,7 +1768,7 @@ static int expand_corename(struct core_name *cn)
45216 {
45217 char *old_corename = cn->corename;
45218
45219 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
45220 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
45221 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
45222
45223 if (!cn->corename) {
45224 @@ -1733,7 +1865,7 @@ static int format_corename(struct core_name *cn, long signr)
45225 int pid_in_pattern = 0;
45226 int err = 0;
45227
45228 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
45229 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
45230 cn->corename = kmalloc(cn->size, GFP_KERNEL);
45231 cn->used = 0;
45232
45233 @@ -1830,6 +1962,250 @@ out:
45234 return ispipe;
45235 }
45236
45237 +int pax_check_flags(unsigned long *flags)
45238 +{
45239 + int retval = 0;
45240 +
45241 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
45242 + if (*flags & MF_PAX_SEGMEXEC)
45243 + {
45244 + *flags &= ~MF_PAX_SEGMEXEC;
45245 + retval = -EINVAL;
45246 + }
45247 +#endif
45248 +
45249 + if ((*flags & MF_PAX_PAGEEXEC)
45250 +
45251 +#ifdef CONFIG_PAX_PAGEEXEC
45252 + && (*flags & MF_PAX_SEGMEXEC)
45253 +#endif
45254 +
45255 + )
45256 + {
45257 + *flags &= ~MF_PAX_PAGEEXEC;
45258 + retval = -EINVAL;
45259 + }
45260 +
45261 + if ((*flags & MF_PAX_MPROTECT)
45262 +
45263 +#ifdef CONFIG_PAX_MPROTECT
45264 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45265 +#endif
45266 +
45267 + )
45268 + {
45269 + *flags &= ~MF_PAX_MPROTECT;
45270 + retval = -EINVAL;
45271 + }
45272 +
45273 + if ((*flags & MF_PAX_EMUTRAMP)
45274 +
45275 +#ifdef CONFIG_PAX_EMUTRAMP
45276 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45277 +#endif
45278 +
45279 + )
45280 + {
45281 + *flags &= ~MF_PAX_EMUTRAMP;
45282 + retval = -EINVAL;
45283 + }
45284 +
45285 + return retval;
45286 +}
45287 +
45288 +EXPORT_SYMBOL(pax_check_flags);
45289 +
45290 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45291 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
45292 +{
45293 + struct task_struct *tsk = current;
45294 + struct mm_struct *mm = current->mm;
45295 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
45296 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
45297 + char *path_exec = NULL;
45298 + char *path_fault = NULL;
45299 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
45300 +
45301 + if (buffer_exec && buffer_fault) {
45302 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
45303 +
45304 + down_read(&mm->mmap_sem);
45305 + vma = mm->mmap;
45306 + while (vma && (!vma_exec || !vma_fault)) {
45307 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
45308 + vma_exec = vma;
45309 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
45310 + vma_fault = vma;
45311 + vma = vma->vm_next;
45312 + }
45313 + if (vma_exec) {
45314 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
45315 + if (IS_ERR(path_exec))
45316 + path_exec = "<path too long>";
45317 + else {
45318 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
45319 + if (path_exec) {
45320 + *path_exec = 0;
45321 + path_exec = buffer_exec;
45322 + } else
45323 + path_exec = "<path too long>";
45324 + }
45325 + }
45326 + if (vma_fault) {
45327 + start = vma_fault->vm_start;
45328 + end = vma_fault->vm_end;
45329 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
45330 + if (vma_fault->vm_file) {
45331 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
45332 + if (IS_ERR(path_fault))
45333 + path_fault = "<path too long>";
45334 + else {
45335 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
45336 + if (path_fault) {
45337 + *path_fault = 0;
45338 + path_fault = buffer_fault;
45339 + } else
45340 + path_fault = "<path too long>";
45341 + }
45342 + } else
45343 + path_fault = "<anonymous mapping>";
45344 + }
45345 + up_read(&mm->mmap_sem);
45346 + }
45347 + if (tsk->signal->curr_ip)
45348 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
45349 + else
45350 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
45351 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
45352 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
45353 + task_uid(tsk), task_euid(tsk), pc, sp);
45354 + free_page((unsigned long)buffer_exec);
45355 + free_page((unsigned long)buffer_fault);
45356 + pax_report_insns(regs, pc, sp);
45357 + do_coredump(SIGKILL, SIGKILL, regs);
45358 +}
45359 +#endif
45360 +
45361 +#ifdef CONFIG_PAX_REFCOUNT
45362 +void pax_report_refcount_overflow(struct pt_regs *regs)
45363 +{
45364 + if (current->signal->curr_ip)
45365 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45366 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
45367 + else
45368 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45369 + current->comm, task_pid_nr(current), current_uid(), current_euid());
45370 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
45371 + show_regs(regs);
45372 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
45373 +}
45374 +#endif
45375 +
45376 +#ifdef CONFIG_PAX_USERCOPY
45377 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
45378 +static noinline int check_stack_object(const void *obj, unsigned long len)
45379 +{
45380 + const void * const stack = task_stack_page(current);
45381 + const void * const stackend = stack + THREAD_SIZE;
45382 +
45383 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45384 + const void *frame = NULL;
45385 + const void *oldframe;
45386 +#endif
45387 +
45388 + if (obj + len < obj)
45389 + return -1;
45390 +
45391 + if (obj + len <= stack || stackend <= obj)
45392 + return 0;
45393 +
45394 + if (obj < stack || stackend < obj + len)
45395 + return -1;
45396 +
45397 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45398 + oldframe = __builtin_frame_address(1);
45399 + if (oldframe)
45400 + frame = __builtin_frame_address(2);
45401 + /*
45402 + low ----------------------------------------------> high
45403 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
45404 + ^----------------^
45405 + allow copies only within here
45406 + */
45407 + while (stack <= frame && frame < stackend) {
45408 + /* if obj + len extends past the last frame, this
45409 + check won't pass and the next frame will be 0,
45410 + causing us to bail out and correctly report
45411 + the copy as invalid
45412 + */
45413 + if (obj + len <= frame)
45414 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
45415 + oldframe = frame;
45416 + frame = *(const void * const *)frame;
45417 + }
45418 + return -1;
45419 +#else
45420 + return 1;
45421 +#endif
45422 +}
45423 +
45424 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
45425 +{
45426 + if (current->signal->curr_ip)
45427 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45428 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45429 + else
45430 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45431 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45432 + dump_stack();
45433 + gr_handle_kernel_exploit();
45434 + do_group_exit(SIGKILL);
45435 +}
45436 +#endif
45437 +
45438 +void check_object_size(const void *ptr, unsigned long n, bool to)
45439 +{
45440 +
45441 +#ifdef CONFIG_PAX_USERCOPY
45442 + const char *type;
45443 +
45444 + if (!n)
45445 + return;
45446 +
45447 + type = check_heap_object(ptr, n, to);
45448 + if (!type) {
45449 + if (check_stack_object(ptr, n) != -1)
45450 + return;
45451 + type = "<process stack>";
45452 + }
45453 +
45454 + pax_report_usercopy(ptr, n, to, type);
45455 +#endif
45456 +
45457 +}
45458 +EXPORT_SYMBOL(check_object_size);
45459 +
45460 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45461 +void pax_track_stack(void)
45462 +{
45463 + unsigned long sp = (unsigned long)&sp;
45464 + if (sp < current_thread_info()->lowest_stack &&
45465 + sp > (unsigned long)task_stack_page(current))
45466 + current_thread_info()->lowest_stack = sp;
45467 +}
45468 +EXPORT_SYMBOL(pax_track_stack);
45469 +#endif
45470 +
45471 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
45472 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
45473 +{
45474 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
45475 + dump_stack();
45476 + do_group_exit(SIGKILL);
45477 +}
45478 +EXPORT_SYMBOL(report_size_overflow);
45479 +#endif
45480 +
45481 static int zap_process(struct task_struct *start, int exit_code)
45482 {
45483 struct task_struct *t;
45484 @@ -2002,17 +2378,17 @@ static void coredump_finish(struct mm_struct *mm)
45485 void set_dumpable(struct mm_struct *mm, int value)
45486 {
45487 switch (value) {
45488 - case 0:
45489 + case SUID_DUMPABLE_DISABLED:
45490 clear_bit(MMF_DUMPABLE, &mm->flags);
45491 smp_wmb();
45492 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45493 break;
45494 - case 1:
45495 + case SUID_DUMPABLE_ENABLED:
45496 set_bit(MMF_DUMPABLE, &mm->flags);
45497 smp_wmb();
45498 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45499 break;
45500 - case 2:
45501 + case SUID_DUMPABLE_SAFE:
45502 set_bit(MMF_DUMP_SECURELY, &mm->flags);
45503 smp_wmb();
45504 set_bit(MMF_DUMPABLE, &mm->flags);
45505 @@ -2025,7 +2401,7 @@ static int __get_dumpable(unsigned long mm_flags)
45506 int ret;
45507
45508 ret = mm_flags & MMF_DUMPABLE_MASK;
45509 - return (ret >= 2) ? 2 : ret;
45510 + return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
45511 }
45512
45513 int get_dumpable(struct mm_struct *mm)
45514 @@ -2040,17 +2416,17 @@ static void wait_for_dump_helpers(struct file *file)
45515 pipe = file->f_path.dentry->d_inode->i_pipe;
45516
45517 pipe_lock(pipe);
45518 - pipe->readers++;
45519 - pipe->writers--;
45520 + atomic_inc(&pipe->readers);
45521 + atomic_dec(&pipe->writers);
45522
45523 - while ((pipe->readers > 1) && (!signal_pending(current))) {
45524 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45525 wake_up_interruptible_sync(&pipe->wait);
45526 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45527 pipe_wait(pipe);
45528 }
45529
45530 - pipe->readers--;
45531 - pipe->writers++;
45532 + atomic_dec(&pipe->readers);
45533 + atomic_inc(&pipe->writers);
45534 pipe_unlock(pipe);
45535
45536 }
45537 @@ -2111,7 +2487,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45538 int retval = 0;
45539 int flag = 0;
45540 int ispipe;
45541 - static atomic_t core_dump_count = ATOMIC_INIT(0);
45542 + bool need_nonrelative = false;
45543 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45544 struct coredump_params cprm = {
45545 .signr = signr,
45546 .regs = regs,
45547 @@ -2126,6 +2503,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45548
45549 audit_core_dumps(signr);
45550
45551 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45552 + gr_handle_brute_attach(current, cprm.mm_flags);
45553 +
45554 binfmt = mm->binfmt;
45555 if (!binfmt || !binfmt->core_dump)
45556 goto fail;
45557 @@ -2136,14 +2516,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45558 if (!cred)
45559 goto fail;
45560 /*
45561 - * We cannot trust fsuid as being the "true" uid of the
45562 - * process nor do we know its entire history. We only know it
45563 - * was tainted so we dump it as root in mode 2.
45564 + * We cannot trust fsuid as being the "true" uid of the process
45565 + * nor do we know its entire history. We only know it was tainted
45566 + * so we dump it as root in mode 2, and only into a controlled
45567 + * environment (pipe handler or fully qualified path).
45568 */
45569 - if (__get_dumpable(cprm.mm_flags) == 2) {
45570 + if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
45571 /* Setuid core dump mode */
45572 flag = O_EXCL; /* Stop rewrite attacks */
45573 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
45574 + need_nonrelative = true;
45575 }
45576
45577 retval = coredump_wait(exit_code, &core_state);
45578 @@ -2193,7 +2575,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45579 }
45580 cprm.limit = RLIM_INFINITY;
45581
45582 - dump_count = atomic_inc_return(&core_dump_count);
45583 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
45584 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45585 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45586 task_tgid_vnr(current), current->comm);
45587 @@ -2220,9 +2602,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45588 } else {
45589 struct inode *inode;
45590
45591 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45592 +
45593 if (cprm.limit < binfmt->min_coredump)
45594 goto fail_unlock;
45595
45596 + if (need_nonrelative && cn.corename[0] != '/') {
45597 + printk(KERN_WARNING "Pid %d(%s) can only dump core "\
45598 + "to fully qualified path!\n",
45599 + task_tgid_vnr(current), current->comm);
45600 + printk(KERN_WARNING "Skipping core dump\n");
45601 + goto fail_unlock;
45602 + }
45603 +
45604 cprm.file = filp_open(cn.corename,
45605 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
45606 0600);
45607 @@ -2263,7 +2655,7 @@ close_fail:
45608 filp_close(cprm.file, NULL);
45609 fail_dropcount:
45610 if (ispipe)
45611 - atomic_dec(&core_dump_count);
45612 + atomic_dec_unchecked(&core_dump_count);
45613 fail_unlock:
45614 kfree(cn.corename);
45615 fail_corename:
45616 @@ -2282,7 +2674,7 @@ fail:
45617 */
45618 int dump_write(struct file *file, const void *addr, int nr)
45619 {
45620 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45621 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45622 }
45623 EXPORT_SYMBOL(dump_write);
45624
45625 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
45626 index 1c36139..cf6b350 100644
45627 --- a/fs/ext2/balloc.c
45628 +++ b/fs/ext2/balloc.c
45629 @@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45630
45631 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45632 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45633 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45634 + if (free_blocks < root_blocks + 1 &&
45635 !uid_eq(sbi->s_resuid, current_fsuid()) &&
45636 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45637 - !in_group_p (sbi->s_resgid))) {
45638 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45639 return 0;
45640 }
45641 return 1;
45642 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
45643 index 25cd608..9ed5294 100644
45644 --- a/fs/ext3/balloc.c
45645 +++ b/fs/ext3/balloc.c
45646 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45647
45648 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45649 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45650 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45651 + if (free_blocks < root_blocks + 1 &&
45652 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
45653 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45654 - !in_group_p (sbi->s_resgid))) {
45655 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45656 return 0;
45657 }
45658 return 1;
45659 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
45660 index 1b50890..e56c5ad 100644
45661 --- a/fs/ext4/balloc.c
45662 +++ b/fs/ext4/balloc.c
45663 @@ -500,8 +500,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
45664 /* Hm, nope. Are (enough) root reserved clusters available? */
45665 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
45666 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
45667 - capable(CAP_SYS_RESOURCE) ||
45668 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45669 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45670 + capable_nolog(CAP_SYS_RESOURCE)) {
45671
45672 if (free_clusters >= (nclusters + dirty_clusters))
45673 return 1;
45674 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
45675 index 01434f2..bd995b4 100644
45676 --- a/fs/ext4/ext4.h
45677 +++ b/fs/ext4/ext4.h
45678 @@ -1246,19 +1246,19 @@ struct ext4_sb_info {
45679 unsigned long s_mb_last_start;
45680
45681 /* stats for buddy allocator */
45682 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45683 - atomic_t s_bal_success; /* we found long enough chunks */
45684 - atomic_t s_bal_allocated; /* in blocks */
45685 - atomic_t s_bal_ex_scanned; /* total extents scanned */
45686 - atomic_t s_bal_goals; /* goal hits */
45687 - atomic_t s_bal_breaks; /* too long searches */
45688 - atomic_t s_bal_2orders; /* 2^order hits */
45689 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45690 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45691 + atomic_unchecked_t s_bal_allocated; /* in blocks */
45692 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45693 + atomic_unchecked_t s_bal_goals; /* goal hits */
45694 + atomic_unchecked_t s_bal_breaks; /* too long searches */
45695 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45696 spinlock_t s_bal_lock;
45697 unsigned long s_mb_buddies_generated;
45698 unsigned long long s_mb_generation_time;
45699 - atomic_t s_mb_lost_chunks;
45700 - atomic_t s_mb_preallocated;
45701 - atomic_t s_mb_discarded;
45702 + atomic_unchecked_t s_mb_lost_chunks;
45703 + atomic_unchecked_t s_mb_preallocated;
45704 + atomic_unchecked_t s_mb_discarded;
45705 atomic_t s_lock_busy;
45706
45707 /* locality groups */
45708 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
45709 index 1cd6994..5799d45 100644
45710 --- a/fs/ext4/mballoc.c
45711 +++ b/fs/ext4/mballoc.c
45712 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45713 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45714
45715 if (EXT4_SB(sb)->s_mb_stats)
45716 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45717 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45718
45719 break;
45720 }
45721 @@ -2041,7 +2041,7 @@ repeat:
45722 ac->ac_status = AC_STATUS_CONTINUE;
45723 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45724 cr = 3;
45725 - atomic_inc(&sbi->s_mb_lost_chunks);
45726 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45727 goto repeat;
45728 }
45729 }
45730 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
45731 if (sbi->s_mb_stats) {
45732 ext4_msg(sb, KERN_INFO,
45733 "mballoc: %u blocks %u reqs (%u success)",
45734 - atomic_read(&sbi->s_bal_allocated),
45735 - atomic_read(&sbi->s_bal_reqs),
45736 - atomic_read(&sbi->s_bal_success));
45737 + atomic_read_unchecked(&sbi->s_bal_allocated),
45738 + atomic_read_unchecked(&sbi->s_bal_reqs),
45739 + atomic_read_unchecked(&sbi->s_bal_success));
45740 ext4_msg(sb, KERN_INFO,
45741 "mballoc: %u extents scanned, %u goal hits, "
45742 "%u 2^N hits, %u breaks, %u lost",
45743 - atomic_read(&sbi->s_bal_ex_scanned),
45744 - atomic_read(&sbi->s_bal_goals),
45745 - atomic_read(&sbi->s_bal_2orders),
45746 - atomic_read(&sbi->s_bal_breaks),
45747 - atomic_read(&sbi->s_mb_lost_chunks));
45748 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45749 + atomic_read_unchecked(&sbi->s_bal_goals),
45750 + atomic_read_unchecked(&sbi->s_bal_2orders),
45751 + atomic_read_unchecked(&sbi->s_bal_breaks),
45752 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45753 ext4_msg(sb, KERN_INFO,
45754 "mballoc: %lu generated and it took %Lu",
45755 sbi->s_mb_buddies_generated,
45756 sbi->s_mb_generation_time);
45757 ext4_msg(sb, KERN_INFO,
45758 "mballoc: %u preallocated, %u discarded",
45759 - atomic_read(&sbi->s_mb_preallocated),
45760 - atomic_read(&sbi->s_mb_discarded));
45761 + atomic_read_unchecked(&sbi->s_mb_preallocated),
45762 + atomic_read_unchecked(&sbi->s_mb_discarded));
45763 }
45764
45765 free_percpu(sbi->s_locality_groups);
45766 @@ -3047,16 +3047,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45767 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45768
45769 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45770 - atomic_inc(&sbi->s_bal_reqs);
45771 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45772 + atomic_inc_unchecked(&sbi->s_bal_reqs);
45773 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45774 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45775 - atomic_inc(&sbi->s_bal_success);
45776 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45777 + atomic_inc_unchecked(&sbi->s_bal_success);
45778 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45779 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45780 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45781 - atomic_inc(&sbi->s_bal_goals);
45782 + atomic_inc_unchecked(&sbi->s_bal_goals);
45783 if (ac->ac_found > sbi->s_mb_max_to_scan)
45784 - atomic_inc(&sbi->s_bal_breaks);
45785 + atomic_inc_unchecked(&sbi->s_bal_breaks);
45786 }
45787
45788 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45789 @@ -3456,7 +3456,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45790 trace_ext4_mb_new_inode_pa(ac, pa);
45791
45792 ext4_mb_use_inode_pa(ac, pa);
45793 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45794 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45795
45796 ei = EXT4_I(ac->ac_inode);
45797 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45798 @@ -3516,7 +3516,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45799 trace_ext4_mb_new_group_pa(ac, pa);
45800
45801 ext4_mb_use_group_pa(ac, pa);
45802 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45803 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45804
45805 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45806 lg = ac->ac_lg;
45807 @@ -3605,7 +3605,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45808 * from the bitmap and continue.
45809 */
45810 }
45811 - atomic_add(free, &sbi->s_mb_discarded);
45812 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
45813
45814 return err;
45815 }
45816 @@ -3623,7 +3623,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45817 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45818 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45819 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45820 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45821 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45822 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45823
45824 return 0;
45825 diff --git a/fs/fcntl.c b/fs/fcntl.c
45826 index 81b70e6..d9ae6cf 100644
45827 --- a/fs/fcntl.c
45828 +++ b/fs/fcntl.c
45829 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45830 if (err)
45831 return err;
45832
45833 + if (gr_handle_chroot_fowner(pid, type))
45834 + return -ENOENT;
45835 + if (gr_check_protected_task_fowner(pid, type))
45836 + return -EACCES;
45837 +
45838 f_modown(filp, pid, type, force);
45839 return 0;
45840 }
45841 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45842
45843 static int f_setown_ex(struct file *filp, unsigned long arg)
45844 {
45845 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45846 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45847 struct f_owner_ex owner;
45848 struct pid *pid;
45849 int type;
45850 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45851
45852 static int f_getown_ex(struct file *filp, unsigned long arg)
45853 {
45854 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45855 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45856 struct f_owner_ex owner;
45857 int ret = 0;
45858
45859 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45860 switch (cmd) {
45861 case F_DUPFD:
45862 case F_DUPFD_CLOEXEC:
45863 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45864 if (arg >= rlimit(RLIMIT_NOFILE))
45865 break;
45866 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45867 diff --git a/fs/fifo.c b/fs/fifo.c
45868 index cf6f434..3d7942c 100644
45869 --- a/fs/fifo.c
45870 +++ b/fs/fifo.c
45871 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45872 */
45873 filp->f_op = &read_pipefifo_fops;
45874 pipe->r_counter++;
45875 - if (pipe->readers++ == 0)
45876 + if (atomic_inc_return(&pipe->readers) == 1)
45877 wake_up_partner(inode);
45878
45879 - if (!pipe->writers) {
45880 + if (!atomic_read(&pipe->writers)) {
45881 if ((filp->f_flags & O_NONBLOCK)) {
45882 /* suppress POLLHUP until we have
45883 * seen a writer */
45884 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45885 * errno=ENXIO when there is no process reading the FIFO.
45886 */
45887 ret = -ENXIO;
45888 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45889 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45890 goto err;
45891
45892 filp->f_op = &write_pipefifo_fops;
45893 pipe->w_counter++;
45894 - if (!pipe->writers++)
45895 + if (atomic_inc_return(&pipe->writers) == 1)
45896 wake_up_partner(inode);
45897
45898 - if (!pipe->readers) {
45899 + if (!atomic_read(&pipe->readers)) {
45900 if (wait_for_partner(inode, &pipe->r_counter))
45901 goto err_wr;
45902 }
45903 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45904 */
45905 filp->f_op = &rdwr_pipefifo_fops;
45906
45907 - pipe->readers++;
45908 - pipe->writers++;
45909 + atomic_inc(&pipe->readers);
45910 + atomic_inc(&pipe->writers);
45911 pipe->r_counter++;
45912 pipe->w_counter++;
45913 - if (pipe->readers == 1 || pipe->writers == 1)
45914 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45915 wake_up_partner(inode);
45916 break;
45917
45918 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45919 return 0;
45920
45921 err_rd:
45922 - if (!--pipe->readers)
45923 + if (atomic_dec_and_test(&pipe->readers))
45924 wake_up_interruptible(&pipe->wait);
45925 ret = -ERESTARTSYS;
45926 goto err;
45927
45928 err_wr:
45929 - if (!--pipe->writers)
45930 + if (atomic_dec_and_test(&pipe->writers))
45931 wake_up_interruptible(&pipe->wait);
45932 ret = -ERESTARTSYS;
45933 goto err;
45934
45935 err:
45936 - if (!pipe->readers && !pipe->writers)
45937 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45938 free_pipe_info(inode);
45939
45940 err_nocleanup:
45941 diff --git a/fs/file.c b/fs/file.c
45942 index ba3f605..fade102 100644
45943 --- a/fs/file.c
45944 +++ b/fs/file.c
45945 @@ -15,6 +15,7 @@
45946 #include <linux/slab.h>
45947 #include <linux/vmalloc.h>
45948 #include <linux/file.h>
45949 +#include <linux/security.h>
45950 #include <linux/fdtable.h>
45951 #include <linux/bitops.h>
45952 #include <linux/interrupt.h>
45953 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
45954 * N.B. For clone tasks sharing a files structure, this test
45955 * will limit the total number of files that can be opened.
45956 */
45957 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45958 if (nr >= rlimit(RLIMIT_NOFILE))
45959 return -EMFILE;
45960
45961 diff --git a/fs/filesystems.c b/fs/filesystems.c
45962 index 96f2428..f5eeb8e 100644
45963 --- a/fs/filesystems.c
45964 +++ b/fs/filesystems.c
45965 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45966 int len = dot ? dot - name : strlen(name);
45967
45968 fs = __get_fs_type(name, len);
45969 +
45970 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
45971 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45972 +#else
45973 if (!fs && (request_module("%.*s", len, name) == 0))
45974 +#endif
45975 fs = __get_fs_type(name, len);
45976
45977 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45978 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45979 index e159e68..e7d2a6f 100644
45980 --- a/fs/fs_struct.c
45981 +++ b/fs/fs_struct.c
45982 @@ -4,6 +4,7 @@
45983 #include <linux/path.h>
45984 #include <linux/slab.h>
45985 #include <linux/fs_struct.h>
45986 +#include <linux/grsecurity.h>
45987 #include "internal.h"
45988
45989 static inline void path_get_longterm(struct path *path)
45990 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45991 write_seqcount_begin(&fs->seq);
45992 old_root = fs->root;
45993 fs->root = *path;
45994 + gr_set_chroot_entries(current, path);
45995 write_seqcount_end(&fs->seq);
45996 spin_unlock(&fs->lock);
45997 if (old_root.dentry)
45998 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
45999 return 1;
46000 }
46001
46002 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
46003 +{
46004 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
46005 + return 0;
46006 + *p = *new;
46007 +
46008 + gr_set_chroot_entries(task, new);
46009 +
46010 + return 1;
46011 +}
46012 +
46013 void chroot_fs_refs(struct path *old_root, struct path *new_root)
46014 {
46015 struct task_struct *g, *p;
46016 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
46017 int hits = 0;
46018 spin_lock(&fs->lock);
46019 write_seqcount_begin(&fs->seq);
46020 - hits += replace_path(&fs->root, old_root, new_root);
46021 + hits += replace_root_path(p, &fs->root, old_root, new_root);
46022 hits += replace_path(&fs->pwd, old_root, new_root);
46023 write_seqcount_end(&fs->seq);
46024 while (hits--) {
46025 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
46026 task_lock(tsk);
46027 spin_lock(&fs->lock);
46028 tsk->fs = NULL;
46029 - kill = !--fs->users;
46030 + gr_clear_chroot_entries(tsk);
46031 + kill = !atomic_dec_return(&fs->users);
46032 spin_unlock(&fs->lock);
46033 task_unlock(tsk);
46034 if (kill)
46035 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
46036 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
46037 /* We don't need to lock fs - think why ;-) */
46038 if (fs) {
46039 - fs->users = 1;
46040 + atomic_set(&fs->users, 1);
46041 fs->in_exec = 0;
46042 spin_lock_init(&fs->lock);
46043 seqcount_init(&fs->seq);
46044 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
46045 spin_lock(&old->lock);
46046 fs->root = old->root;
46047 path_get_longterm(&fs->root);
46048 + /* instead of calling gr_set_chroot_entries here,
46049 + we call it from every caller of this function
46050 + */
46051 fs->pwd = old->pwd;
46052 path_get_longterm(&fs->pwd);
46053 spin_unlock(&old->lock);
46054 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
46055
46056 task_lock(current);
46057 spin_lock(&fs->lock);
46058 - kill = !--fs->users;
46059 + kill = !atomic_dec_return(&fs->users);
46060 current->fs = new_fs;
46061 + gr_set_chroot_entries(current, &new_fs->root);
46062 spin_unlock(&fs->lock);
46063 task_unlock(current);
46064
46065 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
46066
46067 int current_umask(void)
46068 {
46069 - return current->fs->umask;
46070 + return current->fs->umask | gr_acl_umask();
46071 }
46072 EXPORT_SYMBOL(current_umask);
46073
46074 /* to be mentioned only in INIT_TASK */
46075 struct fs_struct init_fs = {
46076 - .users = 1,
46077 + .users = ATOMIC_INIT(1),
46078 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
46079 .seq = SEQCNT_ZERO,
46080 .umask = 0022,
46081 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
46082 task_lock(current);
46083
46084 spin_lock(&init_fs.lock);
46085 - init_fs.users++;
46086 + atomic_inc(&init_fs.users);
46087 spin_unlock(&init_fs.lock);
46088
46089 spin_lock(&fs->lock);
46090 current->fs = &init_fs;
46091 - kill = !--fs->users;
46092 + gr_set_chroot_entries(current, &current->fs->root);
46093 + kill = !atomic_dec_return(&fs->users);
46094 spin_unlock(&fs->lock);
46095
46096 task_unlock(current);
46097 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
46098 index 9905350..02eaec4 100644
46099 --- a/fs/fscache/cookie.c
46100 +++ b/fs/fscache/cookie.c
46101 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
46102 parent ? (char *) parent->def->name : "<no-parent>",
46103 def->name, netfs_data);
46104
46105 - fscache_stat(&fscache_n_acquires);
46106 + fscache_stat_unchecked(&fscache_n_acquires);
46107
46108 /* if there's no parent cookie, then we don't create one here either */
46109 if (!parent) {
46110 - fscache_stat(&fscache_n_acquires_null);
46111 + fscache_stat_unchecked(&fscache_n_acquires_null);
46112 _leave(" [no parent]");
46113 return NULL;
46114 }
46115 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
46116 /* allocate and initialise a cookie */
46117 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
46118 if (!cookie) {
46119 - fscache_stat(&fscache_n_acquires_oom);
46120 + fscache_stat_unchecked(&fscache_n_acquires_oom);
46121 _leave(" [ENOMEM]");
46122 return NULL;
46123 }
46124 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
46125
46126 switch (cookie->def->type) {
46127 case FSCACHE_COOKIE_TYPE_INDEX:
46128 - fscache_stat(&fscache_n_cookie_index);
46129 + fscache_stat_unchecked(&fscache_n_cookie_index);
46130 break;
46131 case FSCACHE_COOKIE_TYPE_DATAFILE:
46132 - fscache_stat(&fscache_n_cookie_data);
46133 + fscache_stat_unchecked(&fscache_n_cookie_data);
46134 break;
46135 default:
46136 - fscache_stat(&fscache_n_cookie_special);
46137 + fscache_stat_unchecked(&fscache_n_cookie_special);
46138 break;
46139 }
46140
46141 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
46142 if (fscache_acquire_non_index_cookie(cookie) < 0) {
46143 atomic_dec(&parent->n_children);
46144 __fscache_cookie_put(cookie);
46145 - fscache_stat(&fscache_n_acquires_nobufs);
46146 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
46147 _leave(" = NULL");
46148 return NULL;
46149 }
46150 }
46151
46152 - fscache_stat(&fscache_n_acquires_ok);
46153 + fscache_stat_unchecked(&fscache_n_acquires_ok);
46154 _leave(" = %p", cookie);
46155 return cookie;
46156 }
46157 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
46158 cache = fscache_select_cache_for_object(cookie->parent);
46159 if (!cache) {
46160 up_read(&fscache_addremove_sem);
46161 - fscache_stat(&fscache_n_acquires_no_cache);
46162 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
46163 _leave(" = -ENOMEDIUM [no cache]");
46164 return -ENOMEDIUM;
46165 }
46166 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
46167 object = cache->ops->alloc_object(cache, cookie);
46168 fscache_stat_d(&fscache_n_cop_alloc_object);
46169 if (IS_ERR(object)) {
46170 - fscache_stat(&fscache_n_object_no_alloc);
46171 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
46172 ret = PTR_ERR(object);
46173 goto error;
46174 }
46175
46176 - fscache_stat(&fscache_n_object_alloc);
46177 + fscache_stat_unchecked(&fscache_n_object_alloc);
46178
46179 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
46180
46181 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
46182 struct fscache_object *object;
46183 struct hlist_node *_p;
46184
46185 - fscache_stat(&fscache_n_updates);
46186 + fscache_stat_unchecked(&fscache_n_updates);
46187
46188 if (!cookie) {
46189 - fscache_stat(&fscache_n_updates_null);
46190 + fscache_stat_unchecked(&fscache_n_updates_null);
46191 _leave(" [no cookie]");
46192 return;
46193 }
46194 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46195 struct fscache_object *object;
46196 unsigned long event;
46197
46198 - fscache_stat(&fscache_n_relinquishes);
46199 + fscache_stat_unchecked(&fscache_n_relinquishes);
46200 if (retire)
46201 - fscache_stat(&fscache_n_relinquishes_retire);
46202 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
46203
46204 if (!cookie) {
46205 - fscache_stat(&fscache_n_relinquishes_null);
46206 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
46207 _leave(" [no cookie]");
46208 return;
46209 }
46210 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46211
46212 /* wait for the cookie to finish being instantiated (or to fail) */
46213 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
46214 - fscache_stat(&fscache_n_relinquishes_waitcrt);
46215 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
46216 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
46217 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
46218 }
46219 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
46220 index f6aad48..88dcf26 100644
46221 --- a/fs/fscache/internal.h
46222 +++ b/fs/fscache/internal.h
46223 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
46224 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
46225 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
46226
46227 -extern atomic_t fscache_n_op_pend;
46228 -extern atomic_t fscache_n_op_run;
46229 -extern atomic_t fscache_n_op_enqueue;
46230 -extern atomic_t fscache_n_op_deferred_release;
46231 -extern atomic_t fscache_n_op_release;
46232 -extern atomic_t fscache_n_op_gc;
46233 -extern atomic_t fscache_n_op_cancelled;
46234 -extern atomic_t fscache_n_op_rejected;
46235 +extern atomic_unchecked_t fscache_n_op_pend;
46236 +extern atomic_unchecked_t fscache_n_op_run;
46237 +extern atomic_unchecked_t fscache_n_op_enqueue;
46238 +extern atomic_unchecked_t fscache_n_op_deferred_release;
46239 +extern atomic_unchecked_t fscache_n_op_release;
46240 +extern atomic_unchecked_t fscache_n_op_gc;
46241 +extern atomic_unchecked_t fscache_n_op_cancelled;
46242 +extern atomic_unchecked_t fscache_n_op_rejected;
46243
46244 -extern atomic_t fscache_n_attr_changed;
46245 -extern atomic_t fscache_n_attr_changed_ok;
46246 -extern atomic_t fscache_n_attr_changed_nobufs;
46247 -extern atomic_t fscache_n_attr_changed_nomem;
46248 -extern atomic_t fscache_n_attr_changed_calls;
46249 +extern atomic_unchecked_t fscache_n_attr_changed;
46250 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
46251 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
46252 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
46253 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
46254
46255 -extern atomic_t fscache_n_allocs;
46256 -extern atomic_t fscache_n_allocs_ok;
46257 -extern atomic_t fscache_n_allocs_wait;
46258 -extern atomic_t fscache_n_allocs_nobufs;
46259 -extern atomic_t fscache_n_allocs_intr;
46260 -extern atomic_t fscache_n_allocs_object_dead;
46261 -extern atomic_t fscache_n_alloc_ops;
46262 -extern atomic_t fscache_n_alloc_op_waits;
46263 +extern atomic_unchecked_t fscache_n_allocs;
46264 +extern atomic_unchecked_t fscache_n_allocs_ok;
46265 +extern atomic_unchecked_t fscache_n_allocs_wait;
46266 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
46267 +extern atomic_unchecked_t fscache_n_allocs_intr;
46268 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
46269 +extern atomic_unchecked_t fscache_n_alloc_ops;
46270 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
46271
46272 -extern atomic_t fscache_n_retrievals;
46273 -extern atomic_t fscache_n_retrievals_ok;
46274 -extern atomic_t fscache_n_retrievals_wait;
46275 -extern atomic_t fscache_n_retrievals_nodata;
46276 -extern atomic_t fscache_n_retrievals_nobufs;
46277 -extern atomic_t fscache_n_retrievals_intr;
46278 -extern atomic_t fscache_n_retrievals_nomem;
46279 -extern atomic_t fscache_n_retrievals_object_dead;
46280 -extern atomic_t fscache_n_retrieval_ops;
46281 -extern atomic_t fscache_n_retrieval_op_waits;
46282 +extern atomic_unchecked_t fscache_n_retrievals;
46283 +extern atomic_unchecked_t fscache_n_retrievals_ok;
46284 +extern atomic_unchecked_t fscache_n_retrievals_wait;
46285 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
46286 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
46287 +extern atomic_unchecked_t fscache_n_retrievals_intr;
46288 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
46289 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
46290 +extern atomic_unchecked_t fscache_n_retrieval_ops;
46291 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
46292
46293 -extern atomic_t fscache_n_stores;
46294 -extern atomic_t fscache_n_stores_ok;
46295 -extern atomic_t fscache_n_stores_again;
46296 -extern atomic_t fscache_n_stores_nobufs;
46297 -extern atomic_t fscache_n_stores_oom;
46298 -extern atomic_t fscache_n_store_ops;
46299 -extern atomic_t fscache_n_store_calls;
46300 -extern atomic_t fscache_n_store_pages;
46301 -extern atomic_t fscache_n_store_radix_deletes;
46302 -extern atomic_t fscache_n_store_pages_over_limit;
46303 +extern atomic_unchecked_t fscache_n_stores;
46304 +extern atomic_unchecked_t fscache_n_stores_ok;
46305 +extern atomic_unchecked_t fscache_n_stores_again;
46306 +extern atomic_unchecked_t fscache_n_stores_nobufs;
46307 +extern atomic_unchecked_t fscache_n_stores_oom;
46308 +extern atomic_unchecked_t fscache_n_store_ops;
46309 +extern atomic_unchecked_t fscache_n_store_calls;
46310 +extern atomic_unchecked_t fscache_n_store_pages;
46311 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
46312 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
46313
46314 -extern atomic_t fscache_n_store_vmscan_not_storing;
46315 -extern atomic_t fscache_n_store_vmscan_gone;
46316 -extern atomic_t fscache_n_store_vmscan_busy;
46317 -extern atomic_t fscache_n_store_vmscan_cancelled;
46318 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46319 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
46320 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
46321 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46322
46323 -extern atomic_t fscache_n_marks;
46324 -extern atomic_t fscache_n_uncaches;
46325 +extern atomic_unchecked_t fscache_n_marks;
46326 +extern atomic_unchecked_t fscache_n_uncaches;
46327
46328 -extern atomic_t fscache_n_acquires;
46329 -extern atomic_t fscache_n_acquires_null;
46330 -extern atomic_t fscache_n_acquires_no_cache;
46331 -extern atomic_t fscache_n_acquires_ok;
46332 -extern atomic_t fscache_n_acquires_nobufs;
46333 -extern atomic_t fscache_n_acquires_oom;
46334 +extern atomic_unchecked_t fscache_n_acquires;
46335 +extern atomic_unchecked_t fscache_n_acquires_null;
46336 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
46337 +extern atomic_unchecked_t fscache_n_acquires_ok;
46338 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
46339 +extern atomic_unchecked_t fscache_n_acquires_oom;
46340
46341 -extern atomic_t fscache_n_updates;
46342 -extern atomic_t fscache_n_updates_null;
46343 -extern atomic_t fscache_n_updates_run;
46344 +extern atomic_unchecked_t fscache_n_updates;
46345 +extern atomic_unchecked_t fscache_n_updates_null;
46346 +extern atomic_unchecked_t fscache_n_updates_run;
46347
46348 -extern atomic_t fscache_n_relinquishes;
46349 -extern atomic_t fscache_n_relinquishes_null;
46350 -extern atomic_t fscache_n_relinquishes_waitcrt;
46351 -extern atomic_t fscache_n_relinquishes_retire;
46352 +extern atomic_unchecked_t fscache_n_relinquishes;
46353 +extern atomic_unchecked_t fscache_n_relinquishes_null;
46354 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46355 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
46356
46357 -extern atomic_t fscache_n_cookie_index;
46358 -extern atomic_t fscache_n_cookie_data;
46359 -extern atomic_t fscache_n_cookie_special;
46360 +extern atomic_unchecked_t fscache_n_cookie_index;
46361 +extern atomic_unchecked_t fscache_n_cookie_data;
46362 +extern atomic_unchecked_t fscache_n_cookie_special;
46363
46364 -extern atomic_t fscache_n_object_alloc;
46365 -extern atomic_t fscache_n_object_no_alloc;
46366 -extern atomic_t fscache_n_object_lookups;
46367 -extern atomic_t fscache_n_object_lookups_negative;
46368 -extern atomic_t fscache_n_object_lookups_positive;
46369 -extern atomic_t fscache_n_object_lookups_timed_out;
46370 -extern atomic_t fscache_n_object_created;
46371 -extern atomic_t fscache_n_object_avail;
46372 -extern atomic_t fscache_n_object_dead;
46373 +extern atomic_unchecked_t fscache_n_object_alloc;
46374 +extern atomic_unchecked_t fscache_n_object_no_alloc;
46375 +extern atomic_unchecked_t fscache_n_object_lookups;
46376 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
46377 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
46378 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
46379 +extern atomic_unchecked_t fscache_n_object_created;
46380 +extern atomic_unchecked_t fscache_n_object_avail;
46381 +extern atomic_unchecked_t fscache_n_object_dead;
46382
46383 -extern atomic_t fscache_n_checkaux_none;
46384 -extern atomic_t fscache_n_checkaux_okay;
46385 -extern atomic_t fscache_n_checkaux_update;
46386 -extern atomic_t fscache_n_checkaux_obsolete;
46387 +extern atomic_unchecked_t fscache_n_checkaux_none;
46388 +extern atomic_unchecked_t fscache_n_checkaux_okay;
46389 +extern atomic_unchecked_t fscache_n_checkaux_update;
46390 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
46391
46392 extern atomic_t fscache_n_cop_alloc_object;
46393 extern atomic_t fscache_n_cop_lookup_object;
46394 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
46395 atomic_inc(stat);
46396 }
46397
46398 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
46399 +{
46400 + atomic_inc_unchecked(stat);
46401 +}
46402 +
46403 static inline void fscache_stat_d(atomic_t *stat)
46404 {
46405 atomic_dec(stat);
46406 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
46407
46408 #define __fscache_stat(stat) (NULL)
46409 #define fscache_stat(stat) do {} while (0)
46410 +#define fscache_stat_unchecked(stat) do {} while (0)
46411 #define fscache_stat_d(stat) do {} while (0)
46412 #endif
46413
46414 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
46415 index b6b897c..0ffff9c 100644
46416 --- a/fs/fscache/object.c
46417 +++ b/fs/fscache/object.c
46418 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46419 /* update the object metadata on disk */
46420 case FSCACHE_OBJECT_UPDATING:
46421 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
46422 - fscache_stat(&fscache_n_updates_run);
46423 + fscache_stat_unchecked(&fscache_n_updates_run);
46424 fscache_stat(&fscache_n_cop_update_object);
46425 object->cache->ops->update_object(object);
46426 fscache_stat_d(&fscache_n_cop_update_object);
46427 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46428 spin_lock(&object->lock);
46429 object->state = FSCACHE_OBJECT_DEAD;
46430 spin_unlock(&object->lock);
46431 - fscache_stat(&fscache_n_object_dead);
46432 + fscache_stat_unchecked(&fscache_n_object_dead);
46433 goto terminal_transit;
46434
46435 /* handle the parent cache of this object being withdrawn from
46436 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46437 spin_lock(&object->lock);
46438 object->state = FSCACHE_OBJECT_DEAD;
46439 spin_unlock(&object->lock);
46440 - fscache_stat(&fscache_n_object_dead);
46441 + fscache_stat_unchecked(&fscache_n_object_dead);
46442 goto terminal_transit;
46443
46444 /* complain about the object being woken up once it is
46445 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46446 parent->cookie->def->name, cookie->def->name,
46447 object->cache->tag->name);
46448
46449 - fscache_stat(&fscache_n_object_lookups);
46450 + fscache_stat_unchecked(&fscache_n_object_lookups);
46451 fscache_stat(&fscache_n_cop_lookup_object);
46452 ret = object->cache->ops->lookup_object(object);
46453 fscache_stat_d(&fscache_n_cop_lookup_object);
46454 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46455 if (ret == -ETIMEDOUT) {
46456 /* probably stuck behind another object, so move this one to
46457 * the back of the queue */
46458 - fscache_stat(&fscache_n_object_lookups_timed_out);
46459 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
46460 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46461 }
46462
46463 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
46464
46465 spin_lock(&object->lock);
46466 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46467 - fscache_stat(&fscache_n_object_lookups_negative);
46468 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
46469
46470 /* transit here to allow write requests to begin stacking up
46471 * and read requests to begin returning ENODATA */
46472 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
46473 * result, in which case there may be data available */
46474 spin_lock(&object->lock);
46475 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46476 - fscache_stat(&fscache_n_object_lookups_positive);
46477 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
46478
46479 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
46480
46481 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
46482 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46483 } else {
46484 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
46485 - fscache_stat(&fscache_n_object_created);
46486 + fscache_stat_unchecked(&fscache_n_object_created);
46487
46488 object->state = FSCACHE_OBJECT_AVAILABLE;
46489 spin_unlock(&object->lock);
46490 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
46491 fscache_enqueue_dependents(object);
46492
46493 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
46494 - fscache_stat(&fscache_n_object_avail);
46495 + fscache_stat_unchecked(&fscache_n_object_avail);
46496
46497 _leave("");
46498 }
46499 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46500 enum fscache_checkaux result;
46501
46502 if (!object->cookie->def->check_aux) {
46503 - fscache_stat(&fscache_n_checkaux_none);
46504 + fscache_stat_unchecked(&fscache_n_checkaux_none);
46505 return FSCACHE_CHECKAUX_OKAY;
46506 }
46507
46508 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46509 switch (result) {
46510 /* entry okay as is */
46511 case FSCACHE_CHECKAUX_OKAY:
46512 - fscache_stat(&fscache_n_checkaux_okay);
46513 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
46514 break;
46515
46516 /* entry requires update */
46517 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
46518 - fscache_stat(&fscache_n_checkaux_update);
46519 + fscache_stat_unchecked(&fscache_n_checkaux_update);
46520 break;
46521
46522 /* entry requires deletion */
46523 case FSCACHE_CHECKAUX_OBSOLETE:
46524 - fscache_stat(&fscache_n_checkaux_obsolete);
46525 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
46526 break;
46527
46528 default:
46529 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
46530 index 30afdfa..2256596 100644
46531 --- a/fs/fscache/operation.c
46532 +++ b/fs/fscache/operation.c
46533 @@ -17,7 +17,7 @@
46534 #include <linux/slab.h>
46535 #include "internal.h"
46536
46537 -atomic_t fscache_op_debug_id;
46538 +atomic_unchecked_t fscache_op_debug_id;
46539 EXPORT_SYMBOL(fscache_op_debug_id);
46540
46541 /**
46542 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46543 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46544 ASSERTCMP(atomic_read(&op->usage), >, 0);
46545
46546 - fscache_stat(&fscache_n_op_enqueue);
46547 + fscache_stat_unchecked(&fscache_n_op_enqueue);
46548 switch (op->flags & FSCACHE_OP_TYPE) {
46549 case FSCACHE_OP_ASYNC:
46550 _debug("queue async");
46551 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46552 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46553 if (op->processor)
46554 fscache_enqueue_operation(op);
46555 - fscache_stat(&fscache_n_op_run);
46556 + fscache_stat_unchecked(&fscache_n_op_run);
46557 }
46558
46559 /*
46560 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46561 if (object->n_ops > 1) {
46562 atomic_inc(&op->usage);
46563 list_add_tail(&op->pend_link, &object->pending_ops);
46564 - fscache_stat(&fscache_n_op_pend);
46565 + fscache_stat_unchecked(&fscache_n_op_pend);
46566 } else if (!list_empty(&object->pending_ops)) {
46567 atomic_inc(&op->usage);
46568 list_add_tail(&op->pend_link, &object->pending_ops);
46569 - fscache_stat(&fscache_n_op_pend);
46570 + fscache_stat_unchecked(&fscache_n_op_pend);
46571 fscache_start_operations(object);
46572 } else {
46573 ASSERTCMP(object->n_in_progress, ==, 0);
46574 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46575 object->n_exclusive++; /* reads and writes must wait */
46576 atomic_inc(&op->usage);
46577 list_add_tail(&op->pend_link, &object->pending_ops);
46578 - fscache_stat(&fscache_n_op_pend);
46579 + fscache_stat_unchecked(&fscache_n_op_pend);
46580 ret = 0;
46581 } else {
46582 /* not allowed to submit ops in any other state */
46583 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46584 if (object->n_exclusive > 0) {
46585 atomic_inc(&op->usage);
46586 list_add_tail(&op->pend_link, &object->pending_ops);
46587 - fscache_stat(&fscache_n_op_pend);
46588 + fscache_stat_unchecked(&fscache_n_op_pend);
46589 } else if (!list_empty(&object->pending_ops)) {
46590 atomic_inc(&op->usage);
46591 list_add_tail(&op->pend_link, &object->pending_ops);
46592 - fscache_stat(&fscache_n_op_pend);
46593 + fscache_stat_unchecked(&fscache_n_op_pend);
46594 fscache_start_operations(object);
46595 } else {
46596 ASSERTCMP(object->n_exclusive, ==, 0);
46597 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46598 object->n_ops++;
46599 atomic_inc(&op->usage);
46600 list_add_tail(&op->pend_link, &object->pending_ops);
46601 - fscache_stat(&fscache_n_op_pend);
46602 + fscache_stat_unchecked(&fscache_n_op_pend);
46603 ret = 0;
46604 } else if (object->state == FSCACHE_OBJECT_DYING ||
46605 object->state == FSCACHE_OBJECT_LC_DYING ||
46606 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46607 - fscache_stat(&fscache_n_op_rejected);
46608 + fscache_stat_unchecked(&fscache_n_op_rejected);
46609 ret = -ENOBUFS;
46610 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46611 fscache_report_unexpected_submission(object, op, ostate);
46612 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46613
46614 ret = -EBUSY;
46615 if (!list_empty(&op->pend_link)) {
46616 - fscache_stat(&fscache_n_op_cancelled);
46617 + fscache_stat_unchecked(&fscache_n_op_cancelled);
46618 list_del_init(&op->pend_link);
46619 object->n_ops--;
46620 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46621 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46622 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46623 BUG();
46624
46625 - fscache_stat(&fscache_n_op_release);
46626 + fscache_stat_unchecked(&fscache_n_op_release);
46627
46628 if (op->release) {
46629 op->release(op);
46630 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46631 * lock, and defer it otherwise */
46632 if (!spin_trylock(&object->lock)) {
46633 _debug("defer put");
46634 - fscache_stat(&fscache_n_op_deferred_release);
46635 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
46636
46637 cache = object->cache;
46638 spin_lock(&cache->op_gc_list_lock);
46639 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46640
46641 _debug("GC DEFERRED REL OBJ%x OP%x",
46642 object->debug_id, op->debug_id);
46643 - fscache_stat(&fscache_n_op_gc);
46644 + fscache_stat_unchecked(&fscache_n_op_gc);
46645
46646 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46647
46648 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46649 index 3f7a59b..cf196cc 100644
46650 --- a/fs/fscache/page.c
46651 +++ b/fs/fscache/page.c
46652 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46653 val = radix_tree_lookup(&cookie->stores, page->index);
46654 if (!val) {
46655 rcu_read_unlock();
46656 - fscache_stat(&fscache_n_store_vmscan_not_storing);
46657 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46658 __fscache_uncache_page(cookie, page);
46659 return true;
46660 }
46661 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46662 spin_unlock(&cookie->stores_lock);
46663
46664 if (xpage) {
46665 - fscache_stat(&fscache_n_store_vmscan_cancelled);
46666 - fscache_stat(&fscache_n_store_radix_deletes);
46667 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46668 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46669 ASSERTCMP(xpage, ==, page);
46670 } else {
46671 - fscache_stat(&fscache_n_store_vmscan_gone);
46672 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46673 }
46674
46675 wake_up_bit(&cookie->flags, 0);
46676 @@ -107,7 +107,7 @@ page_busy:
46677 /* we might want to wait here, but that could deadlock the allocator as
46678 * the work threads writing to the cache may all end up sleeping
46679 * on memory allocation */
46680 - fscache_stat(&fscache_n_store_vmscan_busy);
46681 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46682 return false;
46683 }
46684 EXPORT_SYMBOL(__fscache_maybe_release_page);
46685 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46686 FSCACHE_COOKIE_STORING_TAG);
46687 if (!radix_tree_tag_get(&cookie->stores, page->index,
46688 FSCACHE_COOKIE_PENDING_TAG)) {
46689 - fscache_stat(&fscache_n_store_radix_deletes);
46690 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46691 xpage = radix_tree_delete(&cookie->stores, page->index);
46692 }
46693 spin_unlock(&cookie->stores_lock);
46694 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46695
46696 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46697
46698 - fscache_stat(&fscache_n_attr_changed_calls);
46699 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46700
46701 if (fscache_object_is_active(object)) {
46702 fscache_stat(&fscache_n_cop_attr_changed);
46703 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46704
46705 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46706
46707 - fscache_stat(&fscache_n_attr_changed);
46708 + fscache_stat_unchecked(&fscache_n_attr_changed);
46709
46710 op = kzalloc(sizeof(*op), GFP_KERNEL);
46711 if (!op) {
46712 - fscache_stat(&fscache_n_attr_changed_nomem);
46713 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46714 _leave(" = -ENOMEM");
46715 return -ENOMEM;
46716 }
46717 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46718 if (fscache_submit_exclusive_op(object, op) < 0)
46719 goto nobufs;
46720 spin_unlock(&cookie->lock);
46721 - fscache_stat(&fscache_n_attr_changed_ok);
46722 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46723 fscache_put_operation(op);
46724 _leave(" = 0");
46725 return 0;
46726 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46727 nobufs:
46728 spin_unlock(&cookie->lock);
46729 kfree(op);
46730 - fscache_stat(&fscache_n_attr_changed_nobufs);
46731 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46732 _leave(" = %d", -ENOBUFS);
46733 return -ENOBUFS;
46734 }
46735 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46736 /* allocate a retrieval operation and attempt to submit it */
46737 op = kzalloc(sizeof(*op), GFP_NOIO);
46738 if (!op) {
46739 - fscache_stat(&fscache_n_retrievals_nomem);
46740 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46741 return NULL;
46742 }
46743
46744 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46745 return 0;
46746 }
46747
46748 - fscache_stat(&fscache_n_retrievals_wait);
46749 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
46750
46751 jif = jiffies;
46752 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46753 fscache_wait_bit_interruptible,
46754 TASK_INTERRUPTIBLE) != 0) {
46755 - fscache_stat(&fscache_n_retrievals_intr);
46756 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46757 _leave(" = -ERESTARTSYS");
46758 return -ERESTARTSYS;
46759 }
46760 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46761 */
46762 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46763 struct fscache_retrieval *op,
46764 - atomic_t *stat_op_waits,
46765 - atomic_t *stat_object_dead)
46766 + atomic_unchecked_t *stat_op_waits,
46767 + atomic_unchecked_t *stat_object_dead)
46768 {
46769 int ret;
46770
46771 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46772 goto check_if_dead;
46773
46774 _debug(">>> WT");
46775 - fscache_stat(stat_op_waits);
46776 + fscache_stat_unchecked(stat_op_waits);
46777 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46778 fscache_wait_bit_interruptible,
46779 TASK_INTERRUPTIBLE) < 0) {
46780 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46781
46782 check_if_dead:
46783 if (unlikely(fscache_object_is_dead(object))) {
46784 - fscache_stat(stat_object_dead);
46785 + fscache_stat_unchecked(stat_object_dead);
46786 return -ENOBUFS;
46787 }
46788 return 0;
46789 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46790
46791 _enter("%p,%p,,,", cookie, page);
46792
46793 - fscache_stat(&fscache_n_retrievals);
46794 + fscache_stat_unchecked(&fscache_n_retrievals);
46795
46796 if (hlist_empty(&cookie->backing_objects))
46797 goto nobufs;
46798 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46799 goto nobufs_unlock;
46800 spin_unlock(&cookie->lock);
46801
46802 - fscache_stat(&fscache_n_retrieval_ops);
46803 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46804
46805 /* pin the netfs read context in case we need to do the actual netfs
46806 * read because we've encountered a cache read failure */
46807 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46808
46809 error:
46810 if (ret == -ENOMEM)
46811 - fscache_stat(&fscache_n_retrievals_nomem);
46812 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46813 else if (ret == -ERESTARTSYS)
46814 - fscache_stat(&fscache_n_retrievals_intr);
46815 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46816 else if (ret == -ENODATA)
46817 - fscache_stat(&fscache_n_retrievals_nodata);
46818 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46819 else if (ret < 0)
46820 - fscache_stat(&fscache_n_retrievals_nobufs);
46821 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46822 else
46823 - fscache_stat(&fscache_n_retrievals_ok);
46824 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46825
46826 fscache_put_retrieval(op);
46827 _leave(" = %d", ret);
46828 @@ -429,7 +429,7 @@ nobufs_unlock:
46829 spin_unlock(&cookie->lock);
46830 kfree(op);
46831 nobufs:
46832 - fscache_stat(&fscache_n_retrievals_nobufs);
46833 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46834 _leave(" = -ENOBUFS");
46835 return -ENOBUFS;
46836 }
46837 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46838
46839 _enter("%p,,%d,,,", cookie, *nr_pages);
46840
46841 - fscache_stat(&fscache_n_retrievals);
46842 + fscache_stat_unchecked(&fscache_n_retrievals);
46843
46844 if (hlist_empty(&cookie->backing_objects))
46845 goto nobufs;
46846 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46847 goto nobufs_unlock;
46848 spin_unlock(&cookie->lock);
46849
46850 - fscache_stat(&fscache_n_retrieval_ops);
46851 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46852
46853 /* pin the netfs read context in case we need to do the actual netfs
46854 * read because we've encountered a cache read failure */
46855 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46856
46857 error:
46858 if (ret == -ENOMEM)
46859 - fscache_stat(&fscache_n_retrievals_nomem);
46860 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46861 else if (ret == -ERESTARTSYS)
46862 - fscache_stat(&fscache_n_retrievals_intr);
46863 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46864 else if (ret == -ENODATA)
46865 - fscache_stat(&fscache_n_retrievals_nodata);
46866 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46867 else if (ret < 0)
46868 - fscache_stat(&fscache_n_retrievals_nobufs);
46869 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46870 else
46871 - fscache_stat(&fscache_n_retrievals_ok);
46872 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46873
46874 fscache_put_retrieval(op);
46875 _leave(" = %d", ret);
46876 @@ -545,7 +545,7 @@ nobufs_unlock:
46877 spin_unlock(&cookie->lock);
46878 kfree(op);
46879 nobufs:
46880 - fscache_stat(&fscache_n_retrievals_nobufs);
46881 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46882 _leave(" = -ENOBUFS");
46883 return -ENOBUFS;
46884 }
46885 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46886
46887 _enter("%p,%p,,,", cookie, page);
46888
46889 - fscache_stat(&fscache_n_allocs);
46890 + fscache_stat_unchecked(&fscache_n_allocs);
46891
46892 if (hlist_empty(&cookie->backing_objects))
46893 goto nobufs;
46894 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46895 goto nobufs_unlock;
46896 spin_unlock(&cookie->lock);
46897
46898 - fscache_stat(&fscache_n_alloc_ops);
46899 + fscache_stat_unchecked(&fscache_n_alloc_ops);
46900
46901 ret = fscache_wait_for_retrieval_activation(
46902 object, op,
46903 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46904
46905 error:
46906 if (ret == -ERESTARTSYS)
46907 - fscache_stat(&fscache_n_allocs_intr);
46908 + fscache_stat_unchecked(&fscache_n_allocs_intr);
46909 else if (ret < 0)
46910 - fscache_stat(&fscache_n_allocs_nobufs);
46911 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46912 else
46913 - fscache_stat(&fscache_n_allocs_ok);
46914 + fscache_stat_unchecked(&fscache_n_allocs_ok);
46915
46916 fscache_put_retrieval(op);
46917 _leave(" = %d", ret);
46918 @@ -625,7 +625,7 @@ nobufs_unlock:
46919 spin_unlock(&cookie->lock);
46920 kfree(op);
46921 nobufs:
46922 - fscache_stat(&fscache_n_allocs_nobufs);
46923 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46924 _leave(" = -ENOBUFS");
46925 return -ENOBUFS;
46926 }
46927 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46928
46929 spin_lock(&cookie->stores_lock);
46930
46931 - fscache_stat(&fscache_n_store_calls);
46932 + fscache_stat_unchecked(&fscache_n_store_calls);
46933
46934 /* find a page to store */
46935 page = NULL;
46936 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46937 page = results[0];
46938 _debug("gang %d [%lx]", n, page->index);
46939 if (page->index > op->store_limit) {
46940 - fscache_stat(&fscache_n_store_pages_over_limit);
46941 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46942 goto superseded;
46943 }
46944
46945 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46946 spin_unlock(&cookie->stores_lock);
46947 spin_unlock(&object->lock);
46948
46949 - fscache_stat(&fscache_n_store_pages);
46950 + fscache_stat_unchecked(&fscache_n_store_pages);
46951 fscache_stat(&fscache_n_cop_write_page);
46952 ret = object->cache->ops->write_page(op, page);
46953 fscache_stat_d(&fscache_n_cop_write_page);
46954 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46955 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46956 ASSERT(PageFsCache(page));
46957
46958 - fscache_stat(&fscache_n_stores);
46959 + fscache_stat_unchecked(&fscache_n_stores);
46960
46961 op = kzalloc(sizeof(*op), GFP_NOIO);
46962 if (!op)
46963 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46964 spin_unlock(&cookie->stores_lock);
46965 spin_unlock(&object->lock);
46966
46967 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46968 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46969 op->store_limit = object->store_limit;
46970
46971 if (fscache_submit_op(object, &op->op) < 0)
46972 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46973
46974 spin_unlock(&cookie->lock);
46975 radix_tree_preload_end();
46976 - fscache_stat(&fscache_n_store_ops);
46977 - fscache_stat(&fscache_n_stores_ok);
46978 + fscache_stat_unchecked(&fscache_n_store_ops);
46979 + fscache_stat_unchecked(&fscache_n_stores_ok);
46980
46981 /* the work queue now carries its own ref on the object */
46982 fscache_put_operation(&op->op);
46983 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46984 return 0;
46985
46986 already_queued:
46987 - fscache_stat(&fscache_n_stores_again);
46988 + fscache_stat_unchecked(&fscache_n_stores_again);
46989 already_pending:
46990 spin_unlock(&cookie->stores_lock);
46991 spin_unlock(&object->lock);
46992 spin_unlock(&cookie->lock);
46993 radix_tree_preload_end();
46994 kfree(op);
46995 - fscache_stat(&fscache_n_stores_ok);
46996 + fscache_stat_unchecked(&fscache_n_stores_ok);
46997 _leave(" = 0");
46998 return 0;
46999
47000 @@ -851,14 +851,14 @@ nobufs:
47001 spin_unlock(&cookie->lock);
47002 radix_tree_preload_end();
47003 kfree(op);
47004 - fscache_stat(&fscache_n_stores_nobufs);
47005 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
47006 _leave(" = -ENOBUFS");
47007 return -ENOBUFS;
47008
47009 nomem_free:
47010 kfree(op);
47011 nomem:
47012 - fscache_stat(&fscache_n_stores_oom);
47013 + fscache_stat_unchecked(&fscache_n_stores_oom);
47014 _leave(" = -ENOMEM");
47015 return -ENOMEM;
47016 }
47017 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
47018 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
47019 ASSERTCMP(page, !=, NULL);
47020
47021 - fscache_stat(&fscache_n_uncaches);
47022 + fscache_stat_unchecked(&fscache_n_uncaches);
47023
47024 /* cache withdrawal may beat us to it */
47025 if (!PageFsCache(page))
47026 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
47027 unsigned long loop;
47028
47029 #ifdef CONFIG_FSCACHE_STATS
47030 - atomic_add(pagevec->nr, &fscache_n_marks);
47031 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
47032 #endif
47033
47034 for (loop = 0; loop < pagevec->nr; loop++) {
47035 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
47036 index 4765190..2a067f2 100644
47037 --- a/fs/fscache/stats.c
47038 +++ b/fs/fscache/stats.c
47039 @@ -18,95 +18,95 @@
47040 /*
47041 * operation counters
47042 */
47043 -atomic_t fscache_n_op_pend;
47044 -atomic_t fscache_n_op_run;
47045 -atomic_t fscache_n_op_enqueue;
47046 -atomic_t fscache_n_op_requeue;
47047 -atomic_t fscache_n_op_deferred_release;
47048 -atomic_t fscache_n_op_release;
47049 -atomic_t fscache_n_op_gc;
47050 -atomic_t fscache_n_op_cancelled;
47051 -atomic_t fscache_n_op_rejected;
47052 +atomic_unchecked_t fscache_n_op_pend;
47053 +atomic_unchecked_t fscache_n_op_run;
47054 +atomic_unchecked_t fscache_n_op_enqueue;
47055 +atomic_unchecked_t fscache_n_op_requeue;
47056 +atomic_unchecked_t fscache_n_op_deferred_release;
47057 +atomic_unchecked_t fscache_n_op_release;
47058 +atomic_unchecked_t fscache_n_op_gc;
47059 +atomic_unchecked_t fscache_n_op_cancelled;
47060 +atomic_unchecked_t fscache_n_op_rejected;
47061
47062 -atomic_t fscache_n_attr_changed;
47063 -atomic_t fscache_n_attr_changed_ok;
47064 -atomic_t fscache_n_attr_changed_nobufs;
47065 -atomic_t fscache_n_attr_changed_nomem;
47066 -atomic_t fscache_n_attr_changed_calls;
47067 +atomic_unchecked_t fscache_n_attr_changed;
47068 +atomic_unchecked_t fscache_n_attr_changed_ok;
47069 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
47070 +atomic_unchecked_t fscache_n_attr_changed_nomem;
47071 +atomic_unchecked_t fscache_n_attr_changed_calls;
47072
47073 -atomic_t fscache_n_allocs;
47074 -atomic_t fscache_n_allocs_ok;
47075 -atomic_t fscache_n_allocs_wait;
47076 -atomic_t fscache_n_allocs_nobufs;
47077 -atomic_t fscache_n_allocs_intr;
47078 -atomic_t fscache_n_allocs_object_dead;
47079 -atomic_t fscache_n_alloc_ops;
47080 -atomic_t fscache_n_alloc_op_waits;
47081 +atomic_unchecked_t fscache_n_allocs;
47082 +atomic_unchecked_t fscache_n_allocs_ok;
47083 +atomic_unchecked_t fscache_n_allocs_wait;
47084 +atomic_unchecked_t fscache_n_allocs_nobufs;
47085 +atomic_unchecked_t fscache_n_allocs_intr;
47086 +atomic_unchecked_t fscache_n_allocs_object_dead;
47087 +atomic_unchecked_t fscache_n_alloc_ops;
47088 +atomic_unchecked_t fscache_n_alloc_op_waits;
47089
47090 -atomic_t fscache_n_retrievals;
47091 -atomic_t fscache_n_retrievals_ok;
47092 -atomic_t fscache_n_retrievals_wait;
47093 -atomic_t fscache_n_retrievals_nodata;
47094 -atomic_t fscache_n_retrievals_nobufs;
47095 -atomic_t fscache_n_retrievals_intr;
47096 -atomic_t fscache_n_retrievals_nomem;
47097 -atomic_t fscache_n_retrievals_object_dead;
47098 -atomic_t fscache_n_retrieval_ops;
47099 -atomic_t fscache_n_retrieval_op_waits;
47100 +atomic_unchecked_t fscache_n_retrievals;
47101 +atomic_unchecked_t fscache_n_retrievals_ok;
47102 +atomic_unchecked_t fscache_n_retrievals_wait;
47103 +atomic_unchecked_t fscache_n_retrievals_nodata;
47104 +atomic_unchecked_t fscache_n_retrievals_nobufs;
47105 +atomic_unchecked_t fscache_n_retrievals_intr;
47106 +atomic_unchecked_t fscache_n_retrievals_nomem;
47107 +atomic_unchecked_t fscache_n_retrievals_object_dead;
47108 +atomic_unchecked_t fscache_n_retrieval_ops;
47109 +atomic_unchecked_t fscache_n_retrieval_op_waits;
47110
47111 -atomic_t fscache_n_stores;
47112 -atomic_t fscache_n_stores_ok;
47113 -atomic_t fscache_n_stores_again;
47114 -atomic_t fscache_n_stores_nobufs;
47115 -atomic_t fscache_n_stores_oom;
47116 -atomic_t fscache_n_store_ops;
47117 -atomic_t fscache_n_store_calls;
47118 -atomic_t fscache_n_store_pages;
47119 -atomic_t fscache_n_store_radix_deletes;
47120 -atomic_t fscache_n_store_pages_over_limit;
47121 +atomic_unchecked_t fscache_n_stores;
47122 +atomic_unchecked_t fscache_n_stores_ok;
47123 +atomic_unchecked_t fscache_n_stores_again;
47124 +atomic_unchecked_t fscache_n_stores_nobufs;
47125 +atomic_unchecked_t fscache_n_stores_oom;
47126 +atomic_unchecked_t fscache_n_store_ops;
47127 +atomic_unchecked_t fscache_n_store_calls;
47128 +atomic_unchecked_t fscache_n_store_pages;
47129 +atomic_unchecked_t fscache_n_store_radix_deletes;
47130 +atomic_unchecked_t fscache_n_store_pages_over_limit;
47131
47132 -atomic_t fscache_n_store_vmscan_not_storing;
47133 -atomic_t fscache_n_store_vmscan_gone;
47134 -atomic_t fscache_n_store_vmscan_busy;
47135 -atomic_t fscache_n_store_vmscan_cancelled;
47136 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47137 +atomic_unchecked_t fscache_n_store_vmscan_gone;
47138 +atomic_unchecked_t fscache_n_store_vmscan_busy;
47139 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47140
47141 -atomic_t fscache_n_marks;
47142 -atomic_t fscache_n_uncaches;
47143 +atomic_unchecked_t fscache_n_marks;
47144 +atomic_unchecked_t fscache_n_uncaches;
47145
47146 -atomic_t fscache_n_acquires;
47147 -atomic_t fscache_n_acquires_null;
47148 -atomic_t fscache_n_acquires_no_cache;
47149 -atomic_t fscache_n_acquires_ok;
47150 -atomic_t fscache_n_acquires_nobufs;
47151 -atomic_t fscache_n_acquires_oom;
47152 +atomic_unchecked_t fscache_n_acquires;
47153 +atomic_unchecked_t fscache_n_acquires_null;
47154 +atomic_unchecked_t fscache_n_acquires_no_cache;
47155 +atomic_unchecked_t fscache_n_acquires_ok;
47156 +atomic_unchecked_t fscache_n_acquires_nobufs;
47157 +atomic_unchecked_t fscache_n_acquires_oom;
47158
47159 -atomic_t fscache_n_updates;
47160 -atomic_t fscache_n_updates_null;
47161 -atomic_t fscache_n_updates_run;
47162 +atomic_unchecked_t fscache_n_updates;
47163 +atomic_unchecked_t fscache_n_updates_null;
47164 +atomic_unchecked_t fscache_n_updates_run;
47165
47166 -atomic_t fscache_n_relinquishes;
47167 -atomic_t fscache_n_relinquishes_null;
47168 -atomic_t fscache_n_relinquishes_waitcrt;
47169 -atomic_t fscache_n_relinquishes_retire;
47170 +atomic_unchecked_t fscache_n_relinquishes;
47171 +atomic_unchecked_t fscache_n_relinquishes_null;
47172 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47173 +atomic_unchecked_t fscache_n_relinquishes_retire;
47174
47175 -atomic_t fscache_n_cookie_index;
47176 -atomic_t fscache_n_cookie_data;
47177 -atomic_t fscache_n_cookie_special;
47178 +atomic_unchecked_t fscache_n_cookie_index;
47179 +atomic_unchecked_t fscache_n_cookie_data;
47180 +atomic_unchecked_t fscache_n_cookie_special;
47181
47182 -atomic_t fscache_n_object_alloc;
47183 -atomic_t fscache_n_object_no_alloc;
47184 -atomic_t fscache_n_object_lookups;
47185 -atomic_t fscache_n_object_lookups_negative;
47186 -atomic_t fscache_n_object_lookups_positive;
47187 -atomic_t fscache_n_object_lookups_timed_out;
47188 -atomic_t fscache_n_object_created;
47189 -atomic_t fscache_n_object_avail;
47190 -atomic_t fscache_n_object_dead;
47191 +atomic_unchecked_t fscache_n_object_alloc;
47192 +atomic_unchecked_t fscache_n_object_no_alloc;
47193 +atomic_unchecked_t fscache_n_object_lookups;
47194 +atomic_unchecked_t fscache_n_object_lookups_negative;
47195 +atomic_unchecked_t fscache_n_object_lookups_positive;
47196 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
47197 +atomic_unchecked_t fscache_n_object_created;
47198 +atomic_unchecked_t fscache_n_object_avail;
47199 +atomic_unchecked_t fscache_n_object_dead;
47200
47201 -atomic_t fscache_n_checkaux_none;
47202 -atomic_t fscache_n_checkaux_okay;
47203 -atomic_t fscache_n_checkaux_update;
47204 -atomic_t fscache_n_checkaux_obsolete;
47205 +atomic_unchecked_t fscache_n_checkaux_none;
47206 +atomic_unchecked_t fscache_n_checkaux_okay;
47207 +atomic_unchecked_t fscache_n_checkaux_update;
47208 +atomic_unchecked_t fscache_n_checkaux_obsolete;
47209
47210 atomic_t fscache_n_cop_alloc_object;
47211 atomic_t fscache_n_cop_lookup_object;
47212 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
47213 seq_puts(m, "FS-Cache statistics\n");
47214
47215 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
47216 - atomic_read(&fscache_n_cookie_index),
47217 - atomic_read(&fscache_n_cookie_data),
47218 - atomic_read(&fscache_n_cookie_special));
47219 + atomic_read_unchecked(&fscache_n_cookie_index),
47220 + atomic_read_unchecked(&fscache_n_cookie_data),
47221 + atomic_read_unchecked(&fscache_n_cookie_special));
47222
47223 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
47224 - atomic_read(&fscache_n_object_alloc),
47225 - atomic_read(&fscache_n_object_no_alloc),
47226 - atomic_read(&fscache_n_object_avail),
47227 - atomic_read(&fscache_n_object_dead));
47228 + atomic_read_unchecked(&fscache_n_object_alloc),
47229 + atomic_read_unchecked(&fscache_n_object_no_alloc),
47230 + atomic_read_unchecked(&fscache_n_object_avail),
47231 + atomic_read_unchecked(&fscache_n_object_dead));
47232 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
47233 - atomic_read(&fscache_n_checkaux_none),
47234 - atomic_read(&fscache_n_checkaux_okay),
47235 - atomic_read(&fscache_n_checkaux_update),
47236 - atomic_read(&fscache_n_checkaux_obsolete));
47237 + atomic_read_unchecked(&fscache_n_checkaux_none),
47238 + atomic_read_unchecked(&fscache_n_checkaux_okay),
47239 + atomic_read_unchecked(&fscache_n_checkaux_update),
47240 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
47241
47242 seq_printf(m, "Pages : mrk=%u unc=%u\n",
47243 - atomic_read(&fscache_n_marks),
47244 - atomic_read(&fscache_n_uncaches));
47245 + atomic_read_unchecked(&fscache_n_marks),
47246 + atomic_read_unchecked(&fscache_n_uncaches));
47247
47248 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
47249 " oom=%u\n",
47250 - atomic_read(&fscache_n_acquires),
47251 - atomic_read(&fscache_n_acquires_null),
47252 - atomic_read(&fscache_n_acquires_no_cache),
47253 - atomic_read(&fscache_n_acquires_ok),
47254 - atomic_read(&fscache_n_acquires_nobufs),
47255 - atomic_read(&fscache_n_acquires_oom));
47256 + atomic_read_unchecked(&fscache_n_acquires),
47257 + atomic_read_unchecked(&fscache_n_acquires_null),
47258 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
47259 + atomic_read_unchecked(&fscache_n_acquires_ok),
47260 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
47261 + atomic_read_unchecked(&fscache_n_acquires_oom));
47262
47263 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
47264 - atomic_read(&fscache_n_object_lookups),
47265 - atomic_read(&fscache_n_object_lookups_negative),
47266 - atomic_read(&fscache_n_object_lookups_positive),
47267 - atomic_read(&fscache_n_object_created),
47268 - atomic_read(&fscache_n_object_lookups_timed_out));
47269 + atomic_read_unchecked(&fscache_n_object_lookups),
47270 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
47271 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
47272 + atomic_read_unchecked(&fscache_n_object_created),
47273 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
47274
47275 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
47276 - atomic_read(&fscache_n_updates),
47277 - atomic_read(&fscache_n_updates_null),
47278 - atomic_read(&fscache_n_updates_run));
47279 + atomic_read_unchecked(&fscache_n_updates),
47280 + atomic_read_unchecked(&fscache_n_updates_null),
47281 + atomic_read_unchecked(&fscache_n_updates_run));
47282
47283 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
47284 - atomic_read(&fscache_n_relinquishes),
47285 - atomic_read(&fscache_n_relinquishes_null),
47286 - atomic_read(&fscache_n_relinquishes_waitcrt),
47287 - atomic_read(&fscache_n_relinquishes_retire));
47288 + atomic_read_unchecked(&fscache_n_relinquishes),
47289 + atomic_read_unchecked(&fscache_n_relinquishes_null),
47290 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
47291 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
47292
47293 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
47294 - atomic_read(&fscache_n_attr_changed),
47295 - atomic_read(&fscache_n_attr_changed_ok),
47296 - atomic_read(&fscache_n_attr_changed_nobufs),
47297 - atomic_read(&fscache_n_attr_changed_nomem),
47298 - atomic_read(&fscache_n_attr_changed_calls));
47299 + atomic_read_unchecked(&fscache_n_attr_changed),
47300 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
47301 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
47302 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
47303 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
47304
47305 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
47306 - atomic_read(&fscache_n_allocs),
47307 - atomic_read(&fscache_n_allocs_ok),
47308 - atomic_read(&fscache_n_allocs_wait),
47309 - atomic_read(&fscache_n_allocs_nobufs),
47310 - atomic_read(&fscache_n_allocs_intr));
47311 + atomic_read_unchecked(&fscache_n_allocs),
47312 + atomic_read_unchecked(&fscache_n_allocs_ok),
47313 + atomic_read_unchecked(&fscache_n_allocs_wait),
47314 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
47315 + atomic_read_unchecked(&fscache_n_allocs_intr));
47316 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
47317 - atomic_read(&fscache_n_alloc_ops),
47318 - atomic_read(&fscache_n_alloc_op_waits),
47319 - atomic_read(&fscache_n_allocs_object_dead));
47320 + atomic_read_unchecked(&fscache_n_alloc_ops),
47321 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
47322 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
47323
47324 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
47325 " int=%u oom=%u\n",
47326 - atomic_read(&fscache_n_retrievals),
47327 - atomic_read(&fscache_n_retrievals_ok),
47328 - atomic_read(&fscache_n_retrievals_wait),
47329 - atomic_read(&fscache_n_retrievals_nodata),
47330 - atomic_read(&fscache_n_retrievals_nobufs),
47331 - atomic_read(&fscache_n_retrievals_intr),
47332 - atomic_read(&fscache_n_retrievals_nomem));
47333 + atomic_read_unchecked(&fscache_n_retrievals),
47334 + atomic_read_unchecked(&fscache_n_retrievals_ok),
47335 + atomic_read_unchecked(&fscache_n_retrievals_wait),
47336 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
47337 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
47338 + atomic_read_unchecked(&fscache_n_retrievals_intr),
47339 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
47340 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
47341 - atomic_read(&fscache_n_retrieval_ops),
47342 - atomic_read(&fscache_n_retrieval_op_waits),
47343 - atomic_read(&fscache_n_retrievals_object_dead));
47344 + atomic_read_unchecked(&fscache_n_retrieval_ops),
47345 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
47346 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
47347
47348 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
47349 - atomic_read(&fscache_n_stores),
47350 - atomic_read(&fscache_n_stores_ok),
47351 - atomic_read(&fscache_n_stores_again),
47352 - atomic_read(&fscache_n_stores_nobufs),
47353 - atomic_read(&fscache_n_stores_oom));
47354 + atomic_read_unchecked(&fscache_n_stores),
47355 + atomic_read_unchecked(&fscache_n_stores_ok),
47356 + atomic_read_unchecked(&fscache_n_stores_again),
47357 + atomic_read_unchecked(&fscache_n_stores_nobufs),
47358 + atomic_read_unchecked(&fscache_n_stores_oom));
47359 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
47360 - atomic_read(&fscache_n_store_ops),
47361 - atomic_read(&fscache_n_store_calls),
47362 - atomic_read(&fscache_n_store_pages),
47363 - atomic_read(&fscache_n_store_radix_deletes),
47364 - atomic_read(&fscache_n_store_pages_over_limit));
47365 + atomic_read_unchecked(&fscache_n_store_ops),
47366 + atomic_read_unchecked(&fscache_n_store_calls),
47367 + atomic_read_unchecked(&fscache_n_store_pages),
47368 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
47369 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
47370
47371 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
47372 - atomic_read(&fscache_n_store_vmscan_not_storing),
47373 - atomic_read(&fscache_n_store_vmscan_gone),
47374 - atomic_read(&fscache_n_store_vmscan_busy),
47375 - atomic_read(&fscache_n_store_vmscan_cancelled));
47376 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
47377 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
47378 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
47379 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
47380
47381 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
47382 - atomic_read(&fscache_n_op_pend),
47383 - atomic_read(&fscache_n_op_run),
47384 - atomic_read(&fscache_n_op_enqueue),
47385 - atomic_read(&fscache_n_op_cancelled),
47386 - atomic_read(&fscache_n_op_rejected));
47387 + atomic_read_unchecked(&fscache_n_op_pend),
47388 + atomic_read_unchecked(&fscache_n_op_run),
47389 + atomic_read_unchecked(&fscache_n_op_enqueue),
47390 + atomic_read_unchecked(&fscache_n_op_cancelled),
47391 + atomic_read_unchecked(&fscache_n_op_rejected));
47392 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
47393 - atomic_read(&fscache_n_op_deferred_release),
47394 - atomic_read(&fscache_n_op_release),
47395 - atomic_read(&fscache_n_op_gc));
47396 + atomic_read_unchecked(&fscache_n_op_deferred_release),
47397 + atomic_read_unchecked(&fscache_n_op_release),
47398 + atomic_read_unchecked(&fscache_n_op_gc));
47399
47400 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
47401 atomic_read(&fscache_n_cop_alloc_object),
47402 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
47403 index 3426521..3b75162 100644
47404 --- a/fs/fuse/cuse.c
47405 +++ b/fs/fuse/cuse.c
47406 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
47407 INIT_LIST_HEAD(&cuse_conntbl[i]);
47408
47409 /* inherit and extend fuse_dev_operations */
47410 - cuse_channel_fops = fuse_dev_operations;
47411 - cuse_channel_fops.owner = THIS_MODULE;
47412 - cuse_channel_fops.open = cuse_channel_open;
47413 - cuse_channel_fops.release = cuse_channel_release;
47414 + pax_open_kernel();
47415 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
47416 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
47417 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
47418 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
47419 + pax_close_kernel();
47420
47421 cuse_class = class_create(THIS_MODULE, "cuse");
47422 if (IS_ERR(cuse_class))
47423 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
47424 index f4246cf..b4aed1d 100644
47425 --- a/fs/fuse/dev.c
47426 +++ b/fs/fuse/dev.c
47427 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
47428 ret = 0;
47429 pipe_lock(pipe);
47430
47431 - if (!pipe->readers) {
47432 + if (!atomic_read(&pipe->readers)) {
47433 send_sig(SIGPIPE, current, 0);
47434 if (!ret)
47435 ret = -EPIPE;
47436 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
47437 index 334e0b1..fc571e8 100644
47438 --- a/fs/fuse/dir.c
47439 +++ b/fs/fuse/dir.c
47440 @@ -1189,7 +1189,7 @@ static char *read_link(struct dentry *dentry)
47441 return link;
47442 }
47443
47444 -static void free_link(char *link)
47445 +static void free_link(const char *link)
47446 {
47447 if (!IS_ERR(link))
47448 free_page((unsigned long) link);
47449 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
47450 index a9ba244..d9df391 100644
47451 --- a/fs/gfs2/inode.c
47452 +++ b/fs/gfs2/inode.c
47453 @@ -1496,7 +1496,7 @@ out:
47454
47455 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47456 {
47457 - char *s = nd_get_link(nd);
47458 + const char *s = nd_get_link(nd);
47459 if (!IS_ERR(s))
47460 kfree(s);
47461 }
47462 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
47463 index cc9281b..58996fb 100644
47464 --- a/fs/hugetlbfs/inode.c
47465 +++ b/fs/hugetlbfs/inode.c
47466 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
47467 .kill_sb = kill_litter_super,
47468 };
47469
47470 -static struct vfsmount *hugetlbfs_vfsmount;
47471 +struct vfsmount *hugetlbfs_vfsmount;
47472
47473 static int can_do_hugetlb_shm(void)
47474 {
47475 diff --git a/fs/inode.c b/fs/inode.c
47476 index c99163b..a11ad40 100644
47477 --- a/fs/inode.c
47478 +++ b/fs/inode.c
47479 @@ -867,8 +867,8 @@ unsigned int get_next_ino(void)
47480
47481 #ifdef CONFIG_SMP
47482 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
47483 - static atomic_t shared_last_ino;
47484 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
47485 + static atomic_unchecked_t shared_last_ino;
47486 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
47487
47488 res = next - LAST_INO_BATCH;
47489 }
47490 diff --git a/fs/isofs/export.c b/fs/isofs/export.c
47491 index aa4356d..1d38044 100644
47492 --- a/fs/isofs/export.c
47493 +++ b/fs/isofs/export.c
47494 @@ -134,6 +134,7 @@ isofs_export_encode_fh(struct inode *inode,
47495 len = 3;
47496 fh32[0] = ei->i_iget5_block;
47497 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
47498 + fh16[3] = 0; /* avoid leaking uninitialized data */
47499 fh32[2] = inode->i_generation;
47500 if (parent) {
47501 struct iso_inode_info *eparent;
47502 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
47503 index 4a6cf28..d3a29d3 100644
47504 --- a/fs/jffs2/erase.c
47505 +++ b/fs/jffs2/erase.c
47506 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
47507 struct jffs2_unknown_node marker = {
47508 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47509 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47510 - .totlen = cpu_to_je32(c->cleanmarker_size)
47511 + .totlen = cpu_to_je32(c->cleanmarker_size),
47512 + .hdr_crc = cpu_to_je32(0)
47513 };
47514
47515 jffs2_prealloc_raw_node_refs(c, jeb, 1);
47516 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
47517 index 6f4529d..bf12806 100644
47518 --- a/fs/jffs2/wbuf.c
47519 +++ b/fs/jffs2/wbuf.c
47520 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
47521 {
47522 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47523 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47524 - .totlen = constant_cpu_to_je32(8)
47525 + .totlen = constant_cpu_to_je32(8),
47526 + .hdr_crc = constant_cpu_to_je32(0)
47527 };
47528
47529 /*
47530 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
47531 index 4a82950..bcaa0cb 100644
47532 --- a/fs/jfs/super.c
47533 +++ b/fs/jfs/super.c
47534 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
47535
47536 jfs_inode_cachep =
47537 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47538 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47539 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47540 init_once);
47541 if (jfs_inode_cachep == NULL)
47542 return -ENOMEM;
47543 diff --git a/fs/libfs.c b/fs/libfs.c
47544 index f86ec27..4734776 100644
47545 --- a/fs/libfs.c
47546 +++ b/fs/libfs.c
47547 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47548
47549 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47550 struct dentry *next;
47551 + char d_name[sizeof(next->d_iname)];
47552 + const unsigned char *name;
47553 +
47554 next = list_entry(p, struct dentry, d_u.d_child);
47555 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47556 if (!simple_positive(next)) {
47557 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47558
47559 spin_unlock(&next->d_lock);
47560 spin_unlock(&dentry->d_lock);
47561 - if (filldir(dirent, next->d_name.name,
47562 + name = next->d_name.name;
47563 + if (name == next->d_iname) {
47564 + memcpy(d_name, name, next->d_name.len);
47565 + name = d_name;
47566 + }
47567 + if (filldir(dirent, name,
47568 next->d_name.len, filp->f_pos,
47569 next->d_inode->i_ino,
47570 dt_type(next->d_inode)) < 0)
47571 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
47572 index 8392cb8..80d6193 100644
47573 --- a/fs/lockd/clntproc.c
47574 +++ b/fs/lockd/clntproc.c
47575 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
47576 /*
47577 * Cookie counter for NLM requests
47578 */
47579 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47580 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47581
47582 void nlmclnt_next_cookie(struct nlm_cookie *c)
47583 {
47584 - u32 cookie = atomic_inc_return(&nlm_cookie);
47585 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47586
47587 memcpy(c->data, &cookie, 4);
47588 c->len=4;
47589 diff --git a/fs/locks.c b/fs/locks.c
47590 index 82c3533..34e929c 100644
47591 --- a/fs/locks.c
47592 +++ b/fs/locks.c
47593 @@ -2076,16 +2076,16 @@ void locks_remove_flock(struct file *filp)
47594 return;
47595
47596 if (filp->f_op && filp->f_op->flock) {
47597 - struct file_lock fl = {
47598 + struct file_lock flock = {
47599 .fl_pid = current->tgid,
47600 .fl_file = filp,
47601 .fl_flags = FL_FLOCK,
47602 .fl_type = F_UNLCK,
47603 .fl_end = OFFSET_MAX,
47604 };
47605 - filp->f_op->flock(filp, F_SETLKW, &fl);
47606 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
47607 - fl.fl_ops->fl_release_private(&fl);
47608 + filp->f_op->flock(filp, F_SETLKW, &flock);
47609 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
47610 + flock.fl_ops->fl_release_private(&flock);
47611 }
47612
47613 lock_flocks();
47614 diff --git a/fs/namei.c b/fs/namei.c
47615 index 7d69419..c7a09f0 100644
47616 --- a/fs/namei.c
47617 +++ b/fs/namei.c
47618 @@ -265,16 +265,32 @@ int generic_permission(struct inode *inode, int mask)
47619 if (ret != -EACCES)
47620 return ret;
47621
47622 +#ifdef CONFIG_GRKERNSEC
47623 + /* we'll block if we have to log due to a denied capability use */
47624 + if (mask & MAY_NOT_BLOCK)
47625 + return -ECHILD;
47626 +#endif
47627 +
47628 if (S_ISDIR(inode->i_mode)) {
47629 /* DACs are overridable for directories */
47630 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
47631 - return 0;
47632 if (!(mask & MAY_WRITE))
47633 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47634 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47635 + inode_capable(inode, CAP_DAC_READ_SEARCH))
47636 return 0;
47637 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
47638 + return 0;
47639 return -EACCES;
47640 }
47641 /*
47642 + * Searching includes executable on directories, else just read.
47643 + */
47644 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47645 + if (mask == MAY_READ)
47646 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47647 + inode_capable(inode, CAP_DAC_READ_SEARCH))
47648 + return 0;
47649 +
47650 + /*
47651 * Read/write DACs are always overridable.
47652 * Executable DACs are overridable when there is
47653 * at least one exec bit set.
47654 @@ -283,14 +299,6 @@ int generic_permission(struct inode *inode, int mask)
47655 if (inode_capable(inode, CAP_DAC_OVERRIDE))
47656 return 0;
47657
47658 - /*
47659 - * Searching includes executable on directories, else just read.
47660 - */
47661 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47662 - if (mask == MAY_READ)
47663 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47664 - return 0;
47665 -
47666 return -EACCES;
47667 }
47668
47669 @@ -639,11 +647,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47670 return error;
47671 }
47672
47673 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
47674 + dentry->d_inode, dentry, nd->path.mnt)) {
47675 + error = -EACCES;
47676 + *p = ERR_PTR(error); /* no ->put_link(), please */
47677 + path_put(&nd->path);
47678 + return error;
47679 + }
47680 +
47681 nd->last_type = LAST_BIND;
47682 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47683 error = PTR_ERR(*p);
47684 if (!IS_ERR(*p)) {
47685 - char *s = nd_get_link(nd);
47686 + const char *s = nd_get_link(nd);
47687 error = 0;
47688 if (s)
47689 error = __vfs_follow_link(nd, s);
47690 @@ -1386,6 +1402,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
47691 if (!res)
47692 res = walk_component(nd, path, &nd->last,
47693 nd->last_type, LOOKUP_FOLLOW);
47694 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
47695 + res = -EACCES;
47696 put_link(nd, &link, cookie);
47697 } while (res > 0);
47698
47699 @@ -1779,6 +1797,8 @@ static int path_lookupat(int dfd, const char *name,
47700 err = follow_link(&link, nd, &cookie);
47701 if (!err)
47702 err = lookup_last(nd, &path);
47703 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
47704 + err = -EACCES;
47705 put_link(nd, &link, cookie);
47706 }
47707 }
47708 @@ -1786,6 +1806,21 @@ static int path_lookupat(int dfd, const char *name,
47709 if (!err)
47710 err = complete_walk(nd);
47711
47712 + if (!(nd->flags & LOOKUP_PARENT)) {
47713 +#ifdef CONFIG_GRKERNSEC
47714 + if (flags & LOOKUP_RCU) {
47715 + if (!err)
47716 + path_put(&nd->path);
47717 + err = -ECHILD;
47718 + } else
47719 +#endif
47720 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47721 + if (!err)
47722 + path_put(&nd->path);
47723 + err = -ENOENT;
47724 + }
47725 + }
47726 +
47727 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47728 if (!nd->inode->i_op->lookup) {
47729 path_put(&nd->path);
47730 @@ -1813,6 +1848,15 @@ static int do_path_lookup(int dfd, const char *name,
47731 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47732
47733 if (likely(!retval)) {
47734 + if (*name != '/' && nd->path.dentry && nd->inode) {
47735 +#ifdef CONFIG_GRKERNSEC
47736 + if (flags & LOOKUP_RCU)
47737 + return -ECHILD;
47738 +#endif
47739 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47740 + return -ENOENT;
47741 + }
47742 +
47743 if (unlikely(!audit_dummy_context())) {
47744 if (nd->path.dentry && nd->inode)
47745 audit_inode(name, nd->path.dentry);
47746 @@ -2155,6 +2199,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
47747 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47748 return -EPERM;
47749
47750 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47751 + return -EPERM;
47752 + if (gr_handle_rawio(inode))
47753 + return -EPERM;
47754 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47755 + return -EACCES;
47756 +
47757 return 0;
47758 }
47759
47760 @@ -2190,7 +2241,7 @@ static inline int open_to_namei_flags(int flag)
47761 /*
47762 * Handle the last step of open()
47763 */
47764 -static struct file *do_last(struct nameidata *nd, struct path *path,
47765 +static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
47766 const struct open_flags *op, const char *pathname)
47767 {
47768 struct dentry *dir = nd->path.dentry;
47769 @@ -2220,16 +2271,44 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47770 error = complete_walk(nd);
47771 if (error)
47772 return ERR_PTR(error);
47773 +#ifdef CONFIG_GRKERNSEC
47774 + if (nd->flags & LOOKUP_RCU) {
47775 + error = -ECHILD;
47776 + goto exit;
47777 + }
47778 +#endif
47779 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47780 + error = -ENOENT;
47781 + goto exit;
47782 + }
47783 audit_inode(pathname, nd->path.dentry);
47784 if (open_flag & O_CREAT) {
47785 error = -EISDIR;
47786 goto exit;
47787 }
47788 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47789 + error = -EACCES;
47790 + goto exit;
47791 + }
47792 goto ok;
47793 case LAST_BIND:
47794 error = complete_walk(nd);
47795 if (error)
47796 return ERR_PTR(error);
47797 +#ifdef CONFIG_GRKERNSEC
47798 + if (nd->flags & LOOKUP_RCU) {
47799 + error = -ECHILD;
47800 + goto exit;
47801 + }
47802 +#endif
47803 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47804 + error = -ENOENT;
47805 + goto exit;
47806 + }
47807 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47808 + error = -EACCES;
47809 + goto exit;
47810 + }
47811 audit_inode(pathname, dir);
47812 goto ok;
47813 }
47814 @@ -2285,6 +2364,17 @@ retry_lookup:
47815 /* Negative dentry, just create the file */
47816 if (!dentry->d_inode) {
47817 umode_t mode = op->mode;
47818 +
47819 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
47820 + error = -EACCES;
47821 + goto exit_mutex_unlock;
47822 + }
47823 +
47824 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47825 + error = -EACCES;
47826 + goto exit_mutex_unlock;
47827 + }
47828 +
47829 if (!IS_POSIXACL(dir->d_inode))
47830 mode &= ~current_umask();
47831 /*
47832 @@ -2308,6 +2398,8 @@ retry_lookup:
47833 error = vfs_create(dir->d_inode, dentry, mode, nd);
47834 if (error)
47835 goto exit_mutex_unlock;
47836 + else
47837 + gr_handle_create(path->dentry, path->mnt);
47838 mutex_unlock(&dir->d_inode->i_mutex);
47839 dput(nd->path.dentry);
47840 nd->path.dentry = dentry;
47841 @@ -2317,6 +2409,23 @@ retry_lookup:
47842 /*
47843 * It already exists.
47844 */
47845 +
47846 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47847 + error = -ENOENT;
47848 + goto exit_mutex_unlock;
47849 + }
47850 + if (link && gr_handle_symlink_owner(link, dentry->d_inode)) {
47851 + error = -EACCES;
47852 + goto exit_mutex_unlock;
47853 + }
47854 +
47855 + /* only check if O_CREAT is specified, all other checks need to go
47856 + into may_open */
47857 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47858 + error = -EACCES;
47859 + goto exit_mutex_unlock;
47860 + }
47861 +
47862 mutex_unlock(&dir->d_inode->i_mutex);
47863 audit_inode(pathname, path->dentry);
47864
47865 @@ -2349,6 +2458,11 @@ finish_lookup:
47866 }
47867 }
47868 BUG_ON(inode != path->dentry->d_inode);
47869 + /* if we're resolving a symlink to another symlink */
47870 + if (link && gr_handle_symlink_owner(link, inode)) {
47871 + error = -EACCES;
47872 + goto exit;
47873 + }
47874 return NULL;
47875 }
47876
47877 @@ -2358,7 +2472,6 @@ finish_lookup:
47878 save_parent.dentry = nd->path.dentry;
47879 save_parent.mnt = mntget(path->mnt);
47880 nd->path.dentry = path->dentry;
47881 -
47882 }
47883 nd->inode = inode;
47884 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
47885 @@ -2367,6 +2480,21 @@ finish_lookup:
47886 path_put(&save_parent);
47887 return ERR_PTR(error);
47888 }
47889 +#ifdef CONFIG_GRKERNSEC
47890 + if (nd->flags & LOOKUP_RCU) {
47891 + error = -ECHILD;
47892 + goto exit;
47893 + }
47894 +#endif
47895 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47896 + error = -ENOENT;
47897 + goto exit;
47898 + }
47899 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
47900 + error = -EACCES;
47901 + goto exit;
47902 + }
47903 +
47904 error = -EISDIR;
47905 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
47906 goto exit;
47907 @@ -2461,7 +2589,7 @@ static struct file *path_openat(int dfd, const char *pathname,
47908 if (unlikely(error))
47909 goto out_filp;
47910
47911 - filp = do_last(nd, &path, op, pathname);
47912 + filp = do_last(nd, &path, NULL, op, pathname);
47913 while (unlikely(!filp)) { /* trailing symlink */
47914 struct path link = path;
47915 void *cookie;
47916 @@ -2476,8 +2604,9 @@ static struct file *path_openat(int dfd, const char *pathname,
47917 error = follow_link(&link, nd, &cookie);
47918 if (unlikely(error))
47919 filp = ERR_PTR(error);
47920 - else
47921 - filp = do_last(nd, &path, op, pathname);
47922 + else {
47923 + filp = do_last(nd, &path, &link, op, pathname);
47924 + }
47925 put_link(nd, &link, cookie);
47926 }
47927 out:
47928 @@ -2577,6 +2706,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47929 *path = nd.path;
47930 return dentry;
47931 eexist:
47932 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47933 + dput(dentry);
47934 + dentry = ERR_PTR(-ENOENT);
47935 + goto fail;
47936 + }
47937 dput(dentry);
47938 dentry = ERR_PTR(-EEXIST);
47939 fail:
47940 @@ -2599,6 +2733,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47941 }
47942 EXPORT_SYMBOL(user_path_create);
47943
47944 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47945 +{
47946 + char *tmp = getname(pathname);
47947 + struct dentry *res;
47948 + if (IS_ERR(tmp))
47949 + return ERR_CAST(tmp);
47950 + res = kern_path_create(dfd, tmp, path, is_dir);
47951 + if (IS_ERR(res))
47952 + putname(tmp);
47953 + else
47954 + *to = tmp;
47955 + return res;
47956 +}
47957 +
47958 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
47959 {
47960 int error = may_create(dir, dentry);
47961 @@ -2665,6 +2813,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47962 error = mnt_want_write(path.mnt);
47963 if (error)
47964 goto out_dput;
47965 +
47966 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47967 + error = -EPERM;
47968 + goto out_drop_write;
47969 + }
47970 +
47971 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47972 + error = -EACCES;
47973 + goto out_drop_write;
47974 + }
47975 +
47976 error = security_path_mknod(&path, dentry, mode, dev);
47977 if (error)
47978 goto out_drop_write;
47979 @@ -2682,6 +2841,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47980 }
47981 out_drop_write:
47982 mnt_drop_write(path.mnt);
47983 +
47984 + if (!error)
47985 + gr_handle_create(dentry, path.mnt);
47986 out_dput:
47987 dput(dentry);
47988 mutex_unlock(&path.dentry->d_inode->i_mutex);
47989 @@ -2735,12 +2897,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
47990 error = mnt_want_write(path.mnt);
47991 if (error)
47992 goto out_dput;
47993 +
47994 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47995 + error = -EACCES;
47996 + goto out_drop_write;
47997 + }
47998 +
47999 error = security_path_mkdir(&path, dentry, mode);
48000 if (error)
48001 goto out_drop_write;
48002 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
48003 out_drop_write:
48004 mnt_drop_write(path.mnt);
48005 +
48006 + if (!error)
48007 + gr_handle_create(dentry, path.mnt);
48008 out_dput:
48009 dput(dentry);
48010 mutex_unlock(&path.dentry->d_inode->i_mutex);
48011 @@ -2820,6 +2991,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
48012 char * name;
48013 struct dentry *dentry;
48014 struct nameidata nd;
48015 + ino_t saved_ino = 0;
48016 + dev_t saved_dev = 0;
48017
48018 error = user_path_parent(dfd, pathname, &nd, &name);
48019 if (error)
48020 @@ -2848,6 +3021,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
48021 error = -ENOENT;
48022 goto exit3;
48023 }
48024 +
48025 + saved_ino = dentry->d_inode->i_ino;
48026 + saved_dev = gr_get_dev_from_dentry(dentry);
48027 +
48028 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
48029 + error = -EACCES;
48030 + goto exit3;
48031 + }
48032 +
48033 error = mnt_want_write(nd.path.mnt);
48034 if (error)
48035 goto exit3;
48036 @@ -2855,6 +3037,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
48037 if (error)
48038 goto exit4;
48039 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
48040 + if (!error && (saved_dev || saved_ino))
48041 + gr_handle_delete(saved_ino, saved_dev);
48042 exit4:
48043 mnt_drop_write(nd.path.mnt);
48044 exit3:
48045 @@ -2917,6 +3101,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
48046 struct dentry *dentry;
48047 struct nameidata nd;
48048 struct inode *inode = NULL;
48049 + ino_t saved_ino = 0;
48050 + dev_t saved_dev = 0;
48051
48052 error = user_path_parent(dfd, pathname, &nd, &name);
48053 if (error)
48054 @@ -2939,6 +3125,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
48055 if (!inode)
48056 goto slashes;
48057 ihold(inode);
48058 +
48059 + if (inode->i_nlink <= 1) {
48060 + saved_ino = inode->i_ino;
48061 + saved_dev = gr_get_dev_from_dentry(dentry);
48062 + }
48063 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
48064 + error = -EACCES;
48065 + goto exit2;
48066 + }
48067 +
48068 error = mnt_want_write(nd.path.mnt);
48069 if (error)
48070 goto exit2;
48071 @@ -2946,6 +3142,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
48072 if (error)
48073 goto exit3;
48074 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
48075 + if (!error && (saved_ino || saved_dev))
48076 + gr_handle_delete(saved_ino, saved_dev);
48077 exit3:
48078 mnt_drop_write(nd.path.mnt);
48079 exit2:
48080 @@ -3021,10 +3219,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
48081 error = mnt_want_write(path.mnt);
48082 if (error)
48083 goto out_dput;
48084 +
48085 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
48086 + error = -EACCES;
48087 + goto out_drop_write;
48088 + }
48089 +
48090 error = security_path_symlink(&path, dentry, from);
48091 if (error)
48092 goto out_drop_write;
48093 error = vfs_symlink(path.dentry->d_inode, dentry, from);
48094 + if (!error)
48095 + gr_handle_create(dentry, path.mnt);
48096 out_drop_write:
48097 mnt_drop_write(path.mnt);
48098 out_dput:
48099 @@ -3099,6 +3305,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
48100 {
48101 struct dentry *new_dentry;
48102 struct path old_path, new_path;
48103 + char *to = NULL;
48104 int how = 0;
48105 int error;
48106
48107 @@ -3122,7 +3329,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
48108 if (error)
48109 return error;
48110
48111 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
48112 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
48113 error = PTR_ERR(new_dentry);
48114 if (IS_ERR(new_dentry))
48115 goto out;
48116 @@ -3133,13 +3340,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
48117 error = mnt_want_write(new_path.mnt);
48118 if (error)
48119 goto out_dput;
48120 +
48121 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
48122 + old_path.dentry->d_inode,
48123 + old_path.dentry->d_inode->i_mode, to)) {
48124 + error = -EACCES;
48125 + goto out_drop_write;
48126 + }
48127 +
48128 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
48129 + old_path.dentry, old_path.mnt, to)) {
48130 + error = -EACCES;
48131 + goto out_drop_write;
48132 + }
48133 +
48134 error = security_path_link(old_path.dentry, &new_path, new_dentry);
48135 if (error)
48136 goto out_drop_write;
48137 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
48138 + if (!error)
48139 + gr_handle_create(new_dentry, new_path.mnt);
48140 out_drop_write:
48141 mnt_drop_write(new_path.mnt);
48142 out_dput:
48143 + putname(to);
48144 dput(new_dentry);
48145 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
48146 path_put(&new_path);
48147 @@ -3373,6 +3597,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
48148 if (new_dentry == trap)
48149 goto exit5;
48150
48151 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
48152 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
48153 + to);
48154 + if (error)
48155 + goto exit5;
48156 +
48157 error = mnt_want_write(oldnd.path.mnt);
48158 if (error)
48159 goto exit5;
48160 @@ -3382,6 +3612,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
48161 goto exit6;
48162 error = vfs_rename(old_dir->d_inode, old_dentry,
48163 new_dir->d_inode, new_dentry);
48164 + if (!error)
48165 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
48166 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
48167 exit6:
48168 mnt_drop_write(oldnd.path.mnt);
48169 exit5:
48170 @@ -3407,6 +3640,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
48171
48172 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
48173 {
48174 + char tmpbuf[64];
48175 + const char *newlink;
48176 int len;
48177
48178 len = PTR_ERR(link);
48179 @@ -3416,7 +3651,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
48180 len = strlen(link);
48181 if (len > (unsigned) buflen)
48182 len = buflen;
48183 - if (copy_to_user(buffer, link, len))
48184 +
48185 + if (len < sizeof(tmpbuf)) {
48186 + memcpy(tmpbuf, link, len);
48187 + newlink = tmpbuf;
48188 + } else
48189 + newlink = link;
48190 +
48191 + if (copy_to_user(buffer, newlink, len))
48192 len = -EFAULT;
48193 out:
48194 return len;
48195 diff --git a/fs/namespace.c b/fs/namespace.c
48196 index 1e4a5fe..a5ce747 100644
48197 --- a/fs/namespace.c
48198 +++ b/fs/namespace.c
48199 @@ -1157,6 +1157,9 @@ static int do_umount(struct mount *mnt, int flags)
48200 if (!(sb->s_flags & MS_RDONLY))
48201 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
48202 up_write(&sb->s_umount);
48203 +
48204 + gr_log_remount(mnt->mnt_devname, retval);
48205 +
48206 return retval;
48207 }
48208
48209 @@ -1176,6 +1179,9 @@ static int do_umount(struct mount *mnt, int flags)
48210 br_write_unlock(&vfsmount_lock);
48211 up_write(&namespace_sem);
48212 release_mounts(&umount_list);
48213 +
48214 + gr_log_unmount(mnt->mnt_devname, retval);
48215 +
48216 return retval;
48217 }
48218
48219 @@ -2177,6 +2183,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
48220 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
48221 MS_STRICTATIME);
48222
48223 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
48224 + retval = -EPERM;
48225 + goto dput_out;
48226 + }
48227 +
48228 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
48229 + retval = -EPERM;
48230 + goto dput_out;
48231 + }
48232 +
48233 if (flags & MS_REMOUNT)
48234 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
48235 data_page);
48236 @@ -2191,6 +2207,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
48237 dev_name, data_page);
48238 dput_out:
48239 path_put(&path);
48240 +
48241 + gr_log_mount(dev_name, dir_name, retval);
48242 +
48243 return retval;
48244 }
48245
48246 @@ -2472,6 +2491,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
48247 if (error)
48248 goto out2;
48249
48250 + if (gr_handle_chroot_pivot()) {
48251 + error = -EPERM;
48252 + goto out2;
48253 + }
48254 +
48255 get_fs_root(current->fs, &root);
48256 error = lock_mount(&old);
48257 if (error)
48258 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
48259 index 30bd45a..2ad67a5 100644
48260 --- a/fs/nfs/inode.c
48261 +++ b/fs/nfs/inode.c
48262 @@ -1008,16 +1008,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
48263 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
48264 }
48265
48266 -static atomic_long_t nfs_attr_generation_counter;
48267 +static atomic_long_unchecked_t nfs_attr_generation_counter;
48268
48269 static unsigned long nfs_read_attr_generation_counter(void)
48270 {
48271 - return atomic_long_read(&nfs_attr_generation_counter);
48272 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
48273 }
48274
48275 unsigned long nfs_inc_attr_generation_counter(void)
48276 {
48277 - return atomic_long_inc_return(&nfs_attr_generation_counter);
48278 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
48279 }
48280
48281 void nfs_fattr_init(struct nfs_fattr *fattr)
48282 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
48283 index c8bd9c3..4f83416 100644
48284 --- a/fs/nfsd/vfs.c
48285 +++ b/fs/nfsd/vfs.c
48286 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48287 } else {
48288 oldfs = get_fs();
48289 set_fs(KERNEL_DS);
48290 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
48291 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
48292 set_fs(oldfs);
48293 }
48294
48295 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48296
48297 /* Write the data. */
48298 oldfs = get_fs(); set_fs(KERNEL_DS);
48299 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
48300 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
48301 set_fs(oldfs);
48302 if (host_err < 0)
48303 goto out_nfserr;
48304 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
48305 */
48306
48307 oldfs = get_fs(); set_fs(KERNEL_DS);
48308 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
48309 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
48310 set_fs(oldfs);
48311
48312 if (host_err < 0)
48313 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
48314 index 3568c8a..e0240d8 100644
48315 --- a/fs/notify/fanotify/fanotify_user.c
48316 +++ b/fs/notify/fanotify/fanotify_user.c
48317 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
48318 goto out_close_fd;
48319
48320 ret = -EFAULT;
48321 - if (copy_to_user(buf, &fanotify_event_metadata,
48322 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
48323 + copy_to_user(buf, &fanotify_event_metadata,
48324 fanotify_event_metadata.event_len))
48325 goto out_kill_access_response;
48326
48327 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
48328 index c887b13..0fdf472 100644
48329 --- a/fs/notify/notification.c
48330 +++ b/fs/notify/notification.c
48331 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
48332 * get set to 0 so it will never get 'freed'
48333 */
48334 static struct fsnotify_event *q_overflow_event;
48335 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48336 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48337
48338 /**
48339 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
48340 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48341 */
48342 u32 fsnotify_get_cookie(void)
48343 {
48344 - return atomic_inc_return(&fsnotify_sync_cookie);
48345 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
48346 }
48347 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
48348
48349 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
48350 index 99e3610..02c1068 100644
48351 --- a/fs/ntfs/dir.c
48352 +++ b/fs/ntfs/dir.c
48353 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
48354 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
48355 ~(s64)(ndir->itype.index.block_size - 1)));
48356 /* Bounds checks. */
48357 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48358 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48359 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48360 "inode 0x%lx or driver bug.", vdir->i_ino);
48361 goto err_out;
48362 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
48363 index 7389d2d..dfd5dbe 100644
48364 --- a/fs/ntfs/file.c
48365 +++ b/fs/ntfs/file.c
48366 @@ -2231,6 +2231,6 @@ const struct inode_operations ntfs_file_inode_ops = {
48367 #endif /* NTFS_RW */
48368 };
48369
48370 -const struct file_operations ntfs_empty_file_ops = {};
48371 +const struct file_operations ntfs_empty_file_ops __read_only;
48372
48373 -const struct inode_operations ntfs_empty_inode_ops = {};
48374 +const struct inode_operations ntfs_empty_inode_ops __read_only;
48375 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
48376 index 210c352..a174f83 100644
48377 --- a/fs/ocfs2/localalloc.c
48378 +++ b/fs/ocfs2/localalloc.c
48379 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
48380 goto bail;
48381 }
48382
48383 - atomic_inc(&osb->alloc_stats.moves);
48384 + atomic_inc_unchecked(&osb->alloc_stats.moves);
48385
48386 bail:
48387 if (handle)
48388 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
48389 index d355e6e..578d905 100644
48390 --- a/fs/ocfs2/ocfs2.h
48391 +++ b/fs/ocfs2/ocfs2.h
48392 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
48393
48394 struct ocfs2_alloc_stats
48395 {
48396 - atomic_t moves;
48397 - atomic_t local_data;
48398 - atomic_t bitmap_data;
48399 - atomic_t bg_allocs;
48400 - atomic_t bg_extends;
48401 + atomic_unchecked_t moves;
48402 + atomic_unchecked_t local_data;
48403 + atomic_unchecked_t bitmap_data;
48404 + atomic_unchecked_t bg_allocs;
48405 + atomic_unchecked_t bg_extends;
48406 };
48407
48408 enum ocfs2_local_alloc_state
48409 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
48410 index f169da4..9112253 100644
48411 --- a/fs/ocfs2/suballoc.c
48412 +++ b/fs/ocfs2/suballoc.c
48413 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
48414 mlog_errno(status);
48415 goto bail;
48416 }
48417 - atomic_inc(&osb->alloc_stats.bg_extends);
48418 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48419
48420 /* You should never ask for this much metadata */
48421 BUG_ON(bits_wanted >
48422 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
48423 mlog_errno(status);
48424 goto bail;
48425 }
48426 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48427 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48428
48429 *suballoc_loc = res.sr_bg_blkno;
48430 *suballoc_bit_start = res.sr_bit_offset;
48431 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
48432 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
48433 res->sr_bits);
48434
48435 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48436 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48437
48438 BUG_ON(res->sr_bits != 1);
48439
48440 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
48441 mlog_errno(status);
48442 goto bail;
48443 }
48444 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48445 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48446
48447 BUG_ON(res.sr_bits != 1);
48448
48449 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48450 cluster_start,
48451 num_clusters);
48452 if (!status)
48453 - atomic_inc(&osb->alloc_stats.local_data);
48454 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
48455 } else {
48456 if (min_clusters > (osb->bitmap_cpg - 1)) {
48457 /* The only paths asking for contiguousness
48458 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48459 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
48460 res.sr_bg_blkno,
48461 res.sr_bit_offset);
48462 - atomic_inc(&osb->alloc_stats.bitmap_data);
48463 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
48464 *num_clusters = res.sr_bits;
48465 }
48466 }
48467 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
48468 index 68f4541..89cfe6a 100644
48469 --- a/fs/ocfs2/super.c
48470 +++ b/fs/ocfs2/super.c
48471 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
48472 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48473 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48474 "Stats",
48475 - atomic_read(&osb->alloc_stats.bitmap_data),
48476 - atomic_read(&osb->alloc_stats.local_data),
48477 - atomic_read(&osb->alloc_stats.bg_allocs),
48478 - atomic_read(&osb->alloc_stats.moves),
48479 - atomic_read(&osb->alloc_stats.bg_extends));
48480 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48481 + atomic_read_unchecked(&osb->alloc_stats.local_data),
48482 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48483 + atomic_read_unchecked(&osb->alloc_stats.moves),
48484 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48485
48486 out += snprintf(buf + out, len - out,
48487 "%10s => State: %u Descriptor: %llu Size: %u bits "
48488 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
48489 spin_lock_init(&osb->osb_xattr_lock);
48490 ocfs2_init_steal_slots(osb);
48491
48492 - atomic_set(&osb->alloc_stats.moves, 0);
48493 - atomic_set(&osb->alloc_stats.local_data, 0);
48494 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
48495 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
48496 - atomic_set(&osb->alloc_stats.bg_extends, 0);
48497 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48498 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48499 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48500 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48501 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48502
48503 /* Copy the blockcheck stats from the superblock probe */
48504 osb->osb_ecc_stats = *stats;
48505 diff --git a/fs/open.c b/fs/open.c
48506 index 5d9c71b..adb5b19 100644
48507 --- a/fs/open.c
48508 +++ b/fs/open.c
48509 @@ -31,6 +31,8 @@
48510 #include <linux/ima.h>
48511 #include <linux/dnotify.h>
48512
48513 +#define CREATE_TRACE_POINTS
48514 +#include <trace/events/fs.h>
48515 #include "internal.h"
48516
48517 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
48518 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
48519 error = locks_verify_truncate(inode, NULL, length);
48520 if (!error)
48521 error = security_path_truncate(&path);
48522 +
48523 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48524 + error = -EACCES;
48525 +
48526 if (!error)
48527 error = do_truncate(path.dentry, length, 0, NULL);
48528
48529 @@ -359,6 +365,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
48530 if (__mnt_is_readonly(path.mnt))
48531 res = -EROFS;
48532
48533 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48534 + res = -EACCES;
48535 +
48536 out_path_release:
48537 path_put(&path);
48538 out:
48539 @@ -385,6 +394,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
48540 if (error)
48541 goto dput_and_out;
48542
48543 + gr_log_chdir(path.dentry, path.mnt);
48544 +
48545 set_fs_pwd(current->fs, &path);
48546
48547 dput_and_out:
48548 @@ -411,6 +422,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
48549 goto out_putf;
48550
48551 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
48552 +
48553 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48554 + error = -EPERM;
48555 +
48556 + if (!error)
48557 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48558 +
48559 if (!error)
48560 set_fs_pwd(current->fs, &file->f_path);
48561 out_putf:
48562 @@ -439,7 +457,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
48563 if (error)
48564 goto dput_and_out;
48565
48566 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48567 + goto dput_and_out;
48568 +
48569 set_fs_root(current->fs, &path);
48570 +
48571 + gr_handle_chroot_chdir(&path);
48572 +
48573 error = 0;
48574 dput_and_out:
48575 path_put(&path);
48576 @@ -457,6 +481,16 @@ static int chmod_common(struct path *path, umode_t mode)
48577 if (error)
48578 return error;
48579 mutex_lock(&inode->i_mutex);
48580 +
48581 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
48582 + error = -EACCES;
48583 + goto out_unlock;
48584 + }
48585 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
48586 + error = -EACCES;
48587 + goto out_unlock;
48588 + }
48589 +
48590 error = security_path_chmod(path, mode);
48591 if (error)
48592 goto out_unlock;
48593 @@ -512,6 +546,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
48594 uid = make_kuid(current_user_ns(), user);
48595 gid = make_kgid(current_user_ns(), group);
48596
48597 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
48598 + return -EACCES;
48599 +
48600 newattrs.ia_valid = ATTR_CTIME;
48601 if (user != (uid_t) -1) {
48602 if (!uid_valid(uid))
48603 @@ -1036,6 +1073,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
48604 } else {
48605 fsnotify_open(f);
48606 fd_install(fd, f);
48607 + trace_do_sys_open(tmp, flags, mode);
48608 }
48609 }
48610 putname(tmp);
48611 diff --git a/fs/pipe.c b/fs/pipe.c
48612 index 49c1065..13b9e12 100644
48613 --- a/fs/pipe.c
48614 +++ b/fs/pipe.c
48615 @@ -438,9 +438,9 @@ redo:
48616 }
48617 if (bufs) /* More to do? */
48618 continue;
48619 - if (!pipe->writers)
48620 + if (!atomic_read(&pipe->writers))
48621 break;
48622 - if (!pipe->waiting_writers) {
48623 + if (!atomic_read(&pipe->waiting_writers)) {
48624 /* syscall merging: Usually we must not sleep
48625 * if O_NONBLOCK is set, or if we got some data.
48626 * But if a writer sleeps in kernel space, then
48627 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48628 mutex_lock(&inode->i_mutex);
48629 pipe = inode->i_pipe;
48630
48631 - if (!pipe->readers) {
48632 + if (!atomic_read(&pipe->readers)) {
48633 send_sig(SIGPIPE, current, 0);
48634 ret = -EPIPE;
48635 goto out;
48636 @@ -553,7 +553,7 @@ redo1:
48637 for (;;) {
48638 int bufs;
48639
48640 - if (!pipe->readers) {
48641 + if (!atomic_read(&pipe->readers)) {
48642 send_sig(SIGPIPE, current, 0);
48643 if (!ret)
48644 ret = -EPIPE;
48645 @@ -644,9 +644,9 @@ redo2:
48646 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48647 do_wakeup = 0;
48648 }
48649 - pipe->waiting_writers++;
48650 + atomic_inc(&pipe->waiting_writers);
48651 pipe_wait(pipe);
48652 - pipe->waiting_writers--;
48653 + atomic_dec(&pipe->waiting_writers);
48654 }
48655 out:
48656 mutex_unlock(&inode->i_mutex);
48657 @@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48658 mask = 0;
48659 if (filp->f_mode & FMODE_READ) {
48660 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48661 - if (!pipe->writers && filp->f_version != pipe->w_counter)
48662 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48663 mask |= POLLHUP;
48664 }
48665
48666 @@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48667 * Most Unices do not set POLLERR for FIFOs but on Linux they
48668 * behave exactly like pipes for poll().
48669 */
48670 - if (!pipe->readers)
48671 + if (!atomic_read(&pipe->readers))
48672 mask |= POLLERR;
48673 }
48674
48675 @@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48676
48677 mutex_lock(&inode->i_mutex);
48678 pipe = inode->i_pipe;
48679 - pipe->readers -= decr;
48680 - pipe->writers -= decw;
48681 + atomic_sub(decr, &pipe->readers);
48682 + atomic_sub(decw, &pipe->writers);
48683
48684 - if (!pipe->readers && !pipe->writers) {
48685 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48686 free_pipe_info(inode);
48687 } else {
48688 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48689 @@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48690
48691 if (inode->i_pipe) {
48692 ret = 0;
48693 - inode->i_pipe->readers++;
48694 + atomic_inc(&inode->i_pipe->readers);
48695 }
48696
48697 mutex_unlock(&inode->i_mutex);
48698 @@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48699
48700 if (inode->i_pipe) {
48701 ret = 0;
48702 - inode->i_pipe->writers++;
48703 + atomic_inc(&inode->i_pipe->writers);
48704 }
48705
48706 mutex_unlock(&inode->i_mutex);
48707 @@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48708 if (inode->i_pipe) {
48709 ret = 0;
48710 if (filp->f_mode & FMODE_READ)
48711 - inode->i_pipe->readers++;
48712 + atomic_inc(&inode->i_pipe->readers);
48713 if (filp->f_mode & FMODE_WRITE)
48714 - inode->i_pipe->writers++;
48715 + atomic_inc(&inode->i_pipe->writers);
48716 }
48717
48718 mutex_unlock(&inode->i_mutex);
48719 @@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
48720 inode->i_pipe = NULL;
48721 }
48722
48723 -static struct vfsmount *pipe_mnt __read_mostly;
48724 +struct vfsmount *pipe_mnt __read_mostly;
48725
48726 /*
48727 * pipefs_dname() is called from d_path().
48728 @@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
48729 goto fail_iput;
48730 inode->i_pipe = pipe;
48731
48732 - pipe->readers = pipe->writers = 1;
48733 + atomic_set(&pipe->readers, 1);
48734 + atomic_set(&pipe->writers, 1);
48735 inode->i_fop = &rdwr_pipefifo_fops;
48736
48737 /*
48738 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48739 index 15af622..0e9f4467 100644
48740 --- a/fs/proc/Kconfig
48741 +++ b/fs/proc/Kconfig
48742 @@ -30,12 +30,12 @@ config PROC_FS
48743
48744 config PROC_KCORE
48745 bool "/proc/kcore support" if !ARM
48746 - depends on PROC_FS && MMU
48747 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48748
48749 config PROC_VMCORE
48750 bool "/proc/vmcore support"
48751 - depends on PROC_FS && CRASH_DUMP
48752 - default y
48753 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48754 + default n
48755 help
48756 Exports the dump image of crashed kernel in ELF format.
48757
48758 @@ -59,8 +59,8 @@ config PROC_SYSCTL
48759 limited in memory.
48760
48761 config PROC_PAGE_MONITOR
48762 - default y
48763 - depends on PROC_FS && MMU
48764 + default n
48765 + depends on PROC_FS && MMU && !GRKERNSEC
48766 bool "Enable /proc page monitoring" if EXPERT
48767 help
48768 Various /proc files exist to monitor process memory utilization:
48769 diff --git a/fs/proc/array.c b/fs/proc/array.c
48770 index c1c207c..01ce725 100644
48771 --- a/fs/proc/array.c
48772 +++ b/fs/proc/array.c
48773 @@ -60,6 +60,7 @@
48774 #include <linux/tty.h>
48775 #include <linux/string.h>
48776 #include <linux/mman.h>
48777 +#include <linux/grsecurity.h>
48778 #include <linux/proc_fs.h>
48779 #include <linux/ioport.h>
48780 #include <linux/uaccess.h>
48781 @@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48782 seq_putc(m, '\n');
48783 }
48784
48785 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48786 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
48787 +{
48788 + if (p->mm)
48789 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48790 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48791 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48792 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48793 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48794 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48795 + else
48796 + seq_printf(m, "PaX:\t-----\n");
48797 +}
48798 +#endif
48799 +
48800 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48801 struct pid *pid, struct task_struct *task)
48802 {
48803 @@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48804 task_cpus_allowed(m, task);
48805 cpuset_task_status_allowed(m, task);
48806 task_context_switch_counts(m, task);
48807 +
48808 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48809 + task_pax(m, task);
48810 +#endif
48811 +
48812 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48813 + task_grsec_rbac(m, task);
48814 +#endif
48815 +
48816 return 0;
48817 }
48818
48819 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48820 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48821 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48822 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48823 +#endif
48824 +
48825 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48826 struct pid *pid, struct task_struct *task, int whole)
48827 {
48828 @@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48829 char tcomm[sizeof(task->comm)];
48830 unsigned long flags;
48831
48832 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48833 + if (current->exec_id != m->exec_id) {
48834 + gr_log_badprocpid("stat");
48835 + return 0;
48836 + }
48837 +#endif
48838 +
48839 state = *get_task_state(task);
48840 vsize = eip = esp = 0;
48841 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48842 @@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48843 gtime = task->gtime;
48844 }
48845
48846 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48847 + if (PAX_RAND_FLAGS(mm)) {
48848 + eip = 0;
48849 + esp = 0;
48850 + wchan = 0;
48851 + }
48852 +#endif
48853 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48854 + wchan = 0;
48855 + eip =0;
48856 + esp =0;
48857 +#endif
48858 +
48859 /* scale priority and nice values from timeslices to -20..20 */
48860 /* to make it look like a "normal" Unix priority/nice value */
48861 priority = task_prio(task);
48862 @@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48863 seq_put_decimal_ull(m, ' ', vsize);
48864 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
48865 seq_put_decimal_ull(m, ' ', rsslim);
48866 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48867 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
48868 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
48869 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
48870 +#else
48871 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
48872 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
48873 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
48874 +#endif
48875 seq_put_decimal_ull(m, ' ', esp);
48876 seq_put_decimal_ull(m, ' ', eip);
48877 /* The signal information here is obsolete.
48878 @@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48879 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
48880 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
48881
48882 - if (mm && permitted) {
48883 + if (mm && permitted
48884 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48885 + && !PAX_RAND_FLAGS(mm)
48886 +#endif
48887 + ) {
48888 seq_put_decimal_ull(m, ' ', mm->start_data);
48889 seq_put_decimal_ull(m, ' ', mm->end_data);
48890 seq_put_decimal_ull(m, ' ', mm->start_brk);
48891 @@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48892 struct pid *pid, struct task_struct *task)
48893 {
48894 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48895 - struct mm_struct *mm = get_task_mm(task);
48896 + struct mm_struct *mm;
48897
48898 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48899 + if (current->exec_id != m->exec_id) {
48900 + gr_log_badprocpid("statm");
48901 + return 0;
48902 + }
48903 +#endif
48904 + mm = get_task_mm(task);
48905 if (mm) {
48906 size = task_statm(mm, &shared, &text, &data, &resident);
48907 mmput(mm);
48908 @@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48909 return 0;
48910 }
48911
48912 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48913 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48914 +{
48915 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
48916 +}
48917 +#endif
48918 +
48919 #ifdef CONFIG_CHECKPOINT_RESTORE
48920 static struct pid *
48921 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
48922 diff --git a/fs/proc/base.c b/fs/proc/base.c
48923 index 437195f..cd2210d 100644
48924 --- a/fs/proc/base.c
48925 +++ b/fs/proc/base.c
48926 @@ -110,6 +110,14 @@ struct pid_entry {
48927 union proc_op op;
48928 };
48929
48930 +struct getdents_callback {
48931 + struct linux_dirent __user * current_dir;
48932 + struct linux_dirent __user * previous;
48933 + struct file * file;
48934 + int count;
48935 + int error;
48936 +};
48937 +
48938 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48939 .name = (NAME), \
48940 .len = sizeof(NAME) - 1, \
48941 @@ -209,6 +217,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48942 if (!mm->arg_end)
48943 goto out_mm; /* Shh! No looking before we're done */
48944
48945 + if (gr_acl_handle_procpidmem(task))
48946 + goto out_mm;
48947 +
48948 len = mm->arg_end - mm->arg_start;
48949
48950 if (len > PAGE_SIZE)
48951 @@ -236,12 +247,28 @@ out:
48952 return res;
48953 }
48954
48955 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48956 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48957 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48958 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48959 +#endif
48960 +
48961 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48962 {
48963 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
48964 int res = PTR_ERR(mm);
48965 if (mm && !IS_ERR(mm)) {
48966 unsigned int nwords = 0;
48967 +
48968 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48969 + /* allow if we're currently ptracing this task */
48970 + if (PAX_RAND_FLAGS(mm) &&
48971 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48972 + mmput(mm);
48973 + return 0;
48974 + }
48975 +#endif
48976 +
48977 do {
48978 nwords += 2;
48979 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48980 @@ -255,7 +282,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48981 }
48982
48983
48984 -#ifdef CONFIG_KALLSYMS
48985 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48986 /*
48987 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48988 * Returns the resolved symbol. If that fails, simply return the address.
48989 @@ -294,7 +321,7 @@ static void unlock_trace(struct task_struct *task)
48990 mutex_unlock(&task->signal->cred_guard_mutex);
48991 }
48992
48993 -#ifdef CONFIG_STACKTRACE
48994 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48995
48996 #define MAX_STACK_TRACE_DEPTH 64
48997
48998 @@ -486,7 +513,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48999 return count;
49000 }
49001
49002 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49003 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49004 static int proc_pid_syscall(struct task_struct *task, char *buffer)
49005 {
49006 long nr;
49007 @@ -515,7 +542,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
49008 /************************************************************************/
49009
49010 /* permission checks */
49011 -static int proc_fd_access_allowed(struct inode *inode)
49012 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
49013 {
49014 struct task_struct *task;
49015 int allowed = 0;
49016 @@ -525,7 +552,10 @@ static int proc_fd_access_allowed(struct inode *inode)
49017 */
49018 task = get_proc_task(inode);
49019 if (task) {
49020 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
49021 + if (log)
49022 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
49023 + else
49024 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
49025 put_task_struct(task);
49026 }
49027 return allowed;
49028 @@ -563,10 +593,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
49029 struct task_struct *task,
49030 int hide_pid_min)
49031 {
49032 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49033 + return false;
49034 +
49035 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49036 + rcu_read_lock();
49037 + {
49038 + const struct cred *tmpcred = current_cred();
49039 + const struct cred *cred = __task_cred(task);
49040 +
49041 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
49042 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49043 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
49044 +#endif
49045 + ) {
49046 + rcu_read_unlock();
49047 + return true;
49048 + }
49049 + }
49050 + rcu_read_unlock();
49051 +
49052 + if (!pid->hide_pid)
49053 + return false;
49054 +#endif
49055 +
49056 if (pid->hide_pid < hide_pid_min)
49057 return true;
49058 if (in_group_p(pid->pid_gid))
49059 return true;
49060 +
49061 return ptrace_may_access(task, PTRACE_MODE_READ);
49062 }
49063
49064 @@ -584,7 +639,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
49065 put_task_struct(task);
49066
49067 if (!has_perms) {
49068 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49069 + {
49070 +#else
49071 if (pid->hide_pid == 2) {
49072 +#endif
49073 /*
49074 * Let's make getdents(), stat(), and open()
49075 * consistent with each other. If a process
49076 @@ -682,6 +741,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
49077 if (!task)
49078 return -ESRCH;
49079
49080 + if (gr_acl_handle_procpidmem(task)) {
49081 + put_task_struct(task);
49082 + return -EPERM;
49083 + }
49084 +
49085 mm = mm_access(task, mode);
49086 put_task_struct(task);
49087
49088 @@ -695,16 +759,24 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
49089 mmput(mm);
49090 }
49091
49092 - /* OK to pass negative loff_t, we can catch out-of-range */
49093 - file->f_mode |= FMODE_UNSIGNED_OFFSET;
49094 file->private_data = mm;
49095
49096 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49097 + file->f_version = current->exec_id;
49098 +#endif
49099 +
49100 return 0;
49101 }
49102
49103 static int mem_open(struct inode *inode, struct file *file)
49104 {
49105 - return __mem_open(inode, file, PTRACE_MODE_ATTACH);
49106 + int ret;
49107 + ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
49108 +
49109 + /* OK to pass negative loff_t, we can catch out-of-range */
49110 + file->f_mode |= FMODE_UNSIGNED_OFFSET;
49111 +
49112 + return ret;
49113 }
49114
49115 static ssize_t mem_rw(struct file *file, char __user *buf,
49116 @@ -715,6 +787,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
49117 ssize_t copied;
49118 char *page;
49119
49120 +#ifdef CONFIG_GRKERNSEC
49121 + if (write)
49122 + return -EPERM;
49123 +#endif
49124 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49125 + if (file->f_version != current->exec_id) {
49126 + gr_log_badprocpid("mem");
49127 + return 0;
49128 + }
49129 +#endif
49130 +
49131 if (!mm)
49132 return 0;
49133
49134 @@ -819,6 +902,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
49135 if (!mm)
49136 return 0;
49137
49138 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49139 + if (file->f_version != current->exec_id) {
49140 + gr_log_badprocpid("environ");
49141 + return 0;
49142 + }
49143 +#endif
49144 +
49145 page = (char *)__get_free_page(GFP_TEMPORARY);
49146 if (!page)
49147 return -ENOMEM;
49148 @@ -827,15 +917,17 @@ static ssize_t environ_read(struct file *file, char __user *buf,
49149 if (!atomic_inc_not_zero(&mm->mm_users))
49150 goto free;
49151 while (count > 0) {
49152 - int this_len, retval, max_len;
49153 + size_t this_len, max_len;
49154 + int retval;
49155 +
49156 + if (src >= (mm->env_end - mm->env_start))
49157 + break;
49158
49159 this_len = mm->env_end - (mm->env_start + src);
49160
49161 - if (this_len <= 0)
49162 - break;
49163
49164 - max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
49165 - this_len = (this_len > max_len) ? max_len : this_len;
49166 + max_len = min_t(size_t, PAGE_SIZE, count);
49167 + this_len = min(max_len, this_len);
49168
49169 retval = access_remote_vm(mm, (mm->env_start + src),
49170 page, this_len, 0);
49171 @@ -1433,7 +1525,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
49172 path_put(&nd->path);
49173
49174 /* Are we allowed to snoop on the tasks file descriptors? */
49175 - if (!proc_fd_access_allowed(inode))
49176 + if (!proc_fd_access_allowed(inode, 0))
49177 goto out;
49178
49179 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
49180 @@ -1472,8 +1564,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
49181 struct path path;
49182
49183 /* Are we allowed to snoop on the tasks file descriptors? */
49184 - if (!proc_fd_access_allowed(inode))
49185 - goto out;
49186 + /* logging this is needed for learning on chromium to work properly,
49187 + but we don't want to flood the logs from 'ps' which does a readlink
49188 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
49189 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
49190 + */
49191 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
49192 + if (!proc_fd_access_allowed(inode,0))
49193 + goto out;
49194 + } else {
49195 + if (!proc_fd_access_allowed(inode,1))
49196 + goto out;
49197 + }
49198
49199 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
49200 if (error)
49201 @@ -1538,7 +1640,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
49202 rcu_read_lock();
49203 cred = __task_cred(task);
49204 inode->i_uid = cred->euid;
49205 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49206 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49207 +#else
49208 inode->i_gid = cred->egid;
49209 +#endif
49210 rcu_read_unlock();
49211 }
49212 security_task_to_inode(task, inode);
49213 @@ -1574,10 +1680,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
49214 return -ENOENT;
49215 }
49216 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
49217 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49218 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
49219 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49220 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
49221 +#endif
49222 task_dumpable(task)) {
49223 cred = __task_cred(task);
49224 stat->uid = cred->euid;
49225 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49226 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
49227 +#else
49228 stat->gid = cred->egid;
49229 +#endif
49230 }
49231 }
49232 rcu_read_unlock();
49233 @@ -1615,11 +1730,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
49234
49235 if (task) {
49236 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
49237 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49238 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
49239 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49240 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
49241 +#endif
49242 task_dumpable(task)) {
49243 rcu_read_lock();
49244 cred = __task_cred(task);
49245 inode->i_uid = cred->euid;
49246 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49247 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49248 +#else
49249 inode->i_gid = cred->egid;
49250 +#endif
49251 rcu_read_unlock();
49252 } else {
49253 inode->i_uid = GLOBAL_ROOT_UID;
49254 @@ -1737,7 +1861,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
49255 int fd = proc_fd(inode);
49256
49257 if (task) {
49258 - files = get_files_struct(task);
49259 + if (!gr_acl_handle_procpidmem(task))
49260 + files = get_files_struct(task);
49261 put_task_struct(task);
49262 }
49263 if (files) {
49264 @@ -2336,11 +2461,21 @@ static const struct file_operations proc_map_files_operations = {
49265 */
49266 static int proc_fd_permission(struct inode *inode, int mask)
49267 {
49268 + struct task_struct *task;
49269 int rv = generic_permission(inode, mask);
49270 - if (rv == 0)
49271 - return 0;
49272 +
49273 if (task_pid(current) == proc_pid(inode))
49274 rv = 0;
49275 +
49276 + task = get_proc_task(inode);
49277 + if (task == NULL)
49278 + return rv;
49279 +
49280 + if (gr_acl_handle_procpidmem(task))
49281 + rv = -EACCES;
49282 +
49283 + put_task_struct(task);
49284 +
49285 return rv;
49286 }
49287
49288 @@ -2450,6 +2585,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
49289 if (!task)
49290 goto out_no_task;
49291
49292 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49293 + goto out;
49294 +
49295 /*
49296 * Yes, it does not scale. And it should not. Don't add
49297 * new entries into /proc/<tgid>/ without very good reasons.
49298 @@ -2494,6 +2632,9 @@ static int proc_pident_readdir(struct file *filp,
49299 if (!task)
49300 goto out_no_task;
49301
49302 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49303 + goto out;
49304 +
49305 ret = 0;
49306 i = filp->f_pos;
49307 switch (i) {
49308 @@ -2764,7 +2905,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
49309 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
49310 void *cookie)
49311 {
49312 - char *s = nd_get_link(nd);
49313 + const char *s = nd_get_link(nd);
49314 if (!IS_ERR(s))
49315 __putname(s);
49316 }
49317 @@ -3033,7 +3174,7 @@ static const struct pid_entry tgid_base_stuff[] = {
49318 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
49319 #endif
49320 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49321 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49322 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49323 INF("syscall", S_IRUGO, proc_pid_syscall),
49324 #endif
49325 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49326 @@ -3058,10 +3199,10 @@ static const struct pid_entry tgid_base_stuff[] = {
49327 #ifdef CONFIG_SECURITY
49328 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49329 #endif
49330 -#ifdef CONFIG_KALLSYMS
49331 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49332 INF("wchan", S_IRUGO, proc_pid_wchan),
49333 #endif
49334 -#ifdef CONFIG_STACKTRACE
49335 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49336 ONE("stack", S_IRUGO, proc_pid_stack),
49337 #endif
49338 #ifdef CONFIG_SCHEDSTATS
49339 @@ -3095,6 +3236,9 @@ static const struct pid_entry tgid_base_stuff[] = {
49340 #ifdef CONFIG_HARDWALL
49341 INF("hardwall", S_IRUGO, proc_pid_hardwall),
49342 #endif
49343 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49344 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
49345 +#endif
49346 #ifdef CONFIG_USER_NS
49347 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
49348 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
49349 @@ -3225,7 +3369,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
49350 if (!inode)
49351 goto out;
49352
49353 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49354 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
49355 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49356 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49357 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
49358 +#else
49359 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
49360 +#endif
49361 inode->i_op = &proc_tgid_base_inode_operations;
49362 inode->i_fop = &proc_tgid_base_operations;
49363 inode->i_flags|=S_IMMUTABLE;
49364 @@ -3267,7 +3418,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
49365 if (!task)
49366 goto out;
49367
49368 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49369 + goto out_put_task;
49370 +
49371 result = proc_pid_instantiate(dir, dentry, task, NULL);
49372 +out_put_task:
49373 put_task_struct(task);
49374 out:
49375 return result;
49376 @@ -3330,6 +3485,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
49377 static int fake_filldir(void *buf, const char *name, int namelen,
49378 loff_t offset, u64 ino, unsigned d_type)
49379 {
49380 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
49381 + __buf->error = -EINVAL;
49382 return 0;
49383 }
49384
49385 @@ -3396,7 +3553,7 @@ static const struct pid_entry tid_base_stuff[] = {
49386 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49387 #endif
49388 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49389 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49390 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49391 INF("syscall", S_IRUGO, proc_pid_syscall),
49392 #endif
49393 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49394 @@ -3423,10 +3580,10 @@ static const struct pid_entry tid_base_stuff[] = {
49395 #ifdef CONFIG_SECURITY
49396 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49397 #endif
49398 -#ifdef CONFIG_KALLSYMS
49399 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49400 INF("wchan", S_IRUGO, proc_pid_wchan),
49401 #endif
49402 -#ifdef CONFIG_STACKTRACE
49403 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49404 ONE("stack", S_IRUGO, proc_pid_stack),
49405 #endif
49406 #ifdef CONFIG_SCHEDSTATS
49407 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49408 index 82676e3..5f8518a 100644
49409 --- a/fs/proc/cmdline.c
49410 +++ b/fs/proc/cmdline.c
49411 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
49412
49413 static int __init proc_cmdline_init(void)
49414 {
49415 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49416 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49417 +#else
49418 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49419 +#endif
49420 return 0;
49421 }
49422 module_init(proc_cmdline_init);
49423 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49424 index b143471..bb105e5 100644
49425 --- a/fs/proc/devices.c
49426 +++ b/fs/proc/devices.c
49427 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
49428
49429 static int __init proc_devices_init(void)
49430 {
49431 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49432 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49433 +#else
49434 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49435 +#endif
49436 return 0;
49437 }
49438 module_init(proc_devices_init);
49439 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
49440 index 7ac817b..abab1a5 100644
49441 --- a/fs/proc/inode.c
49442 +++ b/fs/proc/inode.c
49443 @@ -21,11 +21,17 @@
49444 #include <linux/seq_file.h>
49445 #include <linux/slab.h>
49446 #include <linux/mount.h>
49447 +#include <linux/grsecurity.h>
49448
49449 #include <asm/uaccess.h>
49450
49451 #include "internal.h"
49452
49453 +#ifdef CONFIG_PROC_SYSCTL
49454 +extern const struct inode_operations proc_sys_inode_operations;
49455 +extern const struct inode_operations proc_sys_dir_operations;
49456 +#endif
49457 +
49458 static void proc_evict_inode(struct inode *inode)
49459 {
49460 struct proc_dir_entry *de;
49461 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
49462 ns_ops = PROC_I(inode)->ns_ops;
49463 if (ns_ops && ns_ops->put)
49464 ns_ops->put(PROC_I(inode)->ns);
49465 +
49466 +#ifdef CONFIG_PROC_SYSCTL
49467 + if (inode->i_op == &proc_sys_inode_operations ||
49468 + inode->i_op == &proc_sys_dir_operations)
49469 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49470 +#endif
49471 +
49472 }
49473
49474 static struct kmem_cache * proc_inode_cachep;
49475 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
49476 if (de->mode) {
49477 inode->i_mode = de->mode;
49478 inode->i_uid = de->uid;
49479 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49480 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49481 +#else
49482 inode->i_gid = de->gid;
49483 +#endif
49484 }
49485 if (de->size)
49486 inode->i_size = de->size;
49487 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
49488 index eca4aca..19166b2 100644
49489 --- a/fs/proc/internal.h
49490 +++ b/fs/proc/internal.h
49491 @@ -52,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
49492 struct pid *pid, struct task_struct *task);
49493 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49494 struct pid *pid, struct task_struct *task);
49495 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49496 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49497 +#endif
49498 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49499
49500 extern const struct file_operations proc_tid_children_operations;
49501 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
49502 index 86c67ee..cdca321 100644
49503 --- a/fs/proc/kcore.c
49504 +++ b/fs/proc/kcore.c
49505 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49506 * the addresses in the elf_phdr on our list.
49507 */
49508 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49509 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49510 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49511 + if (tsz > buflen)
49512 tsz = buflen;
49513 -
49514 +
49515 while (buflen) {
49516 struct kcore_list *m;
49517
49518 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49519 kfree(elf_buf);
49520 } else {
49521 if (kern_addr_valid(start)) {
49522 - unsigned long n;
49523 + char *elf_buf;
49524 + mm_segment_t oldfs;
49525
49526 - n = copy_to_user(buffer, (char *)start, tsz);
49527 - /*
49528 - * We cannot distinguish between fault on source
49529 - * and fault on destination. When this happens
49530 - * we clear too and hope it will trigger the
49531 - * EFAULT again.
49532 - */
49533 - if (n) {
49534 - if (clear_user(buffer + tsz - n,
49535 - n))
49536 + elf_buf = kmalloc(tsz, GFP_KERNEL);
49537 + if (!elf_buf)
49538 + return -ENOMEM;
49539 + oldfs = get_fs();
49540 + set_fs(KERNEL_DS);
49541 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49542 + set_fs(oldfs);
49543 + if (copy_to_user(buffer, elf_buf, tsz)) {
49544 + kfree(elf_buf);
49545 return -EFAULT;
49546 + }
49547 }
49548 + set_fs(oldfs);
49549 + kfree(elf_buf);
49550 } else {
49551 if (clear_user(buffer, tsz))
49552 return -EFAULT;
49553 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49554
49555 static int open_kcore(struct inode *inode, struct file *filp)
49556 {
49557 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49558 + return -EPERM;
49559 +#endif
49560 if (!capable(CAP_SYS_RAWIO))
49561 return -EPERM;
49562 if (kcore_need_update)
49563 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
49564 index 80e4645..53e5fcf 100644
49565 --- a/fs/proc/meminfo.c
49566 +++ b/fs/proc/meminfo.c
49567 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49568 vmi.used >> 10,
49569 vmi.largest_chunk >> 10
49570 #ifdef CONFIG_MEMORY_FAILURE
49571 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49572 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49573 #endif
49574 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49575 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
49576 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49577 index b1822dd..df622cb 100644
49578 --- a/fs/proc/nommu.c
49579 +++ b/fs/proc/nommu.c
49580 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
49581 if (len < 1)
49582 len = 1;
49583 seq_printf(m, "%*c", len, ' ');
49584 - seq_path(m, &file->f_path, "");
49585 + seq_path(m, &file->f_path, "\n\\");
49586 }
49587
49588 seq_putc(m, '\n');
49589 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
49590 index 06e1cc1..177cd98 100644
49591 --- a/fs/proc/proc_net.c
49592 +++ b/fs/proc/proc_net.c
49593 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
49594 struct task_struct *task;
49595 struct nsproxy *ns;
49596 struct net *net = NULL;
49597 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49598 + const struct cred *cred = current_cred();
49599 +#endif
49600 +
49601 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49602 + if (cred->fsuid)
49603 + return net;
49604 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49605 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49606 + return net;
49607 +#endif
49608
49609 rcu_read_lock();
49610 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49611 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
49612 index 5d00eb8..75e1775 100644
49613 --- a/fs/proc/proc_sysctl.c
49614 +++ b/fs/proc/proc_sysctl.c
49615 @@ -12,11 +12,15 @@
49616 #include <linux/module.h>
49617 #include "internal.h"
49618
49619 +extern int gr_handle_chroot_sysctl(const int op);
49620 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
49621 + const int op);
49622 +
49623 static const struct dentry_operations proc_sys_dentry_operations;
49624 static const struct file_operations proc_sys_file_operations;
49625 -static const struct inode_operations proc_sys_inode_operations;
49626 +const struct inode_operations proc_sys_inode_operations;
49627 static const struct file_operations proc_sys_dir_file_operations;
49628 -static const struct inode_operations proc_sys_dir_operations;
49629 +const struct inode_operations proc_sys_dir_operations;
49630
49631 void proc_sys_poll_notify(struct ctl_table_poll *poll)
49632 {
49633 @@ -467,8 +471,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
49634
49635 err = NULL;
49636 d_set_d_op(dentry, &proc_sys_dentry_operations);
49637 +
49638 + gr_handle_proc_create(dentry, inode);
49639 +
49640 d_add(dentry, inode);
49641
49642 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
49643 + err = ERR_PTR(-ENOENT);
49644 +
49645 out:
49646 if (h)
49647 sysctl_head_finish(h);
49648 @@ -482,18 +492,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49649 struct inode *inode = filp->f_path.dentry->d_inode;
49650 struct ctl_table_header *head = grab_header(inode);
49651 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
49652 + int op = write ? MAY_WRITE : MAY_READ;
49653 ssize_t error;
49654 size_t res;
49655
49656 if (IS_ERR(head))
49657 return PTR_ERR(head);
49658
49659 +
49660 /*
49661 * At this point we know that the sysctl was not unregistered
49662 * and won't be until we finish.
49663 */
49664 error = -EPERM;
49665 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
49666 + if (sysctl_perm(head->root, table, op))
49667 goto out;
49668
49669 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
49670 @@ -501,6 +513,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49671 if (!table->proc_handler)
49672 goto out;
49673
49674 +#ifdef CONFIG_GRKERNSEC
49675 + error = -EPERM;
49676 + if (gr_handle_chroot_sysctl(op))
49677 + goto out;
49678 + dget(filp->f_path.dentry);
49679 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
49680 + dput(filp->f_path.dentry);
49681 + goto out;
49682 + }
49683 + dput(filp->f_path.dentry);
49684 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
49685 + goto out;
49686 + if (write && !capable(CAP_SYS_ADMIN))
49687 + goto out;
49688 +#endif
49689 +
49690 /* careful: calling conventions are nasty here */
49691 res = count;
49692 error = table->proc_handler(table, write, buf, &res, ppos);
49693 @@ -598,6 +626,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
49694 return -ENOMEM;
49695 } else {
49696 d_set_d_op(child, &proc_sys_dentry_operations);
49697 +
49698 + gr_handle_proc_create(child, inode);
49699 +
49700 d_add(child, inode);
49701 }
49702 } else {
49703 @@ -641,6 +672,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
49704 if ((*pos)++ < file->f_pos)
49705 return 0;
49706
49707 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
49708 + return 0;
49709 +
49710 if (unlikely(S_ISLNK(table->mode)))
49711 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
49712 else
49713 @@ -758,6 +792,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49714 if (IS_ERR(head))
49715 return PTR_ERR(head);
49716
49717 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
49718 + return -ENOENT;
49719 +
49720 generic_fillattr(inode, stat);
49721 if (table)
49722 stat->mode = (stat->mode & S_IFMT) | table->mode;
49723 @@ -780,13 +817,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
49724 .llseek = generic_file_llseek,
49725 };
49726
49727 -static const struct inode_operations proc_sys_inode_operations = {
49728 +const struct inode_operations proc_sys_inode_operations = {
49729 .permission = proc_sys_permission,
49730 .setattr = proc_sys_setattr,
49731 .getattr = proc_sys_getattr,
49732 };
49733
49734 -static const struct inode_operations proc_sys_dir_operations = {
49735 +const struct inode_operations proc_sys_dir_operations = {
49736 .lookup = proc_sys_lookup,
49737 .permission = proc_sys_permission,
49738 .setattr = proc_sys_setattr,
49739 diff --git a/fs/proc/root.c b/fs/proc/root.c
49740 index 7c30fce..b3d3aa2 100644
49741 --- a/fs/proc/root.c
49742 +++ b/fs/proc/root.c
49743 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
49744 #ifdef CONFIG_PROC_DEVICETREE
49745 proc_device_tree_init();
49746 #endif
49747 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49748 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49749 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49750 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49751 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49752 +#endif
49753 +#else
49754 proc_mkdir("bus", NULL);
49755 +#endif
49756 proc_sys_init();
49757 }
49758
49759 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49760 index 4540b8f..1b9772f 100644
49761 --- a/fs/proc/task_mmu.c
49762 +++ b/fs/proc/task_mmu.c
49763 @@ -11,12 +11,19 @@
49764 #include <linux/rmap.h>
49765 #include <linux/swap.h>
49766 #include <linux/swapops.h>
49767 +#include <linux/grsecurity.h>
49768
49769 #include <asm/elf.h>
49770 #include <asm/uaccess.h>
49771 #include <asm/tlbflush.h>
49772 #include "internal.h"
49773
49774 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49775 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49776 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
49777 + _mm->pax_flags & MF_PAX_SEGMEXEC))
49778 +#endif
49779 +
49780 void task_mem(struct seq_file *m, struct mm_struct *mm)
49781 {
49782 unsigned long data, text, lib, swap;
49783 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49784 "VmExe:\t%8lu kB\n"
49785 "VmLib:\t%8lu kB\n"
49786 "VmPTE:\t%8lu kB\n"
49787 - "VmSwap:\t%8lu kB\n",
49788 - hiwater_vm << (PAGE_SHIFT-10),
49789 + "VmSwap:\t%8lu kB\n"
49790 +
49791 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49792 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49793 +#endif
49794 +
49795 + ,hiwater_vm << (PAGE_SHIFT-10),
49796 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49797 mm->locked_vm << (PAGE_SHIFT-10),
49798 mm->pinned_vm << (PAGE_SHIFT-10),
49799 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49800 data << (PAGE_SHIFT-10),
49801 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49802 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49803 - swap << (PAGE_SHIFT-10));
49804 + swap << (PAGE_SHIFT-10)
49805 +
49806 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49807 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49808 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
49809 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
49810 +#else
49811 + , mm->context.user_cs_base
49812 + , mm->context.user_cs_limit
49813 +#endif
49814 +#endif
49815 +
49816 + );
49817 }
49818
49819 unsigned long task_vsize(struct mm_struct *mm)
49820 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49821 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49822 }
49823
49824 - /* We don't show the stack guard page in /proc/maps */
49825 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49826 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49827 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49828 +#else
49829 start = vma->vm_start;
49830 - if (stack_guard_page_start(vma, start))
49831 - start += PAGE_SIZE;
49832 end = vma->vm_end;
49833 - if (stack_guard_page_end(vma, end))
49834 - end -= PAGE_SIZE;
49835 +#endif
49836
49837 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49838 start,
49839 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49840 flags & VM_WRITE ? 'w' : '-',
49841 flags & VM_EXEC ? 'x' : '-',
49842 flags & VM_MAYSHARE ? 's' : 'p',
49843 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49844 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49845 +#else
49846 pgoff,
49847 +#endif
49848 MAJOR(dev), MINOR(dev), ino, &len);
49849
49850 /*
49851 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49852 */
49853 if (file) {
49854 pad_len_spaces(m, len);
49855 - seq_path(m, &file->f_path, "\n");
49856 + seq_path(m, &file->f_path, "\n\\");
49857 goto done;
49858 }
49859
49860 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49861 * Thread stack in /proc/PID/task/TID/maps or
49862 * the main process stack.
49863 */
49864 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
49865 - vma->vm_end >= mm->start_stack)) {
49866 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49867 + (vma->vm_start <= mm->start_stack &&
49868 + vma->vm_end >= mm->start_stack)) {
49869 name = "[stack]";
49870 } else {
49871 /* Thread stack in /proc/PID/maps */
49872 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
49873 struct proc_maps_private *priv = m->private;
49874 struct task_struct *task = priv->task;
49875
49876 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49877 + if (current->exec_id != m->exec_id) {
49878 + gr_log_badprocpid("maps");
49879 + return 0;
49880 + }
49881 +#endif
49882 +
49883 show_map_vma(m, vma, is_pid);
49884
49885 if (m->count < m->size) /* vma is copied successfully */
49886 @@ -492,12 +528,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49887 .private = &mss,
49888 };
49889
49890 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49891 + if (current->exec_id != m->exec_id) {
49892 + gr_log_badprocpid("smaps");
49893 + return 0;
49894 + }
49895 +#endif
49896 memset(&mss, 0, sizeof mss);
49897 - mss.vma = vma;
49898 - /* mmap_sem is held in m_start */
49899 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49900 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49901 -
49902 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49903 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49904 +#endif
49905 + mss.vma = vma;
49906 + /* mmap_sem is held in m_start */
49907 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49908 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49909 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49910 + }
49911 +#endif
49912 show_map_vma(m, vma, is_pid);
49913
49914 seq_printf(m,
49915 @@ -515,7 +562,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49916 "KernelPageSize: %8lu kB\n"
49917 "MMUPageSize: %8lu kB\n"
49918 "Locked: %8lu kB\n",
49919 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49920 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49921 +#else
49922 (vma->vm_end - vma->vm_start) >> 10,
49923 +#endif
49924 mss.resident >> 10,
49925 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49926 mss.shared_clean >> 10,
49927 @@ -1164,6 +1215,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49928 int n;
49929 char buffer[50];
49930
49931 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49932 + if (current->exec_id != m->exec_id) {
49933 + gr_log_badprocpid("numa_maps");
49934 + return 0;
49935 + }
49936 +#endif
49937 +
49938 if (!mm)
49939 return 0;
49940
49941 @@ -1181,11 +1239,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49942 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49943 mpol_cond_put(pol);
49944
49945 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49946 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49947 +#else
49948 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49949 +#endif
49950
49951 if (file) {
49952 seq_printf(m, " file=");
49953 - seq_path(m, &file->f_path, "\n\t= ");
49954 + seq_path(m, &file->f_path, "\n\t\\= ");
49955 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49956 seq_printf(m, " heap");
49957 } else {
49958 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49959 index 1ccfa53..0848f95 100644
49960 --- a/fs/proc/task_nommu.c
49961 +++ b/fs/proc/task_nommu.c
49962 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49963 else
49964 bytes += kobjsize(mm);
49965
49966 - if (current->fs && current->fs->users > 1)
49967 + if (current->fs && atomic_read(&current->fs->users) > 1)
49968 sbytes += kobjsize(current->fs);
49969 else
49970 bytes += kobjsize(current->fs);
49971 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
49972
49973 if (file) {
49974 pad_len_spaces(m, len);
49975 - seq_path(m, &file->f_path, "");
49976 + seq_path(m, &file->f_path, "\n\\");
49977 } else if (mm) {
49978 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
49979
49980 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49981 index d67908b..d13f6a6 100644
49982 --- a/fs/quota/netlink.c
49983 +++ b/fs/quota/netlink.c
49984 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49985 void quota_send_warning(short type, unsigned int id, dev_t dev,
49986 const char warntype)
49987 {
49988 - static atomic_t seq;
49989 + static atomic_unchecked_t seq;
49990 struct sk_buff *skb;
49991 void *msg_head;
49992 int ret;
49993 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49994 "VFS: Not enough memory to send quota warning.\n");
49995 return;
49996 }
49997 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49998 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49999 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
50000 if (!msg_head) {
50001 printk(KERN_ERR
50002 diff --git a/fs/readdir.c b/fs/readdir.c
50003 index 39e3370..20d446d 100644
50004 --- a/fs/readdir.c
50005 +++ b/fs/readdir.c
50006 @@ -17,6 +17,7 @@
50007 #include <linux/security.h>
50008 #include <linux/syscalls.h>
50009 #include <linux/unistd.h>
50010 +#include <linux/namei.h>
50011
50012 #include <asm/uaccess.h>
50013
50014 @@ -67,6 +68,7 @@ struct old_linux_dirent {
50015
50016 struct readdir_callback {
50017 struct old_linux_dirent __user * dirent;
50018 + struct file * file;
50019 int result;
50020 };
50021
50022 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
50023 buf->result = -EOVERFLOW;
50024 return -EOVERFLOW;
50025 }
50026 +
50027 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50028 + return 0;
50029 +
50030 buf->result++;
50031 dirent = buf->dirent;
50032 if (!access_ok(VERIFY_WRITE, dirent,
50033 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
50034
50035 buf.result = 0;
50036 buf.dirent = dirent;
50037 + buf.file = file;
50038
50039 error = vfs_readdir(file, fillonedir, &buf);
50040 if (buf.result)
50041 @@ -141,6 +148,7 @@ struct linux_dirent {
50042 struct getdents_callback {
50043 struct linux_dirent __user * current_dir;
50044 struct linux_dirent __user * previous;
50045 + struct file * file;
50046 int count;
50047 int error;
50048 };
50049 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
50050 buf->error = -EOVERFLOW;
50051 return -EOVERFLOW;
50052 }
50053 +
50054 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50055 + return 0;
50056 +
50057 dirent = buf->previous;
50058 if (dirent) {
50059 if (__put_user(offset, &dirent->d_off))
50060 @@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
50061 buf.previous = NULL;
50062 buf.count = count;
50063 buf.error = 0;
50064 + buf.file = file;
50065
50066 error = vfs_readdir(file, filldir, &buf);
50067 if (error >= 0)
50068 @@ -226,6 +239,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
50069 struct getdents_callback64 {
50070 struct linux_dirent64 __user * current_dir;
50071 struct linux_dirent64 __user * previous;
50072 + struct file *file;
50073 int count;
50074 int error;
50075 };
50076 @@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
50077 buf->error = -EINVAL; /* only used if we fail.. */
50078 if (reclen > buf->count)
50079 return -EINVAL;
50080 +
50081 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50082 + return 0;
50083 +
50084 dirent = buf->previous;
50085 if (dirent) {
50086 if (__put_user(offset, &dirent->d_off))
50087 @@ -287,6 +305,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
50088
50089 buf.current_dir = dirent;
50090 buf.previous = NULL;
50091 + buf.file = file;
50092 buf.count = count;
50093 buf.error = 0;
50094
50095 @@ -295,7 +314,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
50096 error = buf.error;
50097 lastdirent = buf.previous;
50098 if (lastdirent) {
50099 - typeof(lastdirent->d_off) d_off = file->f_pos;
50100 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
50101 if (__put_user(d_off, &lastdirent->d_off))
50102 error = -EFAULT;
50103 else
50104 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
50105 index 2b7882b..1c5ef48 100644
50106 --- a/fs/reiserfs/do_balan.c
50107 +++ b/fs/reiserfs/do_balan.c
50108 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
50109 return;
50110 }
50111
50112 - atomic_inc(&(fs_generation(tb->tb_sb)));
50113 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
50114 do_balance_starts(tb);
50115
50116 /* balance leaf returns 0 except if combining L R and S into
50117 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
50118 index 2c1ade6..8c59d8d 100644
50119 --- a/fs/reiserfs/procfs.c
50120 +++ b/fs/reiserfs/procfs.c
50121 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
50122 "SMALL_TAILS " : "NO_TAILS ",
50123 replay_only(sb) ? "REPLAY_ONLY " : "",
50124 convert_reiserfs(sb) ? "CONV " : "",
50125 - atomic_read(&r->s_generation_counter),
50126 + atomic_read_unchecked(&r->s_generation_counter),
50127 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
50128 SF(s_do_balance), SF(s_unneeded_left_neighbor),
50129 SF(s_good_search_by_key_reada), SF(s_bmaps),
50130 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
50131 index 33215f5..c5d427a 100644
50132 --- a/fs/reiserfs/reiserfs.h
50133 +++ b/fs/reiserfs/reiserfs.h
50134 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
50135 /* Comment? -Hans */
50136 wait_queue_head_t s_wait;
50137 /* To be obsoleted soon by per buffer seals.. -Hans */
50138 - atomic_t s_generation_counter; // increased by one every time the
50139 + atomic_unchecked_t s_generation_counter; // increased by one every time the
50140 // tree gets re-balanced
50141 unsigned long s_properties; /* File system properties. Currently holds
50142 on-disk FS format */
50143 @@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
50144 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
50145
50146 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
50147 -#define get_generation(s) atomic_read (&fs_generation(s))
50148 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
50149 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
50150 #define __fs_changed(gen,s) (gen != get_generation (s))
50151 #define fs_changed(gen,s) \
50152 diff --git a/fs/select.c b/fs/select.c
50153 index db14c78..3aae1bd 100644
50154 --- a/fs/select.c
50155 +++ b/fs/select.c
50156 @@ -20,6 +20,7 @@
50157 #include <linux/export.h>
50158 #include <linux/slab.h>
50159 #include <linux/poll.h>
50160 +#include <linux/security.h>
50161 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
50162 #include <linux/file.h>
50163 #include <linux/fdtable.h>
50164 @@ -831,6 +832,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
50165 struct poll_list *walk = head;
50166 unsigned long todo = nfds;
50167
50168 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
50169 if (nfds > rlimit(RLIMIT_NOFILE))
50170 return -EINVAL;
50171
50172 diff --git a/fs/seq_file.c b/fs/seq_file.c
50173 index 0cbd049..64e705c 100644
50174 --- a/fs/seq_file.c
50175 +++ b/fs/seq_file.c
50176 @@ -9,6 +9,7 @@
50177 #include <linux/export.h>
50178 #include <linux/seq_file.h>
50179 #include <linux/slab.h>
50180 +#include <linux/sched.h>
50181
50182 #include <asm/uaccess.h>
50183 #include <asm/page.h>
50184 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
50185 memset(p, 0, sizeof(*p));
50186 mutex_init(&p->lock);
50187 p->op = op;
50188 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50189 + p->exec_id = current->exec_id;
50190 +#endif
50191
50192 /*
50193 * Wrappers around seq_open(e.g. swaps_open) need to be
50194 @@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
50195 return 0;
50196 }
50197 if (!m->buf) {
50198 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
50199 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
50200 if (!m->buf)
50201 return -ENOMEM;
50202 }
50203 @@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
50204 Eoverflow:
50205 m->op->stop(m, p);
50206 kfree(m->buf);
50207 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
50208 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
50209 return !m->buf ? -ENOMEM : -EAGAIN;
50210 }
50211
50212 @@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
50213
50214 /* grab buffer if we didn't have one */
50215 if (!m->buf) {
50216 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
50217 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
50218 if (!m->buf)
50219 goto Enomem;
50220 }
50221 @@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
50222 goto Fill;
50223 m->op->stop(m, p);
50224 kfree(m->buf);
50225 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
50226 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
50227 if (!m->buf)
50228 goto Enomem;
50229 m->count = 0;
50230 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
50231 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
50232 void *data)
50233 {
50234 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
50235 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
50236 int res = -ENOMEM;
50237
50238 if (op) {
50239 diff --git a/fs/splice.c b/fs/splice.c
50240 index 7bf08fa..eb35c2f 100644
50241 --- a/fs/splice.c
50242 +++ b/fs/splice.c
50243 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
50244 pipe_lock(pipe);
50245
50246 for (;;) {
50247 - if (!pipe->readers) {
50248 + if (!atomic_read(&pipe->readers)) {
50249 send_sig(SIGPIPE, current, 0);
50250 if (!ret)
50251 ret = -EPIPE;
50252 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
50253 do_wakeup = 0;
50254 }
50255
50256 - pipe->waiting_writers++;
50257 + atomic_inc(&pipe->waiting_writers);
50258 pipe_wait(pipe);
50259 - pipe->waiting_writers--;
50260 + atomic_dec(&pipe->waiting_writers);
50261 }
50262
50263 pipe_unlock(pipe);
50264 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
50265 old_fs = get_fs();
50266 set_fs(get_ds());
50267 /* The cast to a user pointer is valid due to the set_fs() */
50268 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
50269 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
50270 set_fs(old_fs);
50271
50272 return res;
50273 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
50274 old_fs = get_fs();
50275 set_fs(get_ds());
50276 /* The cast to a user pointer is valid due to the set_fs() */
50277 - res = vfs_write(file, (const char __user *)buf, count, &pos);
50278 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
50279 set_fs(old_fs);
50280
50281 return res;
50282 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
50283 goto err;
50284
50285 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
50286 - vec[i].iov_base = (void __user *) page_address(page);
50287 + vec[i].iov_base = (void __force_user *) page_address(page);
50288 vec[i].iov_len = this_len;
50289 spd.pages[i] = page;
50290 spd.nr_pages++;
50291 @@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
50292 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
50293 {
50294 while (!pipe->nrbufs) {
50295 - if (!pipe->writers)
50296 + if (!atomic_read(&pipe->writers))
50297 return 0;
50298
50299 - if (!pipe->waiting_writers && sd->num_spliced)
50300 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
50301 return 0;
50302
50303 if (sd->flags & SPLICE_F_NONBLOCK)
50304 @@ -1187,7 +1187,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
50305 * out of the pipe right after the splice_to_pipe(). So set
50306 * PIPE_READERS appropriately.
50307 */
50308 - pipe->readers = 1;
50309 + atomic_set(&pipe->readers, 1);
50310
50311 current->splice_pipe = pipe;
50312 }
50313 @@ -1740,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50314 ret = -ERESTARTSYS;
50315 break;
50316 }
50317 - if (!pipe->writers)
50318 + if (!atomic_read(&pipe->writers))
50319 break;
50320 - if (!pipe->waiting_writers) {
50321 + if (!atomic_read(&pipe->waiting_writers)) {
50322 if (flags & SPLICE_F_NONBLOCK) {
50323 ret = -EAGAIN;
50324 break;
50325 @@ -1774,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50326 pipe_lock(pipe);
50327
50328 while (pipe->nrbufs >= pipe->buffers) {
50329 - if (!pipe->readers) {
50330 + if (!atomic_read(&pipe->readers)) {
50331 send_sig(SIGPIPE, current, 0);
50332 ret = -EPIPE;
50333 break;
50334 @@ -1787,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50335 ret = -ERESTARTSYS;
50336 break;
50337 }
50338 - pipe->waiting_writers++;
50339 + atomic_inc(&pipe->waiting_writers);
50340 pipe_wait(pipe);
50341 - pipe->waiting_writers--;
50342 + atomic_dec(&pipe->waiting_writers);
50343 }
50344
50345 pipe_unlock(pipe);
50346 @@ -1825,14 +1825,14 @@ retry:
50347 pipe_double_lock(ipipe, opipe);
50348
50349 do {
50350 - if (!opipe->readers) {
50351 + if (!atomic_read(&opipe->readers)) {
50352 send_sig(SIGPIPE, current, 0);
50353 if (!ret)
50354 ret = -EPIPE;
50355 break;
50356 }
50357
50358 - if (!ipipe->nrbufs && !ipipe->writers)
50359 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
50360 break;
50361
50362 /*
50363 @@ -1929,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50364 pipe_double_lock(ipipe, opipe);
50365
50366 do {
50367 - if (!opipe->readers) {
50368 + if (!atomic_read(&opipe->readers)) {
50369 send_sig(SIGPIPE, current, 0);
50370 if (!ret)
50371 ret = -EPIPE;
50372 @@ -1974,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50373 * return EAGAIN if we have the potential of some data in the
50374 * future, otherwise just return 0
50375 */
50376 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
50377 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
50378 ret = -EAGAIN;
50379
50380 pipe_unlock(ipipe);
50381 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
50382 index e6bb9b2..d8e3951 100644
50383 --- a/fs/sysfs/dir.c
50384 +++ b/fs/sysfs/dir.c
50385 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
50386 struct sysfs_dirent *sd;
50387 int rc;
50388
50389 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50390 + const char *parent_name = parent_sd->s_name;
50391 +
50392 + mode = S_IFDIR | S_IRWXU;
50393 +
50394 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
50395 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
50396 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
50397 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
50398 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
50399 +#endif
50400 +
50401 /* allocate */
50402 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
50403 if (!sd)
50404 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
50405 index 00012e3..8392349 100644
50406 --- a/fs/sysfs/file.c
50407 +++ b/fs/sysfs/file.c
50408 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
50409
50410 struct sysfs_open_dirent {
50411 atomic_t refcnt;
50412 - atomic_t event;
50413 + atomic_unchecked_t event;
50414 wait_queue_head_t poll;
50415 struct list_head buffers; /* goes through sysfs_buffer.list */
50416 };
50417 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
50418 if (!sysfs_get_active(attr_sd))
50419 return -ENODEV;
50420
50421 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50422 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50423 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50424
50425 sysfs_put_active(attr_sd);
50426 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
50427 return -ENOMEM;
50428
50429 atomic_set(&new_od->refcnt, 0);
50430 - atomic_set(&new_od->event, 1);
50431 + atomic_set_unchecked(&new_od->event, 1);
50432 init_waitqueue_head(&new_od->poll);
50433 INIT_LIST_HEAD(&new_od->buffers);
50434 goto retry;
50435 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
50436
50437 sysfs_put_active(attr_sd);
50438
50439 - if (buffer->event != atomic_read(&od->event))
50440 + if (buffer->event != atomic_read_unchecked(&od->event))
50441 goto trigger;
50442
50443 return DEFAULT_POLLMASK;
50444 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
50445
50446 od = sd->s_attr.open;
50447 if (od) {
50448 - atomic_inc(&od->event);
50449 + atomic_inc_unchecked(&od->event);
50450 wake_up_interruptible(&od->poll);
50451 }
50452
50453 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50454 index a7ac78f..02158e1 100644
50455 --- a/fs/sysfs/symlink.c
50456 +++ b/fs/sysfs/symlink.c
50457 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50458
50459 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50460 {
50461 - char *page = nd_get_link(nd);
50462 + const char *page = nd_get_link(nd);
50463 if (!IS_ERR(page))
50464 free_page((unsigned long)page);
50465 }
50466 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
50467 index c175b4d..8f36a16 100644
50468 --- a/fs/udf/misc.c
50469 +++ b/fs/udf/misc.c
50470 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
50471
50472 u8 udf_tag_checksum(const struct tag *t)
50473 {
50474 - u8 *data = (u8 *)t;
50475 + const u8 *data = (const u8 *)t;
50476 u8 checksum = 0;
50477 int i;
50478 for (i = 0; i < sizeof(struct tag); ++i)
50479 diff --git a/fs/udf/namei.c b/fs/udf/namei.c
50480 index 1802417..c31deb3 100644
50481 --- a/fs/udf/namei.c
50482 +++ b/fs/udf/namei.c
50483 @@ -1279,6 +1279,7 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
50484 *lenp = 3;
50485 fid->udf.block = location.logicalBlockNum;
50486 fid->udf.partref = location.partitionReferenceNum;
50487 + fid->udf.parent_partref = 0;
50488 fid->udf.generation = inode->i_generation;
50489
50490 if (parent) {
50491 diff --git a/fs/utimes.c b/fs/utimes.c
50492 index fa4dbe4..e12d1b9 100644
50493 --- a/fs/utimes.c
50494 +++ b/fs/utimes.c
50495 @@ -1,6 +1,7 @@
50496 #include <linux/compiler.h>
50497 #include <linux/file.h>
50498 #include <linux/fs.h>
50499 +#include <linux/security.h>
50500 #include <linux/linkage.h>
50501 #include <linux/mount.h>
50502 #include <linux/namei.h>
50503 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
50504 goto mnt_drop_write_and_out;
50505 }
50506 }
50507 +
50508 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50509 + error = -EACCES;
50510 + goto mnt_drop_write_and_out;
50511 + }
50512 +
50513 mutex_lock(&inode->i_mutex);
50514 error = notify_change(path->dentry, &newattrs);
50515 mutex_unlock(&inode->i_mutex);
50516 diff --git a/fs/xattr.c b/fs/xattr.c
50517 index 1d7ac37..23cb9ec 100644
50518 --- a/fs/xattr.c
50519 +++ b/fs/xattr.c
50520 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50521 * Extended attribute SET operations
50522 */
50523 static long
50524 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
50525 +setxattr(struct path *path, const char __user *name, const void __user *value,
50526 size_t size, int flags)
50527 {
50528 int error;
50529 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
50530 }
50531 }
50532
50533 - error = vfs_setxattr(d, kname, kvalue, size, flags);
50534 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50535 + error = -EACCES;
50536 + goto out;
50537 + }
50538 +
50539 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50540 out:
50541 if (vvalue)
50542 vfree(vvalue);
50543 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
50544 return error;
50545 error = mnt_want_write(path.mnt);
50546 if (!error) {
50547 - error = setxattr(path.dentry, name, value, size, flags);
50548 + error = setxattr(&path, name, value, size, flags);
50549 mnt_drop_write(path.mnt);
50550 }
50551 path_put(&path);
50552 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
50553 return error;
50554 error = mnt_want_write(path.mnt);
50555 if (!error) {
50556 - error = setxattr(path.dentry, name, value, size, flags);
50557 + error = setxattr(&path, name, value, size, flags);
50558 mnt_drop_write(path.mnt);
50559 }
50560 path_put(&path);
50561 @@ -401,17 +406,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
50562 {
50563 int fput_needed;
50564 struct file *f;
50565 - struct dentry *dentry;
50566 int error = -EBADF;
50567
50568 f = fget_light(fd, &fput_needed);
50569 if (!f)
50570 return error;
50571 - dentry = f->f_path.dentry;
50572 - audit_inode(NULL, dentry);
50573 + audit_inode(NULL, f->f_path.dentry);
50574 error = mnt_want_write_file(f);
50575 if (!error) {
50576 - error = setxattr(dentry, name, value, size, flags);
50577 + error = setxattr(&f->f_path, name, value, size, flags);
50578 mnt_drop_write_file(f);
50579 }
50580 fput_light(f, fput_needed);
50581 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50582 index 69d06b0..c0996e5 100644
50583 --- a/fs/xattr_acl.c
50584 +++ b/fs/xattr_acl.c
50585 @@ -17,8 +17,8 @@
50586 struct posix_acl *
50587 posix_acl_from_xattr(const void *value, size_t size)
50588 {
50589 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50590 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50591 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50592 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50593 int count;
50594 struct posix_acl *acl;
50595 struct posix_acl_entry *acl_e;
50596 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
50597 index 58b815e..595ddee 100644
50598 --- a/fs/xfs/xfs_bmap.c
50599 +++ b/fs/xfs/xfs_bmap.c
50600 @@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
50601 int nmap,
50602 int ret_nmap);
50603 #else
50604 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50605 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50606 #endif /* DEBUG */
50607
50608 STATIC int
50609 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50610 index 19bf0c5..9f26b02 100644
50611 --- a/fs/xfs/xfs_dir2_sf.c
50612 +++ b/fs/xfs/xfs_dir2_sf.c
50613 @@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
50614 }
50615
50616 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
50617 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50618 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50619 + char name[sfep->namelen];
50620 + memcpy(name, sfep->name, sfep->namelen);
50621 + if (filldir(dirent, name, sfep->namelen,
50622 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
50623 + *offset = off & 0x7fffffff;
50624 + return 0;
50625 + }
50626 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50627 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50628 *offset = off & 0x7fffffff;
50629 return 0;
50630 diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
50631 index f9c3fe3..69cf4fc 100644
50632 --- a/fs/xfs/xfs_discard.c
50633 +++ b/fs/xfs/xfs_discard.c
50634 @@ -179,12 +179,14 @@ xfs_ioc_trim(
50635 * used by the fstrim application. In the end it really doesn't
50636 * matter as trimming blocks is an advisory interface.
50637 */
50638 + if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
50639 + range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
50640 + return -XFS_ERROR(EINVAL);
50641 +
50642 start = BTOBB(range.start);
50643 end = start + BTOBBT(range.len) - 1;
50644 minlen = BTOBB(max_t(u64, granularity, range.minlen));
50645
50646 - if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
50647 - return -XFS_ERROR(EINVAL);
50648 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
50649 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
50650
50651 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
50652 index 3a05a41..320bec6 100644
50653 --- a/fs/xfs/xfs_ioctl.c
50654 +++ b/fs/xfs/xfs_ioctl.c
50655 @@ -126,7 +126,7 @@ xfs_find_handle(
50656 }
50657
50658 error = -EFAULT;
50659 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50660 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50661 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50662 goto out_put;
50663
50664 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
50665 index 1a25fd8..e935581 100644
50666 --- a/fs/xfs/xfs_iops.c
50667 +++ b/fs/xfs/xfs_iops.c
50668 @@ -394,7 +394,7 @@ xfs_vn_put_link(
50669 struct nameidata *nd,
50670 void *p)
50671 {
50672 - char *s = nd_get_link(nd);
50673 + const char *s = nd_get_link(nd);
50674
50675 if (!IS_ERR(s))
50676 kfree(s);
50677 diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
50678 index 92d4331..ca28a4b 100644
50679 --- a/fs/xfs/xfs_rtalloc.c
50680 +++ b/fs/xfs/xfs_rtalloc.c
50681 @@ -857,7 +857,7 @@ xfs_rtbuf_get(
50682 xfs_buf_t *bp; /* block buffer, result */
50683 xfs_inode_t *ip; /* bitmap or summary inode */
50684 xfs_bmbt_irec_t map;
50685 - int nmap;
50686 + int nmap = 1;
50687 int error; /* error value */
50688
50689 ip = issum ? mp->m_rsumip : mp->m_rbmip;
50690 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50691 new file mode 100644
50692 index 0000000..4d533f1
50693 --- /dev/null
50694 +++ b/grsecurity/Kconfig
50695 @@ -0,0 +1,941 @@
50696 +#
50697 +# grecurity configuration
50698 +#
50699 +menu "Memory Protections"
50700 +depends on GRKERNSEC
50701 +
50702 +config GRKERNSEC_KMEM
50703 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50704 + default y if GRKERNSEC_CONFIG_AUTO
50705 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50706 + help
50707 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50708 + be written to or read from to modify or leak the contents of the running
50709 + kernel. /dev/port will also not be allowed to be opened. If you have module
50710 + support disabled, enabling this will close up four ways that are
50711 + currently used to insert malicious code into the running kernel.
50712 + Even with all these features enabled, we still highly recommend that
50713 + you use the RBAC system, as it is still possible for an attacker to
50714 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50715 + If you are not using XFree86, you may be able to stop this additional
50716 + case by enabling the 'Disable privileged I/O' option. Though nothing
50717 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50718 + but only to video memory, which is the only writing we allow in this
50719 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50720 + not be allowed to mprotect it with PROT_WRITE later.
50721 + It is highly recommended that you say Y here if you meet all the
50722 + conditions above.
50723 +
50724 +config GRKERNSEC_VM86
50725 + bool "Restrict VM86 mode"
50726 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50727 + depends on X86_32
50728 +
50729 + help
50730 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50731 + make use of a special execution mode on 32bit x86 processors called
50732 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50733 + video cards and will still work with this option enabled. The purpose
50734 + of the option is to prevent exploitation of emulation errors in
50735 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50736 + Nearly all users should be able to enable this option.
50737 +
50738 +config GRKERNSEC_IO
50739 + bool "Disable privileged I/O"
50740 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50741 + depends on X86
50742 + select RTC_CLASS
50743 + select RTC_INTF_DEV
50744 + select RTC_DRV_CMOS
50745 +
50746 + help
50747 + If you say Y here, all ioperm and iopl calls will return an error.
50748 + Ioperm and iopl can be used to modify the running kernel.
50749 + Unfortunately, some programs need this access to operate properly,
50750 + the most notable of which are XFree86 and hwclock. hwclock can be
50751 + remedied by having RTC support in the kernel, so real-time
50752 + clock support is enabled if this option is enabled, to ensure
50753 + that hwclock operates correctly. XFree86 still will not
50754 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50755 + IF YOU USE XFree86. If you use XFree86 and you still want to
50756 + protect your kernel against modification, use the RBAC system.
50757 +
50758 +config GRKERNSEC_PROC_MEMMAP
50759 + bool "Harden ASLR against information leaks and entropy reduction"
50760 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
50761 + depends on PAX_NOEXEC || PAX_ASLR
50762 + help
50763 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50764 + give no information about the addresses of its mappings if
50765 + PaX features that rely on random addresses are enabled on the task.
50766 + In addition to sanitizing this information and disabling other
50767 + dangerous sources of information, this option causes reads of sensitive
50768 + /proc/<pid> entries where the file descriptor was opened in a different
50769 + task than the one performing the read. Such attempts are logged.
50770 + This option also limits argv/env strings for suid/sgid binaries
50771 + to 512KB to prevent a complete exhaustion of the stack entropy provided
50772 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50773 + binaries to prevent alternative mmap layouts from being abused.
50774 +
50775 + If you use PaX it is essential that you say Y here as it closes up
50776 + several holes that make full ASLR useless locally.
50777 +
50778 +config GRKERNSEC_BRUTE
50779 + bool "Deter exploit bruteforcing"
50780 + default y if GRKERNSEC_CONFIG_AUTO
50781 + help
50782 + If you say Y here, attempts to bruteforce exploits against forking
50783 + daemons such as apache or sshd, as well as against suid/sgid binaries
50784 + will be deterred. When a child of a forking daemon is killed by PaX
50785 + or crashes due to an illegal instruction or other suspicious signal,
50786 + the parent process will be delayed 30 seconds upon every subsequent
50787 + fork until the administrator is able to assess the situation and
50788 + restart the daemon.
50789 + In the suid/sgid case, the attempt is logged, the user has all their
50790 + processes terminated, and they are prevented from executing any further
50791 + processes for 15 minutes.
50792 + It is recommended that you also enable signal logging in the auditing
50793 + section so that logs are generated when a process triggers a suspicious
50794 + signal.
50795 + If the sysctl option is enabled, a sysctl option with name
50796 + "deter_bruteforce" is created.
50797 +
50798 +
50799 +config GRKERNSEC_MODHARDEN
50800 + bool "Harden module auto-loading"
50801 + default y if GRKERNSEC_CONFIG_AUTO
50802 + depends on MODULES
50803 + help
50804 + If you say Y here, module auto-loading in response to use of some
50805 + feature implemented by an unloaded module will be restricted to
50806 + root users. Enabling this option helps defend against attacks
50807 + by unprivileged users who abuse the auto-loading behavior to
50808 + cause a vulnerable module to load that is then exploited.
50809 +
50810 + If this option prevents a legitimate use of auto-loading for a
50811 + non-root user, the administrator can execute modprobe manually
50812 + with the exact name of the module mentioned in the alert log.
50813 + Alternatively, the administrator can add the module to the list
50814 + of modules loaded at boot by modifying init scripts.
50815 +
50816 + Modification of init scripts will most likely be needed on
50817 + Ubuntu servers with encrypted home directory support enabled,
50818 + as the first non-root user logging in will cause the ecb(aes),
50819 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50820 +
50821 +config GRKERNSEC_HIDESYM
50822 + bool "Hide kernel symbols"
50823 + default y if GRKERNSEC_CONFIG_AUTO
50824 + select PAX_USERCOPY_SLABS
50825 + help
50826 + If you say Y here, getting information on loaded modules, and
50827 + displaying all kernel symbols through a syscall will be restricted
50828 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50829 + /proc/kallsyms will be restricted to the root user. The RBAC
50830 + system can hide that entry even from root.
50831 +
50832 + This option also prevents leaking of kernel addresses through
50833 + several /proc entries.
50834 +
50835 + Note that this option is only effective provided the following
50836 + conditions are met:
50837 + 1) The kernel using grsecurity is not precompiled by some distribution
50838 + 2) You have also enabled GRKERNSEC_DMESG
50839 + 3) You are using the RBAC system and hiding other files such as your
50840 + kernel image and System.map. Alternatively, enabling this option
50841 + causes the permissions on /boot, /lib/modules, and the kernel
50842 + source directory to change at compile time to prevent
50843 + reading by non-root users.
50844 + If the above conditions are met, this option will aid in providing a
50845 + useful protection against local kernel exploitation of overflows
50846 + and arbitrary read/write vulnerabilities.
50847 +
50848 +config GRKERNSEC_KERN_LOCKOUT
50849 + bool "Active kernel exploit response"
50850 + default y if GRKERNSEC_CONFIG_AUTO
50851 + depends on X86 || ARM || PPC || SPARC
50852 + help
50853 + If you say Y here, when a PaX alert is triggered due to suspicious
50854 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50855 + or an OOPS occurs due to bad memory accesses, instead of just
50856 + terminating the offending process (and potentially allowing
50857 + a subsequent exploit from the same user), we will take one of two
50858 + actions:
50859 + If the user was root, we will panic the system
50860 + If the user was non-root, we will log the attempt, terminate
50861 + all processes owned by the user, then prevent them from creating
50862 + any new processes until the system is restarted
50863 + This deters repeated kernel exploitation/bruteforcing attempts
50864 + and is useful for later forensics.
50865 +
50866 +endmenu
50867 +menu "Role Based Access Control Options"
50868 +depends on GRKERNSEC
50869 +
50870 +config GRKERNSEC_RBAC_DEBUG
50871 + bool
50872 +
50873 +config GRKERNSEC_NO_RBAC
50874 + bool "Disable RBAC system"
50875 + help
50876 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50877 + preventing the RBAC system from being enabled. You should only say Y
50878 + here if you have no intention of using the RBAC system, so as to prevent
50879 + an attacker with root access from misusing the RBAC system to hide files
50880 + and processes when loadable module support and /dev/[k]mem have been
50881 + locked down.
50882 +
50883 +config GRKERNSEC_ACL_HIDEKERN
50884 + bool "Hide kernel processes"
50885 + help
50886 + If you say Y here, all kernel threads will be hidden to all
50887 + processes but those whose subject has the "view hidden processes"
50888 + flag.
50889 +
50890 +config GRKERNSEC_ACL_MAXTRIES
50891 + int "Maximum tries before password lockout"
50892 + default 3
50893 + help
50894 + This option enforces the maximum number of times a user can attempt
50895 + to authorize themselves with the grsecurity RBAC system before being
50896 + denied the ability to attempt authorization again for a specified time.
50897 + The lower the number, the harder it will be to brute-force a password.
50898 +
50899 +config GRKERNSEC_ACL_TIMEOUT
50900 + int "Time to wait after max password tries, in seconds"
50901 + default 30
50902 + help
50903 + This option specifies the time the user must wait after attempting to
50904 + authorize to the RBAC system with the maximum number of invalid
50905 + passwords. The higher the number, the harder it will be to brute-force
50906 + a password.
50907 +
50908 +endmenu
50909 +menu "Filesystem Protections"
50910 +depends on GRKERNSEC
50911 +
50912 +config GRKERNSEC_PROC
50913 + bool "Proc restrictions"
50914 + default y if GRKERNSEC_CONFIG_AUTO
50915 + help
50916 + If you say Y here, the permissions of the /proc filesystem
50917 + will be altered to enhance system security and privacy. You MUST
50918 + choose either a user only restriction or a user and group restriction.
50919 + Depending upon the option you choose, you can either restrict users to
50920 + see only the processes they themselves run, or choose a group that can
50921 + view all processes and files normally restricted to root if you choose
50922 + the "restrict to user only" option. NOTE: If you're running identd or
50923 + ntpd as a non-root user, you will have to run it as the group you
50924 + specify here.
50925 +
50926 +config GRKERNSEC_PROC_USER
50927 + bool "Restrict /proc to user only"
50928 + depends on GRKERNSEC_PROC
50929 + help
50930 + If you say Y here, non-root users will only be able to view their own
50931 + processes, and restricts them from viewing network-related information,
50932 + and viewing kernel symbol and module information.
50933 +
50934 +config GRKERNSEC_PROC_USERGROUP
50935 + bool "Allow special group"
50936 + default y if GRKERNSEC_CONFIG_AUTO
50937 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50938 + help
50939 + If you say Y here, you will be able to select a group that will be
50940 + able to view all processes and network-related information. If you've
50941 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50942 + remain hidden. This option is useful if you want to run identd as
50943 + a non-root user.
50944 +
50945 +config GRKERNSEC_PROC_GID
50946 + int "GID for special group"
50947 + depends on GRKERNSEC_PROC_USERGROUP
50948 + default 1001
50949 +
50950 +config GRKERNSEC_PROC_ADD
50951 + bool "Additional restrictions"
50952 + default y if GRKERNSEC_CONFIG_AUTO
50953 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50954 + help
50955 + If you say Y here, additional restrictions will be placed on
50956 + /proc that keep normal users from viewing device information and
50957 + slabinfo information that could be useful for exploits.
50958 +
50959 +config GRKERNSEC_LINK
50960 + bool "Linking restrictions"
50961 + default y if GRKERNSEC_CONFIG_AUTO
50962 + help
50963 + If you say Y here, /tmp race exploits will be prevented, since users
50964 + will no longer be able to follow symlinks owned by other users in
50965 + world-writable +t directories (e.g. /tmp), unless the owner of the
50966 + symlink is the owner of the directory. users will also not be
50967 + able to hardlink to files they do not own. If the sysctl option is
50968 + enabled, a sysctl option with name "linking_restrictions" is created.
50969 +
50970 +config GRKERNSEC_SYMLINKOWN
50971 + bool "Kernel-enforced SymlinksIfOwnerMatch"
50972 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50973 + help
50974 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
50975 + that prevents it from being used as a security feature. As Apache
50976 + verifies the symlink by performing a stat() against the target of
50977 + the symlink before it is followed, an attacker can setup a symlink
50978 + to point to a same-owned file, then replace the symlink with one
50979 + that targets another user's file just after Apache "validates" the
50980 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
50981 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
50982 + will be in place for the group you specify. If the sysctl option
50983 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
50984 + created.
50985 +
50986 +config GRKERNSEC_SYMLINKOWN_GID
50987 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
50988 + depends on GRKERNSEC_SYMLINKOWN
50989 + default 1006
50990 + help
50991 + Setting this GID determines what group kernel-enforced
50992 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
50993 + is enabled, a sysctl option with name "symlinkown_gid" is created.
50994 +
50995 +config GRKERNSEC_FIFO
50996 + bool "FIFO restrictions"
50997 + default y if GRKERNSEC_CONFIG_AUTO
50998 + help
50999 + If you say Y here, users will not be able to write to FIFOs they don't
51000 + own in world-writable +t directories (e.g. /tmp), unless the owner of
51001 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
51002 + option is enabled, a sysctl option with name "fifo_restrictions" is
51003 + created.
51004 +
51005 +config GRKERNSEC_SYSFS_RESTRICT
51006 + bool "Sysfs/debugfs restriction"
51007 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
51008 + depends on SYSFS
51009 + help
51010 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
51011 + any filesystem normally mounted under it (e.g. debugfs) will be
51012 + mostly accessible only by root. These filesystems generally provide access
51013 + to hardware and debug information that isn't appropriate for unprivileged
51014 + users of the system. Sysfs and debugfs have also become a large source
51015 + of new vulnerabilities, ranging from infoleaks to local compromise.
51016 + There has been very little oversight with an eye toward security involved
51017 + in adding new exporters of information to these filesystems, so their
51018 + use is discouraged.
51019 + For reasons of compatibility, a few directories have been whitelisted
51020 + for access by non-root users:
51021 + /sys/fs/selinux
51022 + /sys/fs/fuse
51023 + /sys/devices/system/cpu
51024 +
51025 +config GRKERNSEC_ROFS
51026 + bool "Runtime read-only mount protection"
51027 + help
51028 + If you say Y here, a sysctl option with name "romount_protect" will
51029 + be created. By setting this option to 1 at runtime, filesystems
51030 + will be protected in the following ways:
51031 + * No new writable mounts will be allowed
51032 + * Existing read-only mounts won't be able to be remounted read/write
51033 + * Write operations will be denied on all block devices
51034 + This option acts independently of grsec_lock: once it is set to 1,
51035 + it cannot be turned off. Therefore, please be mindful of the resulting
51036 + behavior if this option is enabled in an init script on a read-only
51037 + filesystem. This feature is mainly intended for secure embedded systems.
51038 +
51039 +config GRKERNSEC_CHROOT
51040 + bool "Chroot jail restrictions"
51041 + default y if GRKERNSEC_CONFIG_AUTO
51042 + help
51043 + If you say Y here, you will be able to choose several options that will
51044 + make breaking out of a chrooted jail much more difficult. If you
51045 + encounter no software incompatibilities with the following options, it
51046 + is recommended that you enable each one.
51047 +
51048 +config GRKERNSEC_CHROOT_MOUNT
51049 + bool "Deny mounts"
51050 + default y if GRKERNSEC_CONFIG_AUTO
51051 + depends on GRKERNSEC_CHROOT
51052 + help
51053 + If you say Y here, processes inside a chroot will not be able to
51054 + mount or remount filesystems. If the sysctl option is enabled, a
51055 + sysctl option with name "chroot_deny_mount" is created.
51056 +
51057 +config GRKERNSEC_CHROOT_DOUBLE
51058 + bool "Deny double-chroots"
51059 + default y if GRKERNSEC_CONFIG_AUTO
51060 + depends on GRKERNSEC_CHROOT
51061 + help
51062 + If you say Y here, processes inside a chroot will not be able to chroot
51063 + again outside the chroot. This is a widely used method of breaking
51064 + out of a chroot jail and should not be allowed. If the sysctl
51065 + option is enabled, a sysctl option with name
51066 + "chroot_deny_chroot" is created.
51067 +
51068 +config GRKERNSEC_CHROOT_PIVOT
51069 + bool "Deny pivot_root in chroot"
51070 + default y if GRKERNSEC_CONFIG_AUTO
51071 + depends on GRKERNSEC_CHROOT
51072 + help
51073 + If you say Y here, processes inside a chroot will not be able to use
51074 + a function called pivot_root() that was introduced in Linux 2.3.41. It
51075 + works similarly to chroot in that it changes the root filesystem. This
51076 + function could be misused in a chrooted process to attempt to break out
51077 + of the chroot, and therefore should not be allowed. If the sysctl
51078 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
51079 + created.
51080 +
51081 +config GRKERNSEC_CHROOT_CHDIR
51082 + bool "Enforce chdir(\"/\") on all chroots"
51083 + default y if GRKERNSEC_CONFIG_AUTO
51084 + depends on GRKERNSEC_CHROOT
51085 + help
51086 + If you say Y here, the current working directory of all newly-chrooted
51087 + applications will be set to the root directory of the chroot.
51088 + The man page on chroot(2) states:
51089 + Note that this call does not change the current working
51090 + directory, so that `.' can be outside the tree rooted at
51091 + `/'. In particular, the super-user can escape from a
51092 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
51093 +
51094 + It is recommended that you say Y here, since it's not known to break
51095 + any software. If the sysctl option is enabled, a sysctl option with
51096 + name "chroot_enforce_chdir" is created.
51097 +
51098 +config GRKERNSEC_CHROOT_CHMOD
51099 + bool "Deny (f)chmod +s"
51100 + default y if GRKERNSEC_CONFIG_AUTO
51101 + depends on GRKERNSEC_CHROOT
51102 + help
51103 + If you say Y here, processes inside a chroot will not be able to chmod
51104 + or fchmod files to make them have suid or sgid bits. This protects
51105 + against another published method of breaking a chroot. If the sysctl
51106 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
51107 + created.
51108 +
51109 +config GRKERNSEC_CHROOT_FCHDIR
51110 + bool "Deny fchdir out of chroot"
51111 + default y if GRKERNSEC_CONFIG_AUTO
51112 + depends on GRKERNSEC_CHROOT
51113 + help
51114 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
51115 + to a file descriptor of the chrooting process that points to a directory
51116 + outside the filesystem will be stopped. If the sysctl option
51117 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
51118 +
51119 +config GRKERNSEC_CHROOT_MKNOD
51120 + bool "Deny mknod"
51121 + default y if GRKERNSEC_CONFIG_AUTO
51122 + depends on GRKERNSEC_CHROOT
51123 + help
51124 + If you say Y here, processes inside a chroot will not be allowed to
51125 + mknod. The problem with using mknod inside a chroot is that it
51126 + would allow an attacker to create a device entry that is the same
51127 + as one on the physical root of your system, which could be
51128 + anything from the console device to a device for your hard drive (which
51129 + they could then use to wipe the drive or steal data). It is recommended
51130 + that you say Y here, unless you run into software incompatibilities.
51131 + If the sysctl option is enabled, a sysctl option with name
51132 + "chroot_deny_mknod" is created.
51133 +
51134 +config GRKERNSEC_CHROOT_SHMAT
51135 + bool "Deny shmat() out of chroot"
51136 + default y if GRKERNSEC_CONFIG_AUTO
51137 + depends on GRKERNSEC_CHROOT
51138 + help
51139 + If you say Y here, processes inside a chroot will not be able to attach
51140 + to shared memory segments that were created outside of the chroot jail.
51141 + It is recommended that you say Y here. If the sysctl option is enabled,
51142 + a sysctl option with name "chroot_deny_shmat" is created.
51143 +
51144 +config GRKERNSEC_CHROOT_UNIX
51145 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
51146 + default y if GRKERNSEC_CONFIG_AUTO
51147 + depends on GRKERNSEC_CHROOT
51148 + help
51149 + If you say Y here, processes inside a chroot will not be able to
51150 + connect to abstract (meaning not belonging to a filesystem) Unix
51151 + domain sockets that were bound outside of a chroot. It is recommended
51152 + that you say Y here. If the sysctl option is enabled, a sysctl option
51153 + with name "chroot_deny_unix" is created.
51154 +
51155 +config GRKERNSEC_CHROOT_FINDTASK
51156 + bool "Protect outside processes"
51157 + default y if GRKERNSEC_CONFIG_AUTO
51158 + depends on GRKERNSEC_CHROOT
51159 + help
51160 + If you say Y here, processes inside a chroot will not be able to
51161 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
51162 + getsid, or view any process outside of the chroot. If the sysctl
51163 + option is enabled, a sysctl option with name "chroot_findtask" is
51164 + created.
51165 +
51166 +config GRKERNSEC_CHROOT_NICE
51167 + bool "Restrict priority changes"
51168 + default y if GRKERNSEC_CONFIG_AUTO
51169 + depends on GRKERNSEC_CHROOT
51170 + help
51171 + If you say Y here, processes inside a chroot will not be able to raise
51172 + the priority of processes in the chroot, or alter the priority of
51173 + processes outside the chroot. This provides more security than simply
51174 + removing CAP_SYS_NICE from the process' capability set. If the
51175 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
51176 + is created.
51177 +
51178 +config GRKERNSEC_CHROOT_SYSCTL
51179 + bool "Deny sysctl writes"
51180 + default y if GRKERNSEC_CONFIG_AUTO
51181 + depends on GRKERNSEC_CHROOT
51182 + help
51183 + If you say Y here, an attacker in a chroot will not be able to
51184 + write to sysctl entries, either by sysctl(2) or through a /proc
51185 + interface. It is strongly recommended that you say Y here. If the
51186 + sysctl option is enabled, a sysctl option with name
51187 + "chroot_deny_sysctl" is created.
51188 +
51189 +config GRKERNSEC_CHROOT_CAPS
51190 + bool "Capability restrictions"
51191 + default y if GRKERNSEC_CONFIG_AUTO
51192 + depends on GRKERNSEC_CHROOT
51193 + help
51194 + If you say Y here, the capabilities on all processes within a
51195 + chroot jail will be lowered to stop module insertion, raw i/o,
51196 + system and net admin tasks, rebooting the system, modifying immutable
51197 + files, modifying IPC owned by another, and changing the system time.
51198 + This is left an option because it can break some apps. Disable this
51199 + if your chrooted apps are having problems performing those kinds of
51200 + tasks. If the sysctl option is enabled, a sysctl option with
51201 + name "chroot_caps" is created.
51202 +
51203 +endmenu
51204 +menu "Kernel Auditing"
51205 +depends on GRKERNSEC
51206 +
51207 +config GRKERNSEC_AUDIT_GROUP
51208 + bool "Single group for auditing"
51209 + help
51210 + If you say Y here, the exec, chdir, and (un)mount logging features
51211 + will only operate on a group you specify. This option is recommended
51212 + if you only want to watch certain users instead of having a large
51213 + amount of logs from the entire system. If the sysctl option is enabled,
51214 + a sysctl option with name "audit_group" is created.
51215 +
51216 +config GRKERNSEC_AUDIT_GID
51217 + int "GID for auditing"
51218 + depends on GRKERNSEC_AUDIT_GROUP
51219 + default 1007
51220 +
51221 +config GRKERNSEC_EXECLOG
51222 + bool "Exec logging"
51223 + help
51224 + If you say Y here, all execve() calls will be logged (since the
51225 + other exec*() calls are frontends to execve(), all execution
51226 + will be logged). Useful for shell-servers that like to keep track
51227 + of their users. If the sysctl option is enabled, a sysctl option with
51228 + name "exec_logging" is created.
51229 + WARNING: This option when enabled will produce a LOT of logs, especially
51230 + on an active system.
51231 +
51232 +config GRKERNSEC_RESLOG
51233 + bool "Resource logging"
51234 + default y if GRKERNSEC_CONFIG_AUTO
51235 + help
51236 + If you say Y here, all attempts to overstep resource limits will
51237 + be logged with the resource name, the requested size, and the current
51238 + limit. It is highly recommended that you say Y here. If the sysctl
51239 + option is enabled, a sysctl option with name "resource_logging" is
51240 + created. If the RBAC system is enabled, the sysctl value is ignored.
51241 +
51242 +config GRKERNSEC_CHROOT_EXECLOG
51243 + bool "Log execs within chroot"
51244 + help
51245 + If you say Y here, all executions inside a chroot jail will be logged
51246 + to syslog. This can cause a large amount of logs if certain
51247 + applications (e.g. djb's daemontools) are installed on the system, and
51248 + is therefore left as an option. If the sysctl option is enabled, a
51249 + sysctl option with name "chroot_execlog" is created.
51250 +
51251 +config GRKERNSEC_AUDIT_PTRACE
51252 + bool "Ptrace logging"
51253 + help
51254 + If you say Y here, all attempts to attach to a process via ptrace
51255 + will be logged. If the sysctl option is enabled, a sysctl option
51256 + with name "audit_ptrace" is created.
51257 +
51258 +config GRKERNSEC_AUDIT_CHDIR
51259 + bool "Chdir logging"
51260 + help
51261 + If you say Y here, all chdir() calls will be logged. If the sysctl
51262 + option is enabled, a sysctl option with name "audit_chdir" is created.
51263 +
51264 +config GRKERNSEC_AUDIT_MOUNT
51265 + bool "(Un)Mount logging"
51266 + help
51267 + If you say Y here, all mounts and unmounts will be logged. If the
51268 + sysctl option is enabled, a sysctl option with name "audit_mount" is
51269 + created.
51270 +
51271 +config GRKERNSEC_SIGNAL
51272 + bool "Signal logging"
51273 + default y if GRKERNSEC_CONFIG_AUTO
51274 + help
51275 + If you say Y here, certain important signals will be logged, such as
51276 + SIGSEGV, which will as a result inform you of when an error in a program
51277 + occurred, which in some cases could mean a possible exploit attempt.
51278 + If the sysctl option is enabled, a sysctl option with name
51279 + "signal_logging" is created.
51280 +
51281 +config GRKERNSEC_FORKFAIL
51282 + bool "Fork failure logging"
51283 + help
51284 + If you say Y here, all failed fork() attempts will be logged.
51285 + This could suggest a fork bomb, or someone attempting to overstep
51286 + their process limit. If the sysctl option is enabled, a sysctl option
51287 + with name "forkfail_logging" is created.
51288 +
51289 +config GRKERNSEC_TIME
51290 + bool "Time change logging"
51291 + default y if GRKERNSEC_CONFIG_AUTO
51292 + help
51293 + If you say Y here, any changes of the system clock will be logged.
51294 + If the sysctl option is enabled, a sysctl option with name
51295 + "timechange_logging" is created.
51296 +
51297 +config GRKERNSEC_PROC_IPADDR
51298 + bool "/proc/<pid>/ipaddr support"
51299 + default y if GRKERNSEC_CONFIG_AUTO
51300 + help
51301 + If you say Y here, a new entry will be added to each /proc/<pid>
51302 + directory that contains the IP address of the person using the task.
51303 + The IP is carried across local TCP and AF_UNIX stream sockets.
51304 + This information can be useful for IDS/IPSes to perform remote response
51305 + to a local attack. The entry is readable by only the owner of the
51306 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
51307 + the RBAC system), and thus does not create privacy concerns.
51308 +
51309 +config GRKERNSEC_RWXMAP_LOG
51310 + bool 'Denied RWX mmap/mprotect logging'
51311 + default y if GRKERNSEC_CONFIG_AUTO
51312 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
51313 + help
51314 + If you say Y here, calls to mmap() and mprotect() with explicit
51315 + usage of PROT_WRITE and PROT_EXEC together will be logged when
51316 + denied by the PAX_MPROTECT feature. If the sysctl option is
51317 + enabled, a sysctl option with name "rwxmap_logging" is created.
51318 +
51319 +config GRKERNSEC_AUDIT_TEXTREL
51320 + bool 'ELF text relocations logging (READ HELP)'
51321 + depends on PAX_MPROTECT
51322 + help
51323 + If you say Y here, text relocations will be logged with the filename
51324 + of the offending library or binary. The purpose of the feature is
51325 + to help Linux distribution developers get rid of libraries and
51326 + binaries that need text relocations which hinder the future progress
51327 + of PaX. Only Linux distribution developers should say Y here, and
51328 + never on a production machine, as this option creates an information
51329 + leak that could aid an attacker in defeating the randomization of
51330 + a single memory region. If the sysctl option is enabled, a sysctl
51331 + option with name "audit_textrel" is created.
51332 +
51333 +endmenu
51334 +
51335 +menu "Executable Protections"
51336 +depends on GRKERNSEC
51337 +
51338 +config GRKERNSEC_DMESG
51339 + bool "Dmesg(8) restriction"
51340 + default y if GRKERNSEC_CONFIG_AUTO
51341 + help
51342 + If you say Y here, non-root users will not be able to use dmesg(8)
51343 + to view up to the last 4kb of messages in the kernel's log buffer.
51344 + The kernel's log buffer often contains kernel addresses and other
51345 + identifying information useful to an attacker in fingerprinting a
51346 + system for a targeted exploit.
51347 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
51348 + created.
51349 +
51350 +config GRKERNSEC_HARDEN_PTRACE
51351 + bool "Deter ptrace-based process snooping"
51352 + default y if GRKERNSEC_CONFIG_AUTO
51353 + help
51354 + If you say Y here, TTY sniffers and other malicious monitoring
51355 + programs implemented through ptrace will be defeated. If you
51356 + have been using the RBAC system, this option has already been
51357 + enabled for several years for all users, with the ability to make
51358 + fine-grained exceptions.
51359 +
51360 + This option only affects the ability of non-root users to ptrace
51361 + processes that are not a descendant of the ptracing process.
51362 + This means that strace ./binary and gdb ./binary will still work,
51363 + but attaching to arbitrary processes will not. If the sysctl
51364 + option is enabled, a sysctl option with name "harden_ptrace" is
51365 + created.
51366 +
51367 +config GRKERNSEC_PTRACE_READEXEC
51368 + bool "Require read access to ptrace sensitive binaries"
51369 + default y if GRKERNSEC_CONFIG_AUTO
51370 + help
51371 + If you say Y here, unprivileged users will not be able to ptrace unreadable
51372 + binaries. This option is useful in environments that
51373 + remove the read bits (e.g. file mode 4711) from suid binaries to
51374 + prevent infoleaking of their contents. This option adds
51375 + consistency to the use of that file mode, as the binary could normally
51376 + be read out when run without privileges while ptracing.
51377 +
51378 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
51379 + is created.
51380 +
51381 +config GRKERNSEC_SETXID
51382 + bool "Enforce consistent multithreaded privileges"
51383 + default y if GRKERNSEC_CONFIG_AUTO
51384 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
51385 + help
51386 + If you say Y here, a change from a root uid to a non-root uid
51387 + in a multithreaded application will cause the resulting uids,
51388 + gids, supplementary groups, and capabilities in that thread
51389 + to be propagated to the other threads of the process. In most
51390 + cases this is unnecessary, as glibc will emulate this behavior
51391 + on behalf of the application. Other libcs do not act in the
51392 + same way, allowing the other threads of the process to continue
51393 + running with root privileges. If the sysctl option is enabled,
51394 + a sysctl option with name "consistent_setxid" is created.
51395 +
51396 +config GRKERNSEC_TPE
51397 + bool "Trusted Path Execution (TPE)"
51398 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
51399 + help
51400 + If you say Y here, you will be able to choose a gid to add to the
51401 + supplementary groups of users you want to mark as "untrusted."
51402 + These users will not be able to execute any files that are not in
51403 + root-owned directories writable only by root. If the sysctl option
51404 + is enabled, a sysctl option with name "tpe" is created.
51405 +
51406 +config GRKERNSEC_TPE_ALL
51407 + bool "Partially restrict all non-root users"
51408 + depends on GRKERNSEC_TPE
51409 + help
51410 + If you say Y here, all non-root users will be covered under
51411 + a weaker TPE restriction. This is separate from, and in addition to,
51412 + the main TPE options that you have selected elsewhere. Thus, if a
51413 + "trusted" GID is chosen, this restriction applies to even that GID.
51414 + Under this restriction, all non-root users will only be allowed to
51415 + execute files in directories they own that are not group or
51416 + world-writable, or in directories owned by root and writable only by
51417 + root. If the sysctl option is enabled, a sysctl option with name
51418 + "tpe_restrict_all" is created.
51419 +
51420 +config GRKERNSEC_TPE_INVERT
51421 + bool "Invert GID option"
51422 + depends on GRKERNSEC_TPE
51423 + help
51424 + If you say Y here, the group you specify in the TPE configuration will
51425 + decide what group TPE restrictions will be *disabled* for. This
51426 + option is useful if you want TPE restrictions to be applied to most
51427 + users on the system. If the sysctl option is enabled, a sysctl option
51428 + with name "tpe_invert" is created. Unlike other sysctl options, this
51429 + entry will default to on for backward-compatibility.
51430 +
51431 +config GRKERNSEC_TPE_GID
51432 + int "GID for untrusted users"
51433 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51434 + default 1005
51435 + help
51436 + Setting this GID determines what group TPE restrictions will be
51437 + *enabled* for. If the sysctl option is enabled, a sysctl option
51438 + with name "tpe_gid" is created.
51439 +
51440 +config GRKERNSEC_TPE_GID
51441 + int "GID for trusted users"
51442 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51443 + default 1005
51444 + help
51445 + Setting this GID determines what group TPE restrictions will be
51446 + *disabled* for. If the sysctl option is enabled, a sysctl option
51447 + with name "tpe_gid" is created.
51448 +
51449 +endmenu
51450 +menu "Network Protections"
51451 +depends on GRKERNSEC
51452 +
51453 +config GRKERNSEC_RANDNET
51454 + bool "Larger entropy pools"
51455 + default y if GRKERNSEC_CONFIG_AUTO
51456 + help
51457 + If you say Y here, the entropy pools used for many features of Linux
51458 + and grsecurity will be doubled in size. Since several grsecurity
51459 + features use additional randomness, it is recommended that you say Y
51460 + here. Saying Y here has a similar effect as modifying
51461 + /proc/sys/kernel/random/poolsize.
51462 +
51463 +config GRKERNSEC_BLACKHOLE
51464 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51465 + default y if GRKERNSEC_CONFIG_AUTO
51466 + depends on NET
51467 + help
51468 + If you say Y here, neither TCP resets nor ICMP
51469 + destination-unreachable packets will be sent in response to packets
51470 + sent to ports for which no associated listening process exists.
51471 + This feature supports both IPV4 and IPV6 and exempts the
51472 + loopback interface from blackholing. Enabling this feature
51473 + makes a host more resilient to DoS attacks and reduces network
51474 + visibility against scanners.
51475 +
51476 + The blackhole feature as-implemented is equivalent to the FreeBSD
51477 + blackhole feature, as it prevents RST responses to all packets, not
51478 + just SYNs. Under most application behavior this causes no
51479 + problems, but applications (like haproxy) may not close certain
51480 + connections in a way that cleanly terminates them on the remote
51481 + end, leaving the remote host in LAST_ACK state. Because of this
51482 + side-effect and to prevent intentional LAST_ACK DoSes, this
51483 + feature also adds automatic mitigation against such attacks.
51484 + The mitigation drastically reduces the amount of time a socket
51485 + can spend in LAST_ACK state. If you're using haproxy and not
51486 + all servers it connects to have this option enabled, consider
51487 + disabling this feature on the haproxy host.
51488 +
51489 + If the sysctl option is enabled, two sysctl options with names
51490 + "ip_blackhole" and "lastack_retries" will be created.
51491 + While "ip_blackhole" takes the standard zero/non-zero on/off
51492 + toggle, "lastack_retries" uses the same kinds of values as
51493 + "tcp_retries1" and "tcp_retries2". The default value of 4
51494 + prevents a socket from lasting more than 45 seconds in LAST_ACK
51495 + state.
51496 +
51497 +config GRKERNSEC_SOCKET
51498 + bool "Socket restrictions"
51499 + depends on NET
51500 + help
51501 + If you say Y here, you will be able to choose from several options.
51502 + If you assign a GID on your system and add it to the supplementary
51503 + groups of users you want to restrict socket access to, this patch
51504 + will perform up to three things, based on the option(s) you choose.
51505 +
51506 +config GRKERNSEC_SOCKET_ALL
51507 + bool "Deny any sockets to group"
51508 + depends on GRKERNSEC_SOCKET
51509 + help
51510 + If you say Y here, you will be able to choose a GID whose users will
51511 + be unable to connect to other hosts from your machine or run server
51512 + applications from your machine. If the sysctl option is enabled, a
51513 + sysctl option with name "socket_all" is created.
51514 +
51515 +config GRKERNSEC_SOCKET_ALL_GID
51516 + int "GID to deny all sockets for"
51517 + depends on GRKERNSEC_SOCKET_ALL
51518 + default 1004
51519 + help
51520 + Here you can choose the GID to disable socket access for. Remember to
51521 + add the users you want socket access disabled for to the GID
51522 + specified here. If the sysctl option is enabled, a sysctl option
51523 + with name "socket_all_gid" is created.
51524 +
51525 +config GRKERNSEC_SOCKET_CLIENT
51526 + bool "Deny client sockets to group"
51527 + depends on GRKERNSEC_SOCKET
51528 + help
51529 + If you say Y here, you will be able to choose a GID whose users will
51530 + be unable to connect to other hosts from your machine, but will be
51531 + able to run servers. If this option is enabled, all users in the group
51532 + you specify will have to use passive mode when initiating ftp transfers
51533 + from the shell on your machine. If the sysctl option is enabled, a
51534 + sysctl option with name "socket_client" is created.
51535 +
51536 +config GRKERNSEC_SOCKET_CLIENT_GID
51537 + int "GID to deny client sockets for"
51538 + depends on GRKERNSEC_SOCKET_CLIENT
51539 + default 1003
51540 + help
51541 + Here you can choose the GID to disable client socket access for.
51542 + Remember to add the users you want client socket access disabled for to
51543 + the GID specified here. If the sysctl option is enabled, a sysctl
51544 + option with name "socket_client_gid" is created.
51545 +
51546 +config GRKERNSEC_SOCKET_SERVER
51547 + bool "Deny server sockets to group"
51548 + depends on GRKERNSEC_SOCKET
51549 + help
51550 + If you say Y here, you will be able to choose a GID whose users will
51551 + be unable to run server applications from your machine. If the sysctl
51552 + option is enabled, a sysctl option with name "socket_server" is created.
51553 +
51554 +config GRKERNSEC_SOCKET_SERVER_GID
51555 + int "GID to deny server sockets for"
51556 + depends on GRKERNSEC_SOCKET_SERVER
51557 + default 1002
51558 + help
51559 + Here you can choose the GID to disable server socket access for.
51560 + Remember to add the users you want server socket access disabled for to
51561 + the GID specified here. If the sysctl option is enabled, a sysctl
51562 + option with name "socket_server_gid" is created.
51563 +
51564 +endmenu
51565 +menu "Sysctl Support"
51566 +depends on GRKERNSEC && SYSCTL
51567 +
51568 +config GRKERNSEC_SYSCTL
51569 + bool "Sysctl support"
51570 + default y if GRKERNSEC_CONFIG_AUTO
51571 + help
51572 + If you say Y here, you will be able to change the options that
51573 + grsecurity runs with at bootup, without having to recompile your
51574 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51575 + to enable (1) or disable (0) various features. All the sysctl entries
51576 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51577 + All features enabled in the kernel configuration are disabled at boot
51578 + if you do not say Y to the "Turn on features by default" option.
51579 + All options should be set at startup, and the grsec_lock entry should
51580 + be set to a non-zero value after all the options are set.
51581 + *THIS IS EXTREMELY IMPORTANT*
51582 +
51583 +config GRKERNSEC_SYSCTL_DISTRO
51584 + bool "Extra sysctl support for distro makers (READ HELP)"
51585 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51586 + help
51587 + If you say Y here, additional sysctl options will be created
51588 + for features that affect processes running as root. Therefore,
51589 + it is critical when using this option that the grsec_lock entry be
51590 + enabled after boot. Only distros with prebuilt kernel packages
51591 + with this option enabled that can ensure grsec_lock is enabled
51592 + after boot should use this option.
51593 + *Failure to set grsec_lock after boot makes all grsec features
51594 + this option covers useless*
51595 +
51596 + Currently this option creates the following sysctl entries:
51597 + "Disable Privileged I/O": "disable_priv_io"
51598 +
51599 +config GRKERNSEC_SYSCTL_ON
51600 + bool "Turn on features by default"
51601 + default y if GRKERNSEC_CONFIG_AUTO
51602 + depends on GRKERNSEC_SYSCTL
51603 + help
51604 + If you say Y here, instead of having all features enabled in the
51605 + kernel configuration disabled at boot time, the features will be
51606 + enabled at boot time. It is recommended you say Y here unless
51607 + there is some reason you would want all sysctl-tunable features to
51608 + be disabled by default. As mentioned elsewhere, it is important
51609 + to enable the grsec_lock entry once you have finished modifying
51610 + the sysctl entries.
51611 +
51612 +endmenu
51613 +menu "Logging Options"
51614 +depends on GRKERNSEC
51615 +
51616 +config GRKERNSEC_FLOODTIME
51617 + int "Seconds in between log messages (minimum)"
51618 + default 10
51619 + help
51620 + This option allows you to enforce the number of seconds between
51621 + grsecurity log messages. The default should be suitable for most
51622 + people, however, if you choose to change it, choose a value small enough
51623 + to allow informative logs to be produced, but large enough to
51624 + prevent flooding.
51625 +
51626 +config GRKERNSEC_FLOODBURST
51627 + int "Number of messages in a burst (maximum)"
51628 + default 6
51629 + help
51630 + This option allows you to choose the maximum number of messages allowed
51631 + within the flood time interval you chose in a separate option. The
51632 + default should be suitable for most people, however if you find that
51633 + many of your logs are being interpreted as flooding, you may want to
51634 + raise this value.
51635 +
51636 +endmenu
51637 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51638 new file mode 100644
51639 index 0000000..1b9afa9
51640 --- /dev/null
51641 +++ b/grsecurity/Makefile
51642 @@ -0,0 +1,38 @@
51643 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51644 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51645 +# into an RBAC system
51646 +#
51647 +# All code in this directory and various hooks inserted throughout the kernel
51648 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51649 +# under the GPL v2 or higher
51650 +
51651 +KBUILD_CFLAGS += -Werror
51652 +
51653 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51654 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51655 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51656 +
51657 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51658 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51659 + gracl_learn.o grsec_log.o
51660 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51661 +
51662 +ifdef CONFIG_NET
51663 +obj-y += grsec_sock.o
51664 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51665 +endif
51666 +
51667 +ifndef CONFIG_GRKERNSEC
51668 +obj-y += grsec_disabled.o
51669 +endif
51670 +
51671 +ifdef CONFIG_GRKERNSEC_HIDESYM
51672 +extra-y := grsec_hidesym.o
51673 +$(obj)/grsec_hidesym.o:
51674 + @-chmod -f 500 /boot
51675 + @-chmod -f 500 /lib/modules
51676 + @-chmod -f 500 /lib64/modules
51677 + @-chmod -f 500 /lib32/modules
51678 + @-chmod -f 700 .
51679 + @echo ' grsec: protected kernel image paths'
51680 +endif
51681 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51682 new file mode 100644
51683 index 0000000..1561617
51684 --- /dev/null
51685 +++ b/grsecurity/gracl.c
51686 @@ -0,0 +1,4017 @@
51687 +#include <linux/kernel.h>
51688 +#include <linux/module.h>
51689 +#include <linux/sched.h>
51690 +#include <linux/mm.h>
51691 +#include <linux/file.h>
51692 +#include <linux/fs.h>
51693 +#include <linux/namei.h>
51694 +#include <linux/mount.h>
51695 +#include <linux/tty.h>
51696 +#include <linux/proc_fs.h>
51697 +#include <linux/lglock.h>
51698 +#include <linux/slab.h>
51699 +#include <linux/vmalloc.h>
51700 +#include <linux/types.h>
51701 +#include <linux/sysctl.h>
51702 +#include <linux/netdevice.h>
51703 +#include <linux/ptrace.h>
51704 +#include <linux/gracl.h>
51705 +#include <linux/gralloc.h>
51706 +#include <linux/security.h>
51707 +#include <linux/grinternal.h>
51708 +#include <linux/pid_namespace.h>
51709 +#include <linux/stop_machine.h>
51710 +#include <linux/fdtable.h>
51711 +#include <linux/percpu.h>
51712 +#include <linux/lglock.h>
51713 +#include "../fs/mount.h"
51714 +
51715 +#include <asm/uaccess.h>
51716 +#include <asm/errno.h>
51717 +#include <asm/mman.h>
51718 +
51719 +extern struct lglock vfsmount_lock;
51720 +
51721 +static struct acl_role_db acl_role_set;
51722 +static struct name_db name_set;
51723 +static struct inodev_db inodev_set;
51724 +
51725 +/* for keeping track of userspace pointers used for subjects, so we
51726 + can share references in the kernel as well
51727 +*/
51728 +
51729 +static struct path real_root;
51730 +
51731 +static struct acl_subj_map_db subj_map_set;
51732 +
51733 +static struct acl_role_label *default_role;
51734 +
51735 +static struct acl_role_label *role_list;
51736 +
51737 +static u16 acl_sp_role_value;
51738 +
51739 +extern char *gr_shared_page[4];
51740 +static DEFINE_MUTEX(gr_dev_mutex);
51741 +DEFINE_RWLOCK(gr_inode_lock);
51742 +
51743 +struct gr_arg *gr_usermode;
51744 +
51745 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
51746 +
51747 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51748 +extern void gr_clear_learn_entries(void);
51749 +
51750 +#ifdef CONFIG_GRKERNSEC_RESLOG
51751 +extern void gr_log_resource(const struct task_struct *task,
51752 + const int res, const unsigned long wanted, const int gt);
51753 +#endif
51754 +
51755 +unsigned char *gr_system_salt;
51756 +unsigned char *gr_system_sum;
51757 +
51758 +static struct sprole_pw **acl_special_roles = NULL;
51759 +static __u16 num_sprole_pws = 0;
51760 +
51761 +static struct acl_role_label *kernel_role = NULL;
51762 +
51763 +static unsigned int gr_auth_attempts = 0;
51764 +static unsigned long gr_auth_expires = 0UL;
51765 +
51766 +#ifdef CONFIG_NET
51767 +extern struct vfsmount *sock_mnt;
51768 +#endif
51769 +
51770 +extern struct vfsmount *pipe_mnt;
51771 +extern struct vfsmount *shm_mnt;
51772 +#ifdef CONFIG_HUGETLBFS
51773 +extern struct vfsmount *hugetlbfs_vfsmount;
51774 +#endif
51775 +
51776 +static struct acl_object_label *fakefs_obj_rw;
51777 +static struct acl_object_label *fakefs_obj_rwx;
51778 +
51779 +extern int gr_init_uidset(void);
51780 +extern void gr_free_uidset(void);
51781 +extern void gr_remove_uid(uid_t uid);
51782 +extern int gr_find_uid(uid_t uid);
51783 +
51784 +__inline__ int
51785 +gr_acl_is_enabled(void)
51786 +{
51787 + return (gr_status & GR_READY);
51788 +}
51789 +
51790 +#ifdef CONFIG_BTRFS_FS
51791 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51792 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51793 +#endif
51794 +
51795 +static inline dev_t __get_dev(const struct dentry *dentry)
51796 +{
51797 +#ifdef CONFIG_BTRFS_FS
51798 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51799 + return get_btrfs_dev_from_inode(dentry->d_inode);
51800 + else
51801 +#endif
51802 + return dentry->d_inode->i_sb->s_dev;
51803 +}
51804 +
51805 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51806 +{
51807 + return __get_dev(dentry);
51808 +}
51809 +
51810 +static char gr_task_roletype_to_char(struct task_struct *task)
51811 +{
51812 + switch (task->role->roletype &
51813 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51814 + GR_ROLE_SPECIAL)) {
51815 + case GR_ROLE_DEFAULT:
51816 + return 'D';
51817 + case GR_ROLE_USER:
51818 + return 'U';
51819 + case GR_ROLE_GROUP:
51820 + return 'G';
51821 + case GR_ROLE_SPECIAL:
51822 + return 'S';
51823 + }
51824 +
51825 + return 'X';
51826 +}
51827 +
51828 +char gr_roletype_to_char(void)
51829 +{
51830 + return gr_task_roletype_to_char(current);
51831 +}
51832 +
51833 +__inline__ int
51834 +gr_acl_tpe_check(void)
51835 +{
51836 + if (unlikely(!(gr_status & GR_READY)))
51837 + return 0;
51838 + if (current->role->roletype & GR_ROLE_TPE)
51839 + return 1;
51840 + else
51841 + return 0;
51842 +}
51843 +
51844 +int
51845 +gr_handle_rawio(const struct inode *inode)
51846 +{
51847 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51848 + if (inode && S_ISBLK(inode->i_mode) &&
51849 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51850 + !capable(CAP_SYS_RAWIO))
51851 + return 1;
51852 +#endif
51853 + return 0;
51854 +}
51855 +
51856 +static int
51857 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51858 +{
51859 + if (likely(lena != lenb))
51860 + return 0;
51861 +
51862 + return !memcmp(a, b, lena);
51863 +}
51864 +
51865 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51866 +{
51867 + *buflen -= namelen;
51868 + if (*buflen < 0)
51869 + return -ENAMETOOLONG;
51870 + *buffer -= namelen;
51871 + memcpy(*buffer, str, namelen);
51872 + return 0;
51873 +}
51874 +
51875 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51876 +{
51877 + return prepend(buffer, buflen, name->name, name->len);
51878 +}
51879 +
51880 +static int prepend_path(const struct path *path, struct path *root,
51881 + char **buffer, int *buflen)
51882 +{
51883 + struct dentry *dentry = path->dentry;
51884 + struct vfsmount *vfsmnt = path->mnt;
51885 + struct mount *mnt = real_mount(vfsmnt);
51886 + bool slash = false;
51887 + int error = 0;
51888 +
51889 + while (dentry != root->dentry || vfsmnt != root->mnt) {
51890 + struct dentry * parent;
51891 +
51892 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51893 + /* Global root? */
51894 + if (!mnt_has_parent(mnt)) {
51895 + goto out;
51896 + }
51897 + dentry = mnt->mnt_mountpoint;
51898 + mnt = mnt->mnt_parent;
51899 + vfsmnt = &mnt->mnt;
51900 + continue;
51901 + }
51902 + parent = dentry->d_parent;
51903 + prefetch(parent);
51904 + spin_lock(&dentry->d_lock);
51905 + error = prepend_name(buffer, buflen, &dentry->d_name);
51906 + spin_unlock(&dentry->d_lock);
51907 + if (!error)
51908 + error = prepend(buffer, buflen, "/", 1);
51909 + if (error)
51910 + break;
51911 +
51912 + slash = true;
51913 + dentry = parent;
51914 + }
51915 +
51916 +out:
51917 + if (!error && !slash)
51918 + error = prepend(buffer, buflen, "/", 1);
51919 +
51920 + return error;
51921 +}
51922 +
51923 +/* this must be called with vfsmount_lock and rename_lock held */
51924 +
51925 +static char *__our_d_path(const struct path *path, struct path *root,
51926 + char *buf, int buflen)
51927 +{
51928 + char *res = buf + buflen;
51929 + int error;
51930 +
51931 + prepend(&res, &buflen, "\0", 1);
51932 + error = prepend_path(path, root, &res, &buflen);
51933 + if (error)
51934 + return ERR_PTR(error);
51935 +
51936 + return res;
51937 +}
51938 +
51939 +static char *
51940 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51941 +{
51942 + char *retval;
51943 +
51944 + retval = __our_d_path(path, root, buf, buflen);
51945 + if (unlikely(IS_ERR(retval)))
51946 + retval = strcpy(buf, "<path too long>");
51947 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51948 + retval[1] = '\0';
51949 +
51950 + return retval;
51951 +}
51952 +
51953 +static char *
51954 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51955 + char *buf, int buflen)
51956 +{
51957 + struct path path;
51958 + char *res;
51959 +
51960 + path.dentry = (struct dentry *)dentry;
51961 + path.mnt = (struct vfsmount *)vfsmnt;
51962 +
51963 + /* we can use real_root.dentry, real_root.mnt, because this is only called
51964 + by the RBAC system */
51965 + res = gen_full_path(&path, &real_root, buf, buflen);
51966 +
51967 + return res;
51968 +}
51969 +
51970 +static char *
51971 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51972 + char *buf, int buflen)
51973 +{
51974 + char *res;
51975 + struct path path;
51976 + struct path root;
51977 + struct task_struct *reaper = init_pid_ns.child_reaper;
51978 +
51979 + path.dentry = (struct dentry *)dentry;
51980 + path.mnt = (struct vfsmount *)vfsmnt;
51981 +
51982 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51983 + get_fs_root(reaper->fs, &root);
51984 +
51985 + write_seqlock(&rename_lock);
51986 + br_read_lock(&vfsmount_lock);
51987 + res = gen_full_path(&path, &root, buf, buflen);
51988 + br_read_unlock(&vfsmount_lock);
51989 + write_sequnlock(&rename_lock);
51990 +
51991 + path_put(&root);
51992 + return res;
51993 +}
51994 +
51995 +static char *
51996 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51997 +{
51998 + char *ret;
51999 + write_seqlock(&rename_lock);
52000 + br_read_lock(&vfsmount_lock);
52001 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
52002 + PAGE_SIZE);
52003 + br_read_unlock(&vfsmount_lock);
52004 + write_sequnlock(&rename_lock);
52005 + return ret;
52006 +}
52007 +
52008 +static char *
52009 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
52010 +{
52011 + char *ret;
52012 + char *buf;
52013 + int buflen;
52014 +
52015 + write_seqlock(&rename_lock);
52016 + br_read_lock(&vfsmount_lock);
52017 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52018 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
52019 + buflen = (int)(ret - buf);
52020 + if (buflen >= 5)
52021 + prepend(&ret, &buflen, "/proc", 5);
52022 + else
52023 + ret = strcpy(buf, "<path too long>");
52024 + br_read_unlock(&vfsmount_lock);
52025 + write_sequnlock(&rename_lock);
52026 + return ret;
52027 +}
52028 +
52029 +char *
52030 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
52031 +{
52032 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
52033 + PAGE_SIZE);
52034 +}
52035 +
52036 +char *
52037 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
52038 +{
52039 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52040 + PAGE_SIZE);
52041 +}
52042 +
52043 +char *
52044 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
52045 +{
52046 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
52047 + PAGE_SIZE);
52048 +}
52049 +
52050 +char *
52051 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
52052 +{
52053 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
52054 + PAGE_SIZE);
52055 +}
52056 +
52057 +char *
52058 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
52059 +{
52060 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
52061 + PAGE_SIZE);
52062 +}
52063 +
52064 +__inline__ __u32
52065 +to_gr_audit(const __u32 reqmode)
52066 +{
52067 + /* masks off auditable permission flags, then shifts them to create
52068 + auditing flags, and adds the special case of append auditing if
52069 + we're requesting write */
52070 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
52071 +}
52072 +
52073 +struct acl_subject_label *
52074 +lookup_subject_map(const struct acl_subject_label *userp)
52075 +{
52076 + unsigned int index = shash(userp, subj_map_set.s_size);
52077 + struct subject_map *match;
52078 +
52079 + match = subj_map_set.s_hash[index];
52080 +
52081 + while (match && match->user != userp)
52082 + match = match->next;
52083 +
52084 + if (match != NULL)
52085 + return match->kernel;
52086 + else
52087 + return NULL;
52088 +}
52089 +
52090 +static void
52091 +insert_subj_map_entry(struct subject_map *subjmap)
52092 +{
52093 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
52094 + struct subject_map **curr;
52095 +
52096 + subjmap->prev = NULL;
52097 +
52098 + curr = &subj_map_set.s_hash[index];
52099 + if (*curr != NULL)
52100 + (*curr)->prev = subjmap;
52101 +
52102 + subjmap->next = *curr;
52103 + *curr = subjmap;
52104 +
52105 + return;
52106 +}
52107 +
52108 +static struct acl_role_label *
52109 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
52110 + const gid_t gid)
52111 +{
52112 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
52113 + struct acl_role_label *match;
52114 + struct role_allowed_ip *ipp;
52115 + unsigned int x;
52116 + u32 curr_ip = task->signal->curr_ip;
52117 +
52118 + task->signal->saved_ip = curr_ip;
52119 +
52120 + match = acl_role_set.r_hash[index];
52121 +
52122 + while (match) {
52123 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
52124 + for (x = 0; x < match->domain_child_num; x++) {
52125 + if (match->domain_children[x] == uid)
52126 + goto found;
52127 + }
52128 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
52129 + break;
52130 + match = match->next;
52131 + }
52132 +found:
52133 + if (match == NULL) {
52134 + try_group:
52135 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
52136 + match = acl_role_set.r_hash[index];
52137 +
52138 + while (match) {
52139 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
52140 + for (x = 0; x < match->domain_child_num; x++) {
52141 + if (match->domain_children[x] == gid)
52142 + goto found2;
52143 + }
52144 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
52145 + break;
52146 + match = match->next;
52147 + }
52148 +found2:
52149 + if (match == NULL)
52150 + match = default_role;
52151 + if (match->allowed_ips == NULL)
52152 + return match;
52153 + else {
52154 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
52155 + if (likely
52156 + ((ntohl(curr_ip) & ipp->netmask) ==
52157 + (ntohl(ipp->addr) & ipp->netmask)))
52158 + return match;
52159 + }
52160 + match = default_role;
52161 + }
52162 + } else if (match->allowed_ips == NULL) {
52163 + return match;
52164 + } else {
52165 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
52166 + if (likely
52167 + ((ntohl(curr_ip) & ipp->netmask) ==
52168 + (ntohl(ipp->addr) & ipp->netmask)))
52169 + return match;
52170 + }
52171 + goto try_group;
52172 + }
52173 +
52174 + return match;
52175 +}
52176 +
52177 +struct acl_subject_label *
52178 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
52179 + const struct acl_role_label *role)
52180 +{
52181 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
52182 + struct acl_subject_label *match;
52183 +
52184 + match = role->subj_hash[index];
52185 +
52186 + while (match && (match->inode != ino || match->device != dev ||
52187 + (match->mode & GR_DELETED))) {
52188 + match = match->next;
52189 + }
52190 +
52191 + if (match && !(match->mode & GR_DELETED))
52192 + return match;
52193 + else
52194 + return NULL;
52195 +}
52196 +
52197 +struct acl_subject_label *
52198 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
52199 + const struct acl_role_label *role)
52200 +{
52201 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
52202 + struct acl_subject_label *match;
52203 +
52204 + match = role->subj_hash[index];
52205 +
52206 + while (match && (match->inode != ino || match->device != dev ||
52207 + !(match->mode & GR_DELETED))) {
52208 + match = match->next;
52209 + }
52210 +
52211 + if (match && (match->mode & GR_DELETED))
52212 + return match;
52213 + else
52214 + return NULL;
52215 +}
52216 +
52217 +static struct acl_object_label *
52218 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
52219 + const struct acl_subject_label *subj)
52220 +{
52221 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
52222 + struct acl_object_label *match;
52223 +
52224 + match = subj->obj_hash[index];
52225 +
52226 + while (match && (match->inode != ino || match->device != dev ||
52227 + (match->mode & GR_DELETED))) {
52228 + match = match->next;
52229 + }
52230 +
52231 + if (match && !(match->mode & GR_DELETED))
52232 + return match;
52233 + else
52234 + return NULL;
52235 +}
52236 +
52237 +static struct acl_object_label *
52238 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
52239 + const struct acl_subject_label *subj)
52240 +{
52241 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
52242 + struct acl_object_label *match;
52243 +
52244 + match = subj->obj_hash[index];
52245 +
52246 + while (match && (match->inode != ino || match->device != dev ||
52247 + !(match->mode & GR_DELETED))) {
52248 + match = match->next;
52249 + }
52250 +
52251 + if (match && (match->mode & GR_DELETED))
52252 + return match;
52253 +
52254 + match = subj->obj_hash[index];
52255 +
52256 + while (match && (match->inode != ino || match->device != dev ||
52257 + (match->mode & GR_DELETED))) {
52258 + match = match->next;
52259 + }
52260 +
52261 + if (match && !(match->mode & GR_DELETED))
52262 + return match;
52263 + else
52264 + return NULL;
52265 +}
52266 +
52267 +static struct name_entry *
52268 +lookup_name_entry(const char *name)
52269 +{
52270 + unsigned int len = strlen(name);
52271 + unsigned int key = full_name_hash(name, len);
52272 + unsigned int index = key % name_set.n_size;
52273 + struct name_entry *match;
52274 +
52275 + match = name_set.n_hash[index];
52276 +
52277 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
52278 + match = match->next;
52279 +
52280 + return match;
52281 +}
52282 +
52283 +static struct name_entry *
52284 +lookup_name_entry_create(const char *name)
52285 +{
52286 + unsigned int len = strlen(name);
52287 + unsigned int key = full_name_hash(name, len);
52288 + unsigned int index = key % name_set.n_size;
52289 + struct name_entry *match;
52290 +
52291 + match = name_set.n_hash[index];
52292 +
52293 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52294 + !match->deleted))
52295 + match = match->next;
52296 +
52297 + if (match && match->deleted)
52298 + return match;
52299 +
52300 + match = name_set.n_hash[index];
52301 +
52302 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52303 + match->deleted))
52304 + match = match->next;
52305 +
52306 + if (match && !match->deleted)
52307 + return match;
52308 + else
52309 + return NULL;
52310 +}
52311 +
52312 +static struct inodev_entry *
52313 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
52314 +{
52315 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
52316 + struct inodev_entry *match;
52317 +
52318 + match = inodev_set.i_hash[index];
52319 +
52320 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
52321 + match = match->next;
52322 +
52323 + return match;
52324 +}
52325 +
52326 +static void
52327 +insert_inodev_entry(struct inodev_entry *entry)
52328 +{
52329 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
52330 + inodev_set.i_size);
52331 + struct inodev_entry **curr;
52332 +
52333 + entry->prev = NULL;
52334 +
52335 + curr = &inodev_set.i_hash[index];
52336 + if (*curr != NULL)
52337 + (*curr)->prev = entry;
52338 +
52339 + entry->next = *curr;
52340 + *curr = entry;
52341 +
52342 + return;
52343 +}
52344 +
52345 +static void
52346 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52347 +{
52348 + unsigned int index =
52349 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52350 + struct acl_role_label **curr;
52351 + struct acl_role_label *tmp, *tmp2;
52352 +
52353 + curr = &acl_role_set.r_hash[index];
52354 +
52355 + /* simple case, slot is empty, just set it to our role */
52356 + if (*curr == NULL) {
52357 + *curr = role;
52358 + } else {
52359 + /* example:
52360 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
52361 + 2 -> 3
52362 + */
52363 + /* first check to see if we can already be reached via this slot */
52364 + tmp = *curr;
52365 + while (tmp && tmp != role)
52366 + tmp = tmp->next;
52367 + if (tmp == role) {
52368 + /* we don't need to add ourselves to this slot's chain */
52369 + return;
52370 + }
52371 + /* we need to add ourselves to this chain, two cases */
52372 + if (role->next == NULL) {
52373 + /* simple case, append the current chain to our role */
52374 + role->next = *curr;
52375 + *curr = role;
52376 + } else {
52377 + /* 1 -> 2 -> 3 -> 4
52378 + 2 -> 3 -> 4
52379 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
52380 + */
52381 + /* trickier case: walk our role's chain until we find
52382 + the role for the start of the current slot's chain */
52383 + tmp = role;
52384 + tmp2 = *curr;
52385 + while (tmp->next && tmp->next != tmp2)
52386 + tmp = tmp->next;
52387 + if (tmp->next == tmp2) {
52388 + /* from example above, we found 3, so just
52389 + replace this slot's chain with ours */
52390 + *curr = role;
52391 + } else {
52392 + /* we didn't find a subset of our role's chain
52393 + in the current slot's chain, so append their
52394 + chain to ours, and set us as the first role in
52395 + the slot's chain
52396 +
52397 + we could fold this case with the case above,
52398 + but making it explicit for clarity
52399 + */
52400 + tmp->next = tmp2;
52401 + *curr = role;
52402 + }
52403 + }
52404 + }
52405 +
52406 + return;
52407 +}
52408 +
52409 +static void
52410 +insert_acl_role_label(struct acl_role_label *role)
52411 +{
52412 + int i;
52413 +
52414 + if (role_list == NULL) {
52415 + role_list = role;
52416 + role->prev = NULL;
52417 + } else {
52418 + role->prev = role_list;
52419 + role_list = role;
52420 + }
52421 +
52422 + /* used for hash chains */
52423 + role->next = NULL;
52424 +
52425 + if (role->roletype & GR_ROLE_DOMAIN) {
52426 + for (i = 0; i < role->domain_child_num; i++)
52427 + __insert_acl_role_label(role, role->domain_children[i]);
52428 + } else
52429 + __insert_acl_role_label(role, role->uidgid);
52430 +}
52431 +
52432 +static int
52433 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52434 +{
52435 + struct name_entry **curr, *nentry;
52436 + struct inodev_entry *ientry;
52437 + unsigned int len = strlen(name);
52438 + unsigned int key = full_name_hash(name, len);
52439 + unsigned int index = key % name_set.n_size;
52440 +
52441 + curr = &name_set.n_hash[index];
52442 +
52443 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52444 + curr = &((*curr)->next);
52445 +
52446 + if (*curr != NULL)
52447 + return 1;
52448 +
52449 + nentry = acl_alloc(sizeof (struct name_entry));
52450 + if (nentry == NULL)
52451 + return 0;
52452 + ientry = acl_alloc(sizeof (struct inodev_entry));
52453 + if (ientry == NULL)
52454 + return 0;
52455 + ientry->nentry = nentry;
52456 +
52457 + nentry->key = key;
52458 + nentry->name = name;
52459 + nentry->inode = inode;
52460 + nentry->device = device;
52461 + nentry->len = len;
52462 + nentry->deleted = deleted;
52463 +
52464 + nentry->prev = NULL;
52465 + curr = &name_set.n_hash[index];
52466 + if (*curr != NULL)
52467 + (*curr)->prev = nentry;
52468 + nentry->next = *curr;
52469 + *curr = nentry;
52470 +
52471 + /* insert us into the table searchable by inode/dev */
52472 + insert_inodev_entry(ientry);
52473 +
52474 + return 1;
52475 +}
52476 +
52477 +static void
52478 +insert_acl_obj_label(struct acl_object_label *obj,
52479 + struct acl_subject_label *subj)
52480 +{
52481 + unsigned int index =
52482 + fhash(obj->inode, obj->device, subj->obj_hash_size);
52483 + struct acl_object_label **curr;
52484 +
52485 +
52486 + obj->prev = NULL;
52487 +
52488 + curr = &subj->obj_hash[index];
52489 + if (*curr != NULL)
52490 + (*curr)->prev = obj;
52491 +
52492 + obj->next = *curr;
52493 + *curr = obj;
52494 +
52495 + return;
52496 +}
52497 +
52498 +static void
52499 +insert_acl_subj_label(struct acl_subject_label *obj,
52500 + struct acl_role_label *role)
52501 +{
52502 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52503 + struct acl_subject_label **curr;
52504 +
52505 + obj->prev = NULL;
52506 +
52507 + curr = &role->subj_hash[index];
52508 + if (*curr != NULL)
52509 + (*curr)->prev = obj;
52510 +
52511 + obj->next = *curr;
52512 + *curr = obj;
52513 +
52514 + return;
52515 +}
52516 +
52517 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52518 +
52519 +static void *
52520 +create_table(__u32 * len, int elementsize)
52521 +{
52522 + unsigned int table_sizes[] = {
52523 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52524 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52525 + 4194301, 8388593, 16777213, 33554393, 67108859
52526 + };
52527 + void *newtable = NULL;
52528 + unsigned int pwr = 0;
52529 +
52530 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52531 + table_sizes[pwr] <= *len)
52532 + pwr++;
52533 +
52534 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52535 + return newtable;
52536 +
52537 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52538 + newtable =
52539 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52540 + else
52541 + newtable = vmalloc(table_sizes[pwr] * elementsize);
52542 +
52543 + *len = table_sizes[pwr];
52544 +
52545 + return newtable;
52546 +}
52547 +
52548 +static int
52549 +init_variables(const struct gr_arg *arg)
52550 +{
52551 + struct task_struct *reaper = init_pid_ns.child_reaper;
52552 + unsigned int stacksize;
52553 +
52554 + subj_map_set.s_size = arg->role_db.num_subjects;
52555 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52556 + name_set.n_size = arg->role_db.num_objects;
52557 + inodev_set.i_size = arg->role_db.num_objects;
52558 +
52559 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
52560 + !name_set.n_size || !inodev_set.i_size)
52561 + return 1;
52562 +
52563 + if (!gr_init_uidset())
52564 + return 1;
52565 +
52566 + /* set up the stack that holds allocation info */
52567 +
52568 + stacksize = arg->role_db.num_pointers + 5;
52569 +
52570 + if (!acl_alloc_stack_init(stacksize))
52571 + return 1;
52572 +
52573 + /* grab reference for the real root dentry and vfsmount */
52574 + get_fs_root(reaper->fs, &real_root);
52575 +
52576 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52577 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52578 +#endif
52579 +
52580 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52581 + if (fakefs_obj_rw == NULL)
52582 + return 1;
52583 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52584 +
52585 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52586 + if (fakefs_obj_rwx == NULL)
52587 + return 1;
52588 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
52589 +
52590 + subj_map_set.s_hash =
52591 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52592 + acl_role_set.r_hash =
52593 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52594 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52595 + inodev_set.i_hash =
52596 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52597 +
52598 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52599 + !name_set.n_hash || !inodev_set.i_hash)
52600 + return 1;
52601 +
52602 + memset(subj_map_set.s_hash, 0,
52603 + sizeof(struct subject_map *) * subj_map_set.s_size);
52604 + memset(acl_role_set.r_hash, 0,
52605 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
52606 + memset(name_set.n_hash, 0,
52607 + sizeof (struct name_entry *) * name_set.n_size);
52608 + memset(inodev_set.i_hash, 0,
52609 + sizeof (struct inodev_entry *) * inodev_set.i_size);
52610 +
52611 + return 0;
52612 +}
52613 +
52614 +/* free information not needed after startup
52615 + currently contains user->kernel pointer mappings for subjects
52616 +*/
52617 +
52618 +static void
52619 +free_init_variables(void)
52620 +{
52621 + __u32 i;
52622 +
52623 + if (subj_map_set.s_hash) {
52624 + for (i = 0; i < subj_map_set.s_size; i++) {
52625 + if (subj_map_set.s_hash[i]) {
52626 + kfree(subj_map_set.s_hash[i]);
52627 + subj_map_set.s_hash[i] = NULL;
52628 + }
52629 + }
52630 +
52631 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52632 + PAGE_SIZE)
52633 + kfree(subj_map_set.s_hash);
52634 + else
52635 + vfree(subj_map_set.s_hash);
52636 + }
52637 +
52638 + return;
52639 +}
52640 +
52641 +static void
52642 +free_variables(void)
52643 +{
52644 + struct acl_subject_label *s;
52645 + struct acl_role_label *r;
52646 + struct task_struct *task, *task2;
52647 + unsigned int x;
52648 +
52649 + gr_clear_learn_entries();
52650 +
52651 + read_lock(&tasklist_lock);
52652 + do_each_thread(task2, task) {
52653 + task->acl_sp_role = 0;
52654 + task->acl_role_id = 0;
52655 + task->acl = NULL;
52656 + task->role = NULL;
52657 + } while_each_thread(task2, task);
52658 + read_unlock(&tasklist_lock);
52659 +
52660 + /* release the reference to the real root dentry and vfsmount */
52661 + path_put(&real_root);
52662 + memset(&real_root, 0, sizeof(real_root));
52663 +
52664 + /* free all object hash tables */
52665 +
52666 + FOR_EACH_ROLE_START(r)
52667 + if (r->subj_hash == NULL)
52668 + goto next_role;
52669 + FOR_EACH_SUBJECT_START(r, s, x)
52670 + if (s->obj_hash == NULL)
52671 + break;
52672 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52673 + kfree(s->obj_hash);
52674 + else
52675 + vfree(s->obj_hash);
52676 + FOR_EACH_SUBJECT_END(s, x)
52677 + FOR_EACH_NESTED_SUBJECT_START(r, s)
52678 + if (s->obj_hash == NULL)
52679 + break;
52680 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52681 + kfree(s->obj_hash);
52682 + else
52683 + vfree(s->obj_hash);
52684 + FOR_EACH_NESTED_SUBJECT_END(s)
52685 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52686 + kfree(r->subj_hash);
52687 + else
52688 + vfree(r->subj_hash);
52689 + r->subj_hash = NULL;
52690 +next_role:
52691 + FOR_EACH_ROLE_END(r)
52692 +
52693 + acl_free_all();
52694 +
52695 + if (acl_role_set.r_hash) {
52696 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52697 + PAGE_SIZE)
52698 + kfree(acl_role_set.r_hash);
52699 + else
52700 + vfree(acl_role_set.r_hash);
52701 + }
52702 + if (name_set.n_hash) {
52703 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
52704 + PAGE_SIZE)
52705 + kfree(name_set.n_hash);
52706 + else
52707 + vfree(name_set.n_hash);
52708 + }
52709 +
52710 + if (inodev_set.i_hash) {
52711 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52712 + PAGE_SIZE)
52713 + kfree(inodev_set.i_hash);
52714 + else
52715 + vfree(inodev_set.i_hash);
52716 + }
52717 +
52718 + gr_free_uidset();
52719 +
52720 + memset(&name_set, 0, sizeof (struct name_db));
52721 + memset(&inodev_set, 0, sizeof (struct inodev_db));
52722 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52723 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52724 +
52725 + default_role = NULL;
52726 + kernel_role = NULL;
52727 + role_list = NULL;
52728 +
52729 + return;
52730 +}
52731 +
52732 +static __u32
52733 +count_user_objs(struct acl_object_label *userp)
52734 +{
52735 + struct acl_object_label o_tmp;
52736 + __u32 num = 0;
52737 +
52738 + while (userp) {
52739 + if (copy_from_user(&o_tmp, userp,
52740 + sizeof (struct acl_object_label)))
52741 + break;
52742 +
52743 + userp = o_tmp.prev;
52744 + num++;
52745 + }
52746 +
52747 + return num;
52748 +}
52749 +
52750 +static struct acl_subject_label *
52751 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52752 +
52753 +static int
52754 +copy_user_glob(struct acl_object_label *obj)
52755 +{
52756 + struct acl_object_label *g_tmp, **guser;
52757 + unsigned int len;
52758 + char *tmp;
52759 +
52760 + if (obj->globbed == NULL)
52761 + return 0;
52762 +
52763 + guser = &obj->globbed;
52764 + while (*guser) {
52765 + g_tmp = (struct acl_object_label *)
52766 + acl_alloc(sizeof (struct acl_object_label));
52767 + if (g_tmp == NULL)
52768 + return -ENOMEM;
52769 +
52770 + if (copy_from_user(g_tmp, *guser,
52771 + sizeof (struct acl_object_label)))
52772 + return -EFAULT;
52773 +
52774 + len = strnlen_user(g_tmp->filename, PATH_MAX);
52775 +
52776 + if (!len || len >= PATH_MAX)
52777 + return -EINVAL;
52778 +
52779 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52780 + return -ENOMEM;
52781 +
52782 + if (copy_from_user(tmp, g_tmp->filename, len))
52783 + return -EFAULT;
52784 + tmp[len-1] = '\0';
52785 + g_tmp->filename = tmp;
52786 +
52787 + *guser = g_tmp;
52788 + guser = &(g_tmp->next);
52789 + }
52790 +
52791 + return 0;
52792 +}
52793 +
52794 +static int
52795 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52796 + struct acl_role_label *role)
52797 +{
52798 + struct acl_object_label *o_tmp;
52799 + unsigned int len;
52800 + int ret;
52801 + char *tmp;
52802 +
52803 + while (userp) {
52804 + if ((o_tmp = (struct acl_object_label *)
52805 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
52806 + return -ENOMEM;
52807 +
52808 + if (copy_from_user(o_tmp, userp,
52809 + sizeof (struct acl_object_label)))
52810 + return -EFAULT;
52811 +
52812 + userp = o_tmp->prev;
52813 +
52814 + len = strnlen_user(o_tmp->filename, PATH_MAX);
52815 +
52816 + if (!len || len >= PATH_MAX)
52817 + return -EINVAL;
52818 +
52819 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52820 + return -ENOMEM;
52821 +
52822 + if (copy_from_user(tmp, o_tmp->filename, len))
52823 + return -EFAULT;
52824 + tmp[len-1] = '\0';
52825 + o_tmp->filename = tmp;
52826 +
52827 + insert_acl_obj_label(o_tmp, subj);
52828 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52829 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52830 + return -ENOMEM;
52831 +
52832 + ret = copy_user_glob(o_tmp);
52833 + if (ret)
52834 + return ret;
52835 +
52836 + if (o_tmp->nested) {
52837 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52838 + if (IS_ERR(o_tmp->nested))
52839 + return PTR_ERR(o_tmp->nested);
52840 +
52841 + /* insert into nested subject list */
52842 + o_tmp->nested->next = role->hash->first;
52843 + role->hash->first = o_tmp->nested;
52844 + }
52845 + }
52846 +
52847 + return 0;
52848 +}
52849 +
52850 +static __u32
52851 +count_user_subjs(struct acl_subject_label *userp)
52852 +{
52853 + struct acl_subject_label s_tmp;
52854 + __u32 num = 0;
52855 +
52856 + while (userp) {
52857 + if (copy_from_user(&s_tmp, userp,
52858 + sizeof (struct acl_subject_label)))
52859 + break;
52860 +
52861 + userp = s_tmp.prev;
52862 + /* do not count nested subjects against this count, since
52863 + they are not included in the hash table, but are
52864 + attached to objects. We have already counted
52865 + the subjects in userspace for the allocation
52866 + stack
52867 + */
52868 + if (!(s_tmp.mode & GR_NESTED))
52869 + num++;
52870 + }
52871 +
52872 + return num;
52873 +}
52874 +
52875 +static int
52876 +copy_user_allowedips(struct acl_role_label *rolep)
52877 +{
52878 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52879 +
52880 + ruserip = rolep->allowed_ips;
52881 +
52882 + while (ruserip) {
52883 + rlast = rtmp;
52884 +
52885 + if ((rtmp = (struct role_allowed_ip *)
52886 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52887 + return -ENOMEM;
52888 +
52889 + if (copy_from_user(rtmp, ruserip,
52890 + sizeof (struct role_allowed_ip)))
52891 + return -EFAULT;
52892 +
52893 + ruserip = rtmp->prev;
52894 +
52895 + if (!rlast) {
52896 + rtmp->prev = NULL;
52897 + rolep->allowed_ips = rtmp;
52898 + } else {
52899 + rlast->next = rtmp;
52900 + rtmp->prev = rlast;
52901 + }
52902 +
52903 + if (!ruserip)
52904 + rtmp->next = NULL;
52905 + }
52906 +
52907 + return 0;
52908 +}
52909 +
52910 +static int
52911 +copy_user_transitions(struct acl_role_label *rolep)
52912 +{
52913 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
52914 +
52915 + unsigned int len;
52916 + char *tmp;
52917 +
52918 + rusertp = rolep->transitions;
52919 +
52920 + while (rusertp) {
52921 + rlast = rtmp;
52922 +
52923 + if ((rtmp = (struct role_transition *)
52924 + acl_alloc(sizeof (struct role_transition))) == NULL)
52925 + return -ENOMEM;
52926 +
52927 + if (copy_from_user(rtmp, rusertp,
52928 + sizeof (struct role_transition)))
52929 + return -EFAULT;
52930 +
52931 + rusertp = rtmp->prev;
52932 +
52933 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52934 +
52935 + if (!len || len >= GR_SPROLE_LEN)
52936 + return -EINVAL;
52937 +
52938 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52939 + return -ENOMEM;
52940 +
52941 + if (copy_from_user(tmp, rtmp->rolename, len))
52942 + return -EFAULT;
52943 + tmp[len-1] = '\0';
52944 + rtmp->rolename = tmp;
52945 +
52946 + if (!rlast) {
52947 + rtmp->prev = NULL;
52948 + rolep->transitions = rtmp;
52949 + } else {
52950 + rlast->next = rtmp;
52951 + rtmp->prev = rlast;
52952 + }
52953 +
52954 + if (!rusertp)
52955 + rtmp->next = NULL;
52956 + }
52957 +
52958 + return 0;
52959 +}
52960 +
52961 +static struct acl_subject_label *
52962 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52963 +{
52964 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52965 + unsigned int len;
52966 + char *tmp;
52967 + __u32 num_objs;
52968 + struct acl_ip_label **i_tmp, *i_utmp2;
52969 + struct gr_hash_struct ghash;
52970 + struct subject_map *subjmap;
52971 + unsigned int i_num;
52972 + int err;
52973 +
52974 + s_tmp = lookup_subject_map(userp);
52975 +
52976 + /* we've already copied this subject into the kernel, just return
52977 + the reference to it, and don't copy it over again
52978 + */
52979 + if (s_tmp)
52980 + return(s_tmp);
52981 +
52982 + if ((s_tmp = (struct acl_subject_label *)
52983 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52984 + return ERR_PTR(-ENOMEM);
52985 +
52986 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52987 + if (subjmap == NULL)
52988 + return ERR_PTR(-ENOMEM);
52989 +
52990 + subjmap->user = userp;
52991 + subjmap->kernel = s_tmp;
52992 + insert_subj_map_entry(subjmap);
52993 +
52994 + if (copy_from_user(s_tmp, userp,
52995 + sizeof (struct acl_subject_label)))
52996 + return ERR_PTR(-EFAULT);
52997 +
52998 + len = strnlen_user(s_tmp->filename, PATH_MAX);
52999 +
53000 + if (!len || len >= PATH_MAX)
53001 + return ERR_PTR(-EINVAL);
53002 +
53003 + if ((tmp = (char *) acl_alloc(len)) == NULL)
53004 + return ERR_PTR(-ENOMEM);
53005 +
53006 + if (copy_from_user(tmp, s_tmp->filename, len))
53007 + return ERR_PTR(-EFAULT);
53008 + tmp[len-1] = '\0';
53009 + s_tmp->filename = tmp;
53010 +
53011 + if (!strcmp(s_tmp->filename, "/"))
53012 + role->root_label = s_tmp;
53013 +
53014 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
53015 + return ERR_PTR(-EFAULT);
53016 +
53017 + /* copy user and group transition tables */
53018 +
53019 + if (s_tmp->user_trans_num) {
53020 + uid_t *uidlist;
53021 +
53022 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
53023 + if (uidlist == NULL)
53024 + return ERR_PTR(-ENOMEM);
53025 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
53026 + return ERR_PTR(-EFAULT);
53027 +
53028 + s_tmp->user_transitions = uidlist;
53029 + }
53030 +
53031 + if (s_tmp->group_trans_num) {
53032 + gid_t *gidlist;
53033 +
53034 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
53035 + if (gidlist == NULL)
53036 + return ERR_PTR(-ENOMEM);
53037 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
53038 + return ERR_PTR(-EFAULT);
53039 +
53040 + s_tmp->group_transitions = gidlist;
53041 + }
53042 +
53043 + /* set up object hash table */
53044 + num_objs = count_user_objs(ghash.first);
53045 +
53046 + s_tmp->obj_hash_size = num_objs;
53047 + s_tmp->obj_hash =
53048 + (struct acl_object_label **)
53049 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
53050 +
53051 + if (!s_tmp->obj_hash)
53052 + return ERR_PTR(-ENOMEM);
53053 +
53054 + memset(s_tmp->obj_hash, 0,
53055 + s_tmp->obj_hash_size *
53056 + sizeof (struct acl_object_label *));
53057 +
53058 + /* add in objects */
53059 + err = copy_user_objs(ghash.first, s_tmp, role);
53060 +
53061 + if (err)
53062 + return ERR_PTR(err);
53063 +
53064 + /* set pointer for parent subject */
53065 + if (s_tmp->parent_subject) {
53066 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
53067 +
53068 + if (IS_ERR(s_tmp2))
53069 + return s_tmp2;
53070 +
53071 + s_tmp->parent_subject = s_tmp2;
53072 + }
53073 +
53074 + /* add in ip acls */
53075 +
53076 + if (!s_tmp->ip_num) {
53077 + s_tmp->ips = NULL;
53078 + goto insert;
53079 + }
53080 +
53081 + i_tmp =
53082 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
53083 + sizeof (struct acl_ip_label *));
53084 +
53085 + if (!i_tmp)
53086 + return ERR_PTR(-ENOMEM);
53087 +
53088 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
53089 + *(i_tmp + i_num) =
53090 + (struct acl_ip_label *)
53091 + acl_alloc(sizeof (struct acl_ip_label));
53092 + if (!*(i_tmp + i_num))
53093 + return ERR_PTR(-ENOMEM);
53094 +
53095 + if (copy_from_user
53096 + (&i_utmp2, s_tmp->ips + i_num,
53097 + sizeof (struct acl_ip_label *)))
53098 + return ERR_PTR(-EFAULT);
53099 +
53100 + if (copy_from_user
53101 + (*(i_tmp + i_num), i_utmp2,
53102 + sizeof (struct acl_ip_label)))
53103 + return ERR_PTR(-EFAULT);
53104 +
53105 + if ((*(i_tmp + i_num))->iface == NULL)
53106 + continue;
53107 +
53108 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
53109 + if (!len || len >= IFNAMSIZ)
53110 + return ERR_PTR(-EINVAL);
53111 + tmp = acl_alloc(len);
53112 + if (tmp == NULL)
53113 + return ERR_PTR(-ENOMEM);
53114 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
53115 + return ERR_PTR(-EFAULT);
53116 + (*(i_tmp + i_num))->iface = tmp;
53117 + }
53118 +
53119 + s_tmp->ips = i_tmp;
53120 +
53121 +insert:
53122 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
53123 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
53124 + return ERR_PTR(-ENOMEM);
53125 +
53126 + return s_tmp;
53127 +}
53128 +
53129 +static int
53130 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
53131 +{
53132 + struct acl_subject_label s_pre;
53133 + struct acl_subject_label * ret;
53134 + int err;
53135 +
53136 + while (userp) {
53137 + if (copy_from_user(&s_pre, userp,
53138 + sizeof (struct acl_subject_label)))
53139 + return -EFAULT;
53140 +
53141 + /* do not add nested subjects here, add
53142 + while parsing objects
53143 + */
53144 +
53145 + if (s_pre.mode & GR_NESTED) {
53146 + userp = s_pre.prev;
53147 + continue;
53148 + }
53149 +
53150 + ret = do_copy_user_subj(userp, role);
53151 +
53152 + err = PTR_ERR(ret);
53153 + if (IS_ERR(ret))
53154 + return err;
53155 +
53156 + insert_acl_subj_label(ret, role);
53157 +
53158 + userp = s_pre.prev;
53159 + }
53160 +
53161 + return 0;
53162 +}
53163 +
53164 +static int
53165 +copy_user_acl(struct gr_arg *arg)
53166 +{
53167 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
53168 + struct sprole_pw *sptmp;
53169 + struct gr_hash_struct *ghash;
53170 + uid_t *domainlist;
53171 + unsigned int r_num;
53172 + unsigned int len;
53173 + char *tmp;
53174 + int err = 0;
53175 + __u16 i;
53176 + __u32 num_subjs;
53177 +
53178 + /* we need a default and kernel role */
53179 + if (arg->role_db.num_roles < 2)
53180 + return -EINVAL;
53181 +
53182 + /* copy special role authentication info from userspace */
53183 +
53184 + num_sprole_pws = arg->num_sprole_pws;
53185 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
53186 +
53187 + if (!acl_special_roles && num_sprole_pws)
53188 + return -ENOMEM;
53189 +
53190 + for (i = 0; i < num_sprole_pws; i++) {
53191 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
53192 + if (!sptmp)
53193 + return -ENOMEM;
53194 + if (copy_from_user(sptmp, arg->sprole_pws + i,
53195 + sizeof (struct sprole_pw)))
53196 + return -EFAULT;
53197 +
53198 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
53199 +
53200 + if (!len || len >= GR_SPROLE_LEN)
53201 + return -EINVAL;
53202 +
53203 + if ((tmp = (char *) acl_alloc(len)) == NULL)
53204 + return -ENOMEM;
53205 +
53206 + if (copy_from_user(tmp, sptmp->rolename, len))
53207 + return -EFAULT;
53208 +
53209 + tmp[len-1] = '\0';
53210 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53211 + printk(KERN_ALERT "Copying special role %s\n", tmp);
53212 +#endif
53213 + sptmp->rolename = tmp;
53214 + acl_special_roles[i] = sptmp;
53215 + }
53216 +
53217 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
53218 +
53219 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
53220 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
53221 +
53222 + if (!r_tmp)
53223 + return -ENOMEM;
53224 +
53225 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
53226 + sizeof (struct acl_role_label *)))
53227 + return -EFAULT;
53228 +
53229 + if (copy_from_user(r_tmp, r_utmp2,
53230 + sizeof (struct acl_role_label)))
53231 + return -EFAULT;
53232 +
53233 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
53234 +
53235 + if (!len || len >= PATH_MAX)
53236 + return -EINVAL;
53237 +
53238 + if ((tmp = (char *) acl_alloc(len)) == NULL)
53239 + return -ENOMEM;
53240 +
53241 + if (copy_from_user(tmp, r_tmp->rolename, len))
53242 + return -EFAULT;
53243 +
53244 + tmp[len-1] = '\0';
53245 + r_tmp->rolename = tmp;
53246 +
53247 + if (!strcmp(r_tmp->rolename, "default")
53248 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
53249 + default_role = r_tmp;
53250 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
53251 + kernel_role = r_tmp;
53252 + }
53253 +
53254 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
53255 + return -ENOMEM;
53256 +
53257 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
53258 + return -EFAULT;
53259 +
53260 + r_tmp->hash = ghash;
53261 +
53262 + num_subjs = count_user_subjs(r_tmp->hash->first);
53263 +
53264 + r_tmp->subj_hash_size = num_subjs;
53265 + r_tmp->subj_hash =
53266 + (struct acl_subject_label **)
53267 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
53268 +
53269 + if (!r_tmp->subj_hash)
53270 + return -ENOMEM;
53271 +
53272 + err = copy_user_allowedips(r_tmp);
53273 + if (err)
53274 + return err;
53275 +
53276 + /* copy domain info */
53277 + if (r_tmp->domain_children != NULL) {
53278 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
53279 + if (domainlist == NULL)
53280 + return -ENOMEM;
53281 +
53282 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
53283 + return -EFAULT;
53284 +
53285 + r_tmp->domain_children = domainlist;
53286 + }
53287 +
53288 + err = copy_user_transitions(r_tmp);
53289 + if (err)
53290 + return err;
53291 +
53292 + memset(r_tmp->subj_hash, 0,
53293 + r_tmp->subj_hash_size *
53294 + sizeof (struct acl_subject_label *));
53295 +
53296 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
53297 +
53298 + if (err)
53299 + return err;
53300 +
53301 + /* set nested subject list to null */
53302 + r_tmp->hash->first = NULL;
53303 +
53304 + insert_acl_role_label(r_tmp);
53305 + }
53306 +
53307 + if (default_role == NULL || kernel_role == NULL)
53308 + return -EINVAL;
53309 +
53310 + return err;
53311 +}
53312 +
53313 +static int
53314 +gracl_init(struct gr_arg *args)
53315 +{
53316 + int error = 0;
53317 +
53318 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
53319 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
53320 +
53321 + if (init_variables(args)) {
53322 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
53323 + error = -ENOMEM;
53324 + free_variables();
53325 + goto out;
53326 + }
53327 +
53328 + error = copy_user_acl(args);
53329 + free_init_variables();
53330 + if (error) {
53331 + free_variables();
53332 + goto out;
53333 + }
53334 +
53335 + if ((error = gr_set_acls(0))) {
53336 + free_variables();
53337 + goto out;
53338 + }
53339 +
53340 + pax_open_kernel();
53341 + gr_status |= GR_READY;
53342 + pax_close_kernel();
53343 +
53344 + out:
53345 + return error;
53346 +}
53347 +
53348 +/* derived from glibc fnmatch() 0: match, 1: no match*/
53349 +
53350 +static int
53351 +glob_match(const char *p, const char *n)
53352 +{
53353 + char c;
53354 +
53355 + while ((c = *p++) != '\0') {
53356 + switch (c) {
53357 + case '?':
53358 + if (*n == '\0')
53359 + return 1;
53360 + else if (*n == '/')
53361 + return 1;
53362 + break;
53363 + case '\\':
53364 + if (*n != c)
53365 + return 1;
53366 + break;
53367 + case '*':
53368 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
53369 + if (*n == '/')
53370 + return 1;
53371 + else if (c == '?') {
53372 + if (*n == '\0')
53373 + return 1;
53374 + else
53375 + ++n;
53376 + }
53377 + }
53378 + if (c == '\0') {
53379 + return 0;
53380 + } else {
53381 + const char *endp;
53382 +
53383 + if ((endp = strchr(n, '/')) == NULL)
53384 + endp = n + strlen(n);
53385 +
53386 + if (c == '[') {
53387 + for (--p; n < endp; ++n)
53388 + if (!glob_match(p, n))
53389 + return 0;
53390 + } else if (c == '/') {
53391 + while (*n != '\0' && *n != '/')
53392 + ++n;
53393 + if (*n == '/' && !glob_match(p, n + 1))
53394 + return 0;
53395 + } else {
53396 + for (--p; n < endp; ++n)
53397 + if (*n == c && !glob_match(p, n))
53398 + return 0;
53399 + }
53400 +
53401 + return 1;
53402 + }
53403 + case '[':
53404 + {
53405 + int not;
53406 + char cold;
53407 +
53408 + if (*n == '\0' || *n == '/')
53409 + return 1;
53410 +
53411 + not = (*p == '!' || *p == '^');
53412 + if (not)
53413 + ++p;
53414 +
53415 + c = *p++;
53416 + for (;;) {
53417 + unsigned char fn = (unsigned char)*n;
53418 +
53419 + if (c == '\0')
53420 + return 1;
53421 + else {
53422 + if (c == fn)
53423 + goto matched;
53424 + cold = c;
53425 + c = *p++;
53426 +
53427 + if (c == '-' && *p != ']') {
53428 + unsigned char cend = *p++;
53429 +
53430 + if (cend == '\0')
53431 + return 1;
53432 +
53433 + if (cold <= fn && fn <= cend)
53434 + goto matched;
53435 +
53436 + c = *p++;
53437 + }
53438 + }
53439 +
53440 + if (c == ']')
53441 + break;
53442 + }
53443 + if (!not)
53444 + return 1;
53445 + break;
53446 + matched:
53447 + while (c != ']') {
53448 + if (c == '\0')
53449 + return 1;
53450 +
53451 + c = *p++;
53452 + }
53453 + if (not)
53454 + return 1;
53455 + }
53456 + break;
53457 + default:
53458 + if (c != *n)
53459 + return 1;
53460 + }
53461 +
53462 + ++n;
53463 + }
53464 +
53465 + if (*n == '\0')
53466 + return 0;
53467 +
53468 + if (*n == '/')
53469 + return 0;
53470 +
53471 + return 1;
53472 +}
53473 +
53474 +static struct acl_object_label *
53475 +chk_glob_label(struct acl_object_label *globbed,
53476 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
53477 +{
53478 + struct acl_object_label *tmp;
53479 +
53480 + if (*path == NULL)
53481 + *path = gr_to_filename_nolock(dentry, mnt);
53482 +
53483 + tmp = globbed;
53484 +
53485 + while (tmp) {
53486 + if (!glob_match(tmp->filename, *path))
53487 + return tmp;
53488 + tmp = tmp->next;
53489 + }
53490 +
53491 + return NULL;
53492 +}
53493 +
53494 +static struct acl_object_label *
53495 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53496 + const ino_t curr_ino, const dev_t curr_dev,
53497 + const struct acl_subject_label *subj, char **path, const int checkglob)
53498 +{
53499 + struct acl_subject_label *tmpsubj;
53500 + struct acl_object_label *retval;
53501 + struct acl_object_label *retval2;
53502 +
53503 + tmpsubj = (struct acl_subject_label *) subj;
53504 + read_lock(&gr_inode_lock);
53505 + do {
53506 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53507 + if (retval) {
53508 + if (checkglob && retval->globbed) {
53509 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
53510 + if (retval2)
53511 + retval = retval2;
53512 + }
53513 + break;
53514 + }
53515 + } while ((tmpsubj = tmpsubj->parent_subject));
53516 + read_unlock(&gr_inode_lock);
53517 +
53518 + return retval;
53519 +}
53520 +
53521 +static __inline__ struct acl_object_label *
53522 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53523 + struct dentry *curr_dentry,
53524 + const struct acl_subject_label *subj, char **path, const int checkglob)
53525 +{
53526 + int newglob = checkglob;
53527 + ino_t inode;
53528 + dev_t device;
53529 +
53530 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
53531 + as we don't want a / * rule to match instead of the / object
53532 + don't do this for create lookups that call this function though, since they're looking up
53533 + on the parent and thus need globbing checks on all paths
53534 + */
53535 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53536 + newglob = GR_NO_GLOB;
53537 +
53538 + spin_lock(&curr_dentry->d_lock);
53539 + inode = curr_dentry->d_inode->i_ino;
53540 + device = __get_dev(curr_dentry);
53541 + spin_unlock(&curr_dentry->d_lock);
53542 +
53543 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
53544 +}
53545 +
53546 +static struct acl_object_label *
53547 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53548 + const struct acl_subject_label *subj, char *path, const int checkglob)
53549 +{
53550 + struct dentry *dentry = (struct dentry *) l_dentry;
53551 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53552 + struct mount *real_mnt = real_mount(mnt);
53553 + struct acl_object_label *retval;
53554 + struct dentry *parent;
53555 +
53556 + write_seqlock(&rename_lock);
53557 + br_read_lock(&vfsmount_lock);
53558 +
53559 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53560 +#ifdef CONFIG_NET
53561 + mnt == sock_mnt ||
53562 +#endif
53563 +#ifdef CONFIG_HUGETLBFS
53564 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
53565 +#endif
53566 + /* ignore Eric Biederman */
53567 + IS_PRIVATE(l_dentry->d_inode))) {
53568 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
53569 + goto out;
53570 + }
53571 +
53572 + for (;;) {
53573 + if (dentry == real_root.dentry && mnt == real_root.mnt)
53574 + break;
53575 +
53576 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53577 + if (!mnt_has_parent(real_mnt))
53578 + break;
53579 +
53580 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53581 + if (retval != NULL)
53582 + goto out;
53583 +
53584 + dentry = real_mnt->mnt_mountpoint;
53585 + real_mnt = real_mnt->mnt_parent;
53586 + mnt = &real_mnt->mnt;
53587 + continue;
53588 + }
53589 +
53590 + parent = dentry->d_parent;
53591 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53592 + if (retval != NULL)
53593 + goto out;
53594 +
53595 + dentry = parent;
53596 + }
53597 +
53598 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53599 +
53600 + /* real_root is pinned so we don't have to hold a reference */
53601 + if (retval == NULL)
53602 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
53603 +out:
53604 + br_read_unlock(&vfsmount_lock);
53605 + write_sequnlock(&rename_lock);
53606 +
53607 + BUG_ON(retval == NULL);
53608 +
53609 + return retval;
53610 +}
53611 +
53612 +static __inline__ struct acl_object_label *
53613 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53614 + const struct acl_subject_label *subj)
53615 +{
53616 + char *path = NULL;
53617 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
53618 +}
53619 +
53620 +static __inline__ struct acl_object_label *
53621 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53622 + const struct acl_subject_label *subj)
53623 +{
53624 + char *path = NULL;
53625 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
53626 +}
53627 +
53628 +static __inline__ struct acl_object_label *
53629 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53630 + const struct acl_subject_label *subj, char *path)
53631 +{
53632 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
53633 +}
53634 +
53635 +static struct acl_subject_label *
53636 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53637 + const struct acl_role_label *role)
53638 +{
53639 + struct dentry *dentry = (struct dentry *) l_dentry;
53640 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53641 + struct mount *real_mnt = real_mount(mnt);
53642 + struct acl_subject_label *retval;
53643 + struct dentry *parent;
53644 +
53645 + write_seqlock(&rename_lock);
53646 + br_read_lock(&vfsmount_lock);
53647 +
53648 + for (;;) {
53649 + if (dentry == real_root.dentry && mnt == real_root.mnt)
53650 + break;
53651 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53652 + if (!mnt_has_parent(real_mnt))
53653 + break;
53654 +
53655 + spin_lock(&dentry->d_lock);
53656 + read_lock(&gr_inode_lock);
53657 + retval =
53658 + lookup_acl_subj_label(dentry->d_inode->i_ino,
53659 + __get_dev(dentry), role);
53660 + read_unlock(&gr_inode_lock);
53661 + spin_unlock(&dentry->d_lock);
53662 + if (retval != NULL)
53663 + goto out;
53664 +
53665 + dentry = real_mnt->mnt_mountpoint;
53666 + real_mnt = real_mnt->mnt_parent;
53667 + mnt = &real_mnt->mnt;
53668 + continue;
53669 + }
53670 +
53671 + spin_lock(&dentry->d_lock);
53672 + read_lock(&gr_inode_lock);
53673 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53674 + __get_dev(dentry), role);
53675 + read_unlock(&gr_inode_lock);
53676 + parent = dentry->d_parent;
53677 + spin_unlock(&dentry->d_lock);
53678 +
53679 + if (retval != NULL)
53680 + goto out;
53681 +
53682 + dentry = parent;
53683 + }
53684 +
53685 + spin_lock(&dentry->d_lock);
53686 + read_lock(&gr_inode_lock);
53687 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53688 + __get_dev(dentry), role);
53689 + read_unlock(&gr_inode_lock);
53690 + spin_unlock(&dentry->d_lock);
53691 +
53692 + if (unlikely(retval == NULL)) {
53693 + /* real_root is pinned, we don't need to hold a reference */
53694 + read_lock(&gr_inode_lock);
53695 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53696 + __get_dev(real_root.dentry), role);
53697 + read_unlock(&gr_inode_lock);
53698 + }
53699 +out:
53700 + br_read_unlock(&vfsmount_lock);
53701 + write_sequnlock(&rename_lock);
53702 +
53703 + BUG_ON(retval == NULL);
53704 +
53705 + return retval;
53706 +}
53707 +
53708 +static void
53709 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53710 +{
53711 + struct task_struct *task = current;
53712 + const struct cred *cred = current_cred();
53713 +
53714 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53715 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53716 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53717 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53718 +
53719 + return;
53720 +}
53721 +
53722 +static void
53723 +gr_log_learn_id_change(const char type, const unsigned int real,
53724 + const unsigned int effective, const unsigned int fs)
53725 +{
53726 + struct task_struct *task = current;
53727 + const struct cred *cred = current_cred();
53728 +
53729 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53730 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53731 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53732 + type, real, effective, fs, &task->signal->saved_ip);
53733 +
53734 + return;
53735 +}
53736 +
53737 +__u32
53738 +gr_search_file(const struct dentry * dentry, const __u32 mode,
53739 + const struct vfsmount * mnt)
53740 +{
53741 + __u32 retval = mode;
53742 + struct acl_subject_label *curracl;
53743 + struct acl_object_label *currobj;
53744 +
53745 + if (unlikely(!(gr_status & GR_READY)))
53746 + return (mode & ~GR_AUDITS);
53747 +
53748 + curracl = current->acl;
53749 +
53750 + currobj = chk_obj_label(dentry, mnt, curracl);
53751 + retval = currobj->mode & mode;
53752 +
53753 + /* if we're opening a specified transfer file for writing
53754 + (e.g. /dev/initctl), then transfer our role to init
53755 + */
53756 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53757 + current->role->roletype & GR_ROLE_PERSIST)) {
53758 + struct task_struct *task = init_pid_ns.child_reaper;
53759 +
53760 + if (task->role != current->role) {
53761 + task->acl_sp_role = 0;
53762 + task->acl_role_id = current->acl_role_id;
53763 + task->role = current->role;
53764 + rcu_read_lock();
53765 + read_lock(&grsec_exec_file_lock);
53766 + gr_apply_subject_to_task(task);
53767 + read_unlock(&grsec_exec_file_lock);
53768 + rcu_read_unlock();
53769 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53770 + }
53771 + }
53772 +
53773 + if (unlikely
53774 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53775 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53776 + __u32 new_mode = mode;
53777 +
53778 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53779 +
53780 + retval = new_mode;
53781 +
53782 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53783 + new_mode |= GR_INHERIT;
53784 +
53785 + if (!(mode & GR_NOLEARN))
53786 + gr_log_learn(dentry, mnt, new_mode);
53787 + }
53788 +
53789 + return retval;
53790 +}
53791 +
53792 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53793 + const struct dentry *parent,
53794 + const struct vfsmount *mnt)
53795 +{
53796 + struct name_entry *match;
53797 + struct acl_object_label *matchpo;
53798 + struct acl_subject_label *curracl;
53799 + char *path;
53800 +
53801 + if (unlikely(!(gr_status & GR_READY)))
53802 + return NULL;
53803 +
53804 + preempt_disable();
53805 + path = gr_to_filename_rbac(new_dentry, mnt);
53806 + match = lookup_name_entry_create(path);
53807 +
53808 + curracl = current->acl;
53809 +
53810 + if (match) {
53811 + read_lock(&gr_inode_lock);
53812 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53813 + read_unlock(&gr_inode_lock);
53814 +
53815 + if (matchpo) {
53816 + preempt_enable();
53817 + return matchpo;
53818 + }
53819 + }
53820 +
53821 + // lookup parent
53822 +
53823 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53824 +
53825 + preempt_enable();
53826 + return matchpo;
53827 +}
53828 +
53829 +__u32
53830 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53831 + const struct vfsmount * mnt, const __u32 mode)
53832 +{
53833 + struct acl_object_label *matchpo;
53834 + __u32 retval;
53835 +
53836 + if (unlikely(!(gr_status & GR_READY)))
53837 + return (mode & ~GR_AUDITS);
53838 +
53839 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
53840 +
53841 + retval = matchpo->mode & mode;
53842 +
53843 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53844 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53845 + __u32 new_mode = mode;
53846 +
53847 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53848 +
53849 + gr_log_learn(new_dentry, mnt, new_mode);
53850 + return new_mode;
53851 + }
53852 +
53853 + return retval;
53854 +}
53855 +
53856 +__u32
53857 +gr_check_link(const struct dentry * new_dentry,
53858 + const struct dentry * parent_dentry,
53859 + const struct vfsmount * parent_mnt,
53860 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53861 +{
53862 + struct acl_object_label *obj;
53863 + __u32 oldmode, newmode;
53864 + __u32 needmode;
53865 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53866 + GR_DELETE | GR_INHERIT;
53867 +
53868 + if (unlikely(!(gr_status & GR_READY)))
53869 + return (GR_CREATE | GR_LINK);
53870 +
53871 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53872 + oldmode = obj->mode;
53873 +
53874 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53875 + newmode = obj->mode;
53876 +
53877 + needmode = newmode & checkmodes;
53878 +
53879 + // old name for hardlink must have at least the permissions of the new name
53880 + if ((oldmode & needmode) != needmode)
53881 + goto bad;
53882 +
53883 + // if old name had restrictions/auditing, make sure the new name does as well
53884 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53885 +
53886 + // don't allow hardlinking of suid/sgid/fcapped files without permission
53887 + if (is_privileged_binary(old_dentry))
53888 + needmode |= GR_SETID;
53889 +
53890 + if ((newmode & needmode) != needmode)
53891 + goto bad;
53892 +
53893 + // enforce minimum permissions
53894 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53895 + return newmode;
53896 +bad:
53897 + needmode = oldmode;
53898 + if (is_privileged_binary(old_dentry))
53899 + needmode |= GR_SETID;
53900 +
53901 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53902 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53903 + return (GR_CREATE | GR_LINK);
53904 + } else if (newmode & GR_SUPPRESS)
53905 + return GR_SUPPRESS;
53906 + else
53907 + return 0;
53908 +}
53909 +
53910 +int
53911 +gr_check_hidden_task(const struct task_struct *task)
53912 +{
53913 + if (unlikely(!(gr_status & GR_READY)))
53914 + return 0;
53915 +
53916 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53917 + return 1;
53918 +
53919 + return 0;
53920 +}
53921 +
53922 +int
53923 +gr_check_protected_task(const struct task_struct *task)
53924 +{
53925 + if (unlikely(!(gr_status & GR_READY) || !task))
53926 + return 0;
53927 +
53928 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53929 + task->acl != current->acl)
53930 + return 1;
53931 +
53932 + return 0;
53933 +}
53934 +
53935 +int
53936 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53937 +{
53938 + struct task_struct *p;
53939 + int ret = 0;
53940 +
53941 + if (unlikely(!(gr_status & GR_READY) || !pid))
53942 + return ret;
53943 +
53944 + read_lock(&tasklist_lock);
53945 + do_each_pid_task(pid, type, p) {
53946 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53947 + p->acl != current->acl) {
53948 + ret = 1;
53949 + goto out;
53950 + }
53951 + } while_each_pid_task(pid, type, p);
53952 +out:
53953 + read_unlock(&tasklist_lock);
53954 +
53955 + return ret;
53956 +}
53957 +
53958 +void
53959 +gr_copy_label(struct task_struct *tsk)
53960 +{
53961 + tsk->signal->used_accept = 0;
53962 + tsk->acl_sp_role = 0;
53963 + tsk->acl_role_id = current->acl_role_id;
53964 + tsk->acl = current->acl;
53965 + tsk->role = current->role;
53966 + tsk->signal->curr_ip = current->signal->curr_ip;
53967 + tsk->signal->saved_ip = current->signal->saved_ip;
53968 + if (current->exec_file)
53969 + get_file(current->exec_file);
53970 + tsk->exec_file = current->exec_file;
53971 + tsk->is_writable = current->is_writable;
53972 + if (unlikely(current->signal->used_accept)) {
53973 + current->signal->curr_ip = 0;
53974 + current->signal->saved_ip = 0;
53975 + }
53976 +
53977 + return;
53978 +}
53979 +
53980 +static void
53981 +gr_set_proc_res(struct task_struct *task)
53982 +{
53983 + struct acl_subject_label *proc;
53984 + unsigned short i;
53985 +
53986 + proc = task->acl;
53987 +
53988 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53989 + return;
53990 +
53991 + for (i = 0; i < RLIM_NLIMITS; i++) {
53992 + if (!(proc->resmask & (1 << i)))
53993 + continue;
53994 +
53995 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53996 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53997 + }
53998 +
53999 + return;
54000 +}
54001 +
54002 +extern int __gr_process_user_ban(struct user_struct *user);
54003 +
54004 +int
54005 +gr_check_user_change(int real, int effective, int fs)
54006 +{
54007 + unsigned int i;
54008 + __u16 num;
54009 + uid_t *uidlist;
54010 + int curuid;
54011 + int realok = 0;
54012 + int effectiveok = 0;
54013 + int fsok = 0;
54014 +
54015 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54016 + struct user_struct *user;
54017 +
54018 + if (real == -1)
54019 + goto skipit;
54020 +
54021 + user = find_user(real);
54022 + if (user == NULL)
54023 + goto skipit;
54024 +
54025 + if (__gr_process_user_ban(user)) {
54026 + /* for find_user */
54027 + free_uid(user);
54028 + return 1;
54029 + }
54030 +
54031 + /* for find_user */
54032 + free_uid(user);
54033 +
54034 +skipit:
54035 +#endif
54036 +
54037 + if (unlikely(!(gr_status & GR_READY)))
54038 + return 0;
54039 +
54040 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54041 + gr_log_learn_id_change('u', real, effective, fs);
54042 +
54043 + num = current->acl->user_trans_num;
54044 + uidlist = current->acl->user_transitions;
54045 +
54046 + if (uidlist == NULL)
54047 + return 0;
54048 +
54049 + if (real == -1)
54050 + realok = 1;
54051 + if (effective == -1)
54052 + effectiveok = 1;
54053 + if (fs == -1)
54054 + fsok = 1;
54055 +
54056 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
54057 + for (i = 0; i < num; i++) {
54058 + curuid = (int)uidlist[i];
54059 + if (real == curuid)
54060 + realok = 1;
54061 + if (effective == curuid)
54062 + effectiveok = 1;
54063 + if (fs == curuid)
54064 + fsok = 1;
54065 + }
54066 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
54067 + for (i = 0; i < num; i++) {
54068 + curuid = (int)uidlist[i];
54069 + if (real == curuid)
54070 + break;
54071 + if (effective == curuid)
54072 + break;
54073 + if (fs == curuid)
54074 + break;
54075 + }
54076 + /* not in deny list */
54077 + if (i == num) {
54078 + realok = 1;
54079 + effectiveok = 1;
54080 + fsok = 1;
54081 + }
54082 + }
54083 +
54084 + if (realok && effectiveok && fsok)
54085 + return 0;
54086 + else {
54087 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
54088 + return 1;
54089 + }
54090 +}
54091 +
54092 +int
54093 +gr_check_group_change(int real, int effective, int fs)
54094 +{
54095 + unsigned int i;
54096 + __u16 num;
54097 + gid_t *gidlist;
54098 + int curgid;
54099 + int realok = 0;
54100 + int effectiveok = 0;
54101 + int fsok = 0;
54102 +
54103 + if (unlikely(!(gr_status & GR_READY)))
54104 + return 0;
54105 +
54106 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54107 + gr_log_learn_id_change('g', real, effective, fs);
54108 +
54109 + num = current->acl->group_trans_num;
54110 + gidlist = current->acl->group_transitions;
54111 +
54112 + if (gidlist == NULL)
54113 + return 0;
54114 +
54115 + if (real == -1)
54116 + realok = 1;
54117 + if (effective == -1)
54118 + effectiveok = 1;
54119 + if (fs == -1)
54120 + fsok = 1;
54121 +
54122 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
54123 + for (i = 0; i < num; i++) {
54124 + curgid = (int)gidlist[i];
54125 + if (real == curgid)
54126 + realok = 1;
54127 + if (effective == curgid)
54128 + effectiveok = 1;
54129 + if (fs == curgid)
54130 + fsok = 1;
54131 + }
54132 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
54133 + for (i = 0; i < num; i++) {
54134 + curgid = (int)gidlist[i];
54135 + if (real == curgid)
54136 + break;
54137 + if (effective == curgid)
54138 + break;
54139 + if (fs == curgid)
54140 + break;
54141 + }
54142 + /* not in deny list */
54143 + if (i == num) {
54144 + realok = 1;
54145 + effectiveok = 1;
54146 + fsok = 1;
54147 + }
54148 + }
54149 +
54150 + if (realok && effectiveok && fsok)
54151 + return 0;
54152 + else {
54153 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
54154 + return 1;
54155 + }
54156 +}
54157 +
54158 +extern int gr_acl_is_capable(const int cap);
54159 +
54160 +void
54161 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
54162 +{
54163 + struct acl_role_label *role = task->role;
54164 + struct acl_subject_label *subj = NULL;
54165 + struct acl_object_label *obj;
54166 + struct file *filp;
54167 +
54168 + if (unlikely(!(gr_status & GR_READY)))
54169 + return;
54170 +
54171 + filp = task->exec_file;
54172 +
54173 + /* kernel process, we'll give them the kernel role */
54174 + if (unlikely(!filp)) {
54175 + task->role = kernel_role;
54176 + task->acl = kernel_role->root_label;
54177 + return;
54178 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
54179 + role = lookup_acl_role_label(task, uid, gid);
54180 +
54181 + /* don't change the role if we're not a privileged process */
54182 + if (role && task->role != role &&
54183 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
54184 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
54185 + return;
54186 +
54187 + /* perform subject lookup in possibly new role
54188 + we can use this result below in the case where role == task->role
54189 + */
54190 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
54191 +
54192 + /* if we changed uid/gid, but result in the same role
54193 + and are using inheritance, don't lose the inherited subject
54194 + if current subject is other than what normal lookup
54195 + would result in, we arrived via inheritance, don't
54196 + lose subject
54197 + */
54198 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
54199 + (subj == task->acl)))
54200 + task->acl = subj;
54201 +
54202 + task->role = role;
54203 +
54204 + task->is_writable = 0;
54205 +
54206 + /* ignore additional mmap checks for processes that are writable
54207 + by the default ACL */
54208 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54209 + if (unlikely(obj->mode & GR_WRITE))
54210 + task->is_writable = 1;
54211 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54212 + if (unlikely(obj->mode & GR_WRITE))
54213 + task->is_writable = 1;
54214 +
54215 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54216 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54217 +#endif
54218 +
54219 + gr_set_proc_res(task);
54220 +
54221 + return;
54222 +}
54223 +
54224 +int
54225 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54226 + const int unsafe_flags)
54227 +{
54228 + struct task_struct *task = current;
54229 + struct acl_subject_label *newacl;
54230 + struct acl_object_label *obj;
54231 + __u32 retmode;
54232 +
54233 + if (unlikely(!(gr_status & GR_READY)))
54234 + return 0;
54235 +
54236 + newacl = chk_subj_label(dentry, mnt, task->role);
54237 +
54238 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
54239 + did an exec
54240 + */
54241 + rcu_read_lock();
54242 + read_lock(&tasklist_lock);
54243 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
54244 + (task->parent->acl->mode & GR_POVERRIDE))) {
54245 + read_unlock(&tasklist_lock);
54246 + rcu_read_unlock();
54247 + goto skip_check;
54248 + }
54249 + read_unlock(&tasklist_lock);
54250 + rcu_read_unlock();
54251 +
54252 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
54253 + !(task->role->roletype & GR_ROLE_GOD) &&
54254 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
54255 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
54256 + if (unsafe_flags & LSM_UNSAFE_SHARE)
54257 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
54258 + else
54259 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
54260 + return -EACCES;
54261 + }
54262 +
54263 +skip_check:
54264 +
54265 + obj = chk_obj_label(dentry, mnt, task->acl);
54266 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
54267 +
54268 + if (!(task->acl->mode & GR_INHERITLEARN) &&
54269 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
54270 + if (obj->nested)
54271 + task->acl = obj->nested;
54272 + else
54273 + task->acl = newacl;
54274 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
54275 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
54276 +
54277 + task->is_writable = 0;
54278 +
54279 + /* ignore additional mmap checks for processes that are writable
54280 + by the default ACL */
54281 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
54282 + if (unlikely(obj->mode & GR_WRITE))
54283 + task->is_writable = 1;
54284 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
54285 + if (unlikely(obj->mode & GR_WRITE))
54286 + task->is_writable = 1;
54287 +
54288 + gr_set_proc_res(task);
54289 +
54290 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54291 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54292 +#endif
54293 + return 0;
54294 +}
54295 +
54296 +/* always called with valid inodev ptr */
54297 +static void
54298 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
54299 +{
54300 + struct acl_object_label *matchpo;
54301 + struct acl_subject_label *matchps;
54302 + struct acl_subject_label *subj;
54303 + struct acl_role_label *role;
54304 + unsigned int x;
54305 +
54306 + FOR_EACH_ROLE_START(role)
54307 + FOR_EACH_SUBJECT_START(role, subj, x)
54308 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
54309 + matchpo->mode |= GR_DELETED;
54310 + FOR_EACH_SUBJECT_END(subj,x)
54311 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
54312 + if (subj->inode == ino && subj->device == dev)
54313 + subj->mode |= GR_DELETED;
54314 + FOR_EACH_NESTED_SUBJECT_END(subj)
54315 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
54316 + matchps->mode |= GR_DELETED;
54317 + FOR_EACH_ROLE_END(role)
54318 +
54319 + inodev->nentry->deleted = 1;
54320 +
54321 + return;
54322 +}
54323 +
54324 +void
54325 +gr_handle_delete(const ino_t ino, const dev_t dev)
54326 +{
54327 + struct inodev_entry *inodev;
54328 +
54329 + if (unlikely(!(gr_status & GR_READY)))
54330 + return;
54331 +
54332 + write_lock(&gr_inode_lock);
54333 + inodev = lookup_inodev_entry(ino, dev);
54334 + if (inodev != NULL)
54335 + do_handle_delete(inodev, ino, dev);
54336 + write_unlock(&gr_inode_lock);
54337 +
54338 + return;
54339 +}
54340 +
54341 +static void
54342 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54343 + const ino_t newinode, const dev_t newdevice,
54344 + struct acl_subject_label *subj)
54345 +{
54346 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54347 + struct acl_object_label *match;
54348 +
54349 + match = subj->obj_hash[index];
54350 +
54351 + while (match && (match->inode != oldinode ||
54352 + match->device != olddevice ||
54353 + !(match->mode & GR_DELETED)))
54354 + match = match->next;
54355 +
54356 + if (match && (match->inode == oldinode)
54357 + && (match->device == olddevice)
54358 + && (match->mode & GR_DELETED)) {
54359 + if (match->prev == NULL) {
54360 + subj->obj_hash[index] = match->next;
54361 + if (match->next != NULL)
54362 + match->next->prev = NULL;
54363 + } else {
54364 + match->prev->next = match->next;
54365 + if (match->next != NULL)
54366 + match->next->prev = match->prev;
54367 + }
54368 + match->prev = NULL;
54369 + match->next = NULL;
54370 + match->inode = newinode;
54371 + match->device = newdevice;
54372 + match->mode &= ~GR_DELETED;
54373 +
54374 + insert_acl_obj_label(match, subj);
54375 + }
54376 +
54377 + return;
54378 +}
54379 +
54380 +static void
54381 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54382 + const ino_t newinode, const dev_t newdevice,
54383 + struct acl_role_label *role)
54384 +{
54385 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54386 + struct acl_subject_label *match;
54387 +
54388 + match = role->subj_hash[index];
54389 +
54390 + while (match && (match->inode != oldinode ||
54391 + match->device != olddevice ||
54392 + !(match->mode & GR_DELETED)))
54393 + match = match->next;
54394 +
54395 + if (match && (match->inode == oldinode)
54396 + && (match->device == olddevice)
54397 + && (match->mode & GR_DELETED)) {
54398 + if (match->prev == NULL) {
54399 + role->subj_hash[index] = match->next;
54400 + if (match->next != NULL)
54401 + match->next->prev = NULL;
54402 + } else {
54403 + match->prev->next = match->next;
54404 + if (match->next != NULL)
54405 + match->next->prev = match->prev;
54406 + }
54407 + match->prev = NULL;
54408 + match->next = NULL;
54409 + match->inode = newinode;
54410 + match->device = newdevice;
54411 + match->mode &= ~GR_DELETED;
54412 +
54413 + insert_acl_subj_label(match, role);
54414 + }
54415 +
54416 + return;
54417 +}
54418 +
54419 +static void
54420 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54421 + const ino_t newinode, const dev_t newdevice)
54422 +{
54423 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54424 + struct inodev_entry *match;
54425 +
54426 + match = inodev_set.i_hash[index];
54427 +
54428 + while (match && (match->nentry->inode != oldinode ||
54429 + match->nentry->device != olddevice || !match->nentry->deleted))
54430 + match = match->next;
54431 +
54432 + if (match && (match->nentry->inode == oldinode)
54433 + && (match->nentry->device == olddevice) &&
54434 + match->nentry->deleted) {
54435 + if (match->prev == NULL) {
54436 + inodev_set.i_hash[index] = match->next;
54437 + if (match->next != NULL)
54438 + match->next->prev = NULL;
54439 + } else {
54440 + match->prev->next = match->next;
54441 + if (match->next != NULL)
54442 + match->next->prev = match->prev;
54443 + }
54444 + match->prev = NULL;
54445 + match->next = NULL;
54446 + match->nentry->inode = newinode;
54447 + match->nentry->device = newdevice;
54448 + match->nentry->deleted = 0;
54449 +
54450 + insert_inodev_entry(match);
54451 + }
54452 +
54453 + return;
54454 +}
54455 +
54456 +static void
54457 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
54458 +{
54459 + struct acl_subject_label *subj;
54460 + struct acl_role_label *role;
54461 + unsigned int x;
54462 +
54463 + FOR_EACH_ROLE_START(role)
54464 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
54465 +
54466 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
54467 + if ((subj->inode == ino) && (subj->device == dev)) {
54468 + subj->inode = ino;
54469 + subj->device = dev;
54470 + }
54471 + FOR_EACH_NESTED_SUBJECT_END(subj)
54472 + FOR_EACH_SUBJECT_START(role, subj, x)
54473 + update_acl_obj_label(matchn->inode, matchn->device,
54474 + ino, dev, subj);
54475 + FOR_EACH_SUBJECT_END(subj,x)
54476 + FOR_EACH_ROLE_END(role)
54477 +
54478 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
54479 +
54480 + return;
54481 +}
54482 +
54483 +static void
54484 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54485 + const struct vfsmount *mnt)
54486 +{
54487 + ino_t ino = dentry->d_inode->i_ino;
54488 + dev_t dev = __get_dev(dentry);
54489 +
54490 + __do_handle_create(matchn, ino, dev);
54491 +
54492 + return;
54493 +}
54494 +
54495 +void
54496 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54497 +{
54498 + struct name_entry *matchn;
54499 +
54500 + if (unlikely(!(gr_status & GR_READY)))
54501 + return;
54502 +
54503 + preempt_disable();
54504 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54505 +
54506 + if (unlikely((unsigned long)matchn)) {
54507 + write_lock(&gr_inode_lock);
54508 + do_handle_create(matchn, dentry, mnt);
54509 + write_unlock(&gr_inode_lock);
54510 + }
54511 + preempt_enable();
54512 +
54513 + return;
54514 +}
54515 +
54516 +void
54517 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54518 +{
54519 + struct name_entry *matchn;
54520 +
54521 + if (unlikely(!(gr_status & GR_READY)))
54522 + return;
54523 +
54524 + preempt_disable();
54525 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54526 +
54527 + if (unlikely((unsigned long)matchn)) {
54528 + write_lock(&gr_inode_lock);
54529 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54530 + write_unlock(&gr_inode_lock);
54531 + }
54532 + preempt_enable();
54533 +
54534 + return;
54535 +}
54536 +
54537 +void
54538 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54539 + struct dentry *old_dentry,
54540 + struct dentry *new_dentry,
54541 + struct vfsmount *mnt, const __u8 replace)
54542 +{
54543 + struct name_entry *matchn;
54544 + struct inodev_entry *inodev;
54545 + struct inode *inode = new_dentry->d_inode;
54546 + ino_t old_ino = old_dentry->d_inode->i_ino;
54547 + dev_t old_dev = __get_dev(old_dentry);
54548 +
54549 + /* vfs_rename swaps the name and parent link for old_dentry and
54550 + new_dentry
54551 + at this point, old_dentry has the new name, parent link, and inode
54552 + for the renamed file
54553 + if a file is being replaced by a rename, new_dentry has the inode
54554 + and name for the replaced file
54555 + */
54556 +
54557 + if (unlikely(!(gr_status & GR_READY)))
54558 + return;
54559 +
54560 + preempt_disable();
54561 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54562 +
54563 + /* we wouldn't have to check d_inode if it weren't for
54564 + NFS silly-renaming
54565 + */
54566 +
54567 + write_lock(&gr_inode_lock);
54568 + if (unlikely(replace && inode)) {
54569 + ino_t new_ino = inode->i_ino;
54570 + dev_t new_dev = __get_dev(new_dentry);
54571 +
54572 + inodev = lookup_inodev_entry(new_ino, new_dev);
54573 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
54574 + do_handle_delete(inodev, new_ino, new_dev);
54575 + }
54576 +
54577 + inodev = lookup_inodev_entry(old_ino, old_dev);
54578 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
54579 + do_handle_delete(inodev, old_ino, old_dev);
54580 +
54581 + if (unlikely((unsigned long)matchn))
54582 + do_handle_create(matchn, old_dentry, mnt);
54583 +
54584 + write_unlock(&gr_inode_lock);
54585 + preempt_enable();
54586 +
54587 + return;
54588 +}
54589 +
54590 +static int
54591 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54592 + unsigned char **sum)
54593 +{
54594 + struct acl_role_label *r;
54595 + struct role_allowed_ip *ipp;
54596 + struct role_transition *trans;
54597 + unsigned int i;
54598 + int found = 0;
54599 + u32 curr_ip = current->signal->curr_ip;
54600 +
54601 + current->signal->saved_ip = curr_ip;
54602 +
54603 + /* check transition table */
54604 +
54605 + for (trans = current->role->transitions; trans; trans = trans->next) {
54606 + if (!strcmp(rolename, trans->rolename)) {
54607 + found = 1;
54608 + break;
54609 + }
54610 + }
54611 +
54612 + if (!found)
54613 + return 0;
54614 +
54615 + /* handle special roles that do not require authentication
54616 + and check ip */
54617 +
54618 + FOR_EACH_ROLE_START(r)
54619 + if (!strcmp(rolename, r->rolename) &&
54620 + (r->roletype & GR_ROLE_SPECIAL)) {
54621 + found = 0;
54622 + if (r->allowed_ips != NULL) {
54623 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
54624 + if ((ntohl(curr_ip) & ipp->netmask) ==
54625 + (ntohl(ipp->addr) & ipp->netmask))
54626 + found = 1;
54627 + }
54628 + } else
54629 + found = 2;
54630 + if (!found)
54631 + return 0;
54632 +
54633 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54634 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54635 + *salt = NULL;
54636 + *sum = NULL;
54637 + return 1;
54638 + }
54639 + }
54640 + FOR_EACH_ROLE_END(r)
54641 +
54642 + for (i = 0; i < num_sprole_pws; i++) {
54643 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54644 + *salt = acl_special_roles[i]->salt;
54645 + *sum = acl_special_roles[i]->sum;
54646 + return 1;
54647 + }
54648 + }
54649 +
54650 + return 0;
54651 +}
54652 +
54653 +static void
54654 +assign_special_role(char *rolename)
54655 +{
54656 + struct acl_object_label *obj;
54657 + struct acl_role_label *r;
54658 + struct acl_role_label *assigned = NULL;
54659 + struct task_struct *tsk;
54660 + struct file *filp;
54661 +
54662 + FOR_EACH_ROLE_START(r)
54663 + if (!strcmp(rolename, r->rolename) &&
54664 + (r->roletype & GR_ROLE_SPECIAL)) {
54665 + assigned = r;
54666 + break;
54667 + }
54668 + FOR_EACH_ROLE_END(r)
54669 +
54670 + if (!assigned)
54671 + return;
54672 +
54673 + read_lock(&tasklist_lock);
54674 + read_lock(&grsec_exec_file_lock);
54675 +
54676 + tsk = current->real_parent;
54677 + if (tsk == NULL)
54678 + goto out_unlock;
54679 +
54680 + filp = tsk->exec_file;
54681 + if (filp == NULL)
54682 + goto out_unlock;
54683 +
54684 + tsk->is_writable = 0;
54685 +
54686 + tsk->acl_sp_role = 1;
54687 + tsk->acl_role_id = ++acl_sp_role_value;
54688 + tsk->role = assigned;
54689 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54690 +
54691 + /* ignore additional mmap checks for processes that are writable
54692 + by the default ACL */
54693 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54694 + if (unlikely(obj->mode & GR_WRITE))
54695 + tsk->is_writable = 1;
54696 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54697 + if (unlikely(obj->mode & GR_WRITE))
54698 + tsk->is_writable = 1;
54699 +
54700 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54701 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54702 +#endif
54703 +
54704 +out_unlock:
54705 + read_unlock(&grsec_exec_file_lock);
54706 + read_unlock(&tasklist_lock);
54707 + return;
54708 +}
54709 +
54710 +int gr_check_secure_terminal(struct task_struct *task)
54711 +{
54712 + struct task_struct *p, *p2, *p3;
54713 + struct files_struct *files;
54714 + struct fdtable *fdt;
54715 + struct file *our_file = NULL, *file;
54716 + int i;
54717 +
54718 + if (task->signal->tty == NULL)
54719 + return 1;
54720 +
54721 + files = get_files_struct(task);
54722 + if (files != NULL) {
54723 + rcu_read_lock();
54724 + fdt = files_fdtable(files);
54725 + for (i=0; i < fdt->max_fds; i++) {
54726 + file = fcheck_files(files, i);
54727 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54728 + get_file(file);
54729 + our_file = file;
54730 + }
54731 + }
54732 + rcu_read_unlock();
54733 + put_files_struct(files);
54734 + }
54735 +
54736 + if (our_file == NULL)
54737 + return 1;
54738 +
54739 + read_lock(&tasklist_lock);
54740 + do_each_thread(p2, p) {
54741 + files = get_files_struct(p);
54742 + if (files == NULL ||
54743 + (p->signal && p->signal->tty == task->signal->tty)) {
54744 + if (files != NULL)
54745 + put_files_struct(files);
54746 + continue;
54747 + }
54748 + rcu_read_lock();
54749 + fdt = files_fdtable(files);
54750 + for (i=0; i < fdt->max_fds; i++) {
54751 + file = fcheck_files(files, i);
54752 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54753 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54754 + p3 = task;
54755 + while (p3->pid > 0) {
54756 + if (p3 == p)
54757 + break;
54758 + p3 = p3->real_parent;
54759 + }
54760 + if (p3 == p)
54761 + break;
54762 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54763 + gr_handle_alertkill(p);
54764 + rcu_read_unlock();
54765 + put_files_struct(files);
54766 + read_unlock(&tasklist_lock);
54767 + fput(our_file);
54768 + return 0;
54769 + }
54770 + }
54771 + rcu_read_unlock();
54772 + put_files_struct(files);
54773 + } while_each_thread(p2, p);
54774 + read_unlock(&tasklist_lock);
54775 +
54776 + fput(our_file);
54777 + return 1;
54778 +}
54779 +
54780 +static int gr_rbac_disable(void *unused)
54781 +{
54782 + pax_open_kernel();
54783 + gr_status &= ~GR_READY;
54784 + pax_close_kernel();
54785 +
54786 + return 0;
54787 +}
54788 +
54789 +ssize_t
54790 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54791 +{
54792 + struct gr_arg_wrapper uwrap;
54793 + unsigned char *sprole_salt = NULL;
54794 + unsigned char *sprole_sum = NULL;
54795 + int error = sizeof (struct gr_arg_wrapper);
54796 + int error2 = 0;
54797 +
54798 + mutex_lock(&gr_dev_mutex);
54799 +
54800 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54801 + error = -EPERM;
54802 + goto out;
54803 + }
54804 +
54805 + if (count != sizeof (struct gr_arg_wrapper)) {
54806 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54807 + error = -EINVAL;
54808 + goto out;
54809 + }
54810 +
54811 +
54812 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54813 + gr_auth_expires = 0;
54814 + gr_auth_attempts = 0;
54815 + }
54816 +
54817 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54818 + error = -EFAULT;
54819 + goto out;
54820 + }
54821 +
54822 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54823 + error = -EINVAL;
54824 + goto out;
54825 + }
54826 +
54827 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54828 + error = -EFAULT;
54829 + goto out;
54830 + }
54831 +
54832 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54833 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54834 + time_after(gr_auth_expires, get_seconds())) {
54835 + error = -EBUSY;
54836 + goto out;
54837 + }
54838 +
54839 + /* if non-root trying to do anything other than use a special role,
54840 + do not attempt authentication, do not count towards authentication
54841 + locking
54842 + */
54843 +
54844 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54845 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54846 + !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
54847 + error = -EPERM;
54848 + goto out;
54849 + }
54850 +
54851 + /* ensure pw and special role name are null terminated */
54852 +
54853 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54854 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54855 +
54856 + /* Okay.
54857 + * We have our enough of the argument structure..(we have yet
54858 + * to copy_from_user the tables themselves) . Copy the tables
54859 + * only if we need them, i.e. for loading operations. */
54860 +
54861 + switch (gr_usermode->mode) {
54862 + case GR_STATUS:
54863 + if (gr_status & GR_READY) {
54864 + error = 1;
54865 + if (!gr_check_secure_terminal(current))
54866 + error = 3;
54867 + } else
54868 + error = 2;
54869 + goto out;
54870 + case GR_SHUTDOWN:
54871 + if ((gr_status & GR_READY)
54872 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54873 + stop_machine(gr_rbac_disable, NULL, NULL);
54874 + free_variables();
54875 + memset(gr_usermode, 0, sizeof (struct gr_arg));
54876 + memset(gr_system_salt, 0, GR_SALT_LEN);
54877 + memset(gr_system_sum, 0, GR_SHA_LEN);
54878 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54879 + } else if (gr_status & GR_READY) {
54880 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54881 + error = -EPERM;
54882 + } else {
54883 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54884 + error = -EAGAIN;
54885 + }
54886 + break;
54887 + case GR_ENABLE:
54888 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54889 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54890 + else {
54891 + if (gr_status & GR_READY)
54892 + error = -EAGAIN;
54893 + else
54894 + error = error2;
54895 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54896 + }
54897 + break;
54898 + case GR_RELOAD:
54899 + if (!(gr_status & GR_READY)) {
54900 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54901 + error = -EAGAIN;
54902 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54903 + stop_machine(gr_rbac_disable, NULL, NULL);
54904 + free_variables();
54905 + error2 = gracl_init(gr_usermode);
54906 + if (!error2)
54907 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54908 + else {
54909 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54910 + error = error2;
54911 + }
54912 + } else {
54913 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54914 + error = -EPERM;
54915 + }
54916 + break;
54917 + case GR_SEGVMOD:
54918 + if (unlikely(!(gr_status & GR_READY))) {
54919 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54920 + error = -EAGAIN;
54921 + break;
54922 + }
54923 +
54924 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54925 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54926 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54927 + struct acl_subject_label *segvacl;
54928 + segvacl =
54929 + lookup_acl_subj_label(gr_usermode->segv_inode,
54930 + gr_usermode->segv_device,
54931 + current->role);
54932 + if (segvacl) {
54933 + segvacl->crashes = 0;
54934 + segvacl->expires = 0;
54935 + }
54936 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54937 + gr_remove_uid(gr_usermode->segv_uid);
54938 + }
54939 + } else {
54940 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54941 + error = -EPERM;
54942 + }
54943 + break;
54944 + case GR_SPROLE:
54945 + case GR_SPROLEPAM:
54946 + if (unlikely(!(gr_status & GR_READY))) {
54947 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54948 + error = -EAGAIN;
54949 + break;
54950 + }
54951 +
54952 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54953 + current->role->expires = 0;
54954 + current->role->auth_attempts = 0;
54955 + }
54956 +
54957 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54958 + time_after(current->role->expires, get_seconds())) {
54959 + error = -EBUSY;
54960 + goto out;
54961 + }
54962 +
54963 + if (lookup_special_role_auth
54964 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54965 + && ((!sprole_salt && !sprole_sum)
54966 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54967 + char *p = "";
54968 + assign_special_role(gr_usermode->sp_role);
54969 + read_lock(&tasklist_lock);
54970 + if (current->real_parent)
54971 + p = current->real_parent->role->rolename;
54972 + read_unlock(&tasklist_lock);
54973 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54974 + p, acl_sp_role_value);
54975 + } else {
54976 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54977 + error = -EPERM;
54978 + if(!(current->role->auth_attempts++))
54979 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54980 +
54981 + goto out;
54982 + }
54983 + break;
54984 + case GR_UNSPROLE:
54985 + if (unlikely(!(gr_status & GR_READY))) {
54986 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54987 + error = -EAGAIN;
54988 + break;
54989 + }
54990 +
54991 + if (current->role->roletype & GR_ROLE_SPECIAL) {
54992 + char *p = "";
54993 + int i = 0;
54994 +
54995 + read_lock(&tasklist_lock);
54996 + if (current->real_parent) {
54997 + p = current->real_parent->role->rolename;
54998 + i = current->real_parent->acl_role_id;
54999 + }
55000 + read_unlock(&tasklist_lock);
55001 +
55002 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
55003 + gr_set_acls(1);
55004 + } else {
55005 + error = -EPERM;
55006 + goto out;
55007 + }
55008 + break;
55009 + default:
55010 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
55011 + error = -EINVAL;
55012 + break;
55013 + }
55014 +
55015 + if (error != -EPERM)
55016 + goto out;
55017 +
55018 + if(!(gr_auth_attempts++))
55019 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
55020 +
55021 + out:
55022 + mutex_unlock(&gr_dev_mutex);
55023 + return error;
55024 +}
55025 +
55026 +/* must be called with
55027 + rcu_read_lock();
55028 + read_lock(&tasklist_lock);
55029 + read_lock(&grsec_exec_file_lock);
55030 +*/
55031 +int gr_apply_subject_to_task(struct task_struct *task)
55032 +{
55033 + struct acl_object_label *obj;
55034 + char *tmpname;
55035 + struct acl_subject_label *tmpsubj;
55036 + struct file *filp;
55037 + struct name_entry *nmatch;
55038 +
55039 + filp = task->exec_file;
55040 + if (filp == NULL)
55041 + return 0;
55042 +
55043 + /* the following is to apply the correct subject
55044 + on binaries running when the RBAC system
55045 + is enabled, when the binaries have been
55046 + replaced or deleted since their execution
55047 + -----
55048 + when the RBAC system starts, the inode/dev
55049 + from exec_file will be one the RBAC system
55050 + is unaware of. It only knows the inode/dev
55051 + of the present file on disk, or the absence
55052 + of it.
55053 + */
55054 + preempt_disable();
55055 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
55056 +
55057 + nmatch = lookup_name_entry(tmpname);
55058 + preempt_enable();
55059 + tmpsubj = NULL;
55060 + if (nmatch) {
55061 + if (nmatch->deleted)
55062 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
55063 + else
55064 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
55065 + if (tmpsubj != NULL)
55066 + task->acl = tmpsubj;
55067 + }
55068 + if (tmpsubj == NULL)
55069 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
55070 + task->role);
55071 + if (task->acl) {
55072 + task->is_writable = 0;
55073 + /* ignore additional mmap checks for processes that are writable
55074 + by the default ACL */
55075 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55076 + if (unlikely(obj->mode & GR_WRITE))
55077 + task->is_writable = 1;
55078 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
55079 + if (unlikely(obj->mode & GR_WRITE))
55080 + task->is_writable = 1;
55081 +
55082 + gr_set_proc_res(task);
55083 +
55084 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55085 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55086 +#endif
55087 + } else {
55088 + return 1;
55089 + }
55090 +
55091 + return 0;
55092 +}
55093 +
55094 +int
55095 +gr_set_acls(const int type)
55096 +{
55097 + struct task_struct *task, *task2;
55098 + struct acl_role_label *role = current->role;
55099 + __u16 acl_role_id = current->acl_role_id;
55100 + const struct cred *cred;
55101 + int ret;
55102 +
55103 + rcu_read_lock();
55104 + read_lock(&tasklist_lock);
55105 + read_lock(&grsec_exec_file_lock);
55106 + do_each_thread(task2, task) {
55107 + /* check to see if we're called from the exit handler,
55108 + if so, only replace ACLs that have inherited the admin
55109 + ACL */
55110 +
55111 + if (type && (task->role != role ||
55112 + task->acl_role_id != acl_role_id))
55113 + continue;
55114 +
55115 + task->acl_role_id = 0;
55116 + task->acl_sp_role = 0;
55117 +
55118 + if (task->exec_file) {
55119 + cred = __task_cred(task);
55120 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
55121 + ret = gr_apply_subject_to_task(task);
55122 + if (ret) {
55123 + read_unlock(&grsec_exec_file_lock);
55124 + read_unlock(&tasklist_lock);
55125 + rcu_read_unlock();
55126 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
55127 + return ret;
55128 + }
55129 + } else {
55130 + // it's a kernel process
55131 + task->role = kernel_role;
55132 + task->acl = kernel_role->root_label;
55133 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
55134 + task->acl->mode &= ~GR_PROCFIND;
55135 +#endif
55136 + }
55137 + } while_each_thread(task2, task);
55138 + read_unlock(&grsec_exec_file_lock);
55139 + read_unlock(&tasklist_lock);
55140 + rcu_read_unlock();
55141 +
55142 + return 0;
55143 +}
55144 +
55145 +void
55146 +gr_learn_resource(const struct task_struct *task,
55147 + const int res, const unsigned long wanted, const int gt)
55148 +{
55149 + struct acl_subject_label *acl;
55150 + const struct cred *cred;
55151 +
55152 + if (unlikely((gr_status & GR_READY) &&
55153 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
55154 + goto skip_reslog;
55155 +
55156 +#ifdef CONFIG_GRKERNSEC_RESLOG
55157 + gr_log_resource(task, res, wanted, gt);
55158 +#endif
55159 + skip_reslog:
55160 +
55161 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
55162 + return;
55163 +
55164 + acl = task->acl;
55165 +
55166 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
55167 + !(acl->resmask & (1 << (unsigned short) res))))
55168 + return;
55169 +
55170 + if (wanted >= acl->res[res].rlim_cur) {
55171 + unsigned long res_add;
55172 +
55173 + res_add = wanted;
55174 + switch (res) {
55175 + case RLIMIT_CPU:
55176 + res_add += GR_RLIM_CPU_BUMP;
55177 + break;
55178 + case RLIMIT_FSIZE:
55179 + res_add += GR_RLIM_FSIZE_BUMP;
55180 + break;
55181 + case RLIMIT_DATA:
55182 + res_add += GR_RLIM_DATA_BUMP;
55183 + break;
55184 + case RLIMIT_STACK:
55185 + res_add += GR_RLIM_STACK_BUMP;
55186 + break;
55187 + case RLIMIT_CORE:
55188 + res_add += GR_RLIM_CORE_BUMP;
55189 + break;
55190 + case RLIMIT_RSS:
55191 + res_add += GR_RLIM_RSS_BUMP;
55192 + break;
55193 + case RLIMIT_NPROC:
55194 + res_add += GR_RLIM_NPROC_BUMP;
55195 + break;
55196 + case RLIMIT_NOFILE:
55197 + res_add += GR_RLIM_NOFILE_BUMP;
55198 + break;
55199 + case RLIMIT_MEMLOCK:
55200 + res_add += GR_RLIM_MEMLOCK_BUMP;
55201 + break;
55202 + case RLIMIT_AS:
55203 + res_add += GR_RLIM_AS_BUMP;
55204 + break;
55205 + case RLIMIT_LOCKS:
55206 + res_add += GR_RLIM_LOCKS_BUMP;
55207 + break;
55208 + case RLIMIT_SIGPENDING:
55209 + res_add += GR_RLIM_SIGPENDING_BUMP;
55210 + break;
55211 + case RLIMIT_MSGQUEUE:
55212 + res_add += GR_RLIM_MSGQUEUE_BUMP;
55213 + break;
55214 + case RLIMIT_NICE:
55215 + res_add += GR_RLIM_NICE_BUMP;
55216 + break;
55217 + case RLIMIT_RTPRIO:
55218 + res_add += GR_RLIM_RTPRIO_BUMP;
55219 + break;
55220 + case RLIMIT_RTTIME:
55221 + res_add += GR_RLIM_RTTIME_BUMP;
55222 + break;
55223 + }
55224 +
55225 + acl->res[res].rlim_cur = res_add;
55226 +
55227 + if (wanted > acl->res[res].rlim_max)
55228 + acl->res[res].rlim_max = res_add;
55229 +
55230 + /* only log the subject filename, since resource logging is supported for
55231 + single-subject learning only */
55232 + rcu_read_lock();
55233 + cred = __task_cred(task);
55234 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55235 + task->role->roletype, cred->uid, cred->gid, acl->filename,
55236 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
55237 + "", (unsigned long) res, &task->signal->saved_ip);
55238 + rcu_read_unlock();
55239 + }
55240 +
55241 + return;
55242 +}
55243 +
55244 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
55245 +void
55246 +pax_set_initial_flags(struct linux_binprm *bprm)
55247 +{
55248 + struct task_struct *task = current;
55249 + struct acl_subject_label *proc;
55250 + unsigned long flags;
55251 +
55252 + if (unlikely(!(gr_status & GR_READY)))
55253 + return;
55254 +
55255 + flags = pax_get_flags(task);
55256 +
55257 + proc = task->acl;
55258 +
55259 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
55260 + flags &= ~MF_PAX_PAGEEXEC;
55261 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
55262 + flags &= ~MF_PAX_SEGMEXEC;
55263 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
55264 + flags &= ~MF_PAX_RANDMMAP;
55265 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
55266 + flags &= ~MF_PAX_EMUTRAMP;
55267 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
55268 + flags &= ~MF_PAX_MPROTECT;
55269 +
55270 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
55271 + flags |= MF_PAX_PAGEEXEC;
55272 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
55273 + flags |= MF_PAX_SEGMEXEC;
55274 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
55275 + flags |= MF_PAX_RANDMMAP;
55276 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
55277 + flags |= MF_PAX_EMUTRAMP;
55278 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
55279 + flags |= MF_PAX_MPROTECT;
55280 +
55281 + pax_set_flags(task, flags);
55282 +
55283 + return;
55284 +}
55285 +#endif
55286 +
55287 +int
55288 +gr_handle_proc_ptrace(struct task_struct *task)
55289 +{
55290 + struct file *filp;
55291 + struct task_struct *tmp = task;
55292 + struct task_struct *curtemp = current;
55293 + __u32 retmode;
55294 +
55295 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55296 + if (unlikely(!(gr_status & GR_READY)))
55297 + return 0;
55298 +#endif
55299 +
55300 + read_lock(&tasklist_lock);
55301 + read_lock(&grsec_exec_file_lock);
55302 + filp = task->exec_file;
55303 +
55304 + while (tmp->pid > 0) {
55305 + if (tmp == curtemp)
55306 + break;
55307 + tmp = tmp->real_parent;
55308 + }
55309 +
55310 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
55311 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
55312 + read_unlock(&grsec_exec_file_lock);
55313 + read_unlock(&tasklist_lock);
55314 + return 1;
55315 + }
55316 +
55317 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55318 + if (!(gr_status & GR_READY)) {
55319 + read_unlock(&grsec_exec_file_lock);
55320 + read_unlock(&tasklist_lock);
55321 + return 0;
55322 + }
55323 +#endif
55324 +
55325 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
55326 + read_unlock(&grsec_exec_file_lock);
55327 + read_unlock(&tasklist_lock);
55328 +
55329 + if (retmode & GR_NOPTRACE)
55330 + return 1;
55331 +
55332 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
55333 + && (current->acl != task->acl || (current->acl != current->role->root_label
55334 + && current->pid != task->pid)))
55335 + return 1;
55336 +
55337 + return 0;
55338 +}
55339 +
55340 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
55341 +{
55342 + if (unlikely(!(gr_status & GR_READY)))
55343 + return;
55344 +
55345 + if (!(current->role->roletype & GR_ROLE_GOD))
55346 + return;
55347 +
55348 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55349 + p->role->rolename, gr_task_roletype_to_char(p),
55350 + p->acl->filename);
55351 +}
55352 +
55353 +int
55354 +gr_handle_ptrace(struct task_struct *task, const long request)
55355 +{
55356 + struct task_struct *tmp = task;
55357 + struct task_struct *curtemp = current;
55358 + __u32 retmode;
55359 +
55360 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55361 + if (unlikely(!(gr_status & GR_READY)))
55362 + return 0;
55363 +#endif
55364 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
55365 + read_lock(&tasklist_lock);
55366 + while (tmp->pid > 0) {
55367 + if (tmp == curtemp)
55368 + break;
55369 + tmp = tmp->real_parent;
55370 + }
55371 +
55372 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
55373 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55374 + read_unlock(&tasklist_lock);
55375 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55376 + return 1;
55377 + }
55378 + read_unlock(&tasklist_lock);
55379 + }
55380 +
55381 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55382 + if (!(gr_status & GR_READY))
55383 + return 0;
55384 +#endif
55385 +
55386 + read_lock(&grsec_exec_file_lock);
55387 + if (unlikely(!task->exec_file)) {
55388 + read_unlock(&grsec_exec_file_lock);
55389 + return 0;
55390 + }
55391 +
55392 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55393 + read_unlock(&grsec_exec_file_lock);
55394 +
55395 + if (retmode & GR_NOPTRACE) {
55396 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55397 + return 1;
55398 + }
55399 +
55400 + if (retmode & GR_PTRACERD) {
55401 + switch (request) {
55402 + case PTRACE_SEIZE:
55403 + case PTRACE_POKETEXT:
55404 + case PTRACE_POKEDATA:
55405 + case PTRACE_POKEUSR:
55406 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55407 + case PTRACE_SETREGS:
55408 + case PTRACE_SETFPREGS:
55409 +#endif
55410 +#ifdef CONFIG_X86
55411 + case PTRACE_SETFPXREGS:
55412 +#endif
55413 +#ifdef CONFIG_ALTIVEC
55414 + case PTRACE_SETVRREGS:
55415 +#endif
55416 + return 1;
55417 + default:
55418 + return 0;
55419 + }
55420 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
55421 + !(current->role->roletype & GR_ROLE_GOD) &&
55422 + (current->acl != task->acl)) {
55423 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55424 + return 1;
55425 + }
55426 +
55427 + return 0;
55428 +}
55429 +
55430 +static int is_writable_mmap(const struct file *filp)
55431 +{
55432 + struct task_struct *task = current;
55433 + struct acl_object_label *obj, *obj2;
55434 +
55435 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
55436 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
55437 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55438 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55439 + task->role->root_label);
55440 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55441 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55442 + return 1;
55443 + }
55444 + }
55445 + return 0;
55446 +}
55447 +
55448 +int
55449 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55450 +{
55451 + __u32 mode;
55452 +
55453 + if (unlikely(!file || !(prot & PROT_EXEC)))
55454 + return 1;
55455 +
55456 + if (is_writable_mmap(file))
55457 + return 0;
55458 +
55459 + mode =
55460 + gr_search_file(file->f_path.dentry,
55461 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55462 + file->f_path.mnt);
55463 +
55464 + if (!gr_tpe_allow(file))
55465 + return 0;
55466 +
55467 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55468 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55469 + return 0;
55470 + } else if (unlikely(!(mode & GR_EXEC))) {
55471 + return 0;
55472 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55473 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55474 + return 1;
55475 + }
55476 +
55477 + return 1;
55478 +}
55479 +
55480 +int
55481 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55482 +{
55483 + __u32 mode;
55484 +
55485 + if (unlikely(!file || !(prot & PROT_EXEC)))
55486 + return 1;
55487 +
55488 + if (is_writable_mmap(file))
55489 + return 0;
55490 +
55491 + mode =
55492 + gr_search_file(file->f_path.dentry,
55493 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55494 + file->f_path.mnt);
55495 +
55496 + if (!gr_tpe_allow(file))
55497 + return 0;
55498 +
55499 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55500 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55501 + return 0;
55502 + } else if (unlikely(!(mode & GR_EXEC))) {
55503 + return 0;
55504 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55505 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55506 + return 1;
55507 + }
55508 +
55509 + return 1;
55510 +}
55511 +
55512 +void
55513 +gr_acl_handle_psacct(struct task_struct *task, const long code)
55514 +{
55515 + unsigned long runtime;
55516 + unsigned long cputime;
55517 + unsigned int wday, cday;
55518 + __u8 whr, chr;
55519 + __u8 wmin, cmin;
55520 + __u8 wsec, csec;
55521 + struct timespec timeval;
55522 +
55523 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55524 + !(task->acl->mode & GR_PROCACCT)))
55525 + return;
55526 +
55527 + do_posix_clock_monotonic_gettime(&timeval);
55528 + runtime = timeval.tv_sec - task->start_time.tv_sec;
55529 + wday = runtime / (3600 * 24);
55530 + runtime -= wday * (3600 * 24);
55531 + whr = runtime / 3600;
55532 + runtime -= whr * 3600;
55533 + wmin = runtime / 60;
55534 + runtime -= wmin * 60;
55535 + wsec = runtime;
55536 +
55537 + cputime = (task->utime + task->stime) / HZ;
55538 + cday = cputime / (3600 * 24);
55539 + cputime -= cday * (3600 * 24);
55540 + chr = cputime / 3600;
55541 + cputime -= chr * 3600;
55542 + cmin = cputime / 60;
55543 + cputime -= cmin * 60;
55544 + csec = cputime;
55545 +
55546 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55547 +
55548 + return;
55549 +}
55550 +
55551 +void gr_set_kernel_label(struct task_struct *task)
55552 +{
55553 + if (gr_status & GR_READY) {
55554 + task->role = kernel_role;
55555 + task->acl = kernel_role->root_label;
55556 + }
55557 + return;
55558 +}
55559 +
55560 +#ifdef CONFIG_TASKSTATS
55561 +int gr_is_taskstats_denied(int pid)
55562 +{
55563 + struct task_struct *task;
55564 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55565 + const struct cred *cred;
55566 +#endif
55567 + int ret = 0;
55568 +
55569 + /* restrict taskstats viewing to un-chrooted root users
55570 + who have the 'view' subject flag if the RBAC system is enabled
55571 + */
55572 +
55573 + rcu_read_lock();
55574 + read_lock(&tasklist_lock);
55575 + task = find_task_by_vpid(pid);
55576 + if (task) {
55577 +#ifdef CONFIG_GRKERNSEC_CHROOT
55578 + if (proc_is_chrooted(task))
55579 + ret = -EACCES;
55580 +#endif
55581 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55582 + cred = __task_cred(task);
55583 +#ifdef CONFIG_GRKERNSEC_PROC_USER
55584 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
55585 + ret = -EACCES;
55586 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55587 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55588 + ret = -EACCES;
55589 +#endif
55590 +#endif
55591 + if (gr_status & GR_READY) {
55592 + if (!(task->acl->mode & GR_VIEW))
55593 + ret = -EACCES;
55594 + }
55595 + } else
55596 + ret = -ENOENT;
55597 +
55598 + read_unlock(&tasklist_lock);
55599 + rcu_read_unlock();
55600 +
55601 + return ret;
55602 +}
55603 +#endif
55604 +
55605 +/* AUXV entries are filled via a descendant of search_binary_handler
55606 + after we've already applied the subject for the target
55607 +*/
55608 +int gr_acl_enable_at_secure(void)
55609 +{
55610 + if (unlikely(!(gr_status & GR_READY)))
55611 + return 0;
55612 +
55613 + if (current->acl->mode & GR_ATSECURE)
55614 + return 1;
55615 +
55616 + return 0;
55617 +}
55618 +
55619 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55620 +{
55621 + struct task_struct *task = current;
55622 + struct dentry *dentry = file->f_path.dentry;
55623 + struct vfsmount *mnt = file->f_path.mnt;
55624 + struct acl_object_label *obj, *tmp;
55625 + struct acl_subject_label *subj;
55626 + unsigned int bufsize;
55627 + int is_not_root;
55628 + char *path;
55629 + dev_t dev = __get_dev(dentry);
55630 +
55631 + if (unlikely(!(gr_status & GR_READY)))
55632 + return 1;
55633 +
55634 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55635 + return 1;
55636 +
55637 + /* ignore Eric Biederman */
55638 + if (IS_PRIVATE(dentry->d_inode))
55639 + return 1;
55640 +
55641 + subj = task->acl;
55642 + read_lock(&gr_inode_lock);
55643 + do {
55644 + obj = lookup_acl_obj_label(ino, dev, subj);
55645 + if (obj != NULL) {
55646 + read_unlock(&gr_inode_lock);
55647 + return (obj->mode & GR_FIND) ? 1 : 0;
55648 + }
55649 + } while ((subj = subj->parent_subject));
55650 + read_unlock(&gr_inode_lock);
55651 +
55652 + /* this is purely an optimization since we're looking for an object
55653 + for the directory we're doing a readdir on
55654 + if it's possible for any globbed object to match the entry we're
55655 + filling into the directory, then the object we find here will be
55656 + an anchor point with attached globbed objects
55657 + */
55658 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55659 + if (obj->globbed == NULL)
55660 + return (obj->mode & GR_FIND) ? 1 : 0;
55661 +
55662 + is_not_root = ((obj->filename[0] == '/') &&
55663 + (obj->filename[1] == '\0')) ? 0 : 1;
55664 + bufsize = PAGE_SIZE - namelen - is_not_root;
55665 +
55666 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
55667 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55668 + return 1;
55669 +
55670 + preempt_disable();
55671 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55672 + bufsize);
55673 +
55674 + bufsize = strlen(path);
55675 +
55676 + /* if base is "/", don't append an additional slash */
55677 + if (is_not_root)
55678 + *(path + bufsize) = '/';
55679 + memcpy(path + bufsize + is_not_root, name, namelen);
55680 + *(path + bufsize + namelen + is_not_root) = '\0';
55681 +
55682 + tmp = obj->globbed;
55683 + while (tmp) {
55684 + if (!glob_match(tmp->filename, path)) {
55685 + preempt_enable();
55686 + return (tmp->mode & GR_FIND) ? 1 : 0;
55687 + }
55688 + tmp = tmp->next;
55689 + }
55690 + preempt_enable();
55691 + return (obj->mode & GR_FIND) ? 1 : 0;
55692 +}
55693 +
55694 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55695 +EXPORT_SYMBOL(gr_acl_is_enabled);
55696 +#endif
55697 +EXPORT_SYMBOL(gr_learn_resource);
55698 +EXPORT_SYMBOL(gr_set_kernel_label);
55699 +#ifdef CONFIG_SECURITY
55700 +EXPORT_SYMBOL(gr_check_user_change);
55701 +EXPORT_SYMBOL(gr_check_group_change);
55702 +#endif
55703 +
55704 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55705 new file mode 100644
55706 index 0000000..34fefda
55707 --- /dev/null
55708 +++ b/grsecurity/gracl_alloc.c
55709 @@ -0,0 +1,105 @@
55710 +#include <linux/kernel.h>
55711 +#include <linux/mm.h>
55712 +#include <linux/slab.h>
55713 +#include <linux/vmalloc.h>
55714 +#include <linux/gracl.h>
55715 +#include <linux/grsecurity.h>
55716 +
55717 +static unsigned long alloc_stack_next = 1;
55718 +static unsigned long alloc_stack_size = 1;
55719 +static void **alloc_stack;
55720 +
55721 +static __inline__ int
55722 +alloc_pop(void)
55723 +{
55724 + if (alloc_stack_next == 1)
55725 + return 0;
55726 +
55727 + kfree(alloc_stack[alloc_stack_next - 2]);
55728 +
55729 + alloc_stack_next--;
55730 +
55731 + return 1;
55732 +}
55733 +
55734 +static __inline__ int
55735 +alloc_push(void *buf)
55736 +{
55737 + if (alloc_stack_next >= alloc_stack_size)
55738 + return 1;
55739 +
55740 + alloc_stack[alloc_stack_next - 1] = buf;
55741 +
55742 + alloc_stack_next++;
55743 +
55744 + return 0;
55745 +}
55746 +
55747 +void *
55748 +acl_alloc(unsigned long len)
55749 +{
55750 + void *ret = NULL;
55751 +
55752 + if (!len || len > PAGE_SIZE)
55753 + goto out;
55754 +
55755 + ret = kmalloc(len, GFP_KERNEL);
55756 +
55757 + if (ret) {
55758 + if (alloc_push(ret)) {
55759 + kfree(ret);
55760 + ret = NULL;
55761 + }
55762 + }
55763 +
55764 +out:
55765 + return ret;
55766 +}
55767 +
55768 +void *
55769 +acl_alloc_num(unsigned long num, unsigned long len)
55770 +{
55771 + if (!len || (num > (PAGE_SIZE / len)))
55772 + return NULL;
55773 +
55774 + return acl_alloc(num * len);
55775 +}
55776 +
55777 +void
55778 +acl_free_all(void)
55779 +{
55780 + if (gr_acl_is_enabled() || !alloc_stack)
55781 + return;
55782 +
55783 + while (alloc_pop()) ;
55784 +
55785 + if (alloc_stack) {
55786 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55787 + kfree(alloc_stack);
55788 + else
55789 + vfree(alloc_stack);
55790 + }
55791 +
55792 + alloc_stack = NULL;
55793 + alloc_stack_size = 1;
55794 + alloc_stack_next = 1;
55795 +
55796 + return;
55797 +}
55798 +
55799 +int
55800 +acl_alloc_stack_init(unsigned long size)
55801 +{
55802 + if ((size * sizeof (void *)) <= PAGE_SIZE)
55803 + alloc_stack =
55804 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55805 + else
55806 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
55807 +
55808 + alloc_stack_size = size;
55809 +
55810 + if (!alloc_stack)
55811 + return 0;
55812 + else
55813 + return 1;
55814 +}
55815 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55816 new file mode 100644
55817 index 0000000..6d21049
55818 --- /dev/null
55819 +++ b/grsecurity/gracl_cap.c
55820 @@ -0,0 +1,110 @@
55821 +#include <linux/kernel.h>
55822 +#include <linux/module.h>
55823 +#include <linux/sched.h>
55824 +#include <linux/gracl.h>
55825 +#include <linux/grsecurity.h>
55826 +#include <linux/grinternal.h>
55827 +
55828 +extern const char *captab_log[];
55829 +extern int captab_log_entries;
55830 +
55831 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55832 +{
55833 + struct acl_subject_label *curracl;
55834 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55835 + kernel_cap_t cap_audit = __cap_empty_set;
55836 +
55837 + if (!gr_acl_is_enabled())
55838 + return 1;
55839 +
55840 + curracl = task->acl;
55841 +
55842 + cap_drop = curracl->cap_lower;
55843 + cap_mask = curracl->cap_mask;
55844 + cap_audit = curracl->cap_invert_audit;
55845 +
55846 + while ((curracl = curracl->parent_subject)) {
55847 + /* if the cap isn't specified in the current computed mask but is specified in the
55848 + current level subject, and is lowered in the current level subject, then add
55849 + it to the set of dropped capabilities
55850 + otherwise, add the current level subject's mask to the current computed mask
55851 + */
55852 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55853 + cap_raise(cap_mask, cap);
55854 + if (cap_raised(curracl->cap_lower, cap))
55855 + cap_raise(cap_drop, cap);
55856 + if (cap_raised(curracl->cap_invert_audit, cap))
55857 + cap_raise(cap_audit, cap);
55858 + }
55859 + }
55860 +
55861 + if (!cap_raised(cap_drop, cap)) {
55862 + if (cap_raised(cap_audit, cap))
55863 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55864 + return 1;
55865 + }
55866 +
55867 + curracl = task->acl;
55868 +
55869 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55870 + && cap_raised(cred->cap_effective, cap)) {
55871 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55872 + task->role->roletype, cred->uid,
55873 + cred->gid, task->exec_file ?
55874 + gr_to_filename(task->exec_file->f_path.dentry,
55875 + task->exec_file->f_path.mnt) : curracl->filename,
55876 + curracl->filename, 0UL,
55877 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55878 + return 1;
55879 + }
55880 +
55881 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55882 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55883 +
55884 + return 0;
55885 +}
55886 +
55887 +int
55888 +gr_acl_is_capable(const int cap)
55889 +{
55890 + return gr_task_acl_is_capable(current, current_cred(), cap);
55891 +}
55892 +
55893 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55894 +{
55895 + struct acl_subject_label *curracl;
55896 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55897 +
55898 + if (!gr_acl_is_enabled())
55899 + return 1;
55900 +
55901 + curracl = task->acl;
55902 +
55903 + cap_drop = curracl->cap_lower;
55904 + cap_mask = curracl->cap_mask;
55905 +
55906 + while ((curracl = curracl->parent_subject)) {
55907 + /* if the cap isn't specified in the current computed mask but is specified in the
55908 + current level subject, and is lowered in the current level subject, then add
55909 + it to the set of dropped capabilities
55910 + otherwise, add the current level subject's mask to the current computed mask
55911 + */
55912 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55913 + cap_raise(cap_mask, cap);
55914 + if (cap_raised(curracl->cap_lower, cap))
55915 + cap_raise(cap_drop, cap);
55916 + }
55917 + }
55918 +
55919 + if (!cap_raised(cap_drop, cap))
55920 + return 1;
55921 +
55922 + return 0;
55923 +}
55924 +
55925 +int
55926 +gr_acl_is_capable_nolog(const int cap)
55927 +{
55928 + return gr_task_acl_is_capable_nolog(current, cap);
55929 +}
55930 +
55931 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55932 new file mode 100644
55933 index 0000000..d28e241
55934 --- /dev/null
55935 +++ b/grsecurity/gracl_fs.c
55936 @@ -0,0 +1,437 @@
55937 +#include <linux/kernel.h>
55938 +#include <linux/sched.h>
55939 +#include <linux/types.h>
55940 +#include <linux/fs.h>
55941 +#include <linux/file.h>
55942 +#include <linux/stat.h>
55943 +#include <linux/grsecurity.h>
55944 +#include <linux/grinternal.h>
55945 +#include <linux/gracl.h>
55946 +
55947 +umode_t
55948 +gr_acl_umask(void)
55949 +{
55950 + if (unlikely(!gr_acl_is_enabled()))
55951 + return 0;
55952 +
55953 + return current->role->umask;
55954 +}
55955 +
55956 +__u32
55957 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55958 + const struct vfsmount * mnt)
55959 +{
55960 + __u32 mode;
55961 +
55962 + if (unlikely(!dentry->d_inode))
55963 + return GR_FIND;
55964 +
55965 + mode =
55966 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55967 +
55968 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55969 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55970 + return mode;
55971 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55972 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55973 + return 0;
55974 + } else if (unlikely(!(mode & GR_FIND)))
55975 + return 0;
55976 +
55977 + return GR_FIND;
55978 +}
55979 +
55980 +__u32
55981 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55982 + int acc_mode)
55983 +{
55984 + __u32 reqmode = GR_FIND;
55985 + __u32 mode;
55986 +
55987 + if (unlikely(!dentry->d_inode))
55988 + return reqmode;
55989 +
55990 + if (acc_mode & MAY_APPEND)
55991 + reqmode |= GR_APPEND;
55992 + else if (acc_mode & MAY_WRITE)
55993 + reqmode |= GR_WRITE;
55994 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55995 + reqmode |= GR_READ;
55996 +
55997 + mode =
55998 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55999 + mnt);
56000 +
56001 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
56002 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
56003 + reqmode & GR_READ ? " reading" : "",
56004 + reqmode & GR_WRITE ? " writing" : reqmode &
56005 + GR_APPEND ? " appending" : "");
56006 + return reqmode;
56007 + } else
56008 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
56009 + {
56010 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
56011 + reqmode & GR_READ ? " reading" : "",
56012 + reqmode & GR_WRITE ? " writing" : reqmode &
56013 + GR_APPEND ? " appending" : "");
56014 + return 0;
56015 + } else if (unlikely((mode & reqmode) != reqmode))
56016 + return 0;
56017 +
56018 + return reqmode;
56019 +}
56020 +
56021 +__u32
56022 +gr_acl_handle_creat(const struct dentry * dentry,
56023 + const struct dentry * p_dentry,
56024 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56025 + const int imode)
56026 +{
56027 + __u32 reqmode = GR_WRITE | GR_CREATE;
56028 + __u32 mode;
56029 +
56030 + if (acc_mode & MAY_APPEND)
56031 + reqmode |= GR_APPEND;
56032 + // if a directory was required or the directory already exists, then
56033 + // don't count this open as a read
56034 + if ((acc_mode & MAY_READ) &&
56035 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
56036 + reqmode |= GR_READ;
56037 + if ((open_flags & O_CREAT) &&
56038 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
56039 + reqmode |= GR_SETID;
56040 +
56041 + mode =
56042 + gr_check_create(dentry, p_dentry, p_mnt,
56043 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56044 +
56045 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
56046 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
56047 + reqmode & GR_READ ? " reading" : "",
56048 + reqmode & GR_WRITE ? " writing" : reqmode &
56049 + GR_APPEND ? " appending" : "");
56050 + return reqmode;
56051 + } else
56052 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
56053 + {
56054 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
56055 + reqmode & GR_READ ? " reading" : "",
56056 + reqmode & GR_WRITE ? " writing" : reqmode &
56057 + GR_APPEND ? " appending" : "");
56058 + return 0;
56059 + } else if (unlikely((mode & reqmode) != reqmode))
56060 + return 0;
56061 +
56062 + return reqmode;
56063 +}
56064 +
56065 +__u32
56066 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
56067 + const int fmode)
56068 +{
56069 + __u32 mode, reqmode = GR_FIND;
56070 +
56071 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
56072 + reqmode |= GR_EXEC;
56073 + if (fmode & S_IWOTH)
56074 + reqmode |= GR_WRITE;
56075 + if (fmode & S_IROTH)
56076 + reqmode |= GR_READ;
56077 +
56078 + mode =
56079 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
56080 + mnt);
56081 +
56082 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
56083 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
56084 + reqmode & GR_READ ? " reading" : "",
56085 + reqmode & GR_WRITE ? " writing" : "",
56086 + reqmode & GR_EXEC ? " executing" : "");
56087 + return reqmode;
56088 + } else
56089 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
56090 + {
56091 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
56092 + reqmode & GR_READ ? " reading" : "",
56093 + reqmode & GR_WRITE ? " writing" : "",
56094 + reqmode & GR_EXEC ? " executing" : "");
56095 + return 0;
56096 + } else if (unlikely((mode & reqmode) != reqmode))
56097 + return 0;
56098 +
56099 + return reqmode;
56100 +}
56101 +
56102 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
56103 +{
56104 + __u32 mode;
56105 +
56106 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
56107 +
56108 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56109 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
56110 + return mode;
56111 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56112 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
56113 + return 0;
56114 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
56115 + return 0;
56116 +
56117 + return (reqmode);
56118 +}
56119 +
56120 +__u32
56121 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56122 +{
56123 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
56124 +}
56125 +
56126 +__u32
56127 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
56128 +{
56129 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
56130 +}
56131 +
56132 +__u32
56133 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
56134 +{
56135 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
56136 +}
56137 +
56138 +__u32
56139 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
56140 +{
56141 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
56142 +}
56143 +
56144 +__u32
56145 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
56146 + umode_t *modeptr)
56147 +{
56148 + umode_t mode;
56149 +
56150 + *modeptr &= ~gr_acl_umask();
56151 + mode = *modeptr;
56152 +
56153 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
56154 + return 1;
56155 +
56156 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
56157 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
56158 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56159 + GR_CHMOD_ACL_MSG);
56160 + } else {
56161 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
56162 + }
56163 +}
56164 +
56165 +__u32
56166 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
56167 +{
56168 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
56169 +}
56170 +
56171 +__u32
56172 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
56173 +{
56174 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
56175 +}
56176 +
56177 +__u32
56178 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
56179 +{
56180 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
56181 +}
56182 +
56183 +__u32
56184 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
56185 +{
56186 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
56187 + GR_UNIXCONNECT_ACL_MSG);
56188 +}
56189 +
56190 +/* hardlinks require at minimum create and link permission,
56191 + any additional privilege required is based on the
56192 + privilege of the file being linked to
56193 +*/
56194 +__u32
56195 +gr_acl_handle_link(const struct dentry * new_dentry,
56196 + const struct dentry * parent_dentry,
56197 + const struct vfsmount * parent_mnt,
56198 + const struct dentry * old_dentry,
56199 + const struct vfsmount * old_mnt, const char *to)
56200 +{
56201 + __u32 mode;
56202 + __u32 needmode = GR_CREATE | GR_LINK;
56203 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
56204 +
56205 + mode =
56206 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
56207 + old_mnt);
56208 +
56209 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
56210 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56211 + return mode;
56212 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56213 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56214 + return 0;
56215 + } else if (unlikely((mode & needmode) != needmode))
56216 + return 0;
56217 +
56218 + return 1;
56219 +}
56220 +
56221 +__u32
56222 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56223 + const struct dentry * parent_dentry,
56224 + const struct vfsmount * parent_mnt, const char *from)
56225 +{
56226 + __u32 needmode = GR_WRITE | GR_CREATE;
56227 + __u32 mode;
56228 +
56229 + mode =
56230 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
56231 + GR_CREATE | GR_AUDIT_CREATE |
56232 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
56233 +
56234 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
56235 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56236 + return mode;
56237 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56238 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56239 + return 0;
56240 + } else if (unlikely((mode & needmode) != needmode))
56241 + return 0;
56242 +
56243 + return (GR_WRITE | GR_CREATE);
56244 +}
56245 +
56246 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
56247 +{
56248 + __u32 mode;
56249 +
56250 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56251 +
56252 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56253 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
56254 + return mode;
56255 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56256 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
56257 + return 0;
56258 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
56259 + return 0;
56260 +
56261 + return (reqmode);
56262 +}
56263 +
56264 +__u32
56265 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56266 + const struct dentry * parent_dentry,
56267 + const struct vfsmount * parent_mnt,
56268 + const int mode)
56269 +{
56270 + __u32 reqmode = GR_WRITE | GR_CREATE;
56271 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
56272 + reqmode |= GR_SETID;
56273 +
56274 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56275 + reqmode, GR_MKNOD_ACL_MSG);
56276 +}
56277 +
56278 +__u32
56279 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
56280 + const struct dentry *parent_dentry,
56281 + const struct vfsmount *parent_mnt)
56282 +{
56283 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56284 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
56285 +}
56286 +
56287 +#define RENAME_CHECK_SUCCESS(old, new) \
56288 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
56289 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
56290 +
56291 +int
56292 +gr_acl_handle_rename(struct dentry *new_dentry,
56293 + struct dentry *parent_dentry,
56294 + const struct vfsmount *parent_mnt,
56295 + struct dentry *old_dentry,
56296 + struct inode *old_parent_inode,
56297 + struct vfsmount *old_mnt, const char *newname)
56298 +{
56299 + __u32 comp1, comp2;
56300 + int error = 0;
56301 +
56302 + if (unlikely(!gr_acl_is_enabled()))
56303 + return 0;
56304 +
56305 + if (!new_dentry->d_inode) {
56306 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
56307 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
56308 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
56309 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
56310 + GR_DELETE | GR_AUDIT_DELETE |
56311 + GR_AUDIT_READ | GR_AUDIT_WRITE |
56312 + GR_SUPPRESS, old_mnt);
56313 + } else {
56314 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
56315 + GR_CREATE | GR_DELETE |
56316 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
56317 + GR_AUDIT_READ | GR_AUDIT_WRITE |
56318 + GR_SUPPRESS, parent_mnt);
56319 + comp2 =
56320 + gr_search_file(old_dentry,
56321 + GR_READ | GR_WRITE | GR_AUDIT_READ |
56322 + GR_DELETE | GR_AUDIT_DELETE |
56323 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
56324 + }
56325 +
56326 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
56327 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
56328 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56329 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
56330 + && !(comp2 & GR_SUPPRESS)) {
56331 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56332 + error = -EACCES;
56333 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
56334 + error = -EACCES;
56335 +
56336 + return error;
56337 +}
56338 +
56339 +void
56340 +gr_acl_handle_exit(void)
56341 +{
56342 + u16 id;
56343 + char *rolename;
56344 + struct file *exec_file;
56345 +
56346 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56347 + !(current->role->roletype & GR_ROLE_PERSIST))) {
56348 + id = current->acl_role_id;
56349 + rolename = current->role->rolename;
56350 + gr_set_acls(1);
56351 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56352 + }
56353 +
56354 + write_lock(&grsec_exec_file_lock);
56355 + exec_file = current->exec_file;
56356 + current->exec_file = NULL;
56357 + write_unlock(&grsec_exec_file_lock);
56358 +
56359 + if (exec_file)
56360 + fput(exec_file);
56361 +}
56362 +
56363 +int
56364 +gr_acl_handle_procpidmem(const struct task_struct *task)
56365 +{
56366 + if (unlikely(!gr_acl_is_enabled()))
56367 + return 0;
56368 +
56369 + if (task != current && task->acl->mode & GR_PROTPROCFD)
56370 + return -EACCES;
56371 +
56372 + return 0;
56373 +}
56374 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56375 new file mode 100644
56376 index 0000000..58800a7
56377 --- /dev/null
56378 +++ b/grsecurity/gracl_ip.c
56379 @@ -0,0 +1,384 @@
56380 +#include <linux/kernel.h>
56381 +#include <asm/uaccess.h>
56382 +#include <asm/errno.h>
56383 +#include <net/sock.h>
56384 +#include <linux/file.h>
56385 +#include <linux/fs.h>
56386 +#include <linux/net.h>
56387 +#include <linux/in.h>
56388 +#include <linux/skbuff.h>
56389 +#include <linux/ip.h>
56390 +#include <linux/udp.h>
56391 +#include <linux/types.h>
56392 +#include <linux/sched.h>
56393 +#include <linux/netdevice.h>
56394 +#include <linux/inetdevice.h>
56395 +#include <linux/gracl.h>
56396 +#include <linux/grsecurity.h>
56397 +#include <linux/grinternal.h>
56398 +
56399 +#define GR_BIND 0x01
56400 +#define GR_CONNECT 0x02
56401 +#define GR_INVERT 0x04
56402 +#define GR_BINDOVERRIDE 0x08
56403 +#define GR_CONNECTOVERRIDE 0x10
56404 +#define GR_SOCK_FAMILY 0x20
56405 +
56406 +static const char * gr_protocols[IPPROTO_MAX] = {
56407 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56408 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56409 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56410 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56411 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56412 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56413 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56414 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56415 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56416 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56417 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56418 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56419 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56420 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56421 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56422 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56423 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56424 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56425 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56426 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56427 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56428 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56429 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56430 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56431 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56432 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56433 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56434 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56435 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56436 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56437 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56438 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56439 + };
56440 +
56441 +static const char * gr_socktypes[SOCK_MAX] = {
56442 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56443 + "unknown:7", "unknown:8", "unknown:9", "packet"
56444 + };
56445 +
56446 +static const char * gr_sockfamilies[AF_MAX+1] = {
56447 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56448 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
56449 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56450 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
56451 + };
56452 +
56453 +const char *
56454 +gr_proto_to_name(unsigned char proto)
56455 +{
56456 + return gr_protocols[proto];
56457 +}
56458 +
56459 +const char *
56460 +gr_socktype_to_name(unsigned char type)
56461 +{
56462 + return gr_socktypes[type];
56463 +}
56464 +
56465 +const char *
56466 +gr_sockfamily_to_name(unsigned char family)
56467 +{
56468 + return gr_sockfamilies[family];
56469 +}
56470 +
56471 +int
56472 +gr_search_socket(const int domain, const int type, const int protocol)
56473 +{
56474 + struct acl_subject_label *curr;
56475 + const struct cred *cred = current_cred();
56476 +
56477 + if (unlikely(!gr_acl_is_enabled()))
56478 + goto exit;
56479 +
56480 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
56481 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56482 + goto exit; // let the kernel handle it
56483 +
56484 + curr = current->acl;
56485 +
56486 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56487 + /* the family is allowed, if this is PF_INET allow it only if
56488 + the extra sock type/protocol checks pass */
56489 + if (domain == PF_INET)
56490 + goto inet_check;
56491 + goto exit;
56492 + } else {
56493 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56494 + __u32 fakeip = 0;
56495 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56496 + current->role->roletype, cred->uid,
56497 + cred->gid, current->exec_file ?
56498 + gr_to_filename(current->exec_file->f_path.dentry,
56499 + current->exec_file->f_path.mnt) :
56500 + curr->filename, curr->filename,
56501 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56502 + &current->signal->saved_ip);
56503 + goto exit;
56504 + }
56505 + goto exit_fail;
56506 + }
56507 +
56508 +inet_check:
56509 + /* the rest of this checking is for IPv4 only */
56510 + if (!curr->ips)
56511 + goto exit;
56512 +
56513 + if ((curr->ip_type & (1 << type)) &&
56514 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56515 + goto exit;
56516 +
56517 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56518 + /* we don't place acls on raw sockets , and sometimes
56519 + dgram/ip sockets are opened for ioctl and not
56520 + bind/connect, so we'll fake a bind learn log */
56521 + if (type == SOCK_RAW || type == SOCK_PACKET) {
56522 + __u32 fakeip = 0;
56523 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56524 + current->role->roletype, cred->uid,
56525 + cred->gid, current->exec_file ?
56526 + gr_to_filename(current->exec_file->f_path.dentry,
56527 + current->exec_file->f_path.mnt) :
56528 + curr->filename, curr->filename,
56529 + &fakeip, 0, type,
56530 + protocol, GR_CONNECT, &current->signal->saved_ip);
56531 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56532 + __u32 fakeip = 0;
56533 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56534 + current->role->roletype, cred->uid,
56535 + cred->gid, current->exec_file ?
56536 + gr_to_filename(current->exec_file->f_path.dentry,
56537 + current->exec_file->f_path.mnt) :
56538 + curr->filename, curr->filename,
56539 + &fakeip, 0, type,
56540 + protocol, GR_BIND, &current->signal->saved_ip);
56541 + }
56542 + /* we'll log when they use connect or bind */
56543 + goto exit;
56544 + }
56545 +
56546 +exit_fail:
56547 + if (domain == PF_INET)
56548 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56549 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
56550 + else
56551 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56552 + gr_socktype_to_name(type), protocol);
56553 +
56554 + return 0;
56555 +exit:
56556 + return 1;
56557 +}
56558 +
56559 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56560 +{
56561 + if ((ip->mode & mode) &&
56562 + (ip_port >= ip->low) &&
56563 + (ip_port <= ip->high) &&
56564 + ((ntohl(ip_addr) & our_netmask) ==
56565 + (ntohl(our_addr) & our_netmask))
56566 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56567 + && (ip->type & (1 << type))) {
56568 + if (ip->mode & GR_INVERT)
56569 + return 2; // specifically denied
56570 + else
56571 + return 1; // allowed
56572 + }
56573 +
56574 + return 0; // not specifically allowed, may continue parsing
56575 +}
56576 +
56577 +static int
56578 +gr_search_connectbind(const int full_mode, struct sock *sk,
56579 + struct sockaddr_in *addr, const int type)
56580 +{
56581 + char iface[IFNAMSIZ] = {0};
56582 + struct acl_subject_label *curr;
56583 + struct acl_ip_label *ip;
56584 + struct inet_sock *isk;
56585 + struct net_device *dev;
56586 + struct in_device *idev;
56587 + unsigned long i;
56588 + int ret;
56589 + int mode = full_mode & (GR_BIND | GR_CONNECT);
56590 + __u32 ip_addr = 0;
56591 + __u32 our_addr;
56592 + __u32 our_netmask;
56593 + char *p;
56594 + __u16 ip_port = 0;
56595 + const struct cred *cred = current_cred();
56596 +
56597 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56598 + return 0;
56599 +
56600 + curr = current->acl;
56601 + isk = inet_sk(sk);
56602 +
56603 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56604 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56605 + addr->sin_addr.s_addr = curr->inaddr_any_override;
56606 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56607 + struct sockaddr_in saddr;
56608 + int err;
56609 +
56610 + saddr.sin_family = AF_INET;
56611 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
56612 + saddr.sin_port = isk->inet_sport;
56613 +
56614 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56615 + if (err)
56616 + return err;
56617 +
56618 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56619 + if (err)
56620 + return err;
56621 + }
56622 +
56623 + if (!curr->ips)
56624 + return 0;
56625 +
56626 + ip_addr = addr->sin_addr.s_addr;
56627 + ip_port = ntohs(addr->sin_port);
56628 +
56629 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56630 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56631 + current->role->roletype, cred->uid,
56632 + cred->gid, current->exec_file ?
56633 + gr_to_filename(current->exec_file->f_path.dentry,
56634 + current->exec_file->f_path.mnt) :
56635 + curr->filename, curr->filename,
56636 + &ip_addr, ip_port, type,
56637 + sk->sk_protocol, mode, &current->signal->saved_ip);
56638 + return 0;
56639 + }
56640 +
56641 + for (i = 0; i < curr->ip_num; i++) {
56642 + ip = *(curr->ips + i);
56643 + if (ip->iface != NULL) {
56644 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
56645 + p = strchr(iface, ':');
56646 + if (p != NULL)
56647 + *p = '\0';
56648 + dev = dev_get_by_name(sock_net(sk), iface);
56649 + if (dev == NULL)
56650 + continue;
56651 + idev = in_dev_get(dev);
56652 + if (idev == NULL) {
56653 + dev_put(dev);
56654 + continue;
56655 + }
56656 + rcu_read_lock();
56657 + for_ifa(idev) {
56658 + if (!strcmp(ip->iface, ifa->ifa_label)) {
56659 + our_addr = ifa->ifa_address;
56660 + our_netmask = 0xffffffff;
56661 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56662 + if (ret == 1) {
56663 + rcu_read_unlock();
56664 + in_dev_put(idev);
56665 + dev_put(dev);
56666 + return 0;
56667 + } else if (ret == 2) {
56668 + rcu_read_unlock();
56669 + in_dev_put(idev);
56670 + dev_put(dev);
56671 + goto denied;
56672 + }
56673 + }
56674 + } endfor_ifa(idev);
56675 + rcu_read_unlock();
56676 + in_dev_put(idev);
56677 + dev_put(dev);
56678 + } else {
56679 + our_addr = ip->addr;
56680 + our_netmask = ip->netmask;
56681 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56682 + if (ret == 1)
56683 + return 0;
56684 + else if (ret == 2)
56685 + goto denied;
56686 + }
56687 + }
56688 +
56689 +denied:
56690 + if (mode == GR_BIND)
56691 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56692 + else if (mode == GR_CONNECT)
56693 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56694 +
56695 + return -EACCES;
56696 +}
56697 +
56698 +int
56699 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56700 +{
56701 + /* always allow disconnection of dgram sockets with connect */
56702 + if (addr->sin_family == AF_UNSPEC)
56703 + return 0;
56704 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56705 +}
56706 +
56707 +int
56708 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56709 +{
56710 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56711 +}
56712 +
56713 +int gr_search_listen(struct socket *sock)
56714 +{
56715 + struct sock *sk = sock->sk;
56716 + struct sockaddr_in addr;
56717 +
56718 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56719 + addr.sin_port = inet_sk(sk)->inet_sport;
56720 +
56721 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56722 +}
56723 +
56724 +int gr_search_accept(struct socket *sock)
56725 +{
56726 + struct sock *sk = sock->sk;
56727 + struct sockaddr_in addr;
56728 +
56729 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56730 + addr.sin_port = inet_sk(sk)->inet_sport;
56731 +
56732 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56733 +}
56734 +
56735 +int
56736 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56737 +{
56738 + if (addr)
56739 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56740 + else {
56741 + struct sockaddr_in sin;
56742 + const struct inet_sock *inet = inet_sk(sk);
56743 +
56744 + sin.sin_addr.s_addr = inet->inet_daddr;
56745 + sin.sin_port = inet->inet_dport;
56746 +
56747 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56748 + }
56749 +}
56750 +
56751 +int
56752 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56753 +{
56754 + struct sockaddr_in sin;
56755 +
56756 + if (unlikely(skb->len < sizeof (struct udphdr)))
56757 + return 0; // skip this packet
56758 +
56759 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56760 + sin.sin_port = udp_hdr(skb)->source;
56761 +
56762 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56763 +}
56764 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56765 new file mode 100644
56766 index 0000000..25f54ef
56767 --- /dev/null
56768 +++ b/grsecurity/gracl_learn.c
56769 @@ -0,0 +1,207 @@
56770 +#include <linux/kernel.h>
56771 +#include <linux/mm.h>
56772 +#include <linux/sched.h>
56773 +#include <linux/poll.h>
56774 +#include <linux/string.h>
56775 +#include <linux/file.h>
56776 +#include <linux/types.h>
56777 +#include <linux/vmalloc.h>
56778 +#include <linux/grinternal.h>
56779 +
56780 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56781 + size_t count, loff_t *ppos);
56782 +extern int gr_acl_is_enabled(void);
56783 +
56784 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56785 +static int gr_learn_attached;
56786 +
56787 +/* use a 512k buffer */
56788 +#define LEARN_BUFFER_SIZE (512 * 1024)
56789 +
56790 +static DEFINE_SPINLOCK(gr_learn_lock);
56791 +static DEFINE_MUTEX(gr_learn_user_mutex);
56792 +
56793 +/* we need to maintain two buffers, so that the kernel context of grlearn
56794 + uses a semaphore around the userspace copying, and the other kernel contexts
56795 + use a spinlock when copying into the buffer, since they cannot sleep
56796 +*/
56797 +static char *learn_buffer;
56798 +static char *learn_buffer_user;
56799 +static int learn_buffer_len;
56800 +static int learn_buffer_user_len;
56801 +
56802 +static ssize_t
56803 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56804 +{
56805 + DECLARE_WAITQUEUE(wait, current);
56806 + ssize_t retval = 0;
56807 +
56808 + add_wait_queue(&learn_wait, &wait);
56809 + set_current_state(TASK_INTERRUPTIBLE);
56810 + do {
56811 + mutex_lock(&gr_learn_user_mutex);
56812 + spin_lock(&gr_learn_lock);
56813 + if (learn_buffer_len)
56814 + break;
56815 + spin_unlock(&gr_learn_lock);
56816 + mutex_unlock(&gr_learn_user_mutex);
56817 + if (file->f_flags & O_NONBLOCK) {
56818 + retval = -EAGAIN;
56819 + goto out;
56820 + }
56821 + if (signal_pending(current)) {
56822 + retval = -ERESTARTSYS;
56823 + goto out;
56824 + }
56825 +
56826 + schedule();
56827 + } while (1);
56828 +
56829 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56830 + learn_buffer_user_len = learn_buffer_len;
56831 + retval = learn_buffer_len;
56832 + learn_buffer_len = 0;
56833 +
56834 + spin_unlock(&gr_learn_lock);
56835 +
56836 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56837 + retval = -EFAULT;
56838 +
56839 + mutex_unlock(&gr_learn_user_mutex);
56840 +out:
56841 + set_current_state(TASK_RUNNING);
56842 + remove_wait_queue(&learn_wait, &wait);
56843 + return retval;
56844 +}
56845 +
56846 +static unsigned int
56847 +poll_learn(struct file * file, poll_table * wait)
56848 +{
56849 + poll_wait(file, &learn_wait, wait);
56850 +
56851 + if (learn_buffer_len)
56852 + return (POLLIN | POLLRDNORM);
56853 +
56854 + return 0;
56855 +}
56856 +
56857 +void
56858 +gr_clear_learn_entries(void)
56859 +{
56860 + char *tmp;
56861 +
56862 + mutex_lock(&gr_learn_user_mutex);
56863 + spin_lock(&gr_learn_lock);
56864 + tmp = learn_buffer;
56865 + learn_buffer = NULL;
56866 + spin_unlock(&gr_learn_lock);
56867 + if (tmp)
56868 + vfree(tmp);
56869 + if (learn_buffer_user != NULL) {
56870 + vfree(learn_buffer_user);
56871 + learn_buffer_user = NULL;
56872 + }
56873 + learn_buffer_len = 0;
56874 + mutex_unlock(&gr_learn_user_mutex);
56875 +
56876 + return;
56877 +}
56878 +
56879 +void
56880 +gr_add_learn_entry(const char *fmt, ...)
56881 +{
56882 + va_list args;
56883 + unsigned int len;
56884 +
56885 + if (!gr_learn_attached)
56886 + return;
56887 +
56888 + spin_lock(&gr_learn_lock);
56889 +
56890 + /* leave a gap at the end so we know when it's "full" but don't have to
56891 + compute the exact length of the string we're trying to append
56892 + */
56893 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56894 + spin_unlock(&gr_learn_lock);
56895 + wake_up_interruptible(&learn_wait);
56896 + return;
56897 + }
56898 + if (learn_buffer == NULL) {
56899 + spin_unlock(&gr_learn_lock);
56900 + return;
56901 + }
56902 +
56903 + va_start(args, fmt);
56904 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56905 + va_end(args);
56906 +
56907 + learn_buffer_len += len + 1;
56908 +
56909 + spin_unlock(&gr_learn_lock);
56910 + wake_up_interruptible(&learn_wait);
56911 +
56912 + return;
56913 +}
56914 +
56915 +static int
56916 +open_learn(struct inode *inode, struct file *file)
56917 +{
56918 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56919 + return -EBUSY;
56920 + if (file->f_mode & FMODE_READ) {
56921 + int retval = 0;
56922 + mutex_lock(&gr_learn_user_mutex);
56923 + if (learn_buffer == NULL)
56924 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56925 + if (learn_buffer_user == NULL)
56926 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56927 + if (learn_buffer == NULL) {
56928 + retval = -ENOMEM;
56929 + goto out_error;
56930 + }
56931 + if (learn_buffer_user == NULL) {
56932 + retval = -ENOMEM;
56933 + goto out_error;
56934 + }
56935 + learn_buffer_len = 0;
56936 + learn_buffer_user_len = 0;
56937 + gr_learn_attached = 1;
56938 +out_error:
56939 + mutex_unlock(&gr_learn_user_mutex);
56940 + return retval;
56941 + }
56942 + return 0;
56943 +}
56944 +
56945 +static int
56946 +close_learn(struct inode *inode, struct file *file)
56947 +{
56948 + if (file->f_mode & FMODE_READ) {
56949 + char *tmp = NULL;
56950 + mutex_lock(&gr_learn_user_mutex);
56951 + spin_lock(&gr_learn_lock);
56952 + tmp = learn_buffer;
56953 + learn_buffer = NULL;
56954 + spin_unlock(&gr_learn_lock);
56955 + if (tmp)
56956 + vfree(tmp);
56957 + if (learn_buffer_user != NULL) {
56958 + vfree(learn_buffer_user);
56959 + learn_buffer_user = NULL;
56960 + }
56961 + learn_buffer_len = 0;
56962 + learn_buffer_user_len = 0;
56963 + gr_learn_attached = 0;
56964 + mutex_unlock(&gr_learn_user_mutex);
56965 + }
56966 +
56967 + return 0;
56968 +}
56969 +
56970 +const struct file_operations grsec_fops = {
56971 + .read = read_learn,
56972 + .write = write_grsec_handler,
56973 + .open = open_learn,
56974 + .release = close_learn,
56975 + .poll = poll_learn,
56976 +};
56977 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56978 new file mode 100644
56979 index 0000000..39645c9
56980 --- /dev/null
56981 +++ b/grsecurity/gracl_res.c
56982 @@ -0,0 +1,68 @@
56983 +#include <linux/kernel.h>
56984 +#include <linux/sched.h>
56985 +#include <linux/gracl.h>
56986 +#include <linux/grinternal.h>
56987 +
56988 +static const char *restab_log[] = {
56989 + [RLIMIT_CPU] = "RLIMIT_CPU",
56990 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56991 + [RLIMIT_DATA] = "RLIMIT_DATA",
56992 + [RLIMIT_STACK] = "RLIMIT_STACK",
56993 + [RLIMIT_CORE] = "RLIMIT_CORE",
56994 + [RLIMIT_RSS] = "RLIMIT_RSS",
56995 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56996 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56997 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56998 + [RLIMIT_AS] = "RLIMIT_AS",
56999 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
57000 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
57001 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
57002 + [RLIMIT_NICE] = "RLIMIT_NICE",
57003 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
57004 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
57005 + [GR_CRASH_RES] = "RLIMIT_CRASH"
57006 +};
57007 +
57008 +void
57009 +gr_log_resource(const struct task_struct *task,
57010 + const int res, const unsigned long wanted, const int gt)
57011 +{
57012 + const struct cred *cred;
57013 + unsigned long rlim;
57014 +
57015 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
57016 + return;
57017 +
57018 + // not yet supported resource
57019 + if (unlikely(!restab_log[res]))
57020 + return;
57021 +
57022 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
57023 + rlim = task_rlimit_max(task, res);
57024 + else
57025 + rlim = task_rlimit(task, res);
57026 +
57027 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
57028 + return;
57029 +
57030 + rcu_read_lock();
57031 + cred = __task_cred(task);
57032 +
57033 + if (res == RLIMIT_NPROC &&
57034 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
57035 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
57036 + goto out_rcu_unlock;
57037 + else if (res == RLIMIT_MEMLOCK &&
57038 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
57039 + goto out_rcu_unlock;
57040 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
57041 + goto out_rcu_unlock;
57042 + rcu_read_unlock();
57043 +
57044 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
57045 +
57046 + return;
57047 +out_rcu_unlock:
57048 + rcu_read_unlock();
57049 + return;
57050 +}
57051 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
57052 new file mode 100644
57053 index 0000000..25197e9
57054 --- /dev/null
57055 +++ b/grsecurity/gracl_segv.c
57056 @@ -0,0 +1,299 @@
57057 +#include <linux/kernel.h>
57058 +#include <linux/mm.h>
57059 +#include <asm/uaccess.h>
57060 +#include <asm/errno.h>
57061 +#include <asm/mman.h>
57062 +#include <net/sock.h>
57063 +#include <linux/file.h>
57064 +#include <linux/fs.h>
57065 +#include <linux/net.h>
57066 +#include <linux/in.h>
57067 +#include <linux/slab.h>
57068 +#include <linux/types.h>
57069 +#include <linux/sched.h>
57070 +#include <linux/timer.h>
57071 +#include <linux/gracl.h>
57072 +#include <linux/grsecurity.h>
57073 +#include <linux/grinternal.h>
57074 +
57075 +static struct crash_uid *uid_set;
57076 +static unsigned short uid_used;
57077 +static DEFINE_SPINLOCK(gr_uid_lock);
57078 +extern rwlock_t gr_inode_lock;
57079 +extern struct acl_subject_label *
57080 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
57081 + struct acl_role_label *role);
57082 +
57083 +#ifdef CONFIG_BTRFS_FS
57084 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57085 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57086 +#endif
57087 +
57088 +static inline dev_t __get_dev(const struct dentry *dentry)
57089 +{
57090 +#ifdef CONFIG_BTRFS_FS
57091 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57092 + return get_btrfs_dev_from_inode(dentry->d_inode);
57093 + else
57094 +#endif
57095 + return dentry->d_inode->i_sb->s_dev;
57096 +}
57097 +
57098 +int
57099 +gr_init_uidset(void)
57100 +{
57101 + uid_set =
57102 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
57103 + uid_used = 0;
57104 +
57105 + return uid_set ? 1 : 0;
57106 +}
57107 +
57108 +void
57109 +gr_free_uidset(void)
57110 +{
57111 + if (uid_set)
57112 + kfree(uid_set);
57113 +
57114 + return;
57115 +}
57116 +
57117 +int
57118 +gr_find_uid(const uid_t uid)
57119 +{
57120 + struct crash_uid *tmp = uid_set;
57121 + uid_t buid;
57122 + int low = 0, high = uid_used - 1, mid;
57123 +
57124 + while (high >= low) {
57125 + mid = (low + high) >> 1;
57126 + buid = tmp[mid].uid;
57127 + if (buid == uid)
57128 + return mid;
57129 + if (buid > uid)
57130 + high = mid - 1;
57131 + if (buid < uid)
57132 + low = mid + 1;
57133 + }
57134 +
57135 + return -1;
57136 +}
57137 +
57138 +static __inline__ void
57139 +gr_insertsort(void)
57140 +{
57141 + unsigned short i, j;
57142 + struct crash_uid index;
57143 +
57144 + for (i = 1; i < uid_used; i++) {
57145 + index = uid_set[i];
57146 + j = i;
57147 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
57148 + uid_set[j] = uid_set[j - 1];
57149 + j--;
57150 + }
57151 + uid_set[j] = index;
57152 + }
57153 +
57154 + return;
57155 +}
57156 +
57157 +static __inline__ void
57158 +gr_insert_uid(const uid_t uid, const unsigned long expires)
57159 +{
57160 + int loc;
57161 +
57162 + if (uid_used == GR_UIDTABLE_MAX)
57163 + return;
57164 +
57165 + loc = gr_find_uid(uid);
57166 +
57167 + if (loc >= 0) {
57168 + uid_set[loc].expires = expires;
57169 + return;
57170 + }
57171 +
57172 + uid_set[uid_used].uid = uid;
57173 + uid_set[uid_used].expires = expires;
57174 + uid_used++;
57175 +
57176 + gr_insertsort();
57177 +
57178 + return;
57179 +}
57180 +
57181 +void
57182 +gr_remove_uid(const unsigned short loc)
57183 +{
57184 + unsigned short i;
57185 +
57186 + for (i = loc + 1; i < uid_used; i++)
57187 + uid_set[i - 1] = uid_set[i];
57188 +
57189 + uid_used--;
57190 +
57191 + return;
57192 +}
57193 +
57194 +int
57195 +gr_check_crash_uid(const uid_t uid)
57196 +{
57197 + int loc;
57198 + int ret = 0;
57199 +
57200 + if (unlikely(!gr_acl_is_enabled()))
57201 + return 0;
57202 +
57203 + spin_lock(&gr_uid_lock);
57204 + loc = gr_find_uid(uid);
57205 +
57206 + if (loc < 0)
57207 + goto out_unlock;
57208 +
57209 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
57210 + gr_remove_uid(loc);
57211 + else
57212 + ret = 1;
57213 +
57214 +out_unlock:
57215 + spin_unlock(&gr_uid_lock);
57216 + return ret;
57217 +}
57218 +
57219 +static __inline__ int
57220 +proc_is_setxid(const struct cred *cred)
57221 +{
57222 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
57223 + !uid_eq(cred->uid, cred->fsuid))
57224 + return 1;
57225 + if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
57226 + !uid_eq(cred->gid, cred->fsgid))
57227 + return 1;
57228 +
57229 + return 0;
57230 +}
57231 +
57232 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
57233 +
57234 +void
57235 +gr_handle_crash(struct task_struct *task, const int sig)
57236 +{
57237 + struct acl_subject_label *curr;
57238 + struct task_struct *tsk, *tsk2;
57239 + const struct cred *cred;
57240 + const struct cred *cred2;
57241 +
57242 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
57243 + return;
57244 +
57245 + if (unlikely(!gr_acl_is_enabled()))
57246 + return;
57247 +
57248 + curr = task->acl;
57249 +
57250 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
57251 + return;
57252 +
57253 + if (time_before_eq(curr->expires, get_seconds())) {
57254 + curr->expires = 0;
57255 + curr->crashes = 0;
57256 + }
57257 +
57258 + curr->crashes++;
57259 +
57260 + if (!curr->expires)
57261 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
57262 +
57263 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57264 + time_after(curr->expires, get_seconds())) {
57265 + rcu_read_lock();
57266 + cred = __task_cred(task);
57267 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
57268 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57269 + spin_lock(&gr_uid_lock);
57270 + gr_insert_uid(cred->uid, curr->expires);
57271 + spin_unlock(&gr_uid_lock);
57272 + curr->expires = 0;
57273 + curr->crashes = 0;
57274 + read_lock(&tasklist_lock);
57275 + do_each_thread(tsk2, tsk) {
57276 + cred2 = __task_cred(tsk);
57277 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
57278 + gr_fake_force_sig(SIGKILL, tsk);
57279 + } while_each_thread(tsk2, tsk);
57280 + read_unlock(&tasklist_lock);
57281 + } else {
57282 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57283 + read_lock(&tasklist_lock);
57284 + read_lock(&grsec_exec_file_lock);
57285 + do_each_thread(tsk2, tsk) {
57286 + if (likely(tsk != task)) {
57287 + // if this thread has the same subject as the one that triggered
57288 + // RES_CRASH and it's the same binary, kill it
57289 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
57290 + gr_fake_force_sig(SIGKILL, tsk);
57291 + }
57292 + } while_each_thread(tsk2, tsk);
57293 + read_unlock(&grsec_exec_file_lock);
57294 + read_unlock(&tasklist_lock);
57295 + }
57296 + rcu_read_unlock();
57297 + }
57298 +
57299 + return;
57300 +}
57301 +
57302 +int
57303 +gr_check_crash_exec(const struct file *filp)
57304 +{
57305 + struct acl_subject_label *curr;
57306 +
57307 + if (unlikely(!gr_acl_is_enabled()))
57308 + return 0;
57309 +
57310 + read_lock(&gr_inode_lock);
57311 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
57312 + __get_dev(filp->f_path.dentry),
57313 + current->role);
57314 + read_unlock(&gr_inode_lock);
57315 +
57316 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
57317 + (!curr->crashes && !curr->expires))
57318 + return 0;
57319 +
57320 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57321 + time_after(curr->expires, get_seconds()))
57322 + return 1;
57323 + else if (time_before_eq(curr->expires, get_seconds())) {
57324 + curr->crashes = 0;
57325 + curr->expires = 0;
57326 + }
57327 +
57328 + return 0;
57329 +}
57330 +
57331 +void
57332 +gr_handle_alertkill(struct task_struct *task)
57333 +{
57334 + struct acl_subject_label *curracl;
57335 + __u32 curr_ip;
57336 + struct task_struct *p, *p2;
57337 +
57338 + if (unlikely(!gr_acl_is_enabled()))
57339 + return;
57340 +
57341 + curracl = task->acl;
57342 + curr_ip = task->signal->curr_ip;
57343 +
57344 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57345 + read_lock(&tasklist_lock);
57346 + do_each_thread(p2, p) {
57347 + if (p->signal->curr_ip == curr_ip)
57348 + gr_fake_force_sig(SIGKILL, p);
57349 + } while_each_thread(p2, p);
57350 + read_unlock(&tasklist_lock);
57351 + } else if (curracl->mode & GR_KILLPROC)
57352 + gr_fake_force_sig(SIGKILL, task);
57353 +
57354 + return;
57355 +}
57356 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57357 new file mode 100644
57358 index 0000000..9d83a69
57359 --- /dev/null
57360 +++ b/grsecurity/gracl_shm.c
57361 @@ -0,0 +1,40 @@
57362 +#include <linux/kernel.h>
57363 +#include <linux/mm.h>
57364 +#include <linux/sched.h>
57365 +#include <linux/file.h>
57366 +#include <linux/ipc.h>
57367 +#include <linux/gracl.h>
57368 +#include <linux/grsecurity.h>
57369 +#include <linux/grinternal.h>
57370 +
57371 +int
57372 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57373 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57374 +{
57375 + struct task_struct *task;
57376 +
57377 + if (!gr_acl_is_enabled())
57378 + return 1;
57379 +
57380 + rcu_read_lock();
57381 + read_lock(&tasklist_lock);
57382 +
57383 + task = find_task_by_vpid(shm_cprid);
57384 +
57385 + if (unlikely(!task))
57386 + task = find_task_by_vpid(shm_lapid);
57387 +
57388 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57389 + (task->pid == shm_lapid)) &&
57390 + (task->acl->mode & GR_PROTSHM) &&
57391 + (task->acl != current->acl))) {
57392 + read_unlock(&tasklist_lock);
57393 + rcu_read_unlock();
57394 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57395 + return 0;
57396 + }
57397 + read_unlock(&tasklist_lock);
57398 + rcu_read_unlock();
57399 +
57400 + return 1;
57401 +}
57402 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57403 new file mode 100644
57404 index 0000000..bc0be01
57405 --- /dev/null
57406 +++ b/grsecurity/grsec_chdir.c
57407 @@ -0,0 +1,19 @@
57408 +#include <linux/kernel.h>
57409 +#include <linux/sched.h>
57410 +#include <linux/fs.h>
57411 +#include <linux/file.h>
57412 +#include <linux/grsecurity.h>
57413 +#include <linux/grinternal.h>
57414 +
57415 +void
57416 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57417 +{
57418 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57419 + if ((grsec_enable_chdir && grsec_enable_group &&
57420 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57421 + !grsec_enable_group)) {
57422 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57423 + }
57424 +#endif
57425 + return;
57426 +}
57427 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57428 new file mode 100644
57429 index 0000000..9807ee2
57430 --- /dev/null
57431 +++ b/grsecurity/grsec_chroot.c
57432 @@ -0,0 +1,368 @@
57433 +#include <linux/kernel.h>
57434 +#include <linux/module.h>
57435 +#include <linux/sched.h>
57436 +#include <linux/file.h>
57437 +#include <linux/fs.h>
57438 +#include <linux/mount.h>
57439 +#include <linux/types.h>
57440 +#include "../fs/mount.h"
57441 +#include <linux/grsecurity.h>
57442 +#include <linux/grinternal.h>
57443 +
57444 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57445 +{
57446 +#ifdef CONFIG_GRKERNSEC
57447 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
57448 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
57449 + task->gr_is_chrooted = 1;
57450 + else
57451 + task->gr_is_chrooted = 0;
57452 +
57453 + task->gr_chroot_dentry = path->dentry;
57454 +#endif
57455 + return;
57456 +}
57457 +
57458 +void gr_clear_chroot_entries(struct task_struct *task)
57459 +{
57460 +#ifdef CONFIG_GRKERNSEC
57461 + task->gr_is_chrooted = 0;
57462 + task->gr_chroot_dentry = NULL;
57463 +#endif
57464 + return;
57465 +}
57466 +
57467 +int
57468 +gr_handle_chroot_unix(const pid_t pid)
57469 +{
57470 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57471 + struct task_struct *p;
57472 +
57473 + if (unlikely(!grsec_enable_chroot_unix))
57474 + return 1;
57475 +
57476 + if (likely(!proc_is_chrooted(current)))
57477 + return 1;
57478 +
57479 + rcu_read_lock();
57480 + read_lock(&tasklist_lock);
57481 + p = find_task_by_vpid_unrestricted(pid);
57482 + if (unlikely(p && !have_same_root(current, p))) {
57483 + read_unlock(&tasklist_lock);
57484 + rcu_read_unlock();
57485 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57486 + return 0;
57487 + }
57488 + read_unlock(&tasklist_lock);
57489 + rcu_read_unlock();
57490 +#endif
57491 + return 1;
57492 +}
57493 +
57494 +int
57495 +gr_handle_chroot_nice(void)
57496 +{
57497 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57498 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57499 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57500 + return -EPERM;
57501 + }
57502 +#endif
57503 + return 0;
57504 +}
57505 +
57506 +int
57507 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57508 +{
57509 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57510 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57511 + && proc_is_chrooted(current)) {
57512 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57513 + return -EACCES;
57514 + }
57515 +#endif
57516 + return 0;
57517 +}
57518 +
57519 +int
57520 +gr_handle_chroot_rawio(const struct inode *inode)
57521 +{
57522 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57523 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57524 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57525 + return 1;
57526 +#endif
57527 + return 0;
57528 +}
57529 +
57530 +int
57531 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57532 +{
57533 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57534 + struct task_struct *p;
57535 + int ret = 0;
57536 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57537 + return ret;
57538 +
57539 + read_lock(&tasklist_lock);
57540 + do_each_pid_task(pid, type, p) {
57541 + if (!have_same_root(current, p)) {
57542 + ret = 1;
57543 + goto out;
57544 + }
57545 + } while_each_pid_task(pid, type, p);
57546 +out:
57547 + read_unlock(&tasklist_lock);
57548 + return ret;
57549 +#endif
57550 + return 0;
57551 +}
57552 +
57553 +int
57554 +gr_pid_is_chrooted(struct task_struct *p)
57555 +{
57556 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57557 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57558 + return 0;
57559 +
57560 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57561 + !have_same_root(current, p)) {
57562 + return 1;
57563 + }
57564 +#endif
57565 + return 0;
57566 +}
57567 +
57568 +EXPORT_SYMBOL(gr_pid_is_chrooted);
57569 +
57570 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57571 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57572 +{
57573 + struct path path, currentroot;
57574 + int ret = 0;
57575 +
57576 + path.dentry = (struct dentry *)u_dentry;
57577 + path.mnt = (struct vfsmount *)u_mnt;
57578 + get_fs_root(current->fs, &currentroot);
57579 + if (path_is_under(&path, &currentroot))
57580 + ret = 1;
57581 + path_put(&currentroot);
57582 +
57583 + return ret;
57584 +}
57585 +#endif
57586 +
57587 +int
57588 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57589 +{
57590 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57591 + if (!grsec_enable_chroot_fchdir)
57592 + return 1;
57593 +
57594 + if (!proc_is_chrooted(current))
57595 + return 1;
57596 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57597 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57598 + return 0;
57599 + }
57600 +#endif
57601 + return 1;
57602 +}
57603 +
57604 +int
57605 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57606 + const time_t shm_createtime)
57607 +{
57608 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57609 + struct task_struct *p;
57610 + time_t starttime;
57611 +
57612 + if (unlikely(!grsec_enable_chroot_shmat))
57613 + return 1;
57614 +
57615 + if (likely(!proc_is_chrooted(current)))
57616 + return 1;
57617 +
57618 + rcu_read_lock();
57619 + read_lock(&tasklist_lock);
57620 +
57621 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57622 + starttime = p->start_time.tv_sec;
57623 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57624 + if (have_same_root(current, p)) {
57625 + goto allow;
57626 + } else {
57627 + read_unlock(&tasklist_lock);
57628 + rcu_read_unlock();
57629 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57630 + return 0;
57631 + }
57632 + }
57633 + /* creator exited, pid reuse, fall through to next check */
57634 + }
57635 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57636 + if (unlikely(!have_same_root(current, p))) {
57637 + read_unlock(&tasklist_lock);
57638 + rcu_read_unlock();
57639 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57640 + return 0;
57641 + }
57642 + }
57643 +
57644 +allow:
57645 + read_unlock(&tasklist_lock);
57646 + rcu_read_unlock();
57647 +#endif
57648 + return 1;
57649 +}
57650 +
57651 +void
57652 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57653 +{
57654 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57655 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57656 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57657 +#endif
57658 + return;
57659 +}
57660 +
57661 +int
57662 +gr_handle_chroot_mknod(const struct dentry *dentry,
57663 + const struct vfsmount *mnt, const int mode)
57664 +{
57665 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57666 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57667 + proc_is_chrooted(current)) {
57668 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57669 + return -EPERM;
57670 + }
57671 +#endif
57672 + return 0;
57673 +}
57674 +
57675 +int
57676 +gr_handle_chroot_mount(const struct dentry *dentry,
57677 + const struct vfsmount *mnt, const char *dev_name)
57678 +{
57679 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57680 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57681 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57682 + return -EPERM;
57683 + }
57684 +#endif
57685 + return 0;
57686 +}
57687 +
57688 +int
57689 +gr_handle_chroot_pivot(void)
57690 +{
57691 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57692 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57693 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57694 + return -EPERM;
57695 + }
57696 +#endif
57697 + return 0;
57698 +}
57699 +
57700 +int
57701 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57702 +{
57703 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57704 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57705 + !gr_is_outside_chroot(dentry, mnt)) {
57706 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57707 + return -EPERM;
57708 + }
57709 +#endif
57710 + return 0;
57711 +}
57712 +
57713 +extern const char *captab_log[];
57714 +extern int captab_log_entries;
57715 +
57716 +int
57717 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57718 +{
57719 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57720 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57721 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57722 + if (cap_raised(chroot_caps, cap)) {
57723 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57724 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57725 + }
57726 + return 0;
57727 + }
57728 + }
57729 +#endif
57730 + return 1;
57731 +}
57732 +
57733 +int
57734 +gr_chroot_is_capable(const int cap)
57735 +{
57736 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57737 + return gr_task_chroot_is_capable(current, current_cred(), cap);
57738 +#endif
57739 + return 1;
57740 +}
57741 +
57742 +int
57743 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57744 +{
57745 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57746 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57747 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57748 + if (cap_raised(chroot_caps, cap)) {
57749 + return 0;
57750 + }
57751 + }
57752 +#endif
57753 + return 1;
57754 +}
57755 +
57756 +int
57757 +gr_chroot_is_capable_nolog(const int cap)
57758 +{
57759 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57760 + return gr_task_chroot_is_capable_nolog(current, cap);
57761 +#endif
57762 + return 1;
57763 +}
57764 +
57765 +int
57766 +gr_handle_chroot_sysctl(const int op)
57767 +{
57768 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57769 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57770 + proc_is_chrooted(current))
57771 + return -EACCES;
57772 +#endif
57773 + return 0;
57774 +}
57775 +
57776 +void
57777 +gr_handle_chroot_chdir(struct path *path)
57778 +{
57779 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57780 + if (grsec_enable_chroot_chdir)
57781 + set_fs_pwd(current->fs, path);
57782 +#endif
57783 + return;
57784 +}
57785 +
57786 +int
57787 +gr_handle_chroot_chmod(const struct dentry *dentry,
57788 + const struct vfsmount *mnt, const int mode)
57789 +{
57790 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57791 + /* allow chmod +s on directories, but not files */
57792 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57793 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57794 + proc_is_chrooted(current)) {
57795 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57796 + return -EPERM;
57797 + }
57798 +#endif
57799 + return 0;
57800 +}
57801 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57802 new file mode 100644
57803 index 0000000..213ad8b
57804 --- /dev/null
57805 +++ b/grsecurity/grsec_disabled.c
57806 @@ -0,0 +1,437 @@
57807 +#include <linux/kernel.h>
57808 +#include <linux/module.h>
57809 +#include <linux/sched.h>
57810 +#include <linux/file.h>
57811 +#include <linux/fs.h>
57812 +#include <linux/kdev_t.h>
57813 +#include <linux/net.h>
57814 +#include <linux/in.h>
57815 +#include <linux/ip.h>
57816 +#include <linux/skbuff.h>
57817 +#include <linux/sysctl.h>
57818 +
57819 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57820 +void
57821 +pax_set_initial_flags(struct linux_binprm *bprm)
57822 +{
57823 + return;
57824 +}
57825 +#endif
57826 +
57827 +#ifdef CONFIG_SYSCTL
57828 +__u32
57829 +gr_handle_sysctl(const struct ctl_table * table, const int op)
57830 +{
57831 + return 0;
57832 +}
57833 +#endif
57834 +
57835 +#ifdef CONFIG_TASKSTATS
57836 +int gr_is_taskstats_denied(int pid)
57837 +{
57838 + return 0;
57839 +}
57840 +#endif
57841 +
57842 +int
57843 +gr_acl_is_enabled(void)
57844 +{
57845 + return 0;
57846 +}
57847 +
57848 +void
57849 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57850 +{
57851 + return;
57852 +}
57853 +
57854 +int
57855 +gr_handle_rawio(const struct inode *inode)
57856 +{
57857 + return 0;
57858 +}
57859 +
57860 +void
57861 +gr_acl_handle_psacct(struct task_struct *task, const long code)
57862 +{
57863 + return;
57864 +}
57865 +
57866 +int
57867 +gr_handle_ptrace(struct task_struct *task, const long request)
57868 +{
57869 + return 0;
57870 +}
57871 +
57872 +int
57873 +gr_handle_proc_ptrace(struct task_struct *task)
57874 +{
57875 + return 0;
57876 +}
57877 +
57878 +void
57879 +gr_learn_resource(const struct task_struct *task,
57880 + const int res, const unsigned long wanted, const int gt)
57881 +{
57882 + return;
57883 +}
57884 +
57885 +int
57886 +gr_set_acls(const int type)
57887 +{
57888 + return 0;
57889 +}
57890 +
57891 +int
57892 +gr_check_hidden_task(const struct task_struct *tsk)
57893 +{
57894 + return 0;
57895 +}
57896 +
57897 +int
57898 +gr_check_protected_task(const struct task_struct *task)
57899 +{
57900 + return 0;
57901 +}
57902 +
57903 +int
57904 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57905 +{
57906 + return 0;
57907 +}
57908 +
57909 +void
57910 +gr_copy_label(struct task_struct *tsk)
57911 +{
57912 + return;
57913 +}
57914 +
57915 +void
57916 +gr_set_pax_flags(struct task_struct *task)
57917 +{
57918 + return;
57919 +}
57920 +
57921 +int
57922 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57923 + const int unsafe_share)
57924 +{
57925 + return 0;
57926 +}
57927 +
57928 +void
57929 +gr_handle_delete(const ino_t ino, const dev_t dev)
57930 +{
57931 + return;
57932 +}
57933 +
57934 +void
57935 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57936 +{
57937 + return;
57938 +}
57939 +
57940 +void
57941 +gr_handle_crash(struct task_struct *task, const int sig)
57942 +{
57943 + return;
57944 +}
57945 +
57946 +int
57947 +gr_check_crash_exec(const struct file *filp)
57948 +{
57949 + return 0;
57950 +}
57951 +
57952 +int
57953 +gr_check_crash_uid(const uid_t uid)
57954 +{
57955 + return 0;
57956 +}
57957 +
57958 +void
57959 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57960 + struct dentry *old_dentry,
57961 + struct dentry *new_dentry,
57962 + struct vfsmount *mnt, const __u8 replace)
57963 +{
57964 + return;
57965 +}
57966 +
57967 +int
57968 +gr_search_socket(const int family, const int type, const int protocol)
57969 +{
57970 + return 1;
57971 +}
57972 +
57973 +int
57974 +gr_search_connectbind(const int mode, const struct socket *sock,
57975 + const struct sockaddr_in *addr)
57976 +{
57977 + return 0;
57978 +}
57979 +
57980 +void
57981 +gr_handle_alertkill(struct task_struct *task)
57982 +{
57983 + return;
57984 +}
57985 +
57986 +__u32
57987 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57988 +{
57989 + return 1;
57990 +}
57991 +
57992 +__u32
57993 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57994 + const struct vfsmount * mnt)
57995 +{
57996 + return 1;
57997 +}
57998 +
57999 +__u32
58000 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
58001 + int acc_mode)
58002 +{
58003 + return 1;
58004 +}
58005 +
58006 +__u32
58007 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
58008 +{
58009 + return 1;
58010 +}
58011 +
58012 +__u32
58013 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
58014 +{
58015 + return 1;
58016 +}
58017 +
58018 +int
58019 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
58020 + unsigned int *vm_flags)
58021 +{
58022 + return 1;
58023 +}
58024 +
58025 +__u32
58026 +gr_acl_handle_truncate(const struct dentry * dentry,
58027 + const struct vfsmount * mnt)
58028 +{
58029 + return 1;
58030 +}
58031 +
58032 +__u32
58033 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
58034 +{
58035 + return 1;
58036 +}
58037 +
58038 +__u32
58039 +gr_acl_handle_access(const struct dentry * dentry,
58040 + const struct vfsmount * mnt, const int fmode)
58041 +{
58042 + return 1;
58043 +}
58044 +
58045 +__u32
58046 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
58047 + umode_t *mode)
58048 +{
58049 + return 1;
58050 +}
58051 +
58052 +__u32
58053 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
58054 +{
58055 + return 1;
58056 +}
58057 +
58058 +__u32
58059 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
58060 +{
58061 + return 1;
58062 +}
58063 +
58064 +void
58065 +grsecurity_init(void)
58066 +{
58067 + return;
58068 +}
58069 +
58070 +umode_t gr_acl_umask(void)
58071 +{
58072 + return 0;
58073 +}
58074 +
58075 +__u32
58076 +gr_acl_handle_mknod(const struct dentry * new_dentry,
58077 + const struct dentry * parent_dentry,
58078 + const struct vfsmount * parent_mnt,
58079 + const int mode)
58080 +{
58081 + return 1;
58082 +}
58083 +
58084 +__u32
58085 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
58086 + const struct dentry * parent_dentry,
58087 + const struct vfsmount * parent_mnt)
58088 +{
58089 + return 1;
58090 +}
58091 +
58092 +__u32
58093 +gr_acl_handle_symlink(const struct dentry * new_dentry,
58094 + const struct dentry * parent_dentry,
58095 + const struct vfsmount * parent_mnt, const char *from)
58096 +{
58097 + return 1;
58098 +}
58099 +
58100 +__u32
58101 +gr_acl_handle_link(const struct dentry * new_dentry,
58102 + const struct dentry * parent_dentry,
58103 + const struct vfsmount * parent_mnt,
58104 + const struct dentry * old_dentry,
58105 + const struct vfsmount * old_mnt, const char *to)
58106 +{
58107 + return 1;
58108 +}
58109 +
58110 +int
58111 +gr_acl_handle_rename(const struct dentry *new_dentry,
58112 + const struct dentry *parent_dentry,
58113 + const struct vfsmount *parent_mnt,
58114 + const struct dentry *old_dentry,
58115 + const struct inode *old_parent_inode,
58116 + const struct vfsmount *old_mnt, const char *newname)
58117 +{
58118 + return 0;
58119 +}
58120 +
58121 +int
58122 +gr_acl_handle_filldir(const struct file *file, const char *name,
58123 + const int namelen, const ino_t ino)
58124 +{
58125 + return 1;
58126 +}
58127 +
58128 +int
58129 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58130 + const time_t shm_createtime, const uid_t cuid, const int shmid)
58131 +{
58132 + return 1;
58133 +}
58134 +
58135 +int
58136 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
58137 +{
58138 + return 0;
58139 +}
58140 +
58141 +int
58142 +gr_search_accept(const struct socket *sock)
58143 +{
58144 + return 0;
58145 +}
58146 +
58147 +int
58148 +gr_search_listen(const struct socket *sock)
58149 +{
58150 + return 0;
58151 +}
58152 +
58153 +int
58154 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
58155 +{
58156 + return 0;
58157 +}
58158 +
58159 +__u32
58160 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
58161 +{
58162 + return 1;
58163 +}
58164 +
58165 +__u32
58166 +gr_acl_handle_creat(const struct dentry * dentry,
58167 + const struct dentry * p_dentry,
58168 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58169 + const int imode)
58170 +{
58171 + return 1;
58172 +}
58173 +
58174 +void
58175 +gr_acl_handle_exit(void)
58176 +{
58177 + return;
58178 +}
58179 +
58180 +int
58181 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58182 +{
58183 + return 1;
58184 +}
58185 +
58186 +void
58187 +gr_set_role_label(const uid_t uid, const gid_t gid)
58188 +{
58189 + return;
58190 +}
58191 +
58192 +int
58193 +gr_acl_handle_procpidmem(const struct task_struct *task)
58194 +{
58195 + return 0;
58196 +}
58197 +
58198 +int
58199 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
58200 +{
58201 + return 0;
58202 +}
58203 +
58204 +int
58205 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
58206 +{
58207 + return 0;
58208 +}
58209 +
58210 +void
58211 +gr_set_kernel_label(struct task_struct *task)
58212 +{
58213 + return;
58214 +}
58215 +
58216 +int
58217 +gr_check_user_change(int real, int effective, int fs)
58218 +{
58219 + return 0;
58220 +}
58221 +
58222 +int
58223 +gr_check_group_change(int real, int effective, int fs)
58224 +{
58225 + return 0;
58226 +}
58227 +
58228 +int gr_acl_enable_at_secure(void)
58229 +{
58230 + return 0;
58231 +}
58232 +
58233 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58234 +{
58235 + return dentry->d_inode->i_sb->s_dev;
58236 +}
58237 +
58238 +EXPORT_SYMBOL(gr_learn_resource);
58239 +EXPORT_SYMBOL(gr_set_kernel_label);
58240 +#ifdef CONFIG_SECURITY
58241 +EXPORT_SYMBOL(gr_check_user_change);
58242 +EXPORT_SYMBOL(gr_check_group_change);
58243 +#endif
58244 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
58245 new file mode 100644
58246 index 0000000..abfa971
58247 --- /dev/null
58248 +++ b/grsecurity/grsec_exec.c
58249 @@ -0,0 +1,174 @@
58250 +#include <linux/kernel.h>
58251 +#include <linux/sched.h>
58252 +#include <linux/file.h>
58253 +#include <linux/binfmts.h>
58254 +#include <linux/fs.h>
58255 +#include <linux/types.h>
58256 +#include <linux/grdefs.h>
58257 +#include <linux/grsecurity.h>
58258 +#include <linux/grinternal.h>
58259 +#include <linux/capability.h>
58260 +#include <linux/module.h>
58261 +
58262 +#include <asm/uaccess.h>
58263 +
58264 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58265 +static char gr_exec_arg_buf[132];
58266 +static DEFINE_MUTEX(gr_exec_arg_mutex);
58267 +#endif
58268 +
58269 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58270 +
58271 +void
58272 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58273 +{
58274 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58275 + char *grarg = gr_exec_arg_buf;
58276 + unsigned int i, x, execlen = 0;
58277 + char c;
58278 +
58279 + if (!((grsec_enable_execlog && grsec_enable_group &&
58280 + in_group_p(grsec_audit_gid))
58281 + || (grsec_enable_execlog && !grsec_enable_group)))
58282 + return;
58283 +
58284 + mutex_lock(&gr_exec_arg_mutex);
58285 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
58286 +
58287 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
58288 + const char __user *p;
58289 + unsigned int len;
58290 +
58291 + p = get_user_arg_ptr(argv, i);
58292 + if (IS_ERR(p))
58293 + goto log;
58294 +
58295 + len = strnlen_user(p, 128 - execlen);
58296 + if (len > 128 - execlen)
58297 + len = 128 - execlen;
58298 + else if (len > 0)
58299 + len--;
58300 + if (copy_from_user(grarg + execlen, p, len))
58301 + goto log;
58302 +
58303 + /* rewrite unprintable characters */
58304 + for (x = 0; x < len; x++) {
58305 + c = *(grarg + execlen + x);
58306 + if (c < 32 || c > 126)
58307 + *(grarg + execlen + x) = ' ';
58308 + }
58309 +
58310 + execlen += len;
58311 + *(grarg + execlen) = ' ';
58312 + *(grarg + execlen + 1) = '\0';
58313 + execlen++;
58314 + }
58315 +
58316 + log:
58317 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
58318 + bprm->file->f_path.mnt, grarg);
58319 + mutex_unlock(&gr_exec_arg_mutex);
58320 +#endif
58321 + return;
58322 +}
58323 +
58324 +#ifdef CONFIG_GRKERNSEC
58325 +extern int gr_acl_is_capable(const int cap);
58326 +extern int gr_acl_is_capable_nolog(const int cap);
58327 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58328 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
58329 +extern int gr_chroot_is_capable(const int cap);
58330 +extern int gr_chroot_is_capable_nolog(const int cap);
58331 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58332 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
58333 +#endif
58334 +
58335 +const char *captab_log[] = {
58336 + "CAP_CHOWN",
58337 + "CAP_DAC_OVERRIDE",
58338 + "CAP_DAC_READ_SEARCH",
58339 + "CAP_FOWNER",
58340 + "CAP_FSETID",
58341 + "CAP_KILL",
58342 + "CAP_SETGID",
58343 + "CAP_SETUID",
58344 + "CAP_SETPCAP",
58345 + "CAP_LINUX_IMMUTABLE",
58346 + "CAP_NET_BIND_SERVICE",
58347 + "CAP_NET_BROADCAST",
58348 + "CAP_NET_ADMIN",
58349 + "CAP_NET_RAW",
58350 + "CAP_IPC_LOCK",
58351 + "CAP_IPC_OWNER",
58352 + "CAP_SYS_MODULE",
58353 + "CAP_SYS_RAWIO",
58354 + "CAP_SYS_CHROOT",
58355 + "CAP_SYS_PTRACE",
58356 + "CAP_SYS_PACCT",
58357 + "CAP_SYS_ADMIN",
58358 + "CAP_SYS_BOOT",
58359 + "CAP_SYS_NICE",
58360 + "CAP_SYS_RESOURCE",
58361 + "CAP_SYS_TIME",
58362 + "CAP_SYS_TTY_CONFIG",
58363 + "CAP_MKNOD",
58364 + "CAP_LEASE",
58365 + "CAP_AUDIT_WRITE",
58366 + "CAP_AUDIT_CONTROL",
58367 + "CAP_SETFCAP",
58368 + "CAP_MAC_OVERRIDE",
58369 + "CAP_MAC_ADMIN",
58370 + "CAP_SYSLOG",
58371 + "CAP_WAKE_ALARM"
58372 +};
58373 +
58374 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
58375 +
58376 +int gr_is_capable(const int cap)
58377 +{
58378 +#ifdef CONFIG_GRKERNSEC
58379 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58380 + return 1;
58381 + return 0;
58382 +#else
58383 + return 1;
58384 +#endif
58385 +}
58386 +
58387 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58388 +{
58389 +#ifdef CONFIG_GRKERNSEC
58390 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
58391 + return 1;
58392 + return 0;
58393 +#else
58394 + return 1;
58395 +#endif
58396 +}
58397 +
58398 +int gr_is_capable_nolog(const int cap)
58399 +{
58400 +#ifdef CONFIG_GRKERNSEC
58401 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58402 + return 1;
58403 + return 0;
58404 +#else
58405 + return 1;
58406 +#endif
58407 +}
58408 +
58409 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
58410 +{
58411 +#ifdef CONFIG_GRKERNSEC
58412 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
58413 + return 1;
58414 + return 0;
58415 +#else
58416 + return 1;
58417 +#endif
58418 +}
58419 +
58420 +EXPORT_SYMBOL(gr_is_capable);
58421 +EXPORT_SYMBOL(gr_is_capable_nolog);
58422 +EXPORT_SYMBOL(gr_task_is_capable);
58423 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
58424 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58425 new file mode 100644
58426 index 0000000..d3ee748
58427 --- /dev/null
58428 +++ b/grsecurity/grsec_fifo.c
58429 @@ -0,0 +1,24 @@
58430 +#include <linux/kernel.h>
58431 +#include <linux/sched.h>
58432 +#include <linux/fs.h>
58433 +#include <linux/file.h>
58434 +#include <linux/grinternal.h>
58435 +
58436 +int
58437 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58438 + const struct dentry *dir, const int flag, const int acc_mode)
58439 +{
58440 +#ifdef CONFIG_GRKERNSEC_FIFO
58441 + const struct cred *cred = current_cred();
58442 +
58443 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58444 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58445 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58446 + (cred->fsuid != dentry->d_inode->i_uid)) {
58447 + if (!inode_permission(dentry->d_inode, acc_mode))
58448 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58449 + return -EACCES;
58450 + }
58451 +#endif
58452 + return 0;
58453 +}
58454 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58455 new file mode 100644
58456 index 0000000..8ca18bf
58457 --- /dev/null
58458 +++ b/grsecurity/grsec_fork.c
58459 @@ -0,0 +1,23 @@
58460 +#include <linux/kernel.h>
58461 +#include <linux/sched.h>
58462 +#include <linux/grsecurity.h>
58463 +#include <linux/grinternal.h>
58464 +#include <linux/errno.h>
58465 +
58466 +void
58467 +gr_log_forkfail(const int retval)
58468 +{
58469 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58470 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58471 + switch (retval) {
58472 + case -EAGAIN:
58473 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58474 + break;
58475 + case -ENOMEM:
58476 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58477 + break;
58478 + }
58479 + }
58480 +#endif
58481 + return;
58482 +}
58483 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58484 new file mode 100644
58485 index 0000000..05a6015
58486 --- /dev/null
58487 +++ b/grsecurity/grsec_init.c
58488 @@ -0,0 +1,283 @@
58489 +#include <linux/kernel.h>
58490 +#include <linux/sched.h>
58491 +#include <linux/mm.h>
58492 +#include <linux/gracl.h>
58493 +#include <linux/slab.h>
58494 +#include <linux/vmalloc.h>
58495 +#include <linux/percpu.h>
58496 +#include <linux/module.h>
58497 +
58498 +int grsec_enable_ptrace_readexec;
58499 +int grsec_enable_setxid;
58500 +int grsec_enable_symlinkown;
58501 +int grsec_symlinkown_gid;
58502 +int grsec_enable_brute;
58503 +int grsec_enable_link;
58504 +int grsec_enable_dmesg;
58505 +int grsec_enable_harden_ptrace;
58506 +int grsec_enable_fifo;
58507 +int grsec_enable_execlog;
58508 +int grsec_enable_signal;
58509 +int grsec_enable_forkfail;
58510 +int grsec_enable_audit_ptrace;
58511 +int grsec_enable_time;
58512 +int grsec_enable_audit_textrel;
58513 +int grsec_enable_group;
58514 +int grsec_audit_gid;
58515 +int grsec_enable_chdir;
58516 +int grsec_enable_mount;
58517 +int grsec_enable_rofs;
58518 +int grsec_enable_chroot_findtask;
58519 +int grsec_enable_chroot_mount;
58520 +int grsec_enable_chroot_shmat;
58521 +int grsec_enable_chroot_fchdir;
58522 +int grsec_enable_chroot_double;
58523 +int grsec_enable_chroot_pivot;
58524 +int grsec_enable_chroot_chdir;
58525 +int grsec_enable_chroot_chmod;
58526 +int grsec_enable_chroot_mknod;
58527 +int grsec_enable_chroot_nice;
58528 +int grsec_enable_chroot_execlog;
58529 +int grsec_enable_chroot_caps;
58530 +int grsec_enable_chroot_sysctl;
58531 +int grsec_enable_chroot_unix;
58532 +int grsec_enable_tpe;
58533 +int grsec_tpe_gid;
58534 +int grsec_enable_blackhole;
58535 +#ifdef CONFIG_IPV6_MODULE
58536 +EXPORT_SYMBOL(grsec_enable_blackhole);
58537 +#endif
58538 +int grsec_lastack_retries;
58539 +int grsec_enable_tpe_all;
58540 +int grsec_enable_tpe_invert;
58541 +int grsec_enable_socket_all;
58542 +int grsec_socket_all_gid;
58543 +int grsec_enable_socket_client;
58544 +int grsec_socket_client_gid;
58545 +int grsec_enable_socket_server;
58546 +int grsec_socket_server_gid;
58547 +int grsec_resource_logging;
58548 +int grsec_disable_privio;
58549 +int grsec_enable_log_rwxmaps;
58550 +int grsec_lock;
58551 +
58552 +DEFINE_SPINLOCK(grsec_alert_lock);
58553 +unsigned long grsec_alert_wtime = 0;
58554 +unsigned long grsec_alert_fyet = 0;
58555 +
58556 +DEFINE_SPINLOCK(grsec_audit_lock);
58557 +
58558 +DEFINE_RWLOCK(grsec_exec_file_lock);
58559 +
58560 +char *gr_shared_page[4];
58561 +
58562 +char *gr_alert_log_fmt;
58563 +char *gr_audit_log_fmt;
58564 +char *gr_alert_log_buf;
58565 +char *gr_audit_log_buf;
58566 +
58567 +extern struct gr_arg *gr_usermode;
58568 +extern unsigned char *gr_system_salt;
58569 +extern unsigned char *gr_system_sum;
58570 +
58571 +void __init
58572 +grsecurity_init(void)
58573 +{
58574 + int j;
58575 + /* create the per-cpu shared pages */
58576 +
58577 +#ifdef CONFIG_X86
58578 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58579 +#endif
58580 +
58581 + for (j = 0; j < 4; j++) {
58582 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58583 + if (gr_shared_page[j] == NULL) {
58584 + panic("Unable to allocate grsecurity shared page");
58585 + return;
58586 + }
58587 + }
58588 +
58589 + /* allocate log buffers */
58590 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58591 + if (!gr_alert_log_fmt) {
58592 + panic("Unable to allocate grsecurity alert log format buffer");
58593 + return;
58594 + }
58595 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58596 + if (!gr_audit_log_fmt) {
58597 + panic("Unable to allocate grsecurity audit log format buffer");
58598 + return;
58599 + }
58600 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58601 + if (!gr_alert_log_buf) {
58602 + panic("Unable to allocate grsecurity alert log buffer");
58603 + return;
58604 + }
58605 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58606 + if (!gr_audit_log_buf) {
58607 + panic("Unable to allocate grsecurity audit log buffer");
58608 + return;
58609 + }
58610 +
58611 + /* allocate memory for authentication structure */
58612 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58613 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58614 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58615 +
58616 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58617 + panic("Unable to allocate grsecurity authentication structure");
58618 + return;
58619 + }
58620 +
58621 +
58622 +#ifdef CONFIG_GRKERNSEC_IO
58623 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58624 + grsec_disable_privio = 1;
58625 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58626 + grsec_disable_privio = 1;
58627 +#else
58628 + grsec_disable_privio = 0;
58629 +#endif
58630 +#endif
58631 +
58632 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58633 + /* for backward compatibility, tpe_invert always defaults to on if
58634 + enabled in the kernel
58635 + */
58636 + grsec_enable_tpe_invert = 1;
58637 +#endif
58638 +
58639 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58640 +#ifndef CONFIG_GRKERNSEC_SYSCTL
58641 + grsec_lock = 1;
58642 +#endif
58643 +
58644 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58645 + grsec_enable_audit_textrel = 1;
58646 +#endif
58647 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58648 + grsec_enable_log_rwxmaps = 1;
58649 +#endif
58650 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58651 + grsec_enable_group = 1;
58652 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58653 +#endif
58654 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58655 + grsec_enable_ptrace_readexec = 1;
58656 +#endif
58657 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58658 + grsec_enable_chdir = 1;
58659 +#endif
58660 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58661 + grsec_enable_harden_ptrace = 1;
58662 +#endif
58663 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58664 + grsec_enable_mount = 1;
58665 +#endif
58666 +#ifdef CONFIG_GRKERNSEC_LINK
58667 + grsec_enable_link = 1;
58668 +#endif
58669 +#ifdef CONFIG_GRKERNSEC_BRUTE
58670 + grsec_enable_brute = 1;
58671 +#endif
58672 +#ifdef CONFIG_GRKERNSEC_DMESG
58673 + grsec_enable_dmesg = 1;
58674 +#endif
58675 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58676 + grsec_enable_blackhole = 1;
58677 + grsec_lastack_retries = 4;
58678 +#endif
58679 +#ifdef CONFIG_GRKERNSEC_FIFO
58680 + grsec_enable_fifo = 1;
58681 +#endif
58682 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58683 + grsec_enable_execlog = 1;
58684 +#endif
58685 +#ifdef CONFIG_GRKERNSEC_SETXID
58686 + grsec_enable_setxid = 1;
58687 +#endif
58688 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58689 + grsec_enable_signal = 1;
58690 +#endif
58691 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58692 + grsec_enable_forkfail = 1;
58693 +#endif
58694 +#ifdef CONFIG_GRKERNSEC_TIME
58695 + grsec_enable_time = 1;
58696 +#endif
58697 +#ifdef CONFIG_GRKERNSEC_RESLOG
58698 + grsec_resource_logging = 1;
58699 +#endif
58700 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58701 + grsec_enable_chroot_findtask = 1;
58702 +#endif
58703 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58704 + grsec_enable_chroot_unix = 1;
58705 +#endif
58706 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58707 + grsec_enable_chroot_mount = 1;
58708 +#endif
58709 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58710 + grsec_enable_chroot_fchdir = 1;
58711 +#endif
58712 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58713 + grsec_enable_chroot_shmat = 1;
58714 +#endif
58715 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58716 + grsec_enable_audit_ptrace = 1;
58717 +#endif
58718 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58719 + grsec_enable_chroot_double = 1;
58720 +#endif
58721 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58722 + grsec_enable_chroot_pivot = 1;
58723 +#endif
58724 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58725 + grsec_enable_chroot_chdir = 1;
58726 +#endif
58727 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58728 + grsec_enable_chroot_chmod = 1;
58729 +#endif
58730 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58731 + grsec_enable_chroot_mknod = 1;
58732 +#endif
58733 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58734 + grsec_enable_chroot_nice = 1;
58735 +#endif
58736 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58737 + grsec_enable_chroot_execlog = 1;
58738 +#endif
58739 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58740 + grsec_enable_chroot_caps = 1;
58741 +#endif
58742 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58743 + grsec_enable_chroot_sysctl = 1;
58744 +#endif
58745 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58746 + grsec_enable_symlinkown = 1;
58747 + grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
58748 +#endif
58749 +#ifdef CONFIG_GRKERNSEC_TPE
58750 + grsec_enable_tpe = 1;
58751 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58752 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58753 + grsec_enable_tpe_all = 1;
58754 +#endif
58755 +#endif
58756 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58757 + grsec_enable_socket_all = 1;
58758 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58759 +#endif
58760 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58761 + grsec_enable_socket_client = 1;
58762 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58763 +#endif
58764 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58765 + grsec_enable_socket_server = 1;
58766 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58767 +#endif
58768 +#endif
58769 +
58770 + return;
58771 +}
58772 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58773 new file mode 100644
58774 index 0000000..589481f
58775 --- /dev/null
58776 +++ b/grsecurity/grsec_link.c
58777 @@ -0,0 +1,58 @@
58778 +#include <linux/kernel.h>
58779 +#include <linux/sched.h>
58780 +#include <linux/fs.h>
58781 +#include <linux/file.h>
58782 +#include <linux/grinternal.h>
58783 +
58784 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
58785 +{
58786 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58787 + const struct inode *link_inode = link->dentry->d_inode;
58788 +
58789 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
58790 + /* ignore root-owned links, e.g. /proc/self */
58791 + !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
58792 + !uid_eq(link_inode->i_uid, target->i_uid)) {
58793 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
58794 + return 1;
58795 + }
58796 +#endif
58797 + return 0;
58798 +}
58799 +
58800 +int
58801 +gr_handle_follow_link(const struct inode *parent,
58802 + const struct inode *inode,
58803 + const struct dentry *dentry, const struct vfsmount *mnt)
58804 +{
58805 +#ifdef CONFIG_GRKERNSEC_LINK
58806 + const struct cred *cred = current_cred();
58807 +
58808 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58809 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
58810 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
58811 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58812 + return -EACCES;
58813 + }
58814 +#endif
58815 + return 0;
58816 +}
58817 +
58818 +int
58819 +gr_handle_hardlink(const struct dentry *dentry,
58820 + const struct vfsmount *mnt,
58821 + struct inode *inode, const int mode, const char *to)
58822 +{
58823 +#ifdef CONFIG_GRKERNSEC_LINK
58824 + const struct cred *cred = current_cred();
58825 +
58826 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
58827 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
58828 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58829 + !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
58830 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58831 + return -EPERM;
58832 + }
58833 +#endif
58834 + return 0;
58835 +}
58836 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58837 new file mode 100644
58838 index 0000000..a45d2e9
58839 --- /dev/null
58840 +++ b/grsecurity/grsec_log.c
58841 @@ -0,0 +1,322 @@
58842 +#include <linux/kernel.h>
58843 +#include <linux/sched.h>
58844 +#include <linux/file.h>
58845 +#include <linux/tty.h>
58846 +#include <linux/fs.h>
58847 +#include <linux/grinternal.h>
58848 +
58849 +#ifdef CONFIG_TREE_PREEMPT_RCU
58850 +#define DISABLE_PREEMPT() preempt_disable()
58851 +#define ENABLE_PREEMPT() preempt_enable()
58852 +#else
58853 +#define DISABLE_PREEMPT()
58854 +#define ENABLE_PREEMPT()
58855 +#endif
58856 +
58857 +#define BEGIN_LOCKS(x) \
58858 + DISABLE_PREEMPT(); \
58859 + rcu_read_lock(); \
58860 + read_lock(&tasklist_lock); \
58861 + read_lock(&grsec_exec_file_lock); \
58862 + if (x != GR_DO_AUDIT) \
58863 + spin_lock(&grsec_alert_lock); \
58864 + else \
58865 + spin_lock(&grsec_audit_lock)
58866 +
58867 +#define END_LOCKS(x) \
58868 + if (x != GR_DO_AUDIT) \
58869 + spin_unlock(&grsec_alert_lock); \
58870 + else \
58871 + spin_unlock(&grsec_audit_lock); \
58872 + read_unlock(&grsec_exec_file_lock); \
58873 + read_unlock(&tasklist_lock); \
58874 + rcu_read_unlock(); \
58875 + ENABLE_PREEMPT(); \
58876 + if (x == GR_DONT_AUDIT) \
58877 + gr_handle_alertkill(current)
58878 +
58879 +enum {
58880 + FLOODING,
58881 + NO_FLOODING
58882 +};
58883 +
58884 +extern char *gr_alert_log_fmt;
58885 +extern char *gr_audit_log_fmt;
58886 +extern char *gr_alert_log_buf;
58887 +extern char *gr_audit_log_buf;
58888 +
58889 +static int gr_log_start(int audit)
58890 +{
58891 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58892 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58893 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58894 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58895 + unsigned long curr_secs = get_seconds();
58896 +
58897 + if (audit == GR_DO_AUDIT)
58898 + goto set_fmt;
58899 +
58900 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58901 + grsec_alert_wtime = curr_secs;
58902 + grsec_alert_fyet = 0;
58903 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58904 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58905 + grsec_alert_fyet++;
58906 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58907 + grsec_alert_wtime = curr_secs;
58908 + grsec_alert_fyet++;
58909 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58910 + return FLOODING;
58911 + }
58912 + else return FLOODING;
58913 +
58914 +set_fmt:
58915 +#endif
58916 + memset(buf, 0, PAGE_SIZE);
58917 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
58918 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58919 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58920 + } else if (current->signal->curr_ip) {
58921 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58922 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58923 + } else if (gr_acl_is_enabled()) {
58924 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58925 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58926 + } else {
58927 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58928 + strcpy(buf, fmt);
58929 + }
58930 +
58931 + return NO_FLOODING;
58932 +}
58933 +
58934 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58935 + __attribute__ ((format (printf, 2, 0)));
58936 +
58937 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58938 +{
58939 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58940 + unsigned int len = strlen(buf);
58941 +
58942 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58943 +
58944 + return;
58945 +}
58946 +
58947 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58948 + __attribute__ ((format (printf, 2, 3)));
58949 +
58950 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58951 +{
58952 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58953 + unsigned int len = strlen(buf);
58954 + va_list ap;
58955 +
58956 + va_start(ap, msg);
58957 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58958 + va_end(ap);
58959 +
58960 + return;
58961 +}
58962 +
58963 +static void gr_log_end(int audit, int append_default)
58964 +{
58965 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58966 +
58967 + if (append_default) {
58968 + unsigned int len = strlen(buf);
58969 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58970 + }
58971 +
58972 + printk("%s\n", buf);
58973 +
58974 + return;
58975 +}
58976 +
58977 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58978 +{
58979 + int logtype;
58980 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58981 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58982 + void *voidptr = NULL;
58983 + int num1 = 0, num2 = 0;
58984 + unsigned long ulong1 = 0, ulong2 = 0;
58985 + struct dentry *dentry = NULL;
58986 + struct vfsmount *mnt = NULL;
58987 + struct file *file = NULL;
58988 + struct task_struct *task = NULL;
58989 + const struct cred *cred, *pcred;
58990 + va_list ap;
58991 +
58992 + BEGIN_LOCKS(audit);
58993 + logtype = gr_log_start(audit);
58994 + if (logtype == FLOODING) {
58995 + END_LOCKS(audit);
58996 + return;
58997 + }
58998 + va_start(ap, argtypes);
58999 + switch (argtypes) {
59000 + case GR_TTYSNIFF:
59001 + task = va_arg(ap, struct task_struct *);
59002 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
59003 + break;
59004 + case GR_SYSCTL_HIDDEN:
59005 + str1 = va_arg(ap, char *);
59006 + gr_log_middle_varargs(audit, msg, result, str1);
59007 + break;
59008 + case GR_RBAC:
59009 + dentry = va_arg(ap, struct dentry *);
59010 + mnt = va_arg(ap, struct vfsmount *);
59011 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
59012 + break;
59013 + case GR_RBAC_STR:
59014 + dentry = va_arg(ap, struct dentry *);
59015 + mnt = va_arg(ap, struct vfsmount *);
59016 + str1 = va_arg(ap, char *);
59017 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
59018 + break;
59019 + case GR_STR_RBAC:
59020 + str1 = va_arg(ap, char *);
59021 + dentry = va_arg(ap, struct dentry *);
59022 + mnt = va_arg(ap, struct vfsmount *);
59023 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
59024 + break;
59025 + case GR_RBAC_MODE2:
59026 + dentry = va_arg(ap, struct dentry *);
59027 + mnt = va_arg(ap, struct vfsmount *);
59028 + str1 = va_arg(ap, char *);
59029 + str2 = va_arg(ap, char *);
59030 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
59031 + break;
59032 + case GR_RBAC_MODE3:
59033 + dentry = va_arg(ap, struct dentry *);
59034 + mnt = va_arg(ap, struct vfsmount *);
59035 + str1 = va_arg(ap, char *);
59036 + str2 = va_arg(ap, char *);
59037 + str3 = va_arg(ap, char *);
59038 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
59039 + break;
59040 + case GR_FILENAME:
59041 + dentry = va_arg(ap, struct dentry *);
59042 + mnt = va_arg(ap, struct vfsmount *);
59043 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
59044 + break;
59045 + case GR_STR_FILENAME:
59046 + str1 = va_arg(ap, char *);
59047 + dentry = va_arg(ap, struct dentry *);
59048 + mnt = va_arg(ap, struct vfsmount *);
59049 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
59050 + break;
59051 + case GR_FILENAME_STR:
59052 + dentry = va_arg(ap, struct dentry *);
59053 + mnt = va_arg(ap, struct vfsmount *);
59054 + str1 = va_arg(ap, char *);
59055 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
59056 + break;
59057 + case GR_FILENAME_TWO_INT:
59058 + dentry = va_arg(ap, struct dentry *);
59059 + mnt = va_arg(ap, struct vfsmount *);
59060 + num1 = va_arg(ap, int);
59061 + num2 = va_arg(ap, int);
59062 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
59063 + break;
59064 + case GR_FILENAME_TWO_INT_STR:
59065 + dentry = va_arg(ap, struct dentry *);
59066 + mnt = va_arg(ap, struct vfsmount *);
59067 + num1 = va_arg(ap, int);
59068 + num2 = va_arg(ap, int);
59069 + str1 = va_arg(ap, char *);
59070 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
59071 + break;
59072 + case GR_TEXTREL:
59073 + file = va_arg(ap, struct file *);
59074 + ulong1 = va_arg(ap, unsigned long);
59075 + ulong2 = va_arg(ap, unsigned long);
59076 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
59077 + break;
59078 + case GR_PTRACE:
59079 + task = va_arg(ap, struct task_struct *);
59080 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
59081 + break;
59082 + case GR_RESOURCE:
59083 + task = va_arg(ap, struct task_struct *);
59084 + cred = __task_cred(task);
59085 + pcred = __task_cred(task->real_parent);
59086 + ulong1 = va_arg(ap, unsigned long);
59087 + str1 = va_arg(ap, char *);
59088 + ulong2 = va_arg(ap, unsigned long);
59089 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59090 + break;
59091 + case GR_CAP:
59092 + task = va_arg(ap, struct task_struct *);
59093 + cred = __task_cred(task);
59094 + pcred = __task_cred(task->real_parent);
59095 + str1 = va_arg(ap, char *);
59096 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59097 + break;
59098 + case GR_SIG:
59099 + str1 = va_arg(ap, char *);
59100 + voidptr = va_arg(ap, void *);
59101 + gr_log_middle_varargs(audit, msg, str1, voidptr);
59102 + break;
59103 + case GR_SIG2:
59104 + task = va_arg(ap, struct task_struct *);
59105 + cred = __task_cred(task);
59106 + pcred = __task_cred(task->real_parent);
59107 + num1 = va_arg(ap, int);
59108 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59109 + break;
59110 + case GR_CRASH1:
59111 + task = va_arg(ap, struct task_struct *);
59112 + cred = __task_cred(task);
59113 + pcred = __task_cred(task->real_parent);
59114 + ulong1 = va_arg(ap, unsigned long);
59115 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
59116 + break;
59117 + case GR_CRASH2:
59118 + task = va_arg(ap, struct task_struct *);
59119 + cred = __task_cred(task);
59120 + pcred = __task_cred(task->real_parent);
59121 + ulong1 = va_arg(ap, unsigned long);
59122 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
59123 + break;
59124 + case GR_RWXMAP:
59125 + file = va_arg(ap, struct file *);
59126 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
59127 + break;
59128 + case GR_PSACCT:
59129 + {
59130 + unsigned int wday, cday;
59131 + __u8 whr, chr;
59132 + __u8 wmin, cmin;
59133 + __u8 wsec, csec;
59134 + char cur_tty[64] = { 0 };
59135 + char parent_tty[64] = { 0 };
59136 +
59137 + task = va_arg(ap, struct task_struct *);
59138 + wday = va_arg(ap, unsigned int);
59139 + cday = va_arg(ap, unsigned int);
59140 + whr = va_arg(ap, int);
59141 + chr = va_arg(ap, int);
59142 + wmin = va_arg(ap, int);
59143 + cmin = va_arg(ap, int);
59144 + wsec = va_arg(ap, int);
59145 + csec = va_arg(ap, int);
59146 + ulong1 = va_arg(ap, unsigned long);
59147 + cred = __task_cred(task);
59148 + pcred = __task_cred(task->real_parent);
59149 +
59150 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59151 + }
59152 + break;
59153 + default:
59154 + gr_log_middle(audit, msg, ap);
59155 + }
59156 + va_end(ap);
59157 + // these don't need DEFAULTSECARGS printed on the end
59158 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
59159 + gr_log_end(audit, 0);
59160 + else
59161 + gr_log_end(audit, 1);
59162 + END_LOCKS(audit);
59163 +}
59164 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
59165 new file mode 100644
59166 index 0000000..f536303
59167 --- /dev/null
59168 +++ b/grsecurity/grsec_mem.c
59169 @@ -0,0 +1,40 @@
59170 +#include <linux/kernel.h>
59171 +#include <linux/sched.h>
59172 +#include <linux/mm.h>
59173 +#include <linux/mman.h>
59174 +#include <linux/grinternal.h>
59175 +
59176 +void
59177 +gr_handle_ioperm(void)
59178 +{
59179 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
59180 + return;
59181 +}
59182 +
59183 +void
59184 +gr_handle_iopl(void)
59185 +{
59186 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
59187 + return;
59188 +}
59189 +
59190 +void
59191 +gr_handle_mem_readwrite(u64 from, u64 to)
59192 +{
59193 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
59194 + return;
59195 +}
59196 +
59197 +void
59198 +gr_handle_vm86(void)
59199 +{
59200 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
59201 + return;
59202 +}
59203 +
59204 +void
59205 +gr_log_badprocpid(const char *entry)
59206 +{
59207 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
59208 + return;
59209 +}
59210 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
59211 new file mode 100644
59212 index 0000000..2131422
59213 --- /dev/null
59214 +++ b/grsecurity/grsec_mount.c
59215 @@ -0,0 +1,62 @@
59216 +#include <linux/kernel.h>
59217 +#include <linux/sched.h>
59218 +#include <linux/mount.h>
59219 +#include <linux/grsecurity.h>
59220 +#include <linux/grinternal.h>
59221 +
59222 +void
59223 +gr_log_remount(const char *devname, const int retval)
59224 +{
59225 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59226 + if (grsec_enable_mount && (retval >= 0))
59227 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
59228 +#endif
59229 + return;
59230 +}
59231 +
59232 +void
59233 +gr_log_unmount(const char *devname, const int retval)
59234 +{
59235 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59236 + if (grsec_enable_mount && (retval >= 0))
59237 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
59238 +#endif
59239 + return;
59240 +}
59241 +
59242 +void
59243 +gr_log_mount(const char *from, const char *to, const int retval)
59244 +{
59245 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59246 + if (grsec_enable_mount && (retval >= 0))
59247 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
59248 +#endif
59249 + return;
59250 +}
59251 +
59252 +int
59253 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
59254 +{
59255 +#ifdef CONFIG_GRKERNSEC_ROFS
59256 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
59257 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
59258 + return -EPERM;
59259 + } else
59260 + return 0;
59261 +#endif
59262 + return 0;
59263 +}
59264 +
59265 +int
59266 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
59267 +{
59268 +#ifdef CONFIG_GRKERNSEC_ROFS
59269 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
59270 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
59271 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
59272 + return -EPERM;
59273 + } else
59274 + return 0;
59275 +#endif
59276 + return 0;
59277 +}
59278 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
59279 new file mode 100644
59280 index 0000000..a3b12a0
59281 --- /dev/null
59282 +++ b/grsecurity/grsec_pax.c
59283 @@ -0,0 +1,36 @@
59284 +#include <linux/kernel.h>
59285 +#include <linux/sched.h>
59286 +#include <linux/mm.h>
59287 +#include <linux/file.h>
59288 +#include <linux/grinternal.h>
59289 +#include <linux/grsecurity.h>
59290 +
59291 +void
59292 +gr_log_textrel(struct vm_area_struct * vma)
59293 +{
59294 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59295 + if (grsec_enable_audit_textrel)
59296 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
59297 +#endif
59298 + return;
59299 +}
59300 +
59301 +void
59302 +gr_log_rwxmmap(struct file *file)
59303 +{
59304 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59305 + if (grsec_enable_log_rwxmaps)
59306 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
59307 +#endif
59308 + return;
59309 +}
59310 +
59311 +void
59312 +gr_log_rwxmprotect(struct file *file)
59313 +{
59314 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59315 + if (grsec_enable_log_rwxmaps)
59316 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
59317 +#endif
59318 + return;
59319 +}
59320 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
59321 new file mode 100644
59322 index 0000000..f7f29aa
59323 --- /dev/null
59324 +++ b/grsecurity/grsec_ptrace.c
59325 @@ -0,0 +1,30 @@
59326 +#include <linux/kernel.h>
59327 +#include <linux/sched.h>
59328 +#include <linux/grinternal.h>
59329 +#include <linux/security.h>
59330 +
59331 +void
59332 +gr_audit_ptrace(struct task_struct *task)
59333 +{
59334 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59335 + if (grsec_enable_audit_ptrace)
59336 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
59337 +#endif
59338 + return;
59339 +}
59340 +
59341 +int
59342 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
59343 +{
59344 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59345 + const struct dentry *dentry = file->f_path.dentry;
59346 + const struct vfsmount *mnt = file->f_path.mnt;
59347 +
59348 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
59349 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
59350 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
59351 + return -EACCES;
59352 + }
59353 +#endif
59354 + return 0;
59355 +}
59356 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59357 new file mode 100644
59358 index 0000000..b4ac94c
59359 --- /dev/null
59360 +++ b/grsecurity/grsec_sig.c
59361 @@ -0,0 +1,209 @@
59362 +#include <linux/kernel.h>
59363 +#include <linux/sched.h>
59364 +#include <linux/delay.h>
59365 +#include <linux/grsecurity.h>
59366 +#include <linux/grinternal.h>
59367 +#include <linux/hardirq.h>
59368 +
59369 +char *signames[] = {
59370 + [SIGSEGV] = "Segmentation fault",
59371 + [SIGILL] = "Illegal instruction",
59372 + [SIGABRT] = "Abort",
59373 + [SIGBUS] = "Invalid alignment/Bus error"
59374 +};
59375 +
59376 +void
59377 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59378 +{
59379 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59380 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59381 + (sig == SIGABRT) || (sig == SIGBUS))) {
59382 + if (t->pid == current->pid) {
59383 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59384 + } else {
59385 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59386 + }
59387 + }
59388 +#endif
59389 + return;
59390 +}
59391 +
59392 +int
59393 +gr_handle_signal(const struct task_struct *p, const int sig)
59394 +{
59395 +#ifdef CONFIG_GRKERNSEC
59396 + /* ignore the 0 signal for protected task checks */
59397 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
59398 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59399 + return -EPERM;
59400 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59401 + return -EPERM;
59402 + }
59403 +#endif
59404 + return 0;
59405 +}
59406 +
59407 +#ifdef CONFIG_GRKERNSEC
59408 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59409 +
59410 +int gr_fake_force_sig(int sig, struct task_struct *t)
59411 +{
59412 + unsigned long int flags;
59413 + int ret, blocked, ignored;
59414 + struct k_sigaction *action;
59415 +
59416 + spin_lock_irqsave(&t->sighand->siglock, flags);
59417 + action = &t->sighand->action[sig-1];
59418 + ignored = action->sa.sa_handler == SIG_IGN;
59419 + blocked = sigismember(&t->blocked, sig);
59420 + if (blocked || ignored) {
59421 + action->sa.sa_handler = SIG_DFL;
59422 + if (blocked) {
59423 + sigdelset(&t->blocked, sig);
59424 + recalc_sigpending_and_wake(t);
59425 + }
59426 + }
59427 + if (action->sa.sa_handler == SIG_DFL)
59428 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
59429 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59430 +
59431 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
59432 +
59433 + return ret;
59434 +}
59435 +#endif
59436 +
59437 +#ifdef CONFIG_GRKERNSEC_BRUTE
59438 +#define GR_USER_BAN_TIME (15 * 60)
59439 +
59440 +static int __get_dumpable(unsigned long mm_flags)
59441 +{
59442 + int ret;
59443 +
59444 + ret = mm_flags & MMF_DUMPABLE_MASK;
59445 + return (ret >= 2) ? 2 : ret;
59446 +}
59447 +#endif
59448 +
59449 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
59450 +{
59451 +#ifdef CONFIG_GRKERNSEC_BRUTE
59452 + kuid_t uid = GLOBAL_ROOT_UID;
59453 +
59454 + if (!grsec_enable_brute)
59455 + return;
59456 +
59457 + rcu_read_lock();
59458 + read_lock(&tasklist_lock);
59459 + read_lock(&grsec_exec_file_lock);
59460 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59461 + p->real_parent->brute = 1;
59462 + else {
59463 + const struct cred *cred = __task_cred(p), *cred2;
59464 + struct task_struct *tsk, *tsk2;
59465 +
59466 + if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
59467 + struct user_struct *user;
59468 +
59469 + uid = cred->uid;
59470 +
59471 + /* this is put upon execution past expiration */
59472 + user = find_user(uid);
59473 + if (user == NULL)
59474 + goto unlock;
59475 + user->banned = 1;
59476 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59477 + if (user->ban_expires == ~0UL)
59478 + user->ban_expires--;
59479 +
59480 + do_each_thread(tsk2, tsk) {
59481 + cred2 = __task_cred(tsk);
59482 + if (tsk != p && uid_eq(cred2->uid, uid))
59483 + gr_fake_force_sig(SIGKILL, tsk);
59484 + } while_each_thread(tsk2, tsk);
59485 + }
59486 + }
59487 +unlock:
59488 + read_unlock(&grsec_exec_file_lock);
59489 + read_unlock(&tasklist_lock);
59490 + rcu_read_unlock();
59491 +
59492 + if (!uid_eq(uid, GLOBAL_ROOT_UID))
59493 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
59494 + from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
59495 +
59496 +#endif
59497 + return;
59498 +}
59499 +
59500 +void gr_handle_brute_check(void)
59501 +{
59502 +#ifdef CONFIG_GRKERNSEC_BRUTE
59503 + if (current->brute)
59504 + msleep(30 * 1000);
59505 +#endif
59506 + return;
59507 +}
59508 +
59509 +void gr_handle_kernel_exploit(void)
59510 +{
59511 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59512 + const struct cred *cred;
59513 + struct task_struct *tsk, *tsk2;
59514 + struct user_struct *user;
59515 + kuid_t uid;
59516 +
59517 + if (in_irq() || in_serving_softirq() || in_nmi())
59518 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59519 +
59520 + uid = current_uid();
59521 +
59522 + if (uid_eq(uid, GLOBAL_ROOT_UID))
59523 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
59524 + else {
59525 + /* kill all the processes of this user, hold a reference
59526 + to their creds struct, and prevent them from creating
59527 + another process until system reset
59528 + */
59529 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
59530 + from_kuid_munged(&init_user_ns, uid));
59531 + /* we intentionally leak this ref */
59532 + user = get_uid(current->cred->user);
59533 + if (user) {
59534 + user->banned = 1;
59535 + user->ban_expires = ~0UL;
59536 + }
59537 +
59538 + read_lock(&tasklist_lock);
59539 + do_each_thread(tsk2, tsk) {
59540 + cred = __task_cred(tsk);
59541 + if (uid_eq(cred->uid, uid))
59542 + gr_fake_force_sig(SIGKILL, tsk);
59543 + } while_each_thread(tsk2, tsk);
59544 + read_unlock(&tasklist_lock);
59545 + }
59546 +#endif
59547 +}
59548 +
59549 +int __gr_process_user_ban(struct user_struct *user)
59550 +{
59551 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59552 + if (unlikely(user->banned)) {
59553 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59554 + user->banned = 0;
59555 + user->ban_expires = 0;
59556 + free_uid(user);
59557 + } else
59558 + return -EPERM;
59559 + }
59560 +#endif
59561 + return 0;
59562 +}
59563 +
59564 +int gr_process_user_ban(void)
59565 +{
59566 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59567 + return __gr_process_user_ban(current->cred->user);
59568 +#endif
59569 + return 0;
59570 +}
59571 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59572 new file mode 100644
59573 index 0000000..4030d57
59574 --- /dev/null
59575 +++ b/grsecurity/grsec_sock.c
59576 @@ -0,0 +1,244 @@
59577 +#include <linux/kernel.h>
59578 +#include <linux/module.h>
59579 +#include <linux/sched.h>
59580 +#include <linux/file.h>
59581 +#include <linux/net.h>
59582 +#include <linux/in.h>
59583 +#include <linux/ip.h>
59584 +#include <net/sock.h>
59585 +#include <net/inet_sock.h>
59586 +#include <linux/grsecurity.h>
59587 +#include <linux/grinternal.h>
59588 +#include <linux/gracl.h>
59589 +
59590 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59591 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59592 +
59593 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
59594 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
59595 +
59596 +#ifdef CONFIG_UNIX_MODULE
59597 +EXPORT_SYMBOL(gr_acl_handle_unix);
59598 +EXPORT_SYMBOL(gr_acl_handle_mknod);
59599 +EXPORT_SYMBOL(gr_handle_chroot_unix);
59600 +EXPORT_SYMBOL(gr_handle_create);
59601 +#endif
59602 +
59603 +#ifdef CONFIG_GRKERNSEC
59604 +#define gr_conn_table_size 32749
59605 +struct conn_table_entry {
59606 + struct conn_table_entry *next;
59607 + struct signal_struct *sig;
59608 +};
59609 +
59610 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59611 +DEFINE_SPINLOCK(gr_conn_table_lock);
59612 +
59613 +extern const char * gr_socktype_to_name(unsigned char type);
59614 +extern const char * gr_proto_to_name(unsigned char proto);
59615 +extern const char * gr_sockfamily_to_name(unsigned char family);
59616 +
59617 +static __inline__ int
59618 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59619 +{
59620 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59621 +}
59622 +
59623 +static __inline__ int
59624 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59625 + __u16 sport, __u16 dport)
59626 +{
59627 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59628 + sig->gr_sport == sport && sig->gr_dport == dport))
59629 + return 1;
59630 + else
59631 + return 0;
59632 +}
59633 +
59634 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59635 +{
59636 + struct conn_table_entry **match;
59637 + unsigned int index;
59638 +
59639 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59640 + sig->gr_sport, sig->gr_dport,
59641 + gr_conn_table_size);
59642 +
59643 + newent->sig = sig;
59644 +
59645 + match = &gr_conn_table[index];
59646 + newent->next = *match;
59647 + *match = newent;
59648 +
59649 + return;
59650 +}
59651 +
59652 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59653 +{
59654 + struct conn_table_entry *match, *last = NULL;
59655 + unsigned int index;
59656 +
59657 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59658 + sig->gr_sport, sig->gr_dport,
59659 + gr_conn_table_size);
59660 +
59661 + match = gr_conn_table[index];
59662 + while (match && !conn_match(match->sig,
59663 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59664 + sig->gr_dport)) {
59665 + last = match;
59666 + match = match->next;
59667 + }
59668 +
59669 + if (match) {
59670 + if (last)
59671 + last->next = match->next;
59672 + else
59673 + gr_conn_table[index] = NULL;
59674 + kfree(match);
59675 + }
59676 +
59677 + return;
59678 +}
59679 +
59680 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59681 + __u16 sport, __u16 dport)
59682 +{
59683 + struct conn_table_entry *match;
59684 + unsigned int index;
59685 +
59686 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59687 +
59688 + match = gr_conn_table[index];
59689 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59690 + match = match->next;
59691 +
59692 + if (match)
59693 + return match->sig;
59694 + else
59695 + return NULL;
59696 +}
59697 +
59698 +#endif
59699 +
59700 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59701 +{
59702 +#ifdef CONFIG_GRKERNSEC
59703 + struct signal_struct *sig = task->signal;
59704 + struct conn_table_entry *newent;
59705 +
59706 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59707 + if (newent == NULL)
59708 + return;
59709 + /* no bh lock needed since we are called with bh disabled */
59710 + spin_lock(&gr_conn_table_lock);
59711 + gr_del_task_from_ip_table_nolock(sig);
59712 + sig->gr_saddr = inet->inet_rcv_saddr;
59713 + sig->gr_daddr = inet->inet_daddr;
59714 + sig->gr_sport = inet->inet_sport;
59715 + sig->gr_dport = inet->inet_dport;
59716 + gr_add_to_task_ip_table_nolock(sig, newent);
59717 + spin_unlock(&gr_conn_table_lock);
59718 +#endif
59719 + return;
59720 +}
59721 +
59722 +void gr_del_task_from_ip_table(struct task_struct *task)
59723 +{
59724 +#ifdef CONFIG_GRKERNSEC
59725 + spin_lock_bh(&gr_conn_table_lock);
59726 + gr_del_task_from_ip_table_nolock(task->signal);
59727 + spin_unlock_bh(&gr_conn_table_lock);
59728 +#endif
59729 + return;
59730 +}
59731 +
59732 +void
59733 +gr_attach_curr_ip(const struct sock *sk)
59734 +{
59735 +#ifdef CONFIG_GRKERNSEC
59736 + struct signal_struct *p, *set;
59737 + const struct inet_sock *inet = inet_sk(sk);
59738 +
59739 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59740 + return;
59741 +
59742 + set = current->signal;
59743 +
59744 + spin_lock_bh(&gr_conn_table_lock);
59745 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59746 + inet->inet_dport, inet->inet_sport);
59747 + if (unlikely(p != NULL)) {
59748 + set->curr_ip = p->curr_ip;
59749 + set->used_accept = 1;
59750 + gr_del_task_from_ip_table_nolock(p);
59751 + spin_unlock_bh(&gr_conn_table_lock);
59752 + return;
59753 + }
59754 + spin_unlock_bh(&gr_conn_table_lock);
59755 +
59756 + set->curr_ip = inet->inet_daddr;
59757 + set->used_accept = 1;
59758 +#endif
59759 + return;
59760 +}
59761 +
59762 +int
59763 +gr_handle_sock_all(const int family, const int type, const int protocol)
59764 +{
59765 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59766 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59767 + (family != AF_UNIX)) {
59768 + if (family == AF_INET)
59769 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59770 + else
59771 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59772 + return -EACCES;
59773 + }
59774 +#endif
59775 + return 0;
59776 +}
59777 +
59778 +int
59779 +gr_handle_sock_server(const struct sockaddr *sck)
59780 +{
59781 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59782 + if (grsec_enable_socket_server &&
59783 + in_group_p(grsec_socket_server_gid) &&
59784 + sck && (sck->sa_family != AF_UNIX) &&
59785 + (sck->sa_family != AF_LOCAL)) {
59786 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59787 + return -EACCES;
59788 + }
59789 +#endif
59790 + return 0;
59791 +}
59792 +
59793 +int
59794 +gr_handle_sock_server_other(const struct sock *sck)
59795 +{
59796 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59797 + if (grsec_enable_socket_server &&
59798 + in_group_p(grsec_socket_server_gid) &&
59799 + sck && (sck->sk_family != AF_UNIX) &&
59800 + (sck->sk_family != AF_LOCAL)) {
59801 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59802 + return -EACCES;
59803 + }
59804 +#endif
59805 + return 0;
59806 +}
59807 +
59808 +int
59809 +gr_handle_sock_client(const struct sockaddr *sck)
59810 +{
59811 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59812 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59813 + sck && (sck->sa_family != AF_UNIX) &&
59814 + (sck->sa_family != AF_LOCAL)) {
59815 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59816 + return -EACCES;
59817 + }
59818 +#endif
59819 + return 0;
59820 +}
59821 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59822 new file mode 100644
59823 index 0000000..f55ef0f
59824 --- /dev/null
59825 +++ b/grsecurity/grsec_sysctl.c
59826 @@ -0,0 +1,469 @@
59827 +#include <linux/kernel.h>
59828 +#include <linux/sched.h>
59829 +#include <linux/sysctl.h>
59830 +#include <linux/grsecurity.h>
59831 +#include <linux/grinternal.h>
59832 +
59833 +int
59834 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59835 +{
59836 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59837 + if (dirname == NULL || name == NULL)
59838 + return 0;
59839 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59840 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59841 + return -EACCES;
59842 + }
59843 +#endif
59844 + return 0;
59845 +}
59846 +
59847 +#ifdef CONFIG_GRKERNSEC_ROFS
59848 +static int __maybe_unused one = 1;
59849 +#endif
59850 +
59851 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59852 +struct ctl_table grsecurity_table[] = {
59853 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59854 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59855 +#ifdef CONFIG_GRKERNSEC_IO
59856 + {
59857 + .procname = "disable_priv_io",
59858 + .data = &grsec_disable_privio,
59859 + .maxlen = sizeof(int),
59860 + .mode = 0600,
59861 + .proc_handler = &proc_dointvec,
59862 + },
59863 +#endif
59864 +#endif
59865 +#ifdef CONFIG_GRKERNSEC_LINK
59866 + {
59867 + .procname = "linking_restrictions",
59868 + .data = &grsec_enable_link,
59869 + .maxlen = sizeof(int),
59870 + .mode = 0600,
59871 + .proc_handler = &proc_dointvec,
59872 + },
59873 +#endif
59874 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
59875 + {
59876 + .procname = "enforce_symlinksifowner",
59877 + .data = &grsec_enable_symlinkown,
59878 + .maxlen = sizeof(int),
59879 + .mode = 0600,
59880 + .proc_handler = &proc_dointvec,
59881 + },
59882 + {
59883 + .procname = "symlinkown_gid",
59884 + .data = &grsec_symlinkown_gid,
59885 + .maxlen = sizeof(int),
59886 + .mode = 0600,
59887 + .proc_handler = &proc_dointvec,
59888 + },
59889 +#endif
59890 +#ifdef CONFIG_GRKERNSEC_BRUTE
59891 + {
59892 + .procname = "deter_bruteforce",
59893 + .data = &grsec_enable_brute,
59894 + .maxlen = sizeof(int),
59895 + .mode = 0600,
59896 + .proc_handler = &proc_dointvec,
59897 + },
59898 +#endif
59899 +#ifdef CONFIG_GRKERNSEC_FIFO
59900 + {
59901 + .procname = "fifo_restrictions",
59902 + .data = &grsec_enable_fifo,
59903 + .maxlen = sizeof(int),
59904 + .mode = 0600,
59905 + .proc_handler = &proc_dointvec,
59906 + },
59907 +#endif
59908 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59909 + {
59910 + .procname = "ptrace_readexec",
59911 + .data = &grsec_enable_ptrace_readexec,
59912 + .maxlen = sizeof(int),
59913 + .mode = 0600,
59914 + .proc_handler = &proc_dointvec,
59915 + },
59916 +#endif
59917 +#ifdef CONFIG_GRKERNSEC_SETXID
59918 + {
59919 + .procname = "consistent_setxid",
59920 + .data = &grsec_enable_setxid,
59921 + .maxlen = sizeof(int),
59922 + .mode = 0600,
59923 + .proc_handler = &proc_dointvec,
59924 + },
59925 +#endif
59926 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59927 + {
59928 + .procname = "ip_blackhole",
59929 + .data = &grsec_enable_blackhole,
59930 + .maxlen = sizeof(int),
59931 + .mode = 0600,
59932 + .proc_handler = &proc_dointvec,
59933 + },
59934 + {
59935 + .procname = "lastack_retries",
59936 + .data = &grsec_lastack_retries,
59937 + .maxlen = sizeof(int),
59938 + .mode = 0600,
59939 + .proc_handler = &proc_dointvec,
59940 + },
59941 +#endif
59942 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59943 + {
59944 + .procname = "exec_logging",
59945 + .data = &grsec_enable_execlog,
59946 + .maxlen = sizeof(int),
59947 + .mode = 0600,
59948 + .proc_handler = &proc_dointvec,
59949 + },
59950 +#endif
59951 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59952 + {
59953 + .procname = "rwxmap_logging",
59954 + .data = &grsec_enable_log_rwxmaps,
59955 + .maxlen = sizeof(int),
59956 + .mode = 0600,
59957 + .proc_handler = &proc_dointvec,
59958 + },
59959 +#endif
59960 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59961 + {
59962 + .procname = "signal_logging",
59963 + .data = &grsec_enable_signal,
59964 + .maxlen = sizeof(int),
59965 + .mode = 0600,
59966 + .proc_handler = &proc_dointvec,
59967 + },
59968 +#endif
59969 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59970 + {
59971 + .procname = "forkfail_logging",
59972 + .data = &grsec_enable_forkfail,
59973 + .maxlen = sizeof(int),
59974 + .mode = 0600,
59975 + .proc_handler = &proc_dointvec,
59976 + },
59977 +#endif
59978 +#ifdef CONFIG_GRKERNSEC_TIME
59979 + {
59980 + .procname = "timechange_logging",
59981 + .data = &grsec_enable_time,
59982 + .maxlen = sizeof(int),
59983 + .mode = 0600,
59984 + .proc_handler = &proc_dointvec,
59985 + },
59986 +#endif
59987 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59988 + {
59989 + .procname = "chroot_deny_shmat",
59990 + .data = &grsec_enable_chroot_shmat,
59991 + .maxlen = sizeof(int),
59992 + .mode = 0600,
59993 + .proc_handler = &proc_dointvec,
59994 + },
59995 +#endif
59996 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59997 + {
59998 + .procname = "chroot_deny_unix",
59999 + .data = &grsec_enable_chroot_unix,
60000 + .maxlen = sizeof(int),
60001 + .mode = 0600,
60002 + .proc_handler = &proc_dointvec,
60003 + },
60004 +#endif
60005 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60006 + {
60007 + .procname = "chroot_deny_mount",
60008 + .data = &grsec_enable_chroot_mount,
60009 + .maxlen = sizeof(int),
60010 + .mode = 0600,
60011 + .proc_handler = &proc_dointvec,
60012 + },
60013 +#endif
60014 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60015 + {
60016 + .procname = "chroot_deny_fchdir",
60017 + .data = &grsec_enable_chroot_fchdir,
60018 + .maxlen = sizeof(int),
60019 + .mode = 0600,
60020 + .proc_handler = &proc_dointvec,
60021 + },
60022 +#endif
60023 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60024 + {
60025 + .procname = "chroot_deny_chroot",
60026 + .data = &grsec_enable_chroot_double,
60027 + .maxlen = sizeof(int),
60028 + .mode = 0600,
60029 + .proc_handler = &proc_dointvec,
60030 + },
60031 +#endif
60032 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60033 + {
60034 + .procname = "chroot_deny_pivot",
60035 + .data = &grsec_enable_chroot_pivot,
60036 + .maxlen = sizeof(int),
60037 + .mode = 0600,
60038 + .proc_handler = &proc_dointvec,
60039 + },
60040 +#endif
60041 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60042 + {
60043 + .procname = "chroot_enforce_chdir",
60044 + .data = &grsec_enable_chroot_chdir,
60045 + .maxlen = sizeof(int),
60046 + .mode = 0600,
60047 + .proc_handler = &proc_dointvec,
60048 + },
60049 +#endif
60050 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60051 + {
60052 + .procname = "chroot_deny_chmod",
60053 + .data = &grsec_enable_chroot_chmod,
60054 + .maxlen = sizeof(int),
60055 + .mode = 0600,
60056 + .proc_handler = &proc_dointvec,
60057 + },
60058 +#endif
60059 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60060 + {
60061 + .procname = "chroot_deny_mknod",
60062 + .data = &grsec_enable_chroot_mknod,
60063 + .maxlen = sizeof(int),
60064 + .mode = 0600,
60065 + .proc_handler = &proc_dointvec,
60066 + },
60067 +#endif
60068 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60069 + {
60070 + .procname = "chroot_restrict_nice",
60071 + .data = &grsec_enable_chroot_nice,
60072 + .maxlen = sizeof(int),
60073 + .mode = 0600,
60074 + .proc_handler = &proc_dointvec,
60075 + },
60076 +#endif
60077 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60078 + {
60079 + .procname = "chroot_execlog",
60080 + .data = &grsec_enable_chroot_execlog,
60081 + .maxlen = sizeof(int),
60082 + .mode = 0600,
60083 + .proc_handler = &proc_dointvec,
60084 + },
60085 +#endif
60086 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60087 + {
60088 + .procname = "chroot_caps",
60089 + .data = &grsec_enable_chroot_caps,
60090 + .maxlen = sizeof(int),
60091 + .mode = 0600,
60092 + .proc_handler = &proc_dointvec,
60093 + },
60094 +#endif
60095 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60096 + {
60097 + .procname = "chroot_deny_sysctl",
60098 + .data = &grsec_enable_chroot_sysctl,
60099 + .maxlen = sizeof(int),
60100 + .mode = 0600,
60101 + .proc_handler = &proc_dointvec,
60102 + },
60103 +#endif
60104 +#ifdef CONFIG_GRKERNSEC_TPE
60105 + {
60106 + .procname = "tpe",
60107 + .data = &grsec_enable_tpe,
60108 + .maxlen = sizeof(int),
60109 + .mode = 0600,
60110 + .proc_handler = &proc_dointvec,
60111 + },
60112 + {
60113 + .procname = "tpe_gid",
60114 + .data = &grsec_tpe_gid,
60115 + .maxlen = sizeof(int),
60116 + .mode = 0600,
60117 + .proc_handler = &proc_dointvec,
60118 + },
60119 +#endif
60120 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60121 + {
60122 + .procname = "tpe_invert",
60123 + .data = &grsec_enable_tpe_invert,
60124 + .maxlen = sizeof(int),
60125 + .mode = 0600,
60126 + .proc_handler = &proc_dointvec,
60127 + },
60128 +#endif
60129 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
60130 + {
60131 + .procname = "tpe_restrict_all",
60132 + .data = &grsec_enable_tpe_all,
60133 + .maxlen = sizeof(int),
60134 + .mode = 0600,
60135 + .proc_handler = &proc_dointvec,
60136 + },
60137 +#endif
60138 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60139 + {
60140 + .procname = "socket_all",
60141 + .data = &grsec_enable_socket_all,
60142 + .maxlen = sizeof(int),
60143 + .mode = 0600,
60144 + .proc_handler = &proc_dointvec,
60145 + },
60146 + {
60147 + .procname = "socket_all_gid",
60148 + .data = &grsec_socket_all_gid,
60149 + .maxlen = sizeof(int),
60150 + .mode = 0600,
60151 + .proc_handler = &proc_dointvec,
60152 + },
60153 +#endif
60154 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60155 + {
60156 + .procname = "socket_client",
60157 + .data = &grsec_enable_socket_client,
60158 + .maxlen = sizeof(int),
60159 + .mode = 0600,
60160 + .proc_handler = &proc_dointvec,
60161 + },
60162 + {
60163 + .procname = "socket_client_gid",
60164 + .data = &grsec_socket_client_gid,
60165 + .maxlen = sizeof(int),
60166 + .mode = 0600,
60167 + .proc_handler = &proc_dointvec,
60168 + },
60169 +#endif
60170 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60171 + {
60172 + .procname = "socket_server",
60173 + .data = &grsec_enable_socket_server,
60174 + .maxlen = sizeof(int),
60175 + .mode = 0600,
60176 + .proc_handler = &proc_dointvec,
60177 + },
60178 + {
60179 + .procname = "socket_server_gid",
60180 + .data = &grsec_socket_server_gid,
60181 + .maxlen = sizeof(int),
60182 + .mode = 0600,
60183 + .proc_handler = &proc_dointvec,
60184 + },
60185 +#endif
60186 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60187 + {
60188 + .procname = "audit_group",
60189 + .data = &grsec_enable_group,
60190 + .maxlen = sizeof(int),
60191 + .mode = 0600,
60192 + .proc_handler = &proc_dointvec,
60193 + },
60194 + {
60195 + .procname = "audit_gid",
60196 + .data = &grsec_audit_gid,
60197 + .maxlen = sizeof(int),
60198 + .mode = 0600,
60199 + .proc_handler = &proc_dointvec,
60200 + },
60201 +#endif
60202 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60203 + {
60204 + .procname = "audit_chdir",
60205 + .data = &grsec_enable_chdir,
60206 + .maxlen = sizeof(int),
60207 + .mode = 0600,
60208 + .proc_handler = &proc_dointvec,
60209 + },
60210 +#endif
60211 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60212 + {
60213 + .procname = "audit_mount",
60214 + .data = &grsec_enable_mount,
60215 + .maxlen = sizeof(int),
60216 + .mode = 0600,
60217 + .proc_handler = &proc_dointvec,
60218 + },
60219 +#endif
60220 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60221 + {
60222 + .procname = "audit_textrel",
60223 + .data = &grsec_enable_audit_textrel,
60224 + .maxlen = sizeof(int),
60225 + .mode = 0600,
60226 + .proc_handler = &proc_dointvec,
60227 + },
60228 +#endif
60229 +#ifdef CONFIG_GRKERNSEC_DMESG
60230 + {
60231 + .procname = "dmesg",
60232 + .data = &grsec_enable_dmesg,
60233 + .maxlen = sizeof(int),
60234 + .mode = 0600,
60235 + .proc_handler = &proc_dointvec,
60236 + },
60237 +#endif
60238 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60239 + {
60240 + .procname = "chroot_findtask",
60241 + .data = &grsec_enable_chroot_findtask,
60242 + .maxlen = sizeof(int),
60243 + .mode = 0600,
60244 + .proc_handler = &proc_dointvec,
60245 + },
60246 +#endif
60247 +#ifdef CONFIG_GRKERNSEC_RESLOG
60248 + {
60249 + .procname = "resource_logging",
60250 + .data = &grsec_resource_logging,
60251 + .maxlen = sizeof(int),
60252 + .mode = 0600,
60253 + .proc_handler = &proc_dointvec,
60254 + },
60255 +#endif
60256 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60257 + {
60258 + .procname = "audit_ptrace",
60259 + .data = &grsec_enable_audit_ptrace,
60260 + .maxlen = sizeof(int),
60261 + .mode = 0600,
60262 + .proc_handler = &proc_dointvec,
60263 + },
60264 +#endif
60265 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60266 + {
60267 + .procname = "harden_ptrace",
60268 + .data = &grsec_enable_harden_ptrace,
60269 + .maxlen = sizeof(int),
60270 + .mode = 0600,
60271 + .proc_handler = &proc_dointvec,
60272 + },
60273 +#endif
60274 + {
60275 + .procname = "grsec_lock",
60276 + .data = &grsec_lock,
60277 + .maxlen = sizeof(int),
60278 + .mode = 0600,
60279 + .proc_handler = &proc_dointvec,
60280 + },
60281 +#endif
60282 +#ifdef CONFIG_GRKERNSEC_ROFS
60283 + {
60284 + .procname = "romount_protect",
60285 + .data = &grsec_enable_rofs,
60286 + .maxlen = sizeof(int),
60287 + .mode = 0600,
60288 + .proc_handler = &proc_dointvec_minmax,
60289 + .extra1 = &one,
60290 + .extra2 = &one,
60291 + },
60292 +#endif
60293 + { }
60294 +};
60295 +#endif
60296 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
60297 new file mode 100644
60298 index 0000000..0dc13c3
60299 --- /dev/null
60300 +++ b/grsecurity/grsec_time.c
60301 @@ -0,0 +1,16 @@
60302 +#include <linux/kernel.h>
60303 +#include <linux/sched.h>
60304 +#include <linux/grinternal.h>
60305 +#include <linux/module.h>
60306 +
60307 +void
60308 +gr_log_timechange(void)
60309 +{
60310 +#ifdef CONFIG_GRKERNSEC_TIME
60311 + if (grsec_enable_time)
60312 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
60313 +#endif
60314 + return;
60315 +}
60316 +
60317 +EXPORT_SYMBOL(gr_log_timechange);
60318 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
60319 new file mode 100644
60320 index 0000000..07e0dc0
60321 --- /dev/null
60322 +++ b/grsecurity/grsec_tpe.c
60323 @@ -0,0 +1,73 @@
60324 +#include <linux/kernel.h>
60325 +#include <linux/sched.h>
60326 +#include <linux/file.h>
60327 +#include <linux/fs.h>
60328 +#include <linux/grinternal.h>
60329 +
60330 +extern int gr_acl_tpe_check(void);
60331 +
60332 +int
60333 +gr_tpe_allow(const struct file *file)
60334 +{
60335 +#ifdef CONFIG_GRKERNSEC
60336 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
60337 + const struct cred *cred = current_cred();
60338 + char *msg = NULL;
60339 + char *msg2 = NULL;
60340 +
60341 + // never restrict root
60342 + if (!cred->uid)
60343 + return 1;
60344 +
60345 + if (grsec_enable_tpe) {
60346 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60347 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
60348 + msg = "not being in trusted group";
60349 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
60350 + msg = "being in untrusted group";
60351 +#else
60352 + if (in_group_p(grsec_tpe_gid))
60353 + msg = "being in untrusted group";
60354 +#endif
60355 + }
60356 + if (!msg && gr_acl_tpe_check())
60357 + msg = "being in untrusted role";
60358 +
60359 + // not in any affected group/role
60360 + if (!msg)
60361 + goto next_check;
60362 +
60363 + if (inode->i_uid)
60364 + msg2 = "file in non-root-owned directory";
60365 + else if (inode->i_mode & S_IWOTH)
60366 + msg2 = "file in world-writable directory";
60367 + else if (inode->i_mode & S_IWGRP)
60368 + msg2 = "file in group-writable directory";
60369 +
60370 + if (msg && msg2) {
60371 + char fullmsg[70] = {0};
60372 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
60373 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
60374 + return 0;
60375 + }
60376 + msg = NULL;
60377 +next_check:
60378 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
60379 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
60380 + return 1;
60381 +
60382 + if (inode->i_uid && (inode->i_uid != cred->uid))
60383 + msg = "directory not owned by user";
60384 + else if (inode->i_mode & S_IWOTH)
60385 + msg = "file in world-writable directory";
60386 + else if (inode->i_mode & S_IWGRP)
60387 + msg = "file in group-writable directory";
60388 +
60389 + if (msg) {
60390 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
60391 + return 0;
60392 + }
60393 +#endif
60394 +#endif
60395 + return 1;
60396 +}
60397 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60398 new file mode 100644
60399 index 0000000..9f7b1ac
60400 --- /dev/null
60401 +++ b/grsecurity/grsum.c
60402 @@ -0,0 +1,61 @@
60403 +#include <linux/err.h>
60404 +#include <linux/kernel.h>
60405 +#include <linux/sched.h>
60406 +#include <linux/mm.h>
60407 +#include <linux/scatterlist.h>
60408 +#include <linux/crypto.h>
60409 +#include <linux/gracl.h>
60410 +
60411 +
60412 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60413 +#error "crypto and sha256 must be built into the kernel"
60414 +#endif
60415 +
60416 +int
60417 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60418 +{
60419 + char *p;
60420 + struct crypto_hash *tfm;
60421 + struct hash_desc desc;
60422 + struct scatterlist sg;
60423 + unsigned char temp_sum[GR_SHA_LEN];
60424 + volatile int retval = 0;
60425 + volatile int dummy = 0;
60426 + unsigned int i;
60427 +
60428 + sg_init_table(&sg, 1);
60429 +
60430 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60431 + if (IS_ERR(tfm)) {
60432 + /* should never happen, since sha256 should be built in */
60433 + return 1;
60434 + }
60435 +
60436 + desc.tfm = tfm;
60437 + desc.flags = 0;
60438 +
60439 + crypto_hash_init(&desc);
60440 +
60441 + p = salt;
60442 + sg_set_buf(&sg, p, GR_SALT_LEN);
60443 + crypto_hash_update(&desc, &sg, sg.length);
60444 +
60445 + p = entry->pw;
60446 + sg_set_buf(&sg, p, strlen(p));
60447 +
60448 + crypto_hash_update(&desc, &sg, sg.length);
60449 +
60450 + crypto_hash_final(&desc, temp_sum);
60451 +
60452 + memset(entry->pw, 0, GR_PW_LEN);
60453 +
60454 + for (i = 0; i < GR_SHA_LEN; i++)
60455 + if (sum[i] != temp_sum[i])
60456 + retval = 1;
60457 + else
60458 + dummy = 1; // waste a cycle
60459 +
60460 + crypto_free_hash(tfm);
60461 +
60462 + return retval;
60463 +}
60464 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60465 index 9e6e1c6..d47b906 100644
60466 --- a/include/acpi/acpi_bus.h
60467 +++ b/include/acpi/acpi_bus.h
60468 @@ -138,7 +138,7 @@ struct acpi_device_ops {
60469 acpi_op_bind bind;
60470 acpi_op_unbind unbind;
60471 acpi_op_notify notify;
60472 -};
60473 +} __no_const;
60474
60475 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60476
60477 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
60478 index 77ff547..181834f 100644
60479 --- a/include/asm-generic/4level-fixup.h
60480 +++ b/include/asm-generic/4level-fixup.h
60481 @@ -13,8 +13,10 @@
60482 #define pmd_alloc(mm, pud, address) \
60483 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
60484 NULL: pmd_offset(pud, address))
60485 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
60486
60487 #define pud_alloc(mm, pgd, address) (pgd)
60488 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
60489 #define pud_offset(pgd, start) (pgd)
60490 #define pud_none(pud) 0
60491 #define pud_bad(pud) 0
60492 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60493 index b7babf0..3ba8aee 100644
60494 --- a/include/asm-generic/atomic-long.h
60495 +++ b/include/asm-generic/atomic-long.h
60496 @@ -22,6 +22,12 @@
60497
60498 typedef atomic64_t atomic_long_t;
60499
60500 +#ifdef CONFIG_PAX_REFCOUNT
60501 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
60502 +#else
60503 +typedef atomic64_t atomic_long_unchecked_t;
60504 +#endif
60505 +
60506 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60507
60508 static inline long atomic_long_read(atomic_long_t *l)
60509 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60510 return (long)atomic64_read(v);
60511 }
60512
60513 +#ifdef CONFIG_PAX_REFCOUNT
60514 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60515 +{
60516 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60517 +
60518 + return (long)atomic64_read_unchecked(v);
60519 +}
60520 +#endif
60521 +
60522 static inline void atomic_long_set(atomic_long_t *l, long i)
60523 {
60524 atomic64_t *v = (atomic64_t *)l;
60525 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60526 atomic64_set(v, i);
60527 }
60528
60529 +#ifdef CONFIG_PAX_REFCOUNT
60530 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60531 +{
60532 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60533 +
60534 + atomic64_set_unchecked(v, i);
60535 +}
60536 +#endif
60537 +
60538 static inline void atomic_long_inc(atomic_long_t *l)
60539 {
60540 atomic64_t *v = (atomic64_t *)l;
60541 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60542 atomic64_inc(v);
60543 }
60544
60545 +#ifdef CONFIG_PAX_REFCOUNT
60546 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60547 +{
60548 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60549 +
60550 + atomic64_inc_unchecked(v);
60551 +}
60552 +#endif
60553 +
60554 static inline void atomic_long_dec(atomic_long_t *l)
60555 {
60556 atomic64_t *v = (atomic64_t *)l;
60557 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60558 atomic64_dec(v);
60559 }
60560
60561 +#ifdef CONFIG_PAX_REFCOUNT
60562 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60563 +{
60564 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60565 +
60566 + atomic64_dec_unchecked(v);
60567 +}
60568 +#endif
60569 +
60570 static inline void atomic_long_add(long i, atomic_long_t *l)
60571 {
60572 atomic64_t *v = (atomic64_t *)l;
60573 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60574 atomic64_add(i, v);
60575 }
60576
60577 +#ifdef CONFIG_PAX_REFCOUNT
60578 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60579 +{
60580 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60581 +
60582 + atomic64_add_unchecked(i, v);
60583 +}
60584 +#endif
60585 +
60586 static inline void atomic_long_sub(long i, atomic_long_t *l)
60587 {
60588 atomic64_t *v = (atomic64_t *)l;
60589 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60590 atomic64_sub(i, v);
60591 }
60592
60593 +#ifdef CONFIG_PAX_REFCOUNT
60594 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60595 +{
60596 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60597 +
60598 + atomic64_sub_unchecked(i, v);
60599 +}
60600 +#endif
60601 +
60602 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60603 {
60604 atomic64_t *v = (atomic64_t *)l;
60605 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60606 return (long)atomic64_inc_return(v);
60607 }
60608
60609 +#ifdef CONFIG_PAX_REFCOUNT
60610 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60611 +{
60612 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60613 +
60614 + return (long)atomic64_inc_return_unchecked(v);
60615 +}
60616 +#endif
60617 +
60618 static inline long atomic_long_dec_return(atomic_long_t *l)
60619 {
60620 atomic64_t *v = (atomic64_t *)l;
60621 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60622
60623 typedef atomic_t atomic_long_t;
60624
60625 +#ifdef CONFIG_PAX_REFCOUNT
60626 +typedef atomic_unchecked_t atomic_long_unchecked_t;
60627 +#else
60628 +typedef atomic_t atomic_long_unchecked_t;
60629 +#endif
60630 +
60631 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60632 static inline long atomic_long_read(atomic_long_t *l)
60633 {
60634 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60635 return (long)atomic_read(v);
60636 }
60637
60638 +#ifdef CONFIG_PAX_REFCOUNT
60639 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60640 +{
60641 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60642 +
60643 + return (long)atomic_read_unchecked(v);
60644 +}
60645 +#endif
60646 +
60647 static inline void atomic_long_set(atomic_long_t *l, long i)
60648 {
60649 atomic_t *v = (atomic_t *)l;
60650 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60651 atomic_set(v, i);
60652 }
60653
60654 +#ifdef CONFIG_PAX_REFCOUNT
60655 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60656 +{
60657 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60658 +
60659 + atomic_set_unchecked(v, i);
60660 +}
60661 +#endif
60662 +
60663 static inline void atomic_long_inc(atomic_long_t *l)
60664 {
60665 atomic_t *v = (atomic_t *)l;
60666 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60667 atomic_inc(v);
60668 }
60669
60670 +#ifdef CONFIG_PAX_REFCOUNT
60671 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60672 +{
60673 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60674 +
60675 + atomic_inc_unchecked(v);
60676 +}
60677 +#endif
60678 +
60679 static inline void atomic_long_dec(atomic_long_t *l)
60680 {
60681 atomic_t *v = (atomic_t *)l;
60682 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60683 atomic_dec(v);
60684 }
60685
60686 +#ifdef CONFIG_PAX_REFCOUNT
60687 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60688 +{
60689 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60690 +
60691 + atomic_dec_unchecked(v);
60692 +}
60693 +#endif
60694 +
60695 static inline void atomic_long_add(long i, atomic_long_t *l)
60696 {
60697 atomic_t *v = (atomic_t *)l;
60698 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60699 atomic_add(i, v);
60700 }
60701
60702 +#ifdef CONFIG_PAX_REFCOUNT
60703 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60704 +{
60705 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60706 +
60707 + atomic_add_unchecked(i, v);
60708 +}
60709 +#endif
60710 +
60711 static inline void atomic_long_sub(long i, atomic_long_t *l)
60712 {
60713 atomic_t *v = (atomic_t *)l;
60714 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60715 atomic_sub(i, v);
60716 }
60717
60718 +#ifdef CONFIG_PAX_REFCOUNT
60719 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60720 +{
60721 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60722 +
60723 + atomic_sub_unchecked(i, v);
60724 +}
60725 +#endif
60726 +
60727 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60728 {
60729 atomic_t *v = (atomic_t *)l;
60730 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60731 return (long)atomic_inc_return(v);
60732 }
60733
60734 +#ifdef CONFIG_PAX_REFCOUNT
60735 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60736 +{
60737 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60738 +
60739 + return (long)atomic_inc_return_unchecked(v);
60740 +}
60741 +#endif
60742 +
60743 static inline long atomic_long_dec_return(atomic_long_t *l)
60744 {
60745 atomic_t *v = (atomic_t *)l;
60746 @@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60747
60748 #endif /* BITS_PER_LONG == 64 */
60749
60750 +#ifdef CONFIG_PAX_REFCOUNT
60751 +static inline void pax_refcount_needs_these_functions(void)
60752 +{
60753 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
60754 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60755 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60756 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60757 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60758 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60759 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60760 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60761 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60762 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60763 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60764 +#ifdef CONFIG_X86
60765 + atomic_clear_mask_unchecked(0, NULL);
60766 + atomic_set_mask_unchecked(0, NULL);
60767 +#endif
60768 +
60769 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60770 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60771 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60772 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60773 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60774 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60775 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60776 +}
60777 +#else
60778 +#define atomic_read_unchecked(v) atomic_read(v)
60779 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60780 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60781 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60782 +#define atomic_inc_unchecked(v) atomic_inc(v)
60783 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60784 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60785 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60786 +#define atomic_dec_unchecked(v) atomic_dec(v)
60787 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60788 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60789 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
60790 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
60791 +
60792 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
60793 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60794 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60795 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60796 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60797 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60798 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60799 +#endif
60800 +
60801 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60802 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
60803 index 1ced641..c896ee8 100644
60804 --- a/include/asm-generic/atomic.h
60805 +++ b/include/asm-generic/atomic.h
60806 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
60807 * Atomically clears the bits set in @mask from @v
60808 */
60809 #ifndef atomic_clear_mask
60810 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
60811 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
60812 {
60813 unsigned long flags;
60814
60815 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60816 index b18ce4f..2ee2843 100644
60817 --- a/include/asm-generic/atomic64.h
60818 +++ b/include/asm-generic/atomic64.h
60819 @@ -16,6 +16,8 @@ typedef struct {
60820 long long counter;
60821 } atomic64_t;
60822
60823 +typedef atomic64_t atomic64_unchecked_t;
60824 +
60825 #define ATOMIC64_INIT(i) { (i) }
60826
60827 extern long long atomic64_read(const atomic64_t *v);
60828 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60829 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60830 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60831
60832 +#define atomic64_read_unchecked(v) atomic64_read(v)
60833 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60834 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60835 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60836 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60837 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
60838 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60839 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
60840 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60841 +
60842 #endif /* _ASM_GENERIC_ATOMIC64_H */
60843 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60844 index 1bfcfe5..e04c5c9 100644
60845 --- a/include/asm-generic/cache.h
60846 +++ b/include/asm-generic/cache.h
60847 @@ -6,7 +6,7 @@
60848 * cache lines need to provide their own cache.h.
60849 */
60850
60851 -#define L1_CACHE_SHIFT 5
60852 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60853 +#define L1_CACHE_SHIFT 5UL
60854 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60855
60856 #endif /* __ASM_GENERIC_CACHE_H */
60857 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60858 index 0d68a1e..b74a761 100644
60859 --- a/include/asm-generic/emergency-restart.h
60860 +++ b/include/asm-generic/emergency-restart.h
60861 @@ -1,7 +1,7 @@
60862 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60863 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60864
60865 -static inline void machine_emergency_restart(void)
60866 +static inline __noreturn void machine_emergency_restart(void)
60867 {
60868 machine_restart(NULL);
60869 }
60870 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60871 index 0232ccb..13d9165 100644
60872 --- a/include/asm-generic/kmap_types.h
60873 +++ b/include/asm-generic/kmap_types.h
60874 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60875 KMAP_D(17) KM_NMI,
60876 KMAP_D(18) KM_NMI_PTE,
60877 KMAP_D(19) KM_KDB,
60878 +KMAP_D(20) KM_CLEARPAGE,
60879 /*
60880 * Remember to update debug_kmap_atomic() when adding new kmap types!
60881 */
60882 -KMAP_D(20) KM_TYPE_NR
60883 +KMAP_D(21) KM_TYPE_NR
60884 };
60885
60886 #undef KMAP_D
60887 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60888 index 9ceb03b..2efbcbd 100644
60889 --- a/include/asm-generic/local.h
60890 +++ b/include/asm-generic/local.h
60891 @@ -39,6 +39,7 @@ typedef struct
60892 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60893 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60894 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60895 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60896
60897 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60898 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
60899 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60900 index 725612b..9cc513a 100644
60901 --- a/include/asm-generic/pgtable-nopmd.h
60902 +++ b/include/asm-generic/pgtable-nopmd.h
60903 @@ -1,14 +1,19 @@
60904 #ifndef _PGTABLE_NOPMD_H
60905 #define _PGTABLE_NOPMD_H
60906
60907 -#ifndef __ASSEMBLY__
60908 -
60909 #include <asm-generic/pgtable-nopud.h>
60910
60911 -struct mm_struct;
60912 -
60913 #define __PAGETABLE_PMD_FOLDED
60914
60915 +#define PMD_SHIFT PUD_SHIFT
60916 +#define PTRS_PER_PMD 1
60917 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60918 +#define PMD_MASK (~(PMD_SIZE-1))
60919 +
60920 +#ifndef __ASSEMBLY__
60921 +
60922 +struct mm_struct;
60923 +
60924 /*
60925 * Having the pmd type consist of a pud gets the size right, and allows
60926 * us to conceptually access the pud entry that this pmd is folded into
60927 @@ -16,11 +21,6 @@ struct mm_struct;
60928 */
60929 typedef struct { pud_t pud; } pmd_t;
60930
60931 -#define PMD_SHIFT PUD_SHIFT
60932 -#define PTRS_PER_PMD 1
60933 -#define PMD_SIZE (1UL << PMD_SHIFT)
60934 -#define PMD_MASK (~(PMD_SIZE-1))
60935 -
60936 /*
60937 * The "pud_xxx()" functions here are trivial for a folded two-level
60938 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60939 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60940 index 810431d..0ec4804f 100644
60941 --- a/include/asm-generic/pgtable-nopud.h
60942 +++ b/include/asm-generic/pgtable-nopud.h
60943 @@ -1,10 +1,15 @@
60944 #ifndef _PGTABLE_NOPUD_H
60945 #define _PGTABLE_NOPUD_H
60946
60947 -#ifndef __ASSEMBLY__
60948 -
60949 #define __PAGETABLE_PUD_FOLDED
60950
60951 +#define PUD_SHIFT PGDIR_SHIFT
60952 +#define PTRS_PER_PUD 1
60953 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60954 +#define PUD_MASK (~(PUD_SIZE-1))
60955 +
60956 +#ifndef __ASSEMBLY__
60957 +
60958 /*
60959 * Having the pud type consist of a pgd gets the size right, and allows
60960 * us to conceptually access the pgd entry that this pud is folded into
60961 @@ -12,11 +17,6 @@
60962 */
60963 typedef struct { pgd_t pgd; } pud_t;
60964
60965 -#define PUD_SHIFT PGDIR_SHIFT
60966 -#define PTRS_PER_PUD 1
60967 -#define PUD_SIZE (1UL << PUD_SHIFT)
60968 -#define PUD_MASK (~(PUD_SIZE-1))
60969 -
60970 /*
60971 * The "pgd_xxx()" functions here are trivial for a folded two-level
60972 * setup: the pud is never bad, and a pud always exists (as it's folded
60973 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
60974 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
60975
60976 #define pgd_populate(mm, pgd, pud) do { } while (0)
60977 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
60978 /*
60979 * (puds are folded into pgds so this doesn't get actually called,
60980 * but the define is needed for a generic inline function.)
60981 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60982 index ff4947b..f48183f 100644
60983 --- a/include/asm-generic/pgtable.h
60984 +++ b/include/asm-generic/pgtable.h
60985 @@ -530,6 +530,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60986 #endif
60987 }
60988
60989 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60990 +static inline unsigned long pax_open_kernel(void) { return 0; }
60991 +#endif
60992 +
60993 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60994 +static inline unsigned long pax_close_kernel(void) { return 0; }
60995 +#endif
60996 +
60997 #endif /* CONFIG_MMU */
60998
60999 #endif /* !__ASSEMBLY__ */
61000 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
61001 index 4e2e1cc..12c266b 100644
61002 --- a/include/asm-generic/vmlinux.lds.h
61003 +++ b/include/asm-generic/vmlinux.lds.h
61004 @@ -218,6 +218,7 @@
61005 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
61006 VMLINUX_SYMBOL(__start_rodata) = .; \
61007 *(.rodata) *(.rodata.*) \
61008 + *(.data..read_only) \
61009 *(__vermagic) /* Kernel version magic */ \
61010 . = ALIGN(8); \
61011 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
61012 @@ -716,17 +717,18 @@
61013 * section in the linker script will go there too. @phdr should have
61014 * a leading colon.
61015 *
61016 - * Note that this macros defines __per_cpu_load as an absolute symbol.
61017 + * Note that this macros defines per_cpu_load as an absolute symbol.
61018 * If there is no need to put the percpu section at a predetermined
61019 * address, use PERCPU_SECTION.
61020 */
61021 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
61022 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
61023 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
61024 + per_cpu_load = .; \
61025 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
61026 - LOAD_OFFSET) { \
61027 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
61028 PERCPU_INPUT(cacheline) \
61029 } phdr \
61030 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
61031 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
61032
61033 /**
61034 * PERCPU_SECTION - define output section for percpu area, simple version
61035 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
61036 index 31ad880..4e79884 100644
61037 --- a/include/drm/drmP.h
61038 +++ b/include/drm/drmP.h
61039 @@ -72,6 +72,7 @@
61040 #include <linux/workqueue.h>
61041 #include <linux/poll.h>
61042 #include <asm/pgalloc.h>
61043 +#include <asm/local.h>
61044 #include "drm.h"
61045
61046 #include <linux/idr.h>
61047 @@ -1074,7 +1075,7 @@ struct drm_device {
61048
61049 /** \name Usage Counters */
61050 /*@{ */
61051 - int open_count; /**< Outstanding files open */
61052 + local_t open_count; /**< Outstanding files open */
61053 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
61054 atomic_t vma_count; /**< Outstanding vma areas open */
61055 int buf_use; /**< Buffers in use -- cannot alloc */
61056 @@ -1085,7 +1086,7 @@ struct drm_device {
61057 /*@{ */
61058 unsigned long counters;
61059 enum drm_stat_type types[15];
61060 - atomic_t counts[15];
61061 + atomic_unchecked_t counts[15];
61062 /*@} */
61063
61064 struct list_head filelist;
61065 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
61066 index 7988e55..ec974c9 100644
61067 --- a/include/drm/drm_crtc_helper.h
61068 +++ b/include/drm/drm_crtc_helper.h
61069 @@ -81,7 +81,7 @@ struct drm_crtc_helper_funcs {
61070
61071 /* disable crtc when not in use - more explicit than dpms off */
61072 void (*disable)(struct drm_crtc *crtc);
61073 -};
61074 +} __no_const;
61075
61076 /**
61077 * drm_encoder_helper_funcs - helper operations for encoders
61078 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
61079 struct drm_connector *connector);
61080 /* disable encoder when not in use - more explicit than dpms off */
61081 void (*disable)(struct drm_encoder *encoder);
61082 -};
61083 +} __no_const;
61084
61085 /**
61086 * drm_connector_helper_funcs - helper operations for connectors
61087 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
61088 index d6d1da4..fdd1ac5 100644
61089 --- a/include/drm/ttm/ttm_memory.h
61090 +++ b/include/drm/ttm/ttm_memory.h
61091 @@ -48,7 +48,7 @@
61092
61093 struct ttm_mem_shrink {
61094 int (*do_shrink) (struct ttm_mem_shrink *);
61095 -};
61096 +} __no_const;
61097
61098 /**
61099 * struct ttm_mem_global - Global memory accounting structure.
61100 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
61101 index e86dfca..40cc55f 100644
61102 --- a/include/linux/a.out.h
61103 +++ b/include/linux/a.out.h
61104 @@ -39,6 +39,14 @@ enum machine_type {
61105 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
61106 };
61107
61108 +/* Constants for the N_FLAGS field */
61109 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61110 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
61111 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
61112 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
61113 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61114 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61115 +
61116 #if !defined (N_MAGIC)
61117 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
61118 #endif
61119 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
61120 index 06fd4bb..1caec0d 100644
61121 --- a/include/linux/atmdev.h
61122 +++ b/include/linux/atmdev.h
61123 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
61124 #endif
61125
61126 struct k_atm_aal_stats {
61127 -#define __HANDLE_ITEM(i) atomic_t i
61128 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61129 __AAL_STAT_ITEMS
61130 #undef __HANDLE_ITEM
61131 };
61132 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
61133 index 366422b..1fa7f84 100644
61134 --- a/include/linux/binfmts.h
61135 +++ b/include/linux/binfmts.h
61136 @@ -89,6 +89,7 @@ struct linux_binfmt {
61137 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
61138 int (*load_shlib)(struct file *);
61139 int (*core_dump)(struct coredump_params *cprm);
61140 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
61141 unsigned long min_coredump; /* minimal dump size */
61142 };
61143
61144 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
61145 index 07954b0..cb2ae71 100644
61146 --- a/include/linux/blkdev.h
61147 +++ b/include/linux/blkdev.h
61148 @@ -1393,7 +1393,7 @@ struct block_device_operations {
61149 /* this callback is with swap_lock and sometimes page table lock held */
61150 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
61151 struct module *owner;
61152 -};
61153 +} __do_const;
61154
61155 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
61156 unsigned long);
61157 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
61158 index 4d1a074..88f929a 100644
61159 --- a/include/linux/blktrace_api.h
61160 +++ b/include/linux/blktrace_api.h
61161 @@ -162,7 +162,7 @@ struct blk_trace {
61162 struct dentry *dir;
61163 struct dentry *dropped_file;
61164 struct dentry *msg_file;
61165 - atomic_t dropped;
61166 + atomic_unchecked_t dropped;
61167 };
61168
61169 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
61170 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
61171 index 83195fb..0b0f77d 100644
61172 --- a/include/linux/byteorder/little_endian.h
61173 +++ b/include/linux/byteorder/little_endian.h
61174 @@ -42,51 +42,51 @@
61175
61176 static inline __le64 __cpu_to_le64p(const __u64 *p)
61177 {
61178 - return (__force __le64)*p;
61179 + return (__force const __le64)*p;
61180 }
61181 static inline __u64 __le64_to_cpup(const __le64 *p)
61182 {
61183 - return (__force __u64)*p;
61184 + return (__force const __u64)*p;
61185 }
61186 static inline __le32 __cpu_to_le32p(const __u32 *p)
61187 {
61188 - return (__force __le32)*p;
61189 + return (__force const __le32)*p;
61190 }
61191 static inline __u32 __le32_to_cpup(const __le32 *p)
61192 {
61193 - return (__force __u32)*p;
61194 + return (__force const __u32)*p;
61195 }
61196 static inline __le16 __cpu_to_le16p(const __u16 *p)
61197 {
61198 - return (__force __le16)*p;
61199 + return (__force const __le16)*p;
61200 }
61201 static inline __u16 __le16_to_cpup(const __le16 *p)
61202 {
61203 - return (__force __u16)*p;
61204 + return (__force const __u16)*p;
61205 }
61206 static inline __be64 __cpu_to_be64p(const __u64 *p)
61207 {
61208 - return (__force __be64)__swab64p(p);
61209 + return (__force const __be64)__swab64p(p);
61210 }
61211 static inline __u64 __be64_to_cpup(const __be64 *p)
61212 {
61213 - return __swab64p((__u64 *)p);
61214 + return __swab64p((const __u64 *)p);
61215 }
61216 static inline __be32 __cpu_to_be32p(const __u32 *p)
61217 {
61218 - return (__force __be32)__swab32p(p);
61219 + return (__force const __be32)__swab32p(p);
61220 }
61221 static inline __u32 __be32_to_cpup(const __be32 *p)
61222 {
61223 - return __swab32p((__u32 *)p);
61224 + return __swab32p((const __u32 *)p);
61225 }
61226 static inline __be16 __cpu_to_be16p(const __u16 *p)
61227 {
61228 - return (__force __be16)__swab16p(p);
61229 + return (__force const __be16)__swab16p(p);
61230 }
61231 static inline __u16 __be16_to_cpup(const __be16 *p)
61232 {
61233 - return __swab16p((__u16 *)p);
61234 + return __swab16p((const __u16 *)p);
61235 }
61236 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
61237 #define __le64_to_cpus(x) do { (void)(x); } while (0)
61238 diff --git a/include/linux/cache.h b/include/linux/cache.h
61239 index 4c57065..4307975 100644
61240 --- a/include/linux/cache.h
61241 +++ b/include/linux/cache.h
61242 @@ -16,6 +16,10 @@
61243 #define __read_mostly
61244 #endif
61245
61246 +#ifndef __read_only
61247 +#define __read_only __read_mostly
61248 +#endif
61249 +
61250 #ifndef ____cacheline_aligned
61251 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
61252 #endif
61253 diff --git a/include/linux/capability.h b/include/linux/capability.h
61254 index d10b7ed..0288b79 100644
61255 --- a/include/linux/capability.h
61256 +++ b/include/linux/capability.h
61257 @@ -553,10 +553,15 @@ extern bool capable(int cap);
61258 extern bool ns_capable(struct user_namespace *ns, int cap);
61259 extern bool nsown_capable(int cap);
61260 extern bool inode_capable(const struct inode *inode, int cap);
61261 +extern bool capable_nolog(int cap);
61262 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
61263 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
61264
61265 /* audit system wants to get cap info from files as well */
61266 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
61267
61268 +extern int is_privileged_binary(const struct dentry *dentry);
61269 +
61270 #endif /* __KERNEL__ */
61271
61272 #endif /* !_LINUX_CAPABILITY_H */
61273 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
61274 index 42e55de..1cd0e66 100644
61275 --- a/include/linux/cleancache.h
61276 +++ b/include/linux/cleancache.h
61277 @@ -31,7 +31,7 @@ struct cleancache_ops {
61278 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
61279 void (*invalidate_inode)(int, struct cleancache_filekey);
61280 void (*invalidate_fs)(int);
61281 -};
61282 +} __no_const;
61283
61284 extern struct cleancache_ops
61285 cleancache_register_ops(struct cleancache_ops *ops);
61286 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
61287 index 4a0b483..f1f70ba 100644
61288 --- a/include/linux/clk-provider.h
61289 +++ b/include/linux/clk-provider.h
61290 @@ -110,6 +110,7 @@ struct clk_ops {
61291 unsigned long);
61292 void (*init)(struct clk_hw *hw);
61293 };
61294 +typedef struct clk_ops __no_const clk_ops_no_const;
61295
61296 /**
61297 * struct clk_init_data - holds init data that's common to all clocks and is
61298 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
61299 index 2f40791..938880e 100644
61300 --- a/include/linux/compiler-gcc4.h
61301 +++ b/include/linux/compiler-gcc4.h
61302 @@ -32,6 +32,21 @@
61303 #define __linktime_error(message) __attribute__((__error__(message)))
61304
61305 #if __GNUC_MINOR__ >= 5
61306 +
61307 +#ifdef CONSTIFY_PLUGIN
61308 +#define __no_const __attribute__((no_const))
61309 +#define __do_const __attribute__((do_const))
61310 +#endif
61311 +
61312 +#ifdef SIZE_OVERFLOW_PLUGIN
61313 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
61314 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
61315 +#endif
61316 +
61317 +#ifdef LATENT_ENTROPY_PLUGIN
61318 +#define __latent_entropy __attribute__((latent_entropy))
61319 +#endif
61320 +
61321 /*
61322 * Mark a position in code as unreachable. This can be used to
61323 * suppress control flow warnings after asm blocks that transfer
61324 @@ -47,6 +62,11 @@
61325 #define __noclone __attribute__((__noclone__))
61326
61327 #endif
61328 +
61329 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
61330 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
61331 +#define __bos0(ptr) __bos((ptr), 0)
61332 +#define __bos1(ptr) __bos((ptr), 1)
61333 #endif
61334
61335 #if __GNUC_MINOR__ > 0
61336 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
61337 index 923d093..3625de1 100644
61338 --- a/include/linux/compiler.h
61339 +++ b/include/linux/compiler.h
61340 @@ -5,31 +5,62 @@
61341
61342 #ifdef __CHECKER__
61343 # define __user __attribute__((noderef, address_space(1)))
61344 +# define __force_user __force __user
61345 # define __kernel __attribute__((address_space(0)))
61346 +# define __force_kernel __force __kernel
61347 # define __safe __attribute__((safe))
61348 # define __force __attribute__((force))
61349 # define __nocast __attribute__((nocast))
61350 # define __iomem __attribute__((noderef, address_space(2)))
61351 +# define __force_iomem __force __iomem
61352 # define __acquires(x) __attribute__((context(x,0,1)))
61353 # define __releases(x) __attribute__((context(x,1,0)))
61354 # define __acquire(x) __context__(x,1)
61355 # define __release(x) __context__(x,-1)
61356 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61357 # define __percpu __attribute__((noderef, address_space(3)))
61358 +# define __force_percpu __force __percpu
61359 #ifdef CONFIG_SPARSE_RCU_POINTER
61360 # define __rcu __attribute__((noderef, address_space(4)))
61361 +# define __force_rcu __force __rcu
61362 #else
61363 # define __rcu
61364 +# define __force_rcu
61365 #endif
61366 extern void __chk_user_ptr(const volatile void __user *);
61367 extern void __chk_io_ptr(const volatile void __iomem *);
61368 +#elif defined(CHECKER_PLUGIN)
61369 +//# define __user
61370 +//# define __force_user
61371 +//# define __kernel
61372 +//# define __force_kernel
61373 +# define __safe
61374 +# define __force
61375 +# define __nocast
61376 +# define __iomem
61377 +# define __force_iomem
61378 +# define __chk_user_ptr(x) (void)0
61379 +# define __chk_io_ptr(x) (void)0
61380 +# define __builtin_warning(x, y...) (1)
61381 +# define __acquires(x)
61382 +# define __releases(x)
61383 +# define __acquire(x) (void)0
61384 +# define __release(x) (void)0
61385 +# define __cond_lock(x,c) (c)
61386 +# define __percpu
61387 +# define __force_percpu
61388 +# define __rcu
61389 +# define __force_rcu
61390 #else
61391 # define __user
61392 +# define __force_user
61393 # define __kernel
61394 +# define __force_kernel
61395 # define __safe
61396 # define __force
61397 # define __nocast
61398 # define __iomem
61399 +# define __force_iomem
61400 # define __chk_user_ptr(x) (void)0
61401 # define __chk_io_ptr(x) (void)0
61402 # define __builtin_warning(x, y...) (1)
61403 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
61404 # define __release(x) (void)0
61405 # define __cond_lock(x,c) (c)
61406 # define __percpu
61407 +# define __force_percpu
61408 # define __rcu
61409 +# define __force_rcu
61410 #endif
61411
61412 #ifdef __KERNEL__
61413 @@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61414 # define __attribute_const__ /* unimplemented */
61415 #endif
61416
61417 +#ifndef __no_const
61418 +# define __no_const
61419 +#endif
61420 +
61421 +#ifndef __do_const
61422 +# define __do_const
61423 +#endif
61424 +
61425 +#ifndef __size_overflow
61426 +# define __size_overflow(...)
61427 +#endif
61428 +
61429 +#ifndef __latent_entropy
61430 +# define __latent_entropy
61431 +#endif
61432 +
61433 +#ifndef __intentional_overflow
61434 +# define __intentional_overflow(...)
61435 +#endif
61436 +
61437 /*
61438 * Tell gcc if a function is cold. The compiler will assume any path
61439 * directly leading to the call is unlikely.
61440 @@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61441 #define __cold
61442 #endif
61443
61444 +#ifndef __alloc_size
61445 +#define __alloc_size(...)
61446 +#endif
61447 +
61448 +#ifndef __bos
61449 +#define __bos(ptr, arg)
61450 +#endif
61451 +
61452 +#ifndef __bos0
61453 +#define __bos0(ptr)
61454 +#endif
61455 +
61456 +#ifndef __bos1
61457 +#define __bos1(ptr)
61458 +#endif
61459 +
61460 /* Simple shorthand for a section definition */
61461 #ifndef __section
61462 # define __section(S) __attribute__ ((__section__(#S)))
61463 @@ -308,6 +377,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61464 * use is to mediate communication between process-level code and irq/NMI
61465 * handlers, all running on the same CPU.
61466 */
61467 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61468 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61469 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61470
61471 #endif /* __LINUX_COMPILER_H */
61472 diff --git a/include/linux/cred.h b/include/linux/cred.h
61473 index ebbed2c..908cc2c 100644
61474 --- a/include/linux/cred.h
61475 +++ b/include/linux/cred.h
61476 @@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61477 static inline void validate_process_creds(void)
61478 {
61479 }
61480 +static inline void validate_task_creds(struct task_struct *task)
61481 +{
61482 +}
61483 #endif
61484
61485 /**
61486 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
61487 index b92eadf..b4ecdc1 100644
61488 --- a/include/linux/crypto.h
61489 +++ b/include/linux/crypto.h
61490 @@ -373,7 +373,7 @@ struct cipher_tfm {
61491 const u8 *key, unsigned int keylen);
61492 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61493 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61494 -};
61495 +} __no_const;
61496
61497 struct hash_tfm {
61498 int (*init)(struct hash_desc *desc);
61499 @@ -394,13 +394,13 @@ struct compress_tfm {
61500 int (*cot_decompress)(struct crypto_tfm *tfm,
61501 const u8 *src, unsigned int slen,
61502 u8 *dst, unsigned int *dlen);
61503 -};
61504 +} __no_const;
61505
61506 struct rng_tfm {
61507 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61508 unsigned int dlen);
61509 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61510 -};
61511 +} __no_const;
61512
61513 #define crt_ablkcipher crt_u.ablkcipher
61514 #define crt_aead crt_u.aead
61515 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61516 index 7925bf0..d5143d2 100644
61517 --- a/include/linux/decompress/mm.h
61518 +++ b/include/linux/decompress/mm.h
61519 @@ -77,7 +77,7 @@ static void free(void *where)
61520 * warnings when not needed (indeed large_malloc / large_free are not
61521 * needed by inflate */
61522
61523 -#define malloc(a) kmalloc(a, GFP_KERNEL)
61524 +#define malloc(a) kmalloc((a), GFP_KERNEL)
61525 #define free(a) kfree(a)
61526
61527 #define large_malloc(a) vmalloc(a)
61528 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61529 index dfc099e..e583e66 100644
61530 --- a/include/linux/dma-mapping.h
61531 +++ b/include/linux/dma-mapping.h
61532 @@ -51,7 +51,7 @@ struct dma_map_ops {
61533 u64 (*get_required_mask)(struct device *dev);
61534 #endif
61535 int is_phys;
61536 -};
61537 +} __do_const;
61538
61539 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61540
61541 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
61542 index 56377df..4eb4990 100644
61543 --- a/include/linux/dmaengine.h
61544 +++ b/include/linux/dmaengine.h
61545 @@ -1007,9 +1007,9 @@ struct dma_pinned_list {
61546 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
61547 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
61548
61549 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61550 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61551 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
61552 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61553 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61554 struct dma_pinned_list *pinned_list, struct page *page,
61555 unsigned int offset, size_t len);
61556
61557 diff --git a/include/linux/efi.h b/include/linux/efi.h
61558 index ec45ccd..9923c32 100644
61559 --- a/include/linux/efi.h
61560 +++ b/include/linux/efi.h
61561 @@ -635,7 +635,7 @@ struct efivar_operations {
61562 efi_get_variable_t *get_variable;
61563 efi_get_next_variable_t *get_next_variable;
61564 efi_set_variable_t *set_variable;
61565 -};
61566 +} __no_const;
61567
61568 struct efivars {
61569 /*
61570 diff --git a/include/linux/elf.h b/include/linux/elf.h
61571 index 999b4f5..57753b4 100644
61572 --- a/include/linux/elf.h
61573 +++ b/include/linux/elf.h
61574 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
61575 #define PT_GNU_EH_FRAME 0x6474e550
61576
61577 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61578 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61579 +
61580 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61581 +
61582 +/* Constants for the e_flags field */
61583 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61584 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61585 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61586 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61587 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61588 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61589
61590 /*
61591 * Extended Numbering
61592 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
61593 #define DT_DEBUG 21
61594 #define DT_TEXTREL 22
61595 #define DT_JMPREL 23
61596 +#define DT_FLAGS 30
61597 + #define DF_TEXTREL 0x00000004
61598 #define DT_ENCODING 32
61599 #define OLD_DT_LOOS 0x60000000
61600 #define DT_LOOS 0x6000000d
61601 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
61602 #define PF_W 0x2
61603 #define PF_X 0x1
61604
61605 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61606 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61607 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61608 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61609 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61610 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61611 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61612 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61613 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61614 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61615 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61616 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61617 +
61618 typedef struct elf32_phdr{
61619 Elf32_Word p_type;
61620 Elf32_Off p_offset;
61621 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
61622 #define EI_OSABI 7
61623 #define EI_PAD 8
61624
61625 +#define EI_PAX 14
61626 +
61627 #define ELFMAG0 0x7f /* EI_MAG */
61628 #define ELFMAG1 'E'
61629 #define ELFMAG2 'L'
61630 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
61631 #define elf_note elf32_note
61632 #define elf_addr_t Elf32_Off
61633 #define Elf_Half Elf32_Half
61634 +#define elf_dyn Elf32_Dyn
61635
61636 #else
61637
61638 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
61639 #define elf_note elf64_note
61640 #define elf_addr_t Elf64_Off
61641 #define Elf_Half Elf64_Half
61642 +#define elf_dyn Elf64_Dyn
61643
61644 #endif
61645
61646 diff --git a/include/linux/filter.h b/include/linux/filter.h
61647 index 82b0135..917914d 100644
61648 --- a/include/linux/filter.h
61649 +++ b/include/linux/filter.h
61650 @@ -146,6 +146,7 @@ struct compat_sock_fprog {
61651
61652 struct sk_buff;
61653 struct sock;
61654 +struct bpf_jit_work;
61655
61656 struct sk_filter
61657 {
61658 @@ -153,6 +154,9 @@ struct sk_filter
61659 unsigned int len; /* Number of filter blocks */
61660 unsigned int (*bpf_func)(const struct sk_buff *skb,
61661 const struct sock_filter *filter);
61662 +#ifdef CONFIG_BPF_JIT
61663 + struct bpf_jit_work *work;
61664 +#endif
61665 struct rcu_head rcu;
61666 struct sock_filter insns[0];
61667 };
61668 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61669 index 7edcf10..714d5e8 100644
61670 --- a/include/linux/firewire.h
61671 +++ b/include/linux/firewire.h
61672 @@ -430,7 +430,7 @@ struct fw_iso_context {
61673 union {
61674 fw_iso_callback_t sc;
61675 fw_iso_mc_callback_t mc;
61676 - } callback;
61677 + } __no_const callback;
61678 void *callback_data;
61679 };
61680
61681 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
61682 index 0e4e2ee..4ff4312 100644
61683 --- a/include/linux/frontswap.h
61684 +++ b/include/linux/frontswap.h
61685 @@ -11,7 +11,7 @@ struct frontswap_ops {
61686 int (*load)(unsigned, pgoff_t, struct page *);
61687 void (*invalidate_page)(unsigned, pgoff_t);
61688 void (*invalidate_area)(unsigned);
61689 -};
61690 +} __no_const;
61691
61692 extern bool frontswap_enabled;
61693 extern struct frontswap_ops
61694 diff --git a/include/linux/fs.h b/include/linux/fs.h
61695 index 17fd887..8eebca0 100644
61696 --- a/include/linux/fs.h
61697 +++ b/include/linux/fs.h
61698 @@ -1663,7 +1663,8 @@ struct file_operations {
61699 int (*setlease)(struct file *, long, struct file_lock **);
61700 long (*fallocate)(struct file *file, int mode, loff_t offset,
61701 loff_t len);
61702 -};
61703 +} __do_const;
61704 +typedef struct file_operations __no_const file_operations_no_const;
61705
61706 struct inode_operations {
61707 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61708 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61709 index 003dc0f..3c4ea97 100644
61710 --- a/include/linux/fs_struct.h
61711 +++ b/include/linux/fs_struct.h
61712 @@ -6,7 +6,7 @@
61713 #include <linux/seqlock.h>
61714
61715 struct fs_struct {
61716 - int users;
61717 + atomic_t users;
61718 spinlock_t lock;
61719 seqcount_t seq;
61720 int umask;
61721 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61722 index ce31408..b1ad003 100644
61723 --- a/include/linux/fscache-cache.h
61724 +++ b/include/linux/fscache-cache.h
61725 @@ -102,7 +102,7 @@ struct fscache_operation {
61726 fscache_operation_release_t release;
61727 };
61728
61729 -extern atomic_t fscache_op_debug_id;
61730 +extern atomic_unchecked_t fscache_op_debug_id;
61731 extern void fscache_op_work_func(struct work_struct *work);
61732
61733 extern void fscache_enqueue_operation(struct fscache_operation *);
61734 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61735 {
61736 INIT_WORK(&op->work, fscache_op_work_func);
61737 atomic_set(&op->usage, 1);
61738 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61739 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61740 op->processor = processor;
61741 op->release = release;
61742 INIT_LIST_HEAD(&op->pend_link);
61743 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61744 index a6dfe69..569586df 100644
61745 --- a/include/linux/fsnotify.h
61746 +++ b/include/linux/fsnotify.h
61747 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61748 */
61749 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61750 {
61751 - return kstrdup(name, GFP_KERNEL);
61752 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61753 }
61754
61755 /*
61756 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61757 index 63d966d..cdcb717 100644
61758 --- a/include/linux/fsnotify_backend.h
61759 +++ b/include/linux/fsnotify_backend.h
61760 @@ -105,6 +105,7 @@ struct fsnotify_ops {
61761 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61762 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61763 };
61764 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61765
61766 /*
61767 * A group is a "thing" that wants to receive notification about filesystem
61768 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61769 index 176a939..1462211 100644
61770 --- a/include/linux/ftrace_event.h
61771 +++ b/include/linux/ftrace_event.h
61772 @@ -97,7 +97,7 @@ struct trace_event_functions {
61773 trace_print_func raw;
61774 trace_print_func hex;
61775 trace_print_func binary;
61776 -};
61777 +} __no_const;
61778
61779 struct trace_event {
61780 struct hlist_node node;
61781 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61782 extern int trace_add_event_call(struct ftrace_event_call *call);
61783 extern void trace_remove_event_call(struct ftrace_event_call *call);
61784
61785 -#define is_signed_type(type) (((type)(-1)) < 0)
61786 +#define is_signed_type(type) (((type)(-1)) < (type)1)
61787
61788 int trace_set_clr_event(const char *system, const char *event, int set);
61789
61790 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61791 index 017a7fb..33a8507 100644
61792 --- a/include/linux/genhd.h
61793 +++ b/include/linux/genhd.h
61794 @@ -185,7 +185,7 @@ struct gendisk {
61795 struct kobject *slave_dir;
61796
61797 struct timer_rand_state *random;
61798 - atomic_t sync_io; /* RAID */
61799 + atomic_unchecked_t sync_io; /* RAID */
61800 struct disk_events *ev;
61801 #ifdef CONFIG_BLK_DEV_INTEGRITY
61802 struct blk_integrity *integrity;
61803 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
61804 index 1e49be4..b8a9305 100644
61805 --- a/include/linux/gfp.h
61806 +++ b/include/linux/gfp.h
61807 @@ -38,6 +38,12 @@ struct vm_area_struct;
61808 #define ___GFP_OTHER_NODE 0x800000u
61809 #define ___GFP_WRITE 0x1000000u
61810
61811 +#ifdef CONFIG_PAX_USERCOPY_SLABS
61812 +#define ___GFP_USERCOPY 0x2000000u
61813 +#else
61814 +#define ___GFP_USERCOPY 0
61815 +#endif
61816 +
61817 /*
61818 * GFP bitmasks..
61819 *
61820 @@ -87,6 +93,7 @@ struct vm_area_struct;
61821 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
61822 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
61823 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
61824 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
61825
61826 /*
61827 * This may seem redundant, but it's a way of annotating false positives vs.
61828 @@ -94,7 +101,7 @@ struct vm_area_struct;
61829 */
61830 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
61831
61832 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
61833 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
61834 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
61835
61836 /* This equals 0, but use constants in case they ever change */
61837 @@ -148,6 +155,8 @@ struct vm_area_struct;
61838 /* 4GB DMA on some platforms */
61839 #define GFP_DMA32 __GFP_DMA32
61840
61841 +#define GFP_USERCOPY __GFP_USERCOPY
61842 +
61843 /* Convert GFP flags to their corresponding migrate type */
61844 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
61845 {
61846 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61847 new file mode 100644
61848 index 0000000..c938b1f
61849 --- /dev/null
61850 +++ b/include/linux/gracl.h
61851 @@ -0,0 +1,319 @@
61852 +#ifndef GR_ACL_H
61853 +#define GR_ACL_H
61854 +
61855 +#include <linux/grdefs.h>
61856 +#include <linux/resource.h>
61857 +#include <linux/capability.h>
61858 +#include <linux/dcache.h>
61859 +#include <asm/resource.h>
61860 +
61861 +/* Major status information */
61862 +
61863 +#define GR_VERSION "grsecurity 2.9.1"
61864 +#define GRSECURITY_VERSION 0x2901
61865 +
61866 +enum {
61867 + GR_SHUTDOWN = 0,
61868 + GR_ENABLE = 1,
61869 + GR_SPROLE = 2,
61870 + GR_RELOAD = 3,
61871 + GR_SEGVMOD = 4,
61872 + GR_STATUS = 5,
61873 + GR_UNSPROLE = 6,
61874 + GR_PASSSET = 7,
61875 + GR_SPROLEPAM = 8,
61876 +};
61877 +
61878 +/* Password setup definitions
61879 + * kernel/grhash.c */
61880 +enum {
61881 + GR_PW_LEN = 128,
61882 + GR_SALT_LEN = 16,
61883 + GR_SHA_LEN = 32,
61884 +};
61885 +
61886 +enum {
61887 + GR_SPROLE_LEN = 64,
61888 +};
61889 +
61890 +enum {
61891 + GR_NO_GLOB = 0,
61892 + GR_REG_GLOB,
61893 + GR_CREATE_GLOB
61894 +};
61895 +
61896 +#define GR_NLIMITS 32
61897 +
61898 +/* Begin Data Structures */
61899 +
61900 +struct sprole_pw {
61901 + unsigned char *rolename;
61902 + unsigned char salt[GR_SALT_LEN];
61903 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61904 +};
61905 +
61906 +struct name_entry {
61907 + __u32 key;
61908 + ino_t inode;
61909 + dev_t device;
61910 + char *name;
61911 + __u16 len;
61912 + __u8 deleted;
61913 + struct name_entry *prev;
61914 + struct name_entry *next;
61915 +};
61916 +
61917 +struct inodev_entry {
61918 + struct name_entry *nentry;
61919 + struct inodev_entry *prev;
61920 + struct inodev_entry *next;
61921 +};
61922 +
61923 +struct acl_role_db {
61924 + struct acl_role_label **r_hash;
61925 + __u32 r_size;
61926 +};
61927 +
61928 +struct inodev_db {
61929 + struct inodev_entry **i_hash;
61930 + __u32 i_size;
61931 +};
61932 +
61933 +struct name_db {
61934 + struct name_entry **n_hash;
61935 + __u32 n_size;
61936 +};
61937 +
61938 +struct crash_uid {
61939 + uid_t uid;
61940 + unsigned long expires;
61941 +};
61942 +
61943 +struct gr_hash_struct {
61944 + void **table;
61945 + void **nametable;
61946 + void *first;
61947 + __u32 table_size;
61948 + __u32 used_size;
61949 + int type;
61950 +};
61951 +
61952 +/* Userspace Grsecurity ACL data structures */
61953 +
61954 +struct acl_subject_label {
61955 + char *filename;
61956 + ino_t inode;
61957 + dev_t device;
61958 + __u32 mode;
61959 + kernel_cap_t cap_mask;
61960 + kernel_cap_t cap_lower;
61961 + kernel_cap_t cap_invert_audit;
61962 +
61963 + struct rlimit res[GR_NLIMITS];
61964 + __u32 resmask;
61965 +
61966 + __u8 user_trans_type;
61967 + __u8 group_trans_type;
61968 + uid_t *user_transitions;
61969 + gid_t *group_transitions;
61970 + __u16 user_trans_num;
61971 + __u16 group_trans_num;
61972 +
61973 + __u32 sock_families[2];
61974 + __u32 ip_proto[8];
61975 + __u32 ip_type;
61976 + struct acl_ip_label **ips;
61977 + __u32 ip_num;
61978 + __u32 inaddr_any_override;
61979 +
61980 + __u32 crashes;
61981 + unsigned long expires;
61982 +
61983 + struct acl_subject_label *parent_subject;
61984 + struct gr_hash_struct *hash;
61985 + struct acl_subject_label *prev;
61986 + struct acl_subject_label *next;
61987 +
61988 + struct acl_object_label **obj_hash;
61989 + __u32 obj_hash_size;
61990 + __u16 pax_flags;
61991 +};
61992 +
61993 +struct role_allowed_ip {
61994 + __u32 addr;
61995 + __u32 netmask;
61996 +
61997 + struct role_allowed_ip *prev;
61998 + struct role_allowed_ip *next;
61999 +};
62000 +
62001 +struct role_transition {
62002 + char *rolename;
62003 +
62004 + struct role_transition *prev;
62005 + struct role_transition *next;
62006 +};
62007 +
62008 +struct acl_role_label {
62009 + char *rolename;
62010 + uid_t uidgid;
62011 + __u16 roletype;
62012 +
62013 + __u16 auth_attempts;
62014 + unsigned long expires;
62015 +
62016 + struct acl_subject_label *root_label;
62017 + struct gr_hash_struct *hash;
62018 +
62019 + struct acl_role_label *prev;
62020 + struct acl_role_label *next;
62021 +
62022 + struct role_transition *transitions;
62023 + struct role_allowed_ip *allowed_ips;
62024 + uid_t *domain_children;
62025 + __u16 domain_child_num;
62026 +
62027 + umode_t umask;
62028 +
62029 + struct acl_subject_label **subj_hash;
62030 + __u32 subj_hash_size;
62031 +};
62032 +
62033 +struct user_acl_role_db {
62034 + struct acl_role_label **r_table;
62035 + __u32 num_pointers; /* Number of allocations to track */
62036 + __u32 num_roles; /* Number of roles */
62037 + __u32 num_domain_children; /* Number of domain children */
62038 + __u32 num_subjects; /* Number of subjects */
62039 + __u32 num_objects; /* Number of objects */
62040 +};
62041 +
62042 +struct acl_object_label {
62043 + char *filename;
62044 + ino_t inode;
62045 + dev_t device;
62046 + __u32 mode;
62047 +
62048 + struct acl_subject_label *nested;
62049 + struct acl_object_label *globbed;
62050 +
62051 + /* next two structures not used */
62052 +
62053 + struct acl_object_label *prev;
62054 + struct acl_object_label *next;
62055 +};
62056 +
62057 +struct acl_ip_label {
62058 + char *iface;
62059 + __u32 addr;
62060 + __u32 netmask;
62061 + __u16 low, high;
62062 + __u8 mode;
62063 + __u32 type;
62064 + __u32 proto[8];
62065 +
62066 + /* next two structures not used */
62067 +
62068 + struct acl_ip_label *prev;
62069 + struct acl_ip_label *next;
62070 +};
62071 +
62072 +struct gr_arg {
62073 + struct user_acl_role_db role_db;
62074 + unsigned char pw[GR_PW_LEN];
62075 + unsigned char salt[GR_SALT_LEN];
62076 + unsigned char sum[GR_SHA_LEN];
62077 + unsigned char sp_role[GR_SPROLE_LEN];
62078 + struct sprole_pw *sprole_pws;
62079 + dev_t segv_device;
62080 + ino_t segv_inode;
62081 + uid_t segv_uid;
62082 + __u16 num_sprole_pws;
62083 + __u16 mode;
62084 +};
62085 +
62086 +struct gr_arg_wrapper {
62087 + struct gr_arg *arg;
62088 + __u32 version;
62089 + __u32 size;
62090 +};
62091 +
62092 +struct subject_map {
62093 + struct acl_subject_label *user;
62094 + struct acl_subject_label *kernel;
62095 + struct subject_map *prev;
62096 + struct subject_map *next;
62097 +};
62098 +
62099 +struct acl_subj_map_db {
62100 + struct subject_map **s_hash;
62101 + __u32 s_size;
62102 +};
62103 +
62104 +/* End Data Structures Section */
62105 +
62106 +/* Hash functions generated by empirical testing by Brad Spengler
62107 + Makes good use of the low bits of the inode. Generally 0-1 times
62108 + in loop for successful match. 0-3 for unsuccessful match.
62109 + Shift/add algorithm with modulus of table size and an XOR*/
62110 +
62111 +static __inline__ unsigned int
62112 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
62113 +{
62114 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
62115 +}
62116 +
62117 + static __inline__ unsigned int
62118 +shash(const struct acl_subject_label *userp, const unsigned int sz)
62119 +{
62120 + return ((const unsigned long)userp % sz);
62121 +}
62122 +
62123 +static __inline__ unsigned int
62124 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
62125 +{
62126 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
62127 +}
62128 +
62129 +static __inline__ unsigned int
62130 +nhash(const char *name, const __u16 len, const unsigned int sz)
62131 +{
62132 + return full_name_hash((const unsigned char *)name, len) % sz;
62133 +}
62134 +
62135 +#define FOR_EACH_ROLE_START(role) \
62136 + role = role_list; \
62137 + while (role) {
62138 +
62139 +#define FOR_EACH_ROLE_END(role) \
62140 + role = role->prev; \
62141 + }
62142 +
62143 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
62144 + subj = NULL; \
62145 + iter = 0; \
62146 + while (iter < role->subj_hash_size) { \
62147 + if (subj == NULL) \
62148 + subj = role->subj_hash[iter]; \
62149 + if (subj == NULL) { \
62150 + iter++; \
62151 + continue; \
62152 + }
62153 +
62154 +#define FOR_EACH_SUBJECT_END(subj,iter) \
62155 + subj = subj->next; \
62156 + if (subj == NULL) \
62157 + iter++; \
62158 + }
62159 +
62160 +
62161 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
62162 + subj = role->hash->first; \
62163 + while (subj != NULL) {
62164 +
62165 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
62166 + subj = subj->next; \
62167 + }
62168 +
62169 +#endif
62170 +
62171 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
62172 new file mode 100644
62173 index 0000000..323ecf2
62174 --- /dev/null
62175 +++ b/include/linux/gralloc.h
62176 @@ -0,0 +1,9 @@
62177 +#ifndef __GRALLOC_H
62178 +#define __GRALLOC_H
62179 +
62180 +void acl_free_all(void);
62181 +int acl_alloc_stack_init(unsigned long size);
62182 +void *acl_alloc(unsigned long len);
62183 +void *acl_alloc_num(unsigned long num, unsigned long len);
62184 +
62185 +#endif
62186 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
62187 new file mode 100644
62188 index 0000000..b30e9bc
62189 --- /dev/null
62190 +++ b/include/linux/grdefs.h
62191 @@ -0,0 +1,140 @@
62192 +#ifndef GRDEFS_H
62193 +#define GRDEFS_H
62194 +
62195 +/* Begin grsecurity status declarations */
62196 +
62197 +enum {
62198 + GR_READY = 0x01,
62199 + GR_STATUS_INIT = 0x00 // disabled state
62200 +};
62201 +
62202 +/* Begin ACL declarations */
62203 +
62204 +/* Role flags */
62205 +
62206 +enum {
62207 + GR_ROLE_USER = 0x0001,
62208 + GR_ROLE_GROUP = 0x0002,
62209 + GR_ROLE_DEFAULT = 0x0004,
62210 + GR_ROLE_SPECIAL = 0x0008,
62211 + GR_ROLE_AUTH = 0x0010,
62212 + GR_ROLE_NOPW = 0x0020,
62213 + GR_ROLE_GOD = 0x0040,
62214 + GR_ROLE_LEARN = 0x0080,
62215 + GR_ROLE_TPE = 0x0100,
62216 + GR_ROLE_DOMAIN = 0x0200,
62217 + GR_ROLE_PAM = 0x0400,
62218 + GR_ROLE_PERSIST = 0x0800
62219 +};
62220 +
62221 +/* ACL Subject and Object mode flags */
62222 +enum {
62223 + GR_DELETED = 0x80000000
62224 +};
62225 +
62226 +/* ACL Object-only mode flags */
62227 +enum {
62228 + GR_READ = 0x00000001,
62229 + GR_APPEND = 0x00000002,
62230 + GR_WRITE = 0x00000004,
62231 + GR_EXEC = 0x00000008,
62232 + GR_FIND = 0x00000010,
62233 + GR_INHERIT = 0x00000020,
62234 + GR_SETID = 0x00000040,
62235 + GR_CREATE = 0x00000080,
62236 + GR_DELETE = 0x00000100,
62237 + GR_LINK = 0x00000200,
62238 + GR_AUDIT_READ = 0x00000400,
62239 + GR_AUDIT_APPEND = 0x00000800,
62240 + GR_AUDIT_WRITE = 0x00001000,
62241 + GR_AUDIT_EXEC = 0x00002000,
62242 + GR_AUDIT_FIND = 0x00004000,
62243 + GR_AUDIT_INHERIT= 0x00008000,
62244 + GR_AUDIT_SETID = 0x00010000,
62245 + GR_AUDIT_CREATE = 0x00020000,
62246 + GR_AUDIT_DELETE = 0x00040000,
62247 + GR_AUDIT_LINK = 0x00080000,
62248 + GR_PTRACERD = 0x00100000,
62249 + GR_NOPTRACE = 0x00200000,
62250 + GR_SUPPRESS = 0x00400000,
62251 + GR_NOLEARN = 0x00800000,
62252 + GR_INIT_TRANSFER= 0x01000000
62253 +};
62254 +
62255 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
62256 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
62257 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
62258 +
62259 +/* ACL subject-only mode flags */
62260 +enum {
62261 + GR_KILL = 0x00000001,
62262 + GR_VIEW = 0x00000002,
62263 + GR_PROTECTED = 0x00000004,
62264 + GR_LEARN = 0x00000008,
62265 + GR_OVERRIDE = 0x00000010,
62266 + /* just a placeholder, this mode is only used in userspace */
62267 + GR_DUMMY = 0x00000020,
62268 + GR_PROTSHM = 0x00000040,
62269 + GR_KILLPROC = 0x00000080,
62270 + GR_KILLIPPROC = 0x00000100,
62271 + /* just a placeholder, this mode is only used in userspace */
62272 + GR_NOTROJAN = 0x00000200,
62273 + GR_PROTPROCFD = 0x00000400,
62274 + GR_PROCACCT = 0x00000800,
62275 + GR_RELAXPTRACE = 0x00001000,
62276 + GR_NESTED = 0x00002000,
62277 + GR_INHERITLEARN = 0x00004000,
62278 + GR_PROCFIND = 0x00008000,
62279 + GR_POVERRIDE = 0x00010000,
62280 + GR_KERNELAUTH = 0x00020000,
62281 + GR_ATSECURE = 0x00040000,
62282 + GR_SHMEXEC = 0x00080000
62283 +};
62284 +
62285 +enum {
62286 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
62287 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
62288 + GR_PAX_ENABLE_MPROTECT = 0x0004,
62289 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
62290 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
62291 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
62292 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
62293 + GR_PAX_DISABLE_MPROTECT = 0x0400,
62294 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
62295 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
62296 +};
62297 +
62298 +enum {
62299 + GR_ID_USER = 0x01,
62300 + GR_ID_GROUP = 0x02,
62301 +};
62302 +
62303 +enum {
62304 + GR_ID_ALLOW = 0x01,
62305 + GR_ID_DENY = 0x02,
62306 +};
62307 +
62308 +#define GR_CRASH_RES 31
62309 +#define GR_UIDTABLE_MAX 500
62310 +
62311 +/* begin resource learning section */
62312 +enum {
62313 + GR_RLIM_CPU_BUMP = 60,
62314 + GR_RLIM_FSIZE_BUMP = 50000,
62315 + GR_RLIM_DATA_BUMP = 10000,
62316 + GR_RLIM_STACK_BUMP = 1000,
62317 + GR_RLIM_CORE_BUMP = 10000,
62318 + GR_RLIM_RSS_BUMP = 500000,
62319 + GR_RLIM_NPROC_BUMP = 1,
62320 + GR_RLIM_NOFILE_BUMP = 5,
62321 + GR_RLIM_MEMLOCK_BUMP = 50000,
62322 + GR_RLIM_AS_BUMP = 500000,
62323 + GR_RLIM_LOCKS_BUMP = 2,
62324 + GR_RLIM_SIGPENDING_BUMP = 5,
62325 + GR_RLIM_MSGQUEUE_BUMP = 10000,
62326 + GR_RLIM_NICE_BUMP = 1,
62327 + GR_RLIM_RTPRIO_BUMP = 1,
62328 + GR_RLIM_RTTIME_BUMP = 1000000
62329 +};
62330 +
62331 +#endif
62332 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
62333 new file mode 100644
62334 index 0000000..c9292f7
62335 --- /dev/null
62336 +++ b/include/linux/grinternal.h
62337 @@ -0,0 +1,223 @@
62338 +#ifndef __GRINTERNAL_H
62339 +#define __GRINTERNAL_H
62340 +
62341 +#ifdef CONFIG_GRKERNSEC
62342 +
62343 +#include <linux/fs.h>
62344 +#include <linux/mnt_namespace.h>
62345 +#include <linux/nsproxy.h>
62346 +#include <linux/gracl.h>
62347 +#include <linux/grdefs.h>
62348 +#include <linux/grmsg.h>
62349 +
62350 +void gr_add_learn_entry(const char *fmt, ...)
62351 + __attribute__ ((format (printf, 1, 2)));
62352 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62353 + const struct vfsmount *mnt);
62354 +__u32 gr_check_create(const struct dentry *new_dentry,
62355 + const struct dentry *parent,
62356 + const struct vfsmount *mnt, const __u32 mode);
62357 +int gr_check_protected_task(const struct task_struct *task);
62358 +__u32 to_gr_audit(const __u32 reqmode);
62359 +int gr_set_acls(const int type);
62360 +int gr_apply_subject_to_task(struct task_struct *task);
62361 +int gr_acl_is_enabled(void);
62362 +char gr_roletype_to_char(void);
62363 +
62364 +void gr_handle_alertkill(struct task_struct *task);
62365 +char *gr_to_filename(const struct dentry *dentry,
62366 + const struct vfsmount *mnt);
62367 +char *gr_to_filename1(const struct dentry *dentry,
62368 + const struct vfsmount *mnt);
62369 +char *gr_to_filename2(const struct dentry *dentry,
62370 + const struct vfsmount *mnt);
62371 +char *gr_to_filename3(const struct dentry *dentry,
62372 + const struct vfsmount *mnt);
62373 +
62374 +extern int grsec_enable_ptrace_readexec;
62375 +extern int grsec_enable_harden_ptrace;
62376 +extern int grsec_enable_link;
62377 +extern int grsec_enable_fifo;
62378 +extern int grsec_enable_execve;
62379 +extern int grsec_enable_shm;
62380 +extern int grsec_enable_execlog;
62381 +extern int grsec_enable_signal;
62382 +extern int grsec_enable_audit_ptrace;
62383 +extern int grsec_enable_forkfail;
62384 +extern int grsec_enable_time;
62385 +extern int grsec_enable_rofs;
62386 +extern int grsec_enable_chroot_shmat;
62387 +extern int grsec_enable_chroot_mount;
62388 +extern int grsec_enable_chroot_double;
62389 +extern int grsec_enable_chroot_pivot;
62390 +extern int grsec_enable_chroot_chdir;
62391 +extern int grsec_enable_chroot_chmod;
62392 +extern int grsec_enable_chroot_mknod;
62393 +extern int grsec_enable_chroot_fchdir;
62394 +extern int grsec_enable_chroot_nice;
62395 +extern int grsec_enable_chroot_execlog;
62396 +extern int grsec_enable_chroot_caps;
62397 +extern int grsec_enable_chroot_sysctl;
62398 +extern int grsec_enable_chroot_unix;
62399 +extern int grsec_enable_symlinkown;
62400 +extern int grsec_symlinkown_gid;
62401 +extern int grsec_enable_tpe;
62402 +extern int grsec_tpe_gid;
62403 +extern int grsec_enable_tpe_all;
62404 +extern int grsec_enable_tpe_invert;
62405 +extern int grsec_enable_socket_all;
62406 +extern int grsec_socket_all_gid;
62407 +extern int grsec_enable_socket_client;
62408 +extern int grsec_socket_client_gid;
62409 +extern int grsec_enable_socket_server;
62410 +extern int grsec_socket_server_gid;
62411 +extern int grsec_audit_gid;
62412 +extern int grsec_enable_group;
62413 +extern int grsec_enable_audit_textrel;
62414 +extern int grsec_enable_log_rwxmaps;
62415 +extern int grsec_enable_mount;
62416 +extern int grsec_enable_chdir;
62417 +extern int grsec_resource_logging;
62418 +extern int grsec_enable_blackhole;
62419 +extern int grsec_lastack_retries;
62420 +extern int grsec_enable_brute;
62421 +extern int grsec_lock;
62422 +
62423 +extern spinlock_t grsec_alert_lock;
62424 +extern unsigned long grsec_alert_wtime;
62425 +extern unsigned long grsec_alert_fyet;
62426 +
62427 +extern spinlock_t grsec_audit_lock;
62428 +
62429 +extern rwlock_t grsec_exec_file_lock;
62430 +
62431 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62432 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62433 + (tsk)->exec_file->f_vfsmnt) : "/")
62434 +
62435 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62436 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62437 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62438 +
62439 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62440 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
62441 + (tsk)->exec_file->f_vfsmnt) : "/")
62442 +
62443 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62444 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62445 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62446 +
62447 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62448 +
62449 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62450 +
62451 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62452 + (task)->pid, (cred)->uid, \
62453 + (cred)->euid, (cred)->gid, (cred)->egid, \
62454 + gr_parent_task_fullpath(task), \
62455 + (task)->real_parent->comm, (task)->real_parent->pid, \
62456 + (pcred)->uid, (pcred)->euid, \
62457 + (pcred)->gid, (pcred)->egid
62458 +
62459 +#define GR_CHROOT_CAPS {{ \
62460 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62461 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62462 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62463 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62464 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62465 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62466 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
62467 +
62468 +#define security_learn(normal_msg,args...) \
62469 +({ \
62470 + read_lock(&grsec_exec_file_lock); \
62471 + gr_add_learn_entry(normal_msg "\n", ## args); \
62472 + read_unlock(&grsec_exec_file_lock); \
62473 +})
62474 +
62475 +enum {
62476 + GR_DO_AUDIT,
62477 + GR_DONT_AUDIT,
62478 + /* used for non-audit messages that we shouldn't kill the task on */
62479 + GR_DONT_AUDIT_GOOD
62480 +};
62481 +
62482 +enum {
62483 + GR_TTYSNIFF,
62484 + GR_RBAC,
62485 + GR_RBAC_STR,
62486 + GR_STR_RBAC,
62487 + GR_RBAC_MODE2,
62488 + GR_RBAC_MODE3,
62489 + GR_FILENAME,
62490 + GR_SYSCTL_HIDDEN,
62491 + GR_NOARGS,
62492 + GR_ONE_INT,
62493 + GR_ONE_INT_TWO_STR,
62494 + GR_ONE_STR,
62495 + GR_STR_INT,
62496 + GR_TWO_STR_INT,
62497 + GR_TWO_INT,
62498 + GR_TWO_U64,
62499 + GR_THREE_INT,
62500 + GR_FIVE_INT_TWO_STR,
62501 + GR_TWO_STR,
62502 + GR_THREE_STR,
62503 + GR_FOUR_STR,
62504 + GR_STR_FILENAME,
62505 + GR_FILENAME_STR,
62506 + GR_FILENAME_TWO_INT,
62507 + GR_FILENAME_TWO_INT_STR,
62508 + GR_TEXTREL,
62509 + GR_PTRACE,
62510 + GR_RESOURCE,
62511 + GR_CAP,
62512 + GR_SIG,
62513 + GR_SIG2,
62514 + GR_CRASH1,
62515 + GR_CRASH2,
62516 + GR_PSACCT,
62517 + GR_RWXMAP
62518 +};
62519 +
62520 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62521 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62522 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62523 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62524 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62525 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62526 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62527 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62528 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62529 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62530 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62531 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62532 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62533 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62534 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62535 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62536 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62537 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62538 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62539 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62540 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62541 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62542 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62543 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62544 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62545 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62546 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62547 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62548 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62549 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62550 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62551 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62552 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62553 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62554 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62555 +
62556 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62557 +
62558 +#endif
62559 +
62560 +#endif
62561 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62562 new file mode 100644
62563 index 0000000..54f4e85
62564 --- /dev/null
62565 +++ b/include/linux/grmsg.h
62566 @@ -0,0 +1,110 @@
62567 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62568 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62569 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62570 +#define GR_STOPMOD_MSG "denied modification of module state by "
62571 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62572 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62573 +#define GR_IOPERM_MSG "denied use of ioperm() by "
62574 +#define GR_IOPL_MSG "denied use of iopl() by "
62575 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62576 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62577 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62578 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62579 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62580 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62581 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62582 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62583 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62584 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62585 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62586 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62587 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62588 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62589 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62590 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62591 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62592 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62593 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62594 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62595 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62596 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62597 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62598 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62599 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62600 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62601 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
62602 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62603 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62604 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62605 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62606 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62607 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62608 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62609 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62610 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62611 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62612 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62613 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62614 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62615 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62616 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62617 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62618 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62619 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62620 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62621 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62622 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62623 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62624 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62625 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62626 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62627 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62628 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62629 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62630 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62631 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62632 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62633 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62634 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62635 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62636 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62637 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62638 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
62639 +#define GR_NICE_CHROOT_MSG "denied priority change by "
62640 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62641 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62642 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62643 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62644 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62645 +#define GR_TIME_MSG "time set by "
62646 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62647 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62648 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62649 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62650 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62651 +#define GR_BIND_MSG "denied bind() by "
62652 +#define GR_CONNECT_MSG "denied connect() by "
62653 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62654 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62655 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62656 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62657 +#define GR_CAP_ACL_MSG "use of %s denied for "
62658 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62659 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62660 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62661 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62662 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62663 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62664 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62665 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62666 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62667 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62668 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62669 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62670 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62671 +#define GR_VM86_MSG "denied use of vm86 by "
62672 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62673 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
62674 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62675 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
62676 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
62677 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62678 new file mode 100644
62679 index 0000000..38bfb04
62680 --- /dev/null
62681 +++ b/include/linux/grsecurity.h
62682 @@ -0,0 +1,233 @@
62683 +#ifndef GR_SECURITY_H
62684 +#define GR_SECURITY_H
62685 +#include <linux/fs.h>
62686 +#include <linux/fs_struct.h>
62687 +#include <linux/binfmts.h>
62688 +#include <linux/gracl.h>
62689 +
62690 +/* notify of brain-dead configs */
62691 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62692 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62693 +#endif
62694 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62695 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62696 +#endif
62697 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62698 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62699 +#endif
62700 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62701 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
62702 +#endif
62703 +
62704 +#include <linux/compat.h>
62705 +
62706 +struct user_arg_ptr {
62707 +#ifdef CONFIG_COMPAT
62708 + bool is_compat;
62709 +#endif
62710 + union {
62711 + const char __user *const __user *native;
62712 +#ifdef CONFIG_COMPAT
62713 + compat_uptr_t __user *compat;
62714 +#endif
62715 + } ptr;
62716 +};
62717 +
62718 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62719 +void gr_handle_brute_check(void);
62720 +void gr_handle_kernel_exploit(void);
62721 +int gr_process_user_ban(void);
62722 +
62723 +char gr_roletype_to_char(void);
62724 +
62725 +int gr_acl_enable_at_secure(void);
62726 +
62727 +int gr_check_user_change(int real, int effective, int fs);
62728 +int gr_check_group_change(int real, int effective, int fs);
62729 +
62730 +void gr_del_task_from_ip_table(struct task_struct *p);
62731 +
62732 +int gr_pid_is_chrooted(struct task_struct *p);
62733 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62734 +int gr_handle_chroot_nice(void);
62735 +int gr_handle_chroot_sysctl(const int op);
62736 +int gr_handle_chroot_setpriority(struct task_struct *p,
62737 + const int niceval);
62738 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62739 +int gr_handle_chroot_chroot(const struct dentry *dentry,
62740 + const struct vfsmount *mnt);
62741 +void gr_handle_chroot_chdir(struct path *path);
62742 +int gr_handle_chroot_chmod(const struct dentry *dentry,
62743 + const struct vfsmount *mnt, const int mode);
62744 +int gr_handle_chroot_mknod(const struct dentry *dentry,
62745 + const struct vfsmount *mnt, const int mode);
62746 +int gr_handle_chroot_mount(const struct dentry *dentry,
62747 + const struct vfsmount *mnt,
62748 + const char *dev_name);
62749 +int gr_handle_chroot_pivot(void);
62750 +int gr_handle_chroot_unix(const pid_t pid);
62751 +
62752 +int gr_handle_rawio(const struct inode *inode);
62753 +
62754 +void gr_handle_ioperm(void);
62755 +void gr_handle_iopl(void);
62756 +
62757 +umode_t gr_acl_umask(void);
62758 +
62759 +int gr_tpe_allow(const struct file *file);
62760 +
62761 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62762 +void gr_clear_chroot_entries(struct task_struct *task);
62763 +
62764 +void gr_log_forkfail(const int retval);
62765 +void gr_log_timechange(void);
62766 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62767 +void gr_log_chdir(const struct dentry *dentry,
62768 + const struct vfsmount *mnt);
62769 +void gr_log_chroot_exec(const struct dentry *dentry,
62770 + const struct vfsmount *mnt);
62771 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62772 +void gr_log_remount(const char *devname, const int retval);
62773 +void gr_log_unmount(const char *devname, const int retval);
62774 +void gr_log_mount(const char *from, const char *to, const int retval);
62775 +void gr_log_textrel(struct vm_area_struct *vma);
62776 +void gr_log_rwxmmap(struct file *file);
62777 +void gr_log_rwxmprotect(struct file *file);
62778 +
62779 +int gr_handle_follow_link(const struct inode *parent,
62780 + const struct inode *inode,
62781 + const struct dentry *dentry,
62782 + const struct vfsmount *mnt);
62783 +int gr_handle_fifo(const struct dentry *dentry,
62784 + const struct vfsmount *mnt,
62785 + const struct dentry *dir, const int flag,
62786 + const int acc_mode);
62787 +int gr_handle_hardlink(const struct dentry *dentry,
62788 + const struct vfsmount *mnt,
62789 + struct inode *inode,
62790 + const int mode, const char *to);
62791 +
62792 +int gr_is_capable(const int cap);
62793 +int gr_is_capable_nolog(const int cap);
62794 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62795 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62796 +
62797 +void gr_learn_resource(const struct task_struct *task, const int limit,
62798 + const unsigned long wanted, const int gt);
62799 +void gr_copy_label(struct task_struct *tsk);
62800 +void gr_handle_crash(struct task_struct *task, const int sig);
62801 +int gr_handle_signal(const struct task_struct *p, const int sig);
62802 +int gr_check_crash_uid(const uid_t uid);
62803 +int gr_check_protected_task(const struct task_struct *task);
62804 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62805 +int gr_acl_handle_mmap(const struct file *file,
62806 + const unsigned long prot);
62807 +int gr_acl_handle_mprotect(const struct file *file,
62808 + const unsigned long prot);
62809 +int gr_check_hidden_task(const struct task_struct *tsk);
62810 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62811 + const struct vfsmount *mnt);
62812 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
62813 + const struct vfsmount *mnt);
62814 +__u32 gr_acl_handle_access(const struct dentry *dentry,
62815 + const struct vfsmount *mnt, const int fmode);
62816 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62817 + const struct vfsmount *mnt, umode_t *mode);
62818 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
62819 + const struct vfsmount *mnt);
62820 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62821 + const struct vfsmount *mnt);
62822 +int gr_handle_ptrace(struct task_struct *task, const long request);
62823 +int gr_handle_proc_ptrace(struct task_struct *task);
62824 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
62825 + const struct vfsmount *mnt);
62826 +int gr_check_crash_exec(const struct file *filp);
62827 +int gr_acl_is_enabled(void);
62828 +void gr_set_kernel_label(struct task_struct *task);
62829 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
62830 + const gid_t gid);
62831 +int gr_set_proc_label(const struct dentry *dentry,
62832 + const struct vfsmount *mnt,
62833 + const int unsafe_flags);
62834 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62835 + const struct vfsmount *mnt);
62836 +__u32 gr_acl_handle_open(const struct dentry *dentry,
62837 + const struct vfsmount *mnt, int acc_mode);
62838 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
62839 + const struct dentry *p_dentry,
62840 + const struct vfsmount *p_mnt,
62841 + int open_flags, int acc_mode, const int imode);
62842 +void gr_handle_create(const struct dentry *dentry,
62843 + const struct vfsmount *mnt);
62844 +void gr_handle_proc_create(const struct dentry *dentry,
62845 + const struct inode *inode);
62846 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62847 + const struct dentry *parent_dentry,
62848 + const struct vfsmount *parent_mnt,
62849 + const int mode);
62850 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62851 + const struct dentry *parent_dentry,
62852 + const struct vfsmount *parent_mnt);
62853 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62854 + const struct vfsmount *mnt);
62855 +void gr_handle_delete(const ino_t ino, const dev_t dev);
62856 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62857 + const struct vfsmount *mnt);
62858 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62859 + const struct dentry *parent_dentry,
62860 + const struct vfsmount *parent_mnt,
62861 + const char *from);
62862 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62863 + const struct dentry *parent_dentry,
62864 + const struct vfsmount *parent_mnt,
62865 + const struct dentry *old_dentry,
62866 + const struct vfsmount *old_mnt, const char *to);
62867 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
62868 +int gr_acl_handle_rename(struct dentry *new_dentry,
62869 + struct dentry *parent_dentry,
62870 + const struct vfsmount *parent_mnt,
62871 + struct dentry *old_dentry,
62872 + struct inode *old_parent_inode,
62873 + struct vfsmount *old_mnt, const char *newname);
62874 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62875 + struct dentry *old_dentry,
62876 + struct dentry *new_dentry,
62877 + struct vfsmount *mnt, const __u8 replace);
62878 +__u32 gr_check_link(const struct dentry *new_dentry,
62879 + const struct dentry *parent_dentry,
62880 + const struct vfsmount *parent_mnt,
62881 + const struct dentry *old_dentry,
62882 + const struct vfsmount *old_mnt);
62883 +int gr_acl_handle_filldir(const struct file *file, const char *name,
62884 + const unsigned int namelen, const ino_t ino);
62885 +
62886 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
62887 + const struct vfsmount *mnt);
62888 +void gr_acl_handle_exit(void);
62889 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
62890 +int gr_acl_handle_procpidmem(const struct task_struct *task);
62891 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62892 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62893 +void gr_audit_ptrace(struct task_struct *task);
62894 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62895 +
62896 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62897 +
62898 +#ifdef CONFIG_GRKERNSEC
62899 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62900 +void gr_handle_vm86(void);
62901 +void gr_handle_mem_readwrite(u64 from, u64 to);
62902 +
62903 +void gr_log_badprocpid(const char *entry);
62904 +
62905 +extern int grsec_enable_dmesg;
62906 +extern int grsec_disable_privio;
62907 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62908 +extern int grsec_enable_chroot_findtask;
62909 +#endif
62910 +#ifdef CONFIG_GRKERNSEC_SETXID
62911 +extern int grsec_enable_setxid;
62912 +#endif
62913 +#endif
62914 +
62915 +#endif
62916 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62917 new file mode 100644
62918 index 0000000..e7ffaaf
62919 --- /dev/null
62920 +++ b/include/linux/grsock.h
62921 @@ -0,0 +1,19 @@
62922 +#ifndef __GRSOCK_H
62923 +#define __GRSOCK_H
62924 +
62925 +extern void gr_attach_curr_ip(const struct sock *sk);
62926 +extern int gr_handle_sock_all(const int family, const int type,
62927 + const int protocol);
62928 +extern int gr_handle_sock_server(const struct sockaddr *sck);
62929 +extern int gr_handle_sock_server_other(const struct sock *sck);
62930 +extern int gr_handle_sock_client(const struct sockaddr *sck);
62931 +extern int gr_search_connect(struct socket * sock,
62932 + struct sockaddr_in * addr);
62933 +extern int gr_search_bind(struct socket * sock,
62934 + struct sockaddr_in * addr);
62935 +extern int gr_search_listen(struct socket * sock);
62936 +extern int gr_search_accept(struct socket * sock);
62937 +extern int gr_search_socket(const int domain, const int type,
62938 + const int protocol);
62939 +
62940 +#endif
62941 diff --git a/include/linux/hid.h b/include/linux/hid.h
62942 index 449fa38..b37c8cc 100644
62943 --- a/include/linux/hid.h
62944 +++ b/include/linux/hid.h
62945 @@ -704,7 +704,7 @@ struct hid_ll_driver {
62946 unsigned int code, int value);
62947
62948 int (*parse)(struct hid_device *hdev);
62949 -};
62950 +} __no_const;
62951
62952 #define PM_HINT_FULLON 1<<5
62953 #define PM_HINT_NORMAL 1<<1
62954 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62955 index d3999b4..1304cb4 100644
62956 --- a/include/linux/highmem.h
62957 +++ b/include/linux/highmem.h
62958 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
62959 kunmap_atomic(kaddr);
62960 }
62961
62962 +static inline void sanitize_highpage(struct page *page)
62963 +{
62964 + void *kaddr;
62965 + unsigned long flags;
62966 +
62967 + local_irq_save(flags);
62968 + kaddr = kmap_atomic(page);
62969 + clear_page(kaddr);
62970 + kunmap_atomic(kaddr);
62971 + local_irq_restore(flags);
62972 +}
62973 +
62974 static inline void zero_user_segments(struct page *page,
62975 unsigned start1, unsigned end1,
62976 unsigned start2, unsigned end2)
62977 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62978 index ddfa041..a44cfff 100644
62979 --- a/include/linux/i2c.h
62980 +++ b/include/linux/i2c.h
62981 @@ -366,6 +366,7 @@ struct i2c_algorithm {
62982 /* To determine what the adapter supports */
62983 u32 (*functionality) (struct i2c_adapter *);
62984 };
62985 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62986
62987 /*
62988 * i2c_adapter is the structure used to identify a physical i2c bus along
62989 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62990 index d23c3c2..eb63c81 100644
62991 --- a/include/linux/i2o.h
62992 +++ b/include/linux/i2o.h
62993 @@ -565,7 +565,7 @@ struct i2o_controller {
62994 struct i2o_device *exec; /* Executive */
62995 #if BITS_PER_LONG == 64
62996 spinlock_t context_list_lock; /* lock for context_list */
62997 - atomic_t context_list_counter; /* needed for unique contexts */
62998 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62999 struct list_head context_list; /* list of context id's
63000 and pointers */
63001 #endif
63002 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
63003 index 8185f57..7b2d222 100644
63004 --- a/include/linux/if_team.h
63005 +++ b/include/linux/if_team.h
63006 @@ -74,6 +74,7 @@ struct team_mode_ops {
63007 void (*port_leave)(struct team *team, struct team_port *port);
63008 void (*port_change_mac)(struct team *team, struct team_port *port);
63009 };
63010 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
63011
63012 enum team_option_type {
63013 TEAM_OPTION_TYPE_U32,
63014 @@ -136,7 +137,7 @@ struct team {
63015 struct list_head option_inst_list; /* list of option instances */
63016
63017 const struct team_mode *mode;
63018 - struct team_mode_ops ops;
63019 + team_mode_ops_no_const ops;
63020 long mode_priv[TEAM_MODE_PRIV_LONGS];
63021 };
63022
63023 diff --git a/include/linux/init.h b/include/linux/init.h
63024 index 6b95109..7616d09 100644
63025 --- a/include/linux/init.h
63026 +++ b/include/linux/init.h
63027 @@ -39,9 +39,15 @@
63028 * Also note, that this data cannot be "const".
63029 */
63030
63031 +#ifdef MODULE
63032 +#define add_latent_entropy
63033 +#else
63034 +#define add_latent_entropy __latent_entropy
63035 +#endif
63036 +
63037 /* These are for everybody (although not all archs will actually
63038 discard it in modules) */
63039 -#define __init __section(.init.text) __cold notrace
63040 +#define __init __section(.init.text) __cold notrace add_latent_entropy
63041 #define __initdata __section(.init.data)
63042 #define __initconst __section(.init.rodata)
63043 #define __exitdata __section(.exit.data)
63044 @@ -83,7 +89,7 @@
63045 #define __exit __section(.exit.text) __exitused __cold notrace
63046
63047 /* Used for HOTPLUG */
63048 -#define __devinit __section(.devinit.text) __cold notrace
63049 +#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
63050 #define __devinitdata __section(.devinit.data)
63051 #define __devinitconst __section(.devinit.rodata)
63052 #define __devexit __section(.devexit.text) __exitused __cold notrace
63053 @@ -91,7 +97,7 @@
63054 #define __devexitconst __section(.devexit.rodata)
63055
63056 /* Used for HOTPLUG_CPU */
63057 -#define __cpuinit __section(.cpuinit.text) __cold notrace
63058 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
63059 #define __cpuinitdata __section(.cpuinit.data)
63060 #define __cpuinitconst __section(.cpuinit.rodata)
63061 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
63062 @@ -99,7 +105,7 @@
63063 #define __cpuexitconst __section(.cpuexit.rodata)
63064
63065 /* Used for MEMORY_HOTPLUG */
63066 -#define __meminit __section(.meminit.text) __cold notrace
63067 +#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
63068 #define __meminitdata __section(.meminit.data)
63069 #define __meminitconst __section(.meminit.rodata)
63070 #define __memexit __section(.memexit.text) __exitused __cold notrace
63071 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
63072 index b806b82..2e2837e 100644
63073 --- a/include/linux/init_task.h
63074 +++ b/include/linux/init_task.h
63075 @@ -143,6 +143,12 @@ extern struct task_group root_task_group;
63076
63077 #define INIT_TASK_COMM "swapper"
63078
63079 +#ifdef CONFIG_X86
63080 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
63081 +#else
63082 +#define INIT_TASK_THREAD_INFO
63083 +#endif
63084 +
63085 /*
63086 * INIT_TASK is used to set up the first task table, touch at
63087 * your own risk!. Base=0, limit=0x1fffff (=2MB)
63088 @@ -182,6 +188,7 @@ extern struct task_group root_task_group;
63089 RCU_INIT_POINTER(.cred, &init_cred), \
63090 .comm = INIT_TASK_COMM, \
63091 .thread = INIT_THREAD, \
63092 + INIT_TASK_THREAD_INFO \
63093 .fs = &init_fs, \
63094 .files = &init_files, \
63095 .signal = &init_signals, \
63096 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
63097 index e6ca56d..8583707 100644
63098 --- a/include/linux/intel-iommu.h
63099 +++ b/include/linux/intel-iommu.h
63100 @@ -296,7 +296,7 @@ struct iommu_flush {
63101 u8 fm, u64 type);
63102 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
63103 unsigned int size_order, u64 type);
63104 -};
63105 +} __no_const;
63106
63107 enum {
63108 SR_DMAR_FECTL_REG,
63109 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
63110 index e68a8e5..811b9af 100644
63111 --- a/include/linux/interrupt.h
63112 +++ b/include/linux/interrupt.h
63113 @@ -435,7 +435,7 @@ enum
63114 /* map softirq index to softirq name. update 'softirq_to_name' in
63115 * kernel/softirq.c when adding a new softirq.
63116 */
63117 -extern char *softirq_to_name[NR_SOFTIRQS];
63118 +extern const char * const softirq_to_name[NR_SOFTIRQS];
63119
63120 /* softirq mask and active fields moved to irq_cpustat_t in
63121 * asm/hardirq.h to get better cache usage. KAO
63122 @@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
63123
63124 struct softirq_action
63125 {
63126 - void (*action)(struct softirq_action *);
63127 + void (*action)(void);
63128 };
63129
63130 asmlinkage void do_softirq(void);
63131 asmlinkage void __do_softirq(void);
63132 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
63133 +extern void open_softirq(int nr, void (*action)(void));
63134 extern void softirq_init(void);
63135 extern void __raise_softirq_irqoff(unsigned int nr);
63136
63137 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
63138 index 6883e19..06992b1 100644
63139 --- a/include/linux/kallsyms.h
63140 +++ b/include/linux/kallsyms.h
63141 @@ -15,7 +15,8 @@
63142
63143 struct module;
63144
63145 -#ifdef CONFIG_KALLSYMS
63146 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
63147 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63148 /* Lookup the address for a symbol. Returns 0 if not found. */
63149 unsigned long kallsyms_lookup_name(const char *name);
63150
63151 @@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
63152 /* Stupid that this does nothing, but I didn't create this mess. */
63153 #define __print_symbol(fmt, addr)
63154 #endif /*CONFIG_KALLSYMS*/
63155 +#else /* when included by kallsyms.c, vsnprintf.c, or
63156 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
63157 +extern void __print_symbol(const char *fmt, unsigned long address);
63158 +extern int sprint_backtrace(char *buffer, unsigned long address);
63159 +extern int sprint_symbol(char *buffer, unsigned long address);
63160 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
63161 +const char *kallsyms_lookup(unsigned long addr,
63162 + unsigned long *symbolsize,
63163 + unsigned long *offset,
63164 + char **modname, char *namebuf);
63165 +#endif
63166
63167 /* This macro allows us to keep printk typechecking */
63168 static __printf(1, 2)
63169 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
63170 index c4d2fc1..5df9c19 100644
63171 --- a/include/linux/kgdb.h
63172 +++ b/include/linux/kgdb.h
63173 @@ -53,7 +53,7 @@ extern int kgdb_connected;
63174 extern int kgdb_io_module_registered;
63175
63176 extern atomic_t kgdb_setting_breakpoint;
63177 -extern atomic_t kgdb_cpu_doing_single_step;
63178 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
63179
63180 extern struct task_struct *kgdb_usethread;
63181 extern struct task_struct *kgdb_contthread;
63182 @@ -252,7 +252,7 @@ struct kgdb_arch {
63183 void (*disable_hw_break)(struct pt_regs *regs);
63184 void (*remove_all_hw_break)(void);
63185 void (*correct_hw_break)(void);
63186 -};
63187 +} __do_const;
63188
63189 /**
63190 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
63191 @@ -277,7 +277,7 @@ struct kgdb_io {
63192 void (*pre_exception) (void);
63193 void (*post_exception) (void);
63194 int is_console;
63195 -};
63196 +} __do_const;
63197
63198 extern struct kgdb_arch arch_kgdb_ops;
63199
63200 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
63201 index 5398d58..5883a34 100644
63202 --- a/include/linux/kmod.h
63203 +++ b/include/linux/kmod.h
63204 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
63205 * usually useless though. */
63206 extern __printf(2, 3)
63207 int __request_module(bool wait, const char *name, ...);
63208 +extern __printf(3, 4)
63209 +int ___request_module(bool wait, char *param_name, const char *name, ...);
63210 #define request_module(mod...) __request_module(true, mod)
63211 #define request_module_nowait(mod...) __request_module(false, mod)
63212 #define try_then_request_module(x, mod...) \
63213 diff --git a/include/linux/kref.h b/include/linux/kref.h
63214 index 9c07dce..a92fa71 100644
63215 --- a/include/linux/kref.h
63216 +++ b/include/linux/kref.h
63217 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
63218 static inline int kref_sub(struct kref *kref, unsigned int count,
63219 void (*release)(struct kref *kref))
63220 {
63221 - WARN_ON(release == NULL);
63222 + BUG_ON(release == NULL);
63223
63224 if (atomic_sub_and_test((int) count, &kref->refcount)) {
63225 release(kref);
63226 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
63227 index 96c158a..1864db5 100644
63228 --- a/include/linux/kvm_host.h
63229 +++ b/include/linux/kvm_host.h
63230 @@ -345,7 +345,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
63231 void vcpu_load(struct kvm_vcpu *vcpu);
63232 void vcpu_put(struct kvm_vcpu *vcpu);
63233
63234 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
63235 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
63236 struct module *module);
63237 void kvm_exit(void);
63238
63239 @@ -511,7 +511,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
63240 struct kvm_guest_debug *dbg);
63241 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
63242
63243 -int kvm_arch_init(void *opaque);
63244 +int kvm_arch_init(const void *opaque);
63245 void kvm_arch_exit(void);
63246
63247 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
63248 diff --git a/include/linux/libata.h b/include/linux/libata.h
63249 index 6e887c7..4539601 100644
63250 --- a/include/linux/libata.h
63251 +++ b/include/linux/libata.h
63252 @@ -910,7 +910,7 @@ struct ata_port_operations {
63253 * fields must be pointers.
63254 */
63255 const struct ata_port_operations *inherits;
63256 -};
63257 +} __do_const;
63258
63259 struct ata_port_info {
63260 unsigned long flags;
63261 diff --git a/include/linux/memory.h b/include/linux/memory.h
63262 index ff9a9f8..c715deb 100644
63263 --- a/include/linux/memory.h
63264 +++ b/include/linux/memory.h
63265 @@ -143,7 +143,7 @@ struct memory_accessor {
63266 size_t count);
63267 ssize_t (*write)(struct memory_accessor *, const char *buf,
63268 off_t offset, size_t count);
63269 -};
63270 +} __no_const;
63271
63272 /*
63273 * Kernel text modification mutex, used for code patching. Users of this lock
63274 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
63275 index 1318ca6..7521340 100644
63276 --- a/include/linux/mfd/abx500.h
63277 +++ b/include/linux/mfd/abx500.h
63278 @@ -452,6 +452,7 @@ struct abx500_ops {
63279 int (*event_registers_startup_state_get) (struct device *, u8 *);
63280 int (*startup_irq_enabled) (struct device *, unsigned int);
63281 };
63282 +typedef struct abx500_ops __no_const abx500_ops_no_const;
63283
63284 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
63285 void abx500_remove_ops(struct device *dev);
63286 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
63287 index 9b07725..3d55001 100644
63288 --- a/include/linux/mfd/abx500/ux500_chargalg.h
63289 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
63290 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
63291 int (*enable) (struct ux500_charger *, int, int, int);
63292 int (*kick_wd) (struct ux500_charger *);
63293 int (*update_curr) (struct ux500_charger *, int);
63294 -};
63295 +} __no_const;
63296
63297 /**
63298 * struct ux500_charger - power supply ux500 charger sub class
63299 diff --git a/include/linux/mm.h b/include/linux/mm.h
63300 index f9f279c..198da78 100644
63301 --- a/include/linux/mm.h
63302 +++ b/include/linux/mm.h
63303 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
63304
63305 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
63306 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
63307 +
63308 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63309 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
63310 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
63311 +#else
63312 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
63313 +#endif
63314 +
63315 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
63316 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
63317
63318 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
63319 int set_page_dirty_lock(struct page *page);
63320 int clear_page_dirty_for_io(struct page *page);
63321
63322 -/* Is the vma a continuation of the stack vma above it? */
63323 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
63324 -{
63325 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
63326 -}
63327 -
63328 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
63329 - unsigned long addr)
63330 -{
63331 - return (vma->vm_flags & VM_GROWSDOWN) &&
63332 - (vma->vm_start == addr) &&
63333 - !vma_growsdown(vma->vm_prev, addr);
63334 -}
63335 -
63336 -/* Is the vma a continuation of the stack vma below it? */
63337 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
63338 -{
63339 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
63340 -}
63341 -
63342 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
63343 - unsigned long addr)
63344 -{
63345 - return (vma->vm_flags & VM_GROWSUP) &&
63346 - (vma->vm_end == addr) &&
63347 - !vma_growsup(vma->vm_next, addr);
63348 -}
63349 -
63350 extern pid_t
63351 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
63352
63353 @@ -1135,6 +1114,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
63354 }
63355 #endif
63356
63357 +#ifdef CONFIG_MMU
63358 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
63359 +#else
63360 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
63361 +{
63362 + return __pgprot(0);
63363 +}
63364 +#endif
63365 +
63366 int vma_wants_writenotify(struct vm_area_struct *vma);
63367
63368 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
63369 @@ -1153,8 +1141,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
63370 {
63371 return 0;
63372 }
63373 +
63374 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
63375 + unsigned long address)
63376 +{
63377 + return 0;
63378 +}
63379 #else
63380 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63381 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63382 #endif
63383
63384 #ifdef __PAGETABLE_PMD_FOLDED
63385 @@ -1163,8 +1158,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
63386 {
63387 return 0;
63388 }
63389 +
63390 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
63391 + unsigned long address)
63392 +{
63393 + return 0;
63394 +}
63395 #else
63396 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
63397 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
63398 #endif
63399
63400 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
63401 @@ -1182,11 +1184,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
63402 NULL: pud_offset(pgd, address);
63403 }
63404
63405 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
63406 +{
63407 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
63408 + NULL: pud_offset(pgd, address);
63409 +}
63410 +
63411 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
63412 {
63413 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
63414 NULL: pmd_offset(pud, address);
63415 }
63416 +
63417 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
63418 +{
63419 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
63420 + NULL: pmd_offset(pud, address);
63421 +}
63422 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
63423
63424 #if USE_SPLIT_PTLOCKS
63425 @@ -1396,6 +1410,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
63426 unsigned long, unsigned long,
63427 unsigned long, unsigned long);
63428 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63429 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63430
63431 /* These take the mm semaphore themselves */
63432 extern unsigned long vm_brk(unsigned long, unsigned long);
63433 @@ -1458,6 +1473,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
63434 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63435 struct vm_area_struct **pprev);
63436
63437 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
63438 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
63439 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63440 +
63441 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63442 NULL if none. Assume start_addr < end_addr. */
63443 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
63444 @@ -1486,15 +1505,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
63445 return vma;
63446 }
63447
63448 -#ifdef CONFIG_MMU
63449 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
63450 -#else
63451 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
63452 -{
63453 - return __pgprot(0);
63454 -}
63455 -#endif
63456 -
63457 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63458 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63459 unsigned long pfn, unsigned long size, pgprot_t);
63460 @@ -1599,7 +1609,7 @@ extern int unpoison_memory(unsigned long pfn);
63461 extern int sysctl_memory_failure_early_kill;
63462 extern int sysctl_memory_failure_recovery;
63463 extern void shake_page(struct page *p, int access);
63464 -extern atomic_long_t mce_bad_pages;
63465 +extern atomic_long_unchecked_t mce_bad_pages;
63466 extern int soft_offline_page(struct page *page, int flags);
63467
63468 extern void dump_page(struct page *page);
63469 @@ -1630,5 +1640,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
63470 static inline bool page_is_guard(struct page *page) { return false; }
63471 #endif /* CONFIG_DEBUG_PAGEALLOC */
63472
63473 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63474 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63475 +#else
63476 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63477 +#endif
63478 +
63479 #endif /* __KERNEL__ */
63480 #endif /* _LINUX_MM_H */
63481 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
63482 index 704a626..bb0705a 100644
63483 --- a/include/linux/mm_types.h
63484 +++ b/include/linux/mm_types.h
63485 @@ -263,6 +263,8 @@ struct vm_area_struct {
63486 #ifdef CONFIG_NUMA
63487 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63488 #endif
63489 +
63490 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63491 };
63492
63493 struct core_thread {
63494 @@ -337,7 +339,7 @@ struct mm_struct {
63495 unsigned long def_flags;
63496 unsigned long nr_ptes; /* Page table pages */
63497 unsigned long start_code, end_code, start_data, end_data;
63498 - unsigned long start_brk, brk, start_stack;
63499 + unsigned long brk_gap, start_brk, brk, start_stack;
63500 unsigned long arg_start, arg_end, env_start, env_end;
63501
63502 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
63503 @@ -389,6 +391,24 @@ struct mm_struct {
63504 struct cpumask cpumask_allocation;
63505 #endif
63506 struct uprobes_state uprobes_state;
63507 +
63508 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63509 + unsigned long pax_flags;
63510 +#endif
63511 +
63512 +#ifdef CONFIG_PAX_DLRESOLVE
63513 + unsigned long call_dl_resolve;
63514 +#endif
63515 +
63516 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63517 + unsigned long call_syscall;
63518 +#endif
63519 +
63520 +#ifdef CONFIG_PAX_ASLR
63521 + unsigned long delta_mmap; /* randomized offset */
63522 + unsigned long delta_stack; /* randomized offset */
63523 +#endif
63524 +
63525 };
63526
63527 static inline void mm_init_cpumask(struct mm_struct *mm)
63528 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
63529 index 1d1b1e1..2a13c78 100644
63530 --- a/include/linux/mmu_notifier.h
63531 +++ b/include/linux/mmu_notifier.h
63532 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
63533 */
63534 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63535 ({ \
63536 - pte_t __pte; \
63537 + pte_t ___pte; \
63538 struct vm_area_struct *___vma = __vma; \
63539 unsigned long ___address = __address; \
63540 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63541 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63542 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63543 - __pte; \
63544 + ___pte; \
63545 })
63546
63547 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
63548 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
63549 index 68c569f..5f43753 100644
63550 --- a/include/linux/mmzone.h
63551 +++ b/include/linux/mmzone.h
63552 @@ -411,7 +411,7 @@ struct zone {
63553 unsigned long flags; /* zone flags, see below */
63554
63555 /* Zone statistics */
63556 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63557 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63558
63559 /*
63560 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
63561 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
63562 index 5db9382..50e801d 100644
63563 --- a/include/linux/mod_devicetable.h
63564 +++ b/include/linux/mod_devicetable.h
63565 @@ -12,7 +12,7 @@
63566 typedef unsigned long kernel_ulong_t;
63567 #endif
63568
63569 -#define PCI_ANY_ID (~0)
63570 +#define PCI_ANY_ID ((__u16)~0)
63571
63572 struct pci_device_id {
63573 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63574 @@ -131,7 +131,7 @@ struct usb_device_id {
63575 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63576 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63577
63578 -#define HID_ANY_ID (~0)
63579 +#define HID_ANY_ID (~0U)
63580 #define HID_BUS_ANY 0xffff
63581 #define HID_GROUP_ANY 0x0000
63582
63583 diff --git a/include/linux/module.h b/include/linux/module.h
63584 index fbcafe2..e5d9587 100644
63585 --- a/include/linux/module.h
63586 +++ b/include/linux/module.h
63587 @@ -17,6 +17,7 @@
63588 #include <linux/moduleparam.h>
63589 #include <linux/tracepoint.h>
63590 #include <linux/export.h>
63591 +#include <linux/fs.h>
63592
63593 #include <linux/percpu.h>
63594 #include <asm/module.h>
63595 @@ -273,19 +274,16 @@ struct module
63596 int (*init)(void);
63597
63598 /* If this is non-NULL, vfree after init() returns */
63599 - void *module_init;
63600 + void *module_init_rx, *module_init_rw;
63601
63602 /* Here is the actual code + data, vfree'd on unload. */
63603 - void *module_core;
63604 + void *module_core_rx, *module_core_rw;
63605
63606 /* Here are the sizes of the init and core sections */
63607 - unsigned int init_size, core_size;
63608 + unsigned int init_size_rw, core_size_rw;
63609
63610 /* The size of the executable code in each section. */
63611 - unsigned int init_text_size, core_text_size;
63612 -
63613 - /* Size of RO sections of the module (text+rodata) */
63614 - unsigned int init_ro_size, core_ro_size;
63615 + unsigned int init_size_rx, core_size_rx;
63616
63617 /* Arch-specific module values */
63618 struct mod_arch_specific arch;
63619 @@ -341,6 +339,10 @@ struct module
63620 #ifdef CONFIG_EVENT_TRACING
63621 struct ftrace_event_call **trace_events;
63622 unsigned int num_trace_events;
63623 + struct file_operations trace_id;
63624 + struct file_operations trace_enable;
63625 + struct file_operations trace_format;
63626 + struct file_operations trace_filter;
63627 #endif
63628 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63629 unsigned int num_ftrace_callsites;
63630 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
63631 bool is_module_percpu_address(unsigned long addr);
63632 bool is_module_text_address(unsigned long addr);
63633
63634 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63635 +{
63636 +
63637 +#ifdef CONFIG_PAX_KERNEXEC
63638 + if (ktla_ktva(addr) >= (unsigned long)start &&
63639 + ktla_ktva(addr) < (unsigned long)start + size)
63640 + return 1;
63641 +#endif
63642 +
63643 + return ((void *)addr >= start && (void *)addr < start + size);
63644 +}
63645 +
63646 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63647 +{
63648 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63649 +}
63650 +
63651 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63652 +{
63653 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63654 +}
63655 +
63656 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63657 +{
63658 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63659 +}
63660 +
63661 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63662 +{
63663 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63664 +}
63665 +
63666 static inline int within_module_core(unsigned long addr, struct module *mod)
63667 {
63668 - return (unsigned long)mod->module_core <= addr &&
63669 - addr < (unsigned long)mod->module_core + mod->core_size;
63670 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63671 }
63672
63673 static inline int within_module_init(unsigned long addr, struct module *mod)
63674 {
63675 - return (unsigned long)mod->module_init <= addr &&
63676 - addr < (unsigned long)mod->module_init + mod->init_size;
63677 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63678 }
63679
63680 /* Search for module by name: must hold module_mutex. */
63681 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63682 index b2be02e..72d2f78 100644
63683 --- a/include/linux/moduleloader.h
63684 +++ b/include/linux/moduleloader.h
63685 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63686
63687 /* Allocator used for allocating struct module, core sections and init
63688 sections. Returns NULL on failure. */
63689 -void *module_alloc(unsigned long size);
63690 +void *module_alloc(unsigned long size) __size_overflow(1);
63691 +
63692 +#ifdef CONFIG_PAX_KERNEXEC
63693 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
63694 +#else
63695 +#define module_alloc_exec(x) module_alloc(x)
63696 +#endif
63697
63698 /* Free memory returned from module_alloc. */
63699 void module_free(struct module *mod, void *module_region);
63700
63701 +#ifdef CONFIG_PAX_KERNEXEC
63702 +void module_free_exec(struct module *mod, void *module_region);
63703 +#else
63704 +#define module_free_exec(x, y) module_free((x), (y))
63705 +#endif
63706 +
63707 /* Apply the given relocation to the (simplified) ELF. Return -error
63708 or 0. */
63709 int apply_relocate(Elf_Shdr *sechdrs,
63710 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63711 index d6a5806..7c13347 100644
63712 --- a/include/linux/moduleparam.h
63713 +++ b/include/linux/moduleparam.h
63714 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
63715 * @len is usually just sizeof(string).
63716 */
63717 #define module_param_string(name, string, len, perm) \
63718 - static const struct kparam_string __param_string_##name \
63719 + static const struct kparam_string __param_string_##name __used \
63720 = { len, string }; \
63721 __module_param_call(MODULE_PARAM_PREFIX, name, \
63722 &param_ops_string, \
63723 @@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63724 */
63725 #define module_param_array_named(name, array, type, nump, perm) \
63726 param_check_##type(name, &(array)[0]); \
63727 - static const struct kparam_array __param_arr_##name \
63728 + static const struct kparam_array __param_arr_##name __used \
63729 = { .max = ARRAY_SIZE(array), .num = nump, \
63730 .ops = &param_ops_##type, \
63731 .elemsize = sizeof(array[0]), .elem = array }; \
63732 diff --git a/include/linux/namei.h b/include/linux/namei.h
63733 index ffc0213..2c1f2cb 100644
63734 --- a/include/linux/namei.h
63735 +++ b/include/linux/namei.h
63736 @@ -24,7 +24,7 @@ struct nameidata {
63737 unsigned seq;
63738 int last_type;
63739 unsigned depth;
63740 - char *saved_names[MAX_NESTED_LINKS + 1];
63741 + const char *saved_names[MAX_NESTED_LINKS + 1];
63742
63743 /* Intent data */
63744 union {
63745 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63746 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63747 extern void unlock_rename(struct dentry *, struct dentry *);
63748
63749 -static inline void nd_set_link(struct nameidata *nd, char *path)
63750 +static inline void nd_set_link(struct nameidata *nd, const char *path)
63751 {
63752 nd->saved_names[nd->depth] = path;
63753 }
63754
63755 -static inline char *nd_get_link(struct nameidata *nd)
63756 +static inline const char *nd_get_link(const struct nameidata *nd)
63757 {
63758 return nd->saved_names[nd->depth];
63759 }
63760 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63761 index b52070a..ea67208 100644
63762 --- a/include/linux/netdevice.h
63763 +++ b/include/linux/netdevice.h
63764 @@ -1026,6 +1026,7 @@ struct net_device_ops {
63765 struct net_device *dev,
63766 int idx);
63767 };
63768 +typedef struct net_device_ops __no_const net_device_ops_no_const;
63769
63770 /*
63771 * The DEVICE structure.
63772 @@ -1087,7 +1088,7 @@ struct net_device {
63773 int iflink;
63774
63775 struct net_device_stats stats;
63776 - atomic_long_t rx_dropped; /* dropped packets by core network
63777 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63778 * Do not use this in drivers.
63779 */
63780
63781 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63782 new file mode 100644
63783 index 0000000..33f4af8
63784 --- /dev/null
63785 +++ b/include/linux/netfilter/xt_gradm.h
63786 @@ -0,0 +1,9 @@
63787 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
63788 +#define _LINUX_NETFILTER_XT_GRADM_H 1
63789 +
63790 +struct xt_gradm_mtinfo {
63791 + __u16 flags;
63792 + __u16 invflags;
63793 +};
63794 +
63795 +#endif
63796 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63797 index c65a18a..0c05f3a 100644
63798 --- a/include/linux/of_pdt.h
63799 +++ b/include/linux/of_pdt.h
63800 @@ -32,7 +32,7 @@ struct of_pdt_ops {
63801
63802 /* return 0 on success; fill in 'len' with number of bytes in path */
63803 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63804 -};
63805 +} __no_const;
63806
63807 extern void *prom_early_alloc(unsigned long size);
63808
63809 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63810 index a4c5624..79d6d88 100644
63811 --- a/include/linux/oprofile.h
63812 +++ b/include/linux/oprofile.h
63813 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63814 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63815 char const * name, ulong * val);
63816
63817 -/** Create a file for read-only access to an atomic_t. */
63818 +/** Create a file for read-only access to an atomic_unchecked_t. */
63819 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63820 - char const * name, atomic_t * val);
63821 + char const * name, atomic_unchecked_t * val);
63822
63823 /** create a directory */
63824 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63825 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63826 index c3c98a6..c57555e 100644
63827 --- a/include/linux/perf_event.h
63828 +++ b/include/linux/perf_event.h
63829 @@ -881,8 +881,8 @@ struct perf_event {
63830
63831 enum perf_event_active_state state;
63832 unsigned int attach_state;
63833 - local64_t count;
63834 - atomic64_t child_count;
63835 + local64_t count; /* PaX: fix it one day */
63836 + atomic64_unchecked_t child_count;
63837
63838 /*
63839 * These are the total time in nanoseconds that the event
63840 @@ -933,8 +933,8 @@ struct perf_event {
63841 * These accumulate total time (in nanoseconds) that children
63842 * events have been enabled and running, respectively.
63843 */
63844 - atomic64_t child_total_time_enabled;
63845 - atomic64_t child_total_time_running;
63846 + atomic64_unchecked_t child_total_time_enabled;
63847 + atomic64_unchecked_t child_total_time_running;
63848
63849 /*
63850 * Protect attach/detach and child_list:
63851 diff --git a/include/linux/personality.h b/include/linux/personality.h
63852 index 8fc7dd1a..c19d89e 100644
63853 --- a/include/linux/personality.h
63854 +++ b/include/linux/personality.h
63855 @@ -44,6 +44,7 @@ enum {
63856 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63857 ADDR_NO_RANDOMIZE | \
63858 ADDR_COMPAT_LAYOUT | \
63859 + ADDR_LIMIT_3GB | \
63860 MMAP_PAGE_ZERO)
63861
63862 /*
63863 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63864 index e1ac1ce..0675fed 100644
63865 --- a/include/linux/pipe_fs_i.h
63866 +++ b/include/linux/pipe_fs_i.h
63867 @@ -45,9 +45,9 @@ struct pipe_buffer {
63868 struct pipe_inode_info {
63869 wait_queue_head_t wait;
63870 unsigned int nrbufs, curbuf, buffers;
63871 - unsigned int readers;
63872 - unsigned int writers;
63873 - unsigned int waiting_writers;
63874 + atomic_t readers;
63875 + atomic_t writers;
63876 + atomic_t waiting_writers;
63877 unsigned int r_counter;
63878 unsigned int w_counter;
63879 struct page *tmp_page;
63880 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63881 index f271860..6b3bec5 100644
63882 --- a/include/linux/pm_runtime.h
63883 +++ b/include/linux/pm_runtime.h
63884 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63885
63886 static inline void pm_runtime_mark_last_busy(struct device *dev)
63887 {
63888 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
63889 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63890 }
63891
63892 #else /* !CONFIG_PM_RUNTIME */
63893 diff --git a/include/linux/poison.h b/include/linux/poison.h
63894 index 2110a81..13a11bb 100644
63895 --- a/include/linux/poison.h
63896 +++ b/include/linux/poison.h
63897 @@ -19,8 +19,8 @@
63898 * under normal circumstances, used to verify that nobody uses
63899 * non-initialized list entries.
63900 */
63901 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63902 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63903 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63904 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63905
63906 /********** include/linux/timer.h **********/
63907 /*
63908 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63909 index 5a710b9..0b0dab9 100644
63910 --- a/include/linux/preempt.h
63911 +++ b/include/linux/preempt.h
63912 @@ -126,7 +126,7 @@ struct preempt_ops {
63913 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63914 void (*sched_out)(struct preempt_notifier *notifier,
63915 struct task_struct *next);
63916 -};
63917 +} __no_const;
63918
63919 /**
63920 * preempt_notifier - key for installing preemption notifiers
63921 diff --git a/include/linux/printk.h b/include/linux/printk.h
63922 index 1bec2f7..b66e833 100644
63923 --- a/include/linux/printk.h
63924 +++ b/include/linux/printk.h
63925 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
63926 extern int printk_needs_cpu(int cpu);
63927 extern void printk_tick(void);
63928
63929 +extern int kptr_restrict;
63930 +
63931 #ifdef CONFIG_PRINTK
63932 asmlinkage __printf(5, 0)
63933 int vprintk_emit(int facility, int level,
63934 @@ -128,7 +130,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
63935
63936 extern int printk_delay_msec;
63937 extern int dmesg_restrict;
63938 -extern int kptr_restrict;
63939
63940 void log_buf_kexec_setup(void);
63941 void __init setup_log_buf(int early);
63942 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63943 index 3fd2e87..d93a721 100644
63944 --- a/include/linux/proc_fs.h
63945 +++ b/include/linux/proc_fs.h
63946 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63947 return proc_create_data(name, mode, parent, proc_fops, NULL);
63948 }
63949
63950 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63951 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63952 +{
63953 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63954 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63955 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63956 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63957 +#else
63958 + return proc_create_data(name, mode, parent, proc_fops, NULL);
63959 +#endif
63960 +}
63961 +
63962 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63963 umode_t mode, struct proc_dir_entry *base,
63964 read_proc_t *read_proc, void * data)
63965 @@ -258,7 +270,7 @@ union proc_op {
63966 int (*proc_show)(struct seq_file *m,
63967 struct pid_namespace *ns, struct pid *pid,
63968 struct task_struct *task);
63969 -};
63970 +} __no_const;
63971
63972 struct ctl_table_header;
63973 struct ctl_table;
63974 diff --git a/include/linux/random.h b/include/linux/random.h
63975 index ac621ce..c1215f3 100644
63976 --- a/include/linux/random.h
63977 +++ b/include/linux/random.h
63978 @@ -53,6 +53,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
63979 unsigned int value);
63980 extern void add_interrupt_randomness(int irq, int irq_flags);
63981
63982 +#ifdef CONFIG_PAX_LATENT_ENTROPY
63983 +extern void transfer_latent_entropy(void);
63984 +#endif
63985 +
63986 extern void get_random_bytes(void *buf, int nbytes);
63987 extern void get_random_bytes_arch(void *buf, int nbytes);
63988 void generate_random_uuid(unsigned char uuid_out[16]);
63989 @@ -69,12 +73,17 @@ void srandom32(u32 seed);
63990
63991 u32 prandom32(struct rnd_state *);
63992
63993 +static inline unsigned long pax_get_random_long(void)
63994 +{
63995 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63996 +}
63997 +
63998 /*
63999 * Handle minimum values for seeds
64000 */
64001 static inline u32 __seed(u32 x, u32 m)
64002 {
64003 - return (x < m) ? x + m : x;
64004 + return (x <= m) ? x + m + 1 : x;
64005 }
64006
64007 /**
64008 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
64009 index e0879a7..a12f962 100644
64010 --- a/include/linux/reboot.h
64011 +++ b/include/linux/reboot.h
64012 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
64013 * Architecture-specific implementations of sys_reboot commands.
64014 */
64015
64016 -extern void machine_restart(char *cmd);
64017 -extern void machine_halt(void);
64018 -extern void machine_power_off(void);
64019 +extern void machine_restart(char *cmd) __noreturn;
64020 +extern void machine_halt(void) __noreturn;
64021 +extern void machine_power_off(void) __noreturn;
64022
64023 extern void machine_shutdown(void);
64024 struct pt_regs;
64025 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
64026 */
64027
64028 extern void kernel_restart_prepare(char *cmd);
64029 -extern void kernel_restart(char *cmd);
64030 -extern void kernel_halt(void);
64031 -extern void kernel_power_off(void);
64032 +extern void kernel_restart(char *cmd) __noreturn;
64033 +extern void kernel_halt(void) __noreturn;
64034 +extern void kernel_power_off(void) __noreturn;
64035
64036 extern int C_A_D; /* for sysctl */
64037 void ctrl_alt_del(void);
64038 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
64039 * Emergency restart, callable from an interrupt handler.
64040 */
64041
64042 -extern void emergency_restart(void);
64043 +extern void emergency_restart(void) __noreturn;
64044 #include <asm/emergency-restart.h>
64045
64046 #endif
64047 diff --git a/include/linux/relay.h b/include/linux/relay.h
64048 index 91cacc3..b55ff74 100644
64049 --- a/include/linux/relay.h
64050 +++ b/include/linux/relay.h
64051 @@ -160,7 +160,7 @@ struct rchan_callbacks
64052 * The callback should return 0 if successful, negative if not.
64053 */
64054 int (*remove_buf_file)(struct dentry *dentry);
64055 -};
64056 +} __no_const;
64057
64058 /*
64059 * CONFIG_RELAY kernel API, kernel/relay.c
64060 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
64061 index 6fdf027..ff72610 100644
64062 --- a/include/linux/rfkill.h
64063 +++ b/include/linux/rfkill.h
64064 @@ -147,6 +147,7 @@ struct rfkill_ops {
64065 void (*query)(struct rfkill *rfkill, void *data);
64066 int (*set_block)(void *data, bool blocked);
64067 };
64068 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
64069
64070 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
64071 /**
64072 diff --git a/include/linux/rio.h b/include/linux/rio.h
64073 index a90ebad..fd87b5d 100644
64074 --- a/include/linux/rio.h
64075 +++ b/include/linux/rio.h
64076 @@ -321,7 +321,7 @@ struct rio_ops {
64077 int mbox, void *buffer, size_t len);
64078 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
64079 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
64080 -};
64081 +} __no_const;
64082
64083 #define RIO_RESOURCE_MEM 0x00000100
64084 #define RIO_RESOURCE_DOORBELL 0x00000200
64085 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
64086 index 3fce545..b4fed6e 100644
64087 --- a/include/linux/rmap.h
64088 +++ b/include/linux/rmap.h
64089 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
64090 void anon_vma_init(void); /* create anon_vma_cachep */
64091 int anon_vma_prepare(struct vm_area_struct *);
64092 void unlink_anon_vmas(struct vm_area_struct *);
64093 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
64094 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
64095 void anon_vma_moveto_tail(struct vm_area_struct *);
64096 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
64097 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
64098
64099 static inline void anon_vma_merge(struct vm_area_struct *vma,
64100 struct vm_area_struct *next)
64101 diff --git a/include/linux/sched.h b/include/linux/sched.h
64102 index e63650f..7dfd1b3 100644
64103 --- a/include/linux/sched.h
64104 +++ b/include/linux/sched.h
64105 @@ -101,6 +101,7 @@ struct bio_list;
64106 struct fs_struct;
64107 struct perf_event_context;
64108 struct blk_plug;
64109 +struct linux_binprm;
64110
64111 /*
64112 * List of flags we want to share for kernel threads,
64113 @@ -384,10 +385,13 @@ struct user_namespace;
64114 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
64115
64116 extern int sysctl_max_map_count;
64117 +extern unsigned long sysctl_heap_stack_gap;
64118
64119 #include <linux/aio.h>
64120
64121 #ifdef CONFIG_MMU
64122 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
64123 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
64124 extern void arch_pick_mmap_layout(struct mm_struct *mm);
64125 extern unsigned long
64126 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
64127 @@ -406,6 +410,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
64128 extern void set_dumpable(struct mm_struct *mm, int value);
64129 extern int get_dumpable(struct mm_struct *mm);
64130
64131 +/* get/set_dumpable() values */
64132 +#define SUID_DUMPABLE_DISABLED 0
64133 +#define SUID_DUMPABLE_ENABLED 1
64134 +#define SUID_DUMPABLE_SAFE 2
64135 +
64136 /* mm flags */
64137 /* dumpable bits */
64138 #define MMF_DUMPABLE 0 /* core dump is permitted */
64139 @@ -646,6 +655,17 @@ struct signal_struct {
64140 #ifdef CONFIG_TASKSTATS
64141 struct taskstats *stats;
64142 #endif
64143 +
64144 +#ifdef CONFIG_GRKERNSEC
64145 + u32 curr_ip;
64146 + u32 saved_ip;
64147 + u32 gr_saddr;
64148 + u32 gr_daddr;
64149 + u16 gr_sport;
64150 + u16 gr_dport;
64151 + u8 used_accept:1;
64152 +#endif
64153 +
64154 #ifdef CONFIG_AUDIT
64155 unsigned audit_tty;
64156 struct tty_audit_buf *tty_audit_buf;
64157 @@ -729,6 +749,11 @@ struct user_struct {
64158 struct key *session_keyring; /* UID's default session keyring */
64159 #endif
64160
64161 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64162 + unsigned int banned;
64163 + unsigned long ban_expires;
64164 +#endif
64165 +
64166 /* Hash table maintenance information */
64167 struct hlist_node uidhash_node;
64168 kuid_t uid;
64169 @@ -1351,8 +1376,8 @@ struct task_struct {
64170 struct list_head thread_group;
64171
64172 struct completion *vfork_done; /* for vfork() */
64173 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
64174 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64175 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
64176 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64177
64178 cputime_t utime, stime, utimescaled, stimescaled;
64179 cputime_t gtime;
64180 @@ -1368,11 +1393,6 @@ struct task_struct {
64181 struct task_cputime cputime_expires;
64182 struct list_head cpu_timers[3];
64183
64184 -/* process credentials */
64185 - const struct cred __rcu *real_cred; /* objective and real subjective task
64186 - * credentials (COW) */
64187 - const struct cred __rcu *cred; /* effective (overridable) subjective task
64188 - * credentials (COW) */
64189 char comm[TASK_COMM_LEN]; /* executable name excluding path
64190 - access with [gs]et_task_comm (which lock
64191 it with task_lock())
64192 @@ -1389,8 +1409,16 @@ struct task_struct {
64193 #endif
64194 /* CPU-specific state of this task */
64195 struct thread_struct thread;
64196 +/* thread_info moved to task_struct */
64197 +#ifdef CONFIG_X86
64198 + struct thread_info tinfo;
64199 +#endif
64200 /* filesystem information */
64201 struct fs_struct *fs;
64202 +
64203 + const struct cred __rcu *cred; /* effective (overridable) subjective task
64204 + * credentials (COW) */
64205 +
64206 /* open file information */
64207 struct files_struct *files;
64208 /* namespaces */
64209 @@ -1434,6 +1462,11 @@ struct task_struct {
64210 struct rt_mutex_waiter *pi_blocked_on;
64211 #endif
64212
64213 +/* process credentials */
64214 + const struct cred __rcu *real_cred; /* objective and real subjective task
64215 + * credentials (COW) */
64216 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64217 +
64218 #ifdef CONFIG_DEBUG_MUTEXES
64219 /* mutex deadlock detection */
64220 struct mutex_waiter *blocked_on;
64221 @@ -1550,6 +1583,27 @@ struct task_struct {
64222 unsigned long default_timer_slack_ns;
64223
64224 struct list_head *scm_work_list;
64225 +
64226 +#ifdef CONFIG_GRKERNSEC
64227 + /* grsecurity */
64228 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64229 + u64 exec_id;
64230 +#endif
64231 +#ifdef CONFIG_GRKERNSEC_SETXID
64232 + const struct cred *delayed_cred;
64233 +#endif
64234 + struct dentry *gr_chroot_dentry;
64235 + struct acl_subject_label *acl;
64236 + struct acl_role_label *role;
64237 + struct file *exec_file;
64238 + u16 acl_role_id;
64239 + /* is this the task that authenticated to the special role */
64240 + u8 acl_sp_role;
64241 + u8 is_writable;
64242 + u8 brute;
64243 + u8 gr_is_chrooted;
64244 +#endif
64245 +
64246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
64247 /* Index of current stored address in ret_stack */
64248 int curr_ret_stack;
64249 @@ -1588,6 +1642,51 @@ struct task_struct {
64250 #endif
64251 };
64252
64253 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
64254 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
64255 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
64256 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
64257 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
64258 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
64259 +
64260 +#ifdef CONFIG_PAX_SOFTMODE
64261 +extern int pax_softmode;
64262 +#endif
64263 +
64264 +extern int pax_check_flags(unsigned long *);
64265 +
64266 +/* if tsk != current then task_lock must be held on it */
64267 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64268 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
64269 +{
64270 + if (likely(tsk->mm))
64271 + return tsk->mm->pax_flags;
64272 + else
64273 + return 0UL;
64274 +}
64275 +
64276 +/* if tsk != current then task_lock must be held on it */
64277 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
64278 +{
64279 + if (likely(tsk->mm)) {
64280 + tsk->mm->pax_flags = flags;
64281 + return 0;
64282 + }
64283 + return -EINVAL;
64284 +}
64285 +#endif
64286 +
64287 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64288 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
64289 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64290 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
64291 +#endif
64292 +
64293 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
64294 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
64295 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
64296 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
64297 +
64298 /* Future-safe accessor for struct task_struct's cpus_allowed. */
64299 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
64300
64301 @@ -2115,7 +2214,9 @@ void yield(void);
64302 extern struct exec_domain default_exec_domain;
64303
64304 union thread_union {
64305 +#ifndef CONFIG_X86
64306 struct thread_info thread_info;
64307 +#endif
64308 unsigned long stack[THREAD_SIZE/sizeof(long)];
64309 };
64310
64311 @@ -2148,6 +2249,7 @@ extern struct pid_namespace init_pid_ns;
64312 */
64313
64314 extern struct task_struct *find_task_by_vpid(pid_t nr);
64315 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
64316 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
64317 struct pid_namespace *ns);
64318
64319 @@ -2304,7 +2406,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
64320 extern void exit_itimers(struct signal_struct *);
64321 extern void flush_itimer_signals(void);
64322
64323 -extern void do_group_exit(int);
64324 +extern __noreturn void do_group_exit(int);
64325
64326 extern void daemonize(const char *, ...);
64327 extern int allow_signal(int);
64328 @@ -2505,9 +2607,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
64329
64330 #endif
64331
64332 -static inline int object_is_on_stack(void *obj)
64333 +static inline int object_starts_on_stack(void *obj)
64334 {
64335 - void *stack = task_stack_page(current);
64336 + const void *stack = task_stack_page(current);
64337
64338 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64339 }
64340 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
64341 index 899fbb4..1cb4138 100644
64342 --- a/include/linux/screen_info.h
64343 +++ b/include/linux/screen_info.h
64344 @@ -43,7 +43,8 @@ struct screen_info {
64345 __u16 pages; /* 0x32 */
64346 __u16 vesa_attributes; /* 0x34 */
64347 __u32 capabilities; /* 0x36 */
64348 - __u8 _reserved[6]; /* 0x3a */
64349 + __u16 vesapm_size; /* 0x3a */
64350 + __u8 _reserved[4]; /* 0x3c */
64351 } __attribute__((packed));
64352
64353 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
64354 diff --git a/include/linux/security.h b/include/linux/security.h
64355 index d143b8e..9f27b95 100644
64356 --- a/include/linux/security.h
64357 +++ b/include/linux/security.h
64358 @@ -26,6 +26,7 @@
64359 #include <linux/capability.h>
64360 #include <linux/slab.h>
64361 #include <linux/err.h>
64362 +#include <linux/grsecurity.h>
64363
64364 struct linux_binprm;
64365 struct cred;
64366 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
64367 index fc61854..d7c490b 100644
64368 --- a/include/linux/seq_file.h
64369 +++ b/include/linux/seq_file.h
64370 @@ -25,6 +25,9 @@ struct seq_file {
64371 struct mutex lock;
64372 const struct seq_operations *op;
64373 int poll_event;
64374 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64375 + u64 exec_id;
64376 +#endif
64377 void *private;
64378 };
64379
64380 @@ -34,6 +37,7 @@ struct seq_operations {
64381 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64382 int (*show) (struct seq_file *m, void *v);
64383 };
64384 +typedef struct seq_operations __no_const seq_operations_no_const;
64385
64386 #define SEQ_SKIP 1
64387
64388 diff --git a/include/linux/shm.h b/include/linux/shm.h
64389 index 92808b8..c28cac4 100644
64390 --- a/include/linux/shm.h
64391 +++ b/include/linux/shm.h
64392 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
64393
64394 /* The task created the shm object. NULL if the task is dead. */
64395 struct task_struct *shm_creator;
64396 +#ifdef CONFIG_GRKERNSEC
64397 + time_t shm_createtime;
64398 + pid_t shm_lapid;
64399 +#endif
64400 };
64401
64402 /* shm_mode upper byte flags */
64403 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
64404 index 642cb73..2efdb98 100644
64405 --- a/include/linux/skbuff.h
64406 +++ b/include/linux/skbuff.h
64407 @@ -567,7 +567,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
64408 extern struct sk_buff *__alloc_skb(unsigned int size,
64409 gfp_t priority, int fclone, int node);
64410 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
64411 -static inline struct sk_buff *alloc_skb(unsigned int size,
64412 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
64413 gfp_t priority)
64414 {
64415 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
64416 @@ -680,7 +680,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
64417 */
64418 static inline int skb_queue_empty(const struct sk_buff_head *list)
64419 {
64420 - return list->next == (struct sk_buff *)list;
64421 + return list->next == (const struct sk_buff *)list;
64422 }
64423
64424 /**
64425 @@ -693,7 +693,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
64426 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64427 const struct sk_buff *skb)
64428 {
64429 - return skb->next == (struct sk_buff *)list;
64430 + return skb->next == (const struct sk_buff *)list;
64431 }
64432
64433 /**
64434 @@ -706,7 +706,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64435 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64436 const struct sk_buff *skb)
64437 {
64438 - return skb->prev == (struct sk_buff *)list;
64439 + return skb->prev == (const struct sk_buff *)list;
64440 }
64441
64442 /**
64443 @@ -1605,7 +1605,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
64444 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
64445 */
64446 #ifndef NET_SKB_PAD
64447 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
64448 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
64449 #endif
64450
64451 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
64452 @@ -2112,7 +2112,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
64453 int noblock, int *err);
64454 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
64455 struct poll_table_struct *wait);
64456 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
64457 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
64458 int offset, struct iovec *to,
64459 int size);
64460 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
64461 diff --git a/include/linux/slab.h b/include/linux/slab.h
64462 index 67d5d94..bbd740b 100644
64463 --- a/include/linux/slab.h
64464 +++ b/include/linux/slab.h
64465 @@ -11,12 +11,20 @@
64466
64467 #include <linux/gfp.h>
64468 #include <linux/types.h>
64469 +#include <linux/err.h>
64470
64471 /*
64472 * Flags to pass to kmem_cache_create().
64473 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64474 */
64475 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64476 +
64477 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64478 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64479 +#else
64480 +#define SLAB_USERCOPY 0x00000000UL
64481 +#endif
64482 +
64483 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64484 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64485 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64486 @@ -87,10 +95,13 @@
64487 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64488 * Both make kfree a no-op.
64489 */
64490 -#define ZERO_SIZE_PTR ((void *)16)
64491 +#define ZERO_SIZE_PTR \
64492 +({ \
64493 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64494 + (void *)(-MAX_ERRNO-1L); \
64495 +})
64496
64497 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64498 - (unsigned long)ZERO_SIZE_PTR)
64499 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64500
64501 /*
64502 * struct kmem_cache related prototypes
64503 @@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
64504 void kfree(const void *);
64505 void kzfree(const void *);
64506 size_t ksize(const void *);
64507 +const char *check_heap_object(const void *ptr, unsigned long n, bool to);
64508 +bool is_usercopy_object(const void *ptr);
64509
64510 /*
64511 * Allocator specific definitions. These are mainly used to establish optimized
64512 @@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64513 */
64514 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64515 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64516 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64517 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
64518 #define kmalloc_track_caller(size, flags) \
64519 __kmalloc_track_caller(size, flags, _RET_IP_)
64520 #else
64521 @@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64522 */
64523 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64524 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64525 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
64526 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
64527 #define kmalloc_node_track_caller(size, flags, node) \
64528 __kmalloc_node_track_caller(size, flags, node, \
64529 _RET_IP_)
64530 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
64531 index fbd1117..0a3d314 100644
64532 --- a/include/linux/slab_def.h
64533 +++ b/include/linux/slab_def.h
64534 @@ -66,10 +66,10 @@ struct kmem_cache {
64535 unsigned long node_allocs;
64536 unsigned long node_frees;
64537 unsigned long node_overflow;
64538 - atomic_t allochit;
64539 - atomic_t allocmiss;
64540 - atomic_t freehit;
64541 - atomic_t freemiss;
64542 + atomic_unchecked_t allochit;
64543 + atomic_unchecked_t allocmiss;
64544 + atomic_unchecked_t freehit;
64545 + atomic_unchecked_t freemiss;
64546
64547 /*
64548 * If debugging is enabled, then the allocator can add additional
64549 @@ -103,11 +103,16 @@ struct cache_sizes {
64550 #ifdef CONFIG_ZONE_DMA
64551 struct kmem_cache *cs_dmacachep;
64552 #endif
64553 +
64554 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64555 + struct kmem_cache *cs_usercopycachep;
64556 +#endif
64557 +
64558 };
64559 extern struct cache_sizes malloc_sizes[];
64560
64561 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64562 -void *__kmalloc(size_t size, gfp_t flags);
64563 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64564
64565 #ifdef CONFIG_TRACING
64566 extern void *kmem_cache_alloc_trace(size_t size,
64567 @@ -150,6 +155,13 @@ found:
64568 cachep = malloc_sizes[i].cs_dmacachep;
64569 else
64570 #endif
64571 +
64572 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64573 + if (flags & GFP_USERCOPY)
64574 + cachep = malloc_sizes[i].cs_usercopycachep;
64575 + else
64576 +#endif
64577 +
64578 cachep = malloc_sizes[i].cs_cachep;
64579
64580 ret = kmem_cache_alloc_trace(size, cachep, flags);
64581 @@ -160,7 +172,7 @@ found:
64582 }
64583
64584 #ifdef CONFIG_NUMA
64585 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64586 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64587 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64588
64589 #ifdef CONFIG_TRACING
64590 @@ -203,6 +215,13 @@ found:
64591 cachep = malloc_sizes[i].cs_dmacachep;
64592 else
64593 #endif
64594 +
64595 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64596 + if (flags & GFP_USERCOPY)
64597 + cachep = malloc_sizes[i].cs_usercopycachep;
64598 + else
64599 +#endif
64600 +
64601 cachep = malloc_sizes[i].cs_cachep;
64602
64603 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
64604 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64605 index 0ec00b3..22b4715 100644
64606 --- a/include/linux/slob_def.h
64607 +++ b/include/linux/slob_def.h
64608 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64609 return kmem_cache_alloc_node(cachep, flags, -1);
64610 }
64611
64612 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64613 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64614
64615 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64616 {
64617 @@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64618 return __kmalloc_node(size, flags, -1);
64619 }
64620
64621 -static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64622 +static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
64623 {
64624 return kmalloc(size, flags);
64625 }
64626 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
64627 index c2f8c8b..d992a41 100644
64628 --- a/include/linux/slub_def.h
64629 +++ b/include/linux/slub_def.h
64630 @@ -92,7 +92,7 @@ struct kmem_cache {
64631 struct kmem_cache_order_objects max;
64632 struct kmem_cache_order_objects min;
64633 gfp_t allocflags; /* gfp flags to use on each alloc */
64634 - int refcount; /* Refcount for slab cache destroy */
64635 + atomic_t refcount; /* Refcount for slab cache destroy */
64636 void (*ctor)(void *);
64637 int inuse; /* Offset to metadata */
64638 int align; /* Alignment */
64639 @@ -153,7 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
64640 * Sorry that the following has to be that ugly but some versions of GCC
64641 * have trouble with constant propagation and loops.
64642 */
64643 -static __always_inline int kmalloc_index(size_t size)
64644 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
64645 {
64646 if (!size)
64647 return 0;
64648 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64649 }
64650
64651 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64652 -void *__kmalloc(size_t size, gfp_t flags);
64653 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64654
64655 static __always_inline void *
64656 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64657 @@ -259,7 +259,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64658 }
64659 #endif
64660
64661 -static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64662 +static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
64663 {
64664 unsigned int order = get_order(size);
64665 return kmalloc_order_trace(size, flags, order);
64666 @@ -284,7 +284,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64667 }
64668
64669 #ifdef CONFIG_NUMA
64670 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64671 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64672 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64673
64674 #ifdef CONFIG_TRACING
64675 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64676 index de8832d..0147b46 100644
64677 --- a/include/linux/sonet.h
64678 +++ b/include/linux/sonet.h
64679 @@ -61,7 +61,7 @@ struct sonet_stats {
64680 #include <linux/atomic.h>
64681
64682 struct k_sonet_stats {
64683 -#define __HANDLE_ITEM(i) atomic_t i
64684 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
64685 __SONET_ITEMS
64686 #undef __HANDLE_ITEM
64687 };
64688 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64689 index 523547e..2cb7140 100644
64690 --- a/include/linux/sunrpc/clnt.h
64691 +++ b/include/linux/sunrpc/clnt.h
64692 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64693 {
64694 switch (sap->sa_family) {
64695 case AF_INET:
64696 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
64697 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64698 case AF_INET6:
64699 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64700 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64701 }
64702 return 0;
64703 }
64704 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64705 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64706 const struct sockaddr *src)
64707 {
64708 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64709 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64710 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64711
64712 dsin->sin_family = ssin->sin_family;
64713 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64714 if (sa->sa_family != AF_INET6)
64715 return 0;
64716
64717 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64718 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64719 }
64720
64721 #endif /* __KERNEL__ */
64722 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64723 index dc0c3cc..8503fb6 100644
64724 --- a/include/linux/sunrpc/sched.h
64725 +++ b/include/linux/sunrpc/sched.h
64726 @@ -106,6 +106,7 @@ struct rpc_call_ops {
64727 void (*rpc_count_stats)(struct rpc_task *, void *);
64728 void (*rpc_release)(void *);
64729 };
64730 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64731
64732 struct rpc_task_setup {
64733 struct rpc_task *task;
64734 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64735 index 0b8e3e6..33e0a01 100644
64736 --- a/include/linux/sunrpc/svc_rdma.h
64737 +++ b/include/linux/sunrpc/svc_rdma.h
64738 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64739 extern unsigned int svcrdma_max_requests;
64740 extern unsigned int svcrdma_max_req_size;
64741
64742 -extern atomic_t rdma_stat_recv;
64743 -extern atomic_t rdma_stat_read;
64744 -extern atomic_t rdma_stat_write;
64745 -extern atomic_t rdma_stat_sq_starve;
64746 -extern atomic_t rdma_stat_rq_starve;
64747 -extern atomic_t rdma_stat_rq_poll;
64748 -extern atomic_t rdma_stat_rq_prod;
64749 -extern atomic_t rdma_stat_sq_poll;
64750 -extern atomic_t rdma_stat_sq_prod;
64751 +extern atomic_unchecked_t rdma_stat_recv;
64752 +extern atomic_unchecked_t rdma_stat_read;
64753 +extern atomic_unchecked_t rdma_stat_write;
64754 +extern atomic_unchecked_t rdma_stat_sq_starve;
64755 +extern atomic_unchecked_t rdma_stat_rq_starve;
64756 +extern atomic_unchecked_t rdma_stat_rq_poll;
64757 +extern atomic_unchecked_t rdma_stat_rq_prod;
64758 +extern atomic_unchecked_t rdma_stat_sq_poll;
64759 +extern atomic_unchecked_t rdma_stat_sq_prod;
64760
64761 #define RPCRDMA_VERSION 1
64762
64763 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64764 index c34b4c8..a65b67d 100644
64765 --- a/include/linux/sysctl.h
64766 +++ b/include/linux/sysctl.h
64767 @@ -155,7 +155,11 @@ enum
64768 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64769 };
64770
64771 -
64772 +#ifdef CONFIG_PAX_SOFTMODE
64773 +enum {
64774 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64775 +};
64776 +#endif
64777
64778 /* CTL_VM names: */
64779 enum
64780 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64781
64782 extern int proc_dostring(struct ctl_table *, int,
64783 void __user *, size_t *, loff_t *);
64784 +extern int proc_dostring_modpriv(struct ctl_table *, int,
64785 + void __user *, size_t *, loff_t *);
64786 extern int proc_dointvec(struct ctl_table *, int,
64787 void __user *, size_t *, loff_t *);
64788 extern int proc_dointvec_minmax(struct ctl_table *, int,
64789 diff --git a/include/linux/tty.h b/include/linux/tty.h
64790 index 9f47ab5..73da944 100644
64791 --- a/include/linux/tty.h
64792 +++ b/include/linux/tty.h
64793 @@ -225,7 +225,7 @@ struct tty_port {
64794 const struct tty_port_operations *ops; /* Port operations */
64795 spinlock_t lock; /* Lock protecting tty field */
64796 int blocked_open; /* Waiting to open */
64797 - int count; /* Usage count */
64798 + atomic_t count; /* Usage count */
64799 wait_queue_head_t open_wait; /* Open waiters */
64800 wait_queue_head_t close_wait; /* Close waiters */
64801 wait_queue_head_t delta_msr_wait; /* Modem status change */
64802 @@ -525,7 +525,7 @@ extern int tty_port_open(struct tty_port *port,
64803 struct tty_struct *tty, struct file *filp);
64804 static inline int tty_port_users(struct tty_port *port)
64805 {
64806 - return port->count + port->blocked_open;
64807 + return atomic_read(&port->count) + port->blocked_open;
64808 }
64809
64810 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
64811 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64812 index fb79dd8d..07d4773 100644
64813 --- a/include/linux/tty_ldisc.h
64814 +++ b/include/linux/tty_ldisc.h
64815 @@ -149,7 +149,7 @@ struct tty_ldisc_ops {
64816
64817 struct module *owner;
64818
64819 - int refcount;
64820 + atomic_t refcount;
64821 };
64822
64823 struct tty_ldisc {
64824 diff --git a/include/linux/types.h b/include/linux/types.h
64825 index 9c1bd53..c2370f6 100644
64826 --- a/include/linux/types.h
64827 +++ b/include/linux/types.h
64828 @@ -220,10 +220,26 @@ typedef struct {
64829 int counter;
64830 } atomic_t;
64831
64832 +#ifdef CONFIG_PAX_REFCOUNT
64833 +typedef struct {
64834 + int counter;
64835 +} atomic_unchecked_t;
64836 +#else
64837 +typedef atomic_t atomic_unchecked_t;
64838 +#endif
64839 +
64840 #ifdef CONFIG_64BIT
64841 typedef struct {
64842 long counter;
64843 } atomic64_t;
64844 +
64845 +#ifdef CONFIG_PAX_REFCOUNT
64846 +typedef struct {
64847 + long counter;
64848 +} atomic64_unchecked_t;
64849 +#else
64850 +typedef atomic64_t atomic64_unchecked_t;
64851 +#endif
64852 #endif
64853
64854 struct list_head {
64855 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64856 index 5ca0951..ab496a5 100644
64857 --- a/include/linux/uaccess.h
64858 +++ b/include/linux/uaccess.h
64859 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64860 long ret; \
64861 mm_segment_t old_fs = get_fs(); \
64862 \
64863 - set_fs(KERNEL_DS); \
64864 pagefault_disable(); \
64865 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64866 - pagefault_enable(); \
64867 + set_fs(KERNEL_DS); \
64868 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64869 set_fs(old_fs); \
64870 + pagefault_enable(); \
64871 ret; \
64872 })
64873
64874 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64875 index 99c1b4d..bb94261 100644
64876 --- a/include/linux/unaligned/access_ok.h
64877 +++ b/include/linux/unaligned/access_ok.h
64878 @@ -6,32 +6,32 @@
64879
64880 static inline u16 get_unaligned_le16(const void *p)
64881 {
64882 - return le16_to_cpup((__le16 *)p);
64883 + return le16_to_cpup((const __le16 *)p);
64884 }
64885
64886 static inline u32 get_unaligned_le32(const void *p)
64887 {
64888 - return le32_to_cpup((__le32 *)p);
64889 + return le32_to_cpup((const __le32 *)p);
64890 }
64891
64892 static inline u64 get_unaligned_le64(const void *p)
64893 {
64894 - return le64_to_cpup((__le64 *)p);
64895 + return le64_to_cpup((const __le64 *)p);
64896 }
64897
64898 static inline u16 get_unaligned_be16(const void *p)
64899 {
64900 - return be16_to_cpup((__be16 *)p);
64901 + return be16_to_cpup((const __be16 *)p);
64902 }
64903
64904 static inline u32 get_unaligned_be32(const void *p)
64905 {
64906 - return be32_to_cpup((__be32 *)p);
64907 + return be32_to_cpup((const __be32 *)p);
64908 }
64909
64910 static inline u64 get_unaligned_be64(const void *p)
64911 {
64912 - return be64_to_cpup((__be64 *)p);
64913 + return be64_to_cpup((const __be64 *)p);
64914 }
64915
64916 static inline void put_unaligned_le16(u16 val, void *p)
64917 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
64918 index 547e59c..db6ad19 100644
64919 --- a/include/linux/usb/renesas_usbhs.h
64920 +++ b/include/linux/usb/renesas_usbhs.h
64921 @@ -39,7 +39,7 @@ enum {
64922 */
64923 struct renesas_usbhs_driver_callback {
64924 int (*notify_hotplug)(struct platform_device *pdev);
64925 -};
64926 +} __no_const;
64927
64928 /*
64929 * callback functions for platform
64930 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
64931 * VBUS control is needed for Host
64932 */
64933 int (*set_vbus)(struct platform_device *pdev, int enable);
64934 -};
64935 +} __no_const;
64936
64937 /*
64938 * parameters for renesas usbhs
64939 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64940 index 6f8fbcf..8259001 100644
64941 --- a/include/linux/vermagic.h
64942 +++ b/include/linux/vermagic.h
64943 @@ -25,9 +25,35 @@
64944 #define MODULE_ARCH_VERMAGIC ""
64945 #endif
64946
64947 +#ifdef CONFIG_PAX_REFCOUNT
64948 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
64949 +#else
64950 +#define MODULE_PAX_REFCOUNT ""
64951 +#endif
64952 +
64953 +#ifdef CONSTIFY_PLUGIN
64954 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64955 +#else
64956 +#define MODULE_CONSTIFY_PLUGIN ""
64957 +#endif
64958 +
64959 +#ifdef STACKLEAK_PLUGIN
64960 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64961 +#else
64962 +#define MODULE_STACKLEAK_PLUGIN ""
64963 +#endif
64964 +
64965 +#ifdef CONFIG_GRKERNSEC
64966 +#define MODULE_GRSEC "GRSEC "
64967 +#else
64968 +#define MODULE_GRSEC ""
64969 +#endif
64970 +
64971 #define VERMAGIC_STRING \
64972 UTS_RELEASE " " \
64973 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64974 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64975 - MODULE_ARCH_VERMAGIC
64976 + MODULE_ARCH_VERMAGIC \
64977 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64978 + MODULE_GRSEC
64979
64980 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64981 index dcdfc2b..ec79ab5 100644
64982 --- a/include/linux/vmalloc.h
64983 +++ b/include/linux/vmalloc.h
64984 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64985 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64986 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64987 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64988 +
64989 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64990 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64991 +#endif
64992 +
64993 /* bits [20..32] reserved for arch specific ioremap internals */
64994
64995 /*
64996 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
64997 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64998 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64999 unsigned long start, unsigned long end, gfp_t gfp_mask,
65000 - pgprot_t prot, int node, void *caller);
65001 + pgprot_t prot, int node, void *caller) __size_overflow(1);
65002 extern void vfree(const void *addr);
65003
65004 extern void *vmap(struct page **pages, unsigned int count,
65005 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
65006 extern void free_vm_area(struct vm_struct *area);
65007
65008 /* for /dev/kmem */
65009 -extern long vread(char *buf, char *addr, unsigned long count);
65010 -extern long vwrite(char *buf, char *addr, unsigned long count);
65011 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
65012 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
65013
65014 /*
65015 * Internals. Dont't use..
65016 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
65017 index 65efb92..137adbb 100644
65018 --- a/include/linux/vmstat.h
65019 +++ b/include/linux/vmstat.h
65020 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
65021 /*
65022 * Zone based page accounting with per cpu differentials.
65023 */
65024 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65025 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65026
65027 static inline void zone_page_state_add(long x, struct zone *zone,
65028 enum zone_stat_item item)
65029 {
65030 - atomic_long_add(x, &zone->vm_stat[item]);
65031 - atomic_long_add(x, &vm_stat[item]);
65032 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
65033 + atomic_long_add_unchecked(x, &vm_stat[item]);
65034 }
65035
65036 static inline unsigned long global_page_state(enum zone_stat_item item)
65037 {
65038 - long x = atomic_long_read(&vm_stat[item]);
65039 + long x = atomic_long_read_unchecked(&vm_stat[item]);
65040 #ifdef CONFIG_SMP
65041 if (x < 0)
65042 x = 0;
65043 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
65044 static inline unsigned long zone_page_state(struct zone *zone,
65045 enum zone_stat_item item)
65046 {
65047 - long x = atomic_long_read(&zone->vm_stat[item]);
65048 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
65049 #ifdef CONFIG_SMP
65050 if (x < 0)
65051 x = 0;
65052 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
65053 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
65054 enum zone_stat_item item)
65055 {
65056 - long x = atomic_long_read(&zone->vm_stat[item]);
65057 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
65058
65059 #ifdef CONFIG_SMP
65060 int cpu;
65061 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
65062
65063 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
65064 {
65065 - atomic_long_inc(&zone->vm_stat[item]);
65066 - atomic_long_inc(&vm_stat[item]);
65067 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
65068 + atomic_long_inc_unchecked(&vm_stat[item]);
65069 }
65070
65071 static inline void __inc_zone_page_state(struct page *page,
65072 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
65073
65074 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
65075 {
65076 - atomic_long_dec(&zone->vm_stat[item]);
65077 - atomic_long_dec(&vm_stat[item]);
65078 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
65079 + atomic_long_dec_unchecked(&vm_stat[item]);
65080 }
65081
65082 static inline void __dec_zone_page_state(struct page *page,
65083 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
65084 index e5d1220..ef6e406 100644
65085 --- a/include/linux/xattr.h
65086 +++ b/include/linux/xattr.h
65087 @@ -57,6 +57,11 @@
65088 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
65089 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
65090
65091 +/* User namespace */
65092 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
65093 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
65094 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
65095 +
65096 #ifdef __KERNEL__
65097
65098 #include <linux/types.h>
65099 diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
65100 index 22e61fd..28e493b 100644
65101 --- a/include/linux/xfrm.h
65102 +++ b/include/linux/xfrm.h
65103 @@ -84,6 +84,8 @@ struct xfrm_replay_state {
65104 __u32 bitmap;
65105 };
65106
65107 +#define XFRMA_REPLAY_ESN_MAX 4096
65108 +
65109 struct xfrm_replay_state_esn {
65110 unsigned int bmp_len;
65111 __u32 oseq;
65112 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
65113 index 944ecdf..a3994fc 100644
65114 --- a/include/media/saa7146_vv.h
65115 +++ b/include/media/saa7146_vv.h
65116 @@ -161,8 +161,8 @@ struct saa7146_ext_vv
65117 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
65118
65119 /* the extension can override this */
65120 - struct v4l2_ioctl_ops vid_ops;
65121 - struct v4l2_ioctl_ops vbi_ops;
65122 + v4l2_ioctl_ops_no_const vid_ops;
65123 + v4l2_ioctl_ops_no_const vbi_ops;
65124 /* pointer to the saa7146 core ops */
65125 const struct v4l2_ioctl_ops *core_ops;
65126
65127 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
65128 index a056e6e..31023a5 100644
65129 --- a/include/media/v4l2-dev.h
65130 +++ b/include/media/v4l2-dev.h
65131 @@ -73,7 +73,8 @@ struct v4l2_file_operations {
65132 int (*mmap) (struct file *, struct vm_area_struct *);
65133 int (*open) (struct file *);
65134 int (*release) (struct file *);
65135 -};
65136 +} __do_const;
65137 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
65138
65139 /*
65140 * Newer version of video_device, handled by videodev2.c
65141 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
65142 index d8b76f7..7d5aa18 100644
65143 --- a/include/media/v4l2-ioctl.h
65144 +++ b/include/media/v4l2-ioctl.h
65145 @@ -287,7 +287,7 @@ struct v4l2_ioctl_ops {
65146 long (*vidioc_default) (struct file *file, void *fh,
65147 bool valid_prio, int cmd, void *arg);
65148 };
65149 -
65150 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
65151
65152 /* v4l debugging and diagnostics */
65153
65154 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
65155 index 439dadc..1c67e3f 100644
65156 --- a/include/net/caif/caif_hsi.h
65157 +++ b/include/net/caif/caif_hsi.h
65158 @@ -98,7 +98,7 @@ struct cfhsi_drv {
65159 void (*rx_done_cb) (struct cfhsi_drv *drv);
65160 void (*wake_up_cb) (struct cfhsi_drv *drv);
65161 void (*wake_down_cb) (struct cfhsi_drv *drv);
65162 -};
65163 +} __no_const;
65164
65165 /* Structure implemented by HSI device. */
65166 struct cfhsi_dev {
65167 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
65168 index 9e5425b..8136ffc 100644
65169 --- a/include/net/caif/cfctrl.h
65170 +++ b/include/net/caif/cfctrl.h
65171 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
65172 void (*radioset_rsp)(void);
65173 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
65174 struct cflayer *client_layer);
65175 -};
65176 +} __no_const;
65177
65178 /* Link Setup Parameters for CAIF-Links. */
65179 struct cfctrl_link_param {
65180 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
65181 struct cfctrl {
65182 struct cfsrvl serv;
65183 struct cfctrl_rsp res;
65184 - atomic_t req_seq_no;
65185 - atomic_t rsp_seq_no;
65186 + atomic_unchecked_t req_seq_no;
65187 + atomic_unchecked_t rsp_seq_no;
65188 struct list_head list;
65189 /* Protects from simultaneous access to first_req list */
65190 spinlock_t info_list_lock;
65191 diff --git a/include/net/flow.h b/include/net/flow.h
65192 index 6c469db..7743b8e 100644
65193 --- a/include/net/flow.h
65194 +++ b/include/net/flow.h
65195 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
65196
65197 extern void flow_cache_flush(void);
65198 extern void flow_cache_flush_deferred(void);
65199 -extern atomic_t flow_cache_genid;
65200 +extern atomic_unchecked_t flow_cache_genid;
65201
65202 #endif
65203 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
65204 index 2040bff..f4c0733 100644
65205 --- a/include/net/inetpeer.h
65206 +++ b/include/net/inetpeer.h
65207 @@ -51,8 +51,8 @@ struct inet_peer {
65208 */
65209 union {
65210 struct {
65211 - atomic_t rid; /* Frag reception counter */
65212 - atomic_t ip_id_count; /* IP ID for the next packet */
65213 + atomic_unchecked_t rid; /* Frag reception counter */
65214 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
65215 __u32 tcp_ts;
65216 __u32 tcp_ts_stamp;
65217 };
65218 @@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
65219 more++;
65220 inet_peer_refcheck(p);
65221 do {
65222 - old = atomic_read(&p->ip_id_count);
65223 + old = atomic_read_unchecked(&p->ip_id_count);
65224 new = old + more;
65225 if (!new)
65226 new = 1;
65227 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
65228 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
65229 return new;
65230 }
65231
65232 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
65233 index 78df0866..00e5c9b 100644
65234 --- a/include/net/ip_fib.h
65235 +++ b/include/net/ip_fib.h
65236 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
65237
65238 #define FIB_RES_SADDR(net, res) \
65239 ((FIB_RES_NH(res).nh_saddr_genid == \
65240 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
65241 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
65242 FIB_RES_NH(res).nh_saddr : \
65243 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
65244 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
65245 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
65246 index 95374d1..2300e36 100644
65247 --- a/include/net/ip_vs.h
65248 +++ b/include/net/ip_vs.h
65249 @@ -510,7 +510,7 @@ struct ip_vs_conn {
65250 struct ip_vs_conn *control; /* Master control connection */
65251 atomic_t n_control; /* Number of controlled ones */
65252 struct ip_vs_dest *dest; /* real server */
65253 - atomic_t in_pkts; /* incoming packet counter */
65254 + atomic_unchecked_t in_pkts; /* incoming packet counter */
65255
65256 /* packet transmitter for different forwarding methods. If it
65257 mangles the packet, it must return NF_DROP or better NF_STOLEN,
65258 @@ -648,7 +648,7 @@ struct ip_vs_dest {
65259 __be16 port; /* port number of the server */
65260 union nf_inet_addr addr; /* IP address of the server */
65261 volatile unsigned int flags; /* dest status flags */
65262 - atomic_t conn_flags; /* flags to copy to conn */
65263 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
65264 atomic_t weight; /* server weight */
65265
65266 atomic_t refcnt; /* reference counter */
65267 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
65268 index 69b610a..fe3962c 100644
65269 --- a/include/net/irda/ircomm_core.h
65270 +++ b/include/net/irda/ircomm_core.h
65271 @@ -51,7 +51,7 @@ typedef struct {
65272 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
65273 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
65274 struct ircomm_info *);
65275 -} call_t;
65276 +} __no_const call_t;
65277
65278 struct ircomm_cb {
65279 irda_queue_t queue;
65280 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
65281 index 59ba38bc..d515662 100644
65282 --- a/include/net/irda/ircomm_tty.h
65283 +++ b/include/net/irda/ircomm_tty.h
65284 @@ -35,6 +35,7 @@
65285 #include <linux/termios.h>
65286 #include <linux/timer.h>
65287 #include <linux/tty.h> /* struct tty_struct */
65288 +#include <asm/local.h>
65289
65290 #include <net/irda/irias_object.h>
65291 #include <net/irda/ircomm_core.h>
65292 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
65293 unsigned short close_delay;
65294 unsigned short closing_wait; /* time to wait before closing */
65295
65296 - int open_count;
65297 - int blocked_open; /* # of blocked opens */
65298 + local_t open_count;
65299 + local_t blocked_open; /* # of blocked opens */
65300
65301 /* Protect concurent access to :
65302 * o self->open_count
65303 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
65304 index cc7c197..9f2da2a 100644
65305 --- a/include/net/iucv/af_iucv.h
65306 +++ b/include/net/iucv/af_iucv.h
65307 @@ -141,7 +141,7 @@ struct iucv_sock {
65308 struct iucv_sock_list {
65309 struct hlist_head head;
65310 rwlock_t lock;
65311 - atomic_t autobind_name;
65312 + atomic_unchecked_t autobind_name;
65313 };
65314
65315 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
65316 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
65317 index 6cdfeed..55a0256 100644
65318 --- a/include/net/neighbour.h
65319 +++ b/include/net/neighbour.h
65320 @@ -123,7 +123,7 @@ struct neigh_ops {
65321 void (*error_report)(struct neighbour *, struct sk_buff *);
65322 int (*output)(struct neighbour *, struct sk_buff *);
65323 int (*connected_output)(struct neighbour *, struct sk_buff *);
65324 -};
65325 +} __do_const;
65326
65327 struct pneigh_entry {
65328 struct pneigh_entry *next;
65329 diff --git a/include/net/netdma.h b/include/net/netdma.h
65330 index 8ba8ce2..99b7fff 100644
65331 --- a/include/net/netdma.h
65332 +++ b/include/net/netdma.h
65333 @@ -24,7 +24,7 @@
65334 #include <linux/dmaengine.h>
65335 #include <linux/skbuff.h>
65336
65337 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65338 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65339 struct sk_buff *skb, int offset, struct iovec *to,
65340 size_t len, struct dma_pinned_list *pinned_list);
65341
65342 diff --git a/include/net/netlink.h b/include/net/netlink.h
65343 index 785f37a..c81dc0c 100644
65344 --- a/include/net/netlink.h
65345 +++ b/include/net/netlink.h
65346 @@ -520,7 +520,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
65347 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65348 {
65349 if (mark)
65350 - skb_trim(skb, (unsigned char *) mark - skb->data);
65351 + skb_trim(skb, (const unsigned char *) mark - skb->data);
65352 }
65353
65354 /**
65355 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
65356 index bbd023a..97c6d0d 100644
65357 --- a/include/net/netns/ipv4.h
65358 +++ b/include/net/netns/ipv4.h
65359 @@ -57,8 +57,8 @@ struct netns_ipv4 {
65360 unsigned int sysctl_ping_group_range[2];
65361 long sysctl_tcp_mem[3];
65362
65363 - atomic_t rt_genid;
65364 - atomic_t dev_addr_genid;
65365 + atomic_unchecked_t rt_genid;
65366 + atomic_unchecked_t dev_addr_genid;
65367
65368 #ifdef CONFIG_IP_MROUTE
65369 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
65370 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
65371 index a2ef814..31a8e3f 100644
65372 --- a/include/net/sctp/sctp.h
65373 +++ b/include/net/sctp/sctp.h
65374 @@ -318,9 +318,9 @@ do { \
65375
65376 #else /* SCTP_DEBUG */
65377
65378 -#define SCTP_DEBUG_PRINTK(whatever...)
65379 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
65380 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65381 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
65382 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
65383 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65384 #define SCTP_ENABLE_DEBUG
65385 #define SCTP_DISABLE_DEBUG
65386 #define SCTP_ASSERT(expr, str, func)
65387 diff --git a/include/net/sock.h b/include/net/sock.h
65388 index 5de6557..0bad07b 100644
65389 --- a/include/net/sock.h
65390 +++ b/include/net/sock.h
65391 @@ -304,7 +304,7 @@ struct sock {
65392 #ifdef CONFIG_RPS
65393 __u32 sk_rxhash;
65394 #endif
65395 - atomic_t sk_drops;
65396 + atomic_unchecked_t sk_drops;
65397 int sk_rcvbuf;
65398
65399 struct sk_filter __rcu *sk_filter;
65400 @@ -1728,7 +1728,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
65401 }
65402
65403 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
65404 - char __user *from, char *to,
65405 + char __user *from, unsigned char *to,
65406 int copy, int offset)
65407 {
65408 if (skb->ip_summed == CHECKSUM_NONE) {
65409 @@ -1987,7 +1987,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
65410 }
65411 }
65412
65413 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65414 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65415
65416 static inline struct page *sk_stream_alloc_page(struct sock *sk)
65417 {
65418 diff --git a/include/net/tcp.h b/include/net/tcp.h
65419 index e79aa48..05e52de 100644
65420 --- a/include/net/tcp.h
65421 +++ b/include/net/tcp.h
65422 @@ -476,7 +476,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
65423 extern void tcp_xmit_retransmit_queue(struct sock *);
65424 extern void tcp_simple_retransmit(struct sock *);
65425 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
65426 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65427 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65428
65429 extern void tcp_send_probe0(struct sock *);
65430 extern void tcp_send_partial(struct sock *);
65431 @@ -643,8 +643,8 @@ struct tcp_skb_cb {
65432 struct inet6_skb_parm h6;
65433 #endif
65434 } header; /* For incoming frames */
65435 - __u32 seq; /* Starting sequence number */
65436 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
65437 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
65438 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
65439 __u32 when; /* used to compute rtt's */
65440 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
65441
65442 @@ -658,7 +658,7 @@ struct tcp_skb_cb {
65443
65444 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
65445 /* 1 byte hole */
65446 - __u32 ack_seq; /* Sequence number ACK'd */
65447 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
65448 };
65449
65450 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
65451 @@ -1459,7 +1459,7 @@ struct tcp_seq_afinfo {
65452 char *name;
65453 sa_family_t family;
65454 const struct file_operations *seq_fops;
65455 - struct seq_operations seq_ops;
65456 + seq_operations_no_const seq_ops;
65457 };
65458
65459 struct tcp_iter_state {
65460 diff --git a/include/net/udp.h b/include/net/udp.h
65461 index 065f379..b661b40 100644
65462 --- a/include/net/udp.h
65463 +++ b/include/net/udp.h
65464 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
65465 sa_family_t family;
65466 struct udp_table *udp_table;
65467 const struct file_operations *seq_fops;
65468 - struct seq_operations seq_ops;
65469 + seq_operations_no_const seq_ops;
65470 };
65471
65472 struct udp_iter_state {
65473 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
65474 index e0a55df..5890bca07 100644
65475 --- a/include/net/xfrm.h
65476 +++ b/include/net/xfrm.h
65477 @@ -505,7 +505,7 @@ struct xfrm_policy {
65478 struct timer_list timer;
65479
65480 struct flow_cache_object flo;
65481 - atomic_t genid;
65482 + atomic_unchecked_t genid;
65483 u32 priority;
65484 u32 index;
65485 struct xfrm_mark mark;
65486 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
65487 index 1a046b1..ee0bef0 100644
65488 --- a/include/rdma/iw_cm.h
65489 +++ b/include/rdma/iw_cm.h
65490 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
65491 int backlog);
65492
65493 int (*destroy_listen)(struct iw_cm_id *cm_id);
65494 -};
65495 +} __no_const;
65496
65497 /**
65498 * iw_create_cm_id - Create an IW CM identifier.
65499 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
65500 index 8f9dfba..610ab6c 100644
65501 --- a/include/scsi/libfc.h
65502 +++ b/include/scsi/libfc.h
65503 @@ -756,6 +756,7 @@ struct libfc_function_template {
65504 */
65505 void (*disc_stop_final) (struct fc_lport *);
65506 };
65507 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65508
65509 /**
65510 * struct fc_disc - Discovery context
65511 @@ -861,7 +862,7 @@ struct fc_lport {
65512 struct fc_vport *vport;
65513
65514 /* Operational Information */
65515 - struct libfc_function_template tt;
65516 + libfc_function_template_no_const tt;
65517 u8 link_up;
65518 u8 qfull;
65519 enum fc_lport_state state;
65520 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
65521 index ba96988..ecf2eb9 100644
65522 --- a/include/scsi/scsi_device.h
65523 +++ b/include/scsi/scsi_device.h
65524 @@ -163,9 +163,9 @@ struct scsi_device {
65525 unsigned int max_device_blocked; /* what device_blocked counts down from */
65526 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65527
65528 - atomic_t iorequest_cnt;
65529 - atomic_t iodone_cnt;
65530 - atomic_t ioerr_cnt;
65531 + atomic_unchecked_t iorequest_cnt;
65532 + atomic_unchecked_t iodone_cnt;
65533 + atomic_unchecked_t ioerr_cnt;
65534
65535 struct device sdev_gendev,
65536 sdev_dev;
65537 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
65538 index 719faf1..07b6728 100644
65539 --- a/include/scsi/scsi_transport_fc.h
65540 +++ b/include/scsi/scsi_transport_fc.h
65541 @@ -739,7 +739,8 @@ struct fc_function_template {
65542 unsigned long show_host_system_hostname:1;
65543
65544 unsigned long disable_target_scan:1;
65545 -};
65546 +} __do_const;
65547 +typedef struct fc_function_template __no_const fc_function_template_no_const;
65548
65549
65550 /**
65551 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65552 index 030b87c..98a6954 100644
65553 --- a/include/sound/ak4xxx-adda.h
65554 +++ b/include/sound/ak4xxx-adda.h
65555 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65556 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65557 unsigned char val);
65558 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65559 -};
65560 +} __no_const;
65561
65562 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65563
65564 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65565 index 8c05e47..2b5df97 100644
65566 --- a/include/sound/hwdep.h
65567 +++ b/include/sound/hwdep.h
65568 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65569 struct snd_hwdep_dsp_status *status);
65570 int (*dsp_load)(struct snd_hwdep *hw,
65571 struct snd_hwdep_dsp_image *image);
65572 -};
65573 +} __no_const;
65574
65575 struct snd_hwdep {
65576 struct snd_card *card;
65577 diff --git a/include/sound/info.h b/include/sound/info.h
65578 index 9ca1a49..aba1728 100644
65579 --- a/include/sound/info.h
65580 +++ b/include/sound/info.h
65581 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
65582 struct snd_info_buffer *buffer);
65583 void (*write)(struct snd_info_entry *entry,
65584 struct snd_info_buffer *buffer);
65585 -};
65586 +} __no_const;
65587
65588 struct snd_info_entry_ops {
65589 int (*open)(struct snd_info_entry *entry,
65590 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
65591 index 0d11128..814178e 100644
65592 --- a/include/sound/pcm.h
65593 +++ b/include/sound/pcm.h
65594 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
65595 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65596 int (*ack)(struct snd_pcm_substream *substream);
65597 };
65598 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65599
65600 /*
65601 *
65602 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65603 index af1b49e..a5d55a5 100644
65604 --- a/include/sound/sb16_csp.h
65605 +++ b/include/sound/sb16_csp.h
65606 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65607 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65608 int (*csp_stop) (struct snd_sb_csp * p);
65609 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65610 -};
65611 +} __no_const;
65612
65613 /*
65614 * CSP private data
65615 diff --git a/include/sound/soc.h b/include/sound/soc.h
65616 index c703871..f7fbbbd 100644
65617 --- a/include/sound/soc.h
65618 +++ b/include/sound/soc.h
65619 @@ -757,7 +757,7 @@ struct snd_soc_platform_driver {
65620 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65621 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
65622 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
65623 -};
65624 +} __do_const;
65625
65626 struct snd_soc_platform {
65627 const char *name;
65628 @@ -949,7 +949,7 @@ struct snd_soc_pcm_runtime {
65629 struct snd_soc_dai_link *dai_link;
65630 struct mutex pcm_mutex;
65631 enum snd_soc_pcm_subclass pcm_subclass;
65632 - struct snd_pcm_ops ops;
65633 + snd_pcm_ops_no_const ops;
65634
65635 unsigned int dev_registered:1;
65636
65637 diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
65638 index 0c3c2fb..d9d9990 100644
65639 --- a/include/sound/tea575x-tuner.h
65640 +++ b/include/sound/tea575x-tuner.h
65641 @@ -44,7 +44,7 @@ struct snd_tea575x_ops {
65642
65643 struct snd_tea575x {
65644 struct v4l2_device *v4l2_dev;
65645 - struct v4l2_file_operations fops;
65646 + v4l2_file_operations_no_const fops;
65647 struct video_device vd; /* video device */
65648 int radio_nr; /* radio_nr */
65649 bool tea5759; /* 5759 chip is present */
65650 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65651 index 4119966..1a4671c 100644
65652 --- a/include/sound/ymfpci.h
65653 +++ b/include/sound/ymfpci.h
65654 @@ -358,7 +358,7 @@ struct snd_ymfpci {
65655 spinlock_t reg_lock;
65656 spinlock_t voice_lock;
65657 wait_queue_head_t interrupt_sleep;
65658 - atomic_t interrupt_sleep_count;
65659 + atomic_unchecked_t interrupt_sleep_count;
65660 struct snd_info_entry *proc_entry;
65661 const struct firmware *dsp_microcode;
65662 const struct firmware *controller_microcode;
65663 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
65664 index 362e0d9..36b9a83 100644
65665 --- a/include/target/target_core_base.h
65666 +++ b/include/target/target_core_base.h
65667 @@ -441,7 +441,7 @@ struct t10_reservation_ops {
65668 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65669 int (*t10_pr_register)(struct se_cmd *);
65670 int (*t10_pr_clear)(struct se_cmd *);
65671 -};
65672 +} __no_const;
65673
65674 struct t10_reservation {
65675 /* Reservation effects all target ports */
65676 @@ -780,7 +780,7 @@ struct se_device {
65677 spinlock_t stats_lock;
65678 /* Active commands on this virtual SE device */
65679 atomic_t simple_cmds;
65680 - atomic_t dev_ordered_id;
65681 + atomic_unchecked_t dev_ordered_id;
65682 atomic_t execute_tasks;
65683 atomic_t dev_ordered_sync;
65684 atomic_t dev_qf_count;
65685 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
65686 new file mode 100644
65687 index 0000000..2efe49d
65688 --- /dev/null
65689 +++ b/include/trace/events/fs.h
65690 @@ -0,0 +1,53 @@
65691 +#undef TRACE_SYSTEM
65692 +#define TRACE_SYSTEM fs
65693 +
65694 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
65695 +#define _TRACE_FS_H
65696 +
65697 +#include <linux/fs.h>
65698 +#include <linux/tracepoint.h>
65699 +
65700 +TRACE_EVENT(do_sys_open,
65701 +
65702 + TP_PROTO(char *filename, int flags, int mode),
65703 +
65704 + TP_ARGS(filename, flags, mode),
65705 +
65706 + TP_STRUCT__entry(
65707 + __string( filename, filename )
65708 + __field( int, flags )
65709 + __field( int, mode )
65710 + ),
65711 +
65712 + TP_fast_assign(
65713 + __assign_str(filename, filename);
65714 + __entry->flags = flags;
65715 + __entry->mode = mode;
65716 + ),
65717 +
65718 + TP_printk("\"%s\" %x %o",
65719 + __get_str(filename), __entry->flags, __entry->mode)
65720 +);
65721 +
65722 +TRACE_EVENT(open_exec,
65723 +
65724 + TP_PROTO(const char *filename),
65725 +
65726 + TP_ARGS(filename),
65727 +
65728 + TP_STRUCT__entry(
65729 + __string( filename, filename )
65730 + ),
65731 +
65732 + TP_fast_assign(
65733 + __assign_str(filename, filename);
65734 + ),
65735 +
65736 + TP_printk("\"%s\"",
65737 + __get_str(filename))
65738 +);
65739 +
65740 +#endif /* _TRACE_FS_H */
65741 +
65742 +/* This part must be outside protection */
65743 +#include <trace/define_trace.h>
65744 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65745 index 1c09820..7f5ec79 100644
65746 --- a/include/trace/events/irq.h
65747 +++ b/include/trace/events/irq.h
65748 @@ -36,7 +36,7 @@ struct softirq_action;
65749 */
65750 TRACE_EVENT(irq_handler_entry,
65751
65752 - TP_PROTO(int irq, struct irqaction *action),
65753 + TP_PROTO(int irq, const struct irqaction *action),
65754
65755 TP_ARGS(irq, action),
65756
65757 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65758 */
65759 TRACE_EVENT(irq_handler_exit,
65760
65761 - TP_PROTO(int irq, struct irqaction *action, int ret),
65762 + TP_PROTO(int irq, const struct irqaction *action, int ret),
65763
65764 TP_ARGS(irq, action, ret),
65765
65766 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65767 index f9466fa..f4e2b81 100644
65768 --- a/include/video/udlfb.h
65769 +++ b/include/video/udlfb.h
65770 @@ -53,10 +53,10 @@ struct dlfb_data {
65771 u32 pseudo_palette[256];
65772 int blank_mode; /*one of FB_BLANK_ */
65773 /* blit-only rendering path metrics, exposed through sysfs */
65774 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65775 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65776 - atomic_t bytes_sent; /* to usb, after compression including overhead */
65777 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65778 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65779 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65780 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65781 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65782 };
65783
65784 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65785 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65786 index 0993a22..32ba2fe 100644
65787 --- a/include/video/uvesafb.h
65788 +++ b/include/video/uvesafb.h
65789 @@ -177,6 +177,7 @@ struct uvesafb_par {
65790 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65791 u8 pmi_setpal; /* PMI for palette changes */
65792 u16 *pmi_base; /* protected mode interface location */
65793 + u8 *pmi_code; /* protected mode code location */
65794 void *pmi_start;
65795 void *pmi_pal;
65796 u8 *vbe_state_orig; /*
65797 diff --git a/init/Kconfig b/init/Kconfig
65798 index d07dcf9..fa47d0e 100644
65799 --- a/init/Kconfig
65800 +++ b/init/Kconfig
65801 @@ -835,6 +835,7 @@ endif # CGROUPS
65802
65803 config CHECKPOINT_RESTORE
65804 bool "Checkpoint/restore support" if EXPERT
65805 + depends on !GRKERNSEC
65806 default n
65807 help
65808 Enables additional kernel features in a sake of checkpoint/restore.
65809 @@ -1014,6 +1015,7 @@ config UIDGID_CONVERTED
65810 # Security modules
65811 depends on SECURITY_TOMOYO = n
65812 depends on SECURITY_APPARMOR = n
65813 + depends on GRKERNSEC = n
65814
65815 config UIDGID_STRICT_TYPE_CHECKS
65816 bool "Require conversions between uid/gids and their internal representation"
65817 @@ -1401,7 +1403,7 @@ config SLUB_DEBUG
65818
65819 config COMPAT_BRK
65820 bool "Disable heap randomization"
65821 - default y
65822 + default n
65823 help
65824 Randomizing heap placement makes heap exploits harder, but it
65825 also breaks ancient binaries (including anything libc5 based).
65826 @@ -1584,7 +1586,7 @@ config INIT_ALL_POSSIBLE
65827 config STOP_MACHINE
65828 bool
65829 default y
65830 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
65831 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
65832 help
65833 Need stop_machine() primitive.
65834
65835 diff --git a/init/Makefile b/init/Makefile
65836 index 7bc47ee..6da2dc7 100644
65837 --- a/init/Makefile
65838 +++ b/init/Makefile
65839 @@ -2,6 +2,9 @@
65840 # Makefile for the linux kernel.
65841 #
65842
65843 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
65844 +asflags-y := $(GCC_PLUGINS_AFLAGS)
65845 +
65846 obj-y := main.o version.o mounts.o
65847 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
65848 obj-y += noinitramfs.o
65849 diff --git a/init/do_mounts.c b/init/do_mounts.c
65850 index d3f0aee..c9322f5 100644
65851 --- a/init/do_mounts.c
65852 +++ b/init/do_mounts.c
65853 @@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
65854 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65855 {
65856 struct super_block *s;
65857 - int err = sys_mount(name, "/root", fs, flags, data);
65858 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65859 if (err)
65860 return err;
65861
65862 - sys_chdir("/root");
65863 + sys_chdir((const char __force_user *)"/root");
65864 s = current->fs->pwd.dentry->d_sb;
65865 ROOT_DEV = s->s_dev;
65866 printk(KERN_INFO
65867 @@ -460,18 +460,18 @@ void __init change_floppy(char *fmt, ...)
65868 va_start(args, fmt);
65869 vsprintf(buf, fmt, args);
65870 va_end(args);
65871 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65872 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65873 if (fd >= 0) {
65874 sys_ioctl(fd, FDEJECT, 0);
65875 sys_close(fd);
65876 }
65877 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65878 - fd = sys_open("/dev/console", O_RDWR, 0);
65879 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65880 if (fd >= 0) {
65881 sys_ioctl(fd, TCGETS, (long)&termios);
65882 termios.c_lflag &= ~ICANON;
65883 sys_ioctl(fd, TCSETSF, (long)&termios);
65884 - sys_read(fd, &c, 1);
65885 + sys_read(fd, (char __user *)&c, 1);
65886 termios.c_lflag |= ICANON;
65887 sys_ioctl(fd, TCSETSF, (long)&termios);
65888 sys_close(fd);
65889 @@ -565,6 +565,6 @@ void __init prepare_namespace(void)
65890 mount_root();
65891 out:
65892 devtmpfs_mount("dev");
65893 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65894 - sys_chroot(".");
65895 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65896 + sys_chroot((const char __force_user *)".");
65897 }
65898 diff --git a/init/do_mounts.h b/init/do_mounts.h
65899 index f5b978a..69dbfe8 100644
65900 --- a/init/do_mounts.h
65901 +++ b/init/do_mounts.h
65902 @@ -15,15 +15,15 @@ extern int root_mountflags;
65903
65904 static inline int create_dev(char *name, dev_t dev)
65905 {
65906 - sys_unlink(name);
65907 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65908 + sys_unlink((char __force_user *)name);
65909 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65910 }
65911
65912 #if BITS_PER_LONG == 32
65913 static inline u32 bstat(char *name)
65914 {
65915 struct stat64 stat;
65916 - if (sys_stat64(name, &stat) != 0)
65917 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65918 return 0;
65919 if (!S_ISBLK(stat.st_mode))
65920 return 0;
65921 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65922 static inline u32 bstat(char *name)
65923 {
65924 struct stat stat;
65925 - if (sys_newstat(name, &stat) != 0)
65926 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65927 return 0;
65928 if (!S_ISBLK(stat.st_mode))
65929 return 0;
65930 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65931 index 135959a2..28a3f43 100644
65932 --- a/init/do_mounts_initrd.c
65933 +++ b/init/do_mounts_initrd.c
65934 @@ -53,13 +53,13 @@ static void __init handle_initrd(void)
65935 create_dev("/dev/root.old", Root_RAM0);
65936 /* mount initrd on rootfs' /root */
65937 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65938 - sys_mkdir("/old", 0700);
65939 - root_fd = sys_open("/", 0, 0);
65940 - old_fd = sys_open("/old", 0, 0);
65941 + sys_mkdir((const char __force_user *)"/old", 0700);
65942 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
65943 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65944 /* move initrd over / and chdir/chroot in initrd root */
65945 - sys_chdir("/root");
65946 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65947 - sys_chroot(".");
65948 + sys_chdir((const char __force_user *)"/root");
65949 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65950 + sys_chroot((const char __force_user *)".");
65951
65952 /*
65953 * In case that a resume from disk is carried out by linuxrc or one of
65954 @@ -76,15 +76,15 @@ static void __init handle_initrd(void)
65955
65956 /* move initrd to rootfs' /old */
65957 sys_fchdir(old_fd);
65958 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
65959 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65960 /* switch root and cwd back to / of rootfs */
65961 sys_fchdir(root_fd);
65962 - sys_chroot(".");
65963 + sys_chroot((const char __force_user *)".");
65964 sys_close(old_fd);
65965 sys_close(root_fd);
65966
65967 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65968 - sys_chdir("/old");
65969 + sys_chdir((const char __force_user *)"/old");
65970 return;
65971 }
65972
65973 @@ -92,17 +92,17 @@ static void __init handle_initrd(void)
65974 mount_root();
65975
65976 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65977 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65978 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65979 if (!error)
65980 printk("okay\n");
65981 else {
65982 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
65983 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65984 if (error == -ENOENT)
65985 printk("/initrd does not exist. Ignored.\n");
65986 else
65987 printk("failed\n");
65988 printk(KERN_NOTICE "Unmounting old root\n");
65989 - sys_umount("/old", MNT_DETACH);
65990 + sys_umount((char __force_user *)"/old", MNT_DETACH);
65991 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65992 if (fd < 0) {
65993 error = fd;
65994 @@ -125,11 +125,11 @@ int __init initrd_load(void)
65995 * mounted in the normal path.
65996 */
65997 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65998 - sys_unlink("/initrd.image");
65999 + sys_unlink((const char __force_user *)"/initrd.image");
66000 handle_initrd();
66001 return 1;
66002 }
66003 }
66004 - sys_unlink("/initrd.image");
66005 + sys_unlink((const char __force_user *)"/initrd.image");
66006 return 0;
66007 }
66008 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
66009 index 8cb6db5..d729f50 100644
66010 --- a/init/do_mounts_md.c
66011 +++ b/init/do_mounts_md.c
66012 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
66013 partitioned ? "_d" : "", minor,
66014 md_setup_args[ent].device_names);
66015
66016 - fd = sys_open(name, 0, 0);
66017 + fd = sys_open((char __force_user *)name, 0, 0);
66018 if (fd < 0) {
66019 printk(KERN_ERR "md: open failed - cannot start "
66020 "array %s\n", name);
66021 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
66022 * array without it
66023 */
66024 sys_close(fd);
66025 - fd = sys_open(name, 0, 0);
66026 + fd = sys_open((char __force_user *)name, 0, 0);
66027 sys_ioctl(fd, BLKRRPART, 0);
66028 }
66029 sys_close(fd);
66030 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
66031
66032 wait_for_device_probe();
66033
66034 - fd = sys_open("/dev/md0", 0, 0);
66035 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
66036 if (fd >= 0) {
66037 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
66038 sys_close(fd);
66039 diff --git a/init/init_task.c b/init/init_task.c
66040 index 8b2f399..f0797c9 100644
66041 --- a/init/init_task.c
66042 +++ b/init/init_task.c
66043 @@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
66044 * Initial thread structure. Alignment of this is handled by a special
66045 * linker map entry.
66046 */
66047 +#ifdef CONFIG_X86
66048 +union thread_union init_thread_union __init_task_data;
66049 +#else
66050 union thread_union init_thread_union __init_task_data =
66051 { INIT_THREAD_INFO(init_task) };
66052 +#endif
66053 diff --git a/init/initramfs.c b/init/initramfs.c
66054 index 84c6bf1..8899338 100644
66055 --- a/init/initramfs.c
66056 +++ b/init/initramfs.c
66057 @@ -84,7 +84,7 @@ static void __init free_hash(void)
66058 }
66059 }
66060
66061 -static long __init do_utime(char *filename, time_t mtime)
66062 +static long __init do_utime(char __force_user *filename, time_t mtime)
66063 {
66064 struct timespec t[2];
66065
66066 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
66067 struct dir_entry *de, *tmp;
66068 list_for_each_entry_safe(de, tmp, &dir_list, list) {
66069 list_del(&de->list);
66070 - do_utime(de->name, de->mtime);
66071 + do_utime((char __force_user *)de->name, de->mtime);
66072 kfree(de->name);
66073 kfree(de);
66074 }
66075 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
66076 if (nlink >= 2) {
66077 char *old = find_link(major, minor, ino, mode, collected);
66078 if (old)
66079 - return (sys_link(old, collected) < 0) ? -1 : 1;
66080 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
66081 }
66082 return 0;
66083 }
66084 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
66085 {
66086 struct stat st;
66087
66088 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
66089 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
66090 if (S_ISDIR(st.st_mode))
66091 - sys_rmdir(path);
66092 + sys_rmdir((char __force_user *)path);
66093 else
66094 - sys_unlink(path);
66095 + sys_unlink((char __force_user *)path);
66096 }
66097 }
66098
66099 @@ -315,7 +315,7 @@ static int __init do_name(void)
66100 int openflags = O_WRONLY|O_CREAT;
66101 if (ml != 1)
66102 openflags |= O_TRUNC;
66103 - wfd = sys_open(collected, openflags, mode);
66104 + wfd = sys_open((char __force_user *)collected, openflags, mode);
66105
66106 if (wfd >= 0) {
66107 sys_fchown(wfd, uid, gid);
66108 @@ -327,17 +327,17 @@ static int __init do_name(void)
66109 }
66110 }
66111 } else if (S_ISDIR(mode)) {
66112 - sys_mkdir(collected, mode);
66113 - sys_chown(collected, uid, gid);
66114 - sys_chmod(collected, mode);
66115 + sys_mkdir((char __force_user *)collected, mode);
66116 + sys_chown((char __force_user *)collected, uid, gid);
66117 + sys_chmod((char __force_user *)collected, mode);
66118 dir_add(collected, mtime);
66119 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
66120 S_ISFIFO(mode) || S_ISSOCK(mode)) {
66121 if (maybe_link() == 0) {
66122 - sys_mknod(collected, mode, rdev);
66123 - sys_chown(collected, uid, gid);
66124 - sys_chmod(collected, mode);
66125 - do_utime(collected, mtime);
66126 + sys_mknod((char __force_user *)collected, mode, rdev);
66127 + sys_chown((char __force_user *)collected, uid, gid);
66128 + sys_chmod((char __force_user *)collected, mode);
66129 + do_utime((char __force_user *)collected, mtime);
66130 }
66131 }
66132 return 0;
66133 @@ -346,15 +346,15 @@ static int __init do_name(void)
66134 static int __init do_copy(void)
66135 {
66136 if (count >= body_len) {
66137 - sys_write(wfd, victim, body_len);
66138 + sys_write(wfd, (char __force_user *)victim, body_len);
66139 sys_close(wfd);
66140 - do_utime(vcollected, mtime);
66141 + do_utime((char __force_user *)vcollected, mtime);
66142 kfree(vcollected);
66143 eat(body_len);
66144 state = SkipIt;
66145 return 0;
66146 } else {
66147 - sys_write(wfd, victim, count);
66148 + sys_write(wfd, (char __force_user *)victim, count);
66149 body_len -= count;
66150 eat(count);
66151 return 1;
66152 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
66153 {
66154 collected[N_ALIGN(name_len) + body_len] = '\0';
66155 clean_path(collected, 0);
66156 - sys_symlink(collected + N_ALIGN(name_len), collected);
66157 - sys_lchown(collected, uid, gid);
66158 - do_utime(collected, mtime);
66159 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
66160 + sys_lchown((char __force_user *)collected, uid, gid);
66161 + do_utime((char __force_user *)collected, mtime);
66162 state = SkipIt;
66163 next_state = Reset;
66164 return 0;
66165 diff --git a/init/main.c b/init/main.c
66166 index b5cc0a7..8e67244 100644
66167 --- a/init/main.c
66168 +++ b/init/main.c
66169 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
66170 extern void tc_init(void);
66171 #endif
66172
66173 +extern void grsecurity_init(void);
66174 +
66175 /*
66176 * Debug helper: via this flag we know that we are in 'early bootup code'
66177 * where only the boot processor is running with IRQ disabled. This means
66178 @@ -148,6 +150,51 @@ static int __init set_reset_devices(char *str)
66179
66180 __setup("reset_devices", set_reset_devices);
66181
66182 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
66183 +extern char pax_enter_kernel_user[];
66184 +extern char pax_exit_kernel_user[];
66185 +extern pgdval_t clone_pgd_mask;
66186 +#endif
66187 +
66188 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
66189 +static int __init setup_pax_nouderef(char *str)
66190 +{
66191 +#ifdef CONFIG_X86_32
66192 + unsigned int cpu;
66193 + struct desc_struct *gdt;
66194 +
66195 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
66196 + gdt = get_cpu_gdt_table(cpu);
66197 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
66198 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
66199 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
66200 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
66201 + }
66202 + loadsegment(ds, __KERNEL_DS);
66203 + loadsegment(es, __KERNEL_DS);
66204 + loadsegment(ss, __KERNEL_DS);
66205 +#else
66206 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
66207 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
66208 + clone_pgd_mask = ~(pgdval_t)0UL;
66209 +#endif
66210 +
66211 + return 0;
66212 +}
66213 +early_param("pax_nouderef", setup_pax_nouderef);
66214 +#endif
66215 +
66216 +#ifdef CONFIG_PAX_SOFTMODE
66217 +int pax_softmode;
66218 +
66219 +static int __init setup_pax_softmode(char *str)
66220 +{
66221 + get_option(&str, &pax_softmode);
66222 + return 1;
66223 +}
66224 +__setup("pax_softmode=", setup_pax_softmode);
66225 +#endif
66226 +
66227 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
66228 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
66229 static const char *panic_later, *panic_param;
66230 @@ -674,6 +721,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
66231 {
66232 int count = preempt_count();
66233 int ret;
66234 + const char *msg1 = "", *msg2 = "";
66235
66236 if (initcall_debug)
66237 ret = do_one_initcall_debug(fn);
66238 @@ -686,15 +734,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
66239 sprintf(msgbuf, "error code %d ", ret);
66240
66241 if (preempt_count() != count) {
66242 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
66243 + msg1 = " preemption imbalance";
66244 preempt_count() = count;
66245 }
66246 if (irqs_disabled()) {
66247 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
66248 + msg2 = " disabled interrupts";
66249 local_irq_enable();
66250 }
66251 - if (msgbuf[0]) {
66252 - printk("initcall %pF returned with %s\n", fn, msgbuf);
66253 + if (msgbuf[0] || *msg1 || *msg2) {
66254 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
66255 }
66256
66257 return ret;
66258 @@ -747,8 +795,14 @@ static void __init do_initcall_level(int level)
66259 level, level,
66260 &repair_env_string);
66261
66262 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
66263 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
66264 do_one_initcall(*fn);
66265 +
66266 +#ifdef CONFIG_PAX_LATENT_ENTROPY
66267 + transfer_latent_entropy();
66268 +#endif
66269 +
66270 + }
66271 }
66272
66273 static void __init do_initcalls(void)
66274 @@ -782,8 +836,14 @@ static void __init do_pre_smp_initcalls(void)
66275 {
66276 initcall_t *fn;
66277
66278 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
66279 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
66280 do_one_initcall(*fn);
66281 +
66282 +#ifdef CONFIG_PAX_LATENT_ENTROPY
66283 + transfer_latent_entropy();
66284 +#endif
66285 +
66286 + }
66287 }
66288
66289 static void run_init_process(const char *init_filename)
66290 @@ -865,7 +925,7 @@ static int __init kernel_init(void * unused)
66291 do_basic_setup();
66292
66293 /* Open the /dev/console on the rootfs, this should never fail */
66294 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
66295 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
66296 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
66297
66298 (void) sys_dup(0);
66299 @@ -878,11 +938,13 @@ static int __init kernel_init(void * unused)
66300 if (!ramdisk_execute_command)
66301 ramdisk_execute_command = "/init";
66302
66303 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
66304 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
66305 ramdisk_execute_command = NULL;
66306 prepare_namespace();
66307 }
66308
66309 + grsecurity_init();
66310 +
66311 /*
66312 * Ok, we have completed the initial bootup, and
66313 * we're essentially up and running. Get rid of the
66314 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
66315 index 8ce5769..4666884 100644
66316 --- a/ipc/mqueue.c
66317 +++ b/ipc/mqueue.c
66318 @@ -279,6 +279,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
66319 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
66320 info->attr.mq_msgsize);
66321
66322 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66323 spin_lock(&mq_lock);
66324 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
66325 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
66326 diff --git a/ipc/msg.c b/ipc/msg.c
66327 index 7385de2..a8180e08 100644
66328 --- a/ipc/msg.c
66329 +++ b/ipc/msg.c
66330 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
66331 return security_msg_queue_associate(msq, msgflg);
66332 }
66333
66334 +static struct ipc_ops msg_ops = {
66335 + .getnew = newque,
66336 + .associate = msg_security,
66337 + .more_checks = NULL
66338 +};
66339 +
66340 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66341 {
66342 struct ipc_namespace *ns;
66343 - struct ipc_ops msg_ops;
66344 struct ipc_params msg_params;
66345
66346 ns = current->nsproxy->ipc_ns;
66347
66348 - msg_ops.getnew = newque;
66349 - msg_ops.associate = msg_security;
66350 - msg_ops.more_checks = NULL;
66351 -
66352 msg_params.key = key;
66353 msg_params.flg = msgflg;
66354
66355 diff --git a/ipc/sem.c b/ipc/sem.c
66356 index 5215a81..cfc0cac 100644
66357 --- a/ipc/sem.c
66358 +++ b/ipc/sem.c
66359 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
66360 return 0;
66361 }
66362
66363 +static struct ipc_ops sem_ops = {
66364 + .getnew = newary,
66365 + .associate = sem_security,
66366 + .more_checks = sem_more_checks
66367 +};
66368 +
66369 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66370 {
66371 struct ipc_namespace *ns;
66372 - struct ipc_ops sem_ops;
66373 struct ipc_params sem_params;
66374
66375 ns = current->nsproxy->ipc_ns;
66376 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66377 if (nsems < 0 || nsems > ns->sc_semmsl)
66378 return -EINVAL;
66379
66380 - sem_ops.getnew = newary;
66381 - sem_ops.associate = sem_security;
66382 - sem_ops.more_checks = sem_more_checks;
66383 -
66384 sem_params.key = key;
66385 sem_params.flg = semflg;
66386 sem_params.u.nsems = nsems;
66387 diff --git a/ipc/shm.c b/ipc/shm.c
66388 index 41c1285..cf6404c 100644
66389 --- a/ipc/shm.c
66390 +++ b/ipc/shm.c
66391 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
66392 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66393 #endif
66394
66395 +#ifdef CONFIG_GRKERNSEC
66396 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66397 + const time_t shm_createtime, const uid_t cuid,
66398 + const int shmid);
66399 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66400 + const time_t shm_createtime);
66401 +#endif
66402 +
66403 void shm_init_ns(struct ipc_namespace *ns)
66404 {
66405 ns->shm_ctlmax = SHMMAX;
66406 @@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
66407 shp->shm_lprid = 0;
66408 shp->shm_atim = shp->shm_dtim = 0;
66409 shp->shm_ctim = get_seconds();
66410 +#ifdef CONFIG_GRKERNSEC
66411 + {
66412 + struct timespec timeval;
66413 + do_posix_clock_monotonic_gettime(&timeval);
66414 +
66415 + shp->shm_createtime = timeval.tv_sec;
66416 + }
66417 +#endif
66418 shp->shm_segsz = size;
66419 shp->shm_nattch = 0;
66420 shp->shm_file = file;
66421 @@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
66422 return 0;
66423 }
66424
66425 +static struct ipc_ops shm_ops = {
66426 + .getnew = newseg,
66427 + .associate = shm_security,
66428 + .more_checks = shm_more_checks
66429 +};
66430 +
66431 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66432 {
66433 struct ipc_namespace *ns;
66434 - struct ipc_ops shm_ops;
66435 struct ipc_params shm_params;
66436
66437 ns = current->nsproxy->ipc_ns;
66438
66439 - shm_ops.getnew = newseg;
66440 - shm_ops.associate = shm_security;
66441 - shm_ops.more_checks = shm_more_checks;
66442 -
66443 shm_params.key = key;
66444 shm_params.flg = shmflg;
66445 shm_params.u.size = size;
66446 @@ -1000,6 +1017,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66447 f_mode = FMODE_READ | FMODE_WRITE;
66448 }
66449 if (shmflg & SHM_EXEC) {
66450 +
66451 +#ifdef CONFIG_PAX_MPROTECT
66452 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
66453 + goto out;
66454 +#endif
66455 +
66456 prot |= PROT_EXEC;
66457 acc_mode |= S_IXUGO;
66458 }
66459 @@ -1023,9 +1046,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66460 if (err)
66461 goto out_unlock;
66462
66463 +#ifdef CONFIG_GRKERNSEC
66464 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66465 + shp->shm_perm.cuid, shmid) ||
66466 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66467 + err = -EACCES;
66468 + goto out_unlock;
66469 + }
66470 +#endif
66471 +
66472 path = shp->shm_file->f_path;
66473 path_get(&path);
66474 shp->shm_nattch++;
66475 +#ifdef CONFIG_GRKERNSEC
66476 + shp->shm_lapid = current->pid;
66477 +#endif
66478 size = i_size_read(path.dentry->d_inode);
66479 shm_unlock(shp);
66480
66481 diff --git a/kernel/acct.c b/kernel/acct.c
66482 index 02e6167..54824f7 100644
66483 --- a/kernel/acct.c
66484 +++ b/kernel/acct.c
66485 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
66486 */
66487 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66488 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66489 - file->f_op->write(file, (char *)&ac,
66490 + file->f_op->write(file, (char __force_user *)&ac,
66491 sizeof(acct_t), &file->f_pos);
66492 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66493 set_fs(fs);
66494 diff --git a/kernel/audit.c b/kernel/audit.c
66495 index 1c7f2c6..9ba5359 100644
66496 --- a/kernel/audit.c
66497 +++ b/kernel/audit.c
66498 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
66499 3) suppressed due to audit_rate_limit
66500 4) suppressed due to audit_backlog_limit
66501 */
66502 -static atomic_t audit_lost = ATOMIC_INIT(0);
66503 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66504
66505 /* The netlink socket. */
66506 static struct sock *audit_sock;
66507 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
66508 unsigned long now;
66509 int print;
66510
66511 - atomic_inc(&audit_lost);
66512 + atomic_inc_unchecked(&audit_lost);
66513
66514 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66515
66516 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
66517 printk(KERN_WARNING
66518 "audit: audit_lost=%d audit_rate_limit=%d "
66519 "audit_backlog_limit=%d\n",
66520 - atomic_read(&audit_lost),
66521 + atomic_read_unchecked(&audit_lost),
66522 audit_rate_limit,
66523 audit_backlog_limit);
66524 audit_panic(message);
66525 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
66526 status_set.pid = audit_pid;
66527 status_set.rate_limit = audit_rate_limit;
66528 status_set.backlog_limit = audit_backlog_limit;
66529 - status_set.lost = atomic_read(&audit_lost);
66530 + status_set.lost = atomic_read_unchecked(&audit_lost);
66531 status_set.backlog = skb_queue_len(&audit_skb_queue);
66532 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66533 &status_set, sizeof(status_set));
66534 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
66535 index 4b96415..d8c16ee 100644
66536 --- a/kernel/auditsc.c
66537 +++ b/kernel/auditsc.c
66538 @@ -2289,7 +2289,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
66539 }
66540
66541 /* global counter which is incremented every time something logs in */
66542 -static atomic_t session_id = ATOMIC_INIT(0);
66543 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66544
66545 /**
66546 * audit_set_loginuid - set current task's audit_context loginuid
66547 @@ -2313,7 +2313,7 @@ int audit_set_loginuid(uid_t loginuid)
66548 return -EPERM;
66549 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
66550
66551 - sessionid = atomic_inc_return(&session_id);
66552 + sessionid = atomic_inc_return_unchecked(&session_id);
66553 if (context && context->in_syscall) {
66554 struct audit_buffer *ab;
66555
66556 diff --git a/kernel/capability.c b/kernel/capability.c
66557 index 493d972..ea17248 100644
66558 --- a/kernel/capability.c
66559 +++ b/kernel/capability.c
66560 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
66561 * before modification is attempted and the application
66562 * fails.
66563 */
66564 + if (tocopy > ARRAY_SIZE(kdata))
66565 + return -EFAULT;
66566 +
66567 if (copy_to_user(dataptr, kdata, tocopy
66568 * sizeof(struct __user_cap_data_struct))) {
66569 return -EFAULT;
66570 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
66571 int ret;
66572
66573 rcu_read_lock();
66574 - ret = security_capable(__task_cred(t), ns, cap);
66575 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
66576 + gr_task_is_capable(t, __task_cred(t), cap);
66577 rcu_read_unlock();
66578
66579 - return (ret == 0);
66580 + return ret;
66581 }
66582
66583 /**
66584 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
66585 int ret;
66586
66587 rcu_read_lock();
66588 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
66589 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
66590 rcu_read_unlock();
66591
66592 - return (ret == 0);
66593 + return ret;
66594 }
66595
66596 /**
66597 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
66598 BUG();
66599 }
66600
66601 - if (security_capable(current_cred(), ns, cap) == 0) {
66602 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
66603 current->flags |= PF_SUPERPRIV;
66604 return true;
66605 }
66606 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
66607 }
66608 EXPORT_SYMBOL(ns_capable);
66609
66610 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
66611 +{
66612 + if (unlikely(!cap_valid(cap))) {
66613 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66614 + BUG();
66615 + }
66616 +
66617 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
66618 + current->flags |= PF_SUPERPRIV;
66619 + return true;
66620 + }
66621 + return false;
66622 +}
66623 +EXPORT_SYMBOL(ns_capable_nolog);
66624 +
66625 /**
66626 * capable - Determine if the current task has a superior capability in effect
66627 * @cap: The capability to be tested for
66628 @@ -408,6 +427,12 @@ bool capable(int cap)
66629 }
66630 EXPORT_SYMBOL(capable);
66631
66632 +bool capable_nolog(int cap)
66633 +{
66634 + return ns_capable_nolog(&init_user_ns, cap);
66635 +}
66636 +EXPORT_SYMBOL(capable_nolog);
66637 +
66638 /**
66639 * nsown_capable - Check superior capability to one's own user_ns
66640 * @cap: The capability in question
66641 @@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
66642
66643 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66644 }
66645 +
66646 +bool inode_capable_nolog(const struct inode *inode, int cap)
66647 +{
66648 + struct user_namespace *ns = current_user_ns();
66649 +
66650 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66651 +}
66652 diff --git a/kernel/compat.c b/kernel/compat.c
66653 index c28a306..b4d0cf3 100644
66654 --- a/kernel/compat.c
66655 +++ b/kernel/compat.c
66656 @@ -13,6 +13,7 @@
66657
66658 #include <linux/linkage.h>
66659 #include <linux/compat.h>
66660 +#include <linux/module.h>
66661 #include <linux/errno.h>
66662 #include <linux/time.h>
66663 #include <linux/signal.h>
66664 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
66665 mm_segment_t oldfs;
66666 long ret;
66667
66668 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66669 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66670 oldfs = get_fs();
66671 set_fs(KERNEL_DS);
66672 ret = hrtimer_nanosleep_restart(restart);
66673 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
66674 oldfs = get_fs();
66675 set_fs(KERNEL_DS);
66676 ret = hrtimer_nanosleep(&tu,
66677 - rmtp ? (struct timespec __user *)&rmt : NULL,
66678 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
66679 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66680 set_fs(oldfs);
66681
66682 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
66683 mm_segment_t old_fs = get_fs();
66684
66685 set_fs(KERNEL_DS);
66686 - ret = sys_sigpending((old_sigset_t __user *) &s);
66687 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
66688 set_fs(old_fs);
66689 if (ret == 0)
66690 ret = put_user(s, set);
66691 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
66692 mm_segment_t old_fs = get_fs();
66693
66694 set_fs(KERNEL_DS);
66695 - ret = sys_old_getrlimit(resource, &r);
66696 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66697 set_fs(old_fs);
66698
66699 if (!ret) {
66700 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
66701 mm_segment_t old_fs = get_fs();
66702
66703 set_fs(KERNEL_DS);
66704 - ret = sys_getrusage(who, (struct rusage __user *) &r);
66705 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66706 set_fs(old_fs);
66707
66708 if (ret)
66709 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
66710 set_fs (KERNEL_DS);
66711 ret = sys_wait4(pid,
66712 (stat_addr ?
66713 - (unsigned int __user *) &status : NULL),
66714 - options, (struct rusage __user *) &r);
66715 + (unsigned int __force_user *) &status : NULL),
66716 + options, (struct rusage __force_user *) &r);
66717 set_fs (old_fs);
66718
66719 if (ret > 0) {
66720 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
66721 memset(&info, 0, sizeof(info));
66722
66723 set_fs(KERNEL_DS);
66724 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66725 - uru ? (struct rusage __user *)&ru : NULL);
66726 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66727 + uru ? (struct rusage __force_user *)&ru : NULL);
66728 set_fs(old_fs);
66729
66730 if ((ret < 0) || (info.si_signo == 0))
66731 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
66732 oldfs = get_fs();
66733 set_fs(KERNEL_DS);
66734 err = sys_timer_settime(timer_id, flags,
66735 - (struct itimerspec __user *) &newts,
66736 - (struct itimerspec __user *) &oldts);
66737 + (struct itimerspec __force_user *) &newts,
66738 + (struct itimerspec __force_user *) &oldts);
66739 set_fs(oldfs);
66740 if (!err && old && put_compat_itimerspec(old, &oldts))
66741 return -EFAULT;
66742 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
66743 oldfs = get_fs();
66744 set_fs(KERNEL_DS);
66745 err = sys_timer_gettime(timer_id,
66746 - (struct itimerspec __user *) &ts);
66747 + (struct itimerspec __force_user *) &ts);
66748 set_fs(oldfs);
66749 if (!err && put_compat_itimerspec(setting, &ts))
66750 return -EFAULT;
66751 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
66752 oldfs = get_fs();
66753 set_fs(KERNEL_DS);
66754 err = sys_clock_settime(which_clock,
66755 - (struct timespec __user *) &ts);
66756 + (struct timespec __force_user *) &ts);
66757 set_fs(oldfs);
66758 return err;
66759 }
66760 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
66761 oldfs = get_fs();
66762 set_fs(KERNEL_DS);
66763 err = sys_clock_gettime(which_clock,
66764 - (struct timespec __user *) &ts);
66765 + (struct timespec __force_user *) &ts);
66766 set_fs(oldfs);
66767 if (!err && put_compat_timespec(&ts, tp))
66768 return -EFAULT;
66769 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
66770
66771 oldfs = get_fs();
66772 set_fs(KERNEL_DS);
66773 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66774 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66775 set_fs(oldfs);
66776
66777 err = compat_put_timex(utp, &txc);
66778 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66779 oldfs = get_fs();
66780 set_fs(KERNEL_DS);
66781 err = sys_clock_getres(which_clock,
66782 - (struct timespec __user *) &ts);
66783 + (struct timespec __force_user *) &ts);
66784 set_fs(oldfs);
66785 if (!err && tp && put_compat_timespec(&ts, tp))
66786 return -EFAULT;
66787 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66788 long err;
66789 mm_segment_t oldfs;
66790 struct timespec tu;
66791 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66792 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66793
66794 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66795 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66796 oldfs = get_fs();
66797 set_fs(KERNEL_DS);
66798 err = clock_nanosleep_restart(restart);
66799 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66800 oldfs = get_fs();
66801 set_fs(KERNEL_DS);
66802 err = sys_clock_nanosleep(which_clock, flags,
66803 - (struct timespec __user *) &in,
66804 - (struct timespec __user *) &out);
66805 + (struct timespec __force_user *) &in,
66806 + (struct timespec __force_user *) &out);
66807 set_fs(oldfs);
66808
66809 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66810 diff --git a/kernel/configs.c b/kernel/configs.c
66811 index 42e8fa0..9e7406b 100644
66812 --- a/kernel/configs.c
66813 +++ b/kernel/configs.c
66814 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66815 struct proc_dir_entry *entry;
66816
66817 /* create the current config file */
66818 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66819 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66820 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66821 + &ikconfig_file_ops);
66822 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66823 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66824 + &ikconfig_file_ops);
66825 +#endif
66826 +#else
66827 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66828 &ikconfig_file_ops);
66829 +#endif
66830 +
66831 if (!entry)
66832 return -ENOMEM;
66833
66834 diff --git a/kernel/cred.c b/kernel/cred.c
66835 index de728ac..e3c267c 100644
66836 --- a/kernel/cred.c
66837 +++ b/kernel/cred.c
66838 @@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
66839 validate_creds(cred);
66840 alter_cred_subscribers(cred, -1);
66841 put_cred(cred);
66842 +
66843 +#ifdef CONFIG_GRKERNSEC_SETXID
66844 + cred = (struct cred *) tsk->delayed_cred;
66845 + if (cred != NULL) {
66846 + tsk->delayed_cred = NULL;
66847 + validate_creds(cred);
66848 + alter_cred_subscribers(cred, -1);
66849 + put_cred(cred);
66850 + }
66851 +#endif
66852 }
66853
66854 /**
66855 @@ -469,7 +479,7 @@ error_put:
66856 * Always returns 0 thus allowing this function to be tail-called at the end
66857 * of, say, sys_setgid().
66858 */
66859 -int commit_creds(struct cred *new)
66860 +static int __commit_creds(struct cred *new)
66861 {
66862 struct task_struct *task = current;
66863 const struct cred *old = task->real_cred;
66864 @@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
66865
66866 get_cred(new); /* we will require a ref for the subj creds too */
66867
66868 + gr_set_role_label(task, new->uid, new->gid);
66869 +
66870 /* dumpability changes */
66871 if (!uid_eq(old->euid, new->euid) ||
66872 !gid_eq(old->egid, new->egid) ||
66873 @@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
66874 put_cred(old);
66875 return 0;
66876 }
66877 +#ifdef CONFIG_GRKERNSEC_SETXID
66878 +extern int set_user(struct cred *new);
66879 +
66880 +void gr_delayed_cred_worker(void)
66881 +{
66882 + const struct cred *new = current->delayed_cred;
66883 + struct cred *ncred;
66884 +
66885 + current->delayed_cred = NULL;
66886 +
66887 + if (current_uid() && new != NULL) {
66888 + // from doing get_cred on it when queueing this
66889 + put_cred(new);
66890 + return;
66891 + } else if (new == NULL)
66892 + return;
66893 +
66894 + ncred = prepare_creds();
66895 + if (!ncred)
66896 + goto die;
66897 + // uids
66898 + ncred->uid = new->uid;
66899 + ncred->euid = new->euid;
66900 + ncred->suid = new->suid;
66901 + ncred->fsuid = new->fsuid;
66902 + // gids
66903 + ncred->gid = new->gid;
66904 + ncred->egid = new->egid;
66905 + ncred->sgid = new->sgid;
66906 + ncred->fsgid = new->fsgid;
66907 + // groups
66908 + if (set_groups(ncred, new->group_info) < 0) {
66909 + abort_creds(ncred);
66910 + goto die;
66911 + }
66912 + // caps
66913 + ncred->securebits = new->securebits;
66914 + ncred->cap_inheritable = new->cap_inheritable;
66915 + ncred->cap_permitted = new->cap_permitted;
66916 + ncred->cap_effective = new->cap_effective;
66917 + ncred->cap_bset = new->cap_bset;
66918 +
66919 + if (set_user(ncred)) {
66920 + abort_creds(ncred);
66921 + goto die;
66922 + }
66923 +
66924 + // from doing get_cred on it when queueing this
66925 + put_cred(new);
66926 +
66927 + __commit_creds(ncred);
66928 + return;
66929 +die:
66930 + // from doing get_cred on it when queueing this
66931 + put_cred(new);
66932 + do_group_exit(SIGKILL);
66933 +}
66934 +#endif
66935 +
66936 +int commit_creds(struct cred *new)
66937 +{
66938 +#ifdef CONFIG_GRKERNSEC_SETXID
66939 + int ret;
66940 + int schedule_it = 0;
66941 + struct task_struct *t;
66942 +
66943 + /* we won't get called with tasklist_lock held for writing
66944 + and interrupts disabled as the cred struct in that case is
66945 + init_cred
66946 + */
66947 + if (grsec_enable_setxid && !current_is_single_threaded() &&
66948 + !current_uid() && new->uid) {
66949 + schedule_it = 1;
66950 + }
66951 + ret = __commit_creds(new);
66952 + if (schedule_it) {
66953 + rcu_read_lock();
66954 + read_lock(&tasklist_lock);
66955 + for (t = next_thread(current); t != current;
66956 + t = next_thread(t)) {
66957 + if (t->delayed_cred == NULL) {
66958 + t->delayed_cred = get_cred(new);
66959 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
66960 + set_tsk_need_resched(t);
66961 + }
66962 + }
66963 + read_unlock(&tasklist_lock);
66964 + rcu_read_unlock();
66965 + }
66966 + return ret;
66967 +#else
66968 + return __commit_creds(new);
66969 +#endif
66970 +}
66971 +
66972 EXPORT_SYMBOL(commit_creds);
66973
66974 /**
66975 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66976 index 0557f24..1a00d9a 100644
66977 --- a/kernel/debug/debug_core.c
66978 +++ b/kernel/debug/debug_core.c
66979 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66980 */
66981 static atomic_t masters_in_kgdb;
66982 static atomic_t slaves_in_kgdb;
66983 -static atomic_t kgdb_break_tasklet_var;
66984 +static atomic_unchecked_t kgdb_break_tasklet_var;
66985 atomic_t kgdb_setting_breakpoint;
66986
66987 struct task_struct *kgdb_usethread;
66988 @@ -132,7 +132,7 @@ int kgdb_single_step;
66989 static pid_t kgdb_sstep_pid;
66990
66991 /* to keep track of the CPU which is doing the single stepping*/
66992 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66993 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66994
66995 /*
66996 * If you are debugging a problem where roundup (the collection of
66997 @@ -540,7 +540,7 @@ return_normal:
66998 * kernel will only try for the value of sstep_tries before
66999 * giving up and continuing on.
67000 */
67001 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
67002 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
67003 (kgdb_info[cpu].task &&
67004 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
67005 atomic_set(&kgdb_active, -1);
67006 @@ -634,8 +634,8 @@ cpu_master_loop:
67007 }
67008
67009 kgdb_restore:
67010 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
67011 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
67012 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
67013 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
67014 if (kgdb_info[sstep_cpu].task)
67015 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
67016 else
67017 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
67018 static void kgdb_tasklet_bpt(unsigned long ing)
67019 {
67020 kgdb_breakpoint();
67021 - atomic_set(&kgdb_break_tasklet_var, 0);
67022 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
67023 }
67024
67025 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
67026
67027 void kgdb_schedule_breakpoint(void)
67028 {
67029 - if (atomic_read(&kgdb_break_tasklet_var) ||
67030 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
67031 atomic_read(&kgdb_active) != -1 ||
67032 atomic_read(&kgdb_setting_breakpoint))
67033 return;
67034 - atomic_inc(&kgdb_break_tasklet_var);
67035 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
67036 tasklet_schedule(&kgdb_tasklet_breakpoint);
67037 }
67038 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
67039 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
67040 index 1f91413..362a0a1 100644
67041 --- a/kernel/debug/kdb/kdb_main.c
67042 +++ b/kernel/debug/kdb/kdb_main.c
67043 @@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
67044 list_for_each_entry(mod, kdb_modules, list) {
67045
67046 kdb_printf("%-20s%8u 0x%p ", mod->name,
67047 - mod->core_size, (void *)mod);
67048 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
67049 #ifdef CONFIG_MODULE_UNLOAD
67050 kdb_printf("%4ld ", module_refcount(mod));
67051 #endif
67052 @@ -1994,7 +1994,7 @@ static int kdb_lsmod(int argc, const char **argv)
67053 kdb_printf(" (Loading)");
67054 else
67055 kdb_printf(" (Live)");
67056 - kdb_printf(" 0x%p", mod->module_core);
67057 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
67058
67059 #ifdef CONFIG_MODULE_UNLOAD
67060 {
67061 diff --git a/kernel/events/core.c b/kernel/events/core.c
67062 index 11409af..b6ec863 100644
67063 --- a/kernel/events/core.c
67064 +++ b/kernel/events/core.c
67065 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
67066 return 0;
67067 }
67068
67069 -static atomic64_t perf_event_id;
67070 +static atomic64_unchecked_t perf_event_id;
67071
67072 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
67073 enum event_type_t event_type);
67074 @@ -2663,7 +2663,7 @@ static void __perf_event_read(void *info)
67075
67076 static inline u64 perf_event_count(struct perf_event *event)
67077 {
67078 - return local64_read(&event->count) + atomic64_read(&event->child_count);
67079 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
67080 }
67081
67082 static u64 perf_event_read(struct perf_event *event)
67083 @@ -2993,9 +2993,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
67084 mutex_lock(&event->child_mutex);
67085 total += perf_event_read(event);
67086 *enabled += event->total_time_enabled +
67087 - atomic64_read(&event->child_total_time_enabled);
67088 + atomic64_read_unchecked(&event->child_total_time_enabled);
67089 *running += event->total_time_running +
67090 - atomic64_read(&event->child_total_time_running);
67091 + atomic64_read_unchecked(&event->child_total_time_running);
67092
67093 list_for_each_entry(child, &event->child_list, child_list) {
67094 total += perf_event_read(child);
67095 @@ -3404,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event)
67096 userpg->offset -= local64_read(&event->hw.prev_count);
67097
67098 userpg->time_enabled = enabled +
67099 - atomic64_read(&event->child_total_time_enabled);
67100 + atomic64_read_unchecked(&event->child_total_time_enabled);
67101
67102 userpg->time_running = running +
67103 - atomic64_read(&event->child_total_time_running);
67104 + atomic64_read_unchecked(&event->child_total_time_running);
67105
67106 arch_perf_update_userpage(userpg, now);
67107
67108 @@ -3840,11 +3840,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
67109 values[n++] = perf_event_count(event);
67110 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
67111 values[n++] = enabled +
67112 - atomic64_read(&event->child_total_time_enabled);
67113 + atomic64_read_unchecked(&event->child_total_time_enabled);
67114 }
67115 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
67116 values[n++] = running +
67117 - atomic64_read(&event->child_total_time_running);
67118 + atomic64_read_unchecked(&event->child_total_time_running);
67119 }
67120 if (read_format & PERF_FORMAT_ID)
67121 values[n++] = primary_event_id(event);
67122 @@ -4522,12 +4522,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
67123 * need to add enough zero bytes after the string to handle
67124 * the 64bit alignment we do later.
67125 */
67126 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
67127 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
67128 if (!buf) {
67129 name = strncpy(tmp, "//enomem", sizeof(tmp));
67130 goto got_name;
67131 }
67132 - name = d_path(&file->f_path, buf, PATH_MAX);
67133 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
67134 if (IS_ERR(name)) {
67135 name = strncpy(tmp, "//toolong", sizeof(tmp));
67136 goto got_name;
67137 @@ -5940,7 +5940,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
67138 event->parent = parent_event;
67139
67140 event->ns = get_pid_ns(current->nsproxy->pid_ns);
67141 - event->id = atomic64_inc_return(&perf_event_id);
67142 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
67143
67144 event->state = PERF_EVENT_STATE_INACTIVE;
67145
67146 @@ -6500,10 +6500,10 @@ static void sync_child_event(struct perf_event *child_event,
67147 /*
67148 * Add back the child's count to the parent's count:
67149 */
67150 - atomic64_add(child_val, &parent_event->child_count);
67151 - atomic64_add(child_event->total_time_enabled,
67152 + atomic64_add_unchecked(child_val, &parent_event->child_count);
67153 + atomic64_add_unchecked(child_event->total_time_enabled,
67154 &parent_event->child_total_time_enabled);
67155 - atomic64_add(child_event->total_time_running,
67156 + atomic64_add_unchecked(child_event->total_time_running,
67157 &parent_event->child_total_time_running);
67158
67159 /*
67160 diff --git a/kernel/exit.c b/kernel/exit.c
67161 index 46ce8da..c648f3a 100644
67162 --- a/kernel/exit.c
67163 +++ b/kernel/exit.c
67164 @@ -59,6 +59,10 @@
67165 #include <asm/pgtable.h>
67166 #include <asm/mmu_context.h>
67167
67168 +#ifdef CONFIG_GRKERNSEC
67169 +extern rwlock_t grsec_exec_file_lock;
67170 +#endif
67171 +
67172 static void exit_mm(struct task_struct * tsk);
67173
67174 static void __unhash_process(struct task_struct *p, bool group_dead)
67175 @@ -182,6 +186,10 @@ void release_task(struct task_struct * p)
67176 struct task_struct *leader;
67177 int zap_leader;
67178 repeat:
67179 +#ifdef CONFIG_NET
67180 + gr_del_task_from_ip_table(p);
67181 +#endif
67182 +
67183 /* don't need to get the RCU readlock here - the process is dead and
67184 * can't be modifying its own credentials. But shut RCU-lockdep up */
67185 rcu_read_lock();
67186 @@ -394,7 +402,7 @@ int allow_signal(int sig)
67187 * know it'll be handled, so that they don't get converted to
67188 * SIGKILL or just silently dropped.
67189 */
67190 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
67191 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
67192 recalc_sigpending();
67193 spin_unlock_irq(&current->sighand->siglock);
67194 return 0;
67195 @@ -430,6 +438,17 @@ void daemonize(const char *name, ...)
67196 vsnprintf(current->comm, sizeof(current->comm), name, args);
67197 va_end(args);
67198
67199 +#ifdef CONFIG_GRKERNSEC
67200 + write_lock(&grsec_exec_file_lock);
67201 + if (current->exec_file) {
67202 + fput(current->exec_file);
67203 + current->exec_file = NULL;
67204 + }
67205 + write_unlock(&grsec_exec_file_lock);
67206 +#endif
67207 +
67208 + gr_set_kernel_label(current);
67209 +
67210 /*
67211 * If we were started as result of loading a module, close all of the
67212 * user space pages. We don't need them, and if we didn't close them
67213 @@ -907,6 +926,8 @@ void do_exit(long code)
67214 struct task_struct *tsk = current;
67215 int group_dead;
67216
67217 + set_fs(USER_DS);
67218 +
67219 profile_task_exit(tsk);
67220
67221 WARN_ON(blk_needs_flush_plug(tsk));
67222 @@ -923,7 +944,6 @@ void do_exit(long code)
67223 * mm_release()->clear_child_tid() from writing to a user-controlled
67224 * kernel address.
67225 */
67226 - set_fs(USER_DS);
67227
67228 ptrace_event(PTRACE_EVENT_EXIT, code);
67229
67230 @@ -985,6 +1005,9 @@ void do_exit(long code)
67231 tsk->exit_code = code;
67232 taskstats_exit(tsk, group_dead);
67233
67234 + gr_acl_handle_psacct(tsk, code);
67235 + gr_acl_handle_exit();
67236 +
67237 exit_mm(tsk);
67238
67239 if (group_dead)
67240 @@ -1101,7 +1124,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
67241 * Take down every thread in the group. This is called by fatal signals
67242 * as well as by sys_exit_group (below).
67243 */
67244 -void
67245 +__noreturn void
67246 do_group_exit(int exit_code)
67247 {
67248 struct signal_struct *sig = current->signal;
67249 diff --git a/kernel/fork.c b/kernel/fork.c
67250 index f9d0499..e4f8f44 100644
67251 --- a/kernel/fork.c
67252 +++ b/kernel/fork.c
67253 @@ -321,7 +321,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
67254 *stackend = STACK_END_MAGIC; /* for overflow detection */
67255
67256 #ifdef CONFIG_CC_STACKPROTECTOR
67257 - tsk->stack_canary = get_random_int();
67258 + tsk->stack_canary = pax_get_random_long();
67259 #endif
67260
67261 /*
67262 @@ -345,13 +345,78 @@ out:
67263 }
67264
67265 #ifdef CONFIG_MMU
67266 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
67267 +{
67268 + struct vm_area_struct *tmp;
67269 + unsigned long charge;
67270 + struct mempolicy *pol;
67271 + struct file *file;
67272 +
67273 + charge = 0;
67274 + if (mpnt->vm_flags & VM_ACCOUNT) {
67275 + unsigned long len;
67276 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67277 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67278 + goto fail_nomem;
67279 + charge = len;
67280 + }
67281 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67282 + if (!tmp)
67283 + goto fail_nomem;
67284 + *tmp = *mpnt;
67285 + tmp->vm_mm = mm;
67286 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
67287 + pol = mpol_dup(vma_policy(mpnt));
67288 + if (IS_ERR(pol))
67289 + goto fail_nomem_policy;
67290 + vma_set_policy(tmp, pol);
67291 + if (anon_vma_fork(tmp, mpnt))
67292 + goto fail_nomem_anon_vma_fork;
67293 + tmp->vm_flags &= ~VM_LOCKED;
67294 + tmp->vm_next = tmp->vm_prev = NULL;
67295 + tmp->vm_mirror = NULL;
67296 + file = tmp->vm_file;
67297 + if (file) {
67298 + struct inode *inode = file->f_path.dentry->d_inode;
67299 + struct address_space *mapping = file->f_mapping;
67300 +
67301 + get_file(file);
67302 + if (tmp->vm_flags & VM_DENYWRITE)
67303 + atomic_dec(&inode->i_writecount);
67304 + mutex_lock(&mapping->i_mmap_mutex);
67305 + if (tmp->vm_flags & VM_SHARED)
67306 + mapping->i_mmap_writable++;
67307 + flush_dcache_mmap_lock(mapping);
67308 + /* insert tmp into the share list, just after mpnt */
67309 + vma_prio_tree_add(tmp, mpnt);
67310 + flush_dcache_mmap_unlock(mapping);
67311 + mutex_unlock(&mapping->i_mmap_mutex);
67312 + }
67313 +
67314 + /*
67315 + * Clear hugetlb-related page reserves for children. This only
67316 + * affects MAP_PRIVATE mappings. Faults generated by the child
67317 + * are not guaranteed to succeed, even if read-only
67318 + */
67319 + if (is_vm_hugetlb_page(tmp))
67320 + reset_vma_resv_huge_pages(tmp);
67321 +
67322 + return tmp;
67323 +
67324 +fail_nomem_anon_vma_fork:
67325 + mpol_put(pol);
67326 +fail_nomem_policy:
67327 + kmem_cache_free(vm_area_cachep, tmp);
67328 +fail_nomem:
67329 + vm_unacct_memory(charge);
67330 + return NULL;
67331 +}
67332 +
67333 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67334 {
67335 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
67336 struct rb_node **rb_link, *rb_parent;
67337 int retval;
67338 - unsigned long charge;
67339 - struct mempolicy *pol;
67340
67341 down_write(&oldmm->mmap_sem);
67342 flush_cache_dup_mm(oldmm);
67343 @@ -363,8 +428,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67344 mm->locked_vm = 0;
67345 mm->mmap = NULL;
67346 mm->mmap_cache = NULL;
67347 - mm->free_area_cache = oldmm->mmap_base;
67348 - mm->cached_hole_size = ~0UL;
67349 + mm->free_area_cache = oldmm->free_area_cache;
67350 + mm->cached_hole_size = oldmm->cached_hole_size;
67351 mm->map_count = 0;
67352 cpumask_clear(mm_cpumask(mm));
67353 mm->mm_rb = RB_ROOT;
67354 @@ -380,8 +445,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67355
67356 prev = NULL;
67357 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
67358 - struct file *file;
67359 -
67360 if (mpnt->vm_flags & VM_DONTCOPY) {
67361 long pages = vma_pages(mpnt);
67362 mm->total_vm -= pages;
67363 @@ -389,54 +452,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67364 -pages);
67365 continue;
67366 }
67367 - charge = 0;
67368 - if (mpnt->vm_flags & VM_ACCOUNT) {
67369 - unsigned long len;
67370 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67371 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67372 - goto fail_nomem;
67373 - charge = len;
67374 + tmp = dup_vma(mm, oldmm, mpnt);
67375 + if (!tmp) {
67376 + retval = -ENOMEM;
67377 + goto out;
67378 }
67379 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67380 - if (!tmp)
67381 - goto fail_nomem;
67382 - *tmp = *mpnt;
67383 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
67384 - pol = mpol_dup(vma_policy(mpnt));
67385 - retval = PTR_ERR(pol);
67386 - if (IS_ERR(pol))
67387 - goto fail_nomem_policy;
67388 - vma_set_policy(tmp, pol);
67389 - tmp->vm_mm = mm;
67390 - if (anon_vma_fork(tmp, mpnt))
67391 - goto fail_nomem_anon_vma_fork;
67392 - tmp->vm_flags &= ~VM_LOCKED;
67393 - tmp->vm_next = tmp->vm_prev = NULL;
67394 - file = tmp->vm_file;
67395 - if (file) {
67396 - struct inode *inode = file->f_path.dentry->d_inode;
67397 - struct address_space *mapping = file->f_mapping;
67398 -
67399 - get_file(file);
67400 - if (tmp->vm_flags & VM_DENYWRITE)
67401 - atomic_dec(&inode->i_writecount);
67402 - mutex_lock(&mapping->i_mmap_mutex);
67403 - if (tmp->vm_flags & VM_SHARED)
67404 - mapping->i_mmap_writable++;
67405 - flush_dcache_mmap_lock(mapping);
67406 - /* insert tmp into the share list, just after mpnt */
67407 - vma_prio_tree_add(tmp, mpnt);
67408 - flush_dcache_mmap_unlock(mapping);
67409 - mutex_unlock(&mapping->i_mmap_mutex);
67410 - }
67411 -
67412 - /*
67413 - * Clear hugetlb-related page reserves for children. This only
67414 - * affects MAP_PRIVATE mappings. Faults generated by the child
67415 - * are not guaranteed to succeed, even if read-only
67416 - */
67417 - if (is_vm_hugetlb_page(tmp))
67418 - reset_vma_resv_huge_pages(tmp);
67419
67420 /*
67421 * Link in the new vma and copy the page table entries.
67422 @@ -459,9 +479,34 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67423 if (retval)
67424 goto out;
67425
67426 - if (file)
67427 + if (tmp->vm_file)
67428 uprobe_mmap(tmp);
67429 }
67430 +
67431 +#ifdef CONFIG_PAX_SEGMEXEC
67432 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
67433 + struct vm_area_struct *mpnt_m;
67434 +
67435 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
67436 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
67437 +
67438 + if (!mpnt->vm_mirror)
67439 + continue;
67440 +
67441 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
67442 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
67443 + mpnt->vm_mirror = mpnt_m;
67444 + } else {
67445 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
67446 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
67447 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
67448 + mpnt->vm_mirror->vm_mirror = mpnt;
67449 + }
67450 + }
67451 + BUG_ON(mpnt_m);
67452 + }
67453 +#endif
67454 +
67455 /* a new mm has just been created */
67456 arch_dup_mmap(oldmm, mm);
67457 retval = 0;
67458 @@ -470,14 +515,6 @@ out:
67459 flush_tlb_mm(oldmm);
67460 up_write(&oldmm->mmap_sem);
67461 return retval;
67462 -fail_nomem_anon_vma_fork:
67463 - mpol_put(pol);
67464 -fail_nomem_policy:
67465 - kmem_cache_free(vm_area_cachep, tmp);
67466 -fail_nomem:
67467 - retval = -ENOMEM;
67468 - vm_unacct_memory(charge);
67469 - goto out;
67470 }
67471
67472 static inline int mm_alloc_pgd(struct mm_struct *mm)
67473 @@ -714,8 +751,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
67474 return ERR_PTR(err);
67475
67476 mm = get_task_mm(task);
67477 - if (mm && mm != current->mm &&
67478 - !ptrace_may_access(task, mode)) {
67479 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
67480 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
67481 mmput(mm);
67482 mm = ERR_PTR(-EACCES);
67483 }
67484 @@ -936,13 +973,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
67485 spin_unlock(&fs->lock);
67486 return -EAGAIN;
67487 }
67488 - fs->users++;
67489 + atomic_inc(&fs->users);
67490 spin_unlock(&fs->lock);
67491 return 0;
67492 }
67493 tsk->fs = copy_fs_struct(fs);
67494 if (!tsk->fs)
67495 return -ENOMEM;
67496 + gr_set_chroot_entries(tsk, &tsk->fs->root);
67497 return 0;
67498 }
67499
67500 @@ -1209,6 +1247,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67501 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
67502 #endif
67503 retval = -EAGAIN;
67504 +
67505 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
67506 +
67507 if (atomic_read(&p->real_cred->user->processes) >=
67508 task_rlimit(p, RLIMIT_NPROC)) {
67509 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
67510 @@ -1431,6 +1472,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67511 /* Need tasklist lock for parent etc handling! */
67512 write_lock_irq(&tasklist_lock);
67513
67514 + /* synchronizes with gr_set_acls() */
67515 + gr_copy_label(p);
67516 +
67517 /* CLONE_PARENT re-uses the old parent */
67518 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
67519 p->real_parent = current->real_parent;
67520 @@ -1541,6 +1585,8 @@ bad_fork_cleanup_count:
67521 bad_fork_free:
67522 free_task(p);
67523 fork_out:
67524 + gr_log_forkfail(retval);
67525 +
67526 return ERR_PTR(retval);
67527 }
67528
67529 @@ -1641,6 +1687,8 @@ long do_fork(unsigned long clone_flags,
67530 if (clone_flags & CLONE_PARENT_SETTID)
67531 put_user(nr, parent_tidptr);
67532
67533 + gr_handle_brute_check();
67534 +
67535 if (clone_flags & CLONE_VFORK) {
67536 p->vfork_done = &vfork;
67537 init_completion(&vfork);
67538 @@ -1739,7 +1787,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
67539 return 0;
67540
67541 /* don't need lock here; in the worst case we'll do useless copy */
67542 - if (fs->users == 1)
67543 + if (atomic_read(&fs->users) == 1)
67544 return 0;
67545
67546 *new_fsp = copy_fs_struct(fs);
67547 @@ -1828,7 +1876,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
67548 fs = current->fs;
67549 spin_lock(&fs->lock);
67550 current->fs = new_fs;
67551 - if (--fs->users)
67552 + gr_set_chroot_entries(current, &current->fs->root);
67553 + if (atomic_dec_return(&fs->users))
67554 new_fs = NULL;
67555 else
67556 new_fs = fs;
67557 diff --git a/kernel/futex.c b/kernel/futex.c
67558 index 3717e7b..473c750 100644
67559 --- a/kernel/futex.c
67560 +++ b/kernel/futex.c
67561 @@ -54,6 +54,7 @@
67562 #include <linux/mount.h>
67563 #include <linux/pagemap.h>
67564 #include <linux/syscalls.h>
67565 +#include <linux/ptrace.h>
67566 #include <linux/signal.h>
67567 #include <linux/export.h>
67568 #include <linux/magic.h>
67569 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
67570 struct page *page, *page_head;
67571 int err, ro = 0;
67572
67573 +#ifdef CONFIG_PAX_SEGMEXEC
67574 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67575 + return -EFAULT;
67576 +#endif
67577 +
67578 /*
67579 * The futex address must be "naturally" aligned.
67580 */
67581 @@ -2714,6 +2720,7 @@ static int __init futex_init(void)
67582 {
67583 u32 curval;
67584 int i;
67585 + mm_segment_t oldfs;
67586
67587 /*
67588 * This will fail and we want it. Some arch implementations do
67589 @@ -2725,8 +2732,11 @@ static int __init futex_init(void)
67590 * implementation, the non-functional ones will return
67591 * -ENOSYS.
67592 */
67593 + oldfs = get_fs();
67594 + set_fs(USER_DS);
67595 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
67596 futex_cmpxchg_enabled = 1;
67597 + set_fs(oldfs);
67598
67599 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
67600 plist_head_init(&futex_queues[i].chain);
67601 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
67602 index 9b22d03..6295b62 100644
67603 --- a/kernel/gcov/base.c
67604 +++ b/kernel/gcov/base.c
67605 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
67606 }
67607
67608 #ifdef CONFIG_MODULES
67609 -static inline int within(void *addr, void *start, unsigned long size)
67610 -{
67611 - return ((addr >= start) && (addr < start + size));
67612 -}
67613 -
67614 /* Update list and generate events when modules are unloaded. */
67615 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67616 void *data)
67617 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67618 prev = NULL;
67619 /* Remove entries located in module from linked list. */
67620 for (info = gcov_info_head; info; info = info->next) {
67621 - if (within(info, mod->module_core, mod->core_size)) {
67622 + if (within_module_core_rw((unsigned long)info, mod)) {
67623 if (prev)
67624 prev->next = info->next;
67625 else
67626 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
67627 index 6db7a5e..25b6648 100644
67628 --- a/kernel/hrtimer.c
67629 +++ b/kernel/hrtimer.c
67630 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
67631 local_irq_restore(flags);
67632 }
67633
67634 -static void run_hrtimer_softirq(struct softirq_action *h)
67635 +static void run_hrtimer_softirq(void)
67636 {
67637 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
67638
67639 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
67640 index 4304919..408c4c0 100644
67641 --- a/kernel/jump_label.c
67642 +++ b/kernel/jump_label.c
67643 @@ -13,6 +13,7 @@
67644 #include <linux/sort.h>
67645 #include <linux/err.h>
67646 #include <linux/static_key.h>
67647 +#include <linux/mm.h>
67648
67649 #ifdef HAVE_JUMP_LABEL
67650
67651 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
67652
67653 size = (((unsigned long)stop - (unsigned long)start)
67654 / sizeof(struct jump_entry));
67655 + pax_open_kernel();
67656 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67657 + pax_close_kernel();
67658 }
67659
67660 static void jump_label_update(struct static_key *key, int enable);
67661 @@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
67662 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67663 struct jump_entry *iter;
67664
67665 + pax_open_kernel();
67666 for (iter = iter_start; iter < iter_stop; iter++) {
67667 if (within_module_init(iter->code, mod))
67668 iter->code = 0;
67669 }
67670 + pax_close_kernel();
67671 }
67672
67673 static int
67674 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67675 index 2169fee..45c017a 100644
67676 --- a/kernel/kallsyms.c
67677 +++ b/kernel/kallsyms.c
67678 @@ -11,6 +11,9 @@
67679 * Changed the compression method from stem compression to "table lookup"
67680 * compression (see scripts/kallsyms.c for a more complete description)
67681 */
67682 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67683 +#define __INCLUDED_BY_HIDESYM 1
67684 +#endif
67685 #include <linux/kallsyms.h>
67686 #include <linux/module.h>
67687 #include <linux/init.h>
67688 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
67689
67690 static inline int is_kernel_inittext(unsigned long addr)
67691 {
67692 + if (system_state != SYSTEM_BOOTING)
67693 + return 0;
67694 +
67695 if (addr >= (unsigned long)_sinittext
67696 && addr <= (unsigned long)_einittext)
67697 return 1;
67698 return 0;
67699 }
67700
67701 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67702 +#ifdef CONFIG_MODULES
67703 +static inline int is_module_text(unsigned long addr)
67704 +{
67705 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67706 + return 1;
67707 +
67708 + addr = ktla_ktva(addr);
67709 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67710 +}
67711 +#else
67712 +static inline int is_module_text(unsigned long addr)
67713 +{
67714 + return 0;
67715 +}
67716 +#endif
67717 +#endif
67718 +
67719 static inline int is_kernel_text(unsigned long addr)
67720 {
67721 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67722 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67723
67724 static inline int is_kernel(unsigned long addr)
67725 {
67726 +
67727 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67728 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
67729 + return 1;
67730 +
67731 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67732 +#else
67733 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67734 +#endif
67735 +
67736 return 1;
67737 return in_gate_area_no_mm(addr);
67738 }
67739
67740 static int is_ksym_addr(unsigned long addr)
67741 {
67742 +
67743 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67744 + if (is_module_text(addr))
67745 + return 0;
67746 +#endif
67747 +
67748 if (all_var)
67749 return is_kernel(addr);
67750
67751 @@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67752
67753 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67754 {
67755 - iter->name[0] = '\0';
67756 iter->nameoff = get_symbol_offset(new_pos);
67757 iter->pos = new_pos;
67758 }
67759 @@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
67760 {
67761 struct kallsym_iter *iter = m->private;
67762
67763 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67764 + if (current_uid())
67765 + return 0;
67766 +#endif
67767 +
67768 /* Some debugging symbols have no name. Ignore them. */
67769 if (!iter->name[0])
67770 return 0;
67771 @@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
67772 */
67773 type = iter->exported ? toupper(iter->type) :
67774 tolower(iter->type);
67775 +
67776 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
67777 type, iter->name, iter->module_name);
67778 } else
67779 @@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67780 struct kallsym_iter *iter;
67781 int ret;
67782
67783 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67784 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67785 if (!iter)
67786 return -ENOMEM;
67787 reset_iter(iter, 0);
67788 diff --git a/kernel/kexec.c b/kernel/kexec.c
67789 index 4e2e472..cd0c7ae 100644
67790 --- a/kernel/kexec.c
67791 +++ b/kernel/kexec.c
67792 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67793 unsigned long flags)
67794 {
67795 struct compat_kexec_segment in;
67796 - struct kexec_segment out, __user *ksegments;
67797 + struct kexec_segment out;
67798 + struct kexec_segment __user *ksegments;
67799 unsigned long i, result;
67800
67801 /* Don't allow clients that don't understand the native
67802 diff --git a/kernel/kmod.c b/kernel/kmod.c
67803 index ff2c7cb..085d7af 100644
67804 --- a/kernel/kmod.c
67805 +++ b/kernel/kmod.c
67806 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
67807 kfree(info->argv);
67808 }
67809
67810 -static int call_modprobe(char *module_name, int wait)
67811 +static int call_modprobe(char *module_name, char *module_param, int wait)
67812 {
67813 static char *envp[] = {
67814 "HOME=/",
67815 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
67816 NULL
67817 };
67818
67819 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
67820 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
67821 if (!argv)
67822 goto out;
67823
67824 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
67825 argv[1] = "-q";
67826 argv[2] = "--";
67827 argv[3] = module_name; /* check free_modprobe_argv() */
67828 - argv[4] = NULL;
67829 + argv[4] = module_param;
67830 + argv[5] = NULL;
67831
67832 return call_usermodehelper_fns(modprobe_path, argv, envp,
67833 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
67834 @@ -112,9 +113,8 @@ out:
67835 * If module auto-loading support is disabled then this function
67836 * becomes a no-operation.
67837 */
67838 -int __request_module(bool wait, const char *fmt, ...)
67839 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67840 {
67841 - va_list args;
67842 char module_name[MODULE_NAME_LEN];
67843 unsigned int max_modprobes;
67844 int ret;
67845 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
67846 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67847 static int kmod_loop_msg;
67848
67849 - va_start(args, fmt);
67850 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67851 - va_end(args);
67852 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67853 if (ret >= MODULE_NAME_LEN)
67854 return -ENAMETOOLONG;
67855
67856 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
67857 if (ret)
67858 return ret;
67859
67860 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67861 + if (!current_uid()) {
67862 + /* hack to workaround consolekit/udisks stupidity */
67863 + read_lock(&tasklist_lock);
67864 + if (!strcmp(current->comm, "mount") &&
67865 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67866 + read_unlock(&tasklist_lock);
67867 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67868 + return -EPERM;
67869 + }
67870 + read_unlock(&tasklist_lock);
67871 + }
67872 +#endif
67873 +
67874 /* If modprobe needs a service that is in a module, we get a recursive
67875 * loop. Limit the number of running kmod threads to max_threads/2 or
67876 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67877 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
67878
67879 trace_module_request(module_name, wait, _RET_IP_);
67880
67881 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
67882 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
67883
67884 atomic_dec(&kmod_concurrent);
67885 return ret;
67886 }
67887 +
67888 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67889 +{
67890 + va_list args;
67891 + int ret;
67892 +
67893 + va_start(args, fmt);
67894 + ret = ____request_module(wait, module_param, fmt, args);
67895 + va_end(args);
67896 +
67897 + return ret;
67898 +}
67899 +
67900 +int __request_module(bool wait, const char *fmt, ...)
67901 +{
67902 + va_list args;
67903 + int ret;
67904 +
67905 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67906 + if (current_uid()) {
67907 + char module_param[MODULE_NAME_LEN];
67908 +
67909 + memset(module_param, 0, sizeof(module_param));
67910 +
67911 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67912 +
67913 + va_start(args, fmt);
67914 + ret = ____request_module(wait, module_param, fmt, args);
67915 + va_end(args);
67916 +
67917 + return ret;
67918 + }
67919 +#endif
67920 +
67921 + va_start(args, fmt);
67922 + ret = ____request_module(wait, NULL, fmt, args);
67923 + va_end(args);
67924 +
67925 + return ret;
67926 +}
67927 +
67928 EXPORT_SYMBOL(__request_module);
67929 #endif /* CONFIG_MODULES */
67930
67931 @@ -266,7 +319,7 @@ static int wait_for_helper(void *data)
67932 *
67933 * Thus the __user pointer cast is valid here.
67934 */
67935 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
67936 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67937
67938 /*
67939 * If ret is 0, either ____call_usermodehelper failed and the
67940 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
67941 index c62b854..cb67968 100644
67942 --- a/kernel/kprobes.c
67943 +++ b/kernel/kprobes.c
67944 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
67945 * kernel image and loaded module images reside. This is required
67946 * so x86_64 can correctly handle the %rip-relative fixups.
67947 */
67948 - kip->insns = module_alloc(PAGE_SIZE);
67949 + kip->insns = module_alloc_exec(PAGE_SIZE);
67950 if (!kip->insns) {
67951 kfree(kip);
67952 return NULL;
67953 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
67954 */
67955 if (!list_is_singular(&kip->list)) {
67956 list_del(&kip->list);
67957 - module_free(NULL, kip->insns);
67958 + module_free_exec(NULL, kip->insns);
67959 kfree(kip);
67960 }
67961 return 1;
67962 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
67963 {
67964 int i, err = 0;
67965 unsigned long offset = 0, size = 0;
67966 - char *modname, namebuf[128];
67967 + char *modname, namebuf[KSYM_NAME_LEN];
67968 const char *symbol_name;
67969 void *addr;
67970 struct kprobe_blackpoint *kb;
67971 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
67972 const char *sym = NULL;
67973 unsigned int i = *(loff_t *) v;
67974 unsigned long offset = 0;
67975 - char *modname, namebuf[128];
67976 + char *modname, namebuf[KSYM_NAME_LEN];
67977
67978 head = &kprobe_table[i];
67979 preempt_disable();
67980 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
67981 index 4e316e1..5501eef 100644
67982 --- a/kernel/ksysfs.c
67983 +++ b/kernel/ksysfs.c
67984 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
67985 {
67986 if (count+1 > UEVENT_HELPER_PATH_LEN)
67987 return -ENOENT;
67988 + if (!capable(CAP_SYS_ADMIN))
67989 + return -EPERM;
67990 memcpy(uevent_helper, buf, count);
67991 uevent_helper[count] = '\0';
67992 if (count && uevent_helper[count-1] == '\n')
67993 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
67994 index ea9ee45..67ebc8f 100644
67995 --- a/kernel/lockdep.c
67996 +++ b/kernel/lockdep.c
67997 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
67998 end = (unsigned long) &_end,
67999 addr = (unsigned long) obj;
68000
68001 +#ifdef CONFIG_PAX_KERNEXEC
68002 + start = ktla_ktva(start);
68003 +#endif
68004 +
68005 /*
68006 * static variable?
68007 */
68008 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
68009 if (!static_obj(lock->key)) {
68010 debug_locks_off();
68011 printk("INFO: trying to register non-static key.\n");
68012 + printk("lock:%pS key:%pS.\n", lock, lock->key);
68013 printk("the code is fine but needs lockdep annotation.\n");
68014 printk("turning off the locking correctness validator.\n");
68015 dump_stack();
68016 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
68017 if (!class)
68018 return 0;
68019 }
68020 - atomic_inc((atomic_t *)&class->ops);
68021 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
68022 if (very_verbose(class)) {
68023 printk("\nacquire class [%p] %s", class->key, class->name);
68024 if (class->name_version > 1)
68025 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
68026 index 91c32a0..b2c71c5 100644
68027 --- a/kernel/lockdep_proc.c
68028 +++ b/kernel/lockdep_proc.c
68029 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
68030
68031 static void print_name(struct seq_file *m, struct lock_class *class)
68032 {
68033 - char str[128];
68034 + char str[KSYM_NAME_LEN];
68035 const char *name = class->name;
68036
68037 if (!name) {
68038 diff --git a/kernel/module.c b/kernel/module.c
68039 index 4edbd9c..165e780 100644
68040 --- a/kernel/module.c
68041 +++ b/kernel/module.c
68042 @@ -58,6 +58,7 @@
68043 #include <linux/jump_label.h>
68044 #include <linux/pfn.h>
68045 #include <linux/bsearch.h>
68046 +#include <linux/grsecurity.h>
68047
68048 #define CREATE_TRACE_POINTS
68049 #include <trace/events/module.h>
68050 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
68051
68052 /* Bounds of module allocation, for speeding __module_address.
68053 * Protected by module_mutex. */
68054 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
68055 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
68056 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
68057
68058 int register_module_notifier(struct notifier_block * nb)
68059 {
68060 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
68061 return true;
68062
68063 list_for_each_entry_rcu(mod, &modules, list) {
68064 - struct symsearch arr[] = {
68065 + struct symsearch modarr[] = {
68066 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
68067 NOT_GPL_ONLY, false },
68068 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
68069 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
68070 #endif
68071 };
68072
68073 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
68074 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
68075 return true;
68076 }
68077 return false;
68078 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
68079 static int percpu_modalloc(struct module *mod,
68080 unsigned long size, unsigned long align)
68081 {
68082 - if (align > PAGE_SIZE) {
68083 + if (align-1 >= PAGE_SIZE) {
68084 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
68085 mod->name, align, PAGE_SIZE);
68086 align = PAGE_SIZE;
68087 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
68088 static ssize_t show_coresize(struct module_attribute *mattr,
68089 struct module_kobject *mk, char *buffer)
68090 {
68091 - return sprintf(buffer, "%u\n", mk->mod->core_size);
68092 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
68093 }
68094
68095 static struct module_attribute modinfo_coresize =
68096 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
68097 static ssize_t show_initsize(struct module_attribute *mattr,
68098 struct module_kobject *mk, char *buffer)
68099 {
68100 - return sprintf(buffer, "%u\n", mk->mod->init_size);
68101 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
68102 }
68103
68104 static struct module_attribute modinfo_initsize =
68105 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
68106 */
68107 #ifdef CONFIG_SYSFS
68108
68109 -#ifdef CONFIG_KALLSYMS
68110 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68111 static inline bool sect_empty(const Elf_Shdr *sect)
68112 {
68113 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
68114 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
68115
68116 static void unset_module_core_ro_nx(struct module *mod)
68117 {
68118 - set_page_attributes(mod->module_core + mod->core_text_size,
68119 - mod->module_core + mod->core_size,
68120 + set_page_attributes(mod->module_core_rw,
68121 + mod->module_core_rw + mod->core_size_rw,
68122 set_memory_x);
68123 - set_page_attributes(mod->module_core,
68124 - mod->module_core + mod->core_ro_size,
68125 + set_page_attributes(mod->module_core_rx,
68126 + mod->module_core_rx + mod->core_size_rx,
68127 set_memory_rw);
68128 }
68129
68130 static void unset_module_init_ro_nx(struct module *mod)
68131 {
68132 - set_page_attributes(mod->module_init + mod->init_text_size,
68133 - mod->module_init + mod->init_size,
68134 + set_page_attributes(mod->module_init_rw,
68135 + mod->module_init_rw + mod->init_size_rw,
68136 set_memory_x);
68137 - set_page_attributes(mod->module_init,
68138 - mod->module_init + mod->init_ro_size,
68139 + set_page_attributes(mod->module_init_rx,
68140 + mod->module_init_rx + mod->init_size_rx,
68141 set_memory_rw);
68142 }
68143
68144 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
68145
68146 mutex_lock(&module_mutex);
68147 list_for_each_entry_rcu(mod, &modules, list) {
68148 - if ((mod->module_core) && (mod->core_text_size)) {
68149 - set_page_attributes(mod->module_core,
68150 - mod->module_core + mod->core_text_size,
68151 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
68152 + set_page_attributes(mod->module_core_rx,
68153 + mod->module_core_rx + mod->core_size_rx,
68154 set_memory_rw);
68155 }
68156 - if ((mod->module_init) && (mod->init_text_size)) {
68157 - set_page_attributes(mod->module_init,
68158 - mod->module_init + mod->init_text_size,
68159 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
68160 + set_page_attributes(mod->module_init_rx,
68161 + mod->module_init_rx + mod->init_size_rx,
68162 set_memory_rw);
68163 }
68164 }
68165 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
68166
68167 mutex_lock(&module_mutex);
68168 list_for_each_entry_rcu(mod, &modules, list) {
68169 - if ((mod->module_core) && (mod->core_text_size)) {
68170 - set_page_attributes(mod->module_core,
68171 - mod->module_core + mod->core_text_size,
68172 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
68173 + set_page_attributes(mod->module_core_rx,
68174 + mod->module_core_rx + mod->core_size_rx,
68175 set_memory_ro);
68176 }
68177 - if ((mod->module_init) && (mod->init_text_size)) {
68178 - set_page_attributes(mod->module_init,
68179 - mod->module_init + mod->init_text_size,
68180 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
68181 + set_page_attributes(mod->module_init_rx,
68182 + mod->module_init_rx + mod->init_size_rx,
68183 set_memory_ro);
68184 }
68185 }
68186 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
68187
68188 /* This may be NULL, but that's OK */
68189 unset_module_init_ro_nx(mod);
68190 - module_free(mod, mod->module_init);
68191 + module_free(mod, mod->module_init_rw);
68192 + module_free_exec(mod, mod->module_init_rx);
68193 kfree(mod->args);
68194 percpu_modfree(mod);
68195
68196 /* Free lock-classes: */
68197 - lockdep_free_key_range(mod->module_core, mod->core_size);
68198 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
68199 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
68200
68201 /* Finally, free the core (containing the module structure) */
68202 unset_module_core_ro_nx(mod);
68203 - module_free(mod, mod->module_core);
68204 + module_free_exec(mod, mod->module_core_rx);
68205 + module_free(mod, mod->module_core_rw);
68206
68207 #ifdef CONFIG_MPU
68208 update_protections(current->mm);
68209 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68210 int ret = 0;
68211 const struct kernel_symbol *ksym;
68212
68213 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68214 + int is_fs_load = 0;
68215 + int register_filesystem_found = 0;
68216 + char *p;
68217 +
68218 + p = strstr(mod->args, "grsec_modharden_fs");
68219 + if (p) {
68220 + char *endptr = p + strlen("grsec_modharden_fs");
68221 + /* copy \0 as well */
68222 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
68223 + is_fs_load = 1;
68224 + }
68225 +#endif
68226 +
68227 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
68228 const char *name = info->strtab + sym[i].st_name;
68229
68230 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68231 + /* it's a real shame this will never get ripped and copied
68232 + upstream! ;(
68233 + */
68234 + if (is_fs_load && !strcmp(name, "register_filesystem"))
68235 + register_filesystem_found = 1;
68236 +#endif
68237 +
68238 switch (sym[i].st_shndx) {
68239 case SHN_COMMON:
68240 /* We compiled with -fno-common. These are not
68241 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68242 ksym = resolve_symbol_wait(mod, info, name);
68243 /* Ok if resolved. */
68244 if (ksym && !IS_ERR(ksym)) {
68245 + pax_open_kernel();
68246 sym[i].st_value = ksym->value;
68247 + pax_close_kernel();
68248 break;
68249 }
68250
68251 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68252 secbase = (unsigned long)mod_percpu(mod);
68253 else
68254 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
68255 + pax_open_kernel();
68256 sym[i].st_value += secbase;
68257 + pax_close_kernel();
68258 break;
68259 }
68260 }
68261
68262 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68263 + if (is_fs_load && !register_filesystem_found) {
68264 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
68265 + ret = -EPERM;
68266 + }
68267 +#endif
68268 +
68269 return ret;
68270 }
68271
68272 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
68273 || s->sh_entsize != ~0UL
68274 || strstarts(sname, ".init"))
68275 continue;
68276 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
68277 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68278 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
68279 + else
68280 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
68281 pr_debug("\t%s\n", sname);
68282 }
68283 - switch (m) {
68284 - case 0: /* executable */
68285 - mod->core_size = debug_align(mod->core_size);
68286 - mod->core_text_size = mod->core_size;
68287 - break;
68288 - case 1: /* RO: text and ro-data */
68289 - mod->core_size = debug_align(mod->core_size);
68290 - mod->core_ro_size = mod->core_size;
68291 - break;
68292 - case 3: /* whole core */
68293 - mod->core_size = debug_align(mod->core_size);
68294 - break;
68295 - }
68296 }
68297
68298 pr_debug("Init section allocation order:\n");
68299 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
68300 || s->sh_entsize != ~0UL
68301 || !strstarts(sname, ".init"))
68302 continue;
68303 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
68304 - | INIT_OFFSET_MASK);
68305 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68306 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
68307 + else
68308 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
68309 + s->sh_entsize |= INIT_OFFSET_MASK;
68310 pr_debug("\t%s\n", sname);
68311 }
68312 - switch (m) {
68313 - case 0: /* executable */
68314 - mod->init_size = debug_align(mod->init_size);
68315 - mod->init_text_size = mod->init_size;
68316 - break;
68317 - case 1: /* RO: text and ro-data */
68318 - mod->init_size = debug_align(mod->init_size);
68319 - mod->init_ro_size = mod->init_size;
68320 - break;
68321 - case 3: /* whole init */
68322 - mod->init_size = debug_align(mod->init_size);
68323 - break;
68324 - }
68325 }
68326 }
68327
68328 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68329
68330 /* Put symbol section at end of init part of module. */
68331 symsect->sh_flags |= SHF_ALLOC;
68332 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
68333 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
68334 info->index.sym) | INIT_OFFSET_MASK;
68335 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
68336
68337 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68338 }
68339
68340 /* Append room for core symbols at end of core part. */
68341 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
68342 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
68343 - mod->core_size += strtab_size;
68344 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
68345 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
68346 + mod->core_size_rx += strtab_size;
68347
68348 /* Put string table section at end of init part of module. */
68349 strsect->sh_flags |= SHF_ALLOC;
68350 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
68351 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
68352 info->index.str) | INIT_OFFSET_MASK;
68353 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
68354 }
68355 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68356 /* Make sure we get permanent strtab: don't use info->strtab. */
68357 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
68358
68359 + pax_open_kernel();
68360 +
68361 /* Set types up while we still have access to sections. */
68362 for (i = 0; i < mod->num_symtab; i++)
68363 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
68364
68365 - mod->core_symtab = dst = mod->module_core + info->symoffs;
68366 - mod->core_strtab = s = mod->module_core + info->stroffs;
68367 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
68368 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
68369 src = mod->symtab;
68370 *dst = *src;
68371 *s++ = 0;
68372 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68373 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
68374 }
68375 mod->core_num_syms = ndst;
68376 +
68377 + pax_close_kernel();
68378 }
68379 #else
68380 static inline void layout_symtab(struct module *mod, struct load_info *info)
68381 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
68382 return size == 0 ? NULL : vmalloc_exec(size);
68383 }
68384
68385 -static void *module_alloc_update_bounds(unsigned long size)
68386 +static void *module_alloc_update_bounds_rw(unsigned long size)
68387 {
68388 void *ret = module_alloc(size);
68389
68390 if (ret) {
68391 mutex_lock(&module_mutex);
68392 /* Update module bounds. */
68393 - if ((unsigned long)ret < module_addr_min)
68394 - module_addr_min = (unsigned long)ret;
68395 - if ((unsigned long)ret + size > module_addr_max)
68396 - module_addr_max = (unsigned long)ret + size;
68397 + if ((unsigned long)ret < module_addr_min_rw)
68398 + module_addr_min_rw = (unsigned long)ret;
68399 + if ((unsigned long)ret + size > module_addr_max_rw)
68400 + module_addr_max_rw = (unsigned long)ret + size;
68401 + mutex_unlock(&module_mutex);
68402 + }
68403 + return ret;
68404 +}
68405 +
68406 +static void *module_alloc_update_bounds_rx(unsigned long size)
68407 +{
68408 + void *ret = module_alloc_exec(size);
68409 +
68410 + if (ret) {
68411 + mutex_lock(&module_mutex);
68412 + /* Update module bounds. */
68413 + if ((unsigned long)ret < module_addr_min_rx)
68414 + module_addr_min_rx = (unsigned long)ret;
68415 + if ((unsigned long)ret + size > module_addr_max_rx)
68416 + module_addr_max_rx = (unsigned long)ret + size;
68417 mutex_unlock(&module_mutex);
68418 }
68419 return ret;
68420 @@ -2544,8 +2582,14 @@ static struct module *setup_load_info(struct load_info *info)
68421 static int check_modinfo(struct module *mod, struct load_info *info)
68422 {
68423 const char *modmagic = get_modinfo(info, "vermagic");
68424 + const char *license = get_modinfo(info, "license");
68425 int err;
68426
68427 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
68428 + if (!license || !license_is_gpl_compatible(license))
68429 + return -ENOEXEC;
68430 +#endif
68431 +
68432 /* This is allowed: modprobe --force will invalidate it. */
68433 if (!modmagic) {
68434 err = try_to_force_load(mod, "bad vermagic");
68435 @@ -2568,7 +2612,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
68436 }
68437
68438 /* Set up license info based on the info section */
68439 - set_license(mod, get_modinfo(info, "license"));
68440 + set_license(mod, license);
68441
68442 return 0;
68443 }
68444 @@ -2662,7 +2706,7 @@ static int move_module(struct module *mod, struct load_info *info)
68445 void *ptr;
68446
68447 /* Do the allocs. */
68448 - ptr = module_alloc_update_bounds(mod->core_size);
68449 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
68450 /*
68451 * The pointer to this block is stored in the module structure
68452 * which is inside the block. Just mark it as not being a
68453 @@ -2672,23 +2716,50 @@ static int move_module(struct module *mod, struct load_info *info)
68454 if (!ptr)
68455 return -ENOMEM;
68456
68457 - memset(ptr, 0, mod->core_size);
68458 - mod->module_core = ptr;
68459 + memset(ptr, 0, mod->core_size_rw);
68460 + mod->module_core_rw = ptr;
68461
68462 - ptr = module_alloc_update_bounds(mod->init_size);
68463 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68464 /*
68465 * The pointer to this block is stored in the module structure
68466 * which is inside the block. This block doesn't need to be
68467 * scanned as it contains data and code that will be freed
68468 * after the module is initialized.
68469 */
68470 - kmemleak_ignore(ptr);
68471 - if (!ptr && mod->init_size) {
68472 - module_free(mod, mod->module_core);
68473 + kmemleak_not_leak(ptr);
68474 + if (!ptr && mod->init_size_rw) {
68475 + module_free(mod, mod->module_core_rw);
68476 return -ENOMEM;
68477 }
68478 - memset(ptr, 0, mod->init_size);
68479 - mod->module_init = ptr;
68480 + memset(ptr, 0, mod->init_size_rw);
68481 + mod->module_init_rw = ptr;
68482 +
68483 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68484 + kmemleak_not_leak(ptr);
68485 + if (!ptr) {
68486 + module_free(mod, mod->module_init_rw);
68487 + module_free(mod, mod->module_core_rw);
68488 + return -ENOMEM;
68489 + }
68490 +
68491 + pax_open_kernel();
68492 + memset(ptr, 0, mod->core_size_rx);
68493 + pax_close_kernel();
68494 + mod->module_core_rx = ptr;
68495 +
68496 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68497 + kmemleak_not_leak(ptr);
68498 + if (!ptr && mod->init_size_rx) {
68499 + module_free_exec(mod, mod->module_core_rx);
68500 + module_free(mod, mod->module_init_rw);
68501 + module_free(mod, mod->module_core_rw);
68502 + return -ENOMEM;
68503 + }
68504 +
68505 + pax_open_kernel();
68506 + memset(ptr, 0, mod->init_size_rx);
68507 + pax_close_kernel();
68508 + mod->module_init_rx = ptr;
68509
68510 /* Transfer each section which specifies SHF_ALLOC */
68511 pr_debug("final section addresses:\n");
68512 @@ -2699,16 +2770,45 @@ static int move_module(struct module *mod, struct load_info *info)
68513 if (!(shdr->sh_flags & SHF_ALLOC))
68514 continue;
68515
68516 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
68517 - dest = mod->module_init
68518 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68519 - else
68520 - dest = mod->module_core + shdr->sh_entsize;
68521 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
68522 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68523 + dest = mod->module_init_rw
68524 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68525 + else
68526 + dest = mod->module_init_rx
68527 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68528 + } else {
68529 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68530 + dest = mod->module_core_rw + shdr->sh_entsize;
68531 + else
68532 + dest = mod->module_core_rx + shdr->sh_entsize;
68533 + }
68534 +
68535 + if (shdr->sh_type != SHT_NOBITS) {
68536 +
68537 +#ifdef CONFIG_PAX_KERNEXEC
68538 +#ifdef CONFIG_X86_64
68539 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
68540 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68541 +#endif
68542 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
68543 + pax_open_kernel();
68544 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68545 + pax_close_kernel();
68546 + } else
68547 +#endif
68548
68549 - if (shdr->sh_type != SHT_NOBITS)
68550 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68551 + }
68552 /* Update sh_addr to point to copy in image. */
68553 - shdr->sh_addr = (unsigned long)dest;
68554 +
68555 +#ifdef CONFIG_PAX_KERNEXEC
68556 + if (shdr->sh_flags & SHF_EXECINSTR)
68557 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
68558 + else
68559 +#endif
68560 +
68561 + shdr->sh_addr = (unsigned long)dest;
68562 pr_debug("\t0x%lx %s\n",
68563 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
68564 }
68565 @@ -2759,12 +2859,12 @@ static void flush_module_icache(const struct module *mod)
68566 * Do it before processing of module parameters, so the module
68567 * can provide parameter accessor functions of its own.
68568 */
68569 - if (mod->module_init)
68570 - flush_icache_range((unsigned long)mod->module_init,
68571 - (unsigned long)mod->module_init
68572 - + mod->init_size);
68573 - flush_icache_range((unsigned long)mod->module_core,
68574 - (unsigned long)mod->module_core + mod->core_size);
68575 + if (mod->module_init_rx)
68576 + flush_icache_range((unsigned long)mod->module_init_rx,
68577 + (unsigned long)mod->module_init_rx
68578 + + mod->init_size_rx);
68579 + flush_icache_range((unsigned long)mod->module_core_rx,
68580 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
68581
68582 set_fs(old_fs);
68583 }
68584 @@ -2834,8 +2934,10 @@ out:
68585 static void module_deallocate(struct module *mod, struct load_info *info)
68586 {
68587 percpu_modfree(mod);
68588 - module_free(mod, mod->module_init);
68589 - module_free(mod, mod->module_core);
68590 + module_free_exec(mod, mod->module_init_rx);
68591 + module_free_exec(mod, mod->module_core_rx);
68592 + module_free(mod, mod->module_init_rw);
68593 + module_free(mod, mod->module_core_rw);
68594 }
68595
68596 int __weak module_finalize(const Elf_Ehdr *hdr,
68597 @@ -2848,7 +2950,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
68598 static int post_relocation(struct module *mod, const struct load_info *info)
68599 {
68600 /* Sort exception table now relocations are done. */
68601 + pax_open_kernel();
68602 sort_extable(mod->extable, mod->extable + mod->num_exentries);
68603 + pax_close_kernel();
68604
68605 /* Copy relocated percpu area over. */
68606 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
68607 @@ -2899,9 +3003,38 @@ static struct module *load_module(void __user *umod,
68608 if (err)
68609 goto free_unload;
68610
68611 + /* Now copy in args */
68612 + mod->args = strndup_user(uargs, ~0UL >> 1);
68613 + if (IS_ERR(mod->args)) {
68614 + err = PTR_ERR(mod->args);
68615 + goto free_unload;
68616 + }
68617 +
68618 /* Set up MODINFO_ATTR fields */
68619 setup_modinfo(mod, &info);
68620
68621 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68622 + {
68623 + char *p, *p2;
68624 +
68625 + if (strstr(mod->args, "grsec_modharden_netdev")) {
68626 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68627 + err = -EPERM;
68628 + goto free_modinfo;
68629 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68630 + p += strlen("grsec_modharden_normal");
68631 + p2 = strstr(p, "_");
68632 + if (p2) {
68633 + *p2 = '\0';
68634 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68635 + *p2 = '_';
68636 + }
68637 + err = -EPERM;
68638 + goto free_modinfo;
68639 + }
68640 + }
68641 +#endif
68642 +
68643 /* Fix up syms, so that st_value is a pointer to location. */
68644 err = simplify_symbols(mod, &info);
68645 if (err < 0)
68646 @@ -2917,13 +3050,6 @@ static struct module *load_module(void __user *umod,
68647
68648 flush_module_icache(mod);
68649
68650 - /* Now copy in args */
68651 - mod->args = strndup_user(uargs, ~0UL >> 1);
68652 - if (IS_ERR(mod->args)) {
68653 - err = PTR_ERR(mod->args);
68654 - goto free_arch_cleanup;
68655 - }
68656 -
68657 /* Mark state as coming so strong_try_module_get() ignores us. */
68658 mod->state = MODULE_STATE_COMING;
68659
68660 @@ -2981,11 +3107,10 @@ static struct module *load_module(void __user *umod,
68661 unlock:
68662 mutex_unlock(&module_mutex);
68663 synchronize_sched();
68664 - kfree(mod->args);
68665 - free_arch_cleanup:
68666 module_arch_cleanup(mod);
68667 free_modinfo:
68668 free_modinfo(mod);
68669 + kfree(mod->args);
68670 free_unload:
68671 module_unload_free(mod);
68672 free_module:
68673 @@ -3026,16 +3151,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68674 MODULE_STATE_COMING, mod);
68675
68676 /* Set RO and NX regions for core */
68677 - set_section_ro_nx(mod->module_core,
68678 - mod->core_text_size,
68679 - mod->core_ro_size,
68680 - mod->core_size);
68681 + set_section_ro_nx(mod->module_core_rx,
68682 + mod->core_size_rx,
68683 + mod->core_size_rx,
68684 + mod->core_size_rx);
68685
68686 /* Set RO and NX regions for init */
68687 - set_section_ro_nx(mod->module_init,
68688 - mod->init_text_size,
68689 - mod->init_ro_size,
68690 - mod->init_size);
68691 + set_section_ro_nx(mod->module_init_rx,
68692 + mod->init_size_rx,
68693 + mod->init_size_rx,
68694 + mod->init_size_rx);
68695
68696 do_mod_ctors(mod);
68697 /* Start the module */
68698 @@ -3081,11 +3206,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68699 mod->strtab = mod->core_strtab;
68700 #endif
68701 unset_module_init_ro_nx(mod);
68702 - module_free(mod, mod->module_init);
68703 - mod->module_init = NULL;
68704 - mod->init_size = 0;
68705 - mod->init_ro_size = 0;
68706 - mod->init_text_size = 0;
68707 + module_free(mod, mod->module_init_rw);
68708 + module_free_exec(mod, mod->module_init_rx);
68709 + mod->module_init_rw = NULL;
68710 + mod->module_init_rx = NULL;
68711 + mod->init_size_rw = 0;
68712 + mod->init_size_rx = 0;
68713 mutex_unlock(&module_mutex);
68714
68715 return 0;
68716 @@ -3116,10 +3242,16 @@ static const char *get_ksymbol(struct module *mod,
68717 unsigned long nextval;
68718
68719 /* At worse, next value is at end of module */
68720 - if (within_module_init(addr, mod))
68721 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
68722 + if (within_module_init_rx(addr, mod))
68723 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68724 + else if (within_module_init_rw(addr, mod))
68725 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68726 + else if (within_module_core_rx(addr, mod))
68727 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68728 + else if (within_module_core_rw(addr, mod))
68729 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68730 else
68731 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
68732 + return NULL;
68733
68734 /* Scan for closest preceding symbol, and next symbol. (ELF
68735 starts real symbols at 1). */
68736 @@ -3354,7 +3486,7 @@ static int m_show(struct seq_file *m, void *p)
68737 char buf[8];
68738
68739 seq_printf(m, "%s %u",
68740 - mod->name, mod->init_size + mod->core_size);
68741 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68742 print_unload_info(m, mod);
68743
68744 /* Informative for users. */
68745 @@ -3363,7 +3495,7 @@ static int m_show(struct seq_file *m, void *p)
68746 mod->state == MODULE_STATE_COMING ? "Loading":
68747 "Live");
68748 /* Used by oprofile and other similar tools. */
68749 - seq_printf(m, " 0x%pK", mod->module_core);
68750 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68751
68752 /* Taints info */
68753 if (mod->taints)
68754 @@ -3399,7 +3531,17 @@ static const struct file_operations proc_modules_operations = {
68755
68756 static int __init proc_modules_init(void)
68757 {
68758 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68759 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68760 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68761 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68762 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68763 +#else
68764 proc_create("modules", 0, NULL, &proc_modules_operations);
68765 +#endif
68766 +#else
68767 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68768 +#endif
68769 return 0;
68770 }
68771 module_init(proc_modules_init);
68772 @@ -3458,12 +3600,12 @@ struct module *__module_address(unsigned long addr)
68773 {
68774 struct module *mod;
68775
68776 - if (addr < module_addr_min || addr > module_addr_max)
68777 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68778 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
68779 return NULL;
68780
68781 list_for_each_entry_rcu(mod, &modules, list)
68782 - if (within_module_core(addr, mod)
68783 - || within_module_init(addr, mod))
68784 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
68785 return mod;
68786 return NULL;
68787 }
68788 @@ -3497,11 +3639,20 @@ bool is_module_text_address(unsigned long addr)
68789 */
68790 struct module *__module_text_address(unsigned long addr)
68791 {
68792 - struct module *mod = __module_address(addr);
68793 + struct module *mod;
68794 +
68795 +#ifdef CONFIG_X86_32
68796 + addr = ktla_ktva(addr);
68797 +#endif
68798 +
68799 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68800 + return NULL;
68801 +
68802 + mod = __module_address(addr);
68803 +
68804 if (mod) {
68805 /* Make sure it's within the text section. */
68806 - if (!within(addr, mod->module_init, mod->init_text_size)
68807 - && !within(addr, mod->module_core, mod->core_text_size))
68808 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68809 mod = NULL;
68810 }
68811 return mod;
68812 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68813 index 7e3443f..b2a1e6b 100644
68814 --- a/kernel/mutex-debug.c
68815 +++ b/kernel/mutex-debug.c
68816 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68817 }
68818
68819 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68820 - struct thread_info *ti)
68821 + struct task_struct *task)
68822 {
68823 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68824
68825 /* Mark the current thread as blocked on the lock: */
68826 - ti->task->blocked_on = waiter;
68827 + task->blocked_on = waiter;
68828 }
68829
68830 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68831 - struct thread_info *ti)
68832 + struct task_struct *task)
68833 {
68834 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68835 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68836 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68837 - ti->task->blocked_on = NULL;
68838 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
68839 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68840 + task->blocked_on = NULL;
68841
68842 list_del_init(&waiter->list);
68843 waiter->task = NULL;
68844 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68845 index 0799fd3..d06ae3b 100644
68846 --- a/kernel/mutex-debug.h
68847 +++ b/kernel/mutex-debug.h
68848 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68849 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68850 extern void debug_mutex_add_waiter(struct mutex *lock,
68851 struct mutex_waiter *waiter,
68852 - struct thread_info *ti);
68853 + struct task_struct *task);
68854 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68855 - struct thread_info *ti);
68856 + struct task_struct *task);
68857 extern void debug_mutex_unlock(struct mutex *lock);
68858 extern void debug_mutex_init(struct mutex *lock, const char *name,
68859 struct lock_class_key *key);
68860 diff --git a/kernel/mutex.c b/kernel/mutex.c
68861 index a307cc9..27fd2e9 100644
68862 --- a/kernel/mutex.c
68863 +++ b/kernel/mutex.c
68864 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68865 spin_lock_mutex(&lock->wait_lock, flags);
68866
68867 debug_mutex_lock_common(lock, &waiter);
68868 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68869 + debug_mutex_add_waiter(lock, &waiter, task);
68870
68871 /* add waiting tasks to the end of the waitqueue (FIFO): */
68872 list_add_tail(&waiter.list, &lock->wait_list);
68873 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68874 * TASK_UNINTERRUPTIBLE case.)
68875 */
68876 if (unlikely(signal_pending_state(state, task))) {
68877 - mutex_remove_waiter(lock, &waiter,
68878 - task_thread_info(task));
68879 + mutex_remove_waiter(lock, &waiter, task);
68880 mutex_release(&lock->dep_map, 1, ip);
68881 spin_unlock_mutex(&lock->wait_lock, flags);
68882
68883 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68884 done:
68885 lock_acquired(&lock->dep_map, ip);
68886 /* got the lock - rejoice! */
68887 - mutex_remove_waiter(lock, &waiter, current_thread_info());
68888 + mutex_remove_waiter(lock, &waiter, task);
68889 mutex_set_owner(lock);
68890
68891 /* set it to 0 if there are no waiters left: */
68892 diff --git a/kernel/panic.c b/kernel/panic.c
68893 index d2a5f4e..5edc1d9 100644
68894 --- a/kernel/panic.c
68895 +++ b/kernel/panic.c
68896 @@ -75,6 +75,14 @@ void panic(const char *fmt, ...)
68897 int state = 0;
68898
68899 /*
68900 + * Disable local interrupts. This will prevent panic_smp_self_stop
68901 + * from deadlocking the first cpu that invokes the panic, since
68902 + * there is nothing to prevent an interrupt handler (that runs
68903 + * after the panic_lock is acquired) from invoking panic again.
68904 + */
68905 + local_irq_disable();
68906 +
68907 + /*
68908 * It's possible to come here directly from a panic-assertion and
68909 * not have preempt disabled. Some functions called from here want
68910 * preempt to be disabled. No point enabling it later though...
68911 @@ -402,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
68912 const char *board;
68913
68914 printk(KERN_WARNING "------------[ cut here ]------------\n");
68915 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68916 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68917 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68918 if (board)
68919 printk(KERN_WARNING "Hardware name: %s\n", board);
68920 @@ -457,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68921 */
68922 void __stack_chk_fail(void)
68923 {
68924 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
68925 + dump_stack();
68926 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68927 __builtin_return_address(0));
68928 }
68929 EXPORT_SYMBOL(__stack_chk_fail);
68930 diff --git a/kernel/pid.c b/kernel/pid.c
68931 index e86b291a..e8b0fb5 100644
68932 --- a/kernel/pid.c
68933 +++ b/kernel/pid.c
68934 @@ -33,6 +33,7 @@
68935 #include <linux/rculist.h>
68936 #include <linux/bootmem.h>
68937 #include <linux/hash.h>
68938 +#include <linux/security.h>
68939 #include <linux/pid_namespace.h>
68940 #include <linux/init_task.h>
68941 #include <linux/syscalls.h>
68942 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
68943
68944 int pid_max = PID_MAX_DEFAULT;
68945
68946 -#define RESERVED_PIDS 300
68947 +#define RESERVED_PIDS 500
68948
68949 int pid_max_min = RESERVED_PIDS + 1;
68950 int pid_max_max = PID_MAX_LIMIT;
68951 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
68952 */
68953 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68954 {
68955 + struct task_struct *task;
68956 +
68957 rcu_lockdep_assert(rcu_read_lock_held(),
68958 "find_task_by_pid_ns() needs rcu_read_lock()"
68959 " protection");
68960 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68961 +
68962 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68963 +
68964 + if (gr_pid_is_chrooted(task))
68965 + return NULL;
68966 +
68967 + return task;
68968 }
68969
68970 struct task_struct *find_task_by_vpid(pid_t vnr)
68971 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
68972 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68973 }
68974
68975 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68976 +{
68977 + rcu_lockdep_assert(rcu_read_lock_held(),
68978 + "find_task_by_pid_ns() needs rcu_read_lock()"
68979 + " protection");
68980 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68981 +}
68982 +
68983 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68984 {
68985 struct pid *pid;
68986 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
68987 index b3c7fd5..6144bab 100644
68988 --- a/kernel/pid_namespace.c
68989 +++ b/kernel/pid_namespace.c
68990 @@ -232,15 +232,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
68991 */
68992
68993 tmp.data = &current->nsproxy->pid_ns->last_pid;
68994 - return proc_dointvec(&tmp, write, buffer, lenp, ppos);
68995 + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
68996 }
68997
68998 +extern int pid_max;
68999 +static int zero = 0;
69000 static struct ctl_table pid_ns_ctl_table[] = {
69001 {
69002 .procname = "ns_last_pid",
69003 .maxlen = sizeof(int),
69004 .mode = 0666, /* permissions are checked in the handler */
69005 .proc_handler = pid_ns_ctl_handler,
69006 + .extra1 = &zero,
69007 + .extra2 = &pid_max,
69008 },
69009 { }
69010 };
69011 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
69012 index 125cb67..a4d1c30 100644
69013 --- a/kernel/posix-cpu-timers.c
69014 +++ b/kernel/posix-cpu-timers.c
69015 @@ -6,6 +6,7 @@
69016 #include <linux/posix-timers.h>
69017 #include <linux/errno.h>
69018 #include <linux/math64.h>
69019 +#include <linux/security.h>
69020 #include <asm/uaccess.h>
69021 #include <linux/kernel_stat.h>
69022 #include <trace/events/timer.h>
69023 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
69024
69025 static __init int init_posix_cpu_timers(void)
69026 {
69027 - struct k_clock process = {
69028 + static struct k_clock process = {
69029 .clock_getres = process_cpu_clock_getres,
69030 .clock_get = process_cpu_clock_get,
69031 .timer_create = process_cpu_timer_create,
69032 .nsleep = process_cpu_nsleep,
69033 .nsleep_restart = process_cpu_nsleep_restart,
69034 };
69035 - struct k_clock thread = {
69036 + static struct k_clock thread = {
69037 .clock_getres = thread_cpu_clock_getres,
69038 .clock_get = thread_cpu_clock_get,
69039 .timer_create = thread_cpu_timer_create,
69040 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
69041 index 69185ae..cc2847a 100644
69042 --- a/kernel/posix-timers.c
69043 +++ b/kernel/posix-timers.c
69044 @@ -43,6 +43,7 @@
69045 #include <linux/idr.h>
69046 #include <linux/posix-clock.h>
69047 #include <linux/posix-timers.h>
69048 +#include <linux/grsecurity.h>
69049 #include <linux/syscalls.h>
69050 #include <linux/wait.h>
69051 #include <linux/workqueue.h>
69052 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
69053 * which we beg off on and pass to do_sys_settimeofday().
69054 */
69055
69056 -static struct k_clock posix_clocks[MAX_CLOCKS];
69057 +static struct k_clock *posix_clocks[MAX_CLOCKS];
69058
69059 /*
69060 * These ones are defined below.
69061 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
69062 */
69063 static __init int init_posix_timers(void)
69064 {
69065 - struct k_clock clock_realtime = {
69066 + static struct k_clock clock_realtime = {
69067 .clock_getres = hrtimer_get_res,
69068 .clock_get = posix_clock_realtime_get,
69069 .clock_set = posix_clock_realtime_set,
69070 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
69071 .timer_get = common_timer_get,
69072 .timer_del = common_timer_del,
69073 };
69074 - struct k_clock clock_monotonic = {
69075 + static struct k_clock clock_monotonic = {
69076 .clock_getres = hrtimer_get_res,
69077 .clock_get = posix_ktime_get_ts,
69078 .nsleep = common_nsleep,
69079 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
69080 .timer_get = common_timer_get,
69081 .timer_del = common_timer_del,
69082 };
69083 - struct k_clock clock_monotonic_raw = {
69084 + static struct k_clock clock_monotonic_raw = {
69085 .clock_getres = hrtimer_get_res,
69086 .clock_get = posix_get_monotonic_raw,
69087 };
69088 - struct k_clock clock_realtime_coarse = {
69089 + static struct k_clock clock_realtime_coarse = {
69090 .clock_getres = posix_get_coarse_res,
69091 .clock_get = posix_get_realtime_coarse,
69092 };
69093 - struct k_clock clock_monotonic_coarse = {
69094 + static struct k_clock clock_monotonic_coarse = {
69095 .clock_getres = posix_get_coarse_res,
69096 .clock_get = posix_get_monotonic_coarse,
69097 };
69098 - struct k_clock clock_boottime = {
69099 + static struct k_clock clock_boottime = {
69100 .clock_getres = hrtimer_get_res,
69101 .clock_get = posix_get_boottime,
69102 .nsleep = common_nsleep,
69103 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
69104 return;
69105 }
69106
69107 - posix_clocks[clock_id] = *new_clock;
69108 + posix_clocks[clock_id] = new_clock;
69109 }
69110 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
69111
69112 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
69113 return (id & CLOCKFD_MASK) == CLOCKFD ?
69114 &clock_posix_dynamic : &clock_posix_cpu;
69115
69116 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
69117 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
69118 return NULL;
69119 - return &posix_clocks[id];
69120 + return posix_clocks[id];
69121 }
69122
69123 static int common_timer_create(struct k_itimer *new_timer)
69124 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
69125 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
69126 return -EFAULT;
69127
69128 + /* only the CLOCK_REALTIME clock can be set, all other clocks
69129 + have their clock_set fptr set to a nosettime dummy function
69130 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
69131 + call common_clock_set, which calls do_sys_settimeofday, which
69132 + we hook
69133 + */
69134 +
69135 return kc->clock_set(which_clock, &new_tp);
69136 }
69137
69138 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
69139 index d523593..68197a4 100644
69140 --- a/kernel/power/poweroff.c
69141 +++ b/kernel/power/poweroff.c
69142 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
69143 .enable_mask = SYSRQ_ENABLE_BOOT,
69144 };
69145
69146 -static int pm_sysrq_init(void)
69147 +static int __init pm_sysrq_init(void)
69148 {
69149 register_sysrq_key('o', &sysrq_poweroff_op);
69150 return 0;
69151 diff --git a/kernel/power/process.c b/kernel/power/process.c
69152 index 19db29f..33b52b6 100644
69153 --- a/kernel/power/process.c
69154 +++ b/kernel/power/process.c
69155 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
69156 u64 elapsed_csecs64;
69157 unsigned int elapsed_csecs;
69158 bool wakeup = false;
69159 + bool timedout = false;
69160
69161 do_gettimeofday(&start);
69162
69163 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
69164
69165 while (true) {
69166 todo = 0;
69167 + if (time_after(jiffies, end_time))
69168 + timedout = true;
69169 read_lock(&tasklist_lock);
69170 do_each_thread(g, p) {
69171 if (p == current || !freeze_task(p))
69172 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
69173 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
69174 * transition can't race with task state testing here.
69175 */
69176 - if (!task_is_stopped_or_traced(p) &&
69177 - !freezer_should_skip(p))
69178 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
69179 todo++;
69180 + if (timedout) {
69181 + printk(KERN_ERR "Task refusing to freeze:\n");
69182 + sched_show_task(p);
69183 + }
69184 + }
69185 } while_each_thread(g, p);
69186 read_unlock(&tasklist_lock);
69187
69188 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
69189 todo += wq_busy;
69190 }
69191
69192 - if (!todo || time_after(jiffies, end_time))
69193 + if (!todo || timedout)
69194 break;
69195
69196 if (pm_wakeup_pending()) {
69197 diff --git a/kernel/printk.c b/kernel/printk.c
69198 index 146827f..a501fec 100644
69199 --- a/kernel/printk.c
69200 +++ b/kernel/printk.c
69201 @@ -782,6 +782,11 @@ static int check_syslog_permissions(int type, bool from_file)
69202 if (from_file && type != SYSLOG_ACTION_OPEN)
69203 return 0;
69204
69205 +#ifdef CONFIG_GRKERNSEC_DMESG
69206 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
69207 + return -EPERM;
69208 +#endif
69209 +
69210 if (syslog_action_restricted(type)) {
69211 if (capable(CAP_SYSLOG))
69212 return 0;
69213 diff --git a/kernel/profile.c b/kernel/profile.c
69214 index 76b8e77..a2930e8 100644
69215 --- a/kernel/profile.c
69216 +++ b/kernel/profile.c
69217 @@ -39,7 +39,7 @@ struct profile_hit {
69218 /* Oprofile timer tick hook */
69219 static int (*timer_hook)(struct pt_regs *) __read_mostly;
69220
69221 -static atomic_t *prof_buffer;
69222 +static atomic_unchecked_t *prof_buffer;
69223 static unsigned long prof_len, prof_shift;
69224
69225 int prof_on __read_mostly;
69226 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
69227 hits[i].pc = 0;
69228 continue;
69229 }
69230 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69231 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69232 hits[i].hits = hits[i].pc = 0;
69233 }
69234 }
69235 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69236 * Add the current hit(s) and flush the write-queue out
69237 * to the global buffer:
69238 */
69239 - atomic_add(nr_hits, &prof_buffer[pc]);
69240 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
69241 for (i = 0; i < NR_PROFILE_HIT; ++i) {
69242 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69243 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69244 hits[i].pc = hits[i].hits = 0;
69245 }
69246 out:
69247 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69248 {
69249 unsigned long pc;
69250 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
69251 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69252 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69253 }
69254 #endif /* !CONFIG_SMP */
69255
69256 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
69257 return -EFAULT;
69258 buf++; p++; count--; read++;
69259 }
69260 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
69261 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
69262 if (copy_to_user(buf, (void *)pnt, count))
69263 return -EFAULT;
69264 read += count;
69265 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
69266 }
69267 #endif
69268 profile_discard_flip_buffers();
69269 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
69270 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
69271 return count;
69272 }
69273
69274 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
69275 index a232bb5..2a65ef9 100644
69276 --- a/kernel/ptrace.c
69277 +++ b/kernel/ptrace.c
69278 @@ -279,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
69279
69280 if (seize)
69281 flags |= PT_SEIZED;
69282 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
69283 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
69284 flags |= PT_PTRACE_CAP;
69285 task->ptrace = flags;
69286
69287 @@ -486,7 +486,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
69288 break;
69289 return -EIO;
69290 }
69291 - if (copy_to_user(dst, buf, retval))
69292 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
69293 return -EFAULT;
69294 copied += retval;
69295 src += retval;
69296 @@ -671,7 +671,7 @@ int ptrace_request(struct task_struct *child, long request,
69297 bool seized = child->ptrace & PT_SEIZED;
69298 int ret = -EIO;
69299 siginfo_t siginfo, *si;
69300 - void __user *datavp = (void __user *) data;
69301 + void __user *datavp = (__force void __user *) data;
69302 unsigned long __user *datalp = datavp;
69303 unsigned long flags;
69304
69305 @@ -873,14 +873,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
69306 goto out;
69307 }
69308
69309 + if (gr_handle_ptrace(child, request)) {
69310 + ret = -EPERM;
69311 + goto out_put_task_struct;
69312 + }
69313 +
69314 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69315 ret = ptrace_attach(child, request, addr, data);
69316 /*
69317 * Some architectures need to do book-keeping after
69318 * a ptrace attach.
69319 */
69320 - if (!ret)
69321 + if (!ret) {
69322 arch_ptrace_attach(child);
69323 + gr_audit_ptrace(child);
69324 + }
69325 goto out_put_task_struct;
69326 }
69327
69328 @@ -906,7 +913,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
69329 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
69330 if (copied != sizeof(tmp))
69331 return -EIO;
69332 - return put_user(tmp, (unsigned long __user *)data);
69333 + return put_user(tmp, (__force unsigned long __user *)data);
69334 }
69335
69336 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
69337 @@ -1016,14 +1023,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
69338 goto out;
69339 }
69340
69341 + if (gr_handle_ptrace(child, request)) {
69342 + ret = -EPERM;
69343 + goto out_put_task_struct;
69344 + }
69345 +
69346 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69347 ret = ptrace_attach(child, request, addr, data);
69348 /*
69349 * Some architectures need to do book-keeping after
69350 * a ptrace attach.
69351 */
69352 - if (!ret)
69353 + if (!ret) {
69354 arch_ptrace_attach(child);
69355 + gr_audit_ptrace(child);
69356 + }
69357 goto out_put_task_struct;
69358 }
69359
69360 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
69361 index 37a5444..eec170a 100644
69362 --- a/kernel/rcutiny.c
69363 +++ b/kernel/rcutiny.c
69364 @@ -46,7 +46,7 @@
69365 struct rcu_ctrlblk;
69366 static void invoke_rcu_callbacks(void);
69367 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
69368 -static void rcu_process_callbacks(struct softirq_action *unused);
69369 +static void rcu_process_callbacks(void);
69370 static void __call_rcu(struct rcu_head *head,
69371 void (*func)(struct rcu_head *rcu),
69372 struct rcu_ctrlblk *rcp);
69373 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
69374 rcu_is_callbacks_kthread()));
69375 }
69376
69377 -static void rcu_process_callbacks(struct softirq_action *unused)
69378 +static void rcu_process_callbacks(void)
69379 {
69380 __rcu_process_callbacks(&rcu_sched_ctrlblk);
69381 __rcu_process_callbacks(&rcu_bh_ctrlblk);
69382 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
69383 index fc31a2d..be2ec04 100644
69384 --- a/kernel/rcutiny_plugin.h
69385 +++ b/kernel/rcutiny_plugin.h
69386 @@ -939,7 +939,7 @@ static int rcu_kthread(void *arg)
69387 have_rcu_kthread_work = morework;
69388 local_irq_restore(flags);
69389 if (work)
69390 - rcu_process_callbacks(NULL);
69391 + rcu_process_callbacks();
69392 schedule_timeout_interruptible(1); /* Leave CPU for others. */
69393 }
69394
69395 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
69396 index e66b34a..4b8b626 100644
69397 --- a/kernel/rcutorture.c
69398 +++ b/kernel/rcutorture.c
69399 @@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
69400 { 0 };
69401 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69402 { 0 };
69403 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69404 -static atomic_t n_rcu_torture_alloc;
69405 -static atomic_t n_rcu_torture_alloc_fail;
69406 -static atomic_t n_rcu_torture_free;
69407 -static atomic_t n_rcu_torture_mberror;
69408 -static atomic_t n_rcu_torture_error;
69409 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69410 +static atomic_unchecked_t n_rcu_torture_alloc;
69411 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
69412 +static atomic_unchecked_t n_rcu_torture_free;
69413 +static atomic_unchecked_t n_rcu_torture_mberror;
69414 +static atomic_unchecked_t n_rcu_torture_error;
69415 static long n_rcu_torture_barrier_error;
69416 static long n_rcu_torture_boost_ktrerror;
69417 static long n_rcu_torture_boost_rterror;
69418 @@ -265,11 +265,11 @@ rcu_torture_alloc(void)
69419
69420 spin_lock_bh(&rcu_torture_lock);
69421 if (list_empty(&rcu_torture_freelist)) {
69422 - atomic_inc(&n_rcu_torture_alloc_fail);
69423 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69424 spin_unlock_bh(&rcu_torture_lock);
69425 return NULL;
69426 }
69427 - atomic_inc(&n_rcu_torture_alloc);
69428 + atomic_inc_unchecked(&n_rcu_torture_alloc);
69429 p = rcu_torture_freelist.next;
69430 list_del_init(p);
69431 spin_unlock_bh(&rcu_torture_lock);
69432 @@ -282,7 +282,7 @@ rcu_torture_alloc(void)
69433 static void
69434 rcu_torture_free(struct rcu_torture *p)
69435 {
69436 - atomic_inc(&n_rcu_torture_free);
69437 + atomic_inc_unchecked(&n_rcu_torture_free);
69438 spin_lock_bh(&rcu_torture_lock);
69439 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69440 spin_unlock_bh(&rcu_torture_lock);
69441 @@ -403,7 +403,7 @@ rcu_torture_cb(struct rcu_head *p)
69442 i = rp->rtort_pipe_count;
69443 if (i > RCU_TORTURE_PIPE_LEN)
69444 i = RCU_TORTURE_PIPE_LEN;
69445 - atomic_inc(&rcu_torture_wcount[i]);
69446 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69447 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69448 rp->rtort_mbtest = 0;
69449 rcu_torture_free(rp);
69450 @@ -451,7 +451,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
69451 i = rp->rtort_pipe_count;
69452 if (i > RCU_TORTURE_PIPE_LEN)
69453 i = RCU_TORTURE_PIPE_LEN;
69454 - atomic_inc(&rcu_torture_wcount[i]);
69455 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69456 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69457 rp->rtort_mbtest = 0;
69458 list_del(&rp->rtort_free);
69459 @@ -983,7 +983,7 @@ rcu_torture_writer(void *arg)
69460 i = old_rp->rtort_pipe_count;
69461 if (i > RCU_TORTURE_PIPE_LEN)
69462 i = RCU_TORTURE_PIPE_LEN;
69463 - atomic_inc(&rcu_torture_wcount[i]);
69464 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
69465 old_rp->rtort_pipe_count++;
69466 cur_ops->deferred_free(old_rp);
69467 }
69468 @@ -1064,7 +1064,7 @@ static void rcu_torture_timer(unsigned long unused)
69469 }
69470 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69471 if (p->rtort_mbtest == 0)
69472 - atomic_inc(&n_rcu_torture_mberror);
69473 + atomic_inc_unchecked(&n_rcu_torture_mberror);
69474 spin_lock(&rand_lock);
69475 cur_ops->read_delay(&rand);
69476 n_rcu_torture_timers++;
69477 @@ -1128,7 +1128,7 @@ rcu_torture_reader(void *arg)
69478 }
69479 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69480 if (p->rtort_mbtest == 0)
69481 - atomic_inc(&n_rcu_torture_mberror);
69482 + atomic_inc_unchecked(&n_rcu_torture_mberror);
69483 cur_ops->read_delay(&rand);
69484 preempt_disable();
69485 pipe_count = p->rtort_pipe_count;
69486 @@ -1191,10 +1191,10 @@ rcu_torture_printk(char *page)
69487 rcu_torture_current,
69488 rcu_torture_current_version,
69489 list_empty(&rcu_torture_freelist),
69490 - atomic_read(&n_rcu_torture_alloc),
69491 - atomic_read(&n_rcu_torture_alloc_fail),
69492 - atomic_read(&n_rcu_torture_free),
69493 - atomic_read(&n_rcu_torture_mberror),
69494 + atomic_read_unchecked(&n_rcu_torture_alloc),
69495 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69496 + atomic_read_unchecked(&n_rcu_torture_free),
69497 + atomic_read_unchecked(&n_rcu_torture_mberror),
69498 n_rcu_torture_boost_ktrerror,
69499 n_rcu_torture_boost_rterror,
69500 n_rcu_torture_boost_failure,
69501 @@ -1208,14 +1208,14 @@ rcu_torture_printk(char *page)
69502 n_barrier_attempts,
69503 n_rcu_torture_barrier_error);
69504 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69505 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
69506 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
69507 n_rcu_torture_barrier_error != 0 ||
69508 n_rcu_torture_boost_ktrerror != 0 ||
69509 n_rcu_torture_boost_rterror != 0 ||
69510 n_rcu_torture_boost_failure != 0 ||
69511 i > 1) {
69512 cnt += sprintf(&page[cnt], "!!! ");
69513 - atomic_inc(&n_rcu_torture_error);
69514 + atomic_inc_unchecked(&n_rcu_torture_error);
69515 WARN_ON_ONCE(1);
69516 }
69517 cnt += sprintf(&page[cnt], "Reader Pipe: ");
69518 @@ -1229,7 +1229,7 @@ rcu_torture_printk(char *page)
69519 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69520 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69521 cnt += sprintf(&page[cnt], " %d",
69522 - atomic_read(&rcu_torture_wcount[i]));
69523 + atomic_read_unchecked(&rcu_torture_wcount[i]));
69524 }
69525 cnt += sprintf(&page[cnt], "\n");
69526 if (cur_ops->stats)
69527 @@ -1888,7 +1888,7 @@ rcu_torture_cleanup(void)
69528
69529 if (cur_ops->cleanup)
69530 cur_ops->cleanup();
69531 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69532 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69533 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
69534 else if (n_online_successes != n_online_attempts ||
69535 n_offline_successes != n_offline_attempts)
69536 @@ -1958,18 +1958,18 @@ rcu_torture_init(void)
69537
69538 rcu_torture_current = NULL;
69539 rcu_torture_current_version = 0;
69540 - atomic_set(&n_rcu_torture_alloc, 0);
69541 - atomic_set(&n_rcu_torture_alloc_fail, 0);
69542 - atomic_set(&n_rcu_torture_free, 0);
69543 - atomic_set(&n_rcu_torture_mberror, 0);
69544 - atomic_set(&n_rcu_torture_error, 0);
69545 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69546 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69547 + atomic_set_unchecked(&n_rcu_torture_free, 0);
69548 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69549 + atomic_set_unchecked(&n_rcu_torture_error, 0);
69550 n_rcu_torture_barrier_error = 0;
69551 n_rcu_torture_boost_ktrerror = 0;
69552 n_rcu_torture_boost_rterror = 0;
69553 n_rcu_torture_boost_failure = 0;
69554 n_rcu_torture_boosts = 0;
69555 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69556 - atomic_set(&rcu_torture_wcount[i], 0);
69557 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69558 for_each_possible_cpu(cpu) {
69559 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69560 per_cpu(rcu_torture_count, cpu)[i] = 0;
69561 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
69562 index 4b97bba..b92c9d2 100644
69563 --- a/kernel/rcutree.c
69564 +++ b/kernel/rcutree.c
69565 @@ -366,9 +366,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
69566 rcu_prepare_for_idle(smp_processor_id());
69567 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69568 smp_mb__before_atomic_inc(); /* See above. */
69569 - atomic_inc(&rdtp->dynticks);
69570 + atomic_inc_unchecked(&rdtp->dynticks);
69571 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
69572 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69573 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69574
69575 /*
69576 * The idle task is not permitted to enter the idle loop while
69577 @@ -457,10 +457,10 @@ void rcu_irq_exit(void)
69578 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
69579 {
69580 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
69581 - atomic_inc(&rdtp->dynticks);
69582 + atomic_inc_unchecked(&rdtp->dynticks);
69583 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69584 smp_mb__after_atomic_inc(); /* See above. */
69585 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69586 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69587 rcu_cleanup_after_idle(smp_processor_id());
69588 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
69589 if (!is_idle_task(current)) {
69590 @@ -554,14 +554,14 @@ void rcu_nmi_enter(void)
69591 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
69592
69593 if (rdtp->dynticks_nmi_nesting == 0 &&
69594 - (atomic_read(&rdtp->dynticks) & 0x1))
69595 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
69596 return;
69597 rdtp->dynticks_nmi_nesting++;
69598 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
69599 - atomic_inc(&rdtp->dynticks);
69600 + atomic_inc_unchecked(&rdtp->dynticks);
69601 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69602 smp_mb__after_atomic_inc(); /* See above. */
69603 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69604 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69605 }
69606
69607 /**
69608 @@ -580,9 +580,9 @@ void rcu_nmi_exit(void)
69609 return;
69610 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69611 smp_mb__before_atomic_inc(); /* See above. */
69612 - atomic_inc(&rdtp->dynticks);
69613 + atomic_inc_unchecked(&rdtp->dynticks);
69614 smp_mb__after_atomic_inc(); /* Force delay to next write. */
69615 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69616 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69617 }
69618
69619 #ifdef CONFIG_PROVE_RCU
69620 @@ -598,7 +598,7 @@ int rcu_is_cpu_idle(void)
69621 int ret;
69622
69623 preempt_disable();
69624 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69625 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69626 preempt_enable();
69627 return ret;
69628 }
69629 @@ -668,7 +668,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
69630 */
69631 static int dyntick_save_progress_counter(struct rcu_data *rdp)
69632 {
69633 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
69634 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69635 return (rdp->dynticks_snap & 0x1) == 0;
69636 }
69637
69638 @@ -683,7 +683,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
69639 unsigned int curr;
69640 unsigned int snap;
69641
69642 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
69643 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69644 snap = (unsigned int)rdp->dynticks_snap;
69645
69646 /*
69647 @@ -713,10 +713,10 @@ static int jiffies_till_stall_check(void)
69648 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
69649 */
69650 if (till_stall_check < 3) {
69651 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
69652 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
69653 till_stall_check = 3;
69654 } else if (till_stall_check > 300) {
69655 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
69656 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
69657 till_stall_check = 300;
69658 }
69659 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
69660 @@ -1824,7 +1824,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
69661 /*
69662 * Do RCU core processing for the current CPU.
69663 */
69664 -static void rcu_process_callbacks(struct softirq_action *unused)
69665 +static void rcu_process_callbacks(void)
69666 {
69667 trace_rcu_utilization("Start RCU core");
69668 __rcu_process_callbacks(&rcu_sched_state,
69669 @@ -2042,8 +2042,8 @@ void synchronize_rcu_bh(void)
69670 }
69671 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
69672
69673 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69674 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69675 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69676 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69677
69678 static int synchronize_sched_expedited_cpu_stop(void *data)
69679 {
69680 @@ -2104,7 +2104,7 @@ void synchronize_sched_expedited(void)
69681 int firstsnap, s, snap, trycount = 0;
69682
69683 /* Note that atomic_inc_return() implies full memory barrier. */
69684 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69685 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69686 get_online_cpus();
69687 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
69688
69689 @@ -2126,7 +2126,7 @@ void synchronize_sched_expedited(void)
69690 }
69691
69692 /* Check to see if someone else did our work for us. */
69693 - s = atomic_read(&sync_sched_expedited_done);
69694 + s = atomic_read_unchecked(&sync_sched_expedited_done);
69695 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69696 smp_mb(); /* ensure test happens before caller kfree */
69697 return;
69698 @@ -2141,7 +2141,7 @@ void synchronize_sched_expedited(void)
69699 * grace period works for us.
69700 */
69701 get_online_cpus();
69702 - snap = atomic_read(&sync_sched_expedited_started);
69703 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
69704 smp_mb(); /* ensure read is before try_stop_cpus(). */
69705 }
69706
69707 @@ -2152,12 +2152,12 @@ void synchronize_sched_expedited(void)
69708 * than we did beat us to the punch.
69709 */
69710 do {
69711 - s = atomic_read(&sync_sched_expedited_done);
69712 + s = atomic_read_unchecked(&sync_sched_expedited_done);
69713 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69714 smp_mb(); /* ensure test happens before caller kfree */
69715 break;
69716 }
69717 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69718 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69719
69720 put_online_cpus();
69721 }
69722 @@ -2421,7 +2421,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
69723 rdp->qlen = 0;
69724 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
69725 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
69726 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
69727 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
69728 rdp->cpu = cpu;
69729 rdp->rsp = rsp;
69730 raw_spin_unlock_irqrestore(&rnp->lock, flags);
69731 @@ -2449,8 +2449,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
69732 rdp->n_force_qs_snap = rsp->n_force_qs;
69733 rdp->blimit = blimit;
69734 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
69735 - atomic_set(&rdp->dynticks->dynticks,
69736 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
69737 + atomic_set_unchecked(&rdp->dynticks->dynticks,
69738 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
69739 rcu_prepare_for_idle_init(cpu);
69740 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
69741
69742 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
69743 index 19b61ac..5c60a94 100644
69744 --- a/kernel/rcutree.h
69745 +++ b/kernel/rcutree.h
69746 @@ -83,7 +83,7 @@ struct rcu_dynticks {
69747 long long dynticks_nesting; /* Track irq/process nesting level. */
69748 /* Process level is worth LLONG_MAX/2. */
69749 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69750 - atomic_t dynticks; /* Even value for idle, else odd. */
69751 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
69752 #ifdef CONFIG_RCU_FAST_NO_HZ
69753 int dyntick_drain; /* Prepare-for-idle state variable. */
69754 unsigned long dyntick_holdoff;
69755 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
69756 index 3e48994..d94f03a 100644
69757 --- a/kernel/rcutree_plugin.h
69758 +++ b/kernel/rcutree_plugin.h
69759 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
69760
69761 /* Clean up and exit. */
69762 smp_mb(); /* ensure expedited GP seen before counter increment. */
69763 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69764 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69765 unlock_mb_ret:
69766 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69767 mb_ret:
69768 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69769 index d4bc16d..c234a5c 100644
69770 --- a/kernel/rcutree_trace.c
69771 +++ b/kernel/rcutree_trace.c
69772 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69773 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69774 rdp->qs_pending);
69775 seq_printf(m, " dt=%d/%llx/%d df=%lu",
69776 - atomic_read(&rdp->dynticks->dynticks),
69777 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69778 rdp->dynticks->dynticks_nesting,
69779 rdp->dynticks->dynticks_nmi_nesting,
69780 rdp->dynticks_fqs);
69781 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69782 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69783 rdp->qs_pending);
69784 seq_printf(m, ",%d,%llx,%d,%lu",
69785 - atomic_read(&rdp->dynticks->dynticks),
69786 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69787 rdp->dynticks->dynticks_nesting,
69788 rdp->dynticks->dynticks_nmi_nesting,
69789 rdp->dynticks_fqs);
69790 diff --git a/kernel/resource.c b/kernel/resource.c
69791 index e1d2b8e..24820bb 100644
69792 --- a/kernel/resource.c
69793 +++ b/kernel/resource.c
69794 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69795
69796 static int __init ioresources_init(void)
69797 {
69798 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69799 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69800 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69801 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69802 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69803 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69804 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69805 +#endif
69806 +#else
69807 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69808 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69809 +#endif
69810 return 0;
69811 }
69812 __initcall(ioresources_init);
69813 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69814 index 98ec494..4241d6d 100644
69815 --- a/kernel/rtmutex-tester.c
69816 +++ b/kernel/rtmutex-tester.c
69817 @@ -20,7 +20,7 @@
69818 #define MAX_RT_TEST_MUTEXES 8
69819
69820 static spinlock_t rttest_lock;
69821 -static atomic_t rttest_event;
69822 +static atomic_unchecked_t rttest_event;
69823
69824 struct test_thread_data {
69825 int opcode;
69826 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69827
69828 case RTTEST_LOCKCONT:
69829 td->mutexes[td->opdata] = 1;
69830 - td->event = atomic_add_return(1, &rttest_event);
69831 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69832 return 0;
69833
69834 case RTTEST_RESET:
69835 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69836 return 0;
69837
69838 case RTTEST_RESETEVENT:
69839 - atomic_set(&rttest_event, 0);
69840 + atomic_set_unchecked(&rttest_event, 0);
69841 return 0;
69842
69843 default:
69844 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69845 return ret;
69846
69847 td->mutexes[id] = 1;
69848 - td->event = atomic_add_return(1, &rttest_event);
69849 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69850 rt_mutex_lock(&mutexes[id]);
69851 - td->event = atomic_add_return(1, &rttest_event);
69852 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69853 td->mutexes[id] = 4;
69854 return 0;
69855
69856 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69857 return ret;
69858
69859 td->mutexes[id] = 1;
69860 - td->event = atomic_add_return(1, &rttest_event);
69861 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69862 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69863 - td->event = atomic_add_return(1, &rttest_event);
69864 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69865 td->mutexes[id] = ret ? 0 : 4;
69866 return ret ? -EINTR : 0;
69867
69868 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69869 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69870 return ret;
69871
69872 - td->event = atomic_add_return(1, &rttest_event);
69873 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69874 rt_mutex_unlock(&mutexes[id]);
69875 - td->event = atomic_add_return(1, &rttest_event);
69876 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69877 td->mutexes[id] = 0;
69878 return 0;
69879
69880 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69881 break;
69882
69883 td->mutexes[dat] = 2;
69884 - td->event = atomic_add_return(1, &rttest_event);
69885 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69886 break;
69887
69888 default:
69889 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69890 return;
69891
69892 td->mutexes[dat] = 3;
69893 - td->event = atomic_add_return(1, &rttest_event);
69894 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69895 break;
69896
69897 case RTTEST_LOCKNOWAIT:
69898 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69899 return;
69900
69901 td->mutexes[dat] = 1;
69902 - td->event = atomic_add_return(1, &rttest_event);
69903 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69904 return;
69905
69906 default:
69907 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
69908 index 0984a21..939f183 100644
69909 --- a/kernel/sched/auto_group.c
69910 +++ b/kernel/sched/auto_group.c
69911 @@ -11,7 +11,7 @@
69912
69913 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69914 static struct autogroup autogroup_default;
69915 -static atomic_t autogroup_seq_nr;
69916 +static atomic_unchecked_t autogroup_seq_nr;
69917
69918 void __init autogroup_init(struct task_struct *init_task)
69919 {
69920 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
69921
69922 kref_init(&ag->kref);
69923 init_rwsem(&ag->lock);
69924 - ag->id = atomic_inc_return(&autogroup_seq_nr);
69925 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69926 ag->tg = tg;
69927 #ifdef CONFIG_RT_GROUP_SCHED
69928 /*
69929 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
69930 index c04c9ff..98c54bf 100644
69931 --- a/kernel/sched/core.c
69932 +++ b/kernel/sched/core.c
69933 @@ -4103,6 +4103,8 @@ int can_nice(const struct task_struct *p, const int nice)
69934 /* convert nice value [19,-20] to rlimit style value [1,40] */
69935 int nice_rlim = 20 - nice;
69936
69937 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69938 +
69939 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
69940 capable(CAP_SYS_NICE));
69941 }
69942 @@ -4136,7 +4138,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69943 if (nice > 19)
69944 nice = 19;
69945
69946 - if (increment < 0 && !can_nice(current, nice))
69947 + if (increment < 0 && (!can_nice(current, nice) ||
69948 + gr_handle_chroot_nice()))
69949 return -EPERM;
69950
69951 retval = security_task_setnice(current, nice);
69952 @@ -4290,6 +4293,7 @@ recheck:
69953 unsigned long rlim_rtprio =
69954 task_rlimit(p, RLIMIT_RTPRIO);
69955
69956 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
69957 /* can't set/change the rt policy */
69958 if (policy != p->policy && !rlim_rtprio)
69959 return -EPERM;
69960 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
69961 index c099cc6..06aec4f 100644
69962 --- a/kernel/sched/fair.c
69963 +++ b/kernel/sched/fair.c
69964 @@ -4846,7 +4846,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
69965 * run_rebalance_domains is triggered when needed from the scheduler tick.
69966 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
69967 */
69968 -static void run_rebalance_domains(struct softirq_action *h)
69969 +static void run_rebalance_domains(void)
69970 {
69971 int this_cpu = smp_processor_id();
69972 struct rq *this_rq = cpu_rq(this_cpu);
69973 diff --git a/kernel/signal.c b/kernel/signal.c
69974 index 6771027..763e51e 100644
69975 --- a/kernel/signal.c
69976 +++ b/kernel/signal.c
69977 @@ -48,12 +48,12 @@ static struct kmem_cache *sigqueue_cachep;
69978
69979 int print_fatal_signals __read_mostly;
69980
69981 -static void __user *sig_handler(struct task_struct *t, int sig)
69982 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
69983 {
69984 return t->sighand->action[sig - 1].sa.sa_handler;
69985 }
69986
69987 -static int sig_handler_ignored(void __user *handler, int sig)
69988 +static int sig_handler_ignored(__sighandler_t handler, int sig)
69989 {
69990 /* Is it explicitly or implicitly ignored? */
69991 return handler == SIG_IGN ||
69992 @@ -62,7 +62,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
69993
69994 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69995 {
69996 - void __user *handler;
69997 + __sighandler_t handler;
69998
69999 handler = sig_handler(t, sig);
70000
70001 @@ -366,6 +366,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
70002 atomic_inc(&user->sigpending);
70003 rcu_read_unlock();
70004
70005 + if (!override_rlimit)
70006 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
70007 +
70008 if (override_rlimit ||
70009 atomic_read(&user->sigpending) <=
70010 task_rlimit(t, RLIMIT_SIGPENDING)) {
70011 @@ -490,7 +493,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
70012
70013 int unhandled_signal(struct task_struct *tsk, int sig)
70014 {
70015 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
70016 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
70017 if (is_global_init(tsk))
70018 return 1;
70019 if (handler != SIG_IGN && handler != SIG_DFL)
70020 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
70021 }
70022 }
70023
70024 + /* allow glibc communication via tgkill to other threads in our
70025 + thread group */
70026 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
70027 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
70028 + && gr_handle_signal(t, sig))
70029 + return -EPERM;
70030 +
70031 return security_task_kill(t, info, sig, 0);
70032 }
70033
70034 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
70035 return send_signal(sig, info, p, 1);
70036 }
70037
70038 -static int
70039 +int
70040 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70041 {
70042 return send_signal(sig, info, t, 0);
70043 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70044 unsigned long int flags;
70045 int ret, blocked, ignored;
70046 struct k_sigaction *action;
70047 + int is_unhandled = 0;
70048
70049 spin_lock_irqsave(&t->sighand->siglock, flags);
70050 action = &t->sighand->action[sig-1];
70051 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
70052 }
70053 if (action->sa.sa_handler == SIG_DFL)
70054 t->signal->flags &= ~SIGNAL_UNKILLABLE;
70055 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
70056 + is_unhandled = 1;
70057 ret = specific_send_sig_info(sig, info, t);
70058 spin_unlock_irqrestore(&t->sighand->siglock, flags);
70059
70060 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
70061 + normal operation */
70062 + if (is_unhandled) {
70063 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
70064 + gr_handle_crash(t, sig);
70065 + }
70066 +
70067 return ret;
70068 }
70069
70070 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
70071 ret = check_kill_permission(sig, info, p);
70072 rcu_read_unlock();
70073
70074 - if (!ret && sig)
70075 + if (!ret && sig) {
70076 ret = do_send_sig_info(sig, info, p, true);
70077 + if (!ret)
70078 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
70079 + }
70080
70081 return ret;
70082 }
70083 @@ -2858,7 +2881,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
70084 int error = -ESRCH;
70085
70086 rcu_read_lock();
70087 - p = find_task_by_vpid(pid);
70088 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70089 + /* allow glibc communication via tgkill to other threads in our
70090 + thread group */
70091 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
70092 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
70093 + p = find_task_by_vpid_unrestricted(pid);
70094 + else
70095 +#endif
70096 + p = find_task_by_vpid(pid);
70097 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
70098 error = check_kill_permission(sig, info, p);
70099 /*
70100 diff --git a/kernel/smp.c b/kernel/smp.c
70101 index d0ae5b2..b87c5a8 100644
70102 --- a/kernel/smp.c
70103 +++ b/kernel/smp.c
70104 @@ -582,22 +582,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
70105 }
70106 EXPORT_SYMBOL(smp_call_function);
70107
70108 -void ipi_call_lock(void)
70109 +void ipi_call_lock(void) __acquires(call_function.lock)
70110 {
70111 raw_spin_lock(&call_function.lock);
70112 }
70113
70114 -void ipi_call_unlock(void)
70115 +void ipi_call_unlock(void) __releases(call_function.lock)
70116 {
70117 raw_spin_unlock(&call_function.lock);
70118 }
70119
70120 -void ipi_call_lock_irq(void)
70121 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
70122 {
70123 raw_spin_lock_irq(&call_function.lock);
70124 }
70125
70126 -void ipi_call_unlock_irq(void)
70127 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
70128 {
70129 raw_spin_unlock_irq(&call_function.lock);
70130 }
70131 diff --git a/kernel/softirq.c b/kernel/softirq.c
70132 index 671f959..91c51cb 100644
70133 --- a/kernel/softirq.c
70134 +++ b/kernel/softirq.c
70135 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
70136
70137 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
70138
70139 -char *softirq_to_name[NR_SOFTIRQS] = {
70140 +const char * const softirq_to_name[NR_SOFTIRQS] = {
70141 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
70142 "TASKLET", "SCHED", "HRTIMER", "RCU"
70143 };
70144 @@ -235,7 +235,7 @@ restart:
70145 kstat_incr_softirqs_this_cpu(vec_nr);
70146
70147 trace_softirq_entry(vec_nr);
70148 - h->action(h);
70149 + h->action();
70150 trace_softirq_exit(vec_nr);
70151 if (unlikely(prev_count != preempt_count())) {
70152 printk(KERN_ERR "huh, entered softirq %u %s %p"
70153 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
70154 or_softirq_pending(1UL << nr);
70155 }
70156
70157 -void open_softirq(int nr, void (*action)(struct softirq_action *))
70158 +void open_softirq(int nr, void (*action)(void))
70159 {
70160 - softirq_vec[nr].action = action;
70161 + pax_open_kernel();
70162 + *(void **)&softirq_vec[nr].action = action;
70163 + pax_close_kernel();
70164 }
70165
70166 /*
70167 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
70168
70169 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
70170
70171 -static void tasklet_action(struct softirq_action *a)
70172 +static void tasklet_action(void)
70173 {
70174 struct tasklet_struct *list;
70175
70176 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
70177 }
70178 }
70179
70180 -static void tasklet_hi_action(struct softirq_action *a)
70181 +static void tasklet_hi_action(void)
70182 {
70183 struct tasklet_struct *list;
70184
70185 diff --git a/kernel/srcu.c b/kernel/srcu.c
70186 index 2095be3..9a5b89d 100644
70187 --- a/kernel/srcu.c
70188 +++ b/kernel/srcu.c
70189 @@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
70190 preempt_disable();
70191 idx = rcu_dereference_index_check(sp->completed,
70192 rcu_read_lock_sched_held()) & 0x1;
70193 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70194 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70195 smp_mb(); /* B */ /* Avoid leaking the critical section. */
70196 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70197 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70198 preempt_enable();
70199 return idx;
70200 }
70201 @@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
70202 {
70203 preempt_disable();
70204 smp_mb(); /* C */ /* Avoid leaking the critical section. */
70205 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70206 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70207 preempt_enable();
70208 }
70209 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
70210 diff --git a/kernel/sys.c b/kernel/sys.c
70211 index 2d39a84..1c3abe4 100644
70212 --- a/kernel/sys.c
70213 +++ b/kernel/sys.c
70214 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
70215 error = -EACCES;
70216 goto out;
70217 }
70218 +
70219 + if (gr_handle_chroot_setpriority(p, niceval)) {
70220 + error = -EACCES;
70221 + goto out;
70222 + }
70223 +
70224 no_nice = security_task_setnice(p, niceval);
70225 if (no_nice) {
70226 error = no_nice;
70227 @@ -594,6 +600,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
70228 goto error;
70229 }
70230
70231 + if (gr_check_group_change(new->gid, new->egid, -1))
70232 + goto error;
70233 +
70234 if (rgid != (gid_t) -1 ||
70235 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
70236 new->sgid = new->egid;
70237 @@ -629,6 +638,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
70238 old = current_cred();
70239
70240 retval = -EPERM;
70241 +
70242 + if (gr_check_group_change(kgid, kgid, kgid))
70243 + goto error;
70244 +
70245 if (nsown_capable(CAP_SETGID))
70246 new->gid = new->egid = new->sgid = new->fsgid = kgid;
70247 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
70248 @@ -646,7 +659,7 @@ error:
70249 /*
70250 * change the user struct in a credentials set to match the new UID
70251 */
70252 -static int set_user(struct cred *new)
70253 +int set_user(struct cred *new)
70254 {
70255 struct user_struct *new_user;
70256
70257 @@ -726,6 +739,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
70258 goto error;
70259 }
70260
70261 + if (gr_check_user_change(new->uid, new->euid, -1))
70262 + goto error;
70263 +
70264 if (!uid_eq(new->uid, old->uid)) {
70265 retval = set_user(new);
70266 if (retval < 0)
70267 @@ -776,6 +792,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
70268 old = current_cred();
70269
70270 retval = -EPERM;
70271 +
70272 + if (gr_check_crash_uid(kuid))
70273 + goto error;
70274 + if (gr_check_user_change(kuid, kuid, kuid))
70275 + goto error;
70276 +
70277 if (nsown_capable(CAP_SETUID)) {
70278 new->suid = new->uid = kuid;
70279 if (!uid_eq(kuid, old->uid)) {
70280 @@ -845,6 +867,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
70281 goto error;
70282 }
70283
70284 + if (gr_check_user_change(kruid, keuid, -1))
70285 + goto error;
70286 +
70287 if (ruid != (uid_t) -1) {
70288 new->uid = kruid;
70289 if (!uid_eq(kruid, old->uid)) {
70290 @@ -927,6 +952,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
70291 goto error;
70292 }
70293
70294 + if (gr_check_group_change(krgid, kegid, -1))
70295 + goto error;
70296 +
70297 if (rgid != (gid_t) -1)
70298 new->gid = krgid;
70299 if (egid != (gid_t) -1)
70300 @@ -980,6 +1008,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70301 if (!uid_valid(kuid))
70302 return old_fsuid;
70303
70304 + if (gr_check_user_change(-1, -1, kuid))
70305 + goto error;
70306 +
70307 new = prepare_creds();
70308 if (!new)
70309 return old_fsuid;
70310 @@ -994,6 +1025,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70311 }
70312 }
70313
70314 +error:
70315 abort_creds(new);
70316 return old_fsuid;
70317
70318 @@ -1026,12 +1058,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
70319 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
70320 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
70321 nsown_capable(CAP_SETGID)) {
70322 + if (gr_check_group_change(-1, -1, kgid))
70323 + goto error;
70324 +
70325 if (!gid_eq(kgid, old->fsgid)) {
70326 new->fsgid = kgid;
70327 goto change_okay;
70328 }
70329 }
70330
70331 +error:
70332 abort_creds(new);
70333 return old_fsgid;
70334
70335 @@ -1264,13 +1300,13 @@ DECLARE_RWSEM(uts_sem);
70336 * Work around broken programs that cannot handle "Linux 3.0".
70337 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
70338 */
70339 -static int override_release(char __user *release, int len)
70340 +static int override_release(char __user *release, size_t len)
70341 {
70342 int ret = 0;
70343 - char buf[65];
70344
70345 if (current->personality & UNAME26) {
70346 - char *rest = UTS_RELEASE;
70347 + char buf[65] = { 0 };
70348 + const char *rest = UTS_RELEASE;
70349 int ndots = 0;
70350 unsigned v;
70351
70352 @@ -1282,7 +1318,10 @@ static int override_release(char __user *release, int len)
70353 rest++;
70354 }
70355 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
70356 + if (sizeof buf < len)
70357 + len = sizeof buf;
70358 snprintf(buf, len, "2.6.%u%s", v, rest);
70359 + buf[len - 1] = 0;
70360 ret = copy_to_user(release, buf, len);
70361 }
70362 return ret;
70363 @@ -1337,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
70364 return -EFAULT;
70365
70366 down_read(&uts_sem);
70367 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
70368 + error = __copy_to_user(name->sysname, &utsname()->sysname,
70369 __OLD_UTS_LEN);
70370 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
70371 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
70372 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
70373 __OLD_UTS_LEN);
70374 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
70375 - error |= __copy_to_user(&name->release, &utsname()->release,
70376 + error |= __copy_to_user(name->release, &utsname()->release,
70377 __OLD_UTS_LEN);
70378 error |= __put_user(0, name->release + __OLD_UTS_LEN);
70379 - error |= __copy_to_user(&name->version, &utsname()->version,
70380 + error |= __copy_to_user(name->version, &utsname()->version,
70381 __OLD_UTS_LEN);
70382 error |= __put_user(0, name->version + __OLD_UTS_LEN);
70383 - error |= __copy_to_user(&name->machine, &utsname()->machine,
70384 + error |= __copy_to_user(name->machine, &utsname()->machine,
70385 __OLD_UTS_LEN);
70386 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
70387 up_read(&uts_sem);
70388 @@ -2024,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
70389 error = get_dumpable(me->mm);
70390 break;
70391 case PR_SET_DUMPABLE:
70392 - if (arg2 < 0 || arg2 > 1) {
70393 + if (arg2 > 1) {
70394 error = -EINVAL;
70395 break;
70396 }
70397 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
70398 index 4ab1187..33f4f2b 100644
70399 --- a/kernel/sysctl.c
70400 +++ b/kernel/sysctl.c
70401 @@ -91,7 +91,6 @@
70402
70403
70404 #if defined(CONFIG_SYSCTL)
70405 -
70406 /* External variables not in a header file. */
70407 extern int sysctl_overcommit_memory;
70408 extern int sysctl_overcommit_ratio;
70409 @@ -169,10 +168,13 @@ static int proc_taint(struct ctl_table *table, int write,
70410 void __user *buffer, size_t *lenp, loff_t *ppos);
70411 #endif
70412
70413 -#ifdef CONFIG_PRINTK
70414 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70415 void __user *buffer, size_t *lenp, loff_t *ppos);
70416 -#endif
70417 +
70418 +static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70419 + void __user *buffer, size_t *lenp, loff_t *ppos);
70420 +static int proc_dostring_coredump(struct ctl_table *table, int write,
70421 + void __user *buffer, size_t *lenp, loff_t *ppos);
70422
70423 #ifdef CONFIG_MAGIC_SYSRQ
70424 /* Note: sysrq code uses it's own private copy */
70425 @@ -196,6 +198,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
70426
70427 #endif
70428
70429 +extern struct ctl_table grsecurity_table[];
70430 +
70431 static struct ctl_table kern_table[];
70432 static struct ctl_table vm_table[];
70433 static struct ctl_table fs_table[];
70434 @@ -210,6 +214,20 @@ extern struct ctl_table epoll_table[];
70435 int sysctl_legacy_va_layout;
70436 #endif
70437
70438 +#ifdef CONFIG_PAX_SOFTMODE
70439 +static ctl_table pax_table[] = {
70440 + {
70441 + .procname = "softmode",
70442 + .data = &pax_softmode,
70443 + .maxlen = sizeof(unsigned int),
70444 + .mode = 0600,
70445 + .proc_handler = &proc_dointvec,
70446 + },
70447 +
70448 + { }
70449 +};
70450 +#endif
70451 +
70452 /* The default sysctl tables: */
70453
70454 static struct ctl_table sysctl_base_table[] = {
70455 @@ -256,6 +274,22 @@ static int max_extfrag_threshold = 1000;
70456 #endif
70457
70458 static struct ctl_table kern_table[] = {
70459 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70460 + {
70461 + .procname = "grsecurity",
70462 + .mode = 0500,
70463 + .child = grsecurity_table,
70464 + },
70465 +#endif
70466 +
70467 +#ifdef CONFIG_PAX_SOFTMODE
70468 + {
70469 + .procname = "pax",
70470 + .mode = 0500,
70471 + .child = pax_table,
70472 + },
70473 +#endif
70474 +
70475 {
70476 .procname = "sched_child_runs_first",
70477 .data = &sysctl_sched_child_runs_first,
70478 @@ -410,7 +444,7 @@ static struct ctl_table kern_table[] = {
70479 .data = core_pattern,
70480 .maxlen = CORENAME_MAX_SIZE,
70481 .mode = 0644,
70482 - .proc_handler = proc_dostring,
70483 + .proc_handler = proc_dostring_coredump,
70484 },
70485 {
70486 .procname = "core_pipe_limit",
70487 @@ -540,7 +574,7 @@ static struct ctl_table kern_table[] = {
70488 .data = &modprobe_path,
70489 .maxlen = KMOD_PATH_LEN,
70490 .mode = 0644,
70491 - .proc_handler = proc_dostring,
70492 + .proc_handler = proc_dostring_modpriv,
70493 },
70494 {
70495 .procname = "modules_disabled",
70496 @@ -707,16 +741,20 @@ static struct ctl_table kern_table[] = {
70497 .extra1 = &zero,
70498 .extra2 = &one,
70499 },
70500 +#endif
70501 {
70502 .procname = "kptr_restrict",
70503 .data = &kptr_restrict,
70504 .maxlen = sizeof(int),
70505 .mode = 0644,
70506 .proc_handler = proc_dointvec_minmax_sysadmin,
70507 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70508 + .extra1 = &two,
70509 +#else
70510 .extra1 = &zero,
70511 +#endif
70512 .extra2 = &two,
70513 },
70514 -#endif
70515 {
70516 .procname = "ngroups_max",
70517 .data = &ngroups_max,
70518 @@ -1215,6 +1253,13 @@ static struct ctl_table vm_table[] = {
70519 .proc_handler = proc_dointvec_minmax,
70520 .extra1 = &zero,
70521 },
70522 + {
70523 + .procname = "heap_stack_gap",
70524 + .data = &sysctl_heap_stack_gap,
70525 + .maxlen = sizeof(sysctl_heap_stack_gap),
70526 + .mode = 0644,
70527 + .proc_handler = proc_doulongvec_minmax,
70528 + },
70529 #else
70530 {
70531 .procname = "nr_trim_pages",
70532 @@ -1498,7 +1543,7 @@ static struct ctl_table fs_table[] = {
70533 .data = &suid_dumpable,
70534 .maxlen = sizeof(int),
70535 .mode = 0644,
70536 - .proc_handler = proc_dointvec_minmax,
70537 + .proc_handler = proc_dointvec_minmax_coredump,
70538 .extra1 = &zero,
70539 .extra2 = &two,
70540 },
70541 @@ -1645,6 +1690,16 @@ int proc_dostring(struct ctl_table *table, int write,
70542 buffer, lenp, ppos);
70543 }
70544
70545 +int proc_dostring_modpriv(struct ctl_table *table, int write,
70546 + void __user *buffer, size_t *lenp, loff_t *ppos)
70547 +{
70548 + if (write && !capable(CAP_SYS_MODULE))
70549 + return -EPERM;
70550 +
70551 + return _proc_do_string(table->data, table->maxlen, write,
70552 + buffer, lenp, ppos);
70553 +}
70554 +
70555 static size_t proc_skip_spaces(char **buf)
70556 {
70557 size_t ret;
70558 @@ -1750,6 +1805,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
70559 len = strlen(tmp);
70560 if (len > *size)
70561 len = *size;
70562 + if (len > sizeof(tmp))
70563 + len = sizeof(tmp);
70564 if (copy_to_user(*buf, tmp, len))
70565 return -EFAULT;
70566 *size -= len;
70567 @@ -1942,7 +1999,6 @@ static int proc_taint(struct ctl_table *table, int write,
70568 return err;
70569 }
70570
70571 -#ifdef CONFIG_PRINTK
70572 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70573 void __user *buffer, size_t *lenp, loff_t *ppos)
70574 {
70575 @@ -1951,7 +2007,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70576
70577 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70578 }
70579 -#endif
70580
70581 struct do_proc_dointvec_minmax_conv_param {
70582 int *min;
70583 @@ -2009,6 +2064,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
70584 do_proc_dointvec_minmax_conv, &param);
70585 }
70586
70587 +static void validate_coredump_safety(void)
70588 +{
70589 + if (suid_dumpable == SUID_DUMPABLE_SAFE &&
70590 + core_pattern[0] != '/' && core_pattern[0] != '|') {
70591 + printk(KERN_WARNING "Unsafe core_pattern used with "\
70592 + "suid_dumpable=2. Pipe handler or fully qualified "\
70593 + "core dump path required.\n");
70594 + }
70595 +}
70596 +
70597 +static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70598 + void __user *buffer, size_t *lenp, loff_t *ppos)
70599 +{
70600 + int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70601 + if (!error)
70602 + validate_coredump_safety();
70603 + return error;
70604 +}
70605 +
70606 +static int proc_dostring_coredump(struct ctl_table *table, int write,
70607 + void __user *buffer, size_t *lenp, loff_t *ppos)
70608 +{
70609 + int error = proc_dostring(table, write, buffer, lenp, ppos);
70610 + if (!error)
70611 + validate_coredump_safety();
70612 + return error;
70613 +}
70614 +
70615 static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
70616 void __user *buffer,
70617 size_t *lenp, loff_t *ppos,
70618 @@ -2066,8 +2149,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
70619 *i = val;
70620 } else {
70621 val = convdiv * (*i) / convmul;
70622 - if (!first)
70623 + if (!first) {
70624 err = proc_put_char(&buffer, &left, '\t');
70625 + if (err)
70626 + break;
70627 + }
70628 err = proc_put_long(&buffer, &left, val, false);
70629 if (err)
70630 break;
70631 @@ -2459,6 +2545,12 @@ int proc_dostring(struct ctl_table *table, int write,
70632 return -ENOSYS;
70633 }
70634
70635 +int proc_dostring_modpriv(struct ctl_table *table, int write,
70636 + void __user *buffer, size_t *lenp, loff_t *ppos)
70637 +{
70638 + return -ENOSYS;
70639 +}
70640 +
70641 int proc_dointvec(struct ctl_table *table, int write,
70642 void __user *buffer, size_t *lenp, loff_t *ppos)
70643 {
70644 @@ -2515,5 +2607,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
70645 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70646 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70647 EXPORT_SYMBOL(proc_dostring);
70648 +EXPORT_SYMBOL(proc_dostring_modpriv);
70649 EXPORT_SYMBOL(proc_doulongvec_minmax);
70650 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70651 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
70652 index a650694..aaeeb20 100644
70653 --- a/kernel/sysctl_binary.c
70654 +++ b/kernel/sysctl_binary.c
70655 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
70656 int i;
70657
70658 set_fs(KERNEL_DS);
70659 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70660 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70661 set_fs(old_fs);
70662 if (result < 0)
70663 goto out_kfree;
70664 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
70665 }
70666
70667 set_fs(KERNEL_DS);
70668 - result = vfs_write(file, buffer, str - buffer, &pos);
70669 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70670 set_fs(old_fs);
70671 if (result < 0)
70672 goto out_kfree;
70673 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
70674 int i;
70675
70676 set_fs(KERNEL_DS);
70677 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70678 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70679 set_fs(old_fs);
70680 if (result < 0)
70681 goto out_kfree;
70682 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
70683 }
70684
70685 set_fs(KERNEL_DS);
70686 - result = vfs_write(file, buffer, str - buffer, &pos);
70687 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70688 set_fs(old_fs);
70689 if (result < 0)
70690 goto out_kfree;
70691 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
70692 int i;
70693
70694 set_fs(KERNEL_DS);
70695 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70696 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70697 set_fs(old_fs);
70698 if (result < 0)
70699 goto out;
70700 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70701 __le16 dnaddr;
70702
70703 set_fs(KERNEL_DS);
70704 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70705 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70706 set_fs(old_fs);
70707 if (result < 0)
70708 goto out;
70709 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70710 le16_to_cpu(dnaddr) & 0x3ff);
70711
70712 set_fs(KERNEL_DS);
70713 - result = vfs_write(file, buf, len, &pos);
70714 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70715 set_fs(old_fs);
70716 if (result < 0)
70717 goto out;
70718 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70719 index e660464..c8b9e67 100644
70720 --- a/kernel/taskstats.c
70721 +++ b/kernel/taskstats.c
70722 @@ -27,9 +27,12 @@
70723 #include <linux/cgroup.h>
70724 #include <linux/fs.h>
70725 #include <linux/file.h>
70726 +#include <linux/grsecurity.h>
70727 #include <net/genetlink.h>
70728 #include <linux/atomic.h>
70729
70730 +extern int gr_is_taskstats_denied(int pid);
70731 +
70732 /*
70733 * Maximum length of a cpumask that can be specified in
70734 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70735 @@ -556,6 +559,9 @@ err:
70736
70737 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70738 {
70739 + if (gr_is_taskstats_denied(current->pid))
70740 + return -EACCES;
70741 +
70742 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70743 return cmd_attr_register_cpumask(info);
70744 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
70745 diff --git a/kernel/time.c b/kernel/time.c
70746 index ba744cf..267b7c5 100644
70747 --- a/kernel/time.c
70748 +++ b/kernel/time.c
70749 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70750 return error;
70751
70752 if (tz) {
70753 + /* we log in do_settimeofday called below, so don't log twice
70754 + */
70755 + if (!tv)
70756 + gr_log_timechange();
70757 +
70758 sys_tz = *tz;
70759 update_vsyscall_tz();
70760 if (firsttime) {
70761 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
70762 index aa27d39..34d221c 100644
70763 --- a/kernel/time/alarmtimer.c
70764 +++ b/kernel/time/alarmtimer.c
70765 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
70766 struct platform_device *pdev;
70767 int error = 0;
70768 int i;
70769 - struct k_clock alarm_clock = {
70770 + static struct k_clock alarm_clock = {
70771 .clock_getres = alarm_clock_getres,
70772 .clock_get = alarm_clock_get,
70773 .timer_create = alarm_timer_create,
70774 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
70775 index f113755..ec24223 100644
70776 --- a/kernel/time/tick-broadcast.c
70777 +++ b/kernel/time/tick-broadcast.c
70778 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
70779 * then clear the broadcast bit.
70780 */
70781 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70782 - int cpu = smp_processor_id();
70783 + cpu = smp_processor_id();
70784
70785 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70786 tick_broadcast_clear_oneshot(cpu);
70787 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
70788 index 63c88c1..8fd3c2f 100644
70789 --- a/kernel/time/timekeeping.c
70790 +++ b/kernel/time/timekeeping.c
70791 @@ -14,6 +14,7 @@
70792 #include <linux/init.h>
70793 #include <linux/mm.h>
70794 #include <linux/sched.h>
70795 +#include <linux/grsecurity.h>
70796 #include <linux/syscore_ops.h>
70797 #include <linux/clocksource.h>
70798 #include <linux/jiffies.h>
70799 @@ -387,6 +388,8 @@ int do_settimeofday(const struct timespec *tv)
70800 if (!timespec_valid_strict(tv))
70801 return -EINVAL;
70802
70803 + gr_log_timechange();
70804 +
70805 write_seqlock_irqsave(&timekeeper.lock, flags);
70806
70807 timekeeping_forward_now();
70808 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70809 index 3258455..f35227d 100644
70810 --- a/kernel/time/timer_list.c
70811 +++ b/kernel/time/timer_list.c
70812 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
70813
70814 static void print_name_offset(struct seq_file *m, void *sym)
70815 {
70816 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70817 + SEQ_printf(m, "<%p>", NULL);
70818 +#else
70819 char symname[KSYM_NAME_LEN];
70820
70821 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70822 SEQ_printf(m, "<%pK>", sym);
70823 else
70824 SEQ_printf(m, "%s", symname);
70825 +#endif
70826 }
70827
70828 static void
70829 @@ -112,7 +116,11 @@ next_one:
70830 static void
70831 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70832 {
70833 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70834 + SEQ_printf(m, " .base: %p\n", NULL);
70835 +#else
70836 SEQ_printf(m, " .base: %pK\n", base);
70837 +#endif
70838 SEQ_printf(m, " .index: %d\n",
70839 base->index);
70840 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70841 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
70842 {
70843 struct proc_dir_entry *pe;
70844
70845 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70846 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70847 +#else
70848 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70849 +#endif
70850 if (!pe)
70851 return -ENOMEM;
70852 return 0;
70853 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
70854 index 0b537f2..9e71eca 100644
70855 --- a/kernel/time/timer_stats.c
70856 +++ b/kernel/time/timer_stats.c
70857 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70858 static unsigned long nr_entries;
70859 static struct entry entries[MAX_ENTRIES];
70860
70861 -static atomic_t overflow_count;
70862 +static atomic_unchecked_t overflow_count;
70863
70864 /*
70865 * The entries are in a hash-table, for fast lookup:
70866 @@ -140,7 +140,7 @@ static void reset_entries(void)
70867 nr_entries = 0;
70868 memset(entries, 0, sizeof(entries));
70869 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70870 - atomic_set(&overflow_count, 0);
70871 + atomic_set_unchecked(&overflow_count, 0);
70872 }
70873
70874 static struct entry *alloc_entry(void)
70875 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70876 if (likely(entry))
70877 entry->count++;
70878 else
70879 - atomic_inc(&overflow_count);
70880 + atomic_inc_unchecked(&overflow_count);
70881
70882 out_unlock:
70883 raw_spin_unlock_irqrestore(lock, flags);
70884 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70885
70886 static void print_name_offset(struct seq_file *m, unsigned long addr)
70887 {
70888 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70889 + seq_printf(m, "<%p>", NULL);
70890 +#else
70891 char symname[KSYM_NAME_LEN];
70892
70893 if (lookup_symbol_name(addr, symname) < 0)
70894 seq_printf(m, "<%p>", (void *)addr);
70895 else
70896 seq_printf(m, "%s", symname);
70897 +#endif
70898 }
70899
70900 static int tstats_show(struct seq_file *m, void *v)
70901 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
70902
70903 seq_puts(m, "Timer Stats Version: v0.2\n");
70904 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70905 - if (atomic_read(&overflow_count))
70906 + if (atomic_read_unchecked(&overflow_count))
70907 seq_printf(m, "Overflow: %d entries\n",
70908 - atomic_read(&overflow_count));
70909 + atomic_read_unchecked(&overflow_count));
70910
70911 for (i = 0; i < nr_entries; i++) {
70912 entry = entries + i;
70913 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70914 {
70915 struct proc_dir_entry *pe;
70916
70917 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70918 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70919 +#else
70920 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70921 +#endif
70922 if (!pe)
70923 return -ENOMEM;
70924 return 0;
70925 diff --git a/kernel/timer.c b/kernel/timer.c
70926 index 6ec7e7e..cbc448b 100644
70927 --- a/kernel/timer.c
70928 +++ b/kernel/timer.c
70929 @@ -1362,7 +1362,7 @@ void update_process_times(int user_tick)
70930 /*
70931 * This function runs timers and the timer-tq in bottom half context.
70932 */
70933 -static void run_timer_softirq(struct softirq_action *h)
70934 +static void run_timer_softirq(void)
70935 {
70936 struct tvec_base *base = __this_cpu_read(tvec_bases);
70937
70938 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
70939 index c0bd030..62a1927 100644
70940 --- a/kernel/trace/blktrace.c
70941 +++ b/kernel/trace/blktrace.c
70942 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
70943 struct blk_trace *bt = filp->private_data;
70944 char buf[16];
70945
70946 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70947 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70948
70949 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70950 }
70951 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
70952 return 1;
70953
70954 bt = buf->chan->private_data;
70955 - atomic_inc(&bt->dropped);
70956 + atomic_inc_unchecked(&bt->dropped);
70957 return 0;
70958 }
70959
70960 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
70961
70962 bt->dir = dir;
70963 bt->dev = dev;
70964 - atomic_set(&bt->dropped, 0);
70965 + atomic_set_unchecked(&bt->dropped, 0);
70966
70967 ret = -EIO;
70968 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70969 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
70970 index a008663..30d7429 100644
70971 --- a/kernel/trace/ftrace.c
70972 +++ b/kernel/trace/ftrace.c
70973 @@ -1785,12 +1785,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
70974 if (unlikely(ftrace_disabled))
70975 return 0;
70976
70977 + ret = ftrace_arch_code_modify_prepare();
70978 + FTRACE_WARN_ON(ret);
70979 + if (ret)
70980 + return 0;
70981 +
70982 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70983 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70984 if (ret) {
70985 ftrace_bug(ret, ip);
70986 - return 0;
70987 }
70988 - return 1;
70989 + return ret ? 0 : 1;
70990 }
70991
70992 /*
70993 @@ -2885,7 +2890,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
70994
70995 int
70996 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70997 - void *data)
70998 + void *data)
70999 {
71000 struct ftrace_func_probe *entry;
71001 struct ftrace_page *pg;
71002 @@ -3697,8 +3702,10 @@ static int ftrace_process_locs(struct module *mod,
71003 if (!count)
71004 return 0;
71005
71006 + pax_open_kernel();
71007 sort(start, count, sizeof(*start),
71008 ftrace_cmp_ips, ftrace_swap_ips);
71009 + pax_close_kernel();
71010
71011 start_pg = ftrace_allocate_pages(count);
71012 if (!start_pg)
71013 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
71014 index a7fa070..403bc8d 100644
71015 --- a/kernel/trace/trace.c
71016 +++ b/kernel/trace/trace.c
71017 @@ -4421,10 +4421,9 @@ static const struct file_operations tracing_dyn_info_fops = {
71018 };
71019 #endif
71020
71021 -static struct dentry *d_tracer;
71022 -
71023 struct dentry *tracing_init_dentry(void)
71024 {
71025 + static struct dentry *d_tracer;
71026 static int once;
71027
71028 if (d_tracer)
71029 @@ -4444,10 +4443,9 @@ struct dentry *tracing_init_dentry(void)
71030 return d_tracer;
71031 }
71032
71033 -static struct dentry *d_percpu;
71034 -
71035 struct dentry *tracing_dentry_percpu(void)
71036 {
71037 + static struct dentry *d_percpu;
71038 static int once;
71039 struct dentry *d_tracer;
71040
71041 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
71042 index 29111da..d190fe2 100644
71043 --- a/kernel/trace/trace_events.c
71044 +++ b/kernel/trace/trace_events.c
71045 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
71046 struct ftrace_module_file_ops {
71047 struct list_head list;
71048 struct module *mod;
71049 - struct file_operations id;
71050 - struct file_operations enable;
71051 - struct file_operations format;
71052 - struct file_operations filter;
71053 };
71054
71055 static struct ftrace_module_file_ops *
71056 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
71057
71058 file_ops->mod = mod;
71059
71060 - file_ops->id = ftrace_event_id_fops;
71061 - file_ops->id.owner = mod;
71062 -
71063 - file_ops->enable = ftrace_enable_fops;
71064 - file_ops->enable.owner = mod;
71065 -
71066 - file_ops->filter = ftrace_event_filter_fops;
71067 - file_ops->filter.owner = mod;
71068 -
71069 - file_ops->format = ftrace_event_format_fops;
71070 - file_ops->format.owner = mod;
71071 + pax_open_kernel();
71072 + *(void **)&mod->trace_id.owner = mod;
71073 + *(void **)&mod->trace_enable.owner = mod;
71074 + *(void **)&mod->trace_filter.owner = mod;
71075 + *(void **)&mod->trace_format.owner = mod;
71076 + pax_close_kernel();
71077
71078 list_add(&file_ops->list, &ftrace_module_file_list);
71079
71080 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
71081
71082 for_each_event(call, start, end) {
71083 __trace_add_event_call(*call, mod,
71084 - &file_ops->id, &file_ops->enable,
71085 - &file_ops->filter, &file_ops->format);
71086 + &mod->trace_id, &mod->trace_enable,
71087 + &mod->trace_filter, &mod->trace_format);
71088 }
71089 }
71090
71091 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
71092 index fd3c8aa..5f324a6 100644
71093 --- a/kernel/trace/trace_mmiotrace.c
71094 +++ b/kernel/trace/trace_mmiotrace.c
71095 @@ -24,7 +24,7 @@ struct header_iter {
71096 static struct trace_array *mmio_trace_array;
71097 static bool overrun_detected;
71098 static unsigned long prev_overruns;
71099 -static atomic_t dropped_count;
71100 +static atomic_unchecked_t dropped_count;
71101
71102 static void mmio_reset_data(struct trace_array *tr)
71103 {
71104 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
71105
71106 static unsigned long count_overruns(struct trace_iterator *iter)
71107 {
71108 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
71109 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
71110 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
71111
71112 if (over > prev_overruns)
71113 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
71114 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
71115 sizeof(*entry), 0, pc);
71116 if (!event) {
71117 - atomic_inc(&dropped_count);
71118 + atomic_inc_unchecked(&dropped_count);
71119 return;
71120 }
71121 entry = ring_buffer_event_data(event);
71122 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
71123 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
71124 sizeof(*entry), 0, pc);
71125 if (!event) {
71126 - atomic_inc(&dropped_count);
71127 + atomic_inc_unchecked(&dropped_count);
71128 return;
71129 }
71130 entry = ring_buffer_event_data(event);
71131 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
71132 index df611a0..10d8b32 100644
71133 --- a/kernel/trace/trace_output.c
71134 +++ b/kernel/trace/trace_output.c
71135 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
71136
71137 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
71138 if (!IS_ERR(p)) {
71139 - p = mangle_path(s->buffer + s->len, p, "\n");
71140 + p = mangle_path(s->buffer + s->len, p, "\n\\");
71141 if (p) {
71142 s->len = p - s->buffer;
71143 return 1;
71144 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
71145 index d4545f4..a9010a1 100644
71146 --- a/kernel/trace/trace_stack.c
71147 +++ b/kernel/trace/trace_stack.c
71148 @@ -53,7 +53,7 @@ static inline void check_stack(void)
71149 return;
71150
71151 /* we do not handle interrupt stacks yet */
71152 - if (!object_is_on_stack(&this_size))
71153 + if (!object_starts_on_stack(&this_size))
71154 return;
71155
71156 local_irq_save(flags);
71157 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
71158 index 2016347..070fbbe 100644
71159 --- a/kernel/workqueue.c
71160 +++ b/kernel/workqueue.c
71161 @@ -3451,7 +3451,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
71162 */
71163 worker_flags |= WORKER_REBIND;
71164 worker_flags &= ~WORKER_ROGUE;
71165 - ACCESS_ONCE(worker->flags) = worker_flags;
71166 + ACCESS_ONCE_RW(worker->flags) = worker_flags;
71167
71168 /* queue rebind_work, wq doesn't matter, use the default one */
71169 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
71170 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
71171 index ff5bdee..e68c124 100644
71172 --- a/lib/Kconfig.debug
71173 +++ b/lib/Kconfig.debug
71174 @@ -1165,6 +1165,7 @@ config LATENCYTOP
71175 depends on DEBUG_KERNEL
71176 depends on STACKTRACE_SUPPORT
71177 depends on PROC_FS
71178 + depends on !GRKERNSEC_HIDESYM
71179 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
71180 select KALLSYMS
71181 select KALLSYMS_ALL
71182 @@ -1180,7 +1181,7 @@ source kernel/trace/Kconfig
71183
71184 config PROVIDE_OHCI1394_DMA_INIT
71185 bool "Remote debugging over FireWire early on boot"
71186 - depends on PCI && X86
71187 + depends on PCI && X86 && !GRKERNSEC
71188 help
71189 If you want to debug problems which hang or crash the kernel early
71190 on boot and the crashing machine has a FireWire port, you can use
71191 @@ -1209,7 +1210,7 @@ config PROVIDE_OHCI1394_DMA_INIT
71192
71193 config FIREWIRE_OHCI_REMOTE_DMA
71194 bool "Remote debugging over FireWire with firewire-ohci"
71195 - depends on FIREWIRE_OHCI
71196 + depends on FIREWIRE_OHCI && !GRKERNSEC
71197 help
71198 This option lets you use the FireWire bus for remote debugging
71199 with help of the firewire-ohci driver. It enables unfiltered
71200 diff --git a/lib/bitmap.c b/lib/bitmap.c
71201 index 06fdfa1..97c5c7d 100644
71202 --- a/lib/bitmap.c
71203 +++ b/lib/bitmap.c
71204 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
71205 {
71206 int c, old_c, totaldigits, ndigits, nchunks, nbits;
71207 u32 chunk;
71208 - const char __user __force *ubuf = (const char __user __force *)buf;
71209 + const char __user *ubuf = (const char __force_user *)buf;
71210
71211 bitmap_zero(maskp, nmaskbits);
71212
71213 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
71214 {
71215 if (!access_ok(VERIFY_READ, ubuf, ulen))
71216 return -EFAULT;
71217 - return __bitmap_parse((const char __force *)ubuf,
71218 + return __bitmap_parse((const char __force_kernel *)ubuf,
71219 ulen, 1, maskp, nmaskbits);
71220
71221 }
71222 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
71223 {
71224 unsigned a, b;
71225 int c, old_c, totaldigits;
71226 - const char __user __force *ubuf = (const char __user __force *)buf;
71227 + const char __user *ubuf = (const char __force_user *)buf;
71228 int exp_digit, in_range;
71229
71230 totaldigits = c = 0;
71231 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
71232 {
71233 if (!access_ok(VERIFY_READ, ubuf, ulen))
71234 return -EFAULT;
71235 - return __bitmap_parselist((const char __force *)ubuf,
71236 + return __bitmap_parselist((const char __force_kernel *)ubuf,
71237 ulen, 1, maskp, nmaskbits);
71238 }
71239 EXPORT_SYMBOL(bitmap_parselist_user);
71240 diff --git a/lib/bug.c b/lib/bug.c
71241 index a28c141..2bd3d95 100644
71242 --- a/lib/bug.c
71243 +++ b/lib/bug.c
71244 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
71245 return BUG_TRAP_TYPE_NONE;
71246
71247 bug = find_bug(bugaddr);
71248 + if (!bug)
71249 + return BUG_TRAP_TYPE_NONE;
71250
71251 file = NULL;
71252 line = 0;
71253 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
71254 index d11808c..dc2d6f8 100644
71255 --- a/lib/debugobjects.c
71256 +++ b/lib/debugobjects.c
71257 @@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
71258 if (limit > 4)
71259 return;
71260
71261 - is_on_stack = object_is_on_stack(addr);
71262 + is_on_stack = object_starts_on_stack(addr);
71263 if (is_on_stack == onstack)
71264 return;
71265
71266 diff --git a/lib/devres.c b/lib/devres.c
71267 index 80b9c76..9e32279 100644
71268 --- a/lib/devres.c
71269 +++ b/lib/devres.c
71270 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
71271 void devm_iounmap(struct device *dev, void __iomem *addr)
71272 {
71273 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
71274 - (void *)addr));
71275 + (void __force *)addr));
71276 iounmap(addr);
71277 }
71278 EXPORT_SYMBOL(devm_iounmap);
71279 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
71280 {
71281 ioport_unmap(addr);
71282 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
71283 - devm_ioport_map_match, (void *)addr));
71284 + devm_ioport_map_match, (void __force *)addr));
71285 }
71286 EXPORT_SYMBOL(devm_ioport_unmap);
71287
71288 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
71289 index 66ce414..6f0a0dd 100644
71290 --- a/lib/dma-debug.c
71291 +++ b/lib/dma-debug.c
71292 @@ -924,7 +924,7 @@ out:
71293
71294 static void check_for_stack(struct device *dev, void *addr)
71295 {
71296 - if (object_is_on_stack(addr))
71297 + if (object_starts_on_stack(addr))
71298 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
71299 "stack [addr=%p]\n", addr);
71300 }
71301 diff --git a/lib/inflate.c b/lib/inflate.c
71302 index 013a761..c28f3fc 100644
71303 --- a/lib/inflate.c
71304 +++ b/lib/inflate.c
71305 @@ -269,7 +269,7 @@ static void free(void *where)
71306 malloc_ptr = free_mem_ptr;
71307 }
71308 #else
71309 -#define malloc(a) kmalloc(a, GFP_KERNEL)
71310 +#define malloc(a) kmalloc((a), GFP_KERNEL)
71311 #define free(a) kfree(a)
71312 #endif
71313
71314 diff --git a/lib/ioremap.c b/lib/ioremap.c
71315 index 0c9216c..863bd89 100644
71316 --- a/lib/ioremap.c
71317 +++ b/lib/ioremap.c
71318 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
71319 unsigned long next;
71320
71321 phys_addr -= addr;
71322 - pmd = pmd_alloc(&init_mm, pud, addr);
71323 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
71324 if (!pmd)
71325 return -ENOMEM;
71326 do {
71327 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
71328 unsigned long next;
71329
71330 phys_addr -= addr;
71331 - pud = pud_alloc(&init_mm, pgd, addr);
71332 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
71333 if (!pud)
71334 return -ENOMEM;
71335 do {
71336 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
71337 index bd2bea9..6b3c95e 100644
71338 --- a/lib/is_single_threaded.c
71339 +++ b/lib/is_single_threaded.c
71340 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
71341 struct task_struct *p, *t;
71342 bool ret;
71343
71344 + if (!mm)
71345 + return true;
71346 +
71347 if (atomic_read(&task->signal->live) != 1)
71348 return false;
71349
71350 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
71351 index e796429..6e38f9f 100644
71352 --- a/lib/radix-tree.c
71353 +++ b/lib/radix-tree.c
71354 @@ -92,7 +92,7 @@ struct radix_tree_preload {
71355 int nr;
71356 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
71357 };
71358 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
71359 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
71360
71361 static inline void *ptr_to_indirect(void *ptr)
71362 {
71363 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
71364 index 598a73e..5c5aeb5 100644
71365 --- a/lib/vsprintf.c
71366 +++ b/lib/vsprintf.c
71367 @@ -16,6 +16,9 @@
71368 * - scnprintf and vscnprintf
71369 */
71370
71371 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71372 +#define __INCLUDED_BY_HIDESYM 1
71373 +#endif
71374 #include <stdarg.h>
71375 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
71376 #include <linux/types.h>
71377 @@ -536,7 +539,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
71378 char sym[KSYM_SYMBOL_LEN];
71379 if (ext == 'B')
71380 sprint_backtrace(sym, value);
71381 - else if (ext != 'f' && ext != 's')
71382 + else if (ext != 'f' && ext != 's' && ext != 'a')
71383 sprint_symbol(sym, value);
71384 else
71385 sprint_symbol_no_offset(sym, value);
71386 @@ -912,7 +915,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
71387 return number(buf, end, *(const netdev_features_t *)addr, spec);
71388 }
71389
71390 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71391 +int kptr_restrict __read_mostly = 2;
71392 +#else
71393 int kptr_restrict __read_mostly;
71394 +#endif
71395
71396 /*
71397 * Show a '%p' thing. A kernel extension is that the '%p' is followed
71398 @@ -926,6 +933,8 @@ int kptr_restrict __read_mostly;
71399 * - 'S' For symbolic direct pointers with offset
71400 * - 's' For symbolic direct pointers without offset
71401 * - 'B' For backtraced symbolic direct pointers with offset
71402 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
71403 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
71404 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
71405 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
71406 * - 'M' For a 6-byte MAC address, it prints the address in the
71407 @@ -973,12 +982,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71408
71409 if (!ptr && *fmt != 'K') {
71410 /*
71411 - * Print (null) with the same width as a pointer so it makes
71412 + * Print (nil) with the same width as a pointer so it makes
71413 * tabular output look nice.
71414 */
71415 if (spec.field_width == -1)
71416 spec.field_width = default_width;
71417 - return string(buf, end, "(null)", spec);
71418 + return string(buf, end, "(nil)", spec);
71419 }
71420
71421 switch (*fmt) {
71422 @@ -988,6 +997,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71423 /* Fallthrough */
71424 case 'S':
71425 case 's':
71426 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71427 + break;
71428 +#else
71429 + return symbol_string(buf, end, ptr, spec, *fmt);
71430 +#endif
71431 + case 'A':
71432 + case 'a':
71433 case 'B':
71434 return symbol_string(buf, end, ptr, spec, *fmt);
71435 case 'R':
71436 @@ -1025,6 +1041,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71437 va_end(va);
71438 return buf;
71439 }
71440 + case 'P':
71441 + break;
71442 case 'K':
71443 /*
71444 * %pK cannot be used in IRQ context because its test
71445 @@ -1048,6 +1066,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71446 }
71447 break;
71448 }
71449 +
71450 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71451 + /* 'P' = approved pointers to copy to userland,
71452 + as in the /proc/kallsyms case, as we make it display nothing
71453 + for non-root users, and the real contents for root users
71454 + Also ignore 'K' pointers, since we force their NULLing for non-root users
71455 + above
71456 + */
71457 + if (ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
71458 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
71459 + dump_stack();
71460 + ptr = NULL;
71461 + }
71462 +#endif
71463 +
71464 spec.flags |= SMALL;
71465 if (spec.field_width == -1) {
71466 spec.field_width = default_width;
71467 @@ -1759,11 +1792,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71468 typeof(type) value; \
71469 if (sizeof(type) == 8) { \
71470 args = PTR_ALIGN(args, sizeof(u32)); \
71471 - *(u32 *)&value = *(u32 *)args; \
71472 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
71473 + *(u32 *)&value = *(const u32 *)args; \
71474 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
71475 } else { \
71476 args = PTR_ALIGN(args, sizeof(type)); \
71477 - value = *(typeof(type) *)args; \
71478 + value = *(const typeof(type) *)args; \
71479 } \
71480 args += sizeof(type); \
71481 value; \
71482 @@ -1826,7 +1859,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71483 case FORMAT_TYPE_STR: {
71484 const char *str_arg = args;
71485 args += strlen(str_arg) + 1;
71486 - str = string(str, end, (char *)str_arg, spec);
71487 + str = string(str, end, str_arg, spec);
71488 break;
71489 }
71490
71491 diff --git a/localversion-grsec b/localversion-grsec
71492 new file mode 100644
71493 index 0000000..7cd6065
71494 --- /dev/null
71495 +++ b/localversion-grsec
71496 @@ -0,0 +1 @@
71497 +-grsec
71498 diff --git a/mm/Kconfig b/mm/Kconfig
71499 index 82fed4e..979e814 100644
71500 --- a/mm/Kconfig
71501 +++ b/mm/Kconfig
71502 @@ -247,10 +247,10 @@ config KSM
71503 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
71504
71505 config DEFAULT_MMAP_MIN_ADDR
71506 - int "Low address space to protect from user allocation"
71507 + int "Low address space to protect from user allocation"
71508 depends on MMU
71509 - default 4096
71510 - help
71511 + default 65536
71512 + help
71513 This is the portion of low virtual memory which should be protected
71514 from userspace allocation. Keeping a user from writing to low pages
71515 can help reduce the impact of kernel NULL pointer bugs.
71516 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
71517
71518 config HWPOISON_INJECT
71519 tristate "HWPoison pages injector"
71520 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
71521 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
71522 select PROC_PAGE_MONITOR
71523
71524 config NOMMU_INITIAL_TRIM_EXCESS
71525 diff --git a/mm/filemap.c b/mm/filemap.c
71526 index a4a5260..6151dc5 100644
71527 --- a/mm/filemap.c
71528 +++ b/mm/filemap.c
71529 @@ -1723,7 +1723,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
71530 struct address_space *mapping = file->f_mapping;
71531
71532 if (!mapping->a_ops->readpage)
71533 - return -ENOEXEC;
71534 + return -ENODEV;
71535 file_accessed(file);
71536 vma->vm_ops = &generic_file_vm_ops;
71537 vma->vm_flags |= VM_CAN_NONLINEAR;
71538 @@ -2064,6 +2064,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
71539 *pos = i_size_read(inode);
71540
71541 if (limit != RLIM_INFINITY) {
71542 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71543 if (*pos >= limit) {
71544 send_sig(SIGXFSZ, current, 0);
71545 return -EFBIG;
71546 diff --git a/mm/fremap.c b/mm/fremap.c
71547 index 9ed4fd4..c42648d 100644
71548 --- a/mm/fremap.c
71549 +++ b/mm/fremap.c
71550 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
71551 retry:
71552 vma = find_vma(mm, start);
71553
71554 +#ifdef CONFIG_PAX_SEGMEXEC
71555 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71556 + goto out;
71557 +#endif
71558 +
71559 /*
71560 * Make sure the vma is shared, that it supports prefaulting,
71561 * and that the remapped range is valid and fully within
71562 diff --git a/mm/highmem.c b/mm/highmem.c
71563 index 57d82c6..e9e0552 100644
71564 --- a/mm/highmem.c
71565 +++ b/mm/highmem.c
71566 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
71567 * So no dangers, even with speculative execution.
71568 */
71569 page = pte_page(pkmap_page_table[i]);
71570 + pax_open_kernel();
71571 pte_clear(&init_mm, (unsigned long)page_address(page),
71572 &pkmap_page_table[i]);
71573 -
71574 + pax_close_kernel();
71575 set_page_address(page, NULL);
71576 need_flush = 1;
71577 }
71578 @@ -186,9 +187,11 @@ start:
71579 }
71580 }
71581 vaddr = PKMAP_ADDR(last_pkmap_nr);
71582 +
71583 + pax_open_kernel();
71584 set_pte_at(&init_mm, vaddr,
71585 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
71586 -
71587 + pax_close_kernel();
71588 pkmap_count[last_pkmap_nr] = 1;
71589 set_page_address(page, (void *)vaddr);
71590
71591 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
71592 index 57c4b93..24b8f59 100644
71593 --- a/mm/huge_memory.c
71594 +++ b/mm/huge_memory.c
71595 @@ -735,7 +735,7 @@ out:
71596 * run pte_offset_map on the pmd, if an huge pmd could
71597 * materialize from under us from a different thread.
71598 */
71599 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
71600 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71601 return VM_FAULT_OOM;
71602 /* if an huge pmd materialized from under us just retry later */
71603 if (unlikely(pmd_trans_huge(*pmd)))
71604 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
71605 index 19558df..f7743b3 100644
71606 --- a/mm/hugetlb.c
71607 +++ b/mm/hugetlb.c
71608 @@ -2463,6 +2463,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
71609 return 1;
71610 }
71611
71612 +#ifdef CONFIG_PAX_SEGMEXEC
71613 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71614 +{
71615 + struct mm_struct *mm = vma->vm_mm;
71616 + struct vm_area_struct *vma_m;
71617 + unsigned long address_m;
71618 + pte_t *ptep_m;
71619 +
71620 + vma_m = pax_find_mirror_vma(vma);
71621 + if (!vma_m)
71622 + return;
71623 +
71624 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71625 + address_m = address + SEGMEXEC_TASK_SIZE;
71626 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71627 + get_page(page_m);
71628 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
71629 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71630 +}
71631 +#endif
71632 +
71633 /*
71634 * Hugetlb_cow() should be called with page lock of the original hugepage held.
71635 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
71636 @@ -2575,6 +2596,11 @@ retry_avoidcopy:
71637 make_huge_pte(vma, new_page, 1));
71638 page_remove_rmap(old_page);
71639 hugepage_add_new_anon_rmap(new_page, vma, address);
71640 +
71641 +#ifdef CONFIG_PAX_SEGMEXEC
71642 + pax_mirror_huge_pte(vma, address, new_page);
71643 +#endif
71644 +
71645 /* Make the old page be freed below */
71646 new_page = old_page;
71647 mmu_notifier_invalidate_range_end(mm,
71648 @@ -2729,6 +2755,10 @@ retry:
71649 && (vma->vm_flags & VM_SHARED)));
71650 set_huge_pte_at(mm, address, ptep, new_pte);
71651
71652 +#ifdef CONFIG_PAX_SEGMEXEC
71653 + pax_mirror_huge_pte(vma, address, page);
71654 +#endif
71655 +
71656 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71657 /* Optimization, do the COW without a second fault */
71658 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
71659 @@ -2758,6 +2788,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71660 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71661 struct hstate *h = hstate_vma(vma);
71662
71663 +#ifdef CONFIG_PAX_SEGMEXEC
71664 + struct vm_area_struct *vma_m;
71665 +#endif
71666 +
71667 address &= huge_page_mask(h);
71668
71669 ptep = huge_pte_offset(mm, address);
71670 @@ -2771,6 +2805,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71671 VM_FAULT_SET_HINDEX(h - hstates);
71672 }
71673
71674 +#ifdef CONFIG_PAX_SEGMEXEC
71675 + vma_m = pax_find_mirror_vma(vma);
71676 + if (vma_m) {
71677 + unsigned long address_m;
71678 +
71679 + if (vma->vm_start > vma_m->vm_start) {
71680 + address_m = address;
71681 + address -= SEGMEXEC_TASK_SIZE;
71682 + vma = vma_m;
71683 + h = hstate_vma(vma);
71684 + } else
71685 + address_m = address + SEGMEXEC_TASK_SIZE;
71686 +
71687 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71688 + return VM_FAULT_OOM;
71689 + address_m &= HPAGE_MASK;
71690 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71691 + }
71692 +#endif
71693 +
71694 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71695 if (!ptep)
71696 return VM_FAULT_OOM;
71697 diff --git a/mm/internal.h b/mm/internal.h
71698 index 8052379..47029d1 100644
71699 --- a/mm/internal.h
71700 +++ b/mm/internal.h
71701 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
71702 * in mm/page_alloc.c
71703 */
71704 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71705 +extern void free_compound_page(struct page *page);
71706 extern void prep_compound_page(struct page *page, unsigned long order);
71707 #ifdef CONFIG_MEMORY_FAILURE
71708 extern bool is_free_buddy_page(struct page *page);
71709 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
71710 index 45eb621..6ccd8ea 100644
71711 --- a/mm/kmemleak.c
71712 +++ b/mm/kmemleak.c
71713 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
71714
71715 for (i = 0; i < object->trace_len; i++) {
71716 void *ptr = (void *)object->trace[i];
71717 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71718 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71719 }
71720 }
71721
71722 diff --git a/mm/maccess.c b/mm/maccess.c
71723 index d53adf9..03a24bf 100644
71724 --- a/mm/maccess.c
71725 +++ b/mm/maccess.c
71726 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
71727 set_fs(KERNEL_DS);
71728 pagefault_disable();
71729 ret = __copy_from_user_inatomic(dst,
71730 - (__force const void __user *)src, size);
71731 + (const void __force_user *)src, size);
71732 pagefault_enable();
71733 set_fs(old_fs);
71734
71735 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
71736
71737 set_fs(KERNEL_DS);
71738 pagefault_disable();
71739 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71740 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71741 pagefault_enable();
71742 set_fs(old_fs);
71743
71744 diff --git a/mm/madvise.c b/mm/madvise.c
71745 index 14d260f..b2a80fd 100644
71746 --- a/mm/madvise.c
71747 +++ b/mm/madvise.c
71748 @@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
71749 pgoff_t pgoff;
71750 unsigned long new_flags = vma->vm_flags;
71751
71752 +#ifdef CONFIG_PAX_SEGMEXEC
71753 + struct vm_area_struct *vma_m;
71754 +#endif
71755 +
71756 switch (behavior) {
71757 case MADV_NORMAL:
71758 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71759 @@ -119,6 +123,13 @@ success:
71760 /*
71761 * vm_flags is protected by the mmap_sem held in write mode.
71762 */
71763 +
71764 +#ifdef CONFIG_PAX_SEGMEXEC
71765 + vma_m = pax_find_mirror_vma(vma);
71766 + if (vma_m)
71767 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71768 +#endif
71769 +
71770 vma->vm_flags = new_flags;
71771
71772 out:
71773 @@ -177,6 +188,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71774 struct vm_area_struct ** prev,
71775 unsigned long start, unsigned long end)
71776 {
71777 +
71778 +#ifdef CONFIG_PAX_SEGMEXEC
71779 + struct vm_area_struct *vma_m;
71780 +#endif
71781 +
71782 *prev = vma;
71783 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71784 return -EINVAL;
71785 @@ -189,6 +205,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71786 zap_page_range(vma, start, end - start, &details);
71787 } else
71788 zap_page_range(vma, start, end - start, NULL);
71789 +
71790 +#ifdef CONFIG_PAX_SEGMEXEC
71791 + vma_m = pax_find_mirror_vma(vma);
71792 + if (vma_m) {
71793 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71794 + struct zap_details details = {
71795 + .nonlinear_vma = vma_m,
71796 + .last_index = ULONG_MAX,
71797 + };
71798 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71799 + } else
71800 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71801 + }
71802 +#endif
71803 +
71804 return 0;
71805 }
71806
71807 @@ -393,6 +424,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
71808 if (end < start)
71809 goto out;
71810
71811 +#ifdef CONFIG_PAX_SEGMEXEC
71812 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71813 + if (end > SEGMEXEC_TASK_SIZE)
71814 + goto out;
71815 + } else
71816 +#endif
71817 +
71818 + if (end > TASK_SIZE)
71819 + goto out;
71820 +
71821 error = 0;
71822 if (end == start)
71823 goto out;
71824 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
71825 index 6de0d61..da836cf 100644
71826 --- a/mm/memory-failure.c
71827 +++ b/mm/memory-failure.c
71828 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
71829
71830 int sysctl_memory_failure_recovery __read_mostly = 1;
71831
71832 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71833 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71834
71835 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71836
71837 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
71838 pfn, t->comm, t->pid);
71839 si.si_signo = SIGBUS;
71840 si.si_errno = 0;
71841 - si.si_addr = (void *)addr;
71842 + si.si_addr = (void __user *)addr;
71843 #ifdef __ARCH_SI_TRAPNO
71844 si.si_trapno = trapno;
71845 #endif
71846 @@ -1038,7 +1038,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71847 }
71848
71849 nr_pages = 1 << compound_trans_order(hpage);
71850 - atomic_long_add(nr_pages, &mce_bad_pages);
71851 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71852
71853 /*
71854 * We need/can do nothing about count=0 pages.
71855 @@ -1068,7 +1068,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71856 if (!PageHWPoison(hpage)
71857 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71858 || (p != hpage && TestSetPageHWPoison(hpage))) {
71859 - atomic_long_sub(nr_pages, &mce_bad_pages);
71860 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71861 return 0;
71862 }
71863 set_page_hwpoison_huge_page(hpage);
71864 @@ -1126,7 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71865 }
71866 if (hwpoison_filter(p)) {
71867 if (TestClearPageHWPoison(p))
71868 - atomic_long_sub(nr_pages, &mce_bad_pages);
71869 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71870 unlock_page(hpage);
71871 put_page(hpage);
71872 return 0;
71873 @@ -1321,7 +1321,7 @@ int unpoison_memory(unsigned long pfn)
71874 return 0;
71875 }
71876 if (TestClearPageHWPoison(p))
71877 - atomic_long_sub(nr_pages, &mce_bad_pages);
71878 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71879 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71880 return 0;
71881 }
71882 @@ -1335,7 +1335,7 @@ int unpoison_memory(unsigned long pfn)
71883 */
71884 if (TestClearPageHWPoison(page)) {
71885 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71886 - atomic_long_sub(nr_pages, &mce_bad_pages);
71887 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71888 freeit = 1;
71889 if (PageHuge(page))
71890 clear_page_hwpoison_huge_page(page);
71891 @@ -1448,7 +1448,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71892 }
71893 done:
71894 if (!PageHWPoison(hpage))
71895 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71896 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71897 set_page_hwpoison_huge_page(hpage);
71898 dequeue_hwpoisoned_huge_page(hpage);
71899 /* keep elevated page count for bad page */
71900 @@ -1579,7 +1579,7 @@ int soft_offline_page(struct page *page, int flags)
71901 return ret;
71902
71903 done:
71904 - atomic_long_add(1, &mce_bad_pages);
71905 + atomic_long_add_unchecked(1, &mce_bad_pages);
71906 SetPageHWPoison(page);
71907 /* keep elevated page count for bad page */
71908 return ret;
71909 diff --git a/mm/memory.c b/mm/memory.c
71910 index 2466d12..595ed79 100644
71911 --- a/mm/memory.c
71912 +++ b/mm/memory.c
71913 @@ -422,6 +422,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71914 free_pte_range(tlb, pmd, addr);
71915 } while (pmd++, addr = next, addr != end);
71916
71917 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71918 start &= PUD_MASK;
71919 if (start < floor)
71920 return;
71921 @@ -436,6 +437,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71922 pmd = pmd_offset(pud, start);
71923 pud_clear(pud);
71924 pmd_free_tlb(tlb, pmd, start);
71925 +#endif
71926 +
71927 }
71928
71929 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71930 @@ -455,6 +458,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71931 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
71932 } while (pud++, addr = next, addr != end);
71933
71934 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71935 start &= PGDIR_MASK;
71936 if (start < floor)
71937 return;
71938 @@ -469,6 +473,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71939 pud = pud_offset(pgd, start);
71940 pgd_clear(pgd);
71941 pud_free_tlb(tlb, pud, start);
71942 +#endif
71943 +
71944 }
71945
71946 /*
71947 @@ -1602,12 +1608,6 @@ no_page_table:
71948 return page;
71949 }
71950
71951 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71952 -{
71953 - return stack_guard_page_start(vma, addr) ||
71954 - stack_guard_page_end(vma, addr+PAGE_SIZE);
71955 -}
71956 -
71957 /**
71958 * __get_user_pages() - pin user pages in memory
71959 * @tsk: task_struct of target task
71960 @@ -1680,10 +1680,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71961 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71962 i = 0;
71963
71964 - do {
71965 + while (nr_pages) {
71966 struct vm_area_struct *vma;
71967
71968 - vma = find_extend_vma(mm, start);
71969 + vma = find_vma(mm, start);
71970 if (!vma && in_gate_area(mm, start)) {
71971 unsigned long pg = start & PAGE_MASK;
71972 pgd_t *pgd;
71973 @@ -1731,7 +1731,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71974 goto next_page;
71975 }
71976
71977 - if (!vma ||
71978 + if (!vma || start < vma->vm_start ||
71979 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71980 !(vm_flags & vma->vm_flags))
71981 return i ? : -EFAULT;
71982 @@ -1758,11 +1758,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71983 int ret;
71984 unsigned int fault_flags = 0;
71985
71986 - /* For mlock, just skip the stack guard page. */
71987 - if (foll_flags & FOLL_MLOCK) {
71988 - if (stack_guard_page(vma, start))
71989 - goto next_page;
71990 - }
71991 if (foll_flags & FOLL_WRITE)
71992 fault_flags |= FAULT_FLAG_WRITE;
71993 if (nonblocking)
71994 @@ -1836,7 +1831,7 @@ next_page:
71995 start += PAGE_SIZE;
71996 nr_pages--;
71997 } while (nr_pages && start < vma->vm_end);
71998 - } while (nr_pages);
71999 + }
72000 return i;
72001 }
72002 EXPORT_SYMBOL(__get_user_pages);
72003 @@ -2043,6 +2038,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
72004 page_add_file_rmap(page);
72005 set_pte_at(mm, addr, pte, mk_pte(page, prot));
72006
72007 +#ifdef CONFIG_PAX_SEGMEXEC
72008 + pax_mirror_file_pte(vma, addr, page, ptl);
72009 +#endif
72010 +
72011 retval = 0;
72012 pte_unmap_unlock(pte, ptl);
72013 return retval;
72014 @@ -2077,10 +2076,22 @@ out:
72015 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
72016 struct page *page)
72017 {
72018 +
72019 +#ifdef CONFIG_PAX_SEGMEXEC
72020 + struct vm_area_struct *vma_m;
72021 +#endif
72022 +
72023 if (addr < vma->vm_start || addr >= vma->vm_end)
72024 return -EFAULT;
72025 if (!page_count(page))
72026 return -EINVAL;
72027 +
72028 +#ifdef CONFIG_PAX_SEGMEXEC
72029 + vma_m = pax_find_mirror_vma(vma);
72030 + if (vma_m)
72031 + vma_m->vm_flags |= VM_INSERTPAGE;
72032 +#endif
72033 +
72034 vma->vm_flags |= VM_INSERTPAGE;
72035 return insert_page(vma, addr, page, vma->vm_page_prot);
72036 }
72037 @@ -2166,6 +2177,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
72038 unsigned long pfn)
72039 {
72040 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
72041 + BUG_ON(vma->vm_mirror);
72042
72043 if (addr < vma->vm_start || addr >= vma->vm_end)
72044 return -EFAULT;
72045 @@ -2373,7 +2385,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
72046
72047 BUG_ON(pud_huge(*pud));
72048
72049 - pmd = pmd_alloc(mm, pud, addr);
72050 + pmd = (mm == &init_mm) ?
72051 + pmd_alloc_kernel(mm, pud, addr) :
72052 + pmd_alloc(mm, pud, addr);
72053 if (!pmd)
72054 return -ENOMEM;
72055 do {
72056 @@ -2393,7 +2407,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
72057 unsigned long next;
72058 int err;
72059
72060 - pud = pud_alloc(mm, pgd, addr);
72061 + pud = (mm == &init_mm) ?
72062 + pud_alloc_kernel(mm, pgd, addr) :
72063 + pud_alloc(mm, pgd, addr);
72064 if (!pud)
72065 return -ENOMEM;
72066 do {
72067 @@ -2481,6 +2497,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
72068 copy_user_highpage(dst, src, va, vma);
72069 }
72070
72071 +#ifdef CONFIG_PAX_SEGMEXEC
72072 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
72073 +{
72074 + struct mm_struct *mm = vma->vm_mm;
72075 + spinlock_t *ptl;
72076 + pte_t *pte, entry;
72077 +
72078 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
72079 + entry = *pte;
72080 + if (!pte_present(entry)) {
72081 + if (!pte_none(entry)) {
72082 + BUG_ON(pte_file(entry));
72083 + free_swap_and_cache(pte_to_swp_entry(entry));
72084 + pte_clear_not_present_full(mm, address, pte, 0);
72085 + }
72086 + } else {
72087 + struct page *page;
72088 +
72089 + flush_cache_page(vma, address, pte_pfn(entry));
72090 + entry = ptep_clear_flush(vma, address, pte);
72091 + BUG_ON(pte_dirty(entry));
72092 + page = vm_normal_page(vma, address, entry);
72093 + if (page) {
72094 + update_hiwater_rss(mm);
72095 + if (PageAnon(page))
72096 + dec_mm_counter_fast(mm, MM_ANONPAGES);
72097 + else
72098 + dec_mm_counter_fast(mm, MM_FILEPAGES);
72099 + page_remove_rmap(page);
72100 + page_cache_release(page);
72101 + }
72102 + }
72103 + pte_unmap_unlock(pte, ptl);
72104 +}
72105 +
72106 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
72107 + *
72108 + * the ptl of the lower mapped page is held on entry and is not released on exit
72109 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
72110 + */
72111 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
72112 +{
72113 + struct mm_struct *mm = vma->vm_mm;
72114 + unsigned long address_m;
72115 + spinlock_t *ptl_m;
72116 + struct vm_area_struct *vma_m;
72117 + pmd_t *pmd_m;
72118 + pte_t *pte_m, entry_m;
72119 +
72120 + BUG_ON(!page_m || !PageAnon(page_m));
72121 +
72122 + vma_m = pax_find_mirror_vma(vma);
72123 + if (!vma_m)
72124 + return;
72125 +
72126 + BUG_ON(!PageLocked(page_m));
72127 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72128 + address_m = address + SEGMEXEC_TASK_SIZE;
72129 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72130 + pte_m = pte_offset_map(pmd_m, address_m);
72131 + ptl_m = pte_lockptr(mm, pmd_m);
72132 + if (ptl != ptl_m) {
72133 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72134 + if (!pte_none(*pte_m))
72135 + goto out;
72136 + }
72137 +
72138 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
72139 + page_cache_get(page_m);
72140 + page_add_anon_rmap(page_m, vma_m, address_m);
72141 + inc_mm_counter_fast(mm, MM_ANONPAGES);
72142 + set_pte_at(mm, address_m, pte_m, entry_m);
72143 + update_mmu_cache(vma_m, address_m, entry_m);
72144 +out:
72145 + if (ptl != ptl_m)
72146 + spin_unlock(ptl_m);
72147 + pte_unmap(pte_m);
72148 + unlock_page(page_m);
72149 +}
72150 +
72151 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
72152 +{
72153 + struct mm_struct *mm = vma->vm_mm;
72154 + unsigned long address_m;
72155 + spinlock_t *ptl_m;
72156 + struct vm_area_struct *vma_m;
72157 + pmd_t *pmd_m;
72158 + pte_t *pte_m, entry_m;
72159 +
72160 + BUG_ON(!page_m || PageAnon(page_m));
72161 +
72162 + vma_m = pax_find_mirror_vma(vma);
72163 + if (!vma_m)
72164 + return;
72165 +
72166 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72167 + address_m = address + SEGMEXEC_TASK_SIZE;
72168 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72169 + pte_m = pte_offset_map(pmd_m, address_m);
72170 + ptl_m = pte_lockptr(mm, pmd_m);
72171 + if (ptl != ptl_m) {
72172 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72173 + if (!pte_none(*pte_m))
72174 + goto out;
72175 + }
72176 +
72177 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
72178 + page_cache_get(page_m);
72179 + page_add_file_rmap(page_m);
72180 + inc_mm_counter_fast(mm, MM_FILEPAGES);
72181 + set_pte_at(mm, address_m, pte_m, entry_m);
72182 + update_mmu_cache(vma_m, address_m, entry_m);
72183 +out:
72184 + if (ptl != ptl_m)
72185 + spin_unlock(ptl_m);
72186 + pte_unmap(pte_m);
72187 +}
72188 +
72189 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
72190 +{
72191 + struct mm_struct *mm = vma->vm_mm;
72192 + unsigned long address_m;
72193 + spinlock_t *ptl_m;
72194 + struct vm_area_struct *vma_m;
72195 + pmd_t *pmd_m;
72196 + pte_t *pte_m, entry_m;
72197 +
72198 + vma_m = pax_find_mirror_vma(vma);
72199 + if (!vma_m)
72200 + return;
72201 +
72202 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72203 + address_m = address + SEGMEXEC_TASK_SIZE;
72204 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72205 + pte_m = pte_offset_map(pmd_m, address_m);
72206 + ptl_m = pte_lockptr(mm, pmd_m);
72207 + if (ptl != ptl_m) {
72208 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72209 + if (!pte_none(*pte_m))
72210 + goto out;
72211 + }
72212 +
72213 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
72214 + set_pte_at(mm, address_m, pte_m, entry_m);
72215 +out:
72216 + if (ptl != ptl_m)
72217 + spin_unlock(ptl_m);
72218 + pte_unmap(pte_m);
72219 +}
72220 +
72221 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
72222 +{
72223 + struct page *page_m;
72224 + pte_t entry;
72225 +
72226 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
72227 + goto out;
72228 +
72229 + entry = *pte;
72230 + page_m = vm_normal_page(vma, address, entry);
72231 + if (!page_m)
72232 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
72233 + else if (PageAnon(page_m)) {
72234 + if (pax_find_mirror_vma(vma)) {
72235 + pte_unmap_unlock(pte, ptl);
72236 + lock_page(page_m);
72237 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
72238 + if (pte_same(entry, *pte))
72239 + pax_mirror_anon_pte(vma, address, page_m, ptl);
72240 + else
72241 + unlock_page(page_m);
72242 + }
72243 + } else
72244 + pax_mirror_file_pte(vma, address, page_m, ptl);
72245 +
72246 +out:
72247 + pte_unmap_unlock(pte, ptl);
72248 +}
72249 +#endif
72250 +
72251 /*
72252 * This routine handles present pages, when users try to write
72253 * to a shared page. It is done by copying the page to a new address
72254 @@ -2692,6 +2888,12 @@ gotten:
72255 */
72256 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72257 if (likely(pte_same(*page_table, orig_pte))) {
72258 +
72259 +#ifdef CONFIG_PAX_SEGMEXEC
72260 + if (pax_find_mirror_vma(vma))
72261 + BUG_ON(!trylock_page(new_page));
72262 +#endif
72263 +
72264 if (old_page) {
72265 if (!PageAnon(old_page)) {
72266 dec_mm_counter_fast(mm, MM_FILEPAGES);
72267 @@ -2743,6 +2945,10 @@ gotten:
72268 page_remove_rmap(old_page);
72269 }
72270
72271 +#ifdef CONFIG_PAX_SEGMEXEC
72272 + pax_mirror_anon_pte(vma, address, new_page, ptl);
72273 +#endif
72274 +
72275 /* Free the old page.. */
72276 new_page = old_page;
72277 ret |= VM_FAULT_WRITE;
72278 @@ -3022,6 +3228,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72279 swap_free(entry);
72280 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
72281 try_to_free_swap(page);
72282 +
72283 +#ifdef CONFIG_PAX_SEGMEXEC
72284 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
72285 +#endif
72286 +
72287 unlock_page(page);
72288 if (swapcache) {
72289 /*
72290 @@ -3045,6 +3256,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72291
72292 /* No need to invalidate - it was non-present before */
72293 update_mmu_cache(vma, address, page_table);
72294 +
72295 +#ifdef CONFIG_PAX_SEGMEXEC
72296 + pax_mirror_anon_pte(vma, address, page, ptl);
72297 +#endif
72298 +
72299 unlock:
72300 pte_unmap_unlock(page_table, ptl);
72301 out:
72302 @@ -3064,40 +3280,6 @@ out_release:
72303 }
72304
72305 /*
72306 - * This is like a special single-page "expand_{down|up}wards()",
72307 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
72308 - * doesn't hit another vma.
72309 - */
72310 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
72311 -{
72312 - address &= PAGE_MASK;
72313 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
72314 - struct vm_area_struct *prev = vma->vm_prev;
72315 -
72316 - /*
72317 - * Is there a mapping abutting this one below?
72318 - *
72319 - * That's only ok if it's the same stack mapping
72320 - * that has gotten split..
72321 - */
72322 - if (prev && prev->vm_end == address)
72323 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
72324 -
72325 - expand_downwards(vma, address - PAGE_SIZE);
72326 - }
72327 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
72328 - struct vm_area_struct *next = vma->vm_next;
72329 -
72330 - /* As VM_GROWSDOWN but s/below/above/ */
72331 - if (next && next->vm_start == address + PAGE_SIZE)
72332 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
72333 -
72334 - expand_upwards(vma, address + PAGE_SIZE);
72335 - }
72336 - return 0;
72337 -}
72338 -
72339 -/*
72340 * We enter with non-exclusive mmap_sem (to exclude vma changes,
72341 * but allow concurrent faults), and pte mapped but not yet locked.
72342 * We return with mmap_sem still held, but pte unmapped and unlocked.
72343 @@ -3106,27 +3288,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72344 unsigned long address, pte_t *page_table, pmd_t *pmd,
72345 unsigned int flags)
72346 {
72347 - struct page *page;
72348 + struct page *page = NULL;
72349 spinlock_t *ptl;
72350 pte_t entry;
72351
72352 - pte_unmap(page_table);
72353 -
72354 - /* Check if we need to add a guard page to the stack */
72355 - if (check_stack_guard_page(vma, address) < 0)
72356 - return VM_FAULT_SIGBUS;
72357 -
72358 - /* Use the zero-page for reads */
72359 if (!(flags & FAULT_FLAG_WRITE)) {
72360 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
72361 vma->vm_page_prot));
72362 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72363 + ptl = pte_lockptr(mm, pmd);
72364 + spin_lock(ptl);
72365 if (!pte_none(*page_table))
72366 goto unlock;
72367 goto setpte;
72368 }
72369
72370 /* Allocate our own private page. */
72371 + pte_unmap(page_table);
72372 +
72373 if (unlikely(anon_vma_prepare(vma)))
72374 goto oom;
72375 page = alloc_zeroed_user_highpage_movable(vma, address);
72376 @@ -3145,6 +3323,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72377 if (!pte_none(*page_table))
72378 goto release;
72379
72380 +#ifdef CONFIG_PAX_SEGMEXEC
72381 + if (pax_find_mirror_vma(vma))
72382 + BUG_ON(!trylock_page(page));
72383 +#endif
72384 +
72385 inc_mm_counter_fast(mm, MM_ANONPAGES);
72386 page_add_new_anon_rmap(page, vma, address);
72387 setpte:
72388 @@ -3152,6 +3335,12 @@ setpte:
72389
72390 /* No need to invalidate - it was non-present before */
72391 update_mmu_cache(vma, address, page_table);
72392 +
72393 +#ifdef CONFIG_PAX_SEGMEXEC
72394 + if (page)
72395 + pax_mirror_anon_pte(vma, address, page, ptl);
72396 +#endif
72397 +
72398 unlock:
72399 pte_unmap_unlock(page_table, ptl);
72400 return 0;
72401 @@ -3295,6 +3484,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72402 */
72403 /* Only go through if we didn't race with anybody else... */
72404 if (likely(pte_same(*page_table, orig_pte))) {
72405 +
72406 +#ifdef CONFIG_PAX_SEGMEXEC
72407 + if (anon && pax_find_mirror_vma(vma))
72408 + BUG_ON(!trylock_page(page));
72409 +#endif
72410 +
72411 flush_icache_page(vma, page);
72412 entry = mk_pte(page, vma->vm_page_prot);
72413 if (flags & FAULT_FLAG_WRITE)
72414 @@ -3314,6 +3509,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72415
72416 /* no need to invalidate: a not-present page won't be cached */
72417 update_mmu_cache(vma, address, page_table);
72418 +
72419 +#ifdef CONFIG_PAX_SEGMEXEC
72420 + if (anon)
72421 + pax_mirror_anon_pte(vma, address, page, ptl);
72422 + else
72423 + pax_mirror_file_pte(vma, address, page, ptl);
72424 +#endif
72425 +
72426 } else {
72427 if (cow_page)
72428 mem_cgroup_uncharge_page(cow_page);
72429 @@ -3467,6 +3670,12 @@ int handle_pte_fault(struct mm_struct *mm,
72430 if (flags & FAULT_FLAG_WRITE)
72431 flush_tlb_fix_spurious_fault(vma, address);
72432 }
72433 +
72434 +#ifdef CONFIG_PAX_SEGMEXEC
72435 + pax_mirror_pte(vma, address, pte, pmd, ptl);
72436 + return 0;
72437 +#endif
72438 +
72439 unlock:
72440 pte_unmap_unlock(pte, ptl);
72441 return 0;
72442 @@ -3483,6 +3692,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72443 pmd_t *pmd;
72444 pte_t *pte;
72445
72446 +#ifdef CONFIG_PAX_SEGMEXEC
72447 + struct vm_area_struct *vma_m;
72448 +#endif
72449 +
72450 __set_current_state(TASK_RUNNING);
72451
72452 count_vm_event(PGFAULT);
72453 @@ -3494,6 +3707,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72454 if (unlikely(is_vm_hugetlb_page(vma)))
72455 return hugetlb_fault(mm, vma, address, flags);
72456
72457 +#ifdef CONFIG_PAX_SEGMEXEC
72458 + vma_m = pax_find_mirror_vma(vma);
72459 + if (vma_m) {
72460 + unsigned long address_m;
72461 + pgd_t *pgd_m;
72462 + pud_t *pud_m;
72463 + pmd_t *pmd_m;
72464 +
72465 + if (vma->vm_start > vma_m->vm_start) {
72466 + address_m = address;
72467 + address -= SEGMEXEC_TASK_SIZE;
72468 + vma = vma_m;
72469 + } else
72470 + address_m = address + SEGMEXEC_TASK_SIZE;
72471 +
72472 + pgd_m = pgd_offset(mm, address_m);
72473 + pud_m = pud_alloc(mm, pgd_m, address_m);
72474 + if (!pud_m)
72475 + return VM_FAULT_OOM;
72476 + pmd_m = pmd_alloc(mm, pud_m, address_m);
72477 + if (!pmd_m)
72478 + return VM_FAULT_OOM;
72479 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
72480 + return VM_FAULT_OOM;
72481 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
72482 + }
72483 +#endif
72484 +
72485 retry:
72486 pgd = pgd_offset(mm, address);
72487 pud = pud_alloc(mm, pgd, address);
72488 @@ -3535,7 +3776,7 @@ retry:
72489 * run pte_offset_map on the pmd, if an huge pmd could
72490 * materialize from under us from a different thread.
72491 */
72492 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
72493 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
72494 return VM_FAULT_OOM;
72495 /* if an huge pmd materialized from under us just retry later */
72496 if (unlikely(pmd_trans_huge(*pmd)))
72497 @@ -3572,6 +3813,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72498 spin_unlock(&mm->page_table_lock);
72499 return 0;
72500 }
72501 +
72502 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72503 +{
72504 + pud_t *new = pud_alloc_one(mm, address);
72505 + if (!new)
72506 + return -ENOMEM;
72507 +
72508 + smp_wmb(); /* See comment in __pte_alloc */
72509 +
72510 + spin_lock(&mm->page_table_lock);
72511 + if (pgd_present(*pgd)) /* Another has populated it */
72512 + pud_free(mm, new);
72513 + else
72514 + pgd_populate_kernel(mm, pgd, new);
72515 + spin_unlock(&mm->page_table_lock);
72516 + return 0;
72517 +}
72518 #endif /* __PAGETABLE_PUD_FOLDED */
72519
72520 #ifndef __PAGETABLE_PMD_FOLDED
72521 @@ -3602,6 +3860,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
72522 spin_unlock(&mm->page_table_lock);
72523 return 0;
72524 }
72525 +
72526 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
72527 +{
72528 + pmd_t *new = pmd_alloc_one(mm, address);
72529 + if (!new)
72530 + return -ENOMEM;
72531 +
72532 + smp_wmb(); /* See comment in __pte_alloc */
72533 +
72534 + spin_lock(&mm->page_table_lock);
72535 +#ifndef __ARCH_HAS_4LEVEL_HACK
72536 + if (pud_present(*pud)) /* Another has populated it */
72537 + pmd_free(mm, new);
72538 + else
72539 + pud_populate_kernel(mm, pud, new);
72540 +#else
72541 + if (pgd_present(*pud)) /* Another has populated it */
72542 + pmd_free(mm, new);
72543 + else
72544 + pgd_populate_kernel(mm, pud, new);
72545 +#endif /* __ARCH_HAS_4LEVEL_HACK */
72546 + spin_unlock(&mm->page_table_lock);
72547 + return 0;
72548 +}
72549 #endif /* __PAGETABLE_PMD_FOLDED */
72550
72551 int make_pages_present(unsigned long addr, unsigned long end)
72552 @@ -3639,7 +3921,7 @@ static int __init gate_vma_init(void)
72553 gate_vma.vm_start = FIXADDR_USER_START;
72554 gate_vma.vm_end = FIXADDR_USER_END;
72555 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72556 - gate_vma.vm_page_prot = __P101;
72557 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72558
72559 return 0;
72560 }
72561 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
72562 index b12b28a..64b57d0 100644
72563 --- a/mm/mempolicy.c
72564 +++ b/mm/mempolicy.c
72565 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72566 unsigned long vmstart;
72567 unsigned long vmend;
72568
72569 +#ifdef CONFIG_PAX_SEGMEXEC
72570 + struct vm_area_struct *vma_m;
72571 +#endif
72572 +
72573 vma = find_vma(mm, start);
72574 if (!vma || vma->vm_start > start)
72575 return -EFAULT;
72576 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72577 if (err)
72578 goto out;
72579 }
72580 +
72581 +#ifdef CONFIG_PAX_SEGMEXEC
72582 + vma_m = pax_find_mirror_vma(vma);
72583 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
72584 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
72585 + if (err)
72586 + goto out;
72587 + }
72588 +#endif
72589 +
72590 }
72591
72592 out:
72593 @@ -1125,6 +1139,17 @@ static long do_mbind(unsigned long start, unsigned long len,
72594
72595 if (end < start)
72596 return -EINVAL;
72597 +
72598 +#ifdef CONFIG_PAX_SEGMEXEC
72599 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72600 + if (end > SEGMEXEC_TASK_SIZE)
72601 + return -EINVAL;
72602 + } else
72603 +#endif
72604 +
72605 + if (end > TASK_SIZE)
72606 + return -EINVAL;
72607 +
72608 if (end == start)
72609 return 0;
72610
72611 @@ -1348,8 +1373,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72612 */
72613 tcred = __task_cred(task);
72614 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
72615 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
72616 - !capable(CAP_SYS_NICE)) {
72617 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
72618 rcu_read_unlock();
72619 err = -EPERM;
72620 goto out_put;
72621 @@ -1380,6 +1404,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72622 goto out;
72623 }
72624
72625 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72626 + if (mm != current->mm &&
72627 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72628 + mmput(mm);
72629 + err = -EPERM;
72630 + goto out;
72631 + }
72632 +#endif
72633 +
72634 err = do_migrate_pages(mm, old, new,
72635 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
72636
72637 diff --git a/mm/mlock.c b/mm/mlock.c
72638 index ef726e8..cd7f1ec 100644
72639 --- a/mm/mlock.c
72640 +++ b/mm/mlock.c
72641 @@ -13,6 +13,7 @@
72642 #include <linux/pagemap.h>
72643 #include <linux/mempolicy.h>
72644 #include <linux/syscalls.h>
72645 +#include <linux/security.h>
72646 #include <linux/sched.h>
72647 #include <linux/export.h>
72648 #include <linux/rmap.h>
72649 @@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
72650 {
72651 unsigned long nstart, end, tmp;
72652 struct vm_area_struct * vma, * prev;
72653 - int error;
72654 + int error = 0;
72655
72656 VM_BUG_ON(start & ~PAGE_MASK);
72657 VM_BUG_ON(len != PAGE_ALIGN(len));
72658 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
72659 return -EINVAL;
72660 if (end == start)
72661 return 0;
72662 + if (end > TASK_SIZE)
72663 + return -EINVAL;
72664 +
72665 vma = find_vma(current->mm, start);
72666 if (!vma || vma->vm_start > start)
72667 return -ENOMEM;
72668 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
72669 for (nstart = start ; ; ) {
72670 vm_flags_t newflags;
72671
72672 +#ifdef CONFIG_PAX_SEGMEXEC
72673 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72674 + break;
72675 +#endif
72676 +
72677 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72678
72679 newflags = vma->vm_flags | VM_LOCKED;
72680 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
72681 lock_limit >>= PAGE_SHIFT;
72682
72683 /* check against resource limits */
72684 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72685 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72686 error = do_mlock(start, len, 1);
72687 up_write(&current->mm->mmap_sem);
72688 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
72689 static int do_mlockall(int flags)
72690 {
72691 struct vm_area_struct * vma, * prev = NULL;
72692 - unsigned int def_flags = 0;
72693
72694 if (flags & MCL_FUTURE)
72695 - def_flags = VM_LOCKED;
72696 - current->mm->def_flags = def_flags;
72697 + current->mm->def_flags |= VM_LOCKED;
72698 + else
72699 + current->mm->def_flags &= ~VM_LOCKED;
72700 if (flags == MCL_FUTURE)
72701 goto out;
72702
72703 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72704 vm_flags_t newflags;
72705
72706 +#ifdef CONFIG_PAX_SEGMEXEC
72707 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72708 + break;
72709 +#endif
72710 +
72711 + BUG_ON(vma->vm_end > TASK_SIZE);
72712 newflags = vma->vm_flags | VM_LOCKED;
72713 if (!(flags & MCL_CURRENT))
72714 newflags &= ~VM_LOCKED;
72715 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72716 lock_limit >>= PAGE_SHIFT;
72717
72718 ret = -ENOMEM;
72719 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72720 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72721 capable(CAP_IPC_LOCK))
72722 ret = do_mlockall(flags);
72723 diff --git a/mm/mmap.c b/mm/mmap.c
72724 index fa1f274..86de476 100644
72725 --- a/mm/mmap.c
72726 +++ b/mm/mmap.c
72727 @@ -47,6 +47,16 @@
72728 #define arch_rebalance_pgtables(addr, len) (addr)
72729 #endif
72730
72731 +static inline void verify_mm_writelocked(struct mm_struct *mm)
72732 +{
72733 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72734 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72735 + up_read(&mm->mmap_sem);
72736 + BUG();
72737 + }
72738 +#endif
72739 +}
72740 +
72741 static void unmap_region(struct mm_struct *mm,
72742 struct vm_area_struct *vma, struct vm_area_struct *prev,
72743 unsigned long start, unsigned long end);
72744 @@ -72,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
72745 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72746 *
72747 */
72748 -pgprot_t protection_map[16] = {
72749 +pgprot_t protection_map[16] __read_only = {
72750 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72751 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72752 };
72753
72754 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
72755 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72756 {
72757 - return __pgprot(pgprot_val(protection_map[vm_flags &
72758 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72759 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72760 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72761 +
72762 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72763 + if (!(__supported_pte_mask & _PAGE_NX) &&
72764 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72765 + (vm_flags & (VM_READ | VM_WRITE)))
72766 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72767 +#endif
72768 +
72769 + return prot;
72770 }
72771 EXPORT_SYMBOL(vm_get_page_prot);
72772
72773 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72774 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72775 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72776 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72777 /*
72778 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72779 * other variables. It can be updated by several CPUs frequently.
72780 @@ -229,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72781 struct vm_area_struct *next = vma->vm_next;
72782
72783 might_sleep();
72784 + BUG_ON(vma->vm_mirror);
72785 if (vma->vm_ops && vma->vm_ops->close)
72786 vma->vm_ops->close(vma);
72787 if (vma->vm_file) {
72788 @@ -275,6 +296,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72789 * not page aligned -Ram Gupta
72790 */
72791 rlim = rlimit(RLIMIT_DATA);
72792 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72793 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72794 (mm->end_data - mm->start_data) > rlim)
72795 goto out;
72796 @@ -708,6 +730,12 @@ static int
72797 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72798 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72799 {
72800 +
72801 +#ifdef CONFIG_PAX_SEGMEXEC
72802 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72803 + return 0;
72804 +#endif
72805 +
72806 if (is_mergeable_vma(vma, file, vm_flags) &&
72807 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72808 if (vma->vm_pgoff == vm_pgoff)
72809 @@ -727,6 +755,12 @@ static int
72810 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72811 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72812 {
72813 +
72814 +#ifdef CONFIG_PAX_SEGMEXEC
72815 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72816 + return 0;
72817 +#endif
72818 +
72819 if (is_mergeable_vma(vma, file, vm_flags) &&
72820 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72821 pgoff_t vm_pglen;
72822 @@ -769,13 +803,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72823 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72824 struct vm_area_struct *prev, unsigned long addr,
72825 unsigned long end, unsigned long vm_flags,
72826 - struct anon_vma *anon_vma, struct file *file,
72827 + struct anon_vma *anon_vma, struct file *file,
72828 pgoff_t pgoff, struct mempolicy *policy)
72829 {
72830 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72831 struct vm_area_struct *area, *next;
72832 int err;
72833
72834 +#ifdef CONFIG_PAX_SEGMEXEC
72835 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72836 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72837 +
72838 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72839 +#endif
72840 +
72841 /*
72842 * We later require that vma->vm_flags == vm_flags,
72843 * so this tests vma->vm_flags & VM_SPECIAL, too.
72844 @@ -791,6 +832,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72845 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72846 next = next->vm_next;
72847
72848 +#ifdef CONFIG_PAX_SEGMEXEC
72849 + if (prev)
72850 + prev_m = pax_find_mirror_vma(prev);
72851 + if (area)
72852 + area_m = pax_find_mirror_vma(area);
72853 + if (next)
72854 + next_m = pax_find_mirror_vma(next);
72855 +#endif
72856 +
72857 /*
72858 * Can it merge with the predecessor?
72859 */
72860 @@ -810,9 +860,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72861 /* cases 1, 6 */
72862 err = vma_adjust(prev, prev->vm_start,
72863 next->vm_end, prev->vm_pgoff, NULL);
72864 - } else /* cases 2, 5, 7 */
72865 +
72866 +#ifdef CONFIG_PAX_SEGMEXEC
72867 + if (!err && prev_m)
72868 + err = vma_adjust(prev_m, prev_m->vm_start,
72869 + next_m->vm_end, prev_m->vm_pgoff, NULL);
72870 +#endif
72871 +
72872 + } else { /* cases 2, 5, 7 */
72873 err = vma_adjust(prev, prev->vm_start,
72874 end, prev->vm_pgoff, NULL);
72875 +
72876 +#ifdef CONFIG_PAX_SEGMEXEC
72877 + if (!err && prev_m)
72878 + err = vma_adjust(prev_m, prev_m->vm_start,
72879 + end_m, prev_m->vm_pgoff, NULL);
72880 +#endif
72881 +
72882 + }
72883 if (err)
72884 return NULL;
72885 khugepaged_enter_vma_merge(prev);
72886 @@ -826,12 +891,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72887 mpol_equal(policy, vma_policy(next)) &&
72888 can_vma_merge_before(next, vm_flags,
72889 anon_vma, file, pgoff+pglen)) {
72890 - if (prev && addr < prev->vm_end) /* case 4 */
72891 + if (prev && addr < prev->vm_end) { /* case 4 */
72892 err = vma_adjust(prev, prev->vm_start,
72893 addr, prev->vm_pgoff, NULL);
72894 - else /* cases 3, 8 */
72895 +
72896 +#ifdef CONFIG_PAX_SEGMEXEC
72897 + if (!err && prev_m)
72898 + err = vma_adjust(prev_m, prev_m->vm_start,
72899 + addr_m, prev_m->vm_pgoff, NULL);
72900 +#endif
72901 +
72902 + } else { /* cases 3, 8 */
72903 err = vma_adjust(area, addr, next->vm_end,
72904 next->vm_pgoff - pglen, NULL);
72905 +
72906 +#ifdef CONFIG_PAX_SEGMEXEC
72907 + if (!err && area_m)
72908 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
72909 + next_m->vm_pgoff - pglen, NULL);
72910 +#endif
72911 +
72912 + }
72913 if (err)
72914 return NULL;
72915 khugepaged_enter_vma_merge(area);
72916 @@ -940,14 +1020,11 @@ none:
72917 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72918 struct file *file, long pages)
72919 {
72920 - const unsigned long stack_flags
72921 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72922 -
72923 if (file) {
72924 mm->shared_vm += pages;
72925 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72926 mm->exec_vm += pages;
72927 - } else if (flags & stack_flags)
72928 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72929 mm->stack_vm += pages;
72930 if (flags & (VM_RESERVED|VM_IO))
72931 mm->reserved_vm += pages;
72932 @@ -985,7 +1062,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72933 * (the exception is when the underlying filesystem is noexec
72934 * mounted, in which case we dont add PROT_EXEC.)
72935 */
72936 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72937 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72938 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72939 prot |= PROT_EXEC;
72940
72941 @@ -1011,7 +1088,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72942 /* Obtain the address to map to. we verify (or select) it and ensure
72943 * that it represents a valid section of the address space.
72944 */
72945 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
72946 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72947 if (addr & ~PAGE_MASK)
72948 return addr;
72949
72950 @@ -1022,6 +1099,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72951 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72952 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72953
72954 +#ifdef CONFIG_PAX_MPROTECT
72955 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72956 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72957 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72958 + gr_log_rwxmmap(file);
72959 +
72960 +#ifdef CONFIG_PAX_EMUPLT
72961 + vm_flags &= ~VM_EXEC;
72962 +#else
72963 + return -EPERM;
72964 +#endif
72965 +
72966 + }
72967 +
72968 + if (!(vm_flags & VM_EXEC))
72969 + vm_flags &= ~VM_MAYEXEC;
72970 +#else
72971 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72972 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72973 +#endif
72974 + else
72975 + vm_flags &= ~VM_MAYWRITE;
72976 + }
72977 +#endif
72978 +
72979 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72980 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72981 + vm_flags &= ~VM_PAGEEXEC;
72982 +#endif
72983 +
72984 if (flags & MAP_LOCKED)
72985 if (!can_do_mlock())
72986 return -EPERM;
72987 @@ -1033,6 +1140,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72988 locked += mm->locked_vm;
72989 lock_limit = rlimit(RLIMIT_MEMLOCK);
72990 lock_limit >>= PAGE_SHIFT;
72991 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72992 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72993 return -EAGAIN;
72994 }
72995 @@ -1099,6 +1207,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72996 }
72997 }
72998
72999 + if (!gr_acl_handle_mmap(file, prot))
73000 + return -EACCES;
73001 +
73002 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
73003 }
73004
73005 @@ -1175,7 +1286,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
73006 vm_flags_t vm_flags = vma->vm_flags;
73007
73008 /* If it was private or non-writable, the write bit is already clear */
73009 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
73010 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
73011 return 0;
73012
73013 /* The backer wishes to know when pages are first written to? */
73014 @@ -1224,14 +1335,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
73015 unsigned long charged = 0;
73016 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
73017
73018 +#ifdef CONFIG_PAX_SEGMEXEC
73019 + struct vm_area_struct *vma_m = NULL;
73020 +#endif
73021 +
73022 + /*
73023 + * mm->mmap_sem is required to protect against another thread
73024 + * changing the mappings in case we sleep.
73025 + */
73026 + verify_mm_writelocked(mm);
73027 +
73028 /* Clear old maps */
73029 error = -ENOMEM;
73030 -munmap_back:
73031 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73032 if (vma && vma->vm_start < addr + len) {
73033 if (do_munmap(mm, addr, len))
73034 return -ENOMEM;
73035 - goto munmap_back;
73036 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73037 + BUG_ON(vma && vma->vm_start < addr + len);
73038 }
73039
73040 /* Check against address space limit. */
73041 @@ -1280,6 +1401,16 @@ munmap_back:
73042 goto unacct_error;
73043 }
73044
73045 +#ifdef CONFIG_PAX_SEGMEXEC
73046 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
73047 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73048 + if (!vma_m) {
73049 + error = -ENOMEM;
73050 + goto free_vma;
73051 + }
73052 + }
73053 +#endif
73054 +
73055 vma->vm_mm = mm;
73056 vma->vm_start = addr;
73057 vma->vm_end = addr + len;
73058 @@ -1304,6 +1435,19 @@ munmap_back:
73059 error = file->f_op->mmap(file, vma);
73060 if (error)
73061 goto unmap_and_free_vma;
73062 +
73063 +#ifdef CONFIG_PAX_SEGMEXEC
73064 + if (vma_m && (vm_flags & VM_EXECUTABLE))
73065 + added_exe_file_vma(mm);
73066 +#endif
73067 +
73068 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
73069 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
73070 + vma->vm_flags |= VM_PAGEEXEC;
73071 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73072 + }
73073 +#endif
73074 +
73075 if (vm_flags & VM_EXECUTABLE)
73076 added_exe_file_vma(mm);
73077
73078 @@ -1341,6 +1485,11 @@ munmap_back:
73079 vma_link(mm, vma, prev, rb_link, rb_parent);
73080 file = vma->vm_file;
73081
73082 +#ifdef CONFIG_PAX_SEGMEXEC
73083 + if (vma_m)
73084 + BUG_ON(pax_mirror_vma(vma_m, vma));
73085 +#endif
73086 +
73087 /* Once vma denies write, undo our temporary denial count */
73088 if (correct_wcount)
73089 atomic_inc(&inode->i_writecount);
73090 @@ -1349,6 +1498,7 @@ out:
73091
73092 mm->total_vm += len >> PAGE_SHIFT;
73093 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
73094 + track_exec_limit(mm, addr, addr + len, vm_flags);
73095 if (vm_flags & VM_LOCKED) {
73096 if (!mlock_vma_pages_range(vma, addr, addr + len))
73097 mm->locked_vm += (len >> PAGE_SHIFT);
73098 @@ -1370,6 +1520,12 @@ unmap_and_free_vma:
73099 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
73100 charged = 0;
73101 free_vma:
73102 +
73103 +#ifdef CONFIG_PAX_SEGMEXEC
73104 + if (vma_m)
73105 + kmem_cache_free(vm_area_cachep, vma_m);
73106 +#endif
73107 +
73108 kmem_cache_free(vm_area_cachep, vma);
73109 unacct_error:
73110 if (charged)
73111 @@ -1377,6 +1533,44 @@ unacct_error:
73112 return error;
73113 }
73114
73115 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
73116 +{
73117 + if (!vma) {
73118 +#ifdef CONFIG_STACK_GROWSUP
73119 + if (addr > sysctl_heap_stack_gap)
73120 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
73121 + else
73122 + vma = find_vma(current->mm, 0);
73123 + if (vma && (vma->vm_flags & VM_GROWSUP))
73124 + return false;
73125 +#endif
73126 + return true;
73127 + }
73128 +
73129 + if (addr + len > vma->vm_start)
73130 + return false;
73131 +
73132 + if (vma->vm_flags & VM_GROWSDOWN)
73133 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
73134 +#ifdef CONFIG_STACK_GROWSUP
73135 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
73136 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
73137 +#endif
73138 +
73139 + return true;
73140 +}
73141 +
73142 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
73143 +{
73144 + if (vma->vm_start < len)
73145 + return -ENOMEM;
73146 + if (!(vma->vm_flags & VM_GROWSDOWN))
73147 + return vma->vm_start - len;
73148 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
73149 + return vma->vm_start - len - sysctl_heap_stack_gap;
73150 + return -ENOMEM;
73151 +}
73152 +
73153 /* Get an address range which is currently unmapped.
73154 * For shmat() with addr=0.
73155 *
73156 @@ -1403,18 +1597,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
73157 if (flags & MAP_FIXED)
73158 return addr;
73159
73160 +#ifdef CONFIG_PAX_RANDMMAP
73161 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
73162 +#endif
73163 +
73164 if (addr) {
73165 addr = PAGE_ALIGN(addr);
73166 - vma = find_vma(mm, addr);
73167 - if (TASK_SIZE - len >= addr &&
73168 - (!vma || addr + len <= vma->vm_start))
73169 - return addr;
73170 + if (TASK_SIZE - len >= addr) {
73171 + vma = find_vma(mm, addr);
73172 + if (check_heap_stack_gap(vma, addr, len))
73173 + return addr;
73174 + }
73175 }
73176 if (len > mm->cached_hole_size) {
73177 - start_addr = addr = mm->free_area_cache;
73178 + start_addr = addr = mm->free_area_cache;
73179 } else {
73180 - start_addr = addr = TASK_UNMAPPED_BASE;
73181 - mm->cached_hole_size = 0;
73182 + start_addr = addr = mm->mmap_base;
73183 + mm->cached_hole_size = 0;
73184 }
73185
73186 full_search:
73187 @@ -1425,34 +1624,40 @@ full_search:
73188 * Start a new search - just in case we missed
73189 * some holes.
73190 */
73191 - if (start_addr != TASK_UNMAPPED_BASE) {
73192 - addr = TASK_UNMAPPED_BASE;
73193 - start_addr = addr;
73194 + if (start_addr != mm->mmap_base) {
73195 + start_addr = addr = mm->mmap_base;
73196 mm->cached_hole_size = 0;
73197 goto full_search;
73198 }
73199 return -ENOMEM;
73200 }
73201 - if (!vma || addr + len <= vma->vm_start) {
73202 - /*
73203 - * Remember the place where we stopped the search:
73204 - */
73205 - mm->free_area_cache = addr + len;
73206 - return addr;
73207 - }
73208 + if (check_heap_stack_gap(vma, addr, len))
73209 + break;
73210 if (addr + mm->cached_hole_size < vma->vm_start)
73211 mm->cached_hole_size = vma->vm_start - addr;
73212 addr = vma->vm_end;
73213 }
73214 +
73215 + /*
73216 + * Remember the place where we stopped the search:
73217 + */
73218 + mm->free_area_cache = addr + len;
73219 + return addr;
73220 }
73221 #endif
73222
73223 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
73224 {
73225 +
73226 +#ifdef CONFIG_PAX_SEGMEXEC
73227 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73228 + return;
73229 +#endif
73230 +
73231 /*
73232 * Is this a new hole at the lowest possible address?
73233 */
73234 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
73235 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
73236 mm->free_area_cache = addr;
73237 }
73238
73239 @@ -1468,7 +1673,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73240 {
73241 struct vm_area_struct *vma;
73242 struct mm_struct *mm = current->mm;
73243 - unsigned long addr = addr0, start_addr;
73244 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
73245
73246 /* requested length too big for entire address space */
73247 if (len > TASK_SIZE)
73248 @@ -1477,13 +1682,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73249 if (flags & MAP_FIXED)
73250 return addr;
73251
73252 +#ifdef CONFIG_PAX_RANDMMAP
73253 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
73254 +#endif
73255 +
73256 /* requesting a specific address */
73257 if (addr) {
73258 addr = PAGE_ALIGN(addr);
73259 - vma = find_vma(mm, addr);
73260 - if (TASK_SIZE - len >= addr &&
73261 - (!vma || addr + len <= vma->vm_start))
73262 - return addr;
73263 + if (TASK_SIZE - len >= addr) {
73264 + vma = find_vma(mm, addr);
73265 + if (check_heap_stack_gap(vma, addr, len))
73266 + return addr;
73267 + }
73268 }
73269
73270 /* check if free_area_cache is useful for us */
73271 @@ -1507,7 +1717,7 @@ try_again:
73272 * return with success:
73273 */
73274 vma = find_vma(mm, addr);
73275 - if (!vma || addr+len <= vma->vm_start)
73276 + if (check_heap_stack_gap(vma, addr, len))
73277 /* remember the address as a hint for next time */
73278 return (mm->free_area_cache = addr);
73279
73280 @@ -1516,8 +1726,8 @@ try_again:
73281 mm->cached_hole_size = vma->vm_start - addr;
73282
73283 /* try just below the current vma->vm_start */
73284 - addr = vma->vm_start-len;
73285 - } while (len < vma->vm_start);
73286 + addr = skip_heap_stack_gap(vma, len);
73287 + } while (!IS_ERR_VALUE(addr));
73288
73289 fail:
73290 /*
73291 @@ -1540,13 +1750,21 @@ fail:
73292 * can happen with large stack limits and large mmap()
73293 * allocations.
73294 */
73295 + mm->mmap_base = TASK_UNMAPPED_BASE;
73296 +
73297 +#ifdef CONFIG_PAX_RANDMMAP
73298 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73299 + mm->mmap_base += mm->delta_mmap;
73300 +#endif
73301 +
73302 + mm->free_area_cache = mm->mmap_base;
73303 mm->cached_hole_size = ~0UL;
73304 - mm->free_area_cache = TASK_UNMAPPED_BASE;
73305 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
73306 /*
73307 * Restore the topdown base:
73308 */
73309 - mm->free_area_cache = mm->mmap_base;
73310 + mm->mmap_base = base;
73311 + mm->free_area_cache = base;
73312 mm->cached_hole_size = ~0UL;
73313
73314 return addr;
73315 @@ -1555,6 +1773,12 @@ fail:
73316
73317 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73318 {
73319 +
73320 +#ifdef CONFIG_PAX_SEGMEXEC
73321 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73322 + return;
73323 +#endif
73324 +
73325 /*
73326 * Is this a new hole at the highest possible address?
73327 */
73328 @@ -1562,8 +1786,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73329 mm->free_area_cache = addr;
73330
73331 /* dont allow allocations above current base */
73332 - if (mm->free_area_cache > mm->mmap_base)
73333 + if (mm->free_area_cache > mm->mmap_base) {
73334 mm->free_area_cache = mm->mmap_base;
73335 + mm->cached_hole_size = ~0UL;
73336 + }
73337 }
73338
73339 unsigned long
73340 @@ -1662,6 +1888,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
73341 return vma;
73342 }
73343
73344 +#ifdef CONFIG_PAX_SEGMEXEC
73345 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
73346 +{
73347 + struct vm_area_struct *vma_m;
73348 +
73349 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
73350 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
73351 + BUG_ON(vma->vm_mirror);
73352 + return NULL;
73353 + }
73354 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
73355 + vma_m = vma->vm_mirror;
73356 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
73357 + BUG_ON(vma->vm_file != vma_m->vm_file);
73358 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
73359 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
73360 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
73361 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
73362 + return vma_m;
73363 +}
73364 +#endif
73365 +
73366 /*
73367 * Verify that the stack growth is acceptable and
73368 * update accounting. This is shared with both the
73369 @@ -1678,6 +1926,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73370 return -ENOMEM;
73371
73372 /* Stack limit test */
73373 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
73374 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
73375 return -ENOMEM;
73376
73377 @@ -1688,6 +1937,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73378 locked = mm->locked_vm + grow;
73379 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
73380 limit >>= PAGE_SHIFT;
73381 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
73382 if (locked > limit && !capable(CAP_IPC_LOCK))
73383 return -ENOMEM;
73384 }
73385 @@ -1718,37 +1968,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73386 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
73387 * vma is the last one with address > vma->vm_end. Have to extend vma.
73388 */
73389 +#ifndef CONFIG_IA64
73390 +static
73391 +#endif
73392 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73393 {
73394 int error;
73395 + bool locknext;
73396
73397 if (!(vma->vm_flags & VM_GROWSUP))
73398 return -EFAULT;
73399
73400 + /* Also guard against wrapping around to address 0. */
73401 + if (address < PAGE_ALIGN(address+1))
73402 + address = PAGE_ALIGN(address+1);
73403 + else
73404 + return -ENOMEM;
73405 +
73406 /*
73407 * We must make sure the anon_vma is allocated
73408 * so that the anon_vma locking is not a noop.
73409 */
73410 if (unlikely(anon_vma_prepare(vma)))
73411 return -ENOMEM;
73412 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
73413 + if (locknext && anon_vma_prepare(vma->vm_next))
73414 + return -ENOMEM;
73415 vma_lock_anon_vma(vma);
73416 + if (locknext)
73417 + vma_lock_anon_vma(vma->vm_next);
73418
73419 /*
73420 * vma->vm_start/vm_end cannot change under us because the caller
73421 * is required to hold the mmap_sem in read mode. We need the
73422 - * anon_vma lock to serialize against concurrent expand_stacks.
73423 - * Also guard against wrapping around to address 0.
73424 + * anon_vma locks to serialize against concurrent expand_stacks
73425 + * and expand_upwards.
73426 */
73427 - if (address < PAGE_ALIGN(address+4))
73428 - address = PAGE_ALIGN(address+4);
73429 - else {
73430 - vma_unlock_anon_vma(vma);
73431 - return -ENOMEM;
73432 - }
73433 error = 0;
73434
73435 /* Somebody else might have raced and expanded it already */
73436 - if (address > vma->vm_end) {
73437 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
73438 + error = -ENOMEM;
73439 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
73440 unsigned long size, grow;
73441
73442 size = address - vma->vm_start;
73443 @@ -1763,6 +2024,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73444 }
73445 }
73446 }
73447 + if (locknext)
73448 + vma_unlock_anon_vma(vma->vm_next);
73449 vma_unlock_anon_vma(vma);
73450 khugepaged_enter_vma_merge(vma);
73451 return error;
73452 @@ -1776,6 +2039,8 @@ int expand_downwards(struct vm_area_struct *vma,
73453 unsigned long address)
73454 {
73455 int error;
73456 + bool lockprev = false;
73457 + struct vm_area_struct *prev;
73458
73459 /*
73460 * We must make sure the anon_vma is allocated
73461 @@ -1789,6 +2054,15 @@ int expand_downwards(struct vm_area_struct *vma,
73462 if (error)
73463 return error;
73464
73465 + prev = vma->vm_prev;
73466 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
73467 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73468 +#endif
73469 + if (lockprev && anon_vma_prepare(prev))
73470 + return -ENOMEM;
73471 + if (lockprev)
73472 + vma_lock_anon_vma(prev);
73473 +
73474 vma_lock_anon_vma(vma);
73475
73476 /*
73477 @@ -1798,9 +2072,17 @@ int expand_downwards(struct vm_area_struct *vma,
73478 */
73479
73480 /* Somebody else might have raced and expanded it already */
73481 - if (address < vma->vm_start) {
73482 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73483 + error = -ENOMEM;
73484 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73485 unsigned long size, grow;
73486
73487 +#ifdef CONFIG_PAX_SEGMEXEC
73488 + struct vm_area_struct *vma_m;
73489 +
73490 + vma_m = pax_find_mirror_vma(vma);
73491 +#endif
73492 +
73493 size = vma->vm_end - address;
73494 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73495
73496 @@ -1810,11 +2092,22 @@ int expand_downwards(struct vm_area_struct *vma,
73497 if (!error) {
73498 vma->vm_start = address;
73499 vma->vm_pgoff -= grow;
73500 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73501 +
73502 +#ifdef CONFIG_PAX_SEGMEXEC
73503 + if (vma_m) {
73504 + vma_m->vm_start -= grow << PAGE_SHIFT;
73505 + vma_m->vm_pgoff -= grow;
73506 + }
73507 +#endif
73508 +
73509 perf_event_mmap(vma);
73510 }
73511 }
73512 }
73513 vma_unlock_anon_vma(vma);
73514 + if (lockprev)
73515 + vma_unlock_anon_vma(prev);
73516 khugepaged_enter_vma_merge(vma);
73517 return error;
73518 }
73519 @@ -1886,6 +2179,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
73520 do {
73521 long nrpages = vma_pages(vma);
73522
73523 +#ifdef CONFIG_PAX_SEGMEXEC
73524 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73525 + vma = remove_vma(vma);
73526 + continue;
73527 + }
73528 +#endif
73529 +
73530 if (vma->vm_flags & VM_ACCOUNT)
73531 nr_accounted += nrpages;
73532 mm->total_vm -= nrpages;
73533 @@ -1932,6 +2232,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
73534 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
73535 vma->vm_prev = NULL;
73536 do {
73537 +
73538 +#ifdef CONFIG_PAX_SEGMEXEC
73539 + if (vma->vm_mirror) {
73540 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73541 + vma->vm_mirror->vm_mirror = NULL;
73542 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
73543 + vma->vm_mirror = NULL;
73544 + }
73545 +#endif
73546 +
73547 rb_erase(&vma->vm_rb, &mm->mm_rb);
73548 mm->map_count--;
73549 tail_vma = vma;
73550 @@ -1960,14 +2270,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73551 struct vm_area_struct *new;
73552 int err = -ENOMEM;
73553
73554 +#ifdef CONFIG_PAX_SEGMEXEC
73555 + struct vm_area_struct *vma_m, *new_m = NULL;
73556 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
73557 +#endif
73558 +
73559 if (is_vm_hugetlb_page(vma) && (addr &
73560 ~(huge_page_mask(hstate_vma(vma)))))
73561 return -EINVAL;
73562
73563 +#ifdef CONFIG_PAX_SEGMEXEC
73564 + vma_m = pax_find_mirror_vma(vma);
73565 +#endif
73566 +
73567 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73568 if (!new)
73569 goto out_err;
73570
73571 +#ifdef CONFIG_PAX_SEGMEXEC
73572 + if (vma_m) {
73573 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73574 + if (!new_m) {
73575 + kmem_cache_free(vm_area_cachep, new);
73576 + goto out_err;
73577 + }
73578 + }
73579 +#endif
73580 +
73581 /* most fields are the same, copy all, and then fixup */
73582 *new = *vma;
73583
73584 @@ -1980,6 +2309,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73585 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73586 }
73587
73588 +#ifdef CONFIG_PAX_SEGMEXEC
73589 + if (vma_m) {
73590 + *new_m = *vma_m;
73591 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
73592 + new_m->vm_mirror = new;
73593 + new->vm_mirror = new_m;
73594 +
73595 + if (new_below)
73596 + new_m->vm_end = addr_m;
73597 + else {
73598 + new_m->vm_start = addr_m;
73599 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73600 + }
73601 + }
73602 +#endif
73603 +
73604 pol = mpol_dup(vma_policy(vma));
73605 if (IS_ERR(pol)) {
73606 err = PTR_ERR(pol);
73607 @@ -2005,6 +2350,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73608 else
73609 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73610
73611 +#ifdef CONFIG_PAX_SEGMEXEC
73612 + if (!err && vma_m) {
73613 + if (anon_vma_clone(new_m, vma_m))
73614 + goto out_free_mpol;
73615 +
73616 + mpol_get(pol);
73617 + vma_set_policy(new_m, pol);
73618 +
73619 + if (new_m->vm_file) {
73620 + get_file(new_m->vm_file);
73621 + if (vma_m->vm_flags & VM_EXECUTABLE)
73622 + added_exe_file_vma(mm);
73623 + }
73624 +
73625 + if (new_m->vm_ops && new_m->vm_ops->open)
73626 + new_m->vm_ops->open(new_m);
73627 +
73628 + if (new_below)
73629 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73630 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73631 + else
73632 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73633 +
73634 + if (err) {
73635 + if (new_m->vm_ops && new_m->vm_ops->close)
73636 + new_m->vm_ops->close(new_m);
73637 + if (new_m->vm_file) {
73638 + if (vma_m->vm_flags & VM_EXECUTABLE)
73639 + removed_exe_file_vma(mm);
73640 + fput(new_m->vm_file);
73641 + }
73642 + mpol_put(pol);
73643 + }
73644 + }
73645 +#endif
73646 +
73647 /* Success. */
73648 if (!err)
73649 return 0;
73650 @@ -2017,10 +2398,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73651 removed_exe_file_vma(mm);
73652 fput(new->vm_file);
73653 }
73654 - unlink_anon_vmas(new);
73655 out_free_mpol:
73656 mpol_put(pol);
73657 out_free_vma:
73658 +
73659 +#ifdef CONFIG_PAX_SEGMEXEC
73660 + if (new_m) {
73661 + unlink_anon_vmas(new_m);
73662 + kmem_cache_free(vm_area_cachep, new_m);
73663 + }
73664 +#endif
73665 +
73666 + unlink_anon_vmas(new);
73667 kmem_cache_free(vm_area_cachep, new);
73668 out_err:
73669 return err;
73670 @@ -2033,6 +2422,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73671 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73672 unsigned long addr, int new_below)
73673 {
73674 +
73675 +#ifdef CONFIG_PAX_SEGMEXEC
73676 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73677 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73678 + if (mm->map_count >= sysctl_max_map_count-1)
73679 + return -ENOMEM;
73680 + } else
73681 +#endif
73682 +
73683 if (mm->map_count >= sysctl_max_map_count)
73684 return -ENOMEM;
73685
73686 @@ -2044,11 +2442,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73687 * work. This now handles partial unmappings.
73688 * Jeremy Fitzhardinge <jeremy@goop.org>
73689 */
73690 +#ifdef CONFIG_PAX_SEGMEXEC
73691 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73692 {
73693 + int ret = __do_munmap(mm, start, len);
73694 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73695 + return ret;
73696 +
73697 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73698 +}
73699 +
73700 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73701 +#else
73702 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73703 +#endif
73704 +{
73705 unsigned long end;
73706 struct vm_area_struct *vma, *prev, *last;
73707
73708 + /*
73709 + * mm->mmap_sem is required to protect against another thread
73710 + * changing the mappings in case we sleep.
73711 + */
73712 + verify_mm_writelocked(mm);
73713 +
73714 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73715 return -EINVAL;
73716
73717 @@ -2123,6 +2540,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73718 /* Fix up all other VM information */
73719 remove_vma_list(mm, vma);
73720
73721 + track_exec_limit(mm, start, end, 0UL);
73722 +
73723 return 0;
73724 }
73725
73726 @@ -2131,6 +2550,13 @@ int vm_munmap(unsigned long start, size_t len)
73727 int ret;
73728 struct mm_struct *mm = current->mm;
73729
73730 +
73731 +#ifdef CONFIG_PAX_SEGMEXEC
73732 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73733 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
73734 + return -EINVAL;
73735 +#endif
73736 +
73737 down_write(&mm->mmap_sem);
73738 ret = do_munmap(mm, start, len);
73739 up_write(&mm->mmap_sem);
73740 @@ -2144,16 +2570,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73741 return vm_munmap(addr, len);
73742 }
73743
73744 -static inline void verify_mm_writelocked(struct mm_struct *mm)
73745 -{
73746 -#ifdef CONFIG_DEBUG_VM
73747 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73748 - WARN_ON(1);
73749 - up_read(&mm->mmap_sem);
73750 - }
73751 -#endif
73752 -}
73753 -
73754 /*
73755 * this is really a simplified "do_mmap". it only handles
73756 * anonymous maps. eventually we may be able to do some
73757 @@ -2167,6 +2583,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73758 struct rb_node ** rb_link, * rb_parent;
73759 pgoff_t pgoff = addr >> PAGE_SHIFT;
73760 int error;
73761 + unsigned long charged;
73762
73763 len = PAGE_ALIGN(len);
73764 if (!len)
73765 @@ -2174,16 +2591,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73766
73767 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73768
73769 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73770 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73771 + flags &= ~VM_EXEC;
73772 +
73773 +#ifdef CONFIG_PAX_MPROTECT
73774 + if (mm->pax_flags & MF_PAX_MPROTECT)
73775 + flags &= ~VM_MAYEXEC;
73776 +#endif
73777 +
73778 + }
73779 +#endif
73780 +
73781 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73782 if (error & ~PAGE_MASK)
73783 return error;
73784
73785 + charged = len >> PAGE_SHIFT;
73786 +
73787 /*
73788 * mlock MCL_FUTURE?
73789 */
73790 if (mm->def_flags & VM_LOCKED) {
73791 unsigned long locked, lock_limit;
73792 - locked = len >> PAGE_SHIFT;
73793 + locked = charged;
73794 locked += mm->locked_vm;
73795 lock_limit = rlimit(RLIMIT_MEMLOCK);
73796 lock_limit >>= PAGE_SHIFT;
73797 @@ -2200,22 +2631,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73798 /*
73799 * Clear old maps. this also does some error checking for us
73800 */
73801 - munmap_back:
73802 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73803 if (vma && vma->vm_start < addr + len) {
73804 if (do_munmap(mm, addr, len))
73805 return -ENOMEM;
73806 - goto munmap_back;
73807 - }
73808 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73809 + BUG_ON(vma && vma->vm_start < addr + len);
73810 + }
73811
73812 /* Check against address space limits *after* clearing old maps... */
73813 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73814 + if (!may_expand_vm(mm, charged))
73815 return -ENOMEM;
73816
73817 if (mm->map_count > sysctl_max_map_count)
73818 return -ENOMEM;
73819
73820 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
73821 + if (security_vm_enough_memory_mm(mm, charged))
73822 return -ENOMEM;
73823
73824 /* Can we just expand an old private anonymous mapping? */
73825 @@ -2229,7 +2660,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73826 */
73827 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73828 if (!vma) {
73829 - vm_unacct_memory(len >> PAGE_SHIFT);
73830 + vm_unacct_memory(charged);
73831 return -ENOMEM;
73832 }
73833
73834 @@ -2243,11 +2674,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73835 vma_link(mm, vma, prev, rb_link, rb_parent);
73836 out:
73837 perf_event_mmap(vma);
73838 - mm->total_vm += len >> PAGE_SHIFT;
73839 + mm->total_vm += charged;
73840 if (flags & VM_LOCKED) {
73841 if (!mlock_vma_pages_range(vma, addr, addr + len))
73842 - mm->locked_vm += (len >> PAGE_SHIFT);
73843 + mm->locked_vm += charged;
73844 }
73845 + track_exec_limit(mm, addr, addr + len, flags);
73846 return addr;
73847 }
73848
73849 @@ -2305,6 +2737,7 @@ void exit_mmap(struct mm_struct *mm)
73850 while (vma) {
73851 if (vma->vm_flags & VM_ACCOUNT)
73852 nr_accounted += vma_pages(vma);
73853 + vma->vm_mirror = NULL;
73854 vma = remove_vma(vma);
73855 }
73856 vm_unacct_memory(nr_accounted);
73857 @@ -2321,6 +2754,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73858 struct vm_area_struct * __vma, * prev;
73859 struct rb_node ** rb_link, * rb_parent;
73860
73861 +#ifdef CONFIG_PAX_SEGMEXEC
73862 + struct vm_area_struct *vma_m = NULL;
73863 +#endif
73864 +
73865 + if (security_mmap_addr(vma->vm_start))
73866 + return -EPERM;
73867 +
73868 /*
73869 * The vm_pgoff of a purely anonymous vma should be irrelevant
73870 * until its first write fault, when page's anon_vma and index
73871 @@ -2347,7 +2787,21 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73872 if (vma->vm_file && uprobe_mmap(vma))
73873 return -EINVAL;
73874
73875 +#ifdef CONFIG_PAX_SEGMEXEC
73876 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73877 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73878 + if (!vma_m)
73879 + return -ENOMEM;
73880 + }
73881 +#endif
73882 +
73883 vma_link(mm, vma, prev, rb_link, rb_parent);
73884 +
73885 +#ifdef CONFIG_PAX_SEGMEXEC
73886 + if (vma_m)
73887 + BUG_ON(pax_mirror_vma(vma_m, vma));
73888 +#endif
73889 +
73890 return 0;
73891 }
73892
73893 @@ -2366,6 +2820,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73894 struct mempolicy *pol;
73895 bool faulted_in_anon_vma = true;
73896
73897 + BUG_ON(vma->vm_mirror);
73898 +
73899 /*
73900 * If anonymous vma has not yet been faulted, update new pgoff
73901 * to match new location, to increase its chance of merging.
73902 @@ -2437,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73903 return NULL;
73904 }
73905
73906 +#ifdef CONFIG_PAX_SEGMEXEC
73907 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73908 +{
73909 + struct vm_area_struct *prev_m;
73910 + struct rb_node **rb_link_m, *rb_parent_m;
73911 + struct mempolicy *pol_m;
73912 +
73913 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73914 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73915 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73916 + *vma_m = *vma;
73917 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73918 + if (anon_vma_clone(vma_m, vma))
73919 + return -ENOMEM;
73920 + pol_m = vma_policy(vma_m);
73921 + mpol_get(pol_m);
73922 + vma_set_policy(vma_m, pol_m);
73923 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73924 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73925 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73926 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73927 + if (vma_m->vm_file)
73928 + get_file(vma_m->vm_file);
73929 + if (vma_m->vm_ops && vma_m->vm_ops->open)
73930 + vma_m->vm_ops->open(vma_m);
73931 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73932 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73933 + vma_m->vm_mirror = vma;
73934 + vma->vm_mirror = vma_m;
73935 + return 0;
73936 +}
73937 +#endif
73938 +
73939 /*
73940 * Return true if the calling process may expand its vm space by the passed
73941 * number of pages
73942 @@ -2448,6 +2937,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
73943
73944 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
73945
73946 +#ifdef CONFIG_PAX_RANDMMAP
73947 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73948 + cur -= mm->brk_gap;
73949 +#endif
73950 +
73951 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73952 if (cur + npages > lim)
73953 return 0;
73954 return 1;
73955 @@ -2518,6 +3013,22 @@ int install_special_mapping(struct mm_struct *mm,
73956 vma->vm_start = addr;
73957 vma->vm_end = addr + len;
73958
73959 +#ifdef CONFIG_PAX_MPROTECT
73960 + if (mm->pax_flags & MF_PAX_MPROTECT) {
73961 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
73962 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73963 + return -EPERM;
73964 + if (!(vm_flags & VM_EXEC))
73965 + vm_flags &= ~VM_MAYEXEC;
73966 +#else
73967 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73968 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73969 +#endif
73970 + else
73971 + vm_flags &= ~VM_MAYWRITE;
73972 + }
73973 +#endif
73974 +
73975 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73976 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73977
73978 diff --git a/mm/mprotect.c b/mm/mprotect.c
73979 index a409926..8b32e6d 100644
73980 --- a/mm/mprotect.c
73981 +++ b/mm/mprotect.c
73982 @@ -23,10 +23,17 @@
73983 #include <linux/mmu_notifier.h>
73984 #include <linux/migrate.h>
73985 #include <linux/perf_event.h>
73986 +
73987 +#ifdef CONFIG_PAX_MPROTECT
73988 +#include <linux/elf.h>
73989 +#include <linux/binfmts.h>
73990 +#endif
73991 +
73992 #include <asm/uaccess.h>
73993 #include <asm/pgtable.h>
73994 #include <asm/cacheflush.h>
73995 #include <asm/tlbflush.h>
73996 +#include <asm/mmu_context.h>
73997
73998 #ifndef pgprot_modify
73999 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
74000 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
74001 flush_tlb_range(vma, start, end);
74002 }
74003
74004 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74005 +/* called while holding the mmap semaphor for writing except stack expansion */
74006 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
74007 +{
74008 + unsigned long oldlimit, newlimit = 0UL;
74009 +
74010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
74011 + return;
74012 +
74013 + spin_lock(&mm->page_table_lock);
74014 + oldlimit = mm->context.user_cs_limit;
74015 + if ((prot & VM_EXEC) && oldlimit < end)
74016 + /* USER_CS limit moved up */
74017 + newlimit = end;
74018 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
74019 + /* USER_CS limit moved down */
74020 + newlimit = start;
74021 +
74022 + if (newlimit) {
74023 + mm->context.user_cs_limit = newlimit;
74024 +
74025 +#ifdef CONFIG_SMP
74026 + wmb();
74027 + cpus_clear(mm->context.cpu_user_cs_mask);
74028 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
74029 +#endif
74030 +
74031 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
74032 + }
74033 + spin_unlock(&mm->page_table_lock);
74034 + if (newlimit == end) {
74035 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
74036 +
74037 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
74038 + if (is_vm_hugetlb_page(vma))
74039 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
74040 + else
74041 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
74042 + }
74043 +}
74044 +#endif
74045 +
74046 int
74047 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74048 unsigned long start, unsigned long end, unsigned long newflags)
74049 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74050 int error;
74051 int dirty_accountable = 0;
74052
74053 +#ifdef CONFIG_PAX_SEGMEXEC
74054 + struct vm_area_struct *vma_m = NULL;
74055 + unsigned long start_m, end_m;
74056 +
74057 + start_m = start + SEGMEXEC_TASK_SIZE;
74058 + end_m = end + SEGMEXEC_TASK_SIZE;
74059 +#endif
74060 +
74061 if (newflags == oldflags) {
74062 *pprev = vma;
74063 return 0;
74064 }
74065
74066 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
74067 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
74068 +
74069 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
74070 + return -ENOMEM;
74071 +
74072 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
74073 + return -ENOMEM;
74074 + }
74075 +
74076 /*
74077 * If we make a private mapping writable we increase our commit;
74078 * but (without finer accounting) cannot reduce our commit if we
74079 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
74080 }
74081 }
74082
74083 +#ifdef CONFIG_PAX_SEGMEXEC
74084 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
74085 + if (start != vma->vm_start) {
74086 + error = split_vma(mm, vma, start, 1);
74087 + if (error)
74088 + goto fail;
74089 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
74090 + *pprev = (*pprev)->vm_next;
74091 + }
74092 +
74093 + if (end != vma->vm_end) {
74094 + error = split_vma(mm, vma, end, 0);
74095 + if (error)
74096 + goto fail;
74097 + }
74098 +
74099 + if (pax_find_mirror_vma(vma)) {
74100 + error = __do_munmap(mm, start_m, end_m - start_m);
74101 + if (error)
74102 + goto fail;
74103 + } else {
74104 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
74105 + if (!vma_m) {
74106 + error = -ENOMEM;
74107 + goto fail;
74108 + }
74109 + vma->vm_flags = newflags;
74110 + error = pax_mirror_vma(vma_m, vma);
74111 + if (error) {
74112 + vma->vm_flags = oldflags;
74113 + goto fail;
74114 + }
74115 + }
74116 + }
74117 +#endif
74118 +
74119 /*
74120 * First try to merge with previous and/or next vma.
74121 */
74122 @@ -204,9 +307,21 @@ success:
74123 * vm_flags and vm_page_prot are protected by the mmap_sem
74124 * held in write mode.
74125 */
74126 +
74127 +#ifdef CONFIG_PAX_SEGMEXEC
74128 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
74129 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
74130 +#endif
74131 +
74132 vma->vm_flags = newflags;
74133 +
74134 +#ifdef CONFIG_PAX_MPROTECT
74135 + if (mm->binfmt && mm->binfmt->handle_mprotect)
74136 + mm->binfmt->handle_mprotect(vma, newflags);
74137 +#endif
74138 +
74139 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
74140 - vm_get_page_prot(newflags));
74141 + vm_get_page_prot(vma->vm_flags));
74142
74143 if (vma_wants_writenotify(vma)) {
74144 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
74145 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74146 end = start + len;
74147 if (end <= start)
74148 return -ENOMEM;
74149 +
74150 +#ifdef CONFIG_PAX_SEGMEXEC
74151 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
74152 + if (end > SEGMEXEC_TASK_SIZE)
74153 + return -EINVAL;
74154 + } else
74155 +#endif
74156 +
74157 + if (end > TASK_SIZE)
74158 + return -EINVAL;
74159 +
74160 if (!arch_validate_prot(prot))
74161 return -EINVAL;
74162
74163 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74164 /*
74165 * Does the application expect PROT_READ to imply PROT_EXEC:
74166 */
74167 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
74168 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
74169 prot |= PROT_EXEC;
74170
74171 vm_flags = calc_vm_prot_bits(prot);
74172 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74173 if (start > vma->vm_start)
74174 prev = vma;
74175
74176 +#ifdef CONFIG_PAX_MPROTECT
74177 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
74178 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
74179 +#endif
74180 +
74181 for (nstart = start ; ; ) {
74182 unsigned long newflags;
74183
74184 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74185
74186 /* newflags >> 4 shift VM_MAY% in place of VM_% */
74187 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
74188 + if (prot & (PROT_WRITE | PROT_EXEC))
74189 + gr_log_rwxmprotect(vma->vm_file);
74190 +
74191 + error = -EACCES;
74192 + goto out;
74193 + }
74194 +
74195 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
74196 error = -EACCES;
74197 goto out;
74198 }
74199 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74200 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
74201 if (error)
74202 goto out;
74203 +
74204 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
74205 +
74206 nstart = tmp;
74207
74208 if (nstart < prev->vm_end)
74209 diff --git a/mm/mremap.c b/mm/mremap.c
74210 index 21fed20..6822658 100644
74211 --- a/mm/mremap.c
74212 +++ b/mm/mremap.c
74213 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
74214 continue;
74215 pte = ptep_get_and_clear(mm, old_addr, old_pte);
74216 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
74217 +
74218 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74219 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
74220 + pte = pte_exprotect(pte);
74221 +#endif
74222 +
74223 set_pte_at(mm, new_addr, new_pte, pte);
74224 }
74225
74226 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
74227 if (is_vm_hugetlb_page(vma))
74228 goto Einval;
74229
74230 +#ifdef CONFIG_PAX_SEGMEXEC
74231 + if (pax_find_mirror_vma(vma))
74232 + goto Einval;
74233 +#endif
74234 +
74235 /* We can't remap across vm area boundaries */
74236 if (old_len > vma->vm_end - addr)
74237 goto Efault;
74238 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
74239 unsigned long ret = -EINVAL;
74240 unsigned long charged = 0;
74241 unsigned long map_flags;
74242 + unsigned long pax_task_size = TASK_SIZE;
74243
74244 if (new_addr & ~PAGE_MASK)
74245 goto out;
74246
74247 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
74248 +#ifdef CONFIG_PAX_SEGMEXEC
74249 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
74250 + pax_task_size = SEGMEXEC_TASK_SIZE;
74251 +#endif
74252 +
74253 + pax_task_size -= PAGE_SIZE;
74254 +
74255 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
74256 goto out;
74257
74258 /* Check if the location we're moving into overlaps the
74259 * old location at all, and fail if it does.
74260 */
74261 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
74262 - goto out;
74263 -
74264 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
74265 + if (addr + old_len > new_addr && new_addr + new_len > addr)
74266 goto out;
74267
74268 ret = do_munmap(mm, new_addr, new_len);
74269 @@ -436,6 +452,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74270 struct vm_area_struct *vma;
74271 unsigned long ret = -EINVAL;
74272 unsigned long charged = 0;
74273 + unsigned long pax_task_size = TASK_SIZE;
74274
74275 down_write(&current->mm->mmap_sem);
74276
74277 @@ -456,6 +473,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74278 if (!new_len)
74279 goto out;
74280
74281 +#ifdef CONFIG_PAX_SEGMEXEC
74282 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
74283 + pax_task_size = SEGMEXEC_TASK_SIZE;
74284 +#endif
74285 +
74286 + pax_task_size -= PAGE_SIZE;
74287 +
74288 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
74289 + old_len > pax_task_size || addr > pax_task_size-old_len)
74290 + goto out;
74291 +
74292 if (flags & MREMAP_FIXED) {
74293 if (flags & MREMAP_MAYMOVE)
74294 ret = mremap_to(addr, old_len, new_addr, new_len);
74295 @@ -505,6 +533,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74296 addr + new_len);
74297 }
74298 ret = addr;
74299 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
74300 goto out;
74301 }
74302 }
74303 @@ -528,7 +557,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74304 goto out;
74305 }
74306
74307 + map_flags = vma->vm_flags;
74308 ret = move_vma(vma, addr, old_len, new_len, new_addr);
74309 + if (!(ret & ~PAGE_MASK)) {
74310 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
74311 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
74312 + }
74313 }
74314 out:
74315 if (ret & ~PAGE_MASK)
74316 diff --git a/mm/nommu.c b/mm/nommu.c
74317 index d4b0c10..ed421b5 100644
74318 --- a/mm/nommu.c
74319 +++ b/mm/nommu.c
74320 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
74321 int sysctl_overcommit_ratio = 50; /* default is 50% */
74322 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
74323 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
74324 -int heap_stack_gap = 0;
74325
74326 atomic_long_t mmap_pages_allocated;
74327
74328 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
74329 EXPORT_SYMBOL(find_vma);
74330
74331 /*
74332 - * find a VMA
74333 - * - we don't extend stack VMAs under NOMMU conditions
74334 - */
74335 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
74336 -{
74337 - return find_vma(mm, addr);
74338 -}
74339 -
74340 -/*
74341 * expand a stack to a given address
74342 * - not supported under NOMMU conditions
74343 */
74344 @@ -1551,6 +1541,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
74345
74346 /* most fields are the same, copy all, and then fixup */
74347 *new = *vma;
74348 + INIT_LIST_HEAD(&new->anon_vma_chain);
74349 *region = *vma->vm_region;
74350 new->vm_region = region;
74351
74352 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
74353 index 34d879e..62970df 100644
74354 --- a/mm/page_alloc.c
74355 +++ b/mm/page_alloc.c
74356 @@ -336,7 +336,7 @@ out:
74357 * This usage means that zero-order pages may not be compound.
74358 */
74359
74360 -static void free_compound_page(struct page *page)
74361 +void free_compound_page(struct page *page)
74362 {
74363 __free_pages_ok(page, compound_order(page));
74364 }
74365 @@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74366 int i;
74367 int bad = 0;
74368
74369 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
74370 + unsigned long index = 1UL << order;
74371 +#endif
74372 +
74373 trace_mm_page_free(page, order);
74374 kmemcheck_free_shadow(page, order);
74375
74376 @@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74377 debug_check_no_obj_freed(page_address(page),
74378 PAGE_SIZE << order);
74379 }
74380 +
74381 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
74382 + for (; index; --index)
74383 + sanitize_highpage(page + index - 1);
74384 +#endif
74385 +
74386 arch_free_page(page, order);
74387 kernel_map_pages(page, 1 << order, 0);
74388
74389 @@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
74390 arch_alloc_page(page, order);
74391 kernel_map_pages(page, 1 << order, 1);
74392
74393 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
74394 if (gfp_flags & __GFP_ZERO)
74395 prep_zero_page(page, order, gfp_flags);
74396 +#endif
74397
74398 if (order && (gfp_flags & __GFP_COMP))
74399 prep_compound_page(page, order);
74400 @@ -3579,7 +3591,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
74401 unsigned long pfn;
74402
74403 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
74404 +#ifdef CONFIG_X86_32
74405 + /* boot failures in VMware 8 on 32bit vanilla since
74406 + this change */
74407 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
74408 +#else
74409 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
74410 +#endif
74411 return 1;
74412 }
74413 return 0;
74414 diff --git a/mm/percpu.c b/mm/percpu.c
74415 index bb4be74..a43ea85 100644
74416 --- a/mm/percpu.c
74417 +++ b/mm/percpu.c
74418 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
74419 static unsigned int pcpu_high_unit_cpu __read_mostly;
74420
74421 /* the address of the first chunk which starts with the kernel static area */
74422 -void *pcpu_base_addr __read_mostly;
74423 +void *pcpu_base_addr __read_only;
74424 EXPORT_SYMBOL_GPL(pcpu_base_addr);
74425
74426 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
74427 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
74428 index 926b466..b23df53 100644
74429 --- a/mm/process_vm_access.c
74430 +++ b/mm/process_vm_access.c
74431 @@ -13,6 +13,7 @@
74432 #include <linux/uio.h>
74433 #include <linux/sched.h>
74434 #include <linux/highmem.h>
74435 +#include <linux/security.h>
74436 #include <linux/ptrace.h>
74437 #include <linux/slab.h>
74438 #include <linux/syscalls.h>
74439 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74440 size_t iov_l_curr_offset = 0;
74441 ssize_t iov_len;
74442
74443 + return -ENOSYS; // PaX: until properly audited
74444 +
74445 /*
74446 * Work out how many pages of struct pages we're going to need
74447 * when eventually calling get_user_pages
74448 */
74449 for (i = 0; i < riovcnt; i++) {
74450 iov_len = rvec[i].iov_len;
74451 - if (iov_len > 0) {
74452 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
74453 - + iov_len)
74454 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
74455 - / PAGE_SIZE + 1;
74456 - nr_pages = max(nr_pages, nr_pages_iov);
74457 - }
74458 + if (iov_len <= 0)
74459 + continue;
74460 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
74461 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
74462 + nr_pages = max(nr_pages, nr_pages_iov);
74463 }
74464
74465 if (nr_pages == 0)
74466 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74467 goto free_proc_pages;
74468 }
74469
74470 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
74471 + rc = -EPERM;
74472 + goto put_task_struct;
74473 + }
74474 +
74475 mm = mm_access(task, PTRACE_MODE_ATTACH);
74476 if (!mm || IS_ERR(mm)) {
74477 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
74478 diff --git a/mm/rmap.c b/mm/rmap.c
74479 index 0f3b7cd..c5652b6 100644
74480 --- a/mm/rmap.c
74481 +++ b/mm/rmap.c
74482 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74483 struct anon_vma *anon_vma = vma->anon_vma;
74484 struct anon_vma_chain *avc;
74485
74486 +#ifdef CONFIG_PAX_SEGMEXEC
74487 + struct anon_vma_chain *avc_m = NULL;
74488 +#endif
74489 +
74490 might_sleep();
74491 if (unlikely(!anon_vma)) {
74492 struct mm_struct *mm = vma->vm_mm;
74493 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74494 if (!avc)
74495 goto out_enomem;
74496
74497 +#ifdef CONFIG_PAX_SEGMEXEC
74498 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
74499 + if (!avc_m)
74500 + goto out_enomem_free_avc;
74501 +#endif
74502 +
74503 anon_vma = find_mergeable_anon_vma(vma);
74504 allocated = NULL;
74505 if (!anon_vma) {
74506 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74507 /* page_table_lock to protect against threads */
74508 spin_lock(&mm->page_table_lock);
74509 if (likely(!vma->anon_vma)) {
74510 +
74511 +#ifdef CONFIG_PAX_SEGMEXEC
74512 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74513 +
74514 + if (vma_m) {
74515 + BUG_ON(vma_m->anon_vma);
74516 + vma_m->anon_vma = anon_vma;
74517 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
74518 + avc_m = NULL;
74519 + }
74520 +#endif
74521 +
74522 vma->anon_vma = anon_vma;
74523 anon_vma_chain_link(vma, avc, anon_vma);
74524 allocated = NULL;
74525 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74526
74527 if (unlikely(allocated))
74528 put_anon_vma(allocated);
74529 +
74530 +#ifdef CONFIG_PAX_SEGMEXEC
74531 + if (unlikely(avc_m))
74532 + anon_vma_chain_free(avc_m);
74533 +#endif
74534 +
74535 if (unlikely(avc))
74536 anon_vma_chain_free(avc);
74537 }
74538 return 0;
74539
74540 out_enomem_free_avc:
74541 +
74542 +#ifdef CONFIG_PAX_SEGMEXEC
74543 + if (avc_m)
74544 + anon_vma_chain_free(avc_m);
74545 +#endif
74546 +
74547 anon_vma_chain_free(avc);
74548 out_enomem:
74549 return -ENOMEM;
74550 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
74551 * Attach the anon_vmas from src to dst.
74552 * Returns 0 on success, -ENOMEM on failure.
74553 */
74554 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
74555 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
74556 {
74557 struct anon_vma_chain *avc, *pavc;
74558 struct anon_vma *root = NULL;
74559 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
74560 * the corresponding VMA in the parent process is attached to.
74561 * Returns 0 on success, non-zero on failure.
74562 */
74563 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
74564 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
74565 {
74566 struct anon_vma_chain *avc;
74567 struct anon_vma *anon_vma;
74568 diff --git a/mm/shmem.c b/mm/shmem.c
74569 index bd10636..5c16d49 100644
74570 --- a/mm/shmem.c
74571 +++ b/mm/shmem.c
74572 @@ -31,7 +31,7 @@
74573 #include <linux/export.h>
74574 #include <linux/swap.h>
74575
74576 -static struct vfsmount *shm_mnt;
74577 +struct vfsmount *shm_mnt;
74578
74579 #ifdef CONFIG_SHMEM
74580 /*
74581 @@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
74582 #define BOGO_DIRENT_SIZE 20
74583
74584 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
74585 -#define SHORT_SYMLINK_LEN 128
74586 +#define SHORT_SYMLINK_LEN 64
74587
74588 struct shmem_xattr {
74589 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
74590 @@ -2590,8 +2590,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
74591 int err = -ENOMEM;
74592
74593 /* Round up to L1_CACHE_BYTES to resist false sharing */
74594 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74595 - L1_CACHE_BYTES), GFP_KERNEL);
74596 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74597 if (!sbinfo)
74598 return -ENOMEM;
74599
74600 diff --git a/mm/slab.c b/mm/slab.c
74601 index e901a36..9ff3f90 100644
74602 --- a/mm/slab.c
74603 +++ b/mm/slab.c
74604 @@ -153,7 +153,7 @@
74605
74606 /* Legal flag mask for kmem_cache_create(). */
74607 #if DEBUG
74608 -# define CREATE_MASK (SLAB_RED_ZONE | \
74609 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74610 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74611 SLAB_CACHE_DMA | \
74612 SLAB_STORE_USER | \
74613 @@ -161,7 +161,7 @@
74614 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74615 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74616 #else
74617 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74618 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74619 SLAB_CACHE_DMA | \
74620 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74621 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74622 @@ -290,7 +290,7 @@ struct kmem_list3 {
74623 * Need this for bootstrapping a per node allocator.
74624 */
74625 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
74626 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74627 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
74628 #define CACHE_CACHE 0
74629 #define SIZE_AC MAX_NUMNODES
74630 #define SIZE_L3 (2 * MAX_NUMNODES)
74631 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
74632 if ((x)->max_freeable < i) \
74633 (x)->max_freeable = i; \
74634 } while (0)
74635 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74636 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74637 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74638 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74639 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74640 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74641 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74642 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74643 #else
74644 #define STATS_INC_ACTIVE(x) do { } while (0)
74645 #define STATS_DEC_ACTIVE(x) do { } while (0)
74646 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
74647 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74648 */
74649 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74650 - const struct slab *slab, void *obj)
74651 + const struct slab *slab, const void *obj)
74652 {
74653 u32 offset = (obj - slab->s_mem);
74654 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
74655 @@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
74656 struct cache_names {
74657 char *name;
74658 char *name_dma;
74659 + char *name_usercopy;
74660 };
74661
74662 static struct cache_names __initdata cache_names[] = {
74663 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
74664 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
74665 #include <linux/kmalloc_sizes.h>
74666 - {NULL,}
74667 + {NULL}
74668 #undef CACHE
74669 };
74670
74671 @@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
74672 if (unlikely(gfpflags & GFP_DMA))
74673 return csizep->cs_dmacachep;
74674 #endif
74675 +
74676 +#ifdef CONFIG_PAX_USERCOPY_SLABS
74677 + if (unlikely(gfpflags & GFP_USERCOPY))
74678 + return csizep->cs_usercopycachep;
74679 +#endif
74680 +
74681 return csizep->cs_cachep;
74682 }
74683
74684 @@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
74685 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74686 sizes[INDEX_AC].cs_size,
74687 ARCH_KMALLOC_MINALIGN,
74688 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74689 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74690 NULL);
74691
74692 if (INDEX_AC != INDEX_L3) {
74693 @@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
74694 kmem_cache_create(names[INDEX_L3].name,
74695 sizes[INDEX_L3].cs_size,
74696 ARCH_KMALLOC_MINALIGN,
74697 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74698 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74699 NULL);
74700 }
74701
74702 @@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
74703 sizes->cs_cachep = kmem_cache_create(names->name,
74704 sizes->cs_size,
74705 ARCH_KMALLOC_MINALIGN,
74706 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74707 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74708 NULL);
74709 }
74710 #ifdef CONFIG_ZONE_DMA
74711 @@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
74712 SLAB_PANIC,
74713 NULL);
74714 #endif
74715 +
74716 +#ifdef CONFIG_PAX_USERCOPY_SLABS
74717 + sizes->cs_usercopycachep = kmem_cache_create(
74718 + names->name_usercopy,
74719 + sizes->cs_size,
74720 + ARCH_KMALLOC_MINALIGN,
74721 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74722 + NULL);
74723 +#endif
74724 +
74725 sizes++;
74726 names++;
74727 }
74728 @@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
74729 }
74730 /* cpu stats */
74731 {
74732 - unsigned long allochit = atomic_read(&cachep->allochit);
74733 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74734 - unsigned long freehit = atomic_read(&cachep->freehit);
74735 - unsigned long freemiss = atomic_read(&cachep->freemiss);
74736 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74737 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74738 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74739 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74740
74741 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74742 allochit, allocmiss, freehit, freemiss);
74743 @@ -4652,13 +4669,71 @@ static int __init slab_proc_init(void)
74744 {
74745 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
74746 #ifdef CONFIG_DEBUG_SLAB_LEAK
74747 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74748 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
74749 #endif
74750 return 0;
74751 }
74752 module_init(slab_proc_init);
74753 #endif
74754
74755 +bool is_usercopy_object(const void *ptr)
74756 +{
74757 + struct page *page;
74758 + struct kmem_cache *cachep;
74759 +
74760 + if (ZERO_OR_NULL_PTR(ptr))
74761 + return false;
74762 +
74763 + if (!slab_is_available())
74764 + return false;
74765 +
74766 + if (!virt_addr_valid(ptr))
74767 + return false;
74768 +
74769 + page = virt_to_head_page(ptr);
74770 +
74771 + if (!PageSlab(page))
74772 + return false;
74773 +
74774 + cachep = page_get_cache(page);
74775 + return cachep->flags & SLAB_USERCOPY;
74776 +}
74777 +
74778 +#ifdef CONFIG_PAX_USERCOPY
74779 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
74780 +{
74781 + struct page *page;
74782 + struct kmem_cache *cachep;
74783 + struct slab *slabp;
74784 + unsigned int objnr;
74785 + unsigned long offset;
74786 +
74787 + if (ZERO_OR_NULL_PTR(ptr))
74788 + return "<null>";
74789 +
74790 + if (!virt_addr_valid(ptr))
74791 + return NULL;
74792 +
74793 + page = virt_to_head_page(ptr);
74794 +
74795 + if (!PageSlab(page))
74796 + return NULL;
74797 +
74798 + cachep = page_get_cache(page);
74799 + if (!(cachep->flags & SLAB_USERCOPY))
74800 + return cachep->name;
74801 +
74802 + slabp = page_get_slab(page);
74803 + objnr = obj_to_index(cachep, slabp, ptr);
74804 + BUG_ON(objnr >= cachep->num);
74805 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74806 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74807 + return NULL;
74808 +
74809 + return cachep->name;
74810 +}
74811 +#endif
74812 +
74813 /**
74814 * ksize - get the actual amount of memory allocated for a given object
74815 * @objp: Pointer to the object
74816 diff --git a/mm/slob.c b/mm/slob.c
74817 index 8105be4..33e52d7 100644
74818 --- a/mm/slob.c
74819 +++ b/mm/slob.c
74820 @@ -29,7 +29,7 @@
74821 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74822 * alloc_pages() directly, allocating compound pages so the page order
74823 * does not have to be separately tracked, and also stores the exact
74824 - * allocation size in page->private so that it can be used to accurately
74825 + * allocation size in slob_page->size so that it can be used to accurately
74826 * provide ksize(). These objects are detected in kfree() because slob_page()
74827 * is false for them.
74828 *
74829 @@ -58,6 +58,7 @@
74830 */
74831
74832 #include <linux/kernel.h>
74833 +#include <linux/sched.h>
74834 #include <linux/slab.h>
74835 #include <linux/mm.h>
74836 #include <linux/swap.h> /* struct reclaim_state */
74837 @@ -100,9 +101,8 @@ struct slob_page {
74838 union {
74839 struct {
74840 unsigned long flags; /* mandatory */
74841 - atomic_t _count; /* mandatory */
74842 slobidx_t units; /* free units left in page */
74843 - unsigned long pad[2];
74844 + unsigned long size; /* size when >=PAGE_SIZE */
74845 slob_t *free; /* first free slob_t in page */
74846 struct list_head list; /* linked list of free pages */
74847 };
74848 @@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
74849 */
74850 static inline int is_slob_page(struct slob_page *sp)
74851 {
74852 - return PageSlab((struct page *)sp);
74853 + return PageSlab((struct page *)sp) && !sp->size;
74854 }
74855
74856 static inline void set_slob_page(struct slob_page *sp)
74857 @@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
74858
74859 static inline struct slob_page *slob_page(const void *addr)
74860 {
74861 - return (struct slob_page *)virt_to_page(addr);
74862 + return (struct slob_page *)virt_to_head_page(addr);
74863 }
74864
74865 /*
74866 @@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
74867 /*
74868 * Return the size of a slob block.
74869 */
74870 -static slobidx_t slob_units(slob_t *s)
74871 +static slobidx_t slob_units(const slob_t *s)
74872 {
74873 if (s->units > 0)
74874 return s->units;
74875 @@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
74876 /*
74877 * Return the next free slob block pointer after this one.
74878 */
74879 -static slob_t *slob_next(slob_t *s)
74880 +static slob_t *slob_next(const slob_t *s)
74881 {
74882 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74883 slobidx_t next;
74884 @@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
74885 /*
74886 * Returns true if s is the last free block in its page.
74887 */
74888 -static int slob_last(slob_t *s)
74889 +static int slob_last(const slob_t *s)
74890 {
74891 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74892 }
74893 @@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
74894 if (!page)
74895 return NULL;
74896
74897 + set_slob_page(page);
74898 return page_address(page);
74899 }
74900
74901 @@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
74902 if (!b)
74903 return NULL;
74904 sp = slob_page(b);
74905 - set_slob_page(sp);
74906
74907 spin_lock_irqsave(&slob_lock, flags);
74908 sp->units = SLOB_UNITS(PAGE_SIZE);
74909 sp->free = b;
74910 + sp->size = 0;
74911 INIT_LIST_HEAD(&sp->list);
74912 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74913 set_slob_page_free(sp, slob_list);
74914 @@ -476,10 +477,9 @@ out:
74915 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74916 */
74917
74918 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74919 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74920 {
74921 - unsigned int *m;
74922 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74923 + slob_t *m;
74924 void *ret;
74925
74926 gfp &= gfp_allowed_mask;
74927 @@ -494,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74928
74929 if (!m)
74930 return NULL;
74931 - *m = size;
74932 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74933 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74934 + m[0].units = size;
74935 + m[1].units = align;
74936 ret = (void *)m + align;
74937
74938 trace_kmalloc_node(_RET_IP_, ret,
74939 @@ -506,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74940 gfp |= __GFP_COMP;
74941 ret = slob_new_pages(gfp, order, node);
74942 if (ret) {
74943 - struct page *page;
74944 - page = virt_to_page(ret);
74945 - page->private = size;
74946 + struct slob_page *sp;
74947 + sp = slob_page(ret);
74948 + sp->size = size;
74949 }
74950
74951 trace_kmalloc_node(_RET_IP_, ret,
74952 size, PAGE_SIZE << order, gfp, node);
74953 }
74954
74955 - kmemleak_alloc(ret, size, 1, gfp);
74956 + return ret;
74957 +}
74958 +
74959 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74960 +{
74961 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74962 + void *ret = __kmalloc_node_align(size, gfp, node, align);
74963 +
74964 + if (!ZERO_OR_NULL_PTR(ret))
74965 + kmemleak_alloc(ret, size, 1, gfp);
74966 return ret;
74967 }
74968 EXPORT_SYMBOL(__kmalloc_node);
74969 @@ -533,13 +545,88 @@ void kfree(const void *block)
74970 sp = slob_page(block);
74971 if (is_slob_page(sp)) {
74972 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74973 - unsigned int *m = (unsigned int *)(block - align);
74974 - slob_free(m, *m + align);
74975 - } else
74976 + slob_t *m = (slob_t *)(block - align);
74977 + slob_free(m, m[0].units + align);
74978 + } else {
74979 + clear_slob_page(sp);
74980 + free_slob_page(sp);
74981 + sp->size = 0;
74982 put_page(&sp->page);
74983 + }
74984 }
74985 EXPORT_SYMBOL(kfree);
74986
74987 +bool is_usercopy_object(const void *ptr)
74988 +{
74989 + if (!slab_is_available())
74990 + return false;
74991 +
74992 + // PAX: TODO
74993 +
74994 + return false;
74995 +}
74996 +
74997 +#ifdef CONFIG_PAX_USERCOPY
74998 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
74999 +{
75000 + struct slob_page *sp;
75001 + const slob_t *free;
75002 + const void *base;
75003 + unsigned long flags;
75004 +
75005 + if (ZERO_OR_NULL_PTR(ptr))
75006 + return "<null>";
75007 +
75008 + if (!virt_addr_valid(ptr))
75009 + return NULL;
75010 +
75011 + sp = slob_page(ptr);
75012 + if (!PageSlab((struct page *)sp))
75013 + return NULL;
75014 +
75015 + if (sp->size) {
75016 + base = page_address(&sp->page);
75017 + if (base <= ptr && n <= sp->size - (ptr - base))
75018 + return NULL;
75019 + return "<slob>";
75020 + }
75021 +
75022 + /* some tricky double walking to find the chunk */
75023 + spin_lock_irqsave(&slob_lock, flags);
75024 + base = (void *)((unsigned long)ptr & PAGE_MASK);
75025 + free = sp->free;
75026 +
75027 + while ((void *)free <= ptr) {
75028 + base = free + slob_units(free);
75029 + free = slob_next(free);
75030 + }
75031 +
75032 + while (base < (void *)free) {
75033 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
75034 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
75035 + int offset;
75036 +
75037 + if (ptr < base + align)
75038 + break;
75039 +
75040 + offset = ptr - base - align;
75041 + if (offset >= m) {
75042 + base += size;
75043 + continue;
75044 + }
75045 +
75046 + if (n > m - offset)
75047 + break;
75048 +
75049 + spin_unlock_irqrestore(&slob_lock, flags);
75050 + return NULL;
75051 + }
75052 +
75053 + spin_unlock_irqrestore(&slob_lock, flags);
75054 + return "<slob>";
75055 +}
75056 +#endif
75057 +
75058 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
75059 size_t ksize(const void *block)
75060 {
75061 @@ -552,10 +639,10 @@ size_t ksize(const void *block)
75062 sp = slob_page(block);
75063 if (is_slob_page(sp)) {
75064 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
75065 - unsigned int *m = (unsigned int *)(block - align);
75066 - return SLOB_UNITS(*m) * SLOB_UNIT;
75067 + slob_t *m = (slob_t *)(block - align);
75068 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
75069 } else
75070 - return sp->page.private;
75071 + return sp->size;
75072 }
75073 EXPORT_SYMBOL(ksize);
75074
75075 @@ -571,8 +658,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75076 {
75077 struct kmem_cache *c;
75078
75079 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75080 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
75081 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
75082 +#else
75083 c = slob_alloc(sizeof(struct kmem_cache),
75084 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
75085 +#endif
75086
75087 if (c) {
75088 c->name = name;
75089 @@ -614,17 +706,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
75090
75091 lockdep_trace_alloc(flags);
75092
75093 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75094 + b = __kmalloc_node_align(c->size, flags, node, c->align);
75095 +#else
75096 if (c->size < PAGE_SIZE) {
75097 b = slob_alloc(c->size, flags, c->align, node);
75098 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
75099 SLOB_UNITS(c->size) * SLOB_UNIT,
75100 flags, node);
75101 } else {
75102 + struct slob_page *sp;
75103 +
75104 b = slob_new_pages(flags, get_order(c->size), node);
75105 + sp = slob_page(b);
75106 + sp->size = c->size;
75107 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
75108 PAGE_SIZE << get_order(c->size),
75109 flags, node);
75110 }
75111 +#endif
75112
75113 if (c->ctor)
75114 c->ctor(b);
75115 @@ -636,10 +736,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
75116
75117 static void __kmem_cache_free(void *b, int size)
75118 {
75119 - if (size < PAGE_SIZE)
75120 + struct slob_page *sp = slob_page(b);
75121 +
75122 + if (is_slob_page(sp))
75123 slob_free(b, size);
75124 - else
75125 + else {
75126 + clear_slob_page(sp);
75127 + free_slob_page(sp);
75128 + sp->size = 0;
75129 slob_free_pages(b, get_order(size));
75130 + }
75131 }
75132
75133 static void kmem_rcu_free(struct rcu_head *head)
75134 @@ -652,17 +758,31 @@ static void kmem_rcu_free(struct rcu_head *head)
75135
75136 void kmem_cache_free(struct kmem_cache *c, void *b)
75137 {
75138 + int size = c->size;
75139 +
75140 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75141 + if (size + c->align < PAGE_SIZE) {
75142 + size += c->align;
75143 + b -= c->align;
75144 + }
75145 +#endif
75146 +
75147 kmemleak_free_recursive(b, c->flags);
75148 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
75149 struct slob_rcu *slob_rcu;
75150 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
75151 - slob_rcu->size = c->size;
75152 + slob_rcu = b + (size - sizeof(struct slob_rcu));
75153 + slob_rcu->size = size;
75154 call_rcu(&slob_rcu->head, kmem_rcu_free);
75155 } else {
75156 - __kmem_cache_free(b, c->size);
75157 + __kmem_cache_free(b, size);
75158 }
75159
75160 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75161 + trace_kfree(_RET_IP_, b);
75162 +#else
75163 trace_kmem_cache_free(_RET_IP_, b);
75164 +#endif
75165 +
75166 }
75167 EXPORT_SYMBOL(kmem_cache_free);
75168
75169 diff --git a/mm/slub.c b/mm/slub.c
75170 index 8c691fa..2993c2b 100644
75171 --- a/mm/slub.c
75172 +++ b/mm/slub.c
75173 @@ -209,7 +209,7 @@ struct track {
75174
75175 enum track_item { TRACK_ALLOC, TRACK_FREE };
75176
75177 -#ifdef CONFIG_SYSFS
75178 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75179 static int sysfs_slab_add(struct kmem_cache *);
75180 static int sysfs_slab_alias(struct kmem_cache *, const char *);
75181 static void sysfs_slab_remove(struct kmem_cache *);
75182 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
75183 if (!t->addr)
75184 return;
75185
75186 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
75187 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
75188 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
75189 #ifdef CONFIG_STACKTRACE
75190 {
75191 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
75192
75193 page = virt_to_head_page(x);
75194
75195 + BUG_ON(!PageSlab(page));
75196 +
75197 slab_free(s, page, x, _RET_IP_);
75198
75199 trace_kmem_cache_free(_RET_IP_, x);
75200 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
75201 * Merge control. If this is set then no merging of slab caches will occur.
75202 * (Could be removed. This was introduced to pacify the merge skeptics.)
75203 */
75204 -static int slub_nomerge;
75205 +static int slub_nomerge = 1;
75206
75207 /*
75208 * Calculate the order of allocation given an slab object size.
75209 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
75210 else
75211 s->cpu_partial = 30;
75212
75213 - s->refcount = 1;
75214 + atomic_set(&s->refcount, 1);
75215 #ifdef CONFIG_NUMA
75216 s->remote_node_defrag_ratio = 1000;
75217 #endif
75218 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
75219 void kmem_cache_destroy(struct kmem_cache *s)
75220 {
75221 down_write(&slub_lock);
75222 - s->refcount--;
75223 - if (!s->refcount) {
75224 + if (atomic_dec_and_test(&s->refcount)) {
75225 list_del(&s->list);
75226 up_write(&slub_lock);
75227 if (kmem_cache_close(s)) {
75228 @@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
75229 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
75230 #endif
75231
75232 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75233 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
75234 +#endif
75235 +
75236 static int __init setup_slub_min_order(char *str)
75237 {
75238 get_option(&str, &slub_min_order);
75239 @@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
75240 return kmalloc_dma_caches[index];
75241
75242 #endif
75243 +
75244 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75245 + if (flags & SLAB_USERCOPY)
75246 + return kmalloc_usercopy_caches[index];
75247 +
75248 +#endif
75249 +
75250 return kmalloc_caches[index];
75251 }
75252
75253 @@ -3405,6 +3417,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
75254 EXPORT_SYMBOL(__kmalloc_node);
75255 #endif
75256
75257 +bool is_usercopy_object(const void *ptr)
75258 +{
75259 + struct page *page;
75260 + struct kmem_cache *s;
75261 +
75262 + if (ZERO_OR_NULL_PTR(ptr))
75263 + return false;
75264 +
75265 + if (!slab_is_available())
75266 + return false;
75267 +
75268 + if (!virt_addr_valid(ptr))
75269 + return false;
75270 +
75271 + page = virt_to_head_page(ptr);
75272 +
75273 + if (!PageSlab(page))
75274 + return false;
75275 +
75276 + s = page->slab;
75277 + return s->flags & SLAB_USERCOPY;
75278 +}
75279 +
75280 +#ifdef CONFIG_PAX_USERCOPY
75281 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
75282 +{
75283 + struct page *page;
75284 + struct kmem_cache *s;
75285 + unsigned long offset;
75286 +
75287 + if (ZERO_OR_NULL_PTR(ptr))
75288 + return "<null>";
75289 +
75290 + if (!virt_addr_valid(ptr))
75291 + return NULL;
75292 +
75293 + page = virt_to_head_page(ptr);
75294 +
75295 + if (!PageSlab(page))
75296 + return NULL;
75297 +
75298 + s = page->slab;
75299 + if (!(s->flags & SLAB_USERCOPY))
75300 + return s->name;
75301 +
75302 + offset = (ptr - page_address(page)) % s->size;
75303 + if (offset <= s->objsize && n <= s->objsize - offset)
75304 + return NULL;
75305 +
75306 + return s->name;
75307 +}
75308 +#endif
75309 +
75310 size_t ksize(const void *object)
75311 {
75312 struct page *page;
75313 @@ -3679,7 +3744,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
75314 int node;
75315
75316 list_add(&s->list, &slab_caches);
75317 - s->refcount = -1;
75318 + atomic_set(&s->refcount, -1);
75319
75320 for_each_node_state(node, N_NORMAL_MEMORY) {
75321 struct kmem_cache_node *n = get_node(s, node);
75322 @@ -3799,17 +3864,17 @@ void __init kmem_cache_init(void)
75323
75324 /* Caches that are not of the two-to-the-power-of size */
75325 if (KMALLOC_MIN_SIZE <= 32) {
75326 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
75327 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
75328 caches++;
75329 }
75330
75331 if (KMALLOC_MIN_SIZE <= 64) {
75332 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
75333 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
75334 caches++;
75335 }
75336
75337 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
75338 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
75339 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
75340 caches++;
75341 }
75342
75343 @@ -3851,6 +3916,22 @@ void __init kmem_cache_init(void)
75344 }
75345 }
75346 #endif
75347 +
75348 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75349 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
75350 + struct kmem_cache *s = kmalloc_caches[i];
75351 +
75352 + if (s && s->size) {
75353 + char *name = kasprintf(GFP_NOWAIT,
75354 + "usercopy-kmalloc-%d", s->objsize);
75355 +
75356 + BUG_ON(!name);
75357 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
75358 + s->objsize, SLAB_USERCOPY);
75359 + }
75360 + }
75361 +#endif
75362 +
75363 printk(KERN_INFO
75364 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
75365 " CPUs=%d, Nodes=%d\n",
75366 @@ -3877,7 +3958,7 @@ static int slab_unmergeable(struct kmem_cache *s)
75367 /*
75368 * We may have set a slab to be unmergeable during bootstrap.
75369 */
75370 - if (s->refcount < 0)
75371 + if (atomic_read(&s->refcount) < 0)
75372 return 1;
75373
75374 return 0;
75375 @@ -3936,7 +4017,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75376 down_write(&slub_lock);
75377 s = find_mergeable(size, align, flags, name, ctor);
75378 if (s) {
75379 - s->refcount++;
75380 + atomic_inc(&s->refcount);
75381 /*
75382 * Adjust the object sizes so that we clear
75383 * the complete object on kzalloc.
75384 @@ -3945,7 +4026,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75385 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
75386
75387 if (sysfs_slab_alias(s, name)) {
75388 - s->refcount--;
75389 + atomic_dec(&s->refcount);
75390 goto err;
75391 }
75392 up_write(&slub_lock);
75393 @@ -4074,7 +4155,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
75394 }
75395 #endif
75396
75397 -#ifdef CONFIG_SYSFS
75398 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75399 static int count_inuse(struct page *page)
75400 {
75401 return page->inuse;
75402 @@ -4461,12 +4542,12 @@ static void resiliency_test(void)
75403 validate_slab_cache(kmalloc_caches[9]);
75404 }
75405 #else
75406 -#ifdef CONFIG_SYSFS
75407 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75408 static void resiliency_test(void) {};
75409 #endif
75410 #endif
75411
75412 -#ifdef CONFIG_SYSFS
75413 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75414 enum slab_stat_type {
75415 SL_ALL, /* All slabs */
75416 SL_PARTIAL, /* Only partially allocated slabs */
75417 @@ -4709,7 +4790,7 @@ SLAB_ATTR_RO(ctor);
75418
75419 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
75420 {
75421 - return sprintf(buf, "%d\n", s->refcount - 1);
75422 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
75423 }
75424 SLAB_ATTR_RO(aliases);
75425
75426 @@ -5280,6 +5361,7 @@ static char *create_unique_id(struct kmem_cache *s)
75427 return name;
75428 }
75429
75430 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75431 static int sysfs_slab_add(struct kmem_cache *s)
75432 {
75433 int err;
75434 @@ -5342,6 +5424,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
75435 kobject_del(&s->kobj);
75436 kobject_put(&s->kobj);
75437 }
75438 +#endif
75439
75440 /*
75441 * Need to buffer aliases during bootup until sysfs becomes
75442 @@ -5355,6 +5438,7 @@ struct saved_alias {
75443
75444 static struct saved_alias *alias_list;
75445
75446 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75447 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75448 {
75449 struct saved_alias *al;
75450 @@ -5377,6 +5461,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75451 alias_list = al;
75452 return 0;
75453 }
75454 +#endif
75455
75456 static int __init slab_sysfs_init(void)
75457 {
75458 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
75459 index 1b7e22a..3fcd4f3 100644
75460 --- a/mm/sparse-vmemmap.c
75461 +++ b/mm/sparse-vmemmap.c
75462 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
75463 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75464 if (!p)
75465 return NULL;
75466 - pud_populate(&init_mm, pud, p);
75467 + pud_populate_kernel(&init_mm, pud, p);
75468 }
75469 return pud;
75470 }
75471 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
75472 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75473 if (!p)
75474 return NULL;
75475 - pgd_populate(&init_mm, pgd, p);
75476 + pgd_populate_kernel(&init_mm, pgd, p);
75477 }
75478 return pgd;
75479 }
75480 diff --git a/mm/swap.c b/mm/swap.c
75481 index 4e7e2ec..0c57830 100644
75482 --- a/mm/swap.c
75483 +++ b/mm/swap.c
75484 @@ -30,6 +30,7 @@
75485 #include <linux/backing-dev.h>
75486 #include <linux/memcontrol.h>
75487 #include <linux/gfp.h>
75488 +#include <linux/hugetlb.h>
75489
75490 #include "internal.h"
75491
75492 @@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
75493
75494 __page_cache_release(page);
75495 dtor = get_compound_page_dtor(page);
75496 + if (!PageHuge(page))
75497 + BUG_ON(dtor != free_compound_page);
75498 (*dtor)(page);
75499 }
75500
75501 diff --git a/mm/swapfile.c b/mm/swapfile.c
75502 index 71373d0..11fa7d9 100644
75503 --- a/mm/swapfile.c
75504 +++ b/mm/swapfile.c
75505 @@ -63,7 +63,7 @@ static DEFINE_MUTEX(swapon_mutex);
75506
75507 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
75508 /* Activity counter to indicate that a swapon or swapoff has occurred */
75509 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
75510 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
75511
75512 static inline unsigned char swap_count(unsigned char ent)
75513 {
75514 @@ -1663,7 +1663,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
75515 }
75516 filp_close(swap_file, NULL);
75517 err = 0;
75518 - atomic_inc(&proc_poll_event);
75519 + atomic_inc_unchecked(&proc_poll_event);
75520 wake_up_interruptible(&proc_poll_wait);
75521
75522 out_dput:
75523 @@ -1679,8 +1679,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
75524
75525 poll_wait(file, &proc_poll_wait, wait);
75526
75527 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
75528 - seq->poll_event = atomic_read(&proc_poll_event);
75529 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
75530 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75531 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
75532 }
75533
75534 @@ -1778,7 +1778,7 @@ static int swaps_open(struct inode *inode, struct file *file)
75535 return ret;
75536
75537 seq = file->private_data;
75538 - seq->poll_event = atomic_read(&proc_poll_event);
75539 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75540 return 0;
75541 }
75542
75543 @@ -2120,7 +2120,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
75544 (frontswap_map) ? "FS" : "");
75545
75546 mutex_unlock(&swapon_mutex);
75547 - atomic_inc(&proc_poll_event);
75548 + atomic_inc_unchecked(&proc_poll_event);
75549 wake_up_interruptible(&proc_poll_wait);
75550
75551 if (S_ISREG(inode->i_mode))
75552 diff --git a/mm/util.c b/mm/util.c
75553 index 8c7265a..c96d884 100644
75554 --- a/mm/util.c
75555 +++ b/mm/util.c
75556 @@ -285,6 +285,12 @@ done:
75557 void arch_pick_mmap_layout(struct mm_struct *mm)
75558 {
75559 mm->mmap_base = TASK_UNMAPPED_BASE;
75560 +
75561 +#ifdef CONFIG_PAX_RANDMMAP
75562 + if (mm->pax_flags & MF_PAX_RANDMMAP)
75563 + mm->mmap_base += mm->delta_mmap;
75564 +#endif
75565 +
75566 mm->get_unmapped_area = arch_get_unmapped_area;
75567 mm->unmap_area = arch_unmap_area;
75568 }
75569 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
75570 index 2aad499..8aad8b1 100644
75571 --- a/mm/vmalloc.c
75572 +++ b/mm/vmalloc.c
75573 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
75574
75575 pte = pte_offset_kernel(pmd, addr);
75576 do {
75577 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75578 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75579 +
75580 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75581 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75582 + BUG_ON(!pte_exec(*pte));
75583 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75584 + continue;
75585 + }
75586 +#endif
75587 +
75588 + {
75589 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75590 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75591 + }
75592 } while (pte++, addr += PAGE_SIZE, addr != end);
75593 }
75594
75595 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75596 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75597 {
75598 pte_t *pte;
75599 + int ret = -ENOMEM;
75600
75601 /*
75602 * nr is a running index into the array which helps higher level
75603 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75604 pte = pte_alloc_kernel(pmd, addr);
75605 if (!pte)
75606 return -ENOMEM;
75607 +
75608 + pax_open_kernel();
75609 do {
75610 struct page *page = pages[*nr];
75611
75612 - if (WARN_ON(!pte_none(*pte)))
75613 - return -EBUSY;
75614 - if (WARN_ON(!page))
75615 - return -ENOMEM;
75616 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75617 + if (pgprot_val(prot) & _PAGE_NX)
75618 +#endif
75619 +
75620 + if (WARN_ON(!pte_none(*pte))) {
75621 + ret = -EBUSY;
75622 + goto out;
75623 + }
75624 + if (WARN_ON(!page)) {
75625 + ret = -ENOMEM;
75626 + goto out;
75627 + }
75628 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75629 (*nr)++;
75630 } while (pte++, addr += PAGE_SIZE, addr != end);
75631 - return 0;
75632 + ret = 0;
75633 +out:
75634 + pax_close_kernel();
75635 + return ret;
75636 }
75637
75638 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75639 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75640 pmd_t *pmd;
75641 unsigned long next;
75642
75643 - pmd = pmd_alloc(&init_mm, pud, addr);
75644 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
75645 if (!pmd)
75646 return -ENOMEM;
75647 do {
75648 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
75649 pud_t *pud;
75650 unsigned long next;
75651
75652 - pud = pud_alloc(&init_mm, pgd, addr);
75653 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
75654 if (!pud)
75655 return -ENOMEM;
75656 do {
75657 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
75658 * and fall back on vmalloc() if that fails. Others
75659 * just put it in the vmalloc space.
75660 */
75661 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75662 +#ifdef CONFIG_MODULES
75663 +#ifdef MODULES_VADDR
75664 unsigned long addr = (unsigned long)x;
75665 if (addr >= MODULES_VADDR && addr < MODULES_END)
75666 return 1;
75667 #endif
75668 +
75669 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75670 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75671 + return 1;
75672 +#endif
75673 +
75674 +#endif
75675 +
75676 return is_vmalloc_addr(x);
75677 }
75678
75679 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
75680
75681 if (!pgd_none(*pgd)) {
75682 pud_t *pud = pud_offset(pgd, addr);
75683 +#ifdef CONFIG_X86
75684 + if (!pud_large(*pud))
75685 +#endif
75686 if (!pud_none(*pud)) {
75687 pmd_t *pmd = pmd_offset(pud, addr);
75688 +#ifdef CONFIG_X86
75689 + if (!pmd_large(*pmd))
75690 +#endif
75691 if (!pmd_none(*pmd)) {
75692 pte_t *ptep, pte;
75693
75694 @@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
75695 * Allocate a region of KVA of the specified size and alignment, within the
75696 * vstart and vend.
75697 */
75698 -static struct vmap_area *alloc_vmap_area(unsigned long size,
75699 +static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
75700 unsigned long align,
75701 unsigned long vstart, unsigned long vend,
75702 int node, gfp_t gfp_mask)
75703 @@ -1320,6 +1360,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
75704 struct vm_struct *area;
75705
75706 BUG_ON(in_interrupt());
75707 +
75708 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75709 + if (flags & VM_KERNEXEC) {
75710 + if (start != VMALLOC_START || end != VMALLOC_END)
75711 + return NULL;
75712 + start = (unsigned long)MODULES_EXEC_VADDR;
75713 + end = (unsigned long)MODULES_EXEC_END;
75714 + }
75715 +#endif
75716 +
75717 if (flags & VM_IOREMAP) {
75718 int bit = fls(size);
75719
75720 @@ -1552,6 +1602,11 @@ void *vmap(struct page **pages, unsigned int count,
75721 if (count > totalram_pages)
75722 return NULL;
75723
75724 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75725 + if (!(pgprot_val(prot) & _PAGE_NX))
75726 + flags |= VM_KERNEXEC;
75727 +#endif
75728 +
75729 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75730 __builtin_return_address(0));
75731 if (!area)
75732 @@ -1653,6 +1708,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
75733 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
75734 goto fail;
75735
75736 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75737 + if (!(pgprot_val(prot) & _PAGE_NX))
75738 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
75739 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
75740 + else
75741 +#endif
75742 +
75743 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
75744 start, end, node, gfp_mask, caller);
75745 if (!area)
75746 @@ -1826,10 +1888,9 @@ EXPORT_SYMBOL(vzalloc_node);
75747 * For tight control over page level allocator and protection flags
75748 * use __vmalloc() instead.
75749 */
75750 -
75751 void *vmalloc_exec(unsigned long size)
75752 {
75753 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75754 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
75755 -1, __builtin_return_address(0));
75756 }
75757
75758 @@ -2124,6 +2185,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
75759 unsigned long uaddr = vma->vm_start;
75760 unsigned long usize = vma->vm_end - vma->vm_start;
75761
75762 + BUG_ON(vma->vm_mirror);
75763 +
75764 if ((PAGE_SIZE-1) & (unsigned long)addr)
75765 return -EINVAL;
75766
75767 @@ -2560,7 +2623,7 @@ static int s_show(struct seq_file *m, void *p)
75768 {
75769 struct vm_struct *v = p;
75770
75771 - seq_printf(m, "0x%p-0x%p %7ld",
75772 + seq_printf(m, "0x%pK-0x%pK %7ld",
75773 v->addr, v->addr + v->size, v->size);
75774
75775 if (v->caller)
75776 diff --git a/mm/vmstat.c b/mm/vmstat.c
75777 index 1bbbbd9..ff35669 100644
75778 --- a/mm/vmstat.c
75779 +++ b/mm/vmstat.c
75780 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
75781 *
75782 * vm_stat contains the global counters
75783 */
75784 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75785 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75786 EXPORT_SYMBOL(vm_stat);
75787
75788 #ifdef CONFIG_SMP
75789 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
75790 v = p->vm_stat_diff[i];
75791 p->vm_stat_diff[i] = 0;
75792 local_irq_restore(flags);
75793 - atomic_long_add(v, &zone->vm_stat[i]);
75794 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75795 global_diff[i] += v;
75796 #ifdef CONFIG_NUMA
75797 /* 3 seconds idle till flush */
75798 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
75799
75800 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75801 if (global_diff[i])
75802 - atomic_long_add(global_diff[i], &vm_stat[i]);
75803 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75804 }
75805
75806 #endif
75807 @@ -1211,10 +1211,20 @@ static int __init setup_vmstat(void)
75808 start_cpu_timer(cpu);
75809 #endif
75810 #ifdef CONFIG_PROC_FS
75811 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75812 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75813 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75814 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75815 + {
75816 + mode_t gr_mode = S_IRUGO;
75817 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75818 + gr_mode = S_IRUSR;
75819 +#endif
75820 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75821 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75822 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75823 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75824 +#else
75825 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75826 +#endif
75827 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75828 + }
75829 #endif
75830 return 0;
75831 }
75832 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
75833 index 9096bcb..43ed7bb 100644
75834 --- a/net/8021q/vlan.c
75835 +++ b/net/8021q/vlan.c
75836 @@ -557,8 +557,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
75837 err = -EPERM;
75838 if (!capable(CAP_NET_ADMIN))
75839 break;
75840 - if ((args.u.name_type >= 0) &&
75841 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75842 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75843 struct vlan_net *vn;
75844
75845 vn = net_generic(net, vlan_net_id);
75846 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
75847 index 6449bae..8c1f454 100644
75848 --- a/net/9p/trans_fd.c
75849 +++ b/net/9p/trans_fd.c
75850 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
75851 oldfs = get_fs();
75852 set_fs(get_ds());
75853 /* The cast to a user pointer is valid due to the set_fs() */
75854 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75855 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75856 set_fs(oldfs);
75857
75858 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75859 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
75860 index 876fbe8..8bbea9f 100644
75861 --- a/net/atm/atm_misc.c
75862 +++ b/net/atm/atm_misc.c
75863 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
75864 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75865 return 1;
75866 atm_return(vcc, truesize);
75867 - atomic_inc(&vcc->stats->rx_drop);
75868 + atomic_inc_unchecked(&vcc->stats->rx_drop);
75869 return 0;
75870 }
75871 EXPORT_SYMBOL(atm_charge);
75872 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
75873 }
75874 }
75875 atm_return(vcc, guess);
75876 - atomic_inc(&vcc->stats->rx_drop);
75877 + atomic_inc_unchecked(&vcc->stats->rx_drop);
75878 return NULL;
75879 }
75880 EXPORT_SYMBOL(atm_alloc_charge);
75881 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
75882
75883 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75884 {
75885 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75886 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75887 __SONET_ITEMS
75888 #undef __HANDLE_ITEM
75889 }
75890 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
75891
75892 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75893 {
75894 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75895 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75896 __SONET_ITEMS
75897 #undef __HANDLE_ITEM
75898 }
75899 diff --git a/net/atm/lec.h b/net/atm/lec.h
75900 index a86aff9..3a0d6f6 100644
75901 --- a/net/atm/lec.h
75902 +++ b/net/atm/lec.h
75903 @@ -48,7 +48,7 @@ struct lane2_ops {
75904 const u8 *tlvs, u32 sizeoftlvs);
75905 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75906 const u8 *tlvs, u32 sizeoftlvs);
75907 -};
75908 +} __no_const;
75909
75910 /*
75911 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
75912 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
75913 index 0919a88..a23d54e 100644
75914 --- a/net/atm/mpc.h
75915 +++ b/net/atm/mpc.h
75916 @@ -33,7 +33,7 @@ struct mpoa_client {
75917 struct mpc_parameters parameters; /* parameters for this client */
75918
75919 const struct net_device_ops *old_ops;
75920 - struct net_device_ops new_ops;
75921 + net_device_ops_no_const new_ops;
75922 };
75923
75924
75925 diff --git a/net/atm/proc.c b/net/atm/proc.c
75926 index 0d020de..011c7bb 100644
75927 --- a/net/atm/proc.c
75928 +++ b/net/atm/proc.c
75929 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
75930 const struct k_atm_aal_stats *stats)
75931 {
75932 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
75933 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
75934 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
75935 - atomic_read(&stats->rx_drop));
75936 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75937 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75938 + atomic_read_unchecked(&stats->rx_drop));
75939 }
75940
75941 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75942 diff --git a/net/atm/resources.c b/net/atm/resources.c
75943 index 23f45ce..c748f1a 100644
75944 --- a/net/atm/resources.c
75945 +++ b/net/atm/resources.c
75946 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
75947 static void copy_aal_stats(struct k_atm_aal_stats *from,
75948 struct atm_aal_stats *to)
75949 {
75950 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75951 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75952 __AAL_STAT_ITEMS
75953 #undef __HANDLE_ITEM
75954 }
75955 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
75956 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75957 struct atm_aal_stats *to)
75958 {
75959 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75960 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75961 __AAL_STAT_ITEMS
75962 #undef __HANDLE_ITEM
75963 }
75964 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
75965 index dc53798..dc66333 100644
75966 --- a/net/batman-adv/bat_iv_ogm.c
75967 +++ b/net/batman-adv/bat_iv_ogm.c
75968 @@ -63,7 +63,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
75969
75970 /* randomize initial seqno to avoid collision */
75971 get_random_bytes(&random_seqno, sizeof(random_seqno));
75972 - atomic_set(&hard_iface->seqno, random_seqno);
75973 + atomic_set_unchecked(&hard_iface->seqno, random_seqno);
75974
75975 hard_iface->packet_len = BATMAN_OGM_HLEN;
75976 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
75977 @@ -572,7 +572,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
75978
75979 /* change sequence number to network order */
75980 batman_ogm_packet->seqno =
75981 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
75982 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75983
75984 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
75985 batman_ogm_packet->tt_crc = htons((uint16_t)
75986 @@ -592,7 +592,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
75987 else
75988 batman_ogm_packet->gw_flags = NO_FLAGS;
75989
75990 - atomic_inc(&hard_iface->seqno);
75991 + atomic_inc_unchecked(&hard_iface->seqno);
75992
75993 slide_own_bcast_window(hard_iface);
75994 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
75995 @@ -956,7 +956,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
75996 return;
75997
75998 /* could be changed by schedule_own_packet() */
75999 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
76000 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
76001
76002 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
76003
76004 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
76005 index dc334fa..766a01a 100644
76006 --- a/net/batman-adv/hard-interface.c
76007 +++ b/net/batman-adv/hard-interface.c
76008 @@ -321,7 +321,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
76009 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
76010 dev_add_pack(&hard_iface->batman_adv_ptype);
76011
76012 - atomic_set(&hard_iface->frag_seqno, 1);
76013 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
76014 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
76015 hard_iface->net_dev->name);
76016
76017 @@ -444,7 +444,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
76018 * This can't be called via a bat_priv callback because
76019 * we have no bat_priv yet.
76020 */
76021 - atomic_set(&hard_iface->seqno, 1);
76022 + atomic_set_unchecked(&hard_iface->seqno, 1);
76023 hard_iface->packet_buff = NULL;
76024
76025 return hard_iface;
76026 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
76027 index a0ec0e4..7beb587 100644
76028 --- a/net/batman-adv/soft-interface.c
76029 +++ b/net/batman-adv/soft-interface.c
76030 @@ -214,7 +214,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
76031
76032 /* set broadcast sequence number */
76033 bcast_packet->seqno =
76034 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
76035 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
76036
76037 add_bcast_packet_to_list(bat_priv, skb, 1);
76038
76039 @@ -390,7 +390,7 @@ struct net_device *softif_create(const char *name)
76040 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
76041
76042 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
76043 - atomic_set(&bat_priv->bcast_seqno, 1);
76044 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
76045 atomic_set(&bat_priv->ttvn, 0);
76046 atomic_set(&bat_priv->tt_local_changes, 0);
76047 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
76048 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
76049 index 61308e8..2e142b2 100644
76050 --- a/net/batman-adv/types.h
76051 +++ b/net/batman-adv/types.h
76052 @@ -38,8 +38,8 @@ struct hard_iface {
76053 int16_t if_num;
76054 char if_status;
76055 struct net_device *net_dev;
76056 - atomic_t seqno;
76057 - atomic_t frag_seqno;
76058 + atomic_unchecked_t seqno;
76059 + atomic_unchecked_t frag_seqno;
76060 unsigned char *packet_buff;
76061 int packet_len;
76062 struct kobject *hardif_obj;
76063 @@ -163,7 +163,7 @@ struct bat_priv {
76064 atomic_t orig_interval; /* uint */
76065 atomic_t hop_penalty; /* uint */
76066 atomic_t log_level; /* uint */
76067 - atomic_t bcast_seqno;
76068 + atomic_unchecked_t bcast_seqno;
76069 atomic_t bcast_queue_left;
76070 atomic_t batman_queue_left;
76071 atomic_t ttvn; /* translation table version number */
76072 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
76073 index 74175c2..32f8901 100644
76074 --- a/net/batman-adv/unicast.c
76075 +++ b/net/batman-adv/unicast.c
76076 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
76077 frag1->flags = UNI_FRAG_HEAD | large_tail;
76078 frag2->flags = large_tail;
76079
76080 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
76081 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
76082 frag1->seqno = htons(seqno - 1);
76083 frag2->seqno = htons(seqno);
76084
76085 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
76086 index bedc768..a4a2b19 100644
76087 --- a/net/bluetooth/hci_sock.c
76088 +++ b/net/bluetooth/hci_sock.c
76089 @@ -942,7 +942,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
76090 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
76091 }
76092
76093 - len = min_t(unsigned int, len, sizeof(uf));
76094 + len = min((size_t)len, sizeof(uf));
76095 if (copy_from_user(&uf, optval, len)) {
76096 err = -EFAULT;
76097 break;
76098 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
76099 index 6204170..5ae8707 100644
76100 --- a/net/bluetooth/l2cap_core.c
76101 +++ b/net/bluetooth/l2cap_core.c
76102 @@ -2800,8 +2800,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
76103 break;
76104
76105 case L2CAP_CONF_RFC:
76106 - if (olen == sizeof(rfc))
76107 - memcpy(&rfc, (void *)val, olen);
76108 + if (olen != sizeof(rfc))
76109 + break;
76110 +
76111 + memcpy(&rfc, (void *)val, olen);
76112
76113 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
76114 rfc.mode != chan->mode)
76115 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
76116 index 9c94d44..831dc8e 100644
76117 --- a/net/bluetooth/l2cap_sock.c
76118 +++ b/net/bluetooth/l2cap_sock.c
76119 @@ -452,7 +452,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
76120 struct sock *sk = sock->sk;
76121 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
76122 struct l2cap_options opts;
76123 - int len, err = 0;
76124 + int err = 0;
76125 + size_t len = optlen;
76126 u32 opt;
76127
76128 BT_DBG("sk %p", sk);
76129 @@ -474,7 +475,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
76130 opts.max_tx = chan->max_tx;
76131 opts.txwin_size = chan->tx_win;
76132
76133 - len = min_t(unsigned int, sizeof(opts), optlen);
76134 + len = min(sizeof(opts), len);
76135 if (copy_from_user((char *) &opts, optval, len)) {
76136 err = -EFAULT;
76137 break;
76138 @@ -547,7 +548,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76139 struct bt_security sec;
76140 struct bt_power pwr;
76141 struct l2cap_conn *conn;
76142 - int len, err = 0;
76143 + int err = 0;
76144 + size_t len = optlen;
76145 u32 opt;
76146
76147 BT_DBG("sk %p", sk);
76148 @@ -570,7 +572,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76149
76150 sec.level = BT_SECURITY_LOW;
76151
76152 - len = min_t(unsigned int, sizeof(sec), optlen);
76153 + len = min(sizeof(sec), len);
76154 if (copy_from_user((char *) &sec, optval, len)) {
76155 err = -EFAULT;
76156 break;
76157 @@ -667,7 +669,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
76158
76159 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
76160
76161 - len = min_t(unsigned int, sizeof(pwr), optlen);
76162 + len = min(sizeof(pwr), len);
76163 if (copy_from_user((char *) &pwr, optval, len)) {
76164 err = -EFAULT;
76165 break;
76166 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
76167 index 2df6956..4b24eef 100644
76168 --- a/net/bluetooth/rfcomm/sock.c
76169 +++ b/net/bluetooth/rfcomm/sock.c
76170 @@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
76171 struct sock *sk = sock->sk;
76172 struct bt_security sec;
76173 int err = 0;
76174 - size_t len;
76175 + size_t len = optlen;
76176 u32 opt;
76177
76178 BT_DBG("sk %p", sk);
76179 @@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
76180
76181 sec.level = BT_SECURITY_LOW;
76182
76183 - len = min_t(unsigned int, sizeof(sec), optlen);
76184 + len = min(sizeof(sec), len);
76185 if (copy_from_user((char *) &sec, optval, len)) {
76186 err = -EFAULT;
76187 break;
76188 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
76189 index 6435296..4f8c8f6 100644
76190 --- a/net/bluetooth/rfcomm/tty.c
76191 +++ b/net/bluetooth/rfcomm/tty.c
76192 @@ -314,7 +314,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
76193 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
76194
76195 spin_lock_irqsave(&dev->port.lock, flags);
76196 - if (dev->port.count > 0) {
76197 + if (atomic_read(&dev->port.count) > 0) {
76198 spin_unlock_irqrestore(&dev->port.lock, flags);
76199 return;
76200 }
76201 @@ -669,10 +669,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
76202 return -ENODEV;
76203
76204 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
76205 - dev->channel, dev->port.count);
76206 + dev->channel, atomic_read(&dev->port.count));
76207
76208 spin_lock_irqsave(&dev->port.lock, flags);
76209 - if (++dev->port.count > 1) {
76210 + if (atomic_inc_return(&dev->port.count) > 1) {
76211 spin_unlock_irqrestore(&dev->port.lock, flags);
76212 return 0;
76213 }
76214 @@ -737,10 +737,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
76215 return;
76216
76217 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
76218 - dev->port.count);
76219 + atomic_read(&dev->port.count));
76220
76221 spin_lock_irqsave(&dev->port.lock, flags);
76222 - if (!--dev->port.count) {
76223 + if (!atomic_dec_return(&dev->port.count)) {
76224 spin_unlock_irqrestore(&dev->port.lock, flags);
76225 if (dev->tty_dev->parent)
76226 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
76227 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
76228 index 5fe2ff3..121d696 100644
76229 --- a/net/bridge/netfilter/ebtables.c
76230 +++ b/net/bridge/netfilter/ebtables.c
76231 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
76232 tmp.valid_hooks = t->table->valid_hooks;
76233 }
76234 mutex_unlock(&ebt_mutex);
76235 - if (copy_to_user(user, &tmp, *len) != 0){
76236 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
76237 BUGPRINT("c2u Didn't work\n");
76238 ret = -EFAULT;
76239 break;
76240 @@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
76241 goto out;
76242 tmp.valid_hooks = t->valid_hooks;
76243
76244 - if (copy_to_user(user, &tmp, *len) != 0) {
76245 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
76246 ret = -EFAULT;
76247 break;
76248 }
76249 @@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
76250 tmp.entries_size = t->table->entries_size;
76251 tmp.valid_hooks = t->table->valid_hooks;
76252
76253 - if (copy_to_user(user, &tmp, *len) != 0) {
76254 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
76255 ret = -EFAULT;
76256 break;
76257 }
76258 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
76259 index 047cd0e..461fd28 100644
76260 --- a/net/caif/cfctrl.c
76261 +++ b/net/caif/cfctrl.c
76262 @@ -10,6 +10,7 @@
76263 #include <linux/spinlock.h>
76264 #include <linux/slab.h>
76265 #include <linux/pkt_sched.h>
76266 +#include <linux/sched.h>
76267 #include <net/caif/caif_layer.h>
76268 #include <net/caif/cfpkt.h>
76269 #include <net/caif/cfctrl.h>
76270 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
76271 memset(&dev_info, 0, sizeof(dev_info));
76272 dev_info.id = 0xff;
76273 cfsrvl_init(&this->serv, 0, &dev_info, false);
76274 - atomic_set(&this->req_seq_no, 1);
76275 - atomic_set(&this->rsp_seq_no, 1);
76276 + atomic_set_unchecked(&this->req_seq_no, 1);
76277 + atomic_set_unchecked(&this->rsp_seq_no, 1);
76278 this->serv.layer.receive = cfctrl_recv;
76279 sprintf(this->serv.layer.name, "ctrl");
76280 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
76281 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
76282 struct cfctrl_request_info *req)
76283 {
76284 spin_lock_bh(&ctrl->info_list_lock);
76285 - atomic_inc(&ctrl->req_seq_no);
76286 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
76287 + atomic_inc_unchecked(&ctrl->req_seq_no);
76288 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
76289 list_add_tail(&req->list, &ctrl->list);
76290 spin_unlock_bh(&ctrl->info_list_lock);
76291 }
76292 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
76293 if (p != first)
76294 pr_warn("Requests are not received in order\n");
76295
76296 - atomic_set(&ctrl->rsp_seq_no,
76297 + atomic_set_unchecked(&ctrl->rsp_seq_no,
76298 p->sequence_no);
76299 list_del(&p->list);
76300 goto out;
76301 diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
76302 index 69771c0..e597733 100644
76303 --- a/net/caif/chnl_net.c
76304 +++ b/net/caif/chnl_net.c
76305 @@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
76306
76307 /* check the version of IP */
76308 ip_version = skb_header_pointer(skb, 0, 1, &buf);
76309 + if (!ip_version) {
76310 + kfree_skb(skb);
76311 + return -EINVAL;
76312 + }
76313
76314 switch (*ip_version >> 4) {
76315 case 4:
76316 diff --git a/net/can/gw.c b/net/can/gw.c
76317 index b41acf2..3affb3a 100644
76318 --- a/net/can/gw.c
76319 +++ b/net/can/gw.c
76320 @@ -96,7 +96,7 @@ struct cf_mod {
76321 struct {
76322 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
76323 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
76324 - } csumfunc;
76325 + } __no_const csumfunc;
76326 };
76327
76328
76329 diff --git a/net/compat.c b/net/compat.c
76330 index 74ed1d7..3695bd9 100644
76331 --- a/net/compat.c
76332 +++ b/net/compat.c
76333 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
76334 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
76335 __get_user(kmsg->msg_flags, &umsg->msg_flags))
76336 return -EFAULT;
76337 - kmsg->msg_name = compat_ptr(tmp1);
76338 - kmsg->msg_iov = compat_ptr(tmp2);
76339 - kmsg->msg_control = compat_ptr(tmp3);
76340 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
76341 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
76342 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
76343 return 0;
76344 }
76345
76346 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76347
76348 if (kern_msg->msg_namelen) {
76349 if (mode == VERIFY_READ) {
76350 - int err = move_addr_to_kernel(kern_msg->msg_name,
76351 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
76352 kern_msg->msg_namelen,
76353 kern_address);
76354 if (err < 0)
76355 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76356 kern_msg->msg_name = NULL;
76357
76358 tot_len = iov_from_user_compat_to_kern(kern_iov,
76359 - (struct compat_iovec __user *)kern_msg->msg_iov,
76360 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
76361 kern_msg->msg_iovlen);
76362 if (tot_len >= 0)
76363 kern_msg->msg_iov = kern_iov;
76364 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76365
76366 #define CMSG_COMPAT_FIRSTHDR(msg) \
76367 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
76368 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
76369 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
76370 (struct compat_cmsghdr __user *)NULL)
76371
76372 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
76373 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
76374 (ucmlen) <= (unsigned long) \
76375 ((mhdr)->msg_controllen - \
76376 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
76377 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
76378
76379 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
76380 struct compat_cmsghdr __user *cmsg, int cmsg_len)
76381 {
76382 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
76383 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
76384 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
76385 msg->msg_controllen)
76386 return NULL;
76387 return (struct compat_cmsghdr __user *)ptr;
76388 @@ -219,7 +219,7 @@ Efault:
76389
76390 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
76391 {
76392 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76393 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76394 struct compat_cmsghdr cmhdr;
76395 struct compat_timeval ctv;
76396 struct compat_timespec cts[3];
76397 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
76398
76399 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
76400 {
76401 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76402 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76403 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
76404 int fdnum = scm->fp->count;
76405 struct file **fp = scm->fp->fp;
76406 @@ -364,7 +364,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
76407 return -EFAULT;
76408 old_fs = get_fs();
76409 set_fs(KERNEL_DS);
76410 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
76411 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
76412 set_fs(old_fs);
76413
76414 return err;
76415 @@ -425,7 +425,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
76416 len = sizeof(ktime);
76417 old_fs = get_fs();
76418 set_fs(KERNEL_DS);
76419 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
76420 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
76421 set_fs(old_fs);
76422
76423 if (!err) {
76424 @@ -568,7 +568,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76425 case MCAST_JOIN_GROUP:
76426 case MCAST_LEAVE_GROUP:
76427 {
76428 - struct compat_group_req __user *gr32 = (void *)optval;
76429 + struct compat_group_req __user *gr32 = (void __user *)optval;
76430 struct group_req __user *kgr =
76431 compat_alloc_user_space(sizeof(struct group_req));
76432 u32 interface;
76433 @@ -589,7 +589,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76434 case MCAST_BLOCK_SOURCE:
76435 case MCAST_UNBLOCK_SOURCE:
76436 {
76437 - struct compat_group_source_req __user *gsr32 = (void *)optval;
76438 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
76439 struct group_source_req __user *kgsr = compat_alloc_user_space(
76440 sizeof(struct group_source_req));
76441 u32 interface;
76442 @@ -610,7 +610,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76443 }
76444 case MCAST_MSFILTER:
76445 {
76446 - struct compat_group_filter __user *gf32 = (void *)optval;
76447 + struct compat_group_filter __user *gf32 = (void __user *)optval;
76448 struct group_filter __user *kgf;
76449 u32 interface, fmode, numsrc;
76450
76451 @@ -648,7 +648,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
76452 char __user *optval, int __user *optlen,
76453 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
76454 {
76455 - struct compat_group_filter __user *gf32 = (void *)optval;
76456 + struct compat_group_filter __user *gf32 = (void __user *)optval;
76457 struct group_filter __user *kgf;
76458 int __user *koptlen;
76459 u32 interface, fmode, numsrc;
76460 @@ -797,7 +797,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
76461
76462 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
76463 return -EINVAL;
76464 - if (copy_from_user(a, args, nas[call]))
76465 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
76466 return -EFAULT;
76467 a0 = a[0];
76468 a1 = a[1];
76469 diff --git a/net/core/datagram.c b/net/core/datagram.c
76470 index ae6acf6..d5c8f66 100644
76471 --- a/net/core/datagram.c
76472 +++ b/net/core/datagram.c
76473 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
76474 }
76475
76476 kfree_skb(skb);
76477 - atomic_inc(&sk->sk_drops);
76478 + atomic_inc_unchecked(&sk->sk_drops);
76479 sk_mem_reclaim_partial(sk);
76480
76481 return err;
76482 diff --git a/net/core/dev.c b/net/core/dev.c
76483 index e2215ee..b850ae4 100644
76484 --- a/net/core/dev.c
76485 +++ b/net/core/dev.c
76486 @@ -1138,9 +1138,13 @@ void dev_load(struct net *net, const char *name)
76487 if (no_module && capable(CAP_NET_ADMIN))
76488 no_module = request_module("netdev-%s", name);
76489 if (no_module && capable(CAP_SYS_MODULE)) {
76490 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
76491 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
76492 +#else
76493 if (!request_module("%s", name))
76494 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
76495 name);
76496 +#endif
76497 }
76498 }
76499 EXPORT_SYMBOL(dev_load);
76500 @@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76501 {
76502 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
76503 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
76504 - atomic_long_inc(&dev->rx_dropped);
76505 + atomic_long_inc_unchecked(&dev->rx_dropped);
76506 kfree_skb(skb);
76507 return NET_RX_DROP;
76508 }
76509 @@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76510 nf_reset(skb);
76511
76512 if (unlikely(!is_skb_forwardable(dev, skb))) {
76513 - atomic_long_inc(&dev->rx_dropped);
76514 + atomic_long_inc_unchecked(&dev->rx_dropped);
76515 kfree_skb(skb);
76516 return NET_RX_DROP;
76517 }
76518 @@ -2056,7 +2060,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
76519
76520 struct dev_gso_cb {
76521 void (*destructor)(struct sk_buff *skb);
76522 -};
76523 +} __no_const;
76524
76525 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
76526
76527 @@ -2894,7 +2898,7 @@ enqueue:
76528
76529 local_irq_restore(flags);
76530
76531 - atomic_long_inc(&skb->dev->rx_dropped);
76532 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76533 kfree_skb(skb);
76534 return NET_RX_DROP;
76535 }
76536 @@ -2966,7 +2970,7 @@ int netif_rx_ni(struct sk_buff *skb)
76537 }
76538 EXPORT_SYMBOL(netif_rx_ni);
76539
76540 -static void net_tx_action(struct softirq_action *h)
76541 +static void net_tx_action(void)
76542 {
76543 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76544
76545 @@ -3253,7 +3257,7 @@ ncls:
76546 if (pt_prev) {
76547 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
76548 } else {
76549 - atomic_long_inc(&skb->dev->rx_dropped);
76550 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76551 kfree_skb(skb);
76552 /* Jamal, now you will not able to escape explaining
76553 * me how you were going to use this. :-)
76554 @@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
76555 }
76556 EXPORT_SYMBOL(netif_napi_del);
76557
76558 -static void net_rx_action(struct softirq_action *h)
76559 +static void net_rx_action(void)
76560 {
76561 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76562 unsigned long time_limit = jiffies + 2;
76563 @@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
76564 else
76565 seq_printf(seq, "%04x", ntohs(pt->type));
76566
76567 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76568 + seq_printf(seq, " %-8s %p\n",
76569 + pt->dev ? pt->dev->name : "", NULL);
76570 +#else
76571 seq_printf(seq, " %-8s %pF\n",
76572 pt->dev ? pt->dev->name : "", pt->func);
76573 +#endif
76574 }
76575
76576 return 0;
76577 @@ -5841,7 +5850,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
76578 } else {
76579 netdev_stats_to_stats64(storage, &dev->stats);
76580 }
76581 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
76582 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
76583 return storage;
76584 }
76585 EXPORT_SYMBOL(dev_get_stats);
76586 diff --git a/net/core/flow.c b/net/core/flow.c
76587 index e318c7e..168b1d0 100644
76588 --- a/net/core/flow.c
76589 +++ b/net/core/flow.c
76590 @@ -61,7 +61,7 @@ struct flow_cache {
76591 struct timer_list rnd_timer;
76592 };
76593
76594 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
76595 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
76596 EXPORT_SYMBOL(flow_cache_genid);
76597 static struct flow_cache flow_cache_global;
76598 static struct kmem_cache *flow_cachep __read_mostly;
76599 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
76600
76601 static int flow_entry_valid(struct flow_cache_entry *fle)
76602 {
76603 - if (atomic_read(&flow_cache_genid) != fle->genid)
76604 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
76605 return 0;
76606 if (fle->object && !fle->object->ops->check(fle->object))
76607 return 0;
76608 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
76609 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
76610 fcp->hash_count++;
76611 }
76612 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
76613 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
76614 flo = fle->object;
76615 if (!flo)
76616 goto ret_object;
76617 @@ -280,7 +280,7 @@ nocache:
76618 }
76619 flo = resolver(net, key, family, dir, flo, ctx);
76620 if (fle) {
76621 - fle->genid = atomic_read(&flow_cache_genid);
76622 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
76623 if (!IS_ERR(flo))
76624 fle->object = flo;
76625 else
76626 diff --git a/net/core/iovec.c b/net/core/iovec.c
76627 index 7e7aeb0..2a998cb 100644
76628 --- a/net/core/iovec.c
76629 +++ b/net/core/iovec.c
76630 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76631 if (m->msg_namelen) {
76632 if (mode == VERIFY_READ) {
76633 void __user *namep;
76634 - namep = (void __user __force *) m->msg_name;
76635 + namep = (void __force_user *) m->msg_name;
76636 err = move_addr_to_kernel(namep, m->msg_namelen,
76637 address);
76638 if (err < 0)
76639 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76640 }
76641
76642 size = m->msg_iovlen * sizeof(struct iovec);
76643 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
76644 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
76645 return -EFAULT;
76646
76647 m->msg_iov = iov;
76648 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
76649 index 6c50ac0..6b4c038 100644
76650 --- a/net/core/rtnetlink.c
76651 +++ b/net/core/rtnetlink.c
76652 @@ -58,7 +58,7 @@ struct rtnl_link {
76653 rtnl_doit_func doit;
76654 rtnl_dumpit_func dumpit;
76655 rtnl_calcit_func calcit;
76656 -};
76657 +} __no_const;
76658
76659 static DEFINE_MUTEX(rtnl_mutex);
76660
76661 diff --git a/net/core/scm.c b/net/core/scm.c
76662 index 611c5ef..88f6d6d 100644
76663 --- a/net/core/scm.c
76664 +++ b/net/core/scm.c
76665 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
76666 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76667 {
76668 struct cmsghdr __user *cm
76669 - = (__force struct cmsghdr __user *)msg->msg_control;
76670 + = (struct cmsghdr __force_user *)msg->msg_control;
76671 struct cmsghdr cmhdr;
76672 int cmlen = CMSG_LEN(len);
76673 int err;
76674 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76675 err = -EFAULT;
76676 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
76677 goto out;
76678 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
76679 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
76680 goto out;
76681 cmlen = CMSG_SPACE(len);
76682 if (msg->msg_controllen < cmlen)
76683 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
76684 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76685 {
76686 struct cmsghdr __user *cm
76687 - = (__force struct cmsghdr __user*)msg->msg_control;
76688 + = (struct cmsghdr __force_user *)msg->msg_control;
76689
76690 int fdmax = 0;
76691 int fdnum = scm->fp->count;
76692 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76693 if (fdnum < fdmax)
76694 fdmax = fdnum;
76695
76696 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
76697 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
76698 i++, cmfptr++)
76699 {
76700 int new_fd;
76701 diff --git a/net/core/sock.c b/net/core/sock.c
76702 index fcf8fdf..74437a5 100644
76703 --- a/net/core/sock.c
76704 +++ b/net/core/sock.c
76705 @@ -344,7 +344,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76706 struct sk_buff_head *list = &sk->sk_receive_queue;
76707
76708 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
76709 - atomic_inc(&sk->sk_drops);
76710 + atomic_inc_unchecked(&sk->sk_drops);
76711 trace_sock_rcvqueue_full(sk, skb);
76712 return -ENOMEM;
76713 }
76714 @@ -354,7 +354,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76715 return err;
76716
76717 if (!sk_rmem_schedule(sk, skb->truesize)) {
76718 - atomic_inc(&sk->sk_drops);
76719 + atomic_inc_unchecked(&sk->sk_drops);
76720 return -ENOBUFS;
76721 }
76722
76723 @@ -374,7 +374,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76724 skb_dst_force(skb);
76725
76726 spin_lock_irqsave(&list->lock, flags);
76727 - skb->dropcount = atomic_read(&sk->sk_drops);
76728 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76729 __skb_queue_tail(list, skb);
76730 spin_unlock_irqrestore(&list->lock, flags);
76731
76732 @@ -394,7 +394,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76733 skb->dev = NULL;
76734
76735 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
76736 - atomic_inc(&sk->sk_drops);
76737 + atomic_inc_unchecked(&sk->sk_drops);
76738 goto discard_and_relse;
76739 }
76740 if (nested)
76741 @@ -412,7 +412,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76742 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76743 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
76744 bh_unlock_sock(sk);
76745 - atomic_inc(&sk->sk_drops);
76746 + atomic_inc_unchecked(&sk->sk_drops);
76747 goto discard_and_relse;
76748 }
76749
76750 @@ -636,7 +636,8 @@ set_rcvbuf:
76751
76752 case SO_KEEPALIVE:
76753 #ifdef CONFIG_INET
76754 - if (sk->sk_protocol == IPPROTO_TCP)
76755 + if (sk->sk_protocol == IPPROTO_TCP &&
76756 + sk->sk_type == SOCK_STREAM)
76757 tcp_set_keepalive(sk, valbool);
76758 #endif
76759 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
76760 @@ -830,12 +831,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76761 struct timeval tm;
76762 } v;
76763
76764 - int lv = sizeof(int);
76765 - int len;
76766 + unsigned int lv = sizeof(int);
76767 + unsigned int len;
76768
76769 if (get_user(len, optlen))
76770 return -EFAULT;
76771 - if (len < 0)
76772 + if (len > INT_MAX)
76773 return -EINVAL;
76774
76775 memset(&v, 0, sizeof(v));
76776 @@ -976,18 +977,18 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76777 if (len > sizeof(peercred))
76778 len = sizeof(peercred);
76779 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76780 - if (copy_to_user(optval, &peercred, len))
76781 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76782 return -EFAULT;
76783 goto lenout;
76784 }
76785
76786 case SO_PEERNAME:
76787 {
76788 - char address[128];
76789 + char address[_K_SS_MAXSIZE];
76790
76791 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
76792 return -ENOTCONN;
76793 - if (lv < len)
76794 + if (lv < len || sizeof address < len)
76795 return -EINVAL;
76796 if (copy_to_user(optval, address, len))
76797 return -EFAULT;
76798 @@ -1035,7 +1036,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76799
76800 if (len > lv)
76801 len = lv;
76802 - if (copy_to_user(optval, &v, len))
76803 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
76804 return -EFAULT;
76805 lenout:
76806 if (put_user(len, optlen))
76807 @@ -2125,7 +2126,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
76808 */
76809 smp_wmb();
76810 atomic_set(&sk->sk_refcnt, 1);
76811 - atomic_set(&sk->sk_drops, 0);
76812 + atomic_set_unchecked(&sk->sk_drops, 0);
76813 }
76814 EXPORT_SYMBOL(sock_init_data);
76815
76816 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
76817 index 5fd1467..8b70900 100644
76818 --- a/net/core/sock_diag.c
76819 +++ b/net/core/sock_diag.c
76820 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
76821
76822 int sock_diag_check_cookie(void *sk, __u32 *cookie)
76823 {
76824 +#ifndef CONFIG_GRKERNSEC_HIDESYM
76825 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
76826 cookie[1] != INET_DIAG_NOCOOKIE) &&
76827 ((u32)(unsigned long)sk != cookie[0] ||
76828 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
76829 return -ESTALE;
76830 else
76831 +#endif
76832 return 0;
76833 }
76834 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
76835
76836 void sock_diag_save_cookie(void *sk, __u32 *cookie)
76837 {
76838 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76839 + cookie[0] = 0;
76840 + cookie[1] = 0;
76841 +#else
76842 cookie[0] = (u32)(unsigned long)sk;
76843 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
76844 +#endif
76845 }
76846 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
76847
76848 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
76849 index a55eecc..dd8428c 100644
76850 --- a/net/decnet/sysctl_net_decnet.c
76851 +++ b/net/decnet/sysctl_net_decnet.c
76852 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
76853
76854 if (len > *lenp) len = *lenp;
76855
76856 - if (copy_to_user(buffer, addr, len))
76857 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
76858 return -EFAULT;
76859
76860 *lenp = len;
76861 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
76862
76863 if (len > *lenp) len = *lenp;
76864
76865 - if (copy_to_user(buffer, devname, len))
76866 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
76867 return -EFAULT;
76868
76869 *lenp = len;
76870 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
76871 index 3854411..2201a94 100644
76872 --- a/net/ipv4/fib_frontend.c
76873 +++ b/net/ipv4/fib_frontend.c
76874 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
76875 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76876 fib_sync_up(dev);
76877 #endif
76878 - atomic_inc(&net->ipv4.dev_addr_genid);
76879 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76880 rt_cache_flush(dev_net(dev), -1);
76881 break;
76882 case NETDEV_DOWN:
76883 fib_del_ifaddr(ifa, NULL);
76884 - atomic_inc(&net->ipv4.dev_addr_genid);
76885 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76886 if (ifa->ifa_dev->ifa_list == NULL) {
76887 /* Last address was deleted from this interface.
76888 * Disable IP.
76889 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
76890 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76891 fib_sync_up(dev);
76892 #endif
76893 - atomic_inc(&net->ipv4.dev_addr_genid);
76894 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76895 rt_cache_flush(dev_net(dev), -1);
76896 break;
76897 case NETDEV_DOWN:
76898 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
76899 index e5b7182..570a90e 100644
76900 --- a/net/ipv4/fib_semantics.c
76901 +++ b/net/ipv4/fib_semantics.c
76902 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
76903 nh->nh_saddr = inet_select_addr(nh->nh_dev,
76904 nh->nh_gw,
76905 nh->nh_parent->fib_scope);
76906 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
76907 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
76908
76909 return nh->nh_saddr;
76910 }
76911 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
76912 index 7880af9..70f92a3 100644
76913 --- a/net/ipv4/inet_hashtables.c
76914 +++ b/net/ipv4/inet_hashtables.c
76915 @@ -18,12 +18,15 @@
76916 #include <linux/sched.h>
76917 #include <linux/slab.h>
76918 #include <linux/wait.h>
76919 +#include <linux/security.h>
76920
76921 #include <net/inet_connection_sock.h>
76922 #include <net/inet_hashtables.h>
76923 #include <net/secure_seq.h>
76924 #include <net/ip.h>
76925
76926 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
76927 +
76928 /*
76929 * Allocate and initialize a new local port bind bucket.
76930 * The bindhash mutex for snum's hash chain must be held here.
76931 @@ -530,6 +533,8 @@ ok:
76932 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
76933 spin_unlock(&head->lock);
76934
76935 + gr_update_task_in_ip_table(current, inet_sk(sk));
76936 +
76937 if (tw) {
76938 inet_twsk_deschedule(tw, death_row);
76939 while (twrefcnt) {
76940 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
76941 index dfba343..c827d50 100644
76942 --- a/net/ipv4/inetpeer.c
76943 +++ b/net/ipv4/inetpeer.c
76944 @@ -487,8 +487,8 @@ relookup:
76945 if (p) {
76946 p->daddr = *daddr;
76947 atomic_set(&p->refcnt, 1);
76948 - atomic_set(&p->rid, 0);
76949 - atomic_set(&p->ip_id_count,
76950 + atomic_set_unchecked(&p->rid, 0);
76951 + atomic_set_unchecked(&p->ip_id_count,
76952 (daddr->family == AF_INET) ?
76953 secure_ip_id(daddr->addr.a4) :
76954 secure_ipv6_id(daddr->addr.a6));
76955 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
76956 index 9dbd3dd..0c59fb2 100644
76957 --- a/net/ipv4/ip_fragment.c
76958 +++ b/net/ipv4/ip_fragment.c
76959 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
76960 return 0;
76961
76962 start = qp->rid;
76963 - end = atomic_inc_return(&peer->rid);
76964 + end = atomic_inc_return_unchecked(&peer->rid);
76965 qp->rid = end;
76966
76967 rc = qp->q.fragments && (end - start) > max;
76968 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
76969 index 0d11f23..2bb3f64 100644
76970 --- a/net/ipv4/ip_sockglue.c
76971 +++ b/net/ipv4/ip_sockglue.c
76972 @@ -1142,7 +1142,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76973 len = min_t(unsigned int, len, opt->optlen);
76974 if (put_user(len, optlen))
76975 return -EFAULT;
76976 - if (copy_to_user(optval, opt->__data, len))
76977 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76978 + copy_to_user(optval, opt->__data, len))
76979 return -EFAULT;
76980 return 0;
76981 }
76982 @@ -1273,7 +1274,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76983 if (sk->sk_type != SOCK_STREAM)
76984 return -ENOPROTOOPT;
76985
76986 - msg.msg_control = optval;
76987 + msg.msg_control = (void __force_kernel *)optval;
76988 msg.msg_controllen = len;
76989 msg.msg_flags = flags;
76990
76991 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
76992 index 67e8a6b..386764d 100644
76993 --- a/net/ipv4/ipconfig.c
76994 +++ b/net/ipv4/ipconfig.c
76995 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
76996
76997 mm_segment_t oldfs = get_fs();
76998 set_fs(get_ds());
76999 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
77000 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
77001 set_fs(oldfs);
77002 return res;
77003 }
77004 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
77005
77006 mm_segment_t oldfs = get_fs();
77007 set_fs(get_ds());
77008 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
77009 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
77010 set_fs(oldfs);
77011 return res;
77012 }
77013 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
77014
77015 mm_segment_t oldfs = get_fs();
77016 set_fs(get_ds());
77017 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
77018 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
77019 set_fs(oldfs);
77020 return res;
77021 }
77022 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
77023 index 97e61ea..cac1bbb 100644
77024 --- a/net/ipv4/netfilter/arp_tables.c
77025 +++ b/net/ipv4/netfilter/arp_tables.c
77026 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
77027 #endif
77028
77029 static int get_info(struct net *net, void __user *user,
77030 - const int *len, int compat)
77031 + int len, int compat)
77032 {
77033 char name[XT_TABLE_MAXNAMELEN];
77034 struct xt_table *t;
77035 int ret;
77036
77037 - if (*len != sizeof(struct arpt_getinfo)) {
77038 - duprintf("length %u != %Zu\n", *len,
77039 + if (len != sizeof(struct arpt_getinfo)) {
77040 + duprintf("length %u != %Zu\n", len,
77041 sizeof(struct arpt_getinfo));
77042 return -EINVAL;
77043 }
77044 @@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
77045 info.size = private->size;
77046 strcpy(info.name, name);
77047
77048 - if (copy_to_user(user, &info, *len) != 0)
77049 + if (copy_to_user(user, &info, len) != 0)
77050 ret = -EFAULT;
77051 else
77052 ret = 0;
77053 @@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
77054
77055 switch (cmd) {
77056 case ARPT_SO_GET_INFO:
77057 - ret = get_info(sock_net(sk), user, len, 1);
77058 + ret = get_info(sock_net(sk), user, *len, 1);
77059 break;
77060 case ARPT_SO_GET_ENTRIES:
77061 ret = compat_get_entries(sock_net(sk), user, len);
77062 @@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
77063
77064 switch (cmd) {
77065 case ARPT_SO_GET_INFO:
77066 - ret = get_info(sock_net(sk), user, len, 0);
77067 + ret = get_info(sock_net(sk), user, *len, 0);
77068 break;
77069
77070 case ARPT_SO_GET_ENTRIES:
77071 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
77072 index 170b1fd..6105b91 100644
77073 --- a/net/ipv4/netfilter/ip_tables.c
77074 +++ b/net/ipv4/netfilter/ip_tables.c
77075 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
77076 #endif
77077
77078 static int get_info(struct net *net, void __user *user,
77079 - const int *len, int compat)
77080 + int len, int compat)
77081 {
77082 char name[XT_TABLE_MAXNAMELEN];
77083 struct xt_table *t;
77084 int ret;
77085
77086 - if (*len != sizeof(struct ipt_getinfo)) {
77087 - duprintf("length %u != %zu\n", *len,
77088 + if (len != sizeof(struct ipt_getinfo)) {
77089 + duprintf("length %u != %zu\n", len,
77090 sizeof(struct ipt_getinfo));
77091 return -EINVAL;
77092 }
77093 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
77094 info.size = private->size;
77095 strcpy(info.name, name);
77096
77097 - if (copy_to_user(user, &info, *len) != 0)
77098 + if (copy_to_user(user, &info, len) != 0)
77099 ret = -EFAULT;
77100 else
77101 ret = 0;
77102 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77103
77104 switch (cmd) {
77105 case IPT_SO_GET_INFO:
77106 - ret = get_info(sock_net(sk), user, len, 1);
77107 + ret = get_info(sock_net(sk), user, *len, 1);
77108 break;
77109 case IPT_SO_GET_ENTRIES:
77110 ret = compat_get_entries(sock_net(sk), user, len);
77111 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77112
77113 switch (cmd) {
77114 case IPT_SO_GET_INFO:
77115 - ret = get_info(sock_net(sk), user, len, 0);
77116 + ret = get_info(sock_net(sk), user, *len, 0);
77117 break;
77118
77119 case IPT_SO_GET_ENTRIES:
77120 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
77121 index 2c00e8b..45b3bdd 100644
77122 --- a/net/ipv4/ping.c
77123 +++ b/net/ipv4/ping.c
77124 @@ -845,7 +845,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
77125 sk_rmem_alloc_get(sp),
77126 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77127 atomic_read(&sp->sk_refcnt), sp,
77128 - atomic_read(&sp->sk_drops), len);
77129 + atomic_read_unchecked(&sp->sk_drops), len);
77130 }
77131
77132 static int ping_seq_show(struct seq_file *seq, void *v)
77133 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
77134 index 4032b81..625143c 100644
77135 --- a/net/ipv4/raw.c
77136 +++ b/net/ipv4/raw.c
77137 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
77138 int raw_rcv(struct sock *sk, struct sk_buff *skb)
77139 {
77140 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
77141 - atomic_inc(&sk->sk_drops);
77142 + atomic_inc_unchecked(&sk->sk_drops);
77143 kfree_skb(skb);
77144 return NET_RX_DROP;
77145 }
77146 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
77147
77148 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
77149 {
77150 + struct icmp_filter filter;
77151 +
77152 if (optlen > sizeof(struct icmp_filter))
77153 optlen = sizeof(struct icmp_filter);
77154 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
77155 + if (copy_from_user(&filter, optval, optlen))
77156 return -EFAULT;
77157 + raw_sk(sk)->filter = filter;
77158 return 0;
77159 }
77160
77161 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
77162 {
77163 int len, ret = -EFAULT;
77164 + struct icmp_filter filter;
77165
77166 if (get_user(len, optlen))
77167 goto out;
77168 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
77169 if (len > sizeof(struct icmp_filter))
77170 len = sizeof(struct icmp_filter);
77171 ret = -EFAULT;
77172 - if (put_user(len, optlen) ||
77173 - copy_to_user(optval, &raw_sk(sk)->filter, len))
77174 + filter = raw_sk(sk)->filter;
77175 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
77176 goto out;
77177 ret = 0;
77178 out: return ret;
77179 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77180 sk_wmem_alloc_get(sp),
77181 sk_rmem_alloc_get(sp),
77182 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77183 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77184 + atomic_read(&sp->sk_refcnt),
77185 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77186 + NULL,
77187 +#else
77188 + sp,
77189 +#endif
77190 + atomic_read_unchecked(&sp->sk_drops));
77191 }
77192
77193 static int raw_seq_show(struct seq_file *seq, void *v)
77194 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
77195 index 98b30d0..cfa3cf7 100644
77196 --- a/net/ipv4/route.c
77197 +++ b/net/ipv4/route.c
77198 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
77199
77200 static inline int rt_genid(struct net *net)
77201 {
77202 - return atomic_read(&net->ipv4.rt_genid);
77203 + return atomic_read_unchecked(&net->ipv4.rt_genid);
77204 }
77205
77206 #ifdef CONFIG_PROC_FS
77207 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
77208 unsigned char shuffle;
77209
77210 get_random_bytes(&shuffle, sizeof(shuffle));
77211 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
77212 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
77213 inetpeer_invalidate_tree(AF_INET);
77214 }
77215
77216 @@ -3011,7 +3011,7 @@ static int rt_fill_info(struct net *net,
77217 error = rt->dst.error;
77218 if (peer) {
77219 inet_peer_refcheck(rt->peer);
77220 - id = atomic_read(&peer->ip_id_count) & 0xffff;
77221 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
77222 if (peer->tcp_ts_stamp) {
77223 ts = peer->tcp_ts;
77224 tsage = get_seconds() - peer->tcp_ts_stamp;
77225 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
77226 index 262316b..5a9f3f3 100644
77227 --- a/net/ipv4/tcp_input.c
77228 +++ b/net/ipv4/tcp_input.c
77229 @@ -4885,7 +4885,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
77230 * simplifies code)
77231 */
77232 static void
77233 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
77234 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
77235 struct sk_buff *head, struct sk_buff *tail,
77236 u32 start, u32 end)
77237 {
77238 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
77239 index c8d28c4..e40f75a 100644
77240 --- a/net/ipv4/tcp_ipv4.c
77241 +++ b/net/ipv4/tcp_ipv4.c
77242 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
77243 EXPORT_SYMBOL(sysctl_tcp_low_latency);
77244
77245
77246 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77247 +extern int grsec_enable_blackhole;
77248 +#endif
77249 +
77250 #ifdef CONFIG_TCP_MD5SIG
77251 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
77252 __be32 daddr, __be32 saddr, const struct tcphdr *th);
77253 @@ -1656,6 +1660,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
77254 return 0;
77255
77256 reset:
77257 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77258 + if (!grsec_enable_blackhole)
77259 +#endif
77260 tcp_v4_send_reset(rsk, skb);
77261 discard:
77262 kfree_skb(skb);
77263 @@ -1718,12 +1725,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
77264 TCP_SKB_CB(skb)->sacked = 0;
77265
77266 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77267 - if (!sk)
77268 + if (!sk) {
77269 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77270 + ret = 1;
77271 +#endif
77272 goto no_tcp_socket;
77273 -
77274 + }
77275 process:
77276 - if (sk->sk_state == TCP_TIME_WAIT)
77277 + if (sk->sk_state == TCP_TIME_WAIT) {
77278 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77279 + ret = 2;
77280 +#endif
77281 goto do_time_wait;
77282 + }
77283
77284 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
77285 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77286 @@ -1774,6 +1788,10 @@ no_tcp_socket:
77287 bad_packet:
77288 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77289 } else {
77290 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77291 + if (!grsec_enable_blackhole || (ret == 1 &&
77292 + (skb->dev->flags & IFF_LOOPBACK)))
77293 +#endif
77294 tcp_v4_send_reset(NULL, skb);
77295 }
77296
77297 @@ -2386,7 +2404,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
77298 0, /* non standard timer */
77299 0, /* open_requests have no inode */
77300 atomic_read(&sk->sk_refcnt),
77301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77302 + NULL,
77303 +#else
77304 req,
77305 +#endif
77306 len);
77307 }
77308
77309 @@ -2436,7 +2458,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
77310 sock_i_uid(sk),
77311 icsk->icsk_probes_out,
77312 sock_i_ino(sk),
77313 - atomic_read(&sk->sk_refcnt), sk,
77314 + atomic_read(&sk->sk_refcnt),
77315 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77316 + NULL,
77317 +#else
77318 + sk,
77319 +#endif
77320 jiffies_to_clock_t(icsk->icsk_rto),
77321 jiffies_to_clock_t(icsk->icsk_ack.ato),
77322 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
77323 @@ -2464,7 +2491,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
77324 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
77325 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
77326 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77327 - atomic_read(&tw->tw_refcnt), tw, len);
77328 + atomic_read(&tw->tw_refcnt),
77329 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77330 + NULL,
77331 +#else
77332 + tw,
77333 +#endif
77334 + len);
77335 }
77336
77337 #define TMPSZ 150
77338 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
77339 index b85d9fe..4b0eed9 100644
77340 --- a/net/ipv4/tcp_minisocks.c
77341 +++ b/net/ipv4/tcp_minisocks.c
77342 @@ -27,6 +27,10 @@
77343 #include <net/inet_common.h>
77344 #include <net/xfrm.h>
77345
77346 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77347 +extern int grsec_enable_blackhole;
77348 +#endif
77349 +
77350 int sysctl_tcp_syncookies __read_mostly = 1;
77351 EXPORT_SYMBOL(sysctl_tcp_syncookies);
77352
77353 @@ -754,6 +758,10 @@ listen_overflow:
77354
77355 embryonic_reset:
77356 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
77357 +
77358 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77359 + if (!grsec_enable_blackhole)
77360 +#endif
77361 if (!(flg & TCP_FLAG_RST))
77362 req->rsk_ops->send_reset(sk, skb);
77363
77364 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
77365 index 4526fe6..1a34e43 100644
77366 --- a/net/ipv4/tcp_probe.c
77367 +++ b/net/ipv4/tcp_probe.c
77368 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
77369 if (cnt + width >= len)
77370 break;
77371
77372 - if (copy_to_user(buf + cnt, tbuf, width))
77373 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
77374 return -EFAULT;
77375 cnt += width;
77376 }
77377 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
77378 index e911e6c..d0a9356 100644
77379 --- a/net/ipv4/tcp_timer.c
77380 +++ b/net/ipv4/tcp_timer.c
77381 @@ -22,6 +22,10 @@
77382 #include <linux/gfp.h>
77383 #include <net/tcp.h>
77384
77385 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77386 +extern int grsec_lastack_retries;
77387 +#endif
77388 +
77389 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
77390 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
77391 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
77392 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
77393 }
77394 }
77395
77396 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77397 + if ((sk->sk_state == TCP_LAST_ACK) &&
77398 + (grsec_lastack_retries > 0) &&
77399 + (grsec_lastack_retries < retry_until))
77400 + retry_until = grsec_lastack_retries;
77401 +#endif
77402 +
77403 if (retransmits_timed_out(sk, retry_until,
77404 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
77405 /* Has it gone just too far? */
77406 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
77407 index eaca736..60488ae 100644
77408 --- a/net/ipv4/udp.c
77409 +++ b/net/ipv4/udp.c
77410 @@ -87,6 +87,7 @@
77411 #include <linux/types.h>
77412 #include <linux/fcntl.h>
77413 #include <linux/module.h>
77414 +#include <linux/security.h>
77415 #include <linux/socket.h>
77416 #include <linux/sockios.h>
77417 #include <linux/igmp.h>
77418 @@ -110,6 +111,10 @@
77419 #include <linux/static_key.h>
77420 #include "udp_impl.h"
77421
77422 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77423 +extern int grsec_enable_blackhole;
77424 +#endif
77425 +
77426 struct udp_table udp_table __read_mostly;
77427 EXPORT_SYMBOL(udp_table);
77428
77429 @@ -568,6 +573,9 @@ found:
77430 return s;
77431 }
77432
77433 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
77434 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
77435 +
77436 /*
77437 * This routine is called by the ICMP module when it gets some
77438 * sort of error condition. If err < 0 then the socket should
77439 @@ -859,9 +867,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
77440 dport = usin->sin_port;
77441 if (dport == 0)
77442 return -EINVAL;
77443 +
77444 + err = gr_search_udp_sendmsg(sk, usin);
77445 + if (err)
77446 + return err;
77447 } else {
77448 if (sk->sk_state != TCP_ESTABLISHED)
77449 return -EDESTADDRREQ;
77450 +
77451 + err = gr_search_udp_sendmsg(sk, NULL);
77452 + if (err)
77453 + return err;
77454 +
77455 daddr = inet->inet_daddr;
77456 dport = inet->inet_dport;
77457 /* Open fast path for connected socket.
77458 @@ -1103,7 +1120,7 @@ static unsigned int first_packet_length(struct sock *sk)
77459 udp_lib_checksum_complete(skb)) {
77460 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77461 IS_UDPLITE(sk));
77462 - atomic_inc(&sk->sk_drops);
77463 + atomic_inc_unchecked(&sk->sk_drops);
77464 __skb_unlink(skb, rcvq);
77465 __skb_queue_tail(&list_kill, skb);
77466 }
77467 @@ -1189,6 +1206,10 @@ try_again:
77468 if (!skb)
77469 goto out;
77470
77471 + err = gr_search_udp_recvmsg(sk, skb);
77472 + if (err)
77473 + goto out_free;
77474 +
77475 ulen = skb->len - sizeof(struct udphdr);
77476 copied = len;
77477 if (copied > ulen)
77478 @@ -1498,7 +1519,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
77479
77480 drop:
77481 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77482 - atomic_inc(&sk->sk_drops);
77483 + atomic_inc_unchecked(&sk->sk_drops);
77484 kfree_skb(skb);
77485 return -1;
77486 }
77487 @@ -1517,7 +1538,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77488 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
77489
77490 if (!skb1) {
77491 - atomic_inc(&sk->sk_drops);
77492 + atomic_inc_unchecked(&sk->sk_drops);
77493 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
77494 IS_UDPLITE(sk));
77495 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77496 @@ -1686,6 +1707,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77497 goto csum_error;
77498
77499 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
77500 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77501 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77502 +#endif
77503 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
77504
77505 /*
77506 @@ -2104,8 +2128,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
77507 sk_wmem_alloc_get(sp),
77508 sk_rmem_alloc_get(sp),
77509 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77510 - atomic_read(&sp->sk_refcnt), sp,
77511 - atomic_read(&sp->sk_drops), len);
77512 + atomic_read(&sp->sk_refcnt),
77513 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77514 + NULL,
77515 +#else
77516 + sp,
77517 +#endif
77518 + atomic_read_unchecked(&sp->sk_drops), len);
77519 }
77520
77521 int udp4_seq_show(struct seq_file *seq, void *v)
77522 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
77523 index 938ba6d..30c8021 100644
77524 --- a/net/ipv6/addrconf.c
77525 +++ b/net/ipv6/addrconf.c
77526 @@ -2143,7 +2143,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
77527 p.iph.ihl = 5;
77528 p.iph.protocol = IPPROTO_IPV6;
77529 p.iph.ttl = 64;
77530 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
77531 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
77532
77533 if (ops->ndo_do_ioctl) {
77534 mm_segment_t oldfs = get_fs();
77535 diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
77536 index db1521f..ebb3314 100644
77537 --- a/net/ipv6/esp6.c
77538 +++ b/net/ipv6/esp6.c
77539 @@ -166,8 +166,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77540 struct esp_data *esp = x->data;
77541
77542 /* skb is pure payload to encrypt */
77543 - err = -ENOMEM;
77544 -
77545 aead = esp->aead;
77546 alen = crypto_aead_authsize(aead);
77547
77548 @@ -202,8 +200,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77549 }
77550
77551 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
77552 - if (!tmp)
77553 + if (!tmp) {
77554 + err = -ENOMEM;
77555 goto error;
77556 + }
77557
77558 seqhi = esp_tmp_seqhi(tmp);
77559 iv = esp_tmp_iv(aead, tmp, seqhilen);
77560 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
77561 index e6cee52..cf47476 100644
77562 --- a/net/ipv6/inet6_connection_sock.c
77563 +++ b/net/ipv6/inet6_connection_sock.c
77564 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
77565 #ifdef CONFIG_XFRM
77566 {
77567 struct rt6_info *rt = (struct rt6_info *)dst;
77568 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
77569 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
77570 }
77571 #endif
77572 }
77573 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
77574 #ifdef CONFIG_XFRM
77575 if (dst) {
77576 struct rt6_info *rt = (struct rt6_info *)dst;
77577 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
77578 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
77579 __sk_dst_reset(sk);
77580 dst = NULL;
77581 }
77582 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
77583 index ba6d13d..6899122 100644
77584 --- a/net/ipv6/ipv6_sockglue.c
77585 +++ b/net/ipv6/ipv6_sockglue.c
77586 @@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
77587 if (sk->sk_type != SOCK_STREAM)
77588 return -ENOPROTOOPT;
77589
77590 - msg.msg_control = optval;
77591 + msg.msg_control = (void __force_kernel *)optval;
77592 msg.msg_controllen = len;
77593 msg.msg_flags = flags;
77594
77595 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
77596 index d7cb045..8c0ded6 100644
77597 --- a/net/ipv6/netfilter/ip6_tables.c
77598 +++ b/net/ipv6/netfilter/ip6_tables.c
77599 @@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
77600 #endif
77601
77602 static int get_info(struct net *net, void __user *user,
77603 - const int *len, int compat)
77604 + int len, int compat)
77605 {
77606 char name[XT_TABLE_MAXNAMELEN];
77607 struct xt_table *t;
77608 int ret;
77609
77610 - if (*len != sizeof(struct ip6t_getinfo)) {
77611 - duprintf("length %u != %zu\n", *len,
77612 + if (len != sizeof(struct ip6t_getinfo)) {
77613 + duprintf("length %u != %zu\n", len,
77614 sizeof(struct ip6t_getinfo));
77615 return -EINVAL;
77616 }
77617 @@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
77618 info.size = private->size;
77619 strcpy(info.name, name);
77620
77621 - if (copy_to_user(user, &info, *len) != 0)
77622 + if (copy_to_user(user, &info, len) != 0)
77623 ret = -EFAULT;
77624 else
77625 ret = 0;
77626 @@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77627
77628 switch (cmd) {
77629 case IP6T_SO_GET_INFO:
77630 - ret = get_info(sock_net(sk), user, len, 1);
77631 + ret = get_info(sock_net(sk), user, *len, 1);
77632 break;
77633 case IP6T_SO_GET_ENTRIES:
77634 ret = compat_get_entries(sock_net(sk), user, len);
77635 @@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77636
77637 switch (cmd) {
77638 case IP6T_SO_GET_INFO:
77639 - ret = get_info(sock_net(sk), user, len, 0);
77640 + ret = get_info(sock_net(sk), user, *len, 0);
77641 break;
77642
77643 case IP6T_SO_GET_ENTRIES:
77644 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
77645 index 93d6983..8e54c4d 100644
77646 --- a/net/ipv6/raw.c
77647 +++ b/net/ipv6/raw.c
77648 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
77649 {
77650 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
77651 skb_checksum_complete(skb)) {
77652 - atomic_inc(&sk->sk_drops);
77653 + atomic_inc_unchecked(&sk->sk_drops);
77654 kfree_skb(skb);
77655 return NET_RX_DROP;
77656 }
77657 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77658 struct raw6_sock *rp = raw6_sk(sk);
77659
77660 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
77661 - atomic_inc(&sk->sk_drops);
77662 + atomic_inc_unchecked(&sk->sk_drops);
77663 kfree_skb(skb);
77664 return NET_RX_DROP;
77665 }
77666 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77667
77668 if (inet->hdrincl) {
77669 if (skb_checksum_complete(skb)) {
77670 - atomic_inc(&sk->sk_drops);
77671 + atomic_inc_unchecked(&sk->sk_drops);
77672 kfree_skb(skb);
77673 return NET_RX_DROP;
77674 }
77675 @@ -602,7 +602,7 @@ out:
77676 return err;
77677 }
77678
77679 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
77680 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
77681 struct flowi6 *fl6, struct dst_entry **dstp,
77682 unsigned int flags)
77683 {
77684 @@ -914,12 +914,15 @@ do_confirm:
77685 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
77686 char __user *optval, int optlen)
77687 {
77688 + struct icmp6_filter filter;
77689 +
77690 switch (optname) {
77691 case ICMPV6_FILTER:
77692 if (optlen > sizeof(struct icmp6_filter))
77693 optlen = sizeof(struct icmp6_filter);
77694 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
77695 + if (copy_from_user(&filter, optval, optlen))
77696 return -EFAULT;
77697 + raw6_sk(sk)->filter = filter;
77698 return 0;
77699 default:
77700 return -ENOPROTOOPT;
77701 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77702 char __user *optval, int __user *optlen)
77703 {
77704 int len;
77705 + struct icmp6_filter filter;
77706
77707 switch (optname) {
77708 case ICMPV6_FILTER:
77709 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77710 len = sizeof(struct icmp6_filter);
77711 if (put_user(len, optlen))
77712 return -EFAULT;
77713 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
77714 + filter = raw6_sk(sk)->filter;
77715 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
77716 return -EFAULT;
77717 return 0;
77718 default:
77719 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77720 0, 0L, 0,
77721 sock_i_uid(sp), 0,
77722 sock_i_ino(sp),
77723 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77724 + atomic_read(&sp->sk_refcnt),
77725 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77726 + NULL,
77727 +#else
77728 + sp,
77729 +#endif
77730 + atomic_read_unchecked(&sp->sk_drops));
77731 }
77732
77733 static int raw6_seq_show(struct seq_file *seq, void *v)
77734 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
77735 index 9df64a5..39875da 100644
77736 --- a/net/ipv6/tcp_ipv6.c
77737 +++ b/net/ipv6/tcp_ipv6.c
77738 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
77739 }
77740 #endif
77741
77742 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77743 +extern int grsec_enable_blackhole;
77744 +#endif
77745 +
77746 static void tcp_v6_hash(struct sock *sk)
77747 {
77748 if (sk->sk_state != TCP_CLOSE) {
77749 @@ -1544,6 +1548,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
77750 return 0;
77751
77752 reset:
77753 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77754 + if (!grsec_enable_blackhole)
77755 +#endif
77756 tcp_v6_send_reset(sk, skb);
77757 discard:
77758 if (opt_skb)
77759 @@ -1625,12 +1632,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
77760 TCP_SKB_CB(skb)->sacked = 0;
77761
77762 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77763 - if (!sk)
77764 + if (!sk) {
77765 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77766 + ret = 1;
77767 +#endif
77768 goto no_tcp_socket;
77769 + }
77770
77771 process:
77772 - if (sk->sk_state == TCP_TIME_WAIT)
77773 + if (sk->sk_state == TCP_TIME_WAIT) {
77774 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77775 + ret = 2;
77776 +#endif
77777 goto do_time_wait;
77778 + }
77779
77780 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
77781 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77782 @@ -1679,6 +1694,10 @@ no_tcp_socket:
77783 bad_packet:
77784 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77785 } else {
77786 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77787 + if (!grsec_enable_blackhole || (ret == 1 &&
77788 + (skb->dev->flags & IFF_LOOPBACK)))
77789 +#endif
77790 tcp_v6_send_reset(NULL, skb);
77791 }
77792
77793 @@ -1885,7 +1904,13 @@ static void get_openreq6(struct seq_file *seq,
77794 uid,
77795 0, /* non standard timer */
77796 0, /* open_requests have no inode */
77797 - 0, req);
77798 + 0,
77799 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77800 + NULL
77801 +#else
77802 + req
77803 +#endif
77804 + );
77805 }
77806
77807 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77808 @@ -1935,7 +1960,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77809 sock_i_uid(sp),
77810 icsk->icsk_probes_out,
77811 sock_i_ino(sp),
77812 - atomic_read(&sp->sk_refcnt), sp,
77813 + atomic_read(&sp->sk_refcnt),
77814 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77815 + NULL,
77816 +#else
77817 + sp,
77818 +#endif
77819 jiffies_to_clock_t(icsk->icsk_rto),
77820 jiffies_to_clock_t(icsk->icsk_ack.ato),
77821 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
77822 @@ -1970,7 +2000,13 @@ static void get_timewait6_sock(struct seq_file *seq,
77823 dest->s6_addr32[2], dest->s6_addr32[3], destp,
77824 tw->tw_substate, 0, 0,
77825 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77826 - atomic_read(&tw->tw_refcnt), tw);
77827 + atomic_read(&tw->tw_refcnt),
77828 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77829 + NULL
77830 +#else
77831 + tw
77832 +#endif
77833 + );
77834 }
77835
77836 static int tcp6_seq_show(struct seq_file *seq, void *v)
77837 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
77838 index f05099f..ea613f0 100644
77839 --- a/net/ipv6/udp.c
77840 +++ b/net/ipv6/udp.c
77841 @@ -50,6 +50,10 @@
77842 #include <linux/seq_file.h>
77843 #include "udp_impl.h"
77844
77845 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77846 +extern int grsec_enable_blackhole;
77847 +#endif
77848 +
77849 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
77850 {
77851 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
77852 @@ -615,7 +619,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
77853 return rc;
77854 drop:
77855 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77856 - atomic_inc(&sk->sk_drops);
77857 + atomic_inc_unchecked(&sk->sk_drops);
77858 kfree_skb(skb);
77859 return -1;
77860 }
77861 @@ -673,7 +677,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77862 if (likely(skb1 == NULL))
77863 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
77864 if (!skb1) {
77865 - atomic_inc(&sk->sk_drops);
77866 + atomic_inc_unchecked(&sk->sk_drops);
77867 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
77868 IS_UDPLITE(sk));
77869 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77870 @@ -844,6 +848,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77871 goto discard;
77872
77873 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
77874 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77875 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77876 +#endif
77877 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
77878
77879 kfree_skb(skb);
77880 @@ -1453,8 +1460,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
77881 0, 0L, 0,
77882 sock_i_uid(sp), 0,
77883 sock_i_ino(sp),
77884 - atomic_read(&sp->sk_refcnt), sp,
77885 - atomic_read(&sp->sk_drops));
77886 + atomic_read(&sp->sk_refcnt),
77887 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77888 + NULL,
77889 +#else
77890 + sp,
77891 +#endif
77892 + atomic_read_unchecked(&sp->sk_drops));
77893 }
77894
77895 int udp6_seq_show(struct seq_file *seq, void *v)
77896 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
77897 index 6b9d5a0..4dffaf1 100644
77898 --- a/net/irda/ircomm/ircomm_tty.c
77899 +++ b/net/irda/ircomm/ircomm_tty.c
77900 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77901 add_wait_queue(&self->open_wait, &wait);
77902
77903 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
77904 - __FILE__,__LINE__, tty->driver->name, self->open_count );
77905 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77906
77907 /* As far as I can see, we protect open_count - Jean II */
77908 spin_lock_irqsave(&self->spinlock, flags);
77909 if (!tty_hung_up_p(filp)) {
77910 extra_count = 1;
77911 - self->open_count--;
77912 + local_dec(&self->open_count);
77913 }
77914 spin_unlock_irqrestore(&self->spinlock, flags);
77915 - self->blocked_open++;
77916 + local_inc(&self->blocked_open);
77917
77918 while (1) {
77919 if (tty->termios->c_cflag & CBAUD) {
77920 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77921 }
77922
77923 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
77924 - __FILE__,__LINE__, tty->driver->name, self->open_count );
77925 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77926
77927 schedule();
77928 }
77929 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77930 if (extra_count) {
77931 /* ++ is not atomic, so this should be protected - Jean II */
77932 spin_lock_irqsave(&self->spinlock, flags);
77933 - self->open_count++;
77934 + local_inc(&self->open_count);
77935 spin_unlock_irqrestore(&self->spinlock, flags);
77936 }
77937 - self->blocked_open--;
77938 + local_dec(&self->blocked_open);
77939
77940 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
77941 - __FILE__,__LINE__, tty->driver->name, self->open_count);
77942 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
77943
77944 if (!retval)
77945 self->flags |= ASYNC_NORMAL_ACTIVE;
77946 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
77947 }
77948 /* ++ is not atomic, so this should be protected - Jean II */
77949 spin_lock_irqsave(&self->spinlock, flags);
77950 - self->open_count++;
77951 + local_inc(&self->open_count);
77952
77953 tty->driver_data = self;
77954 self->tty = tty;
77955 spin_unlock_irqrestore(&self->spinlock, flags);
77956
77957 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
77958 - self->line, self->open_count);
77959 + self->line, local_read(&self->open_count));
77960
77961 /* Not really used by us, but lets do it anyway */
77962 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
77963 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77964 return;
77965 }
77966
77967 - if ((tty->count == 1) && (self->open_count != 1)) {
77968 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
77969 /*
77970 * Uh, oh. tty->count is 1, which means that the tty
77971 * structure will be freed. state->count should always
77972 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77973 */
77974 IRDA_DEBUG(0, "%s(), bad serial port count; "
77975 "tty->count is 1, state->count is %d\n", __func__ ,
77976 - self->open_count);
77977 - self->open_count = 1;
77978 + local_read(&self->open_count));
77979 + local_set(&self->open_count, 1);
77980 }
77981
77982 - if (--self->open_count < 0) {
77983 + if (local_dec_return(&self->open_count) < 0) {
77984 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77985 - __func__, self->line, self->open_count);
77986 - self->open_count = 0;
77987 + __func__, self->line, local_read(&self->open_count));
77988 + local_set(&self->open_count, 0);
77989 }
77990 - if (self->open_count) {
77991 + if (local_read(&self->open_count)) {
77992 spin_unlock_irqrestore(&self->spinlock, flags);
77993
77994 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
77995 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77996 tty->closing = 0;
77997 self->tty = NULL;
77998
77999 - if (self->blocked_open) {
78000 + if (local_read(&self->blocked_open)) {
78001 if (self->close_delay)
78002 schedule_timeout_interruptible(self->close_delay);
78003 wake_up_interruptible(&self->open_wait);
78004 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
78005 spin_lock_irqsave(&self->spinlock, flags);
78006 self->flags &= ~ASYNC_NORMAL_ACTIVE;
78007 self->tty = NULL;
78008 - self->open_count = 0;
78009 + local_set(&self->open_count, 0);
78010 spin_unlock_irqrestore(&self->spinlock, flags);
78011
78012 wake_up_interruptible(&self->open_wait);
78013 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
78014 seq_putc(m, '\n');
78015
78016 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
78017 - seq_printf(m, "Open count: %d\n", self->open_count);
78018 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
78019 seq_printf(m, "Max data size: %d\n", self->max_data_size);
78020 seq_printf(m, "Max header size: %d\n", self->max_header_size);
78021
78022 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
78023 index cd6f7a9..e63fe89 100644
78024 --- a/net/iucv/af_iucv.c
78025 +++ b/net/iucv/af_iucv.c
78026 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
78027
78028 write_lock_bh(&iucv_sk_list.lock);
78029
78030 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
78031 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
78032 while (__iucv_get_sock_by_name(name)) {
78033 sprintf(name, "%08x",
78034 - atomic_inc_return(&iucv_sk_list.autobind_name));
78035 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
78036 }
78037
78038 write_unlock_bh(&iucv_sk_list.lock);
78039 diff --git a/net/key/af_key.c b/net/key/af_key.c
78040 index 34e4185..8823368 100644
78041 --- a/net/key/af_key.c
78042 +++ b/net/key/af_key.c
78043 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
78044 static u32 get_acqseq(void)
78045 {
78046 u32 res;
78047 - static atomic_t acqseq;
78048 + static atomic_unchecked_t acqseq;
78049
78050 do {
78051 - res = atomic_inc_return(&acqseq);
78052 + res = atomic_inc_return_unchecked(&acqseq);
78053 } while (!res);
78054 return res;
78055 }
78056 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
78057 index ddc553e..1937a14 100644
78058 --- a/net/l2tp/l2tp_netlink.c
78059 +++ b/net/l2tp/l2tp_netlink.c
78060 @@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
78061
78062 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
78063 &l2tp_nl_family, 0, L2TP_CMD_NOOP);
78064 - if (IS_ERR(hdr)) {
78065 - ret = PTR_ERR(hdr);
78066 + if (!hdr) {
78067 + ret = -EMSGSIZE;
78068 goto err_out;
78069 }
78070
78071 @@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
78072
78073 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
78074 L2TP_CMD_TUNNEL_GET);
78075 - if (IS_ERR(hdr))
78076 - return PTR_ERR(hdr);
78077 + if (!hdr)
78078 + return -EMSGSIZE;
78079
78080 if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
78081 nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
78082 @@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
78083 sk = tunnel->sock;
78084
78085 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
78086 - if (IS_ERR(hdr))
78087 - return PTR_ERR(hdr);
78088 + if (!hdr)
78089 + return -EMSGSIZE;
78090
78091 if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
78092 nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
78093 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
78094 index 3f3cd50..d2cf249 100644
78095 --- a/net/mac80211/ieee80211_i.h
78096 +++ b/net/mac80211/ieee80211_i.h
78097 @@ -28,6 +28,7 @@
78098 #include <net/ieee80211_radiotap.h>
78099 #include <net/cfg80211.h>
78100 #include <net/mac80211.h>
78101 +#include <asm/local.h>
78102 #include "key.h"
78103 #include "sta_info.h"
78104
78105 @@ -863,7 +864,7 @@ struct ieee80211_local {
78106 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
78107 spinlock_t queue_stop_reason_lock;
78108
78109 - int open_count;
78110 + local_t open_count;
78111 int monitors, cooked_mntrs;
78112 /* number of interfaces with corresponding FIF_ flags */
78113 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
78114 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
78115 index 8664111..1d6a065 100644
78116 --- a/net/mac80211/iface.c
78117 +++ b/net/mac80211/iface.c
78118 @@ -328,7 +328,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78119 break;
78120 }
78121
78122 - if (local->open_count == 0) {
78123 + if (local_read(&local->open_count) == 0) {
78124 res = drv_start(local);
78125 if (res)
78126 goto err_del_bss;
78127 @@ -371,7 +371,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78128 break;
78129 }
78130
78131 - if (local->monitors == 0 && local->open_count == 0) {
78132 + if (local->monitors == 0 && local_read(&local->open_count) == 0) {
78133 res = ieee80211_add_virtual_monitor(local);
78134 if (res)
78135 goto err_stop;
78136 @@ -468,7 +468,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78137 mutex_unlock(&local->mtx);
78138
78139 if (coming_up)
78140 - local->open_count++;
78141 + local_inc(&local->open_count);
78142
78143 if (hw_reconf_flags)
78144 ieee80211_hw_config(local, hw_reconf_flags);
78145 @@ -481,7 +481,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
78146 err_del_interface:
78147 drv_remove_interface(local, sdata);
78148 err_stop:
78149 - if (!local->open_count)
78150 + if (!local_read(&local->open_count))
78151 drv_stop(local);
78152 err_del_bss:
78153 sdata->bss = NULL;
78154 @@ -613,7 +613,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78155 }
78156
78157 if (going_down)
78158 - local->open_count--;
78159 + local_dec(&local->open_count);
78160
78161 switch (sdata->vif.type) {
78162 case NL80211_IFTYPE_AP_VLAN:
78163 @@ -685,7 +685,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78164
78165 ieee80211_recalc_ps(local, -1);
78166
78167 - if (local->open_count == 0) {
78168 + if (local_read(&local->open_count) == 0) {
78169 if (local->ops->napi_poll)
78170 napi_disable(&local->napi);
78171 ieee80211_clear_tx_pending(local);
78172 @@ -717,7 +717,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
78173 }
78174 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
78175
78176 - if (local->monitors == local->open_count && local->monitors > 0)
78177 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
78178 ieee80211_add_virtual_monitor(local);
78179 }
78180
78181 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
78182 index f5548e9..474a15f 100644
78183 --- a/net/mac80211/main.c
78184 +++ b/net/mac80211/main.c
78185 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
78186 local->hw.conf.power_level = power;
78187 }
78188
78189 - if (changed && local->open_count) {
78190 + if (changed && local_read(&local->open_count)) {
78191 ret = drv_config(local, changed);
78192 /*
78193 * Goal:
78194 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
78195 index af1c4e2..24dbbe3 100644
78196 --- a/net/mac80211/pm.c
78197 +++ b/net/mac80211/pm.c
78198 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78199 struct ieee80211_sub_if_data *sdata;
78200 struct sta_info *sta;
78201
78202 - if (!local->open_count)
78203 + if (!local_read(&local->open_count))
78204 goto suspend;
78205
78206 ieee80211_scan_cancel(local);
78207 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78208 cancel_work_sync(&local->dynamic_ps_enable_work);
78209 del_timer_sync(&local->dynamic_ps_timer);
78210
78211 - local->wowlan = wowlan && local->open_count;
78212 + local->wowlan = wowlan && local_read(&local->open_count);
78213 if (local->wowlan) {
78214 int err = drv_suspend(local, wowlan);
78215 if (err < 0) {
78216 @@ -132,7 +132,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
78217 drv_remove_interface(local, sdata);
78218
78219 /* stop hardware - this must stop RX */
78220 - if (local->open_count)
78221 + if (local_read(&local->open_count))
78222 ieee80211_stop_device(local);
78223
78224 suspend:
78225 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
78226 index 3313c11..bec9f17 100644
78227 --- a/net/mac80211/rate.c
78228 +++ b/net/mac80211/rate.c
78229 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
78230
78231 ASSERT_RTNL();
78232
78233 - if (local->open_count)
78234 + if (local_read(&local->open_count))
78235 return -EBUSY;
78236
78237 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
78238 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
78239 index c97a065..ff61928 100644
78240 --- a/net/mac80211/rc80211_pid_debugfs.c
78241 +++ b/net/mac80211/rc80211_pid_debugfs.c
78242 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
78243
78244 spin_unlock_irqrestore(&events->lock, status);
78245
78246 - if (copy_to_user(buf, pb, p))
78247 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
78248 return -EFAULT;
78249
78250 return p;
78251 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
78252 index f564b5e..22fee47 100644
78253 --- a/net/mac80211/util.c
78254 +++ b/net/mac80211/util.c
78255 @@ -1224,7 +1224,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
78256 }
78257 #endif
78258 /* everything else happens only if HW was up & running */
78259 - if (!local->open_count)
78260 + if (!local_read(&local->open_count))
78261 goto wake_up;
78262
78263 /*
78264 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
78265 index 209c1ed..39484dc 100644
78266 --- a/net/netfilter/Kconfig
78267 +++ b/net/netfilter/Kconfig
78268 @@ -851,6 +851,16 @@ config NETFILTER_XT_MATCH_ESP
78269
78270 To compile it as a module, choose M here. If unsure, say N.
78271
78272 +config NETFILTER_XT_MATCH_GRADM
78273 + tristate '"gradm" match support'
78274 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
78275 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
78276 + ---help---
78277 + The gradm match allows to match on grsecurity RBAC being enabled.
78278 + It is useful when iptables rules are applied early on bootup to
78279 + prevent connections to the machine (except from a trusted host)
78280 + while the RBAC system is disabled.
78281 +
78282 config NETFILTER_XT_MATCH_HASHLIMIT
78283 tristate '"hashlimit" match support'
78284 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
78285 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
78286 index 4e7960c..89e48d4 100644
78287 --- a/net/netfilter/Makefile
78288 +++ b/net/netfilter/Makefile
78289 @@ -87,6 +87,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
78290 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
78291 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
78292 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
78293 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
78294 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
78295 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
78296 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
78297 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
78298 index 1548df9..98ad9b4 100644
78299 --- a/net/netfilter/ipvs/ip_vs_conn.c
78300 +++ b/net/netfilter/ipvs/ip_vs_conn.c
78301 @@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
78302 /* Increase the refcnt counter of the dest */
78303 atomic_inc(&dest->refcnt);
78304
78305 - conn_flags = atomic_read(&dest->conn_flags);
78306 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
78307 if (cp->protocol != IPPROTO_UDP)
78308 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
78309 flags = cp->flags;
78310 @@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
78311 atomic_set(&cp->refcnt, 1);
78312
78313 atomic_set(&cp->n_control, 0);
78314 - atomic_set(&cp->in_pkts, 0);
78315 + atomic_set_unchecked(&cp->in_pkts, 0);
78316
78317 atomic_inc(&ipvs->conn_count);
78318 if (flags & IP_VS_CONN_F_NO_CPORT)
78319 @@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
78320
78321 /* Don't drop the entry if its number of incoming packets is not
78322 located in [0, 8] */
78323 - i = atomic_read(&cp->in_pkts);
78324 + i = atomic_read_unchecked(&cp->in_pkts);
78325 if (i > 8 || i < 0) return 0;
78326
78327 if (!todrop_rate[i]) return 0;
78328 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
78329 index a54b018c..07e0120 100644
78330 --- a/net/netfilter/ipvs/ip_vs_core.c
78331 +++ b/net/netfilter/ipvs/ip_vs_core.c
78332 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
78333 ret = cp->packet_xmit(skb, cp, pd->pp);
78334 /* do not touch skb anymore */
78335
78336 - atomic_inc(&cp->in_pkts);
78337 + atomic_inc_unchecked(&cp->in_pkts);
78338 ip_vs_conn_put(cp);
78339 return ret;
78340 }
78341 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
78342 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
78343 pkts = sysctl_sync_threshold(ipvs);
78344 else
78345 - pkts = atomic_add_return(1, &cp->in_pkts);
78346 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78347
78348 if (ipvs->sync_state & IP_VS_STATE_MASTER)
78349 ip_vs_sync_conn(net, cp, pkts);
78350 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
78351 index 72bf32a..f91c066 100644
78352 --- a/net/netfilter/ipvs/ip_vs_ctl.c
78353 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
78354 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
78355 ip_vs_rs_hash(ipvs, dest);
78356 write_unlock_bh(&ipvs->rs_lock);
78357 }
78358 - atomic_set(&dest->conn_flags, conn_flags);
78359 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
78360
78361 /* bind the service */
78362 if (!dest->svc) {
78363 @@ -2074,7 +2074,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
78364 " %-7s %-6d %-10d %-10d\n",
78365 &dest->addr.in6,
78366 ntohs(dest->port),
78367 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
78368 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
78369 atomic_read(&dest->weight),
78370 atomic_read(&dest->activeconns),
78371 atomic_read(&dest->inactconns));
78372 @@ -2085,7 +2085,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
78373 "%-7s %-6d %-10d %-10d\n",
78374 ntohl(dest->addr.ip),
78375 ntohs(dest->port),
78376 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
78377 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
78378 atomic_read(&dest->weight),
78379 atomic_read(&dest->activeconns),
78380 atomic_read(&dest->inactconns));
78381 @@ -2555,7 +2555,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
78382
78383 entry.addr = dest->addr.ip;
78384 entry.port = dest->port;
78385 - entry.conn_flags = atomic_read(&dest->conn_flags);
78386 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
78387 entry.weight = atomic_read(&dest->weight);
78388 entry.u_threshold = dest->u_threshold;
78389 entry.l_threshold = dest->l_threshold;
78390 @@ -3090,7 +3090,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
78391 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
78392 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
78393 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
78394 - (atomic_read(&dest->conn_flags) &
78395 + (atomic_read_unchecked(&dest->conn_flags) &
78396 IP_VS_CONN_F_FWD_MASK)) ||
78397 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
78398 atomic_read(&dest->weight)) ||
78399 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
78400 index effa10c..9058928 100644
78401 --- a/net/netfilter/ipvs/ip_vs_sync.c
78402 +++ b/net/netfilter/ipvs/ip_vs_sync.c
78403 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
78404 cp = cp->control;
78405 if (cp) {
78406 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78407 - pkts = atomic_add_return(1, &cp->in_pkts);
78408 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78409 else
78410 pkts = sysctl_sync_threshold(ipvs);
78411 ip_vs_sync_conn(net, cp->control, pkts);
78412 @@ -758,7 +758,7 @@ control:
78413 if (!cp)
78414 return;
78415 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78416 - pkts = atomic_add_return(1, &cp->in_pkts);
78417 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78418 else
78419 pkts = sysctl_sync_threshold(ipvs);
78420 goto sloop;
78421 @@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
78422
78423 if (opt)
78424 memcpy(&cp->in_seq, opt, sizeof(*opt));
78425 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78426 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78427 cp->state = state;
78428 cp->old_state = cp->state;
78429 /*
78430 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
78431 index 7fd66de..e6fb361 100644
78432 --- a/net/netfilter/ipvs/ip_vs_xmit.c
78433 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
78434 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
78435 else
78436 rc = NF_ACCEPT;
78437 /* do not touch skb anymore */
78438 - atomic_inc(&cp->in_pkts);
78439 + atomic_inc_unchecked(&cp->in_pkts);
78440 goto out;
78441 }
78442
78443 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
78444 else
78445 rc = NF_ACCEPT;
78446 /* do not touch skb anymore */
78447 - atomic_inc(&cp->in_pkts);
78448 + atomic_inc_unchecked(&cp->in_pkts);
78449 goto out;
78450 }
78451
78452 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
78453 index ac3af97..c134c21 100644
78454 --- a/net/netfilter/nf_conntrack_core.c
78455 +++ b/net/netfilter/nf_conntrack_core.c
78456 @@ -1530,6 +1530,10 @@ err_proto:
78457 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
78458 #define DYING_NULLS_VAL ((1<<30)+1)
78459
78460 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78461 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
78462 +#endif
78463 +
78464 static int nf_conntrack_init_net(struct net *net)
78465 {
78466 int ret;
78467 @@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
78468 goto err_stat;
78469 }
78470
78471 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78472 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
78473 +#else
78474 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
78475 +#endif
78476 if (!net->ct.slabname) {
78477 ret = -ENOMEM;
78478 goto err_slabname;
78479 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
78480 index 3c3cfc0..7a6ea1a 100644
78481 --- a/net/netfilter/nfnetlink_log.c
78482 +++ b/net/netfilter/nfnetlink_log.c
78483 @@ -70,7 +70,7 @@ struct nfulnl_instance {
78484 };
78485
78486 static DEFINE_SPINLOCK(instances_lock);
78487 -static atomic_t global_seq;
78488 +static atomic_unchecked_t global_seq;
78489
78490 #define INSTANCE_BUCKETS 16
78491 static struct hlist_head instance_table[INSTANCE_BUCKETS];
78492 @@ -517,7 +517,7 @@ __build_packet_message(struct nfulnl_instance *inst,
78493 /* global sequence number */
78494 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
78495 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
78496 - htonl(atomic_inc_return(&global_seq))))
78497 + htonl(atomic_inc_return_unchecked(&global_seq))))
78498 goto nla_put_failure;
78499
78500 if (data_len) {
78501 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
78502 new file mode 100644
78503 index 0000000..6905327
78504 --- /dev/null
78505 +++ b/net/netfilter/xt_gradm.c
78506 @@ -0,0 +1,51 @@
78507 +/*
78508 + * gradm match for netfilter
78509 + * Copyright © Zbigniew Krzystolik, 2010
78510 + *
78511 + * This program is free software; you can redistribute it and/or modify
78512 + * it under the terms of the GNU General Public License; either version
78513 + * 2 or 3 as published by the Free Software Foundation.
78514 + */
78515 +#include <linux/module.h>
78516 +#include <linux/moduleparam.h>
78517 +#include <linux/skbuff.h>
78518 +#include <linux/netfilter/x_tables.h>
78519 +#include <linux/grsecurity.h>
78520 +#include <linux/netfilter/xt_gradm.h>
78521 +
78522 +static bool
78523 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
78524 +{
78525 + const struct xt_gradm_mtinfo *info = par->matchinfo;
78526 + bool retval = false;
78527 + if (gr_acl_is_enabled())
78528 + retval = true;
78529 + return retval ^ info->invflags;
78530 +}
78531 +
78532 +static struct xt_match gradm_mt_reg __read_mostly = {
78533 + .name = "gradm",
78534 + .revision = 0,
78535 + .family = NFPROTO_UNSPEC,
78536 + .match = gradm_mt,
78537 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
78538 + .me = THIS_MODULE,
78539 +};
78540 +
78541 +static int __init gradm_mt_init(void)
78542 +{
78543 + return xt_register_match(&gradm_mt_reg);
78544 +}
78545 +
78546 +static void __exit gradm_mt_exit(void)
78547 +{
78548 + xt_unregister_match(&gradm_mt_reg);
78549 +}
78550 +
78551 +module_init(gradm_mt_init);
78552 +module_exit(gradm_mt_exit);
78553 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
78554 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
78555 +MODULE_LICENSE("GPL");
78556 +MODULE_ALIAS("ipt_gradm");
78557 +MODULE_ALIAS("ip6t_gradm");
78558 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
78559 index 4fe4fb4..87a89e5 100644
78560 --- a/net/netfilter/xt_statistic.c
78561 +++ b/net/netfilter/xt_statistic.c
78562 @@ -19,7 +19,7 @@
78563 #include <linux/module.h>
78564
78565 struct xt_statistic_priv {
78566 - atomic_t count;
78567 + atomic_unchecked_t count;
78568 } ____cacheline_aligned_in_smp;
78569
78570 MODULE_LICENSE("GPL");
78571 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
78572 break;
78573 case XT_STATISTIC_MODE_NTH:
78574 do {
78575 - oval = atomic_read(&info->master->count);
78576 + oval = atomic_read_unchecked(&info->master->count);
78577 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
78578 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
78579 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
78580 if (nval == 0)
78581 ret = !ret;
78582 break;
78583 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
78584 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
78585 if (info->master == NULL)
78586 return -ENOMEM;
78587 - atomic_set(&info->master->count, info->u.nth.count);
78588 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
78589
78590 return 0;
78591 }
78592 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
78593 index 20b32fd..e717db9 100644
78594 --- a/net/netlink/af_netlink.c
78595 +++ b/net/netlink/af_netlink.c
78596 @@ -753,7 +753,7 @@ static void netlink_overrun(struct sock *sk)
78597 sk->sk_error_report(sk);
78598 }
78599 }
78600 - atomic_inc(&sk->sk_drops);
78601 + atomic_inc_unchecked(&sk->sk_drops);
78602 }
78603
78604 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
78605 @@ -2023,7 +2023,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
78606 sk_wmem_alloc_get(s),
78607 nlk->cb,
78608 atomic_read(&s->sk_refcnt),
78609 - atomic_read(&s->sk_drops),
78610 + atomic_read_unchecked(&s->sk_drops),
78611 sock_i_ino(s)
78612 );
78613
78614 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
78615 index 06592d8..64860f6 100644
78616 --- a/net/netrom/af_netrom.c
78617 +++ b/net/netrom/af_netrom.c
78618 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
78619 struct sock *sk = sock->sk;
78620 struct nr_sock *nr = nr_sk(sk);
78621
78622 + memset(sax, 0, sizeof(*sax));
78623 lock_sock(sk);
78624 if (peer != 0) {
78625 if (sk->sk_state != TCP_ESTABLISHED) {
78626 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
78627 *uaddr_len = sizeof(struct full_sockaddr_ax25);
78628 } else {
78629 sax->fsa_ax25.sax25_family = AF_NETROM;
78630 - sax->fsa_ax25.sax25_ndigis = 0;
78631 sax->fsa_ax25.sax25_call = nr->source_addr;
78632 *uaddr_len = sizeof(struct sockaddr_ax25);
78633 }
78634 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
78635 index 901cffd..7dfd21b 100644
78636 --- a/net/packet/af_packet.c
78637 +++ b/net/packet/af_packet.c
78638 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
78639
78640 spin_lock(&sk->sk_receive_queue.lock);
78641 po->stats.tp_packets++;
78642 - skb->dropcount = atomic_read(&sk->sk_drops);
78643 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
78644 __skb_queue_tail(&sk->sk_receive_queue, skb);
78645 spin_unlock(&sk->sk_receive_queue.lock);
78646 sk->sk_data_ready(sk, skb->len);
78647 @@ -1705,7 +1705,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
78648 drop_n_acct:
78649 spin_lock(&sk->sk_receive_queue.lock);
78650 po->stats.tp_drops++;
78651 - atomic_inc(&sk->sk_drops);
78652 + atomic_inc_unchecked(&sk->sk_drops);
78653 spin_unlock(&sk->sk_receive_queue.lock);
78654
78655 drop_n_restore:
78656 @@ -2649,6 +2649,7 @@ out:
78657
78658 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
78659 {
78660 + struct sock_extended_err ee;
78661 struct sock_exterr_skb *serr;
78662 struct sk_buff *skb, *skb2;
78663 int copied, err;
78664 @@ -2670,8 +2671,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
78665 sock_recv_timestamp(msg, sk, skb);
78666
78667 serr = SKB_EXT_ERR(skb);
78668 + ee = serr->ee;
78669 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
78670 - sizeof(serr->ee), &serr->ee);
78671 + sizeof ee, &ee);
78672
78673 msg->msg_flags |= MSG_ERRQUEUE;
78674 err = copied;
78675 @@ -3283,7 +3285,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
78676 case PACKET_HDRLEN:
78677 if (len > sizeof(int))
78678 len = sizeof(int);
78679 - if (copy_from_user(&val, optval, len))
78680 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
78681 return -EFAULT;
78682 switch (val) {
78683 case TPACKET_V1:
78684 @@ -3322,7 +3324,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
78685 len = lv;
78686 if (put_user(len, optlen))
78687 return -EFAULT;
78688 - if (copy_to_user(optval, data, len))
78689 + if (len > sizeof(st) || copy_to_user(optval, data, len))
78690 return -EFAULT;
78691 return 0;
78692 }
78693 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
78694 index 5a940db..f0b9c12 100644
78695 --- a/net/phonet/af_phonet.c
78696 +++ b/net/phonet/af_phonet.c
78697 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
78698 {
78699 struct phonet_protocol *pp;
78700
78701 - if (protocol >= PHONET_NPROTO)
78702 + if (protocol < 0 || protocol >= PHONET_NPROTO)
78703 return NULL;
78704
78705 rcu_read_lock();
78706 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
78707 {
78708 int err = 0;
78709
78710 - if (protocol >= PHONET_NPROTO)
78711 + if (protocol < 0 || protocol >= PHONET_NPROTO)
78712 return -EINVAL;
78713
78714 err = proto_register(pp->prot, 1);
78715 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
78716 index 576f22c..bc7a71b 100644
78717 --- a/net/phonet/pep.c
78718 +++ b/net/phonet/pep.c
78719 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
78720
78721 case PNS_PEP_CTRL_REQ:
78722 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
78723 - atomic_inc(&sk->sk_drops);
78724 + atomic_inc_unchecked(&sk->sk_drops);
78725 break;
78726 }
78727 __skb_pull(skb, 4);
78728 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
78729 }
78730
78731 if (pn->rx_credits == 0) {
78732 - atomic_inc(&sk->sk_drops);
78733 + atomic_inc_unchecked(&sk->sk_drops);
78734 err = -ENOBUFS;
78735 break;
78736 }
78737 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
78738 }
78739
78740 if (pn->rx_credits == 0) {
78741 - atomic_inc(&sk->sk_drops);
78742 + atomic_inc_unchecked(&sk->sk_drops);
78743 err = NET_RX_DROP;
78744 break;
78745 }
78746 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
78747 index 0acc943..c727611 100644
78748 --- a/net/phonet/socket.c
78749 +++ b/net/phonet/socket.c
78750 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
78751 pn->resource, sk->sk_state,
78752 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
78753 sock_i_uid(sk), sock_i_ino(sk),
78754 - atomic_read(&sk->sk_refcnt), sk,
78755 - atomic_read(&sk->sk_drops), &len);
78756 + atomic_read(&sk->sk_refcnt),
78757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78758 + NULL,
78759 +#else
78760 + sk,
78761 +#endif
78762 + atomic_read_unchecked(&sk->sk_drops), &len);
78763 }
78764 seq_printf(seq, "%*s\n", 127 - len, "");
78765 return 0;
78766 diff --git a/net/rds/cong.c b/net/rds/cong.c
78767 index e5b65ac..f3b6fb7 100644
78768 --- a/net/rds/cong.c
78769 +++ b/net/rds/cong.c
78770 @@ -78,7 +78,7 @@
78771 * finds that the saved generation number is smaller than the global generation
78772 * number, it wakes up the process.
78773 */
78774 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
78775 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
78776
78777 /*
78778 * Congestion monitoring
78779 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
78780 rdsdebug("waking map %p for %pI4\n",
78781 map, &map->m_addr);
78782 rds_stats_inc(s_cong_update_received);
78783 - atomic_inc(&rds_cong_generation);
78784 + atomic_inc_unchecked(&rds_cong_generation);
78785 if (waitqueue_active(&map->m_waitq))
78786 wake_up(&map->m_waitq);
78787 if (waitqueue_active(&rds_poll_waitq))
78788 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
78789
78790 int rds_cong_updated_since(unsigned long *recent)
78791 {
78792 - unsigned long gen = atomic_read(&rds_cong_generation);
78793 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
78794
78795 if (likely(*recent == gen))
78796 return 0;
78797 diff --git a/net/rds/ib.h b/net/rds/ib.h
78798 index 8d2b3d5..227ec5b 100644
78799 --- a/net/rds/ib.h
78800 +++ b/net/rds/ib.h
78801 @@ -128,7 +128,7 @@ struct rds_ib_connection {
78802 /* sending acks */
78803 unsigned long i_ack_flags;
78804 #ifdef KERNEL_HAS_ATOMIC64
78805 - atomic64_t i_ack_next; /* next ACK to send */
78806 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
78807 #else
78808 spinlock_t i_ack_lock; /* protect i_ack_next */
78809 u64 i_ack_next; /* next ACK to send */
78810 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
78811 index a1e1162..265e129 100644
78812 --- a/net/rds/ib_cm.c
78813 +++ b/net/rds/ib_cm.c
78814 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
78815 /* Clear the ACK state */
78816 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
78817 #ifdef KERNEL_HAS_ATOMIC64
78818 - atomic64_set(&ic->i_ack_next, 0);
78819 + atomic64_set_unchecked(&ic->i_ack_next, 0);
78820 #else
78821 ic->i_ack_next = 0;
78822 #endif
78823 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
78824 index 8d19491..05a3e65 100644
78825 --- a/net/rds/ib_recv.c
78826 +++ b/net/rds/ib_recv.c
78827 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
78828 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
78829 int ack_required)
78830 {
78831 - atomic64_set(&ic->i_ack_next, seq);
78832 + atomic64_set_unchecked(&ic->i_ack_next, seq);
78833 if (ack_required) {
78834 smp_mb__before_clear_bit();
78835 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
78836 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
78837 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
78838 smp_mb__after_clear_bit();
78839
78840 - return atomic64_read(&ic->i_ack_next);
78841 + return atomic64_read_unchecked(&ic->i_ack_next);
78842 }
78843 #endif
78844
78845 diff --git a/net/rds/iw.h b/net/rds/iw.h
78846 index 04ce3b1..48119a6 100644
78847 --- a/net/rds/iw.h
78848 +++ b/net/rds/iw.h
78849 @@ -134,7 +134,7 @@ struct rds_iw_connection {
78850 /* sending acks */
78851 unsigned long i_ack_flags;
78852 #ifdef KERNEL_HAS_ATOMIC64
78853 - atomic64_t i_ack_next; /* next ACK to send */
78854 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
78855 #else
78856 spinlock_t i_ack_lock; /* protect i_ack_next */
78857 u64 i_ack_next; /* next ACK to send */
78858 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
78859 index a91e1db..cf3053f 100644
78860 --- a/net/rds/iw_cm.c
78861 +++ b/net/rds/iw_cm.c
78862 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
78863 /* Clear the ACK state */
78864 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
78865 #ifdef KERNEL_HAS_ATOMIC64
78866 - atomic64_set(&ic->i_ack_next, 0);
78867 + atomic64_set_unchecked(&ic->i_ack_next, 0);
78868 #else
78869 ic->i_ack_next = 0;
78870 #endif
78871 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
78872 index 4503335..db566b4 100644
78873 --- a/net/rds/iw_recv.c
78874 +++ b/net/rds/iw_recv.c
78875 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
78876 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
78877 int ack_required)
78878 {
78879 - atomic64_set(&ic->i_ack_next, seq);
78880 + atomic64_set_unchecked(&ic->i_ack_next, seq);
78881 if (ack_required) {
78882 smp_mb__before_clear_bit();
78883 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
78884 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
78885 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
78886 smp_mb__after_clear_bit();
78887
78888 - return atomic64_read(&ic->i_ack_next);
78889 + return atomic64_read_unchecked(&ic->i_ack_next);
78890 }
78891 #endif
78892
78893 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
78894 index edac9ef..16bcb98 100644
78895 --- a/net/rds/tcp.c
78896 +++ b/net/rds/tcp.c
78897 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
78898 int val = 1;
78899
78900 set_fs(KERNEL_DS);
78901 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
78902 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
78903 sizeof(val));
78904 set_fs(oldfs);
78905 }
78906 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
78907 index 1b4fd68..2234175 100644
78908 --- a/net/rds/tcp_send.c
78909 +++ b/net/rds/tcp_send.c
78910 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
78911
78912 oldfs = get_fs();
78913 set_fs(KERNEL_DS);
78914 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
78915 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
78916 sizeof(val));
78917 set_fs(oldfs);
78918 }
78919 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
78920 index 05996d0..5a1dfe0 100644
78921 --- a/net/rxrpc/af_rxrpc.c
78922 +++ b/net/rxrpc/af_rxrpc.c
78923 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
78924 __be32 rxrpc_epoch;
78925
78926 /* current debugging ID */
78927 -atomic_t rxrpc_debug_id;
78928 +atomic_unchecked_t rxrpc_debug_id;
78929
78930 /* count of skbs currently in use */
78931 atomic_t rxrpc_n_skbs;
78932 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
78933 index e4d9cbc..b229649 100644
78934 --- a/net/rxrpc/ar-ack.c
78935 +++ b/net/rxrpc/ar-ack.c
78936 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
78937
78938 _enter("{%d,%d,%d,%d},",
78939 call->acks_hard, call->acks_unacked,
78940 - atomic_read(&call->sequence),
78941 + atomic_read_unchecked(&call->sequence),
78942 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
78943
78944 stop = 0;
78945 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
78946
78947 /* each Tx packet has a new serial number */
78948 sp->hdr.serial =
78949 - htonl(atomic_inc_return(&call->conn->serial));
78950 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
78951
78952 hdr = (struct rxrpc_header *) txb->head;
78953 hdr->serial = sp->hdr.serial;
78954 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
78955 */
78956 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
78957 {
78958 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
78959 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
78960 }
78961
78962 /*
78963 @@ -629,7 +629,7 @@ process_further:
78964
78965 latest = ntohl(sp->hdr.serial);
78966 hard = ntohl(ack.firstPacket);
78967 - tx = atomic_read(&call->sequence);
78968 + tx = atomic_read_unchecked(&call->sequence);
78969
78970 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
78971 latest,
78972 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
78973 goto maybe_reschedule;
78974
78975 send_ACK_with_skew:
78976 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
78977 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
78978 ntohl(ack.serial));
78979 send_ACK:
78980 mtu = call->conn->trans->peer->if_mtu;
78981 @@ -1173,7 +1173,7 @@ send_ACK:
78982 ackinfo.rxMTU = htonl(5692);
78983 ackinfo.jumbo_max = htonl(4);
78984
78985 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
78986 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
78987 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
78988 ntohl(hdr.serial),
78989 ntohs(ack.maxSkew),
78990 @@ -1191,7 +1191,7 @@ send_ACK:
78991 send_message:
78992 _debug("send message");
78993
78994 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
78995 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
78996 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
78997 send_message_2:
78998
78999 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
79000 index a3bbb36..3341fb9 100644
79001 --- a/net/rxrpc/ar-call.c
79002 +++ b/net/rxrpc/ar-call.c
79003 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
79004 spin_lock_init(&call->lock);
79005 rwlock_init(&call->state_lock);
79006 atomic_set(&call->usage, 1);
79007 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
79008 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79009 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
79010
79011 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
79012 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
79013 index 4106ca9..a338d7a 100644
79014 --- a/net/rxrpc/ar-connection.c
79015 +++ b/net/rxrpc/ar-connection.c
79016 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
79017 rwlock_init(&conn->lock);
79018 spin_lock_init(&conn->state_lock);
79019 atomic_set(&conn->usage, 1);
79020 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
79021 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79022 conn->avail_calls = RXRPC_MAXCALLS;
79023 conn->size_align = 4;
79024 conn->header_size = sizeof(struct rxrpc_header);
79025 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
79026 index e7ed43a..6afa140 100644
79027 --- a/net/rxrpc/ar-connevent.c
79028 +++ b/net/rxrpc/ar-connevent.c
79029 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
79030
79031 len = iov[0].iov_len + iov[1].iov_len;
79032
79033 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
79034 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79035 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
79036
79037 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
79038 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
79039 index 529572f..c758ca7 100644
79040 --- a/net/rxrpc/ar-input.c
79041 +++ b/net/rxrpc/ar-input.c
79042 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
79043 /* track the latest serial number on this connection for ACK packet
79044 * information */
79045 serial = ntohl(sp->hdr.serial);
79046 - hi_serial = atomic_read(&call->conn->hi_serial);
79047 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
79048 while (serial > hi_serial)
79049 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
79050 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
79051 serial);
79052
79053 /* request ACK generation for any ACK or DATA packet that requests
79054 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
79055 index a693aca..81e7293 100644
79056 --- a/net/rxrpc/ar-internal.h
79057 +++ b/net/rxrpc/ar-internal.h
79058 @@ -272,8 +272,8 @@ struct rxrpc_connection {
79059 int error; /* error code for local abort */
79060 int debug_id; /* debug ID for printks */
79061 unsigned int call_counter; /* call ID counter */
79062 - atomic_t serial; /* packet serial number counter */
79063 - atomic_t hi_serial; /* highest serial number received */
79064 + atomic_unchecked_t serial; /* packet serial number counter */
79065 + atomic_unchecked_t hi_serial; /* highest serial number received */
79066 u8 avail_calls; /* number of calls available */
79067 u8 size_align; /* data size alignment (for security) */
79068 u8 header_size; /* rxrpc + security header size */
79069 @@ -346,7 +346,7 @@ struct rxrpc_call {
79070 spinlock_t lock;
79071 rwlock_t state_lock; /* lock for state transition */
79072 atomic_t usage;
79073 - atomic_t sequence; /* Tx data packet sequence counter */
79074 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
79075 u32 abort_code; /* local/remote abort code */
79076 enum { /* current state of call */
79077 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
79078 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
79079 */
79080 extern atomic_t rxrpc_n_skbs;
79081 extern __be32 rxrpc_epoch;
79082 -extern atomic_t rxrpc_debug_id;
79083 +extern atomic_unchecked_t rxrpc_debug_id;
79084 extern struct workqueue_struct *rxrpc_workqueue;
79085
79086 /*
79087 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
79088 index 87f7135..74d3703 100644
79089 --- a/net/rxrpc/ar-local.c
79090 +++ b/net/rxrpc/ar-local.c
79091 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
79092 spin_lock_init(&local->lock);
79093 rwlock_init(&local->services_lock);
79094 atomic_set(&local->usage, 1);
79095 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
79096 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79097 memcpy(&local->srx, srx, sizeof(*srx));
79098 }
79099
79100 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
79101 index 16ae887..d24f12b 100644
79102 --- a/net/rxrpc/ar-output.c
79103 +++ b/net/rxrpc/ar-output.c
79104 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
79105 sp->hdr.cid = call->cid;
79106 sp->hdr.callNumber = call->call_id;
79107 sp->hdr.seq =
79108 - htonl(atomic_inc_return(&call->sequence));
79109 + htonl(atomic_inc_return_unchecked(&call->sequence));
79110 sp->hdr.serial =
79111 - htonl(atomic_inc_return(&conn->serial));
79112 + htonl(atomic_inc_return_unchecked(&conn->serial));
79113 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
79114 sp->hdr.userStatus = 0;
79115 sp->hdr.securityIndex = conn->security_ix;
79116 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
79117 index bebaa43..2644591 100644
79118 --- a/net/rxrpc/ar-peer.c
79119 +++ b/net/rxrpc/ar-peer.c
79120 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
79121 INIT_LIST_HEAD(&peer->error_targets);
79122 spin_lock_init(&peer->lock);
79123 atomic_set(&peer->usage, 1);
79124 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
79125 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79126 memcpy(&peer->srx, srx, sizeof(*srx));
79127
79128 rxrpc_assess_MTU_size(peer);
79129 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
79130 index 38047f7..9f48511 100644
79131 --- a/net/rxrpc/ar-proc.c
79132 +++ b/net/rxrpc/ar-proc.c
79133 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
79134 atomic_read(&conn->usage),
79135 rxrpc_conn_states[conn->state],
79136 key_serial(conn->key),
79137 - atomic_read(&conn->serial),
79138 - atomic_read(&conn->hi_serial));
79139 + atomic_read_unchecked(&conn->serial),
79140 + atomic_read_unchecked(&conn->hi_serial));
79141
79142 return 0;
79143 }
79144 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
79145 index 92df566..87ec1bf 100644
79146 --- a/net/rxrpc/ar-transport.c
79147 +++ b/net/rxrpc/ar-transport.c
79148 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
79149 spin_lock_init(&trans->client_lock);
79150 rwlock_init(&trans->conn_lock);
79151 atomic_set(&trans->usage, 1);
79152 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
79153 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
79154
79155 if (peer->srx.transport.family == AF_INET) {
79156 switch (peer->srx.transport_type) {
79157 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
79158 index f226709..0e735a8 100644
79159 --- a/net/rxrpc/rxkad.c
79160 +++ b/net/rxrpc/rxkad.c
79161 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
79162
79163 len = iov[0].iov_len + iov[1].iov_len;
79164
79165 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
79166 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79167 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
79168
79169 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
79170 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
79171
79172 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
79173
79174 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
79175 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
79176 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
79177
79178 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
79179 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
79180 index 1e2eee8..ce3967e 100644
79181 --- a/net/sctp/proc.c
79182 +++ b/net/sctp/proc.c
79183 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
79184 seq_printf(seq,
79185 "%8pK %8pK %-3d %-3d %-2d %-4d "
79186 "%4d %8d %8d %7d %5lu %-5d %5d ",
79187 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
79188 + assoc, sk,
79189 + sctp_sk(sk)->type, sk->sk_state,
79190 assoc->state, hash,
79191 assoc->assoc_id,
79192 assoc->sndbuf_used,
79193 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
79194 index 31c7bfc..af7bfdc 100644
79195 --- a/net/sctp/socket.c
79196 +++ b/net/sctp/socket.c
79197 @@ -4577,6 +4577,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
79198 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
79199 if (space_left < addrlen)
79200 return -ENOMEM;
79201 + if (addrlen > sizeof(temp) || addrlen < 0)
79202 + return -EFAULT;
79203 if (copy_to_user(to, &temp, addrlen))
79204 return -EFAULT;
79205 to += addrlen;
79206 diff --git a/net/socket.c b/net/socket.c
79207 index bce45cc..5af9802 100644
79208 --- a/net/socket.c
79209 +++ b/net/socket.c
79210 @@ -88,6 +88,7 @@
79211 #include <linux/nsproxy.h>
79212 #include <linux/magic.h>
79213 #include <linux/slab.h>
79214 +#include <linux/in.h>
79215
79216 #include <asm/uaccess.h>
79217 #include <asm/unistd.h>
79218 @@ -105,6 +106,8 @@
79219 #include <linux/sockios.h>
79220 #include <linux/atalk.h>
79221
79222 +#include <linux/grsock.h>
79223 +
79224 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
79225 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
79226 unsigned long nr_segs, loff_t pos);
79227 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
79228 &sockfs_dentry_operations, SOCKFS_MAGIC);
79229 }
79230
79231 -static struct vfsmount *sock_mnt __read_mostly;
79232 +struct vfsmount *sock_mnt __read_mostly;
79233
79234 static struct file_system_type sock_fs_type = {
79235 .name = "sockfs",
79236 @@ -1210,6 +1213,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
79237 return -EAFNOSUPPORT;
79238 if (type < 0 || type >= SOCK_MAX)
79239 return -EINVAL;
79240 + if (protocol < 0)
79241 + return -EINVAL;
79242
79243 /* Compatibility.
79244
79245 @@ -1341,6 +1346,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
79246 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
79247 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
79248
79249 + if(!gr_search_socket(family, type, protocol)) {
79250 + retval = -EACCES;
79251 + goto out;
79252 + }
79253 +
79254 + if (gr_handle_sock_all(family, type, protocol)) {
79255 + retval = -EACCES;
79256 + goto out;
79257 + }
79258 +
79259 retval = sock_create(family, type, protocol, &sock);
79260 if (retval < 0)
79261 goto out;
79262 @@ -1453,6 +1468,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
79263 if (sock) {
79264 err = move_addr_to_kernel(umyaddr, addrlen, &address);
79265 if (err >= 0) {
79266 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
79267 + err = -EACCES;
79268 + goto error;
79269 + }
79270 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
79271 + if (err)
79272 + goto error;
79273 +
79274 err = security_socket_bind(sock,
79275 (struct sockaddr *)&address,
79276 addrlen);
79277 @@ -1461,6 +1484,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
79278 (struct sockaddr *)
79279 &address, addrlen);
79280 }
79281 +error:
79282 fput_light(sock->file, fput_needed);
79283 }
79284 return err;
79285 @@ -1484,10 +1508,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
79286 if ((unsigned int)backlog > somaxconn)
79287 backlog = somaxconn;
79288
79289 + if (gr_handle_sock_server_other(sock->sk)) {
79290 + err = -EPERM;
79291 + goto error;
79292 + }
79293 +
79294 + err = gr_search_listen(sock);
79295 + if (err)
79296 + goto error;
79297 +
79298 err = security_socket_listen(sock, backlog);
79299 if (!err)
79300 err = sock->ops->listen(sock, backlog);
79301
79302 +error:
79303 fput_light(sock->file, fput_needed);
79304 }
79305 return err;
79306 @@ -1531,6 +1565,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
79307 newsock->type = sock->type;
79308 newsock->ops = sock->ops;
79309
79310 + if (gr_handle_sock_server_other(sock->sk)) {
79311 + err = -EPERM;
79312 + sock_release(newsock);
79313 + goto out_put;
79314 + }
79315 +
79316 + err = gr_search_accept(sock);
79317 + if (err) {
79318 + sock_release(newsock);
79319 + goto out_put;
79320 + }
79321 +
79322 /*
79323 * We don't need try_module_get here, as the listening socket (sock)
79324 * has the protocol module (sock->ops->owner) held.
79325 @@ -1569,6 +1615,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
79326 fd_install(newfd, newfile);
79327 err = newfd;
79328
79329 + gr_attach_curr_ip(newsock->sk);
79330 +
79331 out_put:
79332 fput_light(sock->file, fput_needed);
79333 out:
79334 @@ -1601,6 +1649,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
79335 int, addrlen)
79336 {
79337 struct socket *sock;
79338 + struct sockaddr *sck;
79339 struct sockaddr_storage address;
79340 int err, fput_needed;
79341
79342 @@ -1611,6 +1660,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
79343 if (err < 0)
79344 goto out_put;
79345
79346 + sck = (struct sockaddr *)&address;
79347 +
79348 + if (gr_handle_sock_client(sck)) {
79349 + err = -EACCES;
79350 + goto out_put;
79351 + }
79352 +
79353 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
79354 + if (err)
79355 + goto out_put;
79356 +
79357 err =
79358 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
79359 if (err)
79360 @@ -1965,7 +2025,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
79361 * checking falls down on this.
79362 */
79363 if (copy_from_user(ctl_buf,
79364 - (void __user __force *)msg_sys->msg_control,
79365 + (void __force_user *)msg_sys->msg_control,
79366 ctl_len))
79367 goto out_freectl;
79368 msg_sys->msg_control = ctl_buf;
79369 @@ -2133,7 +2193,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
79370 * kernel msghdr to use the kernel address space)
79371 */
79372
79373 - uaddr = (__force void __user *)msg_sys->msg_name;
79374 + uaddr = (void __force_user *)msg_sys->msg_name;
79375 uaddr_len = COMPAT_NAMELEN(msg);
79376 if (MSG_CMSG_COMPAT & flags) {
79377 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
79378 @@ -2762,7 +2822,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79379 }
79380
79381 ifr = compat_alloc_user_space(buf_size);
79382 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
79383 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
79384
79385 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
79386 return -EFAULT;
79387 @@ -2786,12 +2846,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79388 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
79389
79390 if (copy_in_user(rxnfc, compat_rxnfc,
79391 - (void *)(&rxnfc->fs.m_ext + 1) -
79392 - (void *)rxnfc) ||
79393 + (void __user *)(&rxnfc->fs.m_ext + 1) -
79394 + (void __user *)rxnfc) ||
79395 copy_in_user(&rxnfc->fs.ring_cookie,
79396 &compat_rxnfc->fs.ring_cookie,
79397 - (void *)(&rxnfc->fs.location + 1) -
79398 - (void *)&rxnfc->fs.ring_cookie) ||
79399 + (void __user *)(&rxnfc->fs.location + 1) -
79400 + (void __user *)&rxnfc->fs.ring_cookie) ||
79401 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
79402 sizeof(rxnfc->rule_cnt)))
79403 return -EFAULT;
79404 @@ -2803,12 +2863,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
79405
79406 if (convert_out) {
79407 if (copy_in_user(compat_rxnfc, rxnfc,
79408 - (const void *)(&rxnfc->fs.m_ext + 1) -
79409 - (const void *)rxnfc) ||
79410 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
79411 + (const void __user *)rxnfc) ||
79412 copy_in_user(&compat_rxnfc->fs.ring_cookie,
79413 &rxnfc->fs.ring_cookie,
79414 - (const void *)(&rxnfc->fs.location + 1) -
79415 - (const void *)&rxnfc->fs.ring_cookie) ||
79416 + (const void __user *)(&rxnfc->fs.location + 1) -
79417 + (const void __user *)&rxnfc->fs.ring_cookie) ||
79418 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
79419 sizeof(rxnfc->rule_cnt)))
79420 return -EFAULT;
79421 @@ -2878,7 +2938,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
79422 old_fs = get_fs();
79423 set_fs(KERNEL_DS);
79424 err = dev_ioctl(net, cmd,
79425 - (struct ifreq __user __force *) &kifr);
79426 + (struct ifreq __force_user *) &kifr);
79427 set_fs(old_fs);
79428
79429 return err;
79430 @@ -2987,7 +3047,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
79431
79432 old_fs = get_fs();
79433 set_fs(KERNEL_DS);
79434 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
79435 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
79436 set_fs(old_fs);
79437
79438 if (cmd == SIOCGIFMAP && !err) {
79439 @@ -3092,7 +3152,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
79440 ret |= __get_user(rtdev, &(ur4->rt_dev));
79441 if (rtdev) {
79442 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
79443 - r4.rt_dev = (char __user __force *)devname;
79444 + r4.rt_dev = (char __force_user *)devname;
79445 devname[15] = 0;
79446 } else
79447 r4.rt_dev = NULL;
79448 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
79449 int __user *uoptlen;
79450 int err;
79451
79452 - uoptval = (char __user __force *) optval;
79453 - uoptlen = (int __user __force *) optlen;
79454 + uoptval = (char __force_user *) optval;
79455 + uoptlen = (int __force_user *) optlen;
79456
79457 set_fs(KERNEL_DS);
79458 if (level == SOL_SOCKET)
79459 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
79460 char __user *uoptval;
79461 int err;
79462
79463 - uoptval = (char __user __force *) optval;
79464 + uoptval = (char __force_user *) optval;
79465
79466 set_fs(KERNEL_DS);
79467 if (level == SOL_SOCKET)
79468 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
79469 index eda32ae..1c9fa7c 100644
79470 --- a/net/sunrpc/sched.c
79471 +++ b/net/sunrpc/sched.c
79472 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
79473 #ifdef RPC_DEBUG
79474 static void rpc_task_set_debuginfo(struct rpc_task *task)
79475 {
79476 - static atomic_t rpc_pid;
79477 + static atomic_unchecked_t rpc_pid;
79478
79479 - task->tk_pid = atomic_inc_return(&rpc_pid);
79480 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
79481 }
79482 #else
79483 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
79484 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
79485 index 8343737..677025e 100644
79486 --- a/net/sunrpc/xprtrdma/svc_rdma.c
79487 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
79488 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
79489 static unsigned int min_max_inline = 4096;
79490 static unsigned int max_max_inline = 65536;
79491
79492 -atomic_t rdma_stat_recv;
79493 -atomic_t rdma_stat_read;
79494 -atomic_t rdma_stat_write;
79495 -atomic_t rdma_stat_sq_starve;
79496 -atomic_t rdma_stat_rq_starve;
79497 -atomic_t rdma_stat_rq_poll;
79498 -atomic_t rdma_stat_rq_prod;
79499 -atomic_t rdma_stat_sq_poll;
79500 -atomic_t rdma_stat_sq_prod;
79501 +atomic_unchecked_t rdma_stat_recv;
79502 +atomic_unchecked_t rdma_stat_read;
79503 +atomic_unchecked_t rdma_stat_write;
79504 +atomic_unchecked_t rdma_stat_sq_starve;
79505 +atomic_unchecked_t rdma_stat_rq_starve;
79506 +atomic_unchecked_t rdma_stat_rq_poll;
79507 +atomic_unchecked_t rdma_stat_rq_prod;
79508 +atomic_unchecked_t rdma_stat_sq_poll;
79509 +atomic_unchecked_t rdma_stat_sq_prod;
79510
79511 /* Temporary NFS request map and context caches */
79512 struct kmem_cache *svc_rdma_map_cachep;
79513 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
79514 len -= *ppos;
79515 if (len > *lenp)
79516 len = *lenp;
79517 - if (len && copy_to_user(buffer, str_buf, len))
79518 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
79519 return -EFAULT;
79520 *lenp = len;
79521 *ppos += len;
79522 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
79523 {
79524 .procname = "rdma_stat_read",
79525 .data = &rdma_stat_read,
79526 - .maxlen = sizeof(atomic_t),
79527 + .maxlen = sizeof(atomic_unchecked_t),
79528 .mode = 0644,
79529 .proc_handler = read_reset_stat,
79530 },
79531 {
79532 .procname = "rdma_stat_recv",
79533 .data = &rdma_stat_recv,
79534 - .maxlen = sizeof(atomic_t),
79535 + .maxlen = sizeof(atomic_unchecked_t),
79536 .mode = 0644,
79537 .proc_handler = read_reset_stat,
79538 },
79539 {
79540 .procname = "rdma_stat_write",
79541 .data = &rdma_stat_write,
79542 - .maxlen = sizeof(atomic_t),
79543 + .maxlen = sizeof(atomic_unchecked_t),
79544 .mode = 0644,
79545 .proc_handler = read_reset_stat,
79546 },
79547 {
79548 .procname = "rdma_stat_sq_starve",
79549 .data = &rdma_stat_sq_starve,
79550 - .maxlen = sizeof(atomic_t),
79551 + .maxlen = sizeof(atomic_unchecked_t),
79552 .mode = 0644,
79553 .proc_handler = read_reset_stat,
79554 },
79555 {
79556 .procname = "rdma_stat_rq_starve",
79557 .data = &rdma_stat_rq_starve,
79558 - .maxlen = sizeof(atomic_t),
79559 + .maxlen = sizeof(atomic_unchecked_t),
79560 .mode = 0644,
79561 .proc_handler = read_reset_stat,
79562 },
79563 {
79564 .procname = "rdma_stat_rq_poll",
79565 .data = &rdma_stat_rq_poll,
79566 - .maxlen = sizeof(atomic_t),
79567 + .maxlen = sizeof(atomic_unchecked_t),
79568 .mode = 0644,
79569 .proc_handler = read_reset_stat,
79570 },
79571 {
79572 .procname = "rdma_stat_rq_prod",
79573 .data = &rdma_stat_rq_prod,
79574 - .maxlen = sizeof(atomic_t),
79575 + .maxlen = sizeof(atomic_unchecked_t),
79576 .mode = 0644,
79577 .proc_handler = read_reset_stat,
79578 },
79579 {
79580 .procname = "rdma_stat_sq_poll",
79581 .data = &rdma_stat_sq_poll,
79582 - .maxlen = sizeof(atomic_t),
79583 + .maxlen = sizeof(atomic_unchecked_t),
79584 .mode = 0644,
79585 .proc_handler = read_reset_stat,
79586 },
79587 {
79588 .procname = "rdma_stat_sq_prod",
79589 .data = &rdma_stat_sq_prod,
79590 - .maxlen = sizeof(atomic_t),
79591 + .maxlen = sizeof(atomic_unchecked_t),
79592 .mode = 0644,
79593 .proc_handler = read_reset_stat,
79594 },
79595 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79596 index 41cb63b..c4a1489 100644
79597 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79598 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
79599 @@ -501,7 +501,7 @@ next_sge:
79600 svc_rdma_put_context(ctxt, 0);
79601 goto out;
79602 }
79603 - atomic_inc(&rdma_stat_read);
79604 + atomic_inc_unchecked(&rdma_stat_read);
79605
79606 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
79607 chl_map->ch[ch_no].count -= read_wr.num_sge;
79608 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
79609 dto_q);
79610 list_del_init(&ctxt->dto_q);
79611 } else {
79612 - atomic_inc(&rdma_stat_rq_starve);
79613 + atomic_inc_unchecked(&rdma_stat_rq_starve);
79614 clear_bit(XPT_DATA, &xprt->xpt_flags);
79615 ctxt = NULL;
79616 }
79617 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
79618 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
79619 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
79620 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
79621 - atomic_inc(&rdma_stat_recv);
79622 + atomic_inc_unchecked(&rdma_stat_recv);
79623
79624 /* Build up the XDR from the receive buffers. */
79625 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
79626 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79627 index 42eb7ba..c887c45 100644
79628 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79629 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
79630 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
79631 write_wr.wr.rdma.remote_addr = to;
79632
79633 /* Post It */
79634 - atomic_inc(&rdma_stat_write);
79635 + atomic_inc_unchecked(&rdma_stat_write);
79636 if (svc_rdma_send(xprt, &write_wr))
79637 goto err;
79638 return 0;
79639 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
79640 index 73b428b..5f3f8f3 100644
79641 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
79642 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
79643 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
79644 return;
79645
79646 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
79647 - atomic_inc(&rdma_stat_rq_poll);
79648 + atomic_inc_unchecked(&rdma_stat_rq_poll);
79649
79650 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
79651 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
79652 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
79653 }
79654
79655 if (ctxt)
79656 - atomic_inc(&rdma_stat_rq_prod);
79657 + atomic_inc_unchecked(&rdma_stat_rq_prod);
79658
79659 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
79660 /*
79661 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
79662 return;
79663
79664 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
79665 - atomic_inc(&rdma_stat_sq_poll);
79666 + atomic_inc_unchecked(&rdma_stat_sq_poll);
79667 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
79668 if (wc.status != IB_WC_SUCCESS)
79669 /* Close the transport */
79670 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
79671 }
79672
79673 if (ctxt)
79674 - atomic_inc(&rdma_stat_sq_prod);
79675 + atomic_inc_unchecked(&rdma_stat_sq_prod);
79676 }
79677
79678 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
79679 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
79680 spin_lock_bh(&xprt->sc_lock);
79681 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
79682 spin_unlock_bh(&xprt->sc_lock);
79683 - atomic_inc(&rdma_stat_sq_starve);
79684 + atomic_inc_unchecked(&rdma_stat_sq_starve);
79685
79686 /* See if we can opportunistically reap SQ WR to make room */
79687 sq_cq_reap(xprt);
79688 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
79689 index e3a6e37..be2ea77 100644
79690 --- a/net/sysctl_net.c
79691 +++ b/net/sysctl_net.c
79692 @@ -43,7 +43,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
79693 struct ctl_table *table)
79694 {
79695 /* Allow network administrator to have same access as root. */
79696 - if (capable(CAP_NET_ADMIN)) {
79697 + if (capable_nolog(CAP_NET_ADMIN)) {
79698 int mode = (table->mode >> 6) & 7;
79699 return (mode << 6) | (mode << 3) | mode;
79700 }
79701 diff --git a/net/tipc/link.c b/net/tipc/link.c
79702 index 7a614f4..b14dbd2 100644
79703 --- a/net/tipc/link.c
79704 +++ b/net/tipc/link.c
79705 @@ -1164,7 +1164,7 @@ static int link_send_sections_long(struct tipc_port *sender,
79706 struct tipc_msg fragm_hdr;
79707 struct sk_buff *buf, *buf_chain, *prev;
79708 u32 fragm_crs, fragm_rest, hsz, sect_rest;
79709 - const unchar *sect_crs;
79710 + const unchar __user *sect_crs;
79711 int curr_sect;
79712 u32 fragm_no;
79713
79714 @@ -1205,7 +1205,7 @@ again:
79715
79716 if (!sect_rest) {
79717 sect_rest = msg_sect[++curr_sect].iov_len;
79718 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
79719 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
79720 }
79721
79722 if (sect_rest < fragm_rest)
79723 @@ -1224,7 +1224,7 @@ error:
79724 }
79725 } else
79726 skb_copy_to_linear_data_offset(buf, fragm_crs,
79727 - sect_crs, sz);
79728 + (const void __force_kernel *)sect_crs, sz);
79729 sect_crs += sz;
79730 sect_rest -= sz;
79731 fragm_crs += sz;
79732 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
79733 index deea0d2..fa13bd7 100644
79734 --- a/net/tipc/msg.c
79735 +++ b/net/tipc/msg.c
79736 @@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
79737 msg_sect[cnt].iov_len);
79738 else
79739 skb_copy_to_linear_data_offset(*buf, pos,
79740 - msg_sect[cnt].iov_base,
79741 + (const void __force_kernel *)msg_sect[cnt].iov_base,
79742 msg_sect[cnt].iov_len);
79743 pos += msg_sect[cnt].iov_len;
79744 }
79745 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
79746 index f976e9cd..560d055 100644
79747 --- a/net/tipc/subscr.c
79748 +++ b/net/tipc/subscr.c
79749 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
79750 {
79751 struct iovec msg_sect;
79752
79753 - msg_sect.iov_base = (void *)&sub->evt;
79754 + msg_sect.iov_base = (void __force_user *)&sub->evt;
79755 msg_sect.iov_len = sizeof(struct tipc_event);
79756
79757 sub->evt.event = htohl(event, sub->swap);
79758 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
79759 index 9aa708c..590bb48 100644
79760 --- a/net/unix/af_unix.c
79761 +++ b/net/unix/af_unix.c
79762 @@ -780,6 +780,12 @@ static struct sock *unix_find_other(struct net *net,
79763 err = -ECONNREFUSED;
79764 if (!S_ISSOCK(inode->i_mode))
79765 goto put_fail;
79766 +
79767 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
79768 + err = -EACCES;
79769 + goto put_fail;
79770 + }
79771 +
79772 u = unix_find_socket_byinode(inode);
79773 if (!u)
79774 goto put_fail;
79775 @@ -800,6 +806,13 @@ static struct sock *unix_find_other(struct net *net,
79776 if (u) {
79777 struct dentry *dentry;
79778 dentry = unix_sk(u)->path.dentry;
79779 +
79780 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
79781 + err = -EPERM;
79782 + sock_put(u);
79783 + goto fail;
79784 + }
79785 +
79786 if (dentry)
79787 touch_atime(&unix_sk(u)->path);
79788 } else
79789 @@ -882,11 +895,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
79790 err = security_path_mknod(&path, dentry, mode, 0);
79791 if (err)
79792 goto out_mknod_drop_write;
79793 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
79794 + err = -EACCES;
79795 + goto out_mknod_drop_write;
79796 + }
79797 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
79798 out_mknod_drop_write:
79799 mnt_drop_write(path.mnt);
79800 if (err)
79801 goto out_mknod_dput;
79802 +
79803 + gr_handle_create(dentry, path.mnt);
79804 +
79805 mutex_unlock(&path.dentry->d_inode->i_mutex);
79806 dput(path.dentry);
79807 path.dentry = dentry;
79808 diff --git a/net/wireless/core.h b/net/wireless/core.h
79809 index bc686ef..27845e6 100644
79810 --- a/net/wireless/core.h
79811 +++ b/net/wireless/core.h
79812 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
79813 struct mutex mtx;
79814
79815 /* rfkill support */
79816 - struct rfkill_ops rfkill_ops;
79817 + rfkill_ops_no_const rfkill_ops;
79818 struct rfkill *rfkill;
79819 struct work_struct rfkill_sync;
79820
79821 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
79822 index b0eb7aa..7d73e82 100644
79823 --- a/net/wireless/wext-core.c
79824 +++ b/net/wireless/wext-core.c
79825 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
79826 */
79827
79828 /* Support for very large requests */
79829 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
79830 - (user_length > descr->max_tokens)) {
79831 + if (user_length > descr->max_tokens) {
79832 /* Allow userspace to GET more than max so
79833 * we can support any size GET requests.
79834 * There is still a limit : -ENOMEM.
79835 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
79836 }
79837 }
79838
79839 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
79840 - /*
79841 - * If this is a GET, but not NOMAX, it means that the extra
79842 - * data is not bounded by userspace, but by max_tokens. Thus
79843 - * set the length to max_tokens. This matches the extra data
79844 - * allocation.
79845 - * The driver should fill it with the number of tokens it
79846 - * provided, and it may check iwp->length rather than having
79847 - * knowledge of max_tokens. If the driver doesn't change the
79848 - * iwp->length, this ioctl just copies back max_token tokens
79849 - * filled with zeroes. Hopefully the driver isn't claiming
79850 - * them to be valid data.
79851 - */
79852 - iwp->length = descr->max_tokens;
79853 - }
79854 -
79855 err = handler(dev, info, (union iwreq_data *) iwp, extra);
79856
79857 iwp->length += essid_compat;
79858 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
79859 index ccfbd32..9b61cf9f 100644
79860 --- a/net/xfrm/xfrm_policy.c
79861 +++ b/net/xfrm/xfrm_policy.c
79862 @@ -300,7 +300,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
79863 {
79864 policy->walk.dead = 1;
79865
79866 - atomic_inc(&policy->genid);
79867 + atomic_inc_unchecked(&policy->genid);
79868
79869 if (del_timer(&policy->timer))
79870 xfrm_pol_put(policy);
79871 @@ -584,7 +584,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
79872 hlist_add_head(&policy->bydst, chain);
79873 xfrm_pol_hold(policy);
79874 net->xfrm.policy_count[dir]++;
79875 - atomic_inc(&flow_cache_genid);
79876 + atomic_inc_unchecked(&flow_cache_genid);
79877 if (delpol)
79878 __xfrm_policy_unlink(delpol, dir);
79879 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
79880 @@ -1532,7 +1532,7 @@ free_dst:
79881 goto out;
79882 }
79883
79884 -static int inline
79885 +static inline int
79886 xfrm_dst_alloc_copy(void **target, const void *src, int size)
79887 {
79888 if (!*target) {
79889 @@ -1544,7 +1544,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
79890 return 0;
79891 }
79892
79893 -static int inline
79894 +static inline int
79895 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
79896 {
79897 #ifdef CONFIG_XFRM_SUB_POLICY
79898 @@ -1556,7 +1556,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
79899 #endif
79900 }
79901
79902 -static int inline
79903 +static inline int
79904 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
79905 {
79906 #ifdef CONFIG_XFRM_SUB_POLICY
79907 @@ -1650,7 +1650,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
79908
79909 xdst->num_pols = num_pols;
79910 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
79911 - xdst->policy_genid = atomic_read(&pols[0]->genid);
79912 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
79913
79914 return xdst;
79915 }
79916 @@ -2350,7 +2350,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
79917 if (xdst->xfrm_genid != dst->xfrm->genid)
79918 return 0;
79919 if (xdst->num_pols > 0 &&
79920 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
79921 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
79922 return 0;
79923
79924 mtu = dst_mtu(dst->child);
79925 @@ -2887,7 +2887,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
79926 sizeof(pol->xfrm_vec[i].saddr));
79927 pol->xfrm_vec[i].encap_family = mp->new_family;
79928 /* flush bundles */
79929 - atomic_inc(&pol->genid);
79930 + atomic_inc_unchecked(&pol->genid);
79931 }
79932 }
79933
79934 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
79935 index 5b228f9..6aca4e3 100644
79936 --- a/net/xfrm/xfrm_state.c
79937 +++ b/net/xfrm/xfrm_state.c
79938 @@ -1981,8 +1981,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
79939 goto error;
79940
79941 x->outer_mode = xfrm_get_mode(x->props.mode, family);
79942 - if (x->outer_mode == NULL)
79943 + if (x->outer_mode == NULL) {
79944 + err = -EPROTONOSUPPORT;
79945 goto error;
79946 + }
79947
79948 if (init_replay) {
79949 err = xfrm_init_replay(x);
79950 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
79951 index 44293b3..1870c62 100644
79952 --- a/net/xfrm/xfrm_user.c
79953 +++ b/net/xfrm/xfrm_user.c
79954 @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
79955 struct nlattr **attrs)
79956 {
79957 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
79958 + struct xfrm_replay_state_esn *rs;
79959
79960 - if ((p->flags & XFRM_STATE_ESN) && !rt)
79961 - return -EINVAL;
79962 + if (p->flags & XFRM_STATE_ESN) {
79963 + if (!rt)
79964 + return -EINVAL;
79965 +
79966 + rs = nla_data(rt);
79967 +
79968 + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
79969 + return -EINVAL;
79970 +
79971 + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
79972 + nla_len(rt) != sizeof(*rs))
79973 + return -EINVAL;
79974 + }
79975
79976 if (!rt)
79977 return 0;
79978 @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
79979 struct nlattr *rp)
79980 {
79981 struct xfrm_replay_state_esn *up;
79982 + int ulen;
79983
79984 if (!replay_esn || !rp)
79985 return 0;
79986
79987 up = nla_data(rp);
79988 + ulen = xfrm_replay_state_esn_len(up);
79989
79990 - if (xfrm_replay_state_esn_len(replay_esn) !=
79991 - xfrm_replay_state_esn_len(up))
79992 + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
79993 return -EINVAL;
79994
79995 return 0;
79996 @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
79997 struct nlattr *rta)
79998 {
79999 struct xfrm_replay_state_esn *p, *pp, *up;
80000 + int klen, ulen;
80001
80002 if (!rta)
80003 return 0;
80004
80005 up = nla_data(rta);
80006 + klen = xfrm_replay_state_esn_len(up);
80007 + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
80008
80009 - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
80010 + p = kzalloc(klen, GFP_KERNEL);
80011 if (!p)
80012 return -ENOMEM;
80013
80014 - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
80015 + pp = kzalloc(klen, GFP_KERNEL);
80016 if (!pp) {
80017 kfree(p);
80018 return -ENOMEM;
80019 }
80020
80021 + memcpy(p, up, ulen);
80022 + memcpy(pp, up, ulen);
80023 +
80024 *replay_esn = p;
80025 *preplay_esn = pp;
80026
80027 @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
80028 * somehow made shareable and move it to xfrm_state.c - JHS
80029 *
80030 */
80031 -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
80032 +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
80033 + int update_esn)
80034 {
80035 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
80036 - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
80037 + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
80038 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
80039 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
80040 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
80041 @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
80042 goto error;
80043
80044 /* override default values from above */
80045 - xfrm_update_ae_params(x, attrs);
80046 + xfrm_update_ae_params(x, attrs, 0);
80047
80048 return x;
80049
80050 @@ -689,6 +709,7 @@ out:
80051
80052 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
80053 {
80054 + memset(p, 0, sizeof(*p));
80055 memcpy(&p->id, &x->id, sizeof(p->id));
80056 memcpy(&p->sel, &x->sel, sizeof(p->sel));
80057 memcpy(&p->lft, &x->lft, sizeof(p->lft));
80058 @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
80059 return -EMSGSIZE;
80060
80061 algo = nla_data(nla);
80062 - strcpy(algo->alg_name, auth->alg_name);
80063 + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
80064 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
80065 algo->alg_key_len = auth->alg_key_len;
80066
80067 @@ -872,6 +893,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
80068 {
80069 struct xfrm_dump_info info;
80070 struct sk_buff *skb;
80071 + int err;
80072
80073 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
80074 if (!skb)
80075 @@ -882,9 +904,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
80076 info.nlmsg_seq = seq;
80077 info.nlmsg_flags = 0;
80078
80079 - if (dump_one_state(x, 0, &info)) {
80080 + err = dump_one_state(x, 0, &info);
80081 + if (err) {
80082 kfree_skb(skb);
80083 - return NULL;
80084 + return ERR_PTR(err);
80085 }
80086
80087 return skb;
80088 @@ -1309,6 +1332,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
80089
80090 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
80091 {
80092 + memset(p, 0, sizeof(*p));
80093 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
80094 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
80095 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
80096 @@ -1413,6 +1437,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
80097 struct xfrm_user_tmpl *up = &vec[i];
80098 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
80099
80100 + memset(up, 0, sizeof(*up));
80101 memcpy(&up->id, &kp->id, sizeof(up->id));
80102 up->family = kp->encap_family;
80103 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
80104 @@ -1812,7 +1837,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
80105 goto out;
80106
80107 spin_lock_bh(&x->lock);
80108 - xfrm_update_ae_params(x, attrs);
80109 + xfrm_update_ae_params(x, attrs, 1);
80110 spin_unlock_bh(&x->lock);
80111
80112 c.event = nlh->nlmsg_type;
80113 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
80114 index ff1720d..ed8475e 100644
80115 --- a/scripts/Makefile.build
80116 +++ b/scripts/Makefile.build
80117 @@ -111,7 +111,7 @@ endif
80118 endif
80119
80120 # Do not include host rules unless needed
80121 -ifneq ($(hostprogs-y)$(hostprogs-m),)
80122 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
80123 include scripts/Makefile.host
80124 endif
80125
80126 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
80127 index 686cb0d..9d653bf 100644
80128 --- a/scripts/Makefile.clean
80129 +++ b/scripts/Makefile.clean
80130 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
80131 __clean-files := $(extra-y) $(always) \
80132 $(targets) $(clean-files) \
80133 $(host-progs) \
80134 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
80135 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
80136 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
80137
80138 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
80139
80140 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
80141 index 1ac414f..38575f7 100644
80142 --- a/scripts/Makefile.host
80143 +++ b/scripts/Makefile.host
80144 @@ -31,6 +31,8 @@
80145 # Note: Shared libraries consisting of C++ files are not supported
80146
80147 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
80148 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
80149 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
80150
80151 # C code
80152 # Executables compiled from a single .c file
80153 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
80154 # Shared libaries (only .c supported)
80155 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
80156 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
80157 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
80158 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
80159 # Remove .so files from "xxx-objs"
80160 host-cobjs := $(filter-out %.so,$(host-cobjs))
80161 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
80162
80163 -#Object (.o) files used by the shared libaries
80164 +# Object (.o) files used by the shared libaries
80165 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
80166 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
80167
80168 # output directory for programs/.o files
80169 # hostprogs-y := tools/build may have been specified. Retrieve directory
80170 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
80171 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
80172 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
80173 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
80174 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
80175 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
80176 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
80177 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
80178
80179 obj-dirs += $(host-objdirs)
80180 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
80181 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
80182 $(call if_changed_dep,host-cshobjs)
80183
80184 +# Compile .c file, create position independent .o file
80185 +# host-cxxshobjs -> .o
80186 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
80187 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
80188 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
80189 + $(call if_changed_dep,host-cxxshobjs)
80190 +
80191 # Link a shared library, based on position independent .o files
80192 # *.o -> .so shared library (host-cshlib)
80193 quiet_cmd_host-cshlib = HOSTLLD -shared $@
80194 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
80195 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
80196 $(call if_changed,host-cshlib)
80197
80198 +# Link a shared library, based on position independent .o files
80199 +# *.o -> .so shared library (host-cxxshlib)
80200 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
80201 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
80202 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
80203 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
80204 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
80205 + $(call if_changed,host-cxxshlib)
80206 +
80207 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
80208 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
80209 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
80210
80211 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
80212 index cb1f50c..cef2a7c 100644
80213 --- a/scripts/basic/fixdep.c
80214 +++ b/scripts/basic/fixdep.c
80215 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
80216 /*
80217 * Lookup a value in the configuration string.
80218 */
80219 -static int is_defined_config(const char *name, int len, unsigned int hash)
80220 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
80221 {
80222 struct item *aux;
80223
80224 @@ -211,10 +211,10 @@ static void clear_config(void)
80225 /*
80226 * Record the use of a CONFIG_* word.
80227 */
80228 -static void use_config(const char *m, int slen)
80229 +static void use_config(const char *m, unsigned int slen)
80230 {
80231 unsigned int hash = strhash(m, slen);
80232 - int c, i;
80233 + unsigned int c, i;
80234
80235 if (is_defined_config(m, slen, hash))
80236 return;
80237 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
80238
80239 static void parse_config_file(const char *map, size_t len)
80240 {
80241 - const int *end = (const int *) (map + len);
80242 + const unsigned int *end = (const unsigned int *) (map + len);
80243 /* start at +1, so that p can never be < map */
80244 - const int *m = (const int *) map + 1;
80245 + const unsigned int *m = (const unsigned int *) map + 1;
80246 const char *p, *q;
80247
80248 for (; m < end; m++) {
80249 @@ -406,7 +406,7 @@ static void print_deps(void)
80250 static void traps(void)
80251 {
80252 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
80253 - int *p = (int *)test;
80254 + unsigned int *p = (unsigned int *)test;
80255
80256 if (*p != INT_CONF) {
80257 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
80258 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
80259 new file mode 100644
80260 index 0000000..008ac1a
80261 --- /dev/null
80262 +++ b/scripts/gcc-plugin.sh
80263 @@ -0,0 +1,17 @@
80264 +#!/bin/bash
80265 +plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
80266 +#include "gcc-plugin.h"
80267 +#include "tree.h"
80268 +#include "tm.h"
80269 +#include "rtl.h"
80270 +#ifdef ENABLE_BUILD_WITH_CXX
80271 +#warning $2
80272 +#else
80273 +#warning $1
80274 +#endif
80275 +EOF`
80276 +if [ $? -eq 0 ]
80277 +then
80278 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
80279 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
80280 +fi
80281 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
80282 index cd9c6c6..0c8f0fa 100644
80283 --- a/scripts/link-vmlinux.sh
80284 +++ b/scripts/link-vmlinux.sh
80285 @@ -147,7 +147,7 @@ else
80286 fi;
80287
80288 # final build of init/
80289 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
80290 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
80291
80292 kallsymso=""
80293 kallsyms_vmlinux=""
80294 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
80295 index 5759751..b170367 100644
80296 --- a/scripts/mod/file2alias.c
80297 +++ b/scripts/mod/file2alias.c
80298 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
80299 unsigned long size, unsigned long id_size,
80300 void *symval)
80301 {
80302 - int i;
80303 + unsigned int i;
80304
80305 if (size % id_size || size < id_size) {
80306 if (cross_build != 0)
80307 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
80308 /* USB is special because the bcdDevice can be matched against a numeric range */
80309 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
80310 static void do_usb_entry(struct usb_device_id *id,
80311 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
80312 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
80313 unsigned char range_lo, unsigned char range_hi,
80314 unsigned char max, struct module *mod)
80315 {
80316 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
80317 {
80318 unsigned int devlo, devhi;
80319 unsigned char chi, clo, max;
80320 - int ndigits;
80321 + unsigned int ndigits;
80322
80323 id->match_flags = TO_NATIVE(id->match_flags);
80324 id->idVendor = TO_NATIVE(id->idVendor);
80325 @@ -504,7 +504,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
80326 for (i = 0; i < count; i++) {
80327 const char *id = (char *)devs[i].id;
80328 char acpi_id[sizeof(devs[0].id)];
80329 - int j;
80330 + unsigned int j;
80331
80332 buf_printf(&mod->dev_table_buf,
80333 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
80334 @@ -534,7 +534,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
80335
80336 for (j = 0; j < PNP_MAX_DEVICES; j++) {
80337 const char *id = (char *)card->devs[j].id;
80338 - int i2, j2;
80339 + unsigned int i2, j2;
80340 int dup = 0;
80341
80342 if (!id[0])
80343 @@ -560,7 +560,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
80344 /* add an individual alias for every device entry */
80345 if (!dup) {
80346 char acpi_id[sizeof(card->devs[0].id)];
80347 - int k;
80348 + unsigned int k;
80349
80350 buf_printf(&mod->dev_table_buf,
80351 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
80352 @@ -885,7 +885,7 @@ static void dmi_ascii_filter(char *d, const char *s)
80353 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
80354 char *alias)
80355 {
80356 - int i, j;
80357 + unsigned int i, j;
80358
80359 sprintf(alias, "dmi*");
80360
80361 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
80362 index 0f84bb3..2d42035 100644
80363 --- a/scripts/mod/modpost.c
80364 +++ b/scripts/mod/modpost.c
80365 @@ -925,6 +925,7 @@ enum mismatch {
80366 ANY_INIT_TO_ANY_EXIT,
80367 ANY_EXIT_TO_ANY_INIT,
80368 EXPORT_TO_INIT_EXIT,
80369 + DATA_TO_TEXT
80370 };
80371
80372 struct sectioncheck {
80373 @@ -1033,6 +1034,12 @@ const struct sectioncheck sectioncheck[] = {
80374 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
80375 .mismatch = EXPORT_TO_INIT_EXIT,
80376 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
80377 +},
80378 +/* Do not reference code from writable data */
80379 +{
80380 + .fromsec = { DATA_SECTIONS, NULL },
80381 + .tosec = { TEXT_SECTIONS, NULL },
80382 + .mismatch = DATA_TO_TEXT
80383 }
80384 };
80385
80386 @@ -1155,10 +1162,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
80387 continue;
80388 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
80389 continue;
80390 - if (sym->st_value == addr)
80391 - return sym;
80392 /* Find a symbol nearby - addr are maybe negative */
80393 d = sym->st_value - addr;
80394 + if (d == 0)
80395 + return sym;
80396 if (d < 0)
80397 d = addr - sym->st_value;
80398 if (d < distance) {
80399 @@ -1437,6 +1444,14 @@ static void report_sec_mismatch(const char *modname,
80400 tosym, prl_to, prl_to, tosym);
80401 free(prl_to);
80402 break;
80403 + case DATA_TO_TEXT:
80404 +#if 0
80405 + fprintf(stderr,
80406 + "The %s %s:%s references\n"
80407 + "the %s %s:%s%s\n",
80408 + from, fromsec, fromsym, to, tosec, tosym, to_p);
80409 +#endif
80410 + break;
80411 }
80412 fprintf(stderr, "\n");
80413 }
80414 @@ -1671,7 +1686,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
80415 static void check_sec_ref(struct module *mod, const char *modname,
80416 struct elf_info *elf)
80417 {
80418 - int i;
80419 + unsigned int i;
80420 Elf_Shdr *sechdrs = elf->sechdrs;
80421
80422 /* Walk through all sections */
80423 @@ -1769,7 +1784,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
80424 va_end(ap);
80425 }
80426
80427 -void buf_write(struct buffer *buf, const char *s, int len)
80428 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
80429 {
80430 if (buf->size - buf->pos < len) {
80431 buf->size += len + SZ;
80432 @@ -1987,7 +2002,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
80433 if (fstat(fileno(file), &st) < 0)
80434 goto close_write;
80435
80436 - if (st.st_size != b->pos)
80437 + if (st.st_size != (off_t)b->pos)
80438 goto close_write;
80439
80440 tmp = NOFAIL(malloc(b->pos));
80441 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
80442 index 51207e4..f7d603d 100644
80443 --- a/scripts/mod/modpost.h
80444 +++ b/scripts/mod/modpost.h
80445 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
80446
80447 struct buffer {
80448 char *p;
80449 - int pos;
80450 - int size;
80451 + unsigned int pos;
80452 + unsigned int size;
80453 };
80454
80455 void __attribute__((format(printf, 2, 3)))
80456 buf_printf(struct buffer *buf, const char *fmt, ...);
80457
80458 void
80459 -buf_write(struct buffer *buf, const char *s, int len);
80460 +buf_write(struct buffer *buf, const char *s, unsigned int len);
80461
80462 struct module {
80463 struct module *next;
80464 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
80465 index 9dfcd6d..099068e 100644
80466 --- a/scripts/mod/sumversion.c
80467 +++ b/scripts/mod/sumversion.c
80468 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
80469 goto out;
80470 }
80471
80472 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
80473 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
80474 warn("writing sum in %s failed: %s\n",
80475 filename, strerror(errno));
80476 goto out;
80477 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
80478 index 5c11312..72742b5 100644
80479 --- a/scripts/pnmtologo.c
80480 +++ b/scripts/pnmtologo.c
80481 @@ -237,14 +237,14 @@ static void write_header(void)
80482 fprintf(out, " * Linux logo %s\n", logoname);
80483 fputs(" */\n\n", out);
80484 fputs("#include <linux/linux_logo.h>\n\n", out);
80485 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
80486 + fprintf(out, "static unsigned char %s_data[] = {\n",
80487 logoname);
80488 }
80489
80490 static void write_footer(void)
80491 {
80492 fputs("\n};\n\n", out);
80493 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
80494 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
80495 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
80496 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
80497 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
80498 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
80499 fputs("\n};\n\n", out);
80500
80501 /* write logo clut */
80502 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
80503 + fprintf(out, "static unsigned char %s_clut[] = {\n",
80504 logoname);
80505 write_hex_cnt = 0;
80506 for (i = 0; i < logo_clutsize; i++) {
80507 diff --git a/security/Kconfig b/security/Kconfig
80508 index e9c6ac7..5ff1ad1 100644
80509 --- a/security/Kconfig
80510 +++ b/security/Kconfig
80511 @@ -4,6 +4,885 @@
80512
80513 menu "Security options"
80514
80515 +menu "Grsecurity"
80516 +
80517 + config ARCH_TRACK_EXEC_LIMIT
80518 + bool
80519 +
80520 + config PAX_KERNEXEC_PLUGIN
80521 + bool
80522 +
80523 + config PAX_PER_CPU_PGD
80524 + bool
80525 +
80526 + config TASK_SIZE_MAX_SHIFT
80527 + int
80528 + depends on X86_64
80529 + default 47 if !PAX_PER_CPU_PGD
80530 + default 42 if PAX_PER_CPU_PGD
80531 +
80532 + config PAX_ENABLE_PAE
80533 + bool
80534 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
80535 +
80536 + config PAX_USERCOPY_SLABS
80537 + bool
80538 +
80539 +config GRKERNSEC
80540 + bool "Grsecurity"
80541 + select CRYPTO
80542 + select CRYPTO_SHA256
80543 + select PROC_FS
80544 + select STOP_MACHINE
80545 + help
80546 + If you say Y here, you will be able to configure many features
80547 + that will enhance the security of your system. It is highly
80548 + recommended that you say Y here and read through the help
80549 + for each option so that you fully understand the features and
80550 + can evaluate their usefulness for your machine.
80551 +
80552 +choice
80553 + prompt "Configuration Method"
80554 + depends on GRKERNSEC
80555 + default GRKERNSEC_CONFIG_CUSTOM
80556 + help
80557 +
80558 +config GRKERNSEC_CONFIG_AUTO
80559 + bool "Automatic"
80560 + help
80561 + If you choose this configuration method, you'll be able to answer a small
80562 + number of simple questions about how you plan to use this kernel.
80563 + The settings of grsecurity and PaX will be automatically configured for
80564 + the highest commonly-used settings within the provided constraints.
80565 +
80566 + If you require additional configuration, custom changes can still be made
80567 + from the "custom configuration" menu.
80568 +
80569 +config GRKERNSEC_CONFIG_CUSTOM
80570 + bool "Custom"
80571 + help
80572 + If you choose this configuration method, you'll be able to configure all
80573 + grsecurity and PaX settings manually. Via this method, no options are
80574 + automatically enabled.
80575 +
80576 +endchoice
80577 +
80578 +choice
80579 + prompt "Usage Type"
80580 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80581 + default GRKERNSEC_CONFIG_SERVER
80582 + help
80583 +
80584 +config GRKERNSEC_CONFIG_SERVER
80585 + bool "Server"
80586 + help
80587 + Choose this option if you plan to use this kernel on a server.
80588 +
80589 +config GRKERNSEC_CONFIG_DESKTOP
80590 + bool "Desktop"
80591 + help
80592 + Choose this option if you plan to use this kernel on a desktop.
80593 +
80594 +endchoice
80595 +
80596 +choice
80597 + prompt "Virtualization Type"
80598 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
80599 + default GRKERNSEC_CONFIG_VIRT_NONE
80600 + help
80601 +
80602 +config GRKERNSEC_CONFIG_VIRT_NONE
80603 + bool "None"
80604 + help
80605 + Choose this option if this kernel will be run on bare metal.
80606 +
80607 +config GRKERNSEC_CONFIG_VIRT_GUEST
80608 + bool "Guest"
80609 + help
80610 + Choose this option if this kernel will be run as a VM guest.
80611 +
80612 +config GRKERNSEC_CONFIG_VIRT_HOST
80613 + bool "Host"
80614 + help
80615 + Choose this option if this kernel will be run as a VM host.
80616 +
80617 +endchoice
80618 +
80619 +choice
80620 + prompt "Virtualization Hardware"
80621 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
80622 + help
80623 +
80624 +config GRKERNSEC_CONFIG_VIRT_EPT
80625 + bool "EPT/RVI Processor Support"
80626 + depends on X86
80627 + help
80628 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
80629 + hardware virtualization. This allows for additional kernel hardening protections
80630 + to operate without additional performance impact.
80631 +
80632 + To see if your Intel processor supports EPT, see:
80633 + http://ark.intel.com/Products/VirtualizationTechnology
80634 + (Most Core i3/5/7 support EPT)
80635 +
80636 + To see if your AMD processor supports RVI, see:
80637 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
80638 +
80639 +config GRKERNSEC_CONFIG_VIRT_SOFT
80640 + bool "First-gen/No Hardware Virtualization"
80641 + help
80642 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
80643 + support hardware virtualization or doesn't support the EPT/RVI extensions.
80644 +
80645 +endchoice
80646 +
80647 +choice
80648 + prompt "Virtualization Software"
80649 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
80650 + help
80651 +
80652 +config GRKERNSEC_CONFIG_VIRT_XEN
80653 + bool "Xen"
80654 + help
80655 + Choose this option if this kernel is running as a Xen guest or host.
80656 +
80657 +config GRKERNSEC_CONFIG_VIRT_VMWARE
80658 + bool "VMWare"
80659 + help
80660 + Choose this option if this kernel is running as a VMWare guest or host.
80661 +
80662 +config GRKERNSEC_CONFIG_VIRT_KVM
80663 + bool "KVM"
80664 + help
80665 + Choose this option if this kernel is running as a KVM guest or host.
80666 +
80667 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
80668 + bool "VirtualBox"
80669 + help
80670 + Choose this option if this kernel is running as a VirtualBox guest or host.
80671 +
80672 +endchoice
80673 +
80674 +choice
80675 + prompt "Required Priorities"
80676 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80677 + default GRKERNSEC_CONFIG_PRIORITY_PERF
80678 + help
80679 +
80680 +config GRKERNSEC_CONFIG_PRIORITY_PERF
80681 + bool "Performance"
80682 + help
80683 + Choose this option if performance is of highest priority for this deployment
80684 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
80685 + and freed memory sanitizing will be disabled.
80686 +
80687 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
80688 + bool "Security"
80689 + help
80690 + Choose this option if security is of highest priority for this deployment of
80691 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
80692 + be enabled for this kernel. In a worst-case scenario, these features can
80693 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
80694 +
80695 +endchoice
80696 +
80697 +menu "Default Special Groups"
80698 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
80699 +
80700 +config GRKERNSEC_PROC_GID
80701 + int "GID exempted from /proc restrictions"
80702 + default 1001
80703 + help
80704 + Setting this GID determines which group will be exempted from
80705 + grsecurity's /proc restrictions, allowing users of the specified
80706 + group to view network statistics and the existence of other users'
80707 + processes on the system.
80708 +
80709 +config GRKERNSEC_TPE_GID
80710 + int "GID for untrusted users"
80711 + depends on GRKERNSEC_CONFIG_SERVER
80712 + default 1005
80713 + help
80714 + Setting this GID determines which group untrusted users should
80715 + be added to. These users will be placed under grsecurity's Trusted Path
80716 + Execution mechanism, preventing them from executing their own binaries.
80717 + The users will only be able to execute binaries in directories owned and
80718 + writable only by the root user.
80719 +
80720 +config GRKERNSEC_SYMLINKOWN_GID
80721 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
80722 + depends on GRKERNSEC_CONFIG_SERVER
80723 + default 1006
80724 + help
80725 + Setting this GID determines what group kernel-enforced
80726 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
80727 + is enabled, a sysctl option with name "symlinkown_gid" is created.
80728 +
80729 +
80730 +endmenu
80731 +
80732 +menu "Customize Configuration"
80733 +depends on GRKERNSEC
80734 +
80735 +menu "PaX"
80736 +
80737 +config PAX
80738 + bool "Enable various PaX features"
80739 + default y if GRKERNSEC_CONFIG_AUTO
80740 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
80741 + help
80742 + This allows you to enable various PaX features. PaX adds
80743 + intrusion prevention mechanisms to the kernel that reduce
80744 + the risks posed by exploitable memory corruption bugs.
80745 +
80746 +menu "PaX Control"
80747 + depends on PAX
80748 +
80749 +config PAX_SOFTMODE
80750 + bool 'Support soft mode'
80751 + help
80752 + Enabling this option will allow you to run PaX in soft mode, that
80753 + is, PaX features will not be enforced by default, only on executables
80754 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
80755 + support as they are the only way to mark executables for soft mode use.
80756 +
80757 + Soft mode can be activated by using the "pax_softmode=1" kernel command
80758 + line option on boot. Furthermore you can control various PaX features
80759 + at runtime via the entries in /proc/sys/kernel/pax.
80760 +
80761 +config PAX_EI_PAX
80762 + bool 'Use legacy ELF header marking'
80763 + default y if GRKERNSEC_CONFIG_AUTO
80764 + help
80765 + Enabling this option will allow you to control PaX features on
80766 + a per executable basis via the 'chpax' utility available at
80767 + http://pax.grsecurity.net/. The control flags will be read from
80768 + an otherwise reserved part of the ELF header. This marking has
80769 + numerous drawbacks (no support for soft-mode, toolchain does not
80770 + know about the non-standard use of the ELF header) therefore it
80771 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
80772 + support.
80773 +
80774 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
80775 + support as well, they will override the legacy EI_PAX marks.
80776 +
80777 + If you enable none of the marking options then all applications
80778 + will run with PaX enabled on them by default.
80779 +
80780 +config PAX_PT_PAX_FLAGS
80781 + bool 'Use ELF program header marking'
80782 + default y if GRKERNSEC_CONFIG_AUTO
80783 + help
80784 + Enabling this option will allow you to control PaX features on
80785 + a per executable basis via the 'paxctl' utility available at
80786 + http://pax.grsecurity.net/. The control flags will be read from
80787 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
80788 + has the benefits of supporting both soft mode and being fully
80789 + integrated into the toolchain (the binutils patch is available
80790 + from http://pax.grsecurity.net).
80791 +
80792 + Note that if you enable the legacy EI_PAX marking support as well,
80793 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
80794 +
80795 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
80796 + must make sure that the marks are the same if a binary has both marks.
80797 +
80798 + If you enable none of the marking options then all applications
80799 + will run with PaX enabled on them by default.
80800 +
80801 +config PAX_XATTR_PAX_FLAGS
80802 + bool 'Use filesystem extended attributes marking'
80803 + default y if GRKERNSEC_CONFIG_AUTO
80804 + select CIFS_XATTR if CIFS
80805 + select EXT2_FS_XATTR if EXT2_FS
80806 + select EXT3_FS_XATTR if EXT3_FS
80807 + select EXT4_FS_XATTR if EXT4_FS
80808 + select JFFS2_FS_XATTR if JFFS2_FS
80809 + select REISERFS_FS_XATTR if REISERFS_FS
80810 + select SQUASHFS_XATTR if SQUASHFS
80811 + select TMPFS_XATTR if TMPFS
80812 + select UBIFS_FS_XATTR if UBIFS_FS
80813 + help
80814 + Enabling this option will allow you to control PaX features on
80815 + a per executable basis via the 'setfattr' utility. The control
80816 + flags will be read from the user.pax.flags extended attribute of
80817 + the file. This marking has the benefit of supporting binary-only
80818 + applications that self-check themselves (e.g., skype) and would
80819 + not tolerate chpax/paxctl changes. The main drawback is that
80820 + extended attributes are not supported by some filesystems (e.g.,
80821 + isofs, udf, vfat) so copying files through such filesystems will
80822 + lose the extended attributes and these PaX markings.
80823 +
80824 + Note that if you enable the legacy EI_PAX marking support as well,
80825 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
80826 +
80827 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
80828 + must make sure that the marks are the same if a binary has both marks.
80829 +
80830 + If you enable none of the marking options then all applications
80831 + will run with PaX enabled on them by default.
80832 +
80833 +choice
80834 + prompt 'MAC system integration'
80835 + default PAX_HAVE_ACL_FLAGS
80836 + help
80837 + Mandatory Access Control systems have the option of controlling
80838 + PaX flags on a per executable basis, choose the method supported
80839 + by your particular system.
80840 +
80841 + - "none": if your MAC system does not interact with PaX,
80842 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
80843 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
80844 +
80845 + NOTE: this option is for developers/integrators only.
80846 +
80847 + config PAX_NO_ACL_FLAGS
80848 + bool 'none'
80849 +
80850 + config PAX_HAVE_ACL_FLAGS
80851 + bool 'direct'
80852 +
80853 + config PAX_HOOK_ACL_FLAGS
80854 + bool 'hook'
80855 +endchoice
80856 +
80857 +endmenu
80858 +
80859 +menu "Non-executable pages"
80860 + depends on PAX
80861 +
80862 +config PAX_NOEXEC
80863 + bool "Enforce non-executable pages"
80864 + default y if GRKERNSEC_CONFIG_AUTO
80865 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
80866 + help
80867 + By design some architectures do not allow for protecting memory
80868 + pages against execution or even if they do, Linux does not make
80869 + use of this feature. In practice this means that if a page is
80870 + readable (such as the stack or heap) it is also executable.
80871 +
80872 + There is a well known exploit technique that makes use of this
80873 + fact and a common programming mistake where an attacker can
80874 + introduce code of his choice somewhere in the attacked program's
80875 + memory (typically the stack or the heap) and then execute it.
80876 +
80877 + If the attacked program was running with different (typically
80878 + higher) privileges than that of the attacker, then he can elevate
80879 + his own privilege level (e.g. get a root shell, write to files for
80880 + which he does not have write access to, etc).
80881 +
80882 + Enabling this option will let you choose from various features
80883 + that prevent the injection and execution of 'foreign' code in
80884 + a program.
80885 +
80886 + This will also break programs that rely on the old behaviour and
80887 + expect that dynamically allocated memory via the malloc() family
80888 + of functions is executable (which it is not). Notable examples
80889 + are the XFree86 4.x server, the java runtime and wine.
80890 +
80891 +config PAX_PAGEEXEC
80892 + bool "Paging based non-executable pages"
80893 + default y if GRKERNSEC_CONFIG_AUTO
80894 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
80895 + select S390_SWITCH_AMODE if S390
80896 + select S390_EXEC_PROTECT if S390
80897 + select ARCH_TRACK_EXEC_LIMIT if X86_32
80898 + help
80899 + This implementation is based on the paging feature of the CPU.
80900 + On i386 without hardware non-executable bit support there is a
80901 + variable but usually low performance impact, however on Intel's
80902 + P4 core based CPUs it is very high so you should not enable this
80903 + for kernels meant to be used on such CPUs.
80904 +
80905 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
80906 + with hardware non-executable bit support there is no performance
80907 + impact, on ppc the impact is negligible.
80908 +
80909 + Note that several architectures require various emulations due to
80910 + badly designed userland ABIs, this will cause a performance impact
80911 + but will disappear as soon as userland is fixed. For example, ppc
80912 + userland MUST have been built with secure-plt by a recent toolchain.
80913 +
80914 +config PAX_SEGMEXEC
80915 + bool "Segmentation based non-executable pages"
80916 + default y if GRKERNSEC_CONFIG_AUTO
80917 + depends on PAX_NOEXEC && X86_32
80918 + help
80919 + This implementation is based on the segmentation feature of the
80920 + CPU and has a very small performance impact, however applications
80921 + will be limited to a 1.5 GB address space instead of the normal
80922 + 3 GB.
80923 +
80924 +config PAX_EMUTRAMP
80925 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
80926 + default y if PARISC
80927 + help
80928 + There are some programs and libraries that for one reason or
80929 + another attempt to execute special small code snippets from
80930 + non-executable memory pages. Most notable examples are the
80931 + signal handler return code generated by the kernel itself and
80932 + the GCC trampolines.
80933 +
80934 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
80935 + such programs will no longer work under your kernel.
80936 +
80937 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
80938 + utilities to enable trampoline emulation for the affected programs
80939 + yet still have the protection provided by the non-executable pages.
80940 +
80941 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
80942 + your system will not even boot.
80943 +
80944 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
80945 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
80946 + for the affected files.
80947 +
80948 + NOTE: enabling this feature *may* open up a loophole in the
80949 + protection provided by non-executable pages that an attacker
80950 + could abuse. Therefore the best solution is to not have any
80951 + files on your system that would require this option. This can
80952 + be achieved by not using libc5 (which relies on the kernel
80953 + signal handler return code) and not using or rewriting programs
80954 + that make use of the nested function implementation of GCC.
80955 + Skilled users can just fix GCC itself so that it implements
80956 + nested function calls in a way that does not interfere with PaX.
80957 +
80958 +config PAX_EMUSIGRT
80959 + bool "Automatically emulate sigreturn trampolines"
80960 + depends on PAX_EMUTRAMP && PARISC
80961 + default y
80962 + help
80963 + Enabling this option will have the kernel automatically detect
80964 + and emulate signal return trampolines executing on the stack
80965 + that would otherwise lead to task termination.
80966 +
80967 + This solution is intended as a temporary one for users with
80968 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
80969 + Modula-3 runtime, etc) or executables linked to such, basically
80970 + everything that does not specify its own SA_RESTORER function in
80971 + normal executable memory like glibc 2.1+ does.
80972 +
80973 + On parisc you MUST enable this option, otherwise your system will
80974 + not even boot.
80975 +
80976 + NOTE: this feature cannot be disabled on a per executable basis
80977 + and since it *does* open up a loophole in the protection provided
80978 + by non-executable pages, the best solution is to not have any
80979 + files on your system that would require this option.
80980 +
80981 +config PAX_MPROTECT
80982 + bool "Restrict mprotect()"
80983 + default y if GRKERNSEC_CONFIG_AUTO
80984 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
80985 + help
80986 + Enabling this option will prevent programs from
80987 + - changing the executable status of memory pages that were
80988 + not originally created as executable,
80989 + - making read-only executable pages writable again,
80990 + - creating executable pages from anonymous memory,
80991 + - making read-only-after-relocations (RELRO) data pages writable again.
80992 +
80993 + You should say Y here to complete the protection provided by
80994 + the enforcement of non-executable pages.
80995 +
80996 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
80997 + this feature on a per file basis.
80998 +
80999 +config PAX_MPROTECT_COMPAT
81000 + bool "Use legacy/compat protection demoting (read help)"
81001 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
81002 + depends on PAX_MPROTECT
81003 + help
81004 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
81005 + by sending the proper error code to the application. For some broken
81006 + userland, this can cause problems with Python or other applications. The
81007 + current implementation however allows for applications like clamav to
81008 + detect if JIT compilation/execution is allowed and to fall back gracefully
81009 + to an interpreter-based mode if it does not. While we encourage everyone
81010 + to use the current implementation as-is and push upstream to fix broken
81011 + userland (note that the RWX logging option can assist with this), in some
81012 + environments this may not be possible. Having to disable MPROTECT
81013 + completely on certain binaries reduces the security benefit of PaX,
81014 + so this option is provided for those environments to revert to the old
81015 + behavior.
81016 +
81017 +config PAX_ELFRELOCS
81018 + bool "Allow ELF text relocations (read help)"
81019 + depends on PAX_MPROTECT
81020 + default n
81021 + help
81022 + Non-executable pages and mprotect() restrictions are effective
81023 + in preventing the introduction of new executable code into an
81024 + attacked task's address space. There remain only two venues
81025 + for this kind of attack: if the attacker can execute already
81026 + existing code in the attacked task then he can either have it
81027 + create and mmap() a file containing his code or have it mmap()
81028 + an already existing ELF library that does not have position
81029 + independent code in it and use mprotect() on it to make it
81030 + writable and copy his code there. While protecting against
81031 + the former approach is beyond PaX, the latter can be prevented
81032 + by having only PIC ELF libraries on one's system (which do not
81033 + need to relocate their code). If you are sure this is your case,
81034 + as is the case with all modern Linux distributions, then leave
81035 + this option disabled. You should say 'n' here.
81036 +
81037 +config PAX_ETEXECRELOCS
81038 + bool "Allow ELF ET_EXEC text relocations"
81039 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
81040 + select PAX_ELFRELOCS
81041 + default y
81042 + help
81043 + On some architectures there are incorrectly created applications
81044 + that require text relocations and would not work without enabling
81045 + this option. If you are an alpha, ia64 or parisc user, you should
81046 + enable this option and disable it once you have made sure that
81047 + none of your applications need it.
81048 +
81049 +config PAX_EMUPLT
81050 + bool "Automatically emulate ELF PLT"
81051 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
81052 + default y
81053 + help
81054 + Enabling this option will have the kernel automatically detect
81055 + and emulate the Procedure Linkage Table entries in ELF files.
81056 + On some architectures such entries are in writable memory, and
81057 + become non-executable leading to task termination. Therefore
81058 + it is mandatory that you enable this option on alpha, parisc,
81059 + sparc and sparc64, otherwise your system would not even boot.
81060 +
81061 + NOTE: this feature *does* open up a loophole in the protection
81062 + provided by the non-executable pages, therefore the proper
81063 + solution is to modify the toolchain to produce a PLT that does
81064 + not need to be writable.
81065 +
81066 +config PAX_DLRESOLVE
81067 + bool 'Emulate old glibc resolver stub'
81068 + depends on PAX_EMUPLT && SPARC
81069 + default n
81070 + help
81071 + This option is needed if userland has an old glibc (before 2.4)
81072 + that puts a 'save' instruction into the runtime generated resolver
81073 + stub that needs special emulation.
81074 +
81075 +config PAX_KERNEXEC
81076 + bool "Enforce non-executable kernel pages"
81077 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
81078 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
81079 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
81080 + select PAX_KERNEXEC_PLUGIN if X86_64
81081 + help
81082 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
81083 + that is, enabling this option will make it harder to inject
81084 + and execute 'foreign' code in kernel memory itself.
81085 +
81086 +choice
81087 + prompt "Return Address Instrumentation Method"
81088 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
81089 + depends on PAX_KERNEXEC_PLUGIN
81090 + help
81091 + Select the method used to instrument function pointer dereferences.
81092 + Note that binary modules cannot be instrumented by this approach.
81093 +
81094 + Note that the implementation requires a gcc with plugin support,
81095 + i.e., gcc 4.5 or newer. You may need to install the supporting
81096 + headers explicitly in addition to the normal gcc package.
81097 +
81098 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
81099 + bool "bts"
81100 + help
81101 + This method is compatible with binary only modules but has
81102 + a higher runtime overhead.
81103 +
81104 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
81105 + bool "or"
81106 + depends on !PARAVIRT
81107 + help
81108 + This method is incompatible with binary only modules but has
81109 + a lower runtime overhead.
81110 +endchoice
81111 +
81112 +config PAX_KERNEXEC_PLUGIN_METHOD
81113 + string
81114 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
81115 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
81116 + default ""
81117 +
81118 +config PAX_KERNEXEC_MODULE_TEXT
81119 + int "Minimum amount of memory reserved for module code"
81120 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
81121 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
81122 + depends on PAX_KERNEXEC && X86_32 && MODULES
81123 + help
81124 + Due to implementation details the kernel must reserve a fixed
81125 + amount of memory for module code at compile time that cannot be
81126 + changed at runtime. Here you can specify the minimum amount
81127 + in MB that will be reserved. Due to the same implementation
81128 + details this size will always be rounded up to the next 2/4 MB
81129 + boundary (depends on PAE) so the actually available memory for
81130 + module code will usually be more than this minimum.
81131 +
81132 + The default 4 MB should be enough for most users but if you have
81133 + an excessive number of modules (e.g., most distribution configs
81134 + compile many drivers as modules) or use huge modules such as
81135 + nvidia's kernel driver, you will need to adjust this amount.
81136 + A good rule of thumb is to look at your currently loaded kernel
81137 + modules and add up their sizes.
81138 +
81139 +endmenu
81140 +
81141 +menu "Address Space Layout Randomization"
81142 + depends on PAX
81143 +
81144 +config PAX_ASLR
81145 + bool "Address Space Layout Randomization"
81146 + default y if GRKERNSEC_CONFIG_AUTO
81147 + help
81148 + Many if not most exploit techniques rely on the knowledge of
81149 + certain addresses in the attacked program. The following options
81150 + will allow the kernel to apply a certain amount of randomization
81151 + to specific parts of the program thereby forcing an attacker to
81152 + guess them in most cases. Any failed guess will most likely crash
81153 + the attacked program which allows the kernel to detect such attempts
81154 + and react on them. PaX itself provides no reaction mechanisms,
81155 + instead it is strongly encouraged that you make use of Nergal's
81156 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
81157 + (http://www.grsecurity.net/) built-in crash detection features or
81158 + develop one yourself.
81159 +
81160 + By saying Y here you can choose to randomize the following areas:
81161 + - top of the task's kernel stack
81162 + - top of the task's userland stack
81163 + - base address for mmap() requests that do not specify one
81164 + (this includes all libraries)
81165 + - base address of the main executable
81166 +
81167 + It is strongly recommended to say Y here as address space layout
81168 + randomization has negligible impact on performance yet it provides
81169 + a very effective protection.
81170 +
81171 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
81172 + this feature on a per file basis.
81173 +
81174 +config PAX_RANDKSTACK
81175 + bool "Randomize kernel stack base"
81176 + default y if GRKERNSEC_CONFIG_AUTO
81177 + depends on X86_TSC && X86
81178 + help
81179 + By saying Y here the kernel will randomize every task's kernel
81180 + stack on every system call. This will not only force an attacker
81181 + to guess it but also prevent him from making use of possible
81182 + leaked information about it.
81183 +
81184 + Since the kernel stack is a rather scarce resource, randomization
81185 + may cause unexpected stack overflows, therefore you should very
81186 + carefully test your system. Note that once enabled in the kernel
81187 + configuration, this feature cannot be disabled on a per file basis.
81188 +
81189 +config PAX_RANDUSTACK
81190 + bool "Randomize user stack base"
81191 + default y if GRKERNSEC_CONFIG_AUTO
81192 + depends on PAX_ASLR
81193 + help
81194 + By saying Y here the kernel will randomize every task's userland
81195 + stack. The randomization is done in two steps where the second
81196 + one may apply a big amount of shift to the top of the stack and
81197 + cause problems for programs that want to use lots of memory (more
81198 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
81199 + For this reason the second step can be controlled by 'chpax' or
81200 + 'paxctl' on a per file basis.
81201 +
81202 +config PAX_RANDMMAP
81203 + bool "Randomize mmap() base"
81204 + default y if GRKERNSEC_CONFIG_AUTO
81205 + depends on PAX_ASLR
81206 + help
81207 + By saying Y here the kernel will use a randomized base address for
81208 + mmap() requests that do not specify one themselves. As a result
81209 + all dynamically loaded libraries will appear at random addresses
81210 + and therefore be harder to exploit by a technique where an attacker
81211 + attempts to execute library code for his purposes (e.g. spawn a
81212 + shell from an exploited program that is running at an elevated
81213 + privilege level).
81214 +
81215 + Furthermore, if a program is relinked as a dynamic ELF file, its
81216 + base address will be randomized as well, completing the full
81217 + randomization of the address space layout. Attacking such programs
81218 + becomes a guess game. You can find an example of doing this at
81219 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
81220 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
81221 +
81222 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
81223 + feature on a per file basis.
81224 +
81225 +endmenu
81226 +
81227 +menu "Miscellaneous hardening features"
81228 +
81229 +config PAX_MEMORY_SANITIZE
81230 + bool "Sanitize all freed memory"
81231 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
81232 + depends on !HIBERNATION
81233 + help
81234 + By saying Y here the kernel will erase memory pages as soon as they
81235 + are freed. This in turn reduces the lifetime of data stored in the
81236 + pages, making it less likely that sensitive information such as
81237 + passwords, cryptographic secrets, etc stay in memory for too long.
81238 +
81239 + This is especially useful for programs whose runtime is short, long
81240 + lived processes and the kernel itself benefit from this as long as
81241 + they operate on whole memory pages and ensure timely freeing of pages
81242 + that may hold sensitive information.
81243 +
81244 + The tradeoff is performance impact, on a single CPU system kernel
81245 + compilation sees a 3% slowdown, other systems and workloads may vary
81246 + and you are advised to test this feature on your expected workload
81247 + before deploying it.
81248 +
81249 + Note that this feature does not protect data stored in live pages,
81250 + e.g., process memory swapped to disk may stay there for a long time.
81251 +
81252 +config PAX_MEMORY_STACKLEAK
81253 + bool "Sanitize kernel stack"
81254 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
81255 + depends on X86
81256 + help
81257 + By saying Y here the kernel will erase the kernel stack before it
81258 + returns from a system call. This in turn reduces the information
81259 + that a kernel stack leak bug can reveal.
81260 +
81261 + Note that such a bug can still leak information that was put on
81262 + the stack by the current system call (the one eventually triggering
81263 + the bug) but traces of earlier system calls on the kernel stack
81264 + cannot leak anymore.
81265 +
81266 + The tradeoff is performance impact: on a single CPU system kernel
81267 + compilation sees a 1% slowdown, other systems and workloads may vary
81268 + and you are advised to test this feature on your expected workload
81269 + before deploying it.
81270 +
81271 + Note that the full feature requires a gcc with plugin support,
81272 + i.e., gcc 4.5 or newer. You may need to install the supporting
81273 + headers explicitly in addition to the normal gcc package. Using
81274 + older gcc versions means that functions with large enough stack
81275 + frames may leave uninitialized memory behind that may be exposed
81276 + to a later syscall leaking the stack.
81277 +
81278 +config PAX_MEMORY_UDEREF
81279 + bool "Prevent invalid userland pointer dereference"
81280 + default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
81281 + depends on X86 && !UML_X86 && !XEN
81282 + select PAX_PER_CPU_PGD if X86_64
81283 + help
81284 + By saying Y here the kernel will be prevented from dereferencing
81285 + userland pointers in contexts where the kernel expects only kernel
81286 + pointers. This is both a useful runtime debugging feature and a
81287 + security measure that prevents exploiting a class of kernel bugs.
81288 +
81289 + The tradeoff is that some virtualization solutions may experience
81290 + a huge slowdown and therefore you should not enable this feature
81291 + for kernels meant to run in such environments. Whether a given VM
81292 + solution is affected or not is best determined by simply trying it
81293 + out, the performance impact will be obvious right on boot as this
81294 + mechanism engages from very early on. A good rule of thumb is that
81295 + VMs running on CPUs without hardware virtualization support (i.e.,
81296 + the majority of IA-32 CPUs) will likely experience the slowdown.
81297 +
81298 +config PAX_REFCOUNT
81299 + bool "Prevent various kernel object reference counter overflows"
81300 + default y if GRKERNSEC_CONFIG_AUTO
81301 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
81302 + help
81303 + By saying Y here the kernel will detect and prevent overflowing
81304 + various (but not all) kinds of object reference counters. Such
81305 + overflows can normally occur due to bugs only and are often, if
81306 + not always, exploitable.
81307 +
81308 + The tradeoff is that data structures protected by an overflowed
81309 + refcount will never be freed and therefore will leak memory. Note
81310 + that this leak also happens even without this protection but in
81311 + that case the overflow can eventually trigger the freeing of the
81312 + data structure while it is still being used elsewhere, resulting
81313 + in the exploitable situation that this feature prevents.
81314 +
81315 + Since this has a negligible performance impact, you should enable
81316 + this feature.
81317 +
81318 +config PAX_USERCOPY
81319 + bool "Harden heap object copies between kernel and userland"
81320 + default y if GRKERNSEC_CONFIG_AUTO
81321 + depends on X86 || PPC || SPARC || ARM
81322 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
81323 + select PAX_USERCOPY_SLABS
81324 + help
81325 + By saying Y here the kernel will enforce the size of heap objects
81326 + when they are copied in either direction between the kernel and
81327 + userland, even if only a part of the heap object is copied.
81328 +
81329 + Specifically, this checking prevents information leaking from the
81330 + kernel heap during kernel to userland copies (if the kernel heap
81331 + object is otherwise fully initialized) and prevents kernel heap
81332 + overflows during userland to kernel copies.
81333 +
81334 + Note that the current implementation provides the strictest bounds
81335 + checks for the SLUB allocator.
81336 +
81337 + Enabling this option also enables per-slab cache protection against
81338 + data in a given cache being copied into/out of via userland
81339 + accessors. Though the whitelist of regions will be reduced over
81340 + time, it notably protects important data structures like task structs.
81341 +
81342 + If frame pointers are enabled on x86, this option will also restrict
81343 + copies into and out of the kernel stack to local variables within a
81344 + single frame.
81345 +
81346 + Since this has a negligible performance impact, you should enable
81347 + this feature.
81348 +
81349 +config PAX_SIZE_OVERFLOW
81350 + bool "Prevent various integer overflows in function size parameters"
81351 + default y if GRKERNSEC_CONFIG_AUTO
81352 + depends on X86
81353 + help
81354 + By saying Y here the kernel recomputes expressions of function
81355 + arguments marked by a size_overflow attribute with double integer
81356 + precision (DImode/TImode for 32/64 bit integer types).
81357 +
81358 + The recomputed argument is checked against TYPE_MAX and an event
81359 + is logged on overflow and the triggering process is killed.
81360 +
81361 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
81362 +
81363 + Note that the implementation requires a gcc with plugin support,
81364 + i.e., gcc 4.5 or newer. You may need to install the supporting
81365 + headers explicitly in addition to the normal gcc package.
81366 +
81367 +config PAX_LATENT_ENTROPY
81368 + bool "Generate some entropy during boot"
81369 + default y if GRKERNSEC_CONFIG_AUTO
81370 + help
81371 + By saying Y here the kernel will instrument early boot code to
81372 + extract some entropy from both original and artificially created
81373 + program state. This will help especially embedded systems where
81374 + there is little 'natural' source of entropy normally. The cost
81375 + is some slowdown of the boot process.
81376 +
81377 + Note that the implementation requires a gcc with plugin support,
81378 + i.e., gcc 4.5 or newer. You may need to install the supporting
81379 + headers explicitly in addition to the normal gcc package.
81380 +
81381 + Note that entropy extracted this way is not cryptographically
81382 + secure!
81383 +
81384 +endmenu
81385 +
81386 +endmenu
81387 +
81388 +source grsecurity/Kconfig
81389 +
81390 +endmenu
81391 +
81392 +endmenu
81393 +
81394 source security/keys/Kconfig
81395
81396 config SECURITY_DMESG_RESTRICT
81397 @@ -103,7 +982,7 @@ config INTEL_TXT
81398 config LSM_MMAP_MIN_ADDR
81399 int "Low address space for LSM to protect from user allocation"
81400 depends on SECURITY && SECURITY_SELINUX
81401 - default 32768 if ARM
81402 + default 32768 if ALPHA || ARM || PARISC || SPARC32
81403 default 65536
81404 help
81405 This is the portion of low virtual memory which should be protected
81406 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
81407 index 8ea39aa..8569ac5 100644
81408 --- a/security/apparmor/lsm.c
81409 +++ b/security/apparmor/lsm.c
81410 @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
81411 return error;
81412 }
81413
81414 -static struct security_operations apparmor_ops = {
81415 +static struct security_operations apparmor_ops __read_only = {
81416 .name = "apparmor",
81417
81418 .ptrace_access_check = apparmor_ptrace_access_check,
81419 diff --git a/security/commoncap.c b/security/commoncap.c
81420 index 6dbae46..d5611fd 100644
81421 --- a/security/commoncap.c
81422 +++ b/security/commoncap.c
81423 @@ -415,6 +415,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
81424 return 0;
81425 }
81426
81427 +/* returns:
81428 + 1 for suid privilege
81429 + 2 for sgid privilege
81430 + 3 for fscap privilege
81431 +*/
81432 +int is_privileged_binary(const struct dentry *dentry)
81433 +{
81434 + struct cpu_vfs_cap_data capdata;
81435 + struct inode *inode = dentry->d_inode;
81436 +
81437 + if (!inode || S_ISDIR(inode->i_mode))
81438 + return 0;
81439 +
81440 + if (inode->i_mode & S_ISUID)
81441 + return 1;
81442 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
81443 + return 2;
81444 +
81445 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
81446 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
81447 + return 3;
81448 + }
81449 +
81450 + return 0;
81451 +}
81452 +
81453 /*
81454 * Attempt to get the on-exec apply capability sets for an executable file from
81455 * its xattrs and, if present, apply them to the proposed credentials being
81456 @@ -583,6 +609,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
81457 const struct cred *cred = current_cred();
81458 kuid_t root_uid = make_kuid(cred->user_ns, 0);
81459
81460 + if (gr_acl_enable_at_secure())
81461 + return 1;
81462 +
81463 if (!uid_eq(cred->uid, root_uid)) {
81464 if (bprm->cap_effective)
81465 return 1;
81466 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
81467 index 3ccf7ac..d73ad64 100644
81468 --- a/security/integrity/ima/ima.h
81469 +++ b/security/integrity/ima/ima.h
81470 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
81471 extern spinlock_t ima_queue_lock;
81472
81473 struct ima_h_table {
81474 - atomic_long_t len; /* number of stored measurements in the list */
81475 - atomic_long_t violations;
81476 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
81477 + atomic_long_unchecked_t violations;
81478 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
81479 };
81480 extern struct ima_h_table ima_htable;
81481 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
81482 index 88a2788..581ab92 100644
81483 --- a/security/integrity/ima/ima_api.c
81484 +++ b/security/integrity/ima/ima_api.c
81485 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
81486 int result;
81487
81488 /* can overflow, only indicator */
81489 - atomic_long_inc(&ima_htable.violations);
81490 + atomic_long_inc_unchecked(&ima_htable.violations);
81491
81492 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
81493 if (!entry) {
81494 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
81495 index e1aa2b4..52027bf 100644
81496 --- a/security/integrity/ima/ima_fs.c
81497 +++ b/security/integrity/ima/ima_fs.c
81498 @@ -28,12 +28,12 @@
81499 static int valid_policy = 1;
81500 #define TMPBUFLEN 12
81501 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
81502 - loff_t *ppos, atomic_long_t *val)
81503 + loff_t *ppos, atomic_long_unchecked_t *val)
81504 {
81505 char tmpbuf[TMPBUFLEN];
81506 ssize_t len;
81507
81508 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
81509 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
81510 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
81511 }
81512
81513 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
81514 index 55a6271..ad829c3 100644
81515 --- a/security/integrity/ima/ima_queue.c
81516 +++ b/security/integrity/ima/ima_queue.c
81517 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
81518 INIT_LIST_HEAD(&qe->later);
81519 list_add_tail_rcu(&qe->later, &ima_measurements);
81520
81521 - atomic_long_inc(&ima_htable.len);
81522 + atomic_long_inc_unchecked(&ima_htable.len);
81523 key = ima_hash_key(entry->digest);
81524 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
81525 return 0;
81526 diff --git a/security/keys/compat.c b/security/keys/compat.c
81527 index c92d42b..341e7ea 100644
81528 --- a/security/keys/compat.c
81529 +++ b/security/keys/compat.c
81530 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
81531 if (ret == 0)
81532 goto no_payload_free;
81533
81534 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
81535 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
81536
81537 if (iov != iovstack)
81538 kfree(iov);
81539 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
81540 index 0f5b3f0..b8d47c1 100644
81541 --- a/security/keys/keyctl.c
81542 +++ b/security/keys/keyctl.c
81543 @@ -966,7 +966,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
81544 /*
81545 * Copy the iovec data from userspace
81546 */
81547 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
81548 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
81549 unsigned ioc)
81550 {
81551 for (; ioc > 0; ioc--) {
81552 @@ -988,7 +988,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
81553 * If successful, 0 will be returned.
81554 */
81555 long keyctl_instantiate_key_common(key_serial_t id,
81556 - const struct iovec *payload_iov,
81557 + const struct iovec __user *payload_iov,
81558 unsigned ioc,
81559 size_t plen,
81560 key_serial_t ringid)
81561 @@ -1083,7 +1083,7 @@ long keyctl_instantiate_key(key_serial_t id,
81562 [0].iov_len = plen
81563 };
81564
81565 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
81566 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
81567 }
81568
81569 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
81570 @@ -1116,7 +1116,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
81571 if (ret == 0)
81572 goto no_payload_free;
81573
81574 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
81575 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
81576
81577 if (iov != iovstack)
81578 kfree(iov);
81579 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
81580 index 7445875..262834f 100644
81581 --- a/security/keys/keyring.c
81582 +++ b/security/keys/keyring.c
81583 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
81584 ret = -EFAULT;
81585
81586 for (loop = 0; loop < klist->nkeys; loop++) {
81587 + key_serial_t serial;
81588 key = rcu_deref_link_locked(klist, loop,
81589 keyring);
81590 + serial = key->serial;
81591
81592 tmp = sizeof(key_serial_t);
81593 if (tmp > buflen)
81594 tmp = buflen;
81595
81596 - if (copy_to_user(buffer,
81597 - &key->serial,
81598 - tmp) != 0)
81599 + if (copy_to_user(buffer, &serial, tmp))
81600 goto error;
81601
81602 buflen -= tmp;
81603 diff --git a/security/min_addr.c b/security/min_addr.c
81604 index f728728..6457a0c 100644
81605 --- a/security/min_addr.c
81606 +++ b/security/min_addr.c
81607 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
81608 */
81609 static void update_mmap_min_addr(void)
81610 {
81611 +#ifndef SPARC
81612 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
81613 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
81614 mmap_min_addr = dac_mmap_min_addr;
81615 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
81616 #else
81617 mmap_min_addr = dac_mmap_min_addr;
81618 #endif
81619 +#endif
81620 }
81621
81622 /*
81623 diff --git a/security/security.c b/security/security.c
81624 index 860aeb3..45765c0 100644
81625 --- a/security/security.c
81626 +++ b/security/security.c
81627 @@ -20,6 +20,7 @@
81628 #include <linux/ima.h>
81629 #include <linux/evm.h>
81630 #include <linux/fsnotify.h>
81631 +#include <linux/mm.h>
81632 #include <linux/mman.h>
81633 #include <linux/mount.h>
81634 #include <linux/personality.h>
81635 @@ -32,8 +33,8 @@
81636 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
81637 CONFIG_DEFAULT_SECURITY;
81638
81639 -static struct security_operations *security_ops;
81640 -static struct security_operations default_security_ops = {
81641 +static struct security_operations *security_ops __read_only;
81642 +static struct security_operations default_security_ops __read_only = {
81643 .name = "default",
81644 };
81645
81646 @@ -74,7 +75,9 @@ int __init security_init(void)
81647
81648 void reset_security_ops(void)
81649 {
81650 + pax_open_kernel();
81651 security_ops = &default_security_ops;
81652 + pax_close_kernel();
81653 }
81654
81655 /* Save user chosen LSM */
81656 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
81657 index db10db2..99a640f 100644
81658 --- a/security/selinux/hooks.c
81659 +++ b/security/selinux/hooks.c
81660 @@ -95,8 +95,6 @@
81661
81662 #define NUM_SEL_MNT_OPTS 5
81663
81664 -extern struct security_operations *security_ops;
81665 -
81666 /* SECMARK reference count */
81667 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
81668
81669 @@ -5511,7 +5509,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
81670
81671 #endif
81672
81673 -static struct security_operations selinux_ops = {
81674 +static struct security_operations selinux_ops __read_only = {
81675 .name = "selinux",
81676
81677 .ptrace_access_check = selinux_ptrace_access_check,
81678 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
81679 index c220f31..89fab3f 100644
81680 --- a/security/selinux/include/xfrm.h
81681 +++ b/security/selinux/include/xfrm.h
81682 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
81683
81684 static inline void selinux_xfrm_notify_policyload(void)
81685 {
81686 - atomic_inc(&flow_cache_genid);
81687 + atomic_inc_unchecked(&flow_cache_genid);
81688 }
81689 #else
81690 static inline int selinux_xfrm_enabled(void)
81691 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
81692 index ee0bb57..57fcd43 100644
81693 --- a/security/smack/smack_lsm.c
81694 +++ b/security/smack/smack_lsm.c
81695 @@ -3432,7 +3432,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
81696 return 0;
81697 }
81698
81699 -struct security_operations smack_ops = {
81700 +struct security_operations smack_ops __read_only = {
81701 .name = "smack",
81702
81703 .ptrace_access_check = smack_ptrace_access_check,
81704 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
81705 index c2d04a5..e6a1aeb 100644
81706 --- a/security/tomoyo/tomoyo.c
81707 +++ b/security/tomoyo/tomoyo.c
81708 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
81709 * tomoyo_security_ops is a "struct security_operations" which is used for
81710 * registering TOMOYO.
81711 */
81712 -static struct security_operations tomoyo_security_ops = {
81713 +static struct security_operations tomoyo_security_ops __read_only = {
81714 .name = "tomoyo",
81715 .cred_alloc_blank = tomoyo_cred_alloc_blank,
81716 .cred_prepare = tomoyo_cred_prepare,
81717 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
81718 index 51d6709..1f3dbe2 100644
81719 --- a/security/yama/Kconfig
81720 +++ b/security/yama/Kconfig
81721 @@ -1,6 +1,6 @@
81722 config SECURITY_YAMA
81723 bool "Yama support"
81724 - depends on SECURITY
81725 + depends on SECURITY && !GRKERNSEC
81726 select SECURITYFS
81727 select SECURITY_PATH
81728 default n
81729 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
81730 index 4cedc69..e59d8a3 100644
81731 --- a/sound/aoa/codecs/onyx.c
81732 +++ b/sound/aoa/codecs/onyx.c
81733 @@ -54,7 +54,7 @@ struct onyx {
81734 spdif_locked:1,
81735 analog_locked:1,
81736 original_mute:2;
81737 - int open_count;
81738 + local_t open_count;
81739 struct codec_info *codec_info;
81740
81741 /* mutex serializes concurrent access to the device
81742 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
81743 struct onyx *onyx = cii->codec_data;
81744
81745 mutex_lock(&onyx->mutex);
81746 - onyx->open_count++;
81747 + local_inc(&onyx->open_count);
81748 mutex_unlock(&onyx->mutex);
81749
81750 return 0;
81751 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
81752 struct onyx *onyx = cii->codec_data;
81753
81754 mutex_lock(&onyx->mutex);
81755 - onyx->open_count--;
81756 - if (!onyx->open_count)
81757 + if (local_dec_and_test(&onyx->open_count))
81758 onyx->spdif_locked = onyx->analog_locked = 0;
81759 mutex_unlock(&onyx->mutex);
81760
81761 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
81762 index ffd2025..df062c9 100644
81763 --- a/sound/aoa/codecs/onyx.h
81764 +++ b/sound/aoa/codecs/onyx.h
81765 @@ -11,6 +11,7 @@
81766 #include <linux/i2c.h>
81767 #include <asm/pmac_low_i2c.h>
81768 #include <asm/prom.h>
81769 +#include <asm/local.h>
81770
81771 /* PCM3052 register definitions */
81772
81773 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
81774 index 08fde00..0bf641a 100644
81775 --- a/sound/core/oss/pcm_oss.c
81776 +++ b/sound/core/oss/pcm_oss.c
81777 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
81778 if (in_kernel) {
81779 mm_segment_t fs;
81780 fs = snd_enter_user();
81781 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
81782 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
81783 snd_leave_user(fs);
81784 } else {
81785 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
81786 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
81787 }
81788 if (ret != -EPIPE && ret != -ESTRPIPE)
81789 break;
81790 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
81791 if (in_kernel) {
81792 mm_segment_t fs;
81793 fs = snd_enter_user();
81794 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
81795 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
81796 snd_leave_user(fs);
81797 } else {
81798 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
81799 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
81800 }
81801 if (ret == -EPIPE) {
81802 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
81803 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
81804 struct snd_pcm_plugin_channel *channels;
81805 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
81806 if (!in_kernel) {
81807 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
81808 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
81809 return -EFAULT;
81810 buf = runtime->oss.buffer;
81811 }
81812 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
81813 }
81814 } else {
81815 tmp = snd_pcm_oss_write2(substream,
81816 - (const char __force *)buf,
81817 + (const char __force_kernel *)buf,
81818 runtime->oss.period_bytes, 0);
81819 if (tmp <= 0)
81820 goto err;
81821 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
81822 struct snd_pcm_runtime *runtime = substream->runtime;
81823 snd_pcm_sframes_t frames, frames1;
81824 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
81825 - char __user *final_dst = (char __force __user *)buf;
81826 + char __user *final_dst = (char __force_user *)buf;
81827 if (runtime->oss.plugin_first) {
81828 struct snd_pcm_plugin_channel *channels;
81829 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
81830 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
81831 xfer += tmp;
81832 runtime->oss.buffer_used -= tmp;
81833 } else {
81834 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
81835 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
81836 runtime->oss.period_bytes, 0);
81837 if (tmp <= 0)
81838 goto err;
81839 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
81840 size1);
81841 size1 /= runtime->channels; /* frames */
81842 fs = snd_enter_user();
81843 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
81844 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
81845 snd_leave_user(fs);
81846 }
81847 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
81848 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
81849 index 91cdf94..4085161 100644
81850 --- a/sound/core/pcm_compat.c
81851 +++ b/sound/core/pcm_compat.c
81852 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
81853 int err;
81854
81855 fs = snd_enter_user();
81856 - err = snd_pcm_delay(substream, &delay);
81857 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
81858 snd_leave_user(fs);
81859 if (err < 0)
81860 return err;
81861 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
81862 index 53b5ada..2db94c8 100644
81863 --- a/sound/core/pcm_native.c
81864 +++ b/sound/core/pcm_native.c
81865 @@ -2780,11 +2780,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
81866 switch (substream->stream) {
81867 case SNDRV_PCM_STREAM_PLAYBACK:
81868 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
81869 - (void __user *)arg);
81870 + (void __force_user *)arg);
81871 break;
81872 case SNDRV_PCM_STREAM_CAPTURE:
81873 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
81874 - (void __user *)arg);
81875 + (void __force_user *)arg);
81876 break;
81877 default:
81878 result = -EINVAL;
81879 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
81880 index 5cf8d65..912a79c 100644
81881 --- a/sound/core/seq/seq_device.c
81882 +++ b/sound/core/seq/seq_device.c
81883 @@ -64,7 +64,7 @@ struct ops_list {
81884 int argsize; /* argument size */
81885
81886 /* operators */
81887 - struct snd_seq_dev_ops ops;
81888 + struct snd_seq_dev_ops *ops;
81889
81890 /* registred devices */
81891 struct list_head dev_list; /* list of devices */
81892 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
81893
81894 mutex_lock(&ops->reg_mutex);
81895 /* copy driver operators */
81896 - ops->ops = *entry;
81897 + ops->ops = entry;
81898 ops->driver |= DRIVER_LOADED;
81899 ops->argsize = argsize;
81900
81901 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
81902 dev->name, ops->id, ops->argsize, dev->argsize);
81903 return -EINVAL;
81904 }
81905 - if (ops->ops.init_device(dev) >= 0) {
81906 + if (ops->ops->init_device(dev) >= 0) {
81907 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
81908 ops->num_init_devices++;
81909 } else {
81910 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
81911 dev->name, ops->id, ops->argsize, dev->argsize);
81912 return -EINVAL;
81913 }
81914 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
81915 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
81916 dev->status = SNDRV_SEQ_DEVICE_FREE;
81917 dev->driver_data = NULL;
81918 ops->num_init_devices--;
81919 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
81920 index 621e60e..f4543f5 100644
81921 --- a/sound/drivers/mts64.c
81922 +++ b/sound/drivers/mts64.c
81923 @@ -29,6 +29,7 @@
81924 #include <sound/initval.h>
81925 #include <sound/rawmidi.h>
81926 #include <sound/control.h>
81927 +#include <asm/local.h>
81928
81929 #define CARD_NAME "Miditerminal 4140"
81930 #define DRIVER_NAME "MTS64"
81931 @@ -67,7 +68,7 @@ struct mts64 {
81932 struct pardevice *pardev;
81933 int pardev_claimed;
81934
81935 - int open_count;
81936 + local_t open_count;
81937 int current_midi_output_port;
81938 int current_midi_input_port;
81939 u8 mode[MTS64_NUM_INPUT_PORTS];
81940 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
81941 {
81942 struct mts64 *mts = substream->rmidi->private_data;
81943
81944 - if (mts->open_count == 0) {
81945 + if (local_read(&mts->open_count) == 0) {
81946 /* We don't need a spinlock here, because this is just called
81947 if the device has not been opened before.
81948 So there aren't any IRQs from the device */
81949 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
81950
81951 msleep(50);
81952 }
81953 - ++(mts->open_count);
81954 + local_inc(&mts->open_count);
81955
81956 return 0;
81957 }
81958 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
81959 struct mts64 *mts = substream->rmidi->private_data;
81960 unsigned long flags;
81961
81962 - --(mts->open_count);
81963 - if (mts->open_count == 0) {
81964 + if (local_dec_return(&mts->open_count) == 0) {
81965 /* We need the spinlock_irqsave here because we can still
81966 have IRQs at this point */
81967 spin_lock_irqsave(&mts->lock, flags);
81968 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
81969
81970 msleep(500);
81971
81972 - } else if (mts->open_count < 0)
81973 - mts->open_count = 0;
81974 + } else if (local_read(&mts->open_count) < 0)
81975 + local_set(&mts->open_count, 0);
81976
81977 return 0;
81978 }
81979 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
81980 index b953fb4..1999c01 100644
81981 --- a/sound/drivers/opl4/opl4_lib.c
81982 +++ b/sound/drivers/opl4/opl4_lib.c
81983 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
81984 MODULE_DESCRIPTION("OPL4 driver");
81985 MODULE_LICENSE("GPL");
81986
81987 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
81988 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
81989 {
81990 int timeout = 10;
81991 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
81992 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
81993 index 3e32bd3..46fc152 100644
81994 --- a/sound/drivers/portman2x4.c
81995 +++ b/sound/drivers/portman2x4.c
81996 @@ -48,6 +48,7 @@
81997 #include <sound/initval.h>
81998 #include <sound/rawmidi.h>
81999 #include <sound/control.h>
82000 +#include <asm/local.h>
82001
82002 #define CARD_NAME "Portman 2x4"
82003 #define DRIVER_NAME "portman"
82004 @@ -85,7 +86,7 @@ struct portman {
82005 struct pardevice *pardev;
82006 int pardev_claimed;
82007
82008 - int open_count;
82009 + local_t open_count;
82010 int mode[PORTMAN_NUM_INPUT_PORTS];
82011 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
82012 };
82013 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
82014 index ea995af..f1bfa37 100644
82015 --- a/sound/firewire/amdtp.c
82016 +++ b/sound/firewire/amdtp.c
82017 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
82018 ptr = s->pcm_buffer_pointer + data_blocks;
82019 if (ptr >= pcm->runtime->buffer_size)
82020 ptr -= pcm->runtime->buffer_size;
82021 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
82022 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
82023
82024 s->pcm_period_pointer += data_blocks;
82025 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
82026 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
82027 */
82028 void amdtp_out_stream_update(struct amdtp_out_stream *s)
82029 {
82030 - ACCESS_ONCE(s->source_node_id_field) =
82031 + ACCESS_ONCE_RW(s->source_node_id_field) =
82032 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
82033 }
82034 EXPORT_SYMBOL(amdtp_out_stream_update);
82035 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
82036 index b680c5e..061b7a0 100644
82037 --- a/sound/firewire/amdtp.h
82038 +++ b/sound/firewire/amdtp.h
82039 @@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
82040 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
82041 struct snd_pcm_substream *pcm)
82042 {
82043 - ACCESS_ONCE(s->pcm) = pcm;
82044 + ACCESS_ONCE_RW(s->pcm) = pcm;
82045 }
82046
82047 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
82048 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
82049 index d428ffe..751ef78 100644
82050 --- a/sound/firewire/isight.c
82051 +++ b/sound/firewire/isight.c
82052 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
82053 ptr += count;
82054 if (ptr >= runtime->buffer_size)
82055 ptr -= runtime->buffer_size;
82056 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
82057 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
82058
82059 isight->period_counter += count;
82060 if (isight->period_counter >= runtime->period_size) {
82061 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
82062 if (err < 0)
82063 return err;
82064
82065 - ACCESS_ONCE(isight->pcm_active) = true;
82066 + ACCESS_ONCE_RW(isight->pcm_active) = true;
82067
82068 return 0;
82069 }
82070 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
82071 {
82072 struct isight *isight = substream->private_data;
82073
82074 - ACCESS_ONCE(isight->pcm_active) = false;
82075 + ACCESS_ONCE_RW(isight->pcm_active) = false;
82076
82077 mutex_lock(&isight->mutex);
82078 isight_stop_streaming(isight);
82079 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
82080
82081 switch (cmd) {
82082 case SNDRV_PCM_TRIGGER_START:
82083 - ACCESS_ONCE(isight->pcm_running) = true;
82084 + ACCESS_ONCE_RW(isight->pcm_running) = true;
82085 break;
82086 case SNDRV_PCM_TRIGGER_STOP:
82087 - ACCESS_ONCE(isight->pcm_running) = false;
82088 + ACCESS_ONCE_RW(isight->pcm_running) = false;
82089 break;
82090 default:
82091 return -EINVAL;
82092 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
82093 index 7bd5e33..1fcab12 100644
82094 --- a/sound/isa/cmi8330.c
82095 +++ b/sound/isa/cmi8330.c
82096 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
82097
82098 struct snd_pcm *pcm;
82099 struct snd_cmi8330_stream {
82100 - struct snd_pcm_ops ops;
82101 + snd_pcm_ops_no_const ops;
82102 snd_pcm_open_callback_t open;
82103 void *private_data; /* sb or wss */
82104 } streams[2];
82105 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
82106 index 733b014..56ce96f 100644
82107 --- a/sound/oss/sb_audio.c
82108 +++ b/sound/oss/sb_audio.c
82109 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
82110 buf16 = (signed short *)(localbuf + localoffs);
82111 while (c)
82112 {
82113 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
82114 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
82115 if (copy_from_user(lbuf8,
82116 userbuf+useroffs + p,
82117 locallen))
82118 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
82119 index 09d4648..cf234c7 100644
82120 --- a/sound/oss/swarm_cs4297a.c
82121 +++ b/sound/oss/swarm_cs4297a.c
82122 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
82123 {
82124 struct cs4297a_state *s;
82125 u32 pwr, id;
82126 - mm_segment_t fs;
82127 int rval;
82128 #ifndef CONFIG_BCM_CS4297A_CSWARM
82129 u64 cfg;
82130 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
82131 if (!rval) {
82132 char *sb1250_duart_present;
82133
82134 +#if 0
82135 + mm_segment_t fs;
82136 fs = get_fs();
82137 set_fs(KERNEL_DS);
82138 -#if 0
82139 val = SOUND_MASK_LINE;
82140 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
82141 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
82142 val = initvol[i].vol;
82143 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
82144 }
82145 + set_fs(fs);
82146 // cs4297a_write_ac97(s, 0x18, 0x0808);
82147 #else
82148 // cs4297a_write_ac97(s, 0x5e, 0x180);
82149 cs4297a_write_ac97(s, 0x02, 0x0808);
82150 cs4297a_write_ac97(s, 0x18, 0x0808);
82151 #endif
82152 - set_fs(fs);
82153
82154 list_add(&s->list, &cs4297a_devs);
82155
82156 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
82157 index 2fdaadb..7df8fc6 100644
82158 --- a/sound/pci/hda/hda_codec.h
82159 +++ b/sound/pci/hda/hda_codec.h
82160 @@ -611,7 +611,7 @@ struct hda_bus_ops {
82161 /* notify power-up/down from codec to controller */
82162 void (*pm_notify)(struct hda_bus *bus);
82163 #endif
82164 -};
82165 +} __no_const;
82166
82167 /* template to pass to the bus constructor */
82168 struct hda_bus_template {
82169 @@ -711,6 +711,7 @@ struct hda_codec_ops {
82170 #endif
82171 void (*reboot_notify)(struct hda_codec *codec);
82172 };
82173 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
82174
82175 /* record for amp information cache */
82176 struct hda_cache_head {
82177 @@ -741,7 +742,7 @@ struct hda_pcm_ops {
82178 struct snd_pcm_substream *substream);
82179 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
82180 struct snd_pcm_substream *substream);
82181 -};
82182 +} __no_const;
82183
82184 /* PCM information for each substream */
82185 struct hda_pcm_stream {
82186 @@ -799,7 +800,7 @@ struct hda_codec {
82187 const char *modelname; /* model name for preset */
82188
82189 /* set by patch */
82190 - struct hda_codec_ops patch_ops;
82191 + hda_codec_ops_no_const patch_ops;
82192
82193 /* PCM to create, set by patch_ops.build_pcms callback */
82194 unsigned int num_pcms;
82195 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
82196 index 0da778a..bc38b84 100644
82197 --- a/sound/pci/ice1712/ice1712.h
82198 +++ b/sound/pci/ice1712/ice1712.h
82199 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
82200 unsigned int mask_flags; /* total mask bits */
82201 struct snd_akm4xxx_ops {
82202 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
82203 - } ops;
82204 + } __no_const ops;
82205 };
82206
82207 struct snd_ice1712_spdif {
82208 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
82209 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82210 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82211 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
82212 - } ops;
82213 + } __no_const ops;
82214 };
82215
82216
82217 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
82218 index a8159b81..5f006a5 100644
82219 --- a/sound/pci/ymfpci/ymfpci_main.c
82220 +++ b/sound/pci/ymfpci/ymfpci_main.c
82221 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
82222 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
82223 break;
82224 }
82225 - if (atomic_read(&chip->interrupt_sleep_count)) {
82226 - atomic_set(&chip->interrupt_sleep_count, 0);
82227 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
82228 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82229 wake_up(&chip->interrupt_sleep);
82230 }
82231 __end:
82232 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
82233 continue;
82234 init_waitqueue_entry(&wait, current);
82235 add_wait_queue(&chip->interrupt_sleep, &wait);
82236 - atomic_inc(&chip->interrupt_sleep_count);
82237 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
82238 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
82239 remove_wait_queue(&chip->interrupt_sleep, &wait);
82240 }
82241 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
82242 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
82243 spin_unlock(&chip->reg_lock);
82244
82245 - if (atomic_read(&chip->interrupt_sleep_count)) {
82246 - atomic_set(&chip->interrupt_sleep_count, 0);
82247 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
82248 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82249 wake_up(&chip->interrupt_sleep);
82250 }
82251 }
82252 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
82253 spin_lock_init(&chip->reg_lock);
82254 spin_lock_init(&chip->voice_lock);
82255 init_waitqueue_head(&chip->interrupt_sleep);
82256 - atomic_set(&chip->interrupt_sleep_count, 0);
82257 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
82258 chip->card = card;
82259 chip->pci = pci;
82260 chip->irq = -1;
82261 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
82262 new file mode 100644
82263 index 0000000..50f2f2f
82264 --- /dev/null
82265 +++ b/tools/gcc/.gitignore
82266 @@ -0,0 +1 @@
82267 +size_overflow_hash.h
82268 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
82269 new file mode 100644
82270 index 0000000..1d09b7e
82271 --- /dev/null
82272 +++ b/tools/gcc/Makefile
82273 @@ -0,0 +1,43 @@
82274 +#CC := gcc
82275 +#PLUGIN_SOURCE_FILES := pax_plugin.c
82276 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
82277 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
82278 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
82279 +
82280 +ifeq ($(PLUGINCC),$(HOSTCC))
82281 +HOSTLIBS := hostlibs
82282 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
82283 +else
82284 +HOSTLIBS := hostcxxlibs
82285 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
82286 +endif
82287 +
82288 +$(HOSTLIBS)-y := constify_plugin.so
82289 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
82290 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
82291 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
82292 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
82293 +$(HOSTLIBS)-y += colorize_plugin.so
82294 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
82295 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
82296 +
82297 +always := $($(HOSTLIBS)-y)
82298 +
82299 +constify_plugin-objs := constify_plugin.o
82300 +stackleak_plugin-objs := stackleak_plugin.o
82301 +kallocstat_plugin-objs := kallocstat_plugin.o
82302 +kernexec_plugin-objs := kernexec_plugin.o
82303 +checker_plugin-objs := checker_plugin.o
82304 +colorize_plugin-objs := colorize_plugin.o
82305 +size_overflow_plugin-objs := size_overflow_plugin.o
82306 +latent_entropy_plugin-objs := latent_entropy_plugin.o
82307 +
82308 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
82309 +
82310 +quiet_cmd_build_size_overflow_hash = GENHASH $@
82311 + cmd_build_size_overflow_hash = \
82312 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
82313 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
82314 + $(call if_changed,build_size_overflow_hash)
82315 +
82316 +targets += size_overflow_hash.h
82317 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
82318 new file mode 100644
82319 index 0000000..d41b5af
82320 --- /dev/null
82321 +++ b/tools/gcc/checker_plugin.c
82322 @@ -0,0 +1,171 @@
82323 +/*
82324 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
82325 + * Licensed under the GPL v2
82326 + *
82327 + * Note: the choice of the license means that the compilation process is
82328 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
82329 + * but for the kernel it doesn't matter since it doesn't link against
82330 + * any of the gcc libraries
82331 + *
82332 + * gcc plugin to implement various sparse (source code checker) features
82333 + *
82334 + * TODO:
82335 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
82336 + *
82337 + * BUGS:
82338 + * - none known
82339 + */
82340 +#include "gcc-plugin.h"
82341 +#include "config.h"
82342 +#include "system.h"
82343 +#include "coretypes.h"
82344 +#include "tree.h"
82345 +#include "tree-pass.h"
82346 +#include "flags.h"
82347 +#include "intl.h"
82348 +#include "toplev.h"
82349 +#include "plugin.h"
82350 +//#include "expr.h" where are you...
82351 +#include "diagnostic.h"
82352 +#include "plugin-version.h"
82353 +#include "tm.h"
82354 +#include "function.h"
82355 +#include "basic-block.h"
82356 +#include "gimple.h"
82357 +#include "rtl.h"
82358 +#include "emit-rtl.h"
82359 +#include "tree-flow.h"
82360 +#include "target.h"
82361 +
82362 +extern void c_register_addr_space (const char *str, addr_space_t as);
82363 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
82364 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
82365 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
82366 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
82367 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
82368 +
82369 +extern void print_gimple_stmt(FILE *, gimple, int, int);
82370 +extern rtx emit_move_insn(rtx x, rtx y);
82371 +
82372 +int plugin_is_GPL_compatible;
82373 +
82374 +static struct plugin_info checker_plugin_info = {
82375 + .version = "201111150100",
82376 +};
82377 +
82378 +#define ADDR_SPACE_KERNEL 0
82379 +#define ADDR_SPACE_FORCE_KERNEL 1
82380 +#define ADDR_SPACE_USER 2
82381 +#define ADDR_SPACE_FORCE_USER 3
82382 +#define ADDR_SPACE_IOMEM 0
82383 +#define ADDR_SPACE_FORCE_IOMEM 0
82384 +#define ADDR_SPACE_PERCPU 0
82385 +#define ADDR_SPACE_FORCE_PERCPU 0
82386 +#define ADDR_SPACE_RCU 0
82387 +#define ADDR_SPACE_FORCE_RCU 0
82388 +
82389 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
82390 +{
82391 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
82392 +}
82393 +
82394 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
82395 +{
82396 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
82397 +}
82398 +
82399 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
82400 +{
82401 + return default_addr_space_valid_pointer_mode(mode, as);
82402 +}
82403 +
82404 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
82405 +{
82406 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
82407 +}
82408 +
82409 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
82410 +{
82411 + return default_addr_space_legitimize_address(x, oldx, mode, as);
82412 +}
82413 +
82414 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
82415 +{
82416 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
82417 + return true;
82418 +
82419 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
82420 + return true;
82421 +
82422 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
82423 + return true;
82424 +
82425 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
82426 + return true;
82427 +
82428 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
82429 + return true;
82430 +
82431 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
82432 + return true;
82433 +
82434 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
82435 + return true;
82436 +
82437 + return subset == superset;
82438 +}
82439 +
82440 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
82441 +{
82442 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
82443 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
82444 +
82445 + return op;
82446 +}
82447 +
82448 +static void register_checker_address_spaces(void *event_data, void *data)
82449 +{
82450 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
82451 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
82452 + c_register_addr_space("__user", ADDR_SPACE_USER);
82453 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
82454 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
82455 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
82456 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
82457 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
82458 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
82459 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
82460 +
82461 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
82462 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
82463 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
82464 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
82465 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
82466 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
82467 + targetm.addr_space.convert = checker_addr_space_convert;
82468 +}
82469 +
82470 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82471 +{
82472 + const char * const plugin_name = plugin_info->base_name;
82473 + const int argc = plugin_info->argc;
82474 + const struct plugin_argument * const argv = plugin_info->argv;
82475 + int i;
82476 +
82477 + if (!plugin_default_version_check(version, &gcc_version)) {
82478 + error(G_("incompatible gcc/plugin versions"));
82479 + return 1;
82480 + }
82481 +
82482 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
82483 +
82484 + for (i = 0; i < argc; ++i)
82485 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
82486 +
82487 + if (TARGET_64BIT == 0)
82488 + return 0;
82489 +
82490 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
82491 +
82492 + return 0;
82493 +}
82494 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
82495 new file mode 100644
82496 index 0000000..846aeb0
82497 --- /dev/null
82498 +++ b/tools/gcc/colorize_plugin.c
82499 @@ -0,0 +1,148 @@
82500 +/*
82501 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
82502 + * Licensed under the GPL v2
82503 + *
82504 + * Note: the choice of the license means that the compilation process is
82505 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
82506 + * but for the kernel it doesn't matter since it doesn't link against
82507 + * any of the gcc libraries
82508 + *
82509 + * gcc plugin to colorize diagnostic output
82510 + *
82511 + */
82512 +
82513 +#include "gcc-plugin.h"
82514 +#include "config.h"
82515 +#include "system.h"
82516 +#include "coretypes.h"
82517 +#include "tree.h"
82518 +#include "tree-pass.h"
82519 +#include "flags.h"
82520 +#include "intl.h"
82521 +#include "toplev.h"
82522 +#include "plugin.h"
82523 +#include "diagnostic.h"
82524 +#include "plugin-version.h"
82525 +#include "tm.h"
82526 +
82527 +int plugin_is_GPL_compatible;
82528 +
82529 +static struct plugin_info colorize_plugin_info = {
82530 + .version = "201203092200",
82531 + .help = NULL,
82532 +};
82533 +
82534 +#define GREEN "\033[32m\033[2m"
82535 +#define LIGHTGREEN "\033[32m\033[1m"
82536 +#define YELLOW "\033[33m\033[2m"
82537 +#define LIGHTYELLOW "\033[33m\033[1m"
82538 +#define RED "\033[31m\033[2m"
82539 +#define LIGHTRED "\033[31m\033[1m"
82540 +#define BLUE "\033[34m\033[2m"
82541 +#define LIGHTBLUE "\033[34m\033[1m"
82542 +#define BRIGHT "\033[m\033[1m"
82543 +#define NORMAL "\033[m"
82544 +
82545 +static diagnostic_starter_fn old_starter;
82546 +static diagnostic_finalizer_fn old_finalizer;
82547 +
82548 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
82549 +{
82550 + const char *color;
82551 + char *newprefix;
82552 +
82553 + switch (diagnostic->kind) {
82554 + case DK_NOTE:
82555 + color = LIGHTBLUE;
82556 + break;
82557 +
82558 + case DK_PEDWARN:
82559 + case DK_WARNING:
82560 + color = LIGHTYELLOW;
82561 + break;
82562 +
82563 + case DK_ERROR:
82564 + case DK_FATAL:
82565 + case DK_ICE:
82566 + case DK_PERMERROR:
82567 + case DK_SORRY:
82568 + color = LIGHTRED;
82569 + break;
82570 +
82571 + default:
82572 + color = NORMAL;
82573 + }
82574 +
82575 + old_starter(context, diagnostic);
82576 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
82577 + return;
82578 + pp_destroy_prefix(context->printer);
82579 + pp_set_prefix(context->printer, newprefix);
82580 +}
82581 +
82582 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
82583 +{
82584 + old_finalizer(context, diagnostic);
82585 +}
82586 +
82587 +static void colorize_arm(void)
82588 +{
82589 + old_starter = diagnostic_starter(global_dc);
82590 + old_finalizer = diagnostic_finalizer(global_dc);
82591 +
82592 + diagnostic_starter(global_dc) = start_colorize;
82593 + diagnostic_finalizer(global_dc) = finalize_colorize;
82594 +}
82595 +
82596 +static unsigned int execute_colorize_rearm(void)
82597 +{
82598 + if (diagnostic_starter(global_dc) == start_colorize)
82599 + return 0;
82600 +
82601 + colorize_arm();
82602 + return 0;
82603 +}
82604 +
82605 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
82606 + .pass = {
82607 + .type = SIMPLE_IPA_PASS,
82608 + .name = "colorize_rearm",
82609 + .gate = NULL,
82610 + .execute = execute_colorize_rearm,
82611 + .sub = NULL,
82612 + .next = NULL,
82613 + .static_pass_number = 0,
82614 + .tv_id = TV_NONE,
82615 + .properties_required = 0,
82616 + .properties_provided = 0,
82617 + .properties_destroyed = 0,
82618 + .todo_flags_start = 0,
82619 + .todo_flags_finish = 0
82620 + }
82621 +};
82622 +
82623 +static void colorize_start_unit(void *gcc_data, void *user_data)
82624 +{
82625 + colorize_arm();
82626 +}
82627 +
82628 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82629 +{
82630 + const char * const plugin_name = plugin_info->base_name;
82631 + struct register_pass_info colorize_rearm_pass_info = {
82632 + .pass = &pass_ipa_colorize_rearm.pass,
82633 + .reference_pass_name = "*free_lang_data",
82634 + .ref_pass_instance_number = 1,
82635 + .pos_op = PASS_POS_INSERT_AFTER
82636 + };
82637 +
82638 + if (!plugin_default_version_check(version, &gcc_version)) {
82639 + error(G_("incompatible gcc/plugin versions"));
82640 + return 1;
82641 + }
82642 +
82643 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
82644 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
82645 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
82646 + return 0;
82647 +}
82648 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
82649 new file mode 100644
82650 index 0000000..92ed719
82651 --- /dev/null
82652 +++ b/tools/gcc/constify_plugin.c
82653 @@ -0,0 +1,331 @@
82654 +/*
82655 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
82656 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
82657 + * Licensed under the GPL v2, or (at your option) v3
82658 + *
82659 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
82660 + *
82661 + * Homepage:
82662 + * http://www.grsecurity.net/~ephox/const_plugin/
82663 + *
82664 + * Usage:
82665 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
82666 + * $ gcc -fplugin=constify_plugin.so test.c -O2
82667 + */
82668 +
82669 +#include "gcc-plugin.h"
82670 +#include "config.h"
82671 +#include "system.h"
82672 +#include "coretypes.h"
82673 +#include "tree.h"
82674 +#include "tree-pass.h"
82675 +#include "flags.h"
82676 +#include "intl.h"
82677 +#include "toplev.h"
82678 +#include "plugin.h"
82679 +#include "diagnostic.h"
82680 +#include "plugin-version.h"
82681 +#include "tm.h"
82682 +#include "function.h"
82683 +#include "basic-block.h"
82684 +#include "gimple.h"
82685 +#include "rtl.h"
82686 +#include "emit-rtl.h"
82687 +#include "tree-flow.h"
82688 +
82689 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
82690 +
82691 +int plugin_is_GPL_compatible;
82692 +
82693 +static struct plugin_info const_plugin_info = {
82694 + .version = "201205300030",
82695 + .help = "no-constify\tturn off constification\n",
82696 +};
82697 +
82698 +static void deconstify_tree(tree node);
82699 +
82700 +static void deconstify_type(tree type)
82701 +{
82702 + tree field;
82703 +
82704 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
82705 + tree type = TREE_TYPE(field);
82706 +
82707 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
82708 + continue;
82709 + if (!TYPE_READONLY(type))
82710 + continue;
82711 +
82712 + deconstify_tree(field);
82713 + }
82714 + TYPE_READONLY(type) = 0;
82715 + C_TYPE_FIELDS_READONLY(type) = 0;
82716 +}
82717 +
82718 +static void deconstify_tree(tree node)
82719 +{
82720 + tree old_type, new_type, field;
82721 +
82722 + old_type = TREE_TYPE(node);
82723 +
82724 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
82725 +
82726 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
82727 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
82728 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
82729 + DECL_FIELD_CONTEXT(field) = new_type;
82730 +
82731 + deconstify_type(new_type);
82732 +
82733 + TREE_READONLY(node) = 0;
82734 + TREE_TYPE(node) = new_type;
82735 +}
82736 +
82737 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
82738 +{
82739 + tree type;
82740 +
82741 + *no_add_attrs = true;
82742 + if (TREE_CODE(*node) == FUNCTION_DECL) {
82743 + error("%qE attribute does not apply to functions", name);
82744 + return NULL_TREE;
82745 + }
82746 +
82747 + if (TREE_CODE(*node) == VAR_DECL) {
82748 + error("%qE attribute does not apply to variables", name);
82749 + return NULL_TREE;
82750 + }
82751 +
82752 + if (TYPE_P(*node)) {
82753 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
82754 + *no_add_attrs = false;
82755 + else
82756 + error("%qE attribute applies to struct and union types only", name);
82757 + return NULL_TREE;
82758 + }
82759 +
82760 + type = TREE_TYPE(*node);
82761 +
82762 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
82763 + error("%qE attribute applies to struct and union types only", name);
82764 + return NULL_TREE;
82765 + }
82766 +
82767 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
82768 + error("%qE attribute is already applied to the type", name);
82769 + return NULL_TREE;
82770 + }
82771 +
82772 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
82773 + error("%qE attribute used on type that is not constified", name);
82774 + return NULL_TREE;
82775 + }
82776 +
82777 + if (TREE_CODE(*node) == TYPE_DECL) {
82778 + deconstify_tree(*node);
82779 + return NULL_TREE;
82780 + }
82781 +
82782 + return NULL_TREE;
82783 +}
82784 +
82785 +static void constify_type(tree type)
82786 +{
82787 + TYPE_READONLY(type) = 1;
82788 + C_TYPE_FIELDS_READONLY(type) = 1;
82789 +}
82790 +
82791 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
82792 +{
82793 + *no_add_attrs = true;
82794 + if (!TYPE_P(*node)) {
82795 + error("%qE attribute applies to types only", name);
82796 + return NULL_TREE;
82797 + }
82798 +
82799 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
82800 + error("%qE attribute applies to struct and union types only", name);
82801 + return NULL_TREE;
82802 + }
82803 +
82804 + *no_add_attrs = false;
82805 + constify_type(*node);
82806 + return NULL_TREE;
82807 +}
82808 +
82809 +static struct attribute_spec no_const_attr = {
82810 + .name = "no_const",
82811 + .min_length = 0,
82812 + .max_length = 0,
82813 + .decl_required = false,
82814 + .type_required = false,
82815 + .function_type_required = false,
82816 + .handler = handle_no_const_attribute,
82817 +#if BUILDING_GCC_VERSION >= 4007
82818 + .affects_type_identity = true
82819 +#endif
82820 +};
82821 +
82822 +static struct attribute_spec do_const_attr = {
82823 + .name = "do_const",
82824 + .min_length = 0,
82825 + .max_length = 0,
82826 + .decl_required = false,
82827 + .type_required = false,
82828 + .function_type_required = false,
82829 + .handler = handle_do_const_attribute,
82830 +#if BUILDING_GCC_VERSION >= 4007
82831 + .affects_type_identity = true
82832 +#endif
82833 +};
82834 +
82835 +static void register_attributes(void *event_data, void *data)
82836 +{
82837 + register_attribute(&no_const_attr);
82838 + register_attribute(&do_const_attr);
82839 +}
82840 +
82841 +static bool is_fptr(tree field)
82842 +{
82843 + tree ptr = TREE_TYPE(field);
82844 +
82845 + if (TREE_CODE(ptr) != POINTER_TYPE)
82846 + return false;
82847 +
82848 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
82849 +}
82850 +
82851 +static bool walk_struct(tree node)
82852 +{
82853 + tree field;
82854 +
82855 + if (TYPE_FIELDS(node) == NULL_TREE)
82856 + return false;
82857 +
82858 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
82859 + gcc_assert(!TYPE_READONLY(node));
82860 + deconstify_type(node);
82861 + return false;
82862 + }
82863 +
82864 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
82865 + tree type = TREE_TYPE(field);
82866 + enum tree_code code = TREE_CODE(type);
82867 +
82868 + if (node == type)
82869 + return false;
82870 + if (code == RECORD_TYPE || code == UNION_TYPE) {
82871 + if (!(walk_struct(type)))
82872 + return false;
82873 + } else if (!is_fptr(field) && !TREE_READONLY(field))
82874 + return false;
82875 + }
82876 + return true;
82877 +}
82878 +
82879 +static void finish_type(void *event_data, void *data)
82880 +{
82881 + tree type = (tree)event_data;
82882 +
82883 + if (type == NULL_TREE || type == error_mark_node)
82884 + return;
82885 +
82886 + if (TYPE_READONLY(type))
82887 + return;
82888 +
82889 + if (walk_struct(type))
82890 + constify_type(type);
82891 +}
82892 +
82893 +static unsigned int check_local_variables(void);
82894 +
82895 +struct gimple_opt_pass pass_local_variable = {
82896 + {
82897 + .type = GIMPLE_PASS,
82898 + .name = "check_local_variables",
82899 + .gate = NULL,
82900 + .execute = check_local_variables,
82901 + .sub = NULL,
82902 + .next = NULL,
82903 + .static_pass_number = 0,
82904 + .tv_id = TV_NONE,
82905 + .properties_required = 0,
82906 + .properties_provided = 0,
82907 + .properties_destroyed = 0,
82908 + .todo_flags_start = 0,
82909 + .todo_flags_finish = 0
82910 + }
82911 +};
82912 +
82913 +static unsigned int check_local_variables(void)
82914 +{
82915 + tree var;
82916 + referenced_var_iterator rvi;
82917 +
82918 +#if BUILDING_GCC_VERSION == 4005
82919 + FOR_EACH_REFERENCED_VAR(var, rvi) {
82920 +#else
82921 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
82922 +#endif
82923 + tree type = TREE_TYPE(var);
82924 +
82925 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
82926 + continue;
82927 +
82928 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
82929 + continue;
82930 +
82931 + if (!TYPE_READONLY(type))
82932 + continue;
82933 +
82934 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
82935 +// continue;
82936 +
82937 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
82938 +// continue;
82939 +
82940 + if (walk_struct(type)) {
82941 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
82942 + return 1;
82943 + }
82944 + }
82945 + return 0;
82946 +}
82947 +
82948 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82949 +{
82950 + const char * const plugin_name = plugin_info->base_name;
82951 + const int argc = plugin_info->argc;
82952 + const struct plugin_argument * const argv = plugin_info->argv;
82953 + int i;
82954 + bool constify = true;
82955 +
82956 + struct register_pass_info local_variable_pass_info = {
82957 + .pass = &pass_local_variable.pass,
82958 + .reference_pass_name = "*referenced_vars",
82959 + .ref_pass_instance_number = 1,
82960 + .pos_op = PASS_POS_INSERT_AFTER
82961 + };
82962 +
82963 + if (!plugin_default_version_check(version, &gcc_version)) {
82964 + error(G_("incompatible gcc/plugin versions"));
82965 + return 1;
82966 + }
82967 +
82968 + for (i = 0; i < argc; ++i) {
82969 + if (!(strcmp(argv[i].key, "no-constify"))) {
82970 + constify = false;
82971 + continue;
82972 + }
82973 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
82974 + }
82975 +
82976 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
82977 + if (constify) {
82978 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
82979 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
82980 + }
82981 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
82982 +
82983 + return 0;
82984 +}
82985 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
82986 new file mode 100644
82987 index 0000000..02c6bec
82988 --- /dev/null
82989 +++ b/tools/gcc/generate_size_overflow_hash.sh
82990 @@ -0,0 +1,94 @@
82991 +#!/bin/bash
82992 +
82993 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
82994 +
82995 +header1="size_overflow_hash.h"
82996 +database="size_overflow_hash.data"
82997 +n=65536
82998 +
82999 +usage() {
83000 +cat <<EOF
83001 +usage: $0 options
83002 +OPTIONS:
83003 + -h|--help help
83004 + -o header file
83005 + -d database file
83006 + -n hash array size
83007 +EOF
83008 + return 0
83009 +}
83010 +
83011 +while true
83012 +do
83013 + case "$1" in
83014 + -h|--help) usage && exit 0;;
83015 + -n) n=$2; shift 2;;
83016 + -o) header1="$2"; shift 2;;
83017 + -d) database="$2"; shift 2;;
83018 + --) shift 1; break ;;
83019 + *) break ;;
83020 + esac
83021 +done
83022 +
83023 +create_defines() {
83024 + for i in `seq 1 32`
83025 + do
83026 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
83027 + done
83028 + echo >> "$header1"
83029 +}
83030 +
83031 +create_structs () {
83032 + rm -f "$header1"
83033 +
83034 + create_defines
83035 +
83036 + cat "$database" | while read data
83037 + do
83038 + data_array=($data)
83039 + struct_hash_name="${data_array[0]}"
83040 + funcn="${data_array[1]}"
83041 + params="${data_array[2]}"
83042 + next="${data_array[5]}"
83043 +
83044 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
83045 +
83046 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
83047 + echo -en "\t.param\t= " >> "$header1"
83048 + line=
83049 + for param_num in ${params//-/ };
83050 + do
83051 + line="${line}PARAM"$param_num"|"
83052 + done
83053 +
83054 + echo -e "${line%?},\n};\n" >> "$header1"
83055 + done
83056 +}
83057 +
83058 +create_headers () {
83059 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
83060 +}
83061 +
83062 +create_array_elements () {
83063 + index=0
83064 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
83065 + do
83066 + data_array=($data)
83067 + i="${data_array[3]}"
83068 + hash="${data_array[4]}"
83069 + while [[ $index -lt $i ]]
83070 + do
83071 + echo -e "\t["$index"]\t= NULL," >> "$header1"
83072 + index=$(($index + 1))
83073 + done
83074 + index=$(($index + 1))
83075 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
83076 + done
83077 + echo '};' >> $header1
83078 +}
83079 +
83080 +create_structs
83081 +create_headers
83082 +create_array_elements
83083 +
83084 +exit 0
83085 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
83086 new file mode 100644
83087 index 0000000..a86e422
83088 --- /dev/null
83089 +++ b/tools/gcc/kallocstat_plugin.c
83090 @@ -0,0 +1,167 @@
83091 +/*
83092 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
83093 + * Licensed under the GPL v2
83094 + *
83095 + * Note: the choice of the license means that the compilation process is
83096 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83097 + * but for the kernel it doesn't matter since it doesn't link against
83098 + * any of the gcc libraries
83099 + *
83100 + * gcc plugin to find the distribution of k*alloc sizes
83101 + *
83102 + * TODO:
83103 + *
83104 + * BUGS:
83105 + * - none known
83106 + */
83107 +#include "gcc-plugin.h"
83108 +#include "config.h"
83109 +#include "system.h"
83110 +#include "coretypes.h"
83111 +#include "tree.h"
83112 +#include "tree-pass.h"
83113 +#include "flags.h"
83114 +#include "intl.h"
83115 +#include "toplev.h"
83116 +#include "plugin.h"
83117 +//#include "expr.h" where are you...
83118 +#include "diagnostic.h"
83119 +#include "plugin-version.h"
83120 +#include "tm.h"
83121 +#include "function.h"
83122 +#include "basic-block.h"
83123 +#include "gimple.h"
83124 +#include "rtl.h"
83125 +#include "emit-rtl.h"
83126 +
83127 +extern void print_gimple_stmt(FILE *, gimple, int, int);
83128 +
83129 +int plugin_is_GPL_compatible;
83130 +
83131 +static const char * const kalloc_functions[] = {
83132 + "__kmalloc",
83133 + "kmalloc",
83134 + "kmalloc_large",
83135 + "kmalloc_node",
83136 + "kmalloc_order",
83137 + "kmalloc_order_trace",
83138 + "kmalloc_slab",
83139 + "kzalloc",
83140 + "kzalloc_node",
83141 +};
83142 +
83143 +static struct plugin_info kallocstat_plugin_info = {
83144 + .version = "201111150100",
83145 +};
83146 +
83147 +static unsigned int execute_kallocstat(void);
83148 +
83149 +static struct gimple_opt_pass kallocstat_pass = {
83150 + .pass = {
83151 + .type = GIMPLE_PASS,
83152 + .name = "kallocstat",
83153 + .gate = NULL,
83154 + .execute = execute_kallocstat,
83155 + .sub = NULL,
83156 + .next = NULL,
83157 + .static_pass_number = 0,
83158 + .tv_id = TV_NONE,
83159 + .properties_required = 0,
83160 + .properties_provided = 0,
83161 + .properties_destroyed = 0,
83162 + .todo_flags_start = 0,
83163 + .todo_flags_finish = 0
83164 + }
83165 +};
83166 +
83167 +static bool is_kalloc(const char *fnname)
83168 +{
83169 + size_t i;
83170 +
83171 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
83172 + if (!strcmp(fnname, kalloc_functions[i]))
83173 + return true;
83174 + return false;
83175 +}
83176 +
83177 +static unsigned int execute_kallocstat(void)
83178 +{
83179 + basic_block bb;
83180 +
83181 + // 1. loop through BBs and GIMPLE statements
83182 + FOR_EACH_BB(bb) {
83183 + gimple_stmt_iterator gsi;
83184 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83185 + // gimple match:
83186 + tree fndecl, size;
83187 + gimple call_stmt;
83188 + const char *fnname;
83189 +
83190 + // is it a call
83191 + call_stmt = gsi_stmt(gsi);
83192 + if (!is_gimple_call(call_stmt))
83193 + continue;
83194 + fndecl = gimple_call_fndecl(call_stmt);
83195 + if (fndecl == NULL_TREE)
83196 + continue;
83197 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
83198 + continue;
83199 +
83200 + // is it a call to k*alloc
83201 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
83202 + if (!is_kalloc(fnname))
83203 + continue;
83204 +
83205 + // is the size arg the result of a simple const assignment
83206 + size = gimple_call_arg(call_stmt, 0);
83207 + while (true) {
83208 + gimple def_stmt;
83209 + expanded_location xloc;
83210 + size_t size_val;
83211 +
83212 + if (TREE_CODE(size) != SSA_NAME)
83213 + break;
83214 + def_stmt = SSA_NAME_DEF_STMT(size);
83215 + if (!def_stmt || !is_gimple_assign(def_stmt))
83216 + break;
83217 + if (gimple_num_ops(def_stmt) != 2)
83218 + break;
83219 + size = gimple_assign_rhs1(def_stmt);
83220 + if (!TREE_CONSTANT(size))
83221 + continue;
83222 + xloc = expand_location(gimple_location(def_stmt));
83223 + if (!xloc.file)
83224 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
83225 + size_val = TREE_INT_CST_LOW(size);
83226 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
83227 + break;
83228 + }
83229 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
83230 +//debug_tree(gimple_call_fn(call_stmt));
83231 +//print_node(stderr, "pax", fndecl, 4);
83232 + }
83233 + }
83234 +
83235 + return 0;
83236 +}
83237 +
83238 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83239 +{
83240 + const char * const plugin_name = plugin_info->base_name;
83241 + struct register_pass_info kallocstat_pass_info = {
83242 + .pass = &kallocstat_pass.pass,
83243 + .reference_pass_name = "ssa",
83244 + .ref_pass_instance_number = 1,
83245 + .pos_op = PASS_POS_INSERT_AFTER
83246 + };
83247 +
83248 + if (!plugin_default_version_check(version, &gcc_version)) {
83249 + error(G_("incompatible gcc/plugin versions"));
83250 + return 1;
83251 + }
83252 +
83253 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
83254 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
83255 +
83256 + return 0;
83257 +}
83258 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
83259 new file mode 100644
83260 index 0000000..98011fa
83261 --- /dev/null
83262 +++ b/tools/gcc/kernexec_plugin.c
83263 @@ -0,0 +1,427 @@
83264 +/*
83265 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
83266 + * Licensed under the GPL v2
83267 + *
83268 + * Note: the choice of the license means that the compilation process is
83269 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83270 + * but for the kernel it doesn't matter since it doesn't link against
83271 + * any of the gcc libraries
83272 + *
83273 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
83274 + *
83275 + * TODO:
83276 + *
83277 + * BUGS:
83278 + * - none known
83279 + */
83280 +#include "gcc-plugin.h"
83281 +#include "config.h"
83282 +#include "system.h"
83283 +#include "coretypes.h"
83284 +#include "tree.h"
83285 +#include "tree-pass.h"
83286 +#include "flags.h"
83287 +#include "intl.h"
83288 +#include "toplev.h"
83289 +#include "plugin.h"
83290 +//#include "expr.h" where are you...
83291 +#include "diagnostic.h"
83292 +#include "plugin-version.h"
83293 +#include "tm.h"
83294 +#include "function.h"
83295 +#include "basic-block.h"
83296 +#include "gimple.h"
83297 +#include "rtl.h"
83298 +#include "emit-rtl.h"
83299 +#include "tree-flow.h"
83300 +
83301 +extern void print_gimple_stmt(FILE *, gimple, int, int);
83302 +extern rtx emit_move_insn(rtx x, rtx y);
83303 +
83304 +int plugin_is_GPL_compatible;
83305 +
83306 +static struct plugin_info kernexec_plugin_info = {
83307 + .version = "201111291120",
83308 + .help = "method=[bts|or]\tinstrumentation method\n"
83309 +};
83310 +
83311 +static unsigned int execute_kernexec_reload(void);
83312 +static unsigned int execute_kernexec_fptr(void);
83313 +static unsigned int execute_kernexec_retaddr(void);
83314 +static bool kernexec_cmodel_check(void);
83315 +
83316 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
83317 +static void (*kernexec_instrument_retaddr)(rtx);
83318 +
83319 +static struct gimple_opt_pass kernexec_reload_pass = {
83320 + .pass = {
83321 + .type = GIMPLE_PASS,
83322 + .name = "kernexec_reload",
83323 + .gate = kernexec_cmodel_check,
83324 + .execute = execute_kernexec_reload,
83325 + .sub = NULL,
83326 + .next = NULL,
83327 + .static_pass_number = 0,
83328 + .tv_id = TV_NONE,
83329 + .properties_required = 0,
83330 + .properties_provided = 0,
83331 + .properties_destroyed = 0,
83332 + .todo_flags_start = 0,
83333 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
83334 + }
83335 +};
83336 +
83337 +static struct gimple_opt_pass kernexec_fptr_pass = {
83338 + .pass = {
83339 + .type = GIMPLE_PASS,
83340 + .name = "kernexec_fptr",
83341 + .gate = kernexec_cmodel_check,
83342 + .execute = execute_kernexec_fptr,
83343 + .sub = NULL,
83344 + .next = NULL,
83345 + .static_pass_number = 0,
83346 + .tv_id = TV_NONE,
83347 + .properties_required = 0,
83348 + .properties_provided = 0,
83349 + .properties_destroyed = 0,
83350 + .todo_flags_start = 0,
83351 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
83352 + }
83353 +};
83354 +
83355 +static struct rtl_opt_pass kernexec_retaddr_pass = {
83356 + .pass = {
83357 + .type = RTL_PASS,
83358 + .name = "kernexec_retaddr",
83359 + .gate = kernexec_cmodel_check,
83360 + .execute = execute_kernexec_retaddr,
83361 + .sub = NULL,
83362 + .next = NULL,
83363 + .static_pass_number = 0,
83364 + .tv_id = TV_NONE,
83365 + .properties_required = 0,
83366 + .properties_provided = 0,
83367 + .properties_destroyed = 0,
83368 + .todo_flags_start = 0,
83369 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
83370 + }
83371 +};
83372 +
83373 +static bool kernexec_cmodel_check(void)
83374 +{
83375 + tree section;
83376 +
83377 + if (ix86_cmodel != CM_KERNEL)
83378 + return false;
83379 +
83380 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
83381 + if (!section || !TREE_VALUE(section))
83382 + return true;
83383 +
83384 + section = TREE_VALUE(TREE_VALUE(section));
83385 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
83386 + return true;
83387 +
83388 + return false;
83389 +}
83390 +
83391 +/*
83392 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
83393 + */
83394 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
83395 +{
83396 + gimple asm_movabs_stmt;
83397 +
83398 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
83399 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
83400 + gimple_asm_set_volatile(asm_movabs_stmt, true);
83401 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
83402 + update_stmt(asm_movabs_stmt);
83403 +}
83404 +
83405 +/*
83406 + * find all asm() stmts that clobber r10 and add a reload of r10
83407 + */
83408 +static unsigned int execute_kernexec_reload(void)
83409 +{
83410 + basic_block bb;
83411 +
83412 + // 1. loop through BBs and GIMPLE statements
83413 + FOR_EACH_BB(bb) {
83414 + gimple_stmt_iterator gsi;
83415 +
83416 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83417 + // gimple match: __asm__ ("" : : : "r10");
83418 + gimple asm_stmt;
83419 + size_t nclobbers;
83420 +
83421 + // is it an asm ...
83422 + asm_stmt = gsi_stmt(gsi);
83423 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
83424 + continue;
83425 +
83426 + // ... clobbering r10
83427 + nclobbers = gimple_asm_nclobbers(asm_stmt);
83428 + while (nclobbers--) {
83429 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
83430 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
83431 + continue;
83432 + kernexec_reload_fptr_mask(&gsi);
83433 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
83434 + break;
83435 + }
83436 + }
83437 + }
83438 +
83439 + return 0;
83440 +}
83441 +
83442 +/*
83443 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
83444 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
83445 + */
83446 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
83447 +{
83448 + gimple assign_intptr, assign_new_fptr, call_stmt;
83449 + tree intptr, old_fptr, new_fptr, kernexec_mask;
83450 +
83451 + call_stmt = gsi_stmt(*gsi);
83452 + old_fptr = gimple_call_fn(call_stmt);
83453 +
83454 + // create temporary unsigned long variable used for bitops and cast fptr to it
83455 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
83456 + add_referenced_var(intptr);
83457 + mark_sym_for_renaming(intptr);
83458 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
83459 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
83460 + update_stmt(assign_intptr);
83461 +
83462 + // apply logical or to temporary unsigned long and bitmask
83463 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
83464 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
83465 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
83466 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
83467 + update_stmt(assign_intptr);
83468 +
83469 + // cast temporary unsigned long back to a temporary fptr variable
83470 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
83471 + add_referenced_var(new_fptr);
83472 + mark_sym_for_renaming(new_fptr);
83473 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
83474 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
83475 + update_stmt(assign_new_fptr);
83476 +
83477 + // replace call stmt fn with the new fptr
83478 + gimple_call_set_fn(call_stmt, new_fptr);
83479 + update_stmt(call_stmt);
83480 +}
83481 +
83482 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
83483 +{
83484 + gimple asm_or_stmt, call_stmt;
83485 + tree old_fptr, new_fptr, input, output;
83486 + VEC(tree, gc) *inputs = NULL;
83487 + VEC(tree, gc) *outputs = NULL;
83488 +
83489 + call_stmt = gsi_stmt(*gsi);
83490 + old_fptr = gimple_call_fn(call_stmt);
83491 +
83492 + // create temporary fptr variable
83493 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
83494 + add_referenced_var(new_fptr);
83495 + mark_sym_for_renaming(new_fptr);
83496 +
83497 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
83498 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
83499 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
83500 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
83501 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
83502 + VEC_safe_push(tree, gc, inputs, input);
83503 + VEC_safe_push(tree, gc, outputs, output);
83504 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
83505 + gimple_asm_set_volatile(asm_or_stmt, true);
83506 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
83507 + update_stmt(asm_or_stmt);
83508 +
83509 + // replace call stmt fn with the new fptr
83510 + gimple_call_set_fn(call_stmt, new_fptr);
83511 + update_stmt(call_stmt);
83512 +}
83513 +
83514 +/*
83515 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
83516 + */
83517 +static unsigned int execute_kernexec_fptr(void)
83518 +{
83519 + basic_block bb;
83520 +
83521 + // 1. loop through BBs and GIMPLE statements
83522 + FOR_EACH_BB(bb) {
83523 + gimple_stmt_iterator gsi;
83524 +
83525 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
83526 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
83527 + tree fn;
83528 + gimple call_stmt;
83529 +
83530 + // is it a call ...
83531 + call_stmt = gsi_stmt(gsi);
83532 + if (!is_gimple_call(call_stmt))
83533 + continue;
83534 + fn = gimple_call_fn(call_stmt);
83535 + if (TREE_CODE(fn) == ADDR_EXPR)
83536 + continue;
83537 + if (TREE_CODE(fn) != SSA_NAME)
83538 + gcc_unreachable();
83539 +
83540 + // ... through a function pointer
83541 + fn = SSA_NAME_VAR(fn);
83542 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
83543 + continue;
83544 + fn = TREE_TYPE(fn);
83545 + if (TREE_CODE(fn) != POINTER_TYPE)
83546 + continue;
83547 + fn = TREE_TYPE(fn);
83548 + if (TREE_CODE(fn) != FUNCTION_TYPE)
83549 + continue;
83550 +
83551 + kernexec_instrument_fptr(&gsi);
83552 +
83553 +//debug_tree(gimple_call_fn(call_stmt));
83554 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
83555 + }
83556 + }
83557 +
83558 + return 0;
83559 +}
83560 +
83561 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
83562 +static void kernexec_instrument_retaddr_bts(rtx insn)
83563 +{
83564 + rtx btsq;
83565 + rtvec argvec, constraintvec, labelvec;
83566 + int line;
83567 +
83568 + // create asm volatile("btsq $63,(%%rsp)":::)
83569 + argvec = rtvec_alloc(0);
83570 + constraintvec = rtvec_alloc(0);
83571 + labelvec = rtvec_alloc(0);
83572 + line = expand_location(RTL_LOCATION(insn)).line;
83573 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
83574 + MEM_VOLATILE_P(btsq) = 1;
83575 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
83576 + emit_insn_before(btsq, insn);
83577 +}
83578 +
83579 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
83580 +static void kernexec_instrument_retaddr_or(rtx insn)
83581 +{
83582 + rtx orq;
83583 + rtvec argvec, constraintvec, labelvec;
83584 + int line;
83585 +
83586 + // create asm volatile("orq %%r10,(%%rsp)":::)
83587 + argvec = rtvec_alloc(0);
83588 + constraintvec = rtvec_alloc(0);
83589 + labelvec = rtvec_alloc(0);
83590 + line = expand_location(RTL_LOCATION(insn)).line;
83591 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
83592 + MEM_VOLATILE_P(orq) = 1;
83593 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
83594 + emit_insn_before(orq, insn);
83595 +}
83596 +
83597 +/*
83598 + * find all asm level function returns and forcibly set the highest bit of the return address
83599 + */
83600 +static unsigned int execute_kernexec_retaddr(void)
83601 +{
83602 + rtx insn;
83603 +
83604 + // 1. find function returns
83605 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
83606 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
83607 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
83608 + rtx body;
83609 +
83610 + // is it a retn
83611 + if (!JUMP_P(insn))
83612 + continue;
83613 + body = PATTERN(insn);
83614 + if (GET_CODE(body) == PARALLEL)
83615 + body = XVECEXP(body, 0, 0);
83616 + if (GET_CODE(body) != RETURN)
83617 + continue;
83618 + kernexec_instrument_retaddr(insn);
83619 + }
83620 +
83621 +// print_simple_rtl(stderr, get_insns());
83622 +// print_rtl(stderr, get_insns());
83623 +
83624 + return 0;
83625 +}
83626 +
83627 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83628 +{
83629 + const char * const plugin_name = plugin_info->base_name;
83630 + const int argc = plugin_info->argc;
83631 + const struct plugin_argument * const argv = plugin_info->argv;
83632 + int i;
83633 + struct register_pass_info kernexec_reload_pass_info = {
83634 + .pass = &kernexec_reload_pass.pass,
83635 + .reference_pass_name = "ssa",
83636 + .ref_pass_instance_number = 1,
83637 + .pos_op = PASS_POS_INSERT_AFTER
83638 + };
83639 + struct register_pass_info kernexec_fptr_pass_info = {
83640 + .pass = &kernexec_fptr_pass.pass,
83641 + .reference_pass_name = "ssa",
83642 + .ref_pass_instance_number = 1,
83643 + .pos_op = PASS_POS_INSERT_AFTER
83644 + };
83645 + struct register_pass_info kernexec_retaddr_pass_info = {
83646 + .pass = &kernexec_retaddr_pass.pass,
83647 + .reference_pass_name = "pro_and_epilogue",
83648 + .ref_pass_instance_number = 1,
83649 + .pos_op = PASS_POS_INSERT_AFTER
83650 + };
83651 +
83652 + if (!plugin_default_version_check(version, &gcc_version)) {
83653 + error(G_("incompatible gcc/plugin versions"));
83654 + return 1;
83655 + }
83656 +
83657 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
83658 +
83659 + if (TARGET_64BIT == 0)
83660 + return 0;
83661 +
83662 + for (i = 0; i < argc; ++i) {
83663 + if (!strcmp(argv[i].key, "method")) {
83664 + if (!argv[i].value) {
83665 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
83666 + continue;
83667 + }
83668 + if (!strcmp(argv[i].value, "bts")) {
83669 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
83670 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
83671 + } else if (!strcmp(argv[i].value, "or")) {
83672 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
83673 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
83674 + fix_register("r10", 1, 1);
83675 + } else
83676 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
83677 + continue;
83678 + }
83679 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
83680 + }
83681 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
83682 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
83683 +
83684 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
83685 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
83686 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
83687 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
83688 +
83689 + return 0;
83690 +}
83691 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
83692 new file mode 100644
83693 index 0000000..b8008f7
83694 --- /dev/null
83695 +++ b/tools/gcc/latent_entropy_plugin.c
83696 @@ -0,0 +1,295 @@
83697 +/*
83698 + * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
83699 + * Licensed under the GPL v2
83700 + *
83701 + * Note: the choice of the license means that the compilation process is
83702 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
83703 + * but for the kernel it doesn't matter since it doesn't link against
83704 + * any of the gcc libraries
83705 + *
83706 + * gcc plugin to help generate a little bit of entropy from program state,
83707 + * used during boot in the kernel
83708 + *
83709 + * TODO:
83710 + * - add ipa pass to identify not explicitly marked candidate functions
83711 + * - mix in more program state (function arguments/return values, loop variables, etc)
83712 + * - more instrumentation control via attribute parameters
83713 + *
83714 + * BUGS:
83715 + * - LTO needs -flto-partition=none for now
83716 + */
83717 +#include "gcc-plugin.h"
83718 +#include "config.h"
83719 +#include "system.h"
83720 +#include "coretypes.h"
83721 +#include "tree.h"
83722 +#include "tree-pass.h"
83723 +#include "flags.h"
83724 +#include "intl.h"
83725 +#include "toplev.h"
83726 +#include "plugin.h"
83727 +//#include "expr.h" where are you...
83728 +#include "diagnostic.h"
83729 +#include "plugin-version.h"
83730 +#include "tm.h"
83731 +#include "function.h"
83732 +#include "basic-block.h"
83733 +#include "gimple.h"
83734 +#include "rtl.h"
83735 +#include "emit-rtl.h"
83736 +#include "tree-flow.h"
83737 +
83738 +int plugin_is_GPL_compatible;
83739 +
83740 +static tree latent_entropy_decl;
83741 +
83742 +static struct plugin_info latent_entropy_plugin_info = {
83743 + .version = "201207271820",
83744 + .help = NULL
83745 +};
83746 +
83747 +static unsigned int execute_latent_entropy(void);
83748 +static bool gate_latent_entropy(void);
83749 +
83750 +static struct gimple_opt_pass latent_entropy_pass = {
83751 + .pass = {
83752 + .type = GIMPLE_PASS,
83753 + .name = "latent_entropy",
83754 + .gate = gate_latent_entropy,
83755 + .execute = execute_latent_entropy,
83756 + .sub = NULL,
83757 + .next = NULL,
83758 + .static_pass_number = 0,
83759 + .tv_id = TV_NONE,
83760 + .properties_required = PROP_gimple_leh | PROP_cfg,
83761 + .properties_provided = 0,
83762 + .properties_destroyed = 0,
83763 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
83764 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
83765 + }
83766 +};
83767 +
83768 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
83769 +{
83770 + if (TREE_CODE(*node) != FUNCTION_DECL) {
83771 + *no_add_attrs = true;
83772 + error("%qE attribute only applies to functions", name);
83773 + }
83774 + return NULL_TREE;
83775 +}
83776 +
83777 +static struct attribute_spec latent_entropy_attr = {
83778 + .name = "latent_entropy",
83779 + .min_length = 0,
83780 + .max_length = 0,
83781 + .decl_required = true,
83782 + .type_required = false,
83783 + .function_type_required = false,
83784 + .handler = handle_latent_entropy_attribute,
83785 +#if BUILDING_GCC_VERSION >= 4007
83786 + .affects_type_identity = false
83787 +#endif
83788 +};
83789 +
83790 +static void register_attributes(void *event_data, void *data)
83791 +{
83792 + register_attribute(&latent_entropy_attr);
83793 +}
83794 +
83795 +static bool gate_latent_entropy(void)
83796 +{
83797 + tree latent_entropy_attr;
83798 +
83799 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
83800 + return latent_entropy_attr != NULL_TREE;
83801 +}
83802 +
83803 +static unsigned HOST_WIDE_INT seed;
83804 +static unsigned HOST_WIDE_INT get_random_const(void)
83805 +{
83806 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
83807 + return seed;
83808 +}
83809 +
83810 +static enum tree_code get_op(tree *rhs)
83811 +{
83812 + static enum tree_code op;
83813 + unsigned HOST_WIDE_INT random_const;
83814 +
83815 + random_const = get_random_const();
83816 +
83817 + switch (op) {
83818 + case BIT_XOR_EXPR:
83819 + op = PLUS_EXPR;
83820 + break;
83821 +
83822 + case PLUS_EXPR:
83823 + if (rhs) {
83824 + op = LROTATE_EXPR;
83825 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
83826 + break;
83827 + }
83828 +
83829 + case LROTATE_EXPR:
83830 + default:
83831 + op = BIT_XOR_EXPR;
83832 + break;
83833 + }
83834 + if (rhs)
83835 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
83836 + return op;
83837 +}
83838 +
83839 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
83840 +{
83841 + gimple_stmt_iterator gsi;
83842 + gimple assign;
83843 + tree addxorrol, rhs;
83844 + enum tree_code op;
83845 +
83846 + op = get_op(&rhs);
83847 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
83848 + assign = gimple_build_assign(local_entropy, addxorrol);
83849 + find_referenced_vars_in(assign);
83850 +//debug_bb(bb);
83851 + gsi = gsi_after_labels(bb);
83852 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
83853 + update_stmt(assign);
83854 +}
83855 +
83856 +static void perturb_latent_entropy(basic_block bb, tree rhs)
83857 +{
83858 + gimple_stmt_iterator gsi;
83859 + gimple assign;
83860 + tree addxorrol, temp;
83861 +
83862 + // 1. create temporary copy of latent_entropy
83863 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
83864 + add_referenced_var(temp);
83865 + mark_sym_for_renaming(temp);
83866 +
83867 + // 2. read...
83868 + assign = gimple_build_assign(temp, latent_entropy_decl);
83869 + find_referenced_vars_in(assign);
83870 + gsi = gsi_after_labels(bb);
83871 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
83872 + update_stmt(assign);
83873 +
83874 + // 3. ...modify...
83875 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
83876 + assign = gimple_build_assign(temp, addxorrol);
83877 + find_referenced_vars_in(assign);
83878 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
83879 + update_stmt(assign);
83880 +
83881 + // 4. ...write latent_entropy
83882 + assign = gimple_build_assign(latent_entropy_decl, temp);
83883 + find_referenced_vars_in(assign);
83884 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
83885 + update_stmt(assign);
83886 +}
83887 +
83888 +static unsigned int execute_latent_entropy(void)
83889 +{
83890 + basic_block bb;
83891 + gimple assign;
83892 + gimple_stmt_iterator gsi;
83893 + tree local_entropy;
83894 +
83895 + if (!latent_entropy_decl) {
83896 + struct varpool_node *node;
83897 +
83898 + for (node = varpool_nodes; node; node = node->next) {
83899 + tree var = node->decl;
83900 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
83901 + continue;
83902 + latent_entropy_decl = var;
83903 +// debug_tree(var);
83904 + break;
83905 + }
83906 + if (!latent_entropy_decl) {
83907 +// debug_tree(current_function_decl);
83908 + return 0;
83909 + }
83910 + }
83911 +
83912 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
83913 +
83914 + // 1. create local entropy variable
83915 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
83916 + add_referenced_var(local_entropy);
83917 + mark_sym_for_renaming(local_entropy);
83918 +
83919 + // 2. initialize local entropy variable
83920 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
83921 + if (dom_info_available_p(CDI_DOMINATORS))
83922 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
83923 + gsi = gsi_start_bb(bb);
83924 +
83925 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
83926 +// gimple_set_location(assign, loc);
83927 + find_referenced_vars_in(assign);
83928 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
83929 + update_stmt(assign);
83930 + bb = bb->next_bb;
83931 +
83932 + // 3. instrument each BB with an operation on the local entropy variable
83933 + while (bb != EXIT_BLOCK_PTR) {
83934 + perturb_local_entropy(bb, local_entropy);
83935 + bb = bb->next_bb;
83936 + };
83937 +
83938 + // 4. mix local entropy into the global entropy variable
83939 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
83940 + return 0;
83941 +}
83942 +
83943 +static void start_unit_callback(void *gcc_data, void *user_data)
83944 +{
83945 +#if BUILDING_GCC_VERSION >= 4007
83946 + seed = get_random_seed(false);
83947 +#else
83948 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
83949 + seed *= seed;
83950 +#endif
83951 +
83952 + if (in_lto_p)
83953 + return;
83954 +
83955 + // extern u64 latent_entropy
83956 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
83957 +
83958 + TREE_STATIC(latent_entropy_decl) = 1;
83959 + TREE_PUBLIC(latent_entropy_decl) = 1;
83960 + TREE_USED(latent_entropy_decl) = 1;
83961 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
83962 + DECL_EXTERNAL(latent_entropy_decl) = 1;
83963 + DECL_ARTIFICIAL(latent_entropy_decl) = 0;
83964 + DECL_INITIAL(latent_entropy_decl) = NULL;
83965 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
83966 +// varpool_finalize_decl(latent_entropy_decl);
83967 +// varpool_mark_needed_node(latent_entropy_decl);
83968 +}
83969 +
83970 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
83971 +{
83972 + const char * const plugin_name = plugin_info->base_name;
83973 + struct register_pass_info latent_entropy_pass_info = {
83974 + .pass = &latent_entropy_pass.pass,
83975 + .reference_pass_name = "optimized",
83976 + .ref_pass_instance_number = 1,
83977 + .pos_op = PASS_POS_INSERT_BEFORE
83978 + };
83979 +
83980 + if (!plugin_default_version_check(version, &gcc_version)) {
83981 + error(G_("incompatible gcc/plugin versions"));
83982 + return 1;
83983 + }
83984 +
83985 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
83986 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
83987 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
83988 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
83989 +
83990 + return 0;
83991 +}
83992 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
83993 new file mode 100644
83994 index 0000000..1ff52ad
83995 --- /dev/null
83996 +++ b/tools/gcc/size_overflow_hash.data
83997 @@ -0,0 +1,3633 @@
83998 +_000001_hash alloc_dr 2 65495 _000001_hash NULL
83999 +_000002_hash __copy_from_user 3 10918 _000002_hash NULL
84000 +_000003_hash copy_from_user 3 17559 _000003_hash NULL
84001 +_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL
84002 +_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL
84003 +_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL
84004 +_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL
84005 +_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL
84006 +_000009_hash kmalloc 1 60432 _002597_hash NULL nohasharray
84007 +_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL
84008 +_000012_hash kmalloc_slab 1 11917 _000012_hash NULL
84009 +_000013_hash kmemdup 2 64015 _000013_hash NULL
84010 +_000014_hash __krealloc 2 14857 _000331_hash NULL nohasharray
84011 +_000015_hash memdup_user 2 59590 _000015_hash NULL
84012 +_000016_hash module_alloc 1 63630 _000016_hash NULL
84013 +_000017_hash read_default_ldt 2 14302 _000017_hash NULL
84014 +_000018_hash read_kcore 3 63488 _000018_hash NULL
84015 +_000019_hash read_ldt 2 47570 _000019_hash NULL
84016 +_000020_hash read_zero 3 19366 _000020_hash NULL
84017 +_000021_hash __vmalloc_node 1 39308 _000021_hash NULL
84018 +_000022_hash vm_map_ram 2 23078 _001054_hash NULL nohasharray
84019 +_000023_hash aa_simple_write_to_buffer 4-3 49683 _000023_hash NULL
84020 +_000024_hash ablkcipher_copy_iv 3 64140 _000024_hash NULL
84021 +_000025_hash ablkcipher_next_slow 4-3 47274 _000025_hash NULL
84022 +_000026_hash acpi_battery_write_alarm 3 1240 _000026_hash NULL
84023 +_000027_hash acpi_os_allocate 1 14892 _000027_hash NULL
84024 +_000028_hash acpi_system_write_wakeup_device 3 34853 _000028_hash NULL
84025 +_000029_hash adu_write 3 30487 _000029_hash NULL
84026 +_000030_hash aer_inject_write 3 52399 _000030_hash NULL
84027 +_000031_hash afs_alloc_flat_call 2-3 36399 _000031_hash NULL
84028 +_000033_hash afs_proc_cells_write 3 61139 _000033_hash NULL
84029 +_000034_hash afs_proc_rootcell_write 3 15822 _000034_hash NULL
84030 +_000035_hash agp_3_5_isochronous_node_enable 3 49465 _000035_hash NULL
84031 +_000036_hash agp_alloc_page_array 1 22554 _000036_hash NULL
84032 +_000037_hash ah_alloc_tmp 2-3 54378 _000037_hash NULL
84033 +_000038_hash ahash_setkey_unaligned 3 33521 _000038_hash NULL
84034 +_000039_hash alg_setkey 3 31485 _000039_hash NULL
84035 +_000040_hash aligned_kmalloc 1 3628 _000040_hash NULL
84036 +_000041_hash alloc_context 1 3194 _000041_hash NULL
84037 +_000042_hash alloc_ep_req 2 54860 _000042_hash NULL
84038 +_000043_hash alloc_fdmem 1 27083 _000043_hash NULL
84039 +_000044_hash alloc_flex_gd 1 57259 _000044_hash NULL
84040 +_000045_hash alloc_sglist 1-3-2 22960 _000045_hash NULL
84041 +_000046_hash aoedev_flush 2 44398 _000046_hash NULL
84042 +_000047_hash append_to_buffer 3 63550 _000047_hash NULL
84043 +_000048_hash asix_read_cmd 5 13245 _000048_hash NULL
84044 +_000049_hash asix_write_cmd 5 58192 _000049_hash NULL
84045 +_000050_hash asn1_octets_decode 2 9991 _000050_hash NULL
84046 +_000051_hash asn1_oid_decode 2 4999 _000051_hash NULL
84047 +_000052_hash at76_set_card_command 4 4471 _000052_hash NULL
84048 +_000053_hash ath6kl_add_bss_if_needed 6 24317 _000053_hash NULL
84049 +_000054_hash ath6kl_debug_roam_tbl_event 3 5224 _000054_hash NULL
84050 +_000055_hash ath6kl_mgmt_powersave_ap 6 13791 _000055_hash NULL
84051 +_000056_hash ath6kl_send_go_probe_resp 3 21113 _000056_hash NULL
84052 +_000057_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000057_hash NULL
84053 +_000058_hash ath6kl_set_assoc_req_ies 3 43185 _000058_hash NULL
84054 +_000059_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000059_hash NULL
84055 +_000060_hash ath6kl_wmi_send_action_cmd 7 58860 _000060_hash NULL
84056 +_000061_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000061_hash NULL
84057 +_000062_hash attach_hdlc_protocol 3 19986 _000062_hash NULL
84058 +_000063_hash audio_write 4 54261 _001597_hash NULL nohasharray
84059 +_000064_hash audit_unpack_string 3 13748 _000064_hash NULL
84060 +_000065_hash av7110_vbi_write 3 34384 _000065_hash NULL
84061 +_000066_hash ax25_setsockopt 5 42740 _000066_hash NULL
84062 +_000067_hash b43_debugfs_write 3 34838 _000067_hash NULL
84063 +_000068_hash b43legacy_debugfs_write 3 28556 _000068_hash NULL
84064 +_000069_hash bch_alloc 1 4593 _000069_hash NULL
84065 +_000070_hash befs_nls2utf 3 17163 _000070_hash NULL
84066 +_000071_hash befs_utf2nls 3 25628 _000071_hash NULL
84067 +_000072_hash bfad_debugfs_write_regrd 3 15218 _000072_hash NULL
84068 +_000073_hash bfad_debugfs_write_regwr 3 61841 _000073_hash NULL
84069 +_000074_hash bio_alloc_map_data 1-2 50782 _000074_hash NULL
84070 +_000076_hash bio_kmalloc 2 54672 _000076_hash NULL
84071 +_000077_hash blkcipher_copy_iv 3 24075 _000077_hash NULL
84072 +_000078_hash blkcipher_next_slow 4-3 52733 _000078_hash NULL
84073 +_000079_hash bl_pipe_downcall 3 34264 _000079_hash NULL
84074 +_000080_hash bnad_debugfs_write_regrd 3 6706 _000080_hash NULL
84075 +_000081_hash bnad_debugfs_write_regwr 3 57500 _000081_hash NULL
84076 +_000082_hash bnx2fc_cmd_mgr_alloc 2-3 24873 _000082_hash NULL
84077 +_000084_hash bnx2_nvram_write 4-2 7790 _000084_hash NULL
84078 +_000085_hash brcmf_sdbrcm_downloadvars 3 42064 _000085_hash NULL
84079 +_000086_hash btmrvl_gpiogap_write 3 35053 _000086_hash NULL
84080 +_000087_hash btmrvl_hscfgcmd_write 3 27143 _000087_hash NULL
84081 +_000088_hash btmrvl_hscmd_write 3 27089 _000088_hash NULL
84082 +_000089_hash btmrvl_hsmode_write 3 42252 _000089_hash NULL
84083 +_000090_hash btmrvl_pscmd_write 3 29504 _000090_hash NULL
84084 +_000091_hash btmrvl_psmode_write 3 3703 _000091_hash NULL
84085 +_000092_hash btrfs_alloc_delayed_item 1 11678 _000092_hash NULL
84086 +_000093_hash cache_do_downcall 3 6926 _000093_hash NULL
84087 +_000094_hash cachefiles_cook_key 2 33274 _000094_hash NULL
84088 +_000095_hash cachefiles_daemon_write 3 43535 _000095_hash NULL
84089 +_000096_hash capi_write 3 35104 _000096_hash NULL
84090 +_000097_hash carl9170_debugfs_write 3 50857 _000097_hash NULL
84091 +_000098_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000098_hash NULL
84092 +_000100_hash cciss_proc_write 3 10259 _000100_hash NULL
84093 +_000101_hash cdrom_read_cdda_old 4 27664 _000101_hash NULL
84094 +_000102_hash ceph_alloc_page_vector 1 18710 _000102_hash NULL
84095 +_000103_hash ceph_buffer_new 1 35974 _000103_hash NULL
84096 +_000104_hash ceph_copy_user_to_page_vector 4-3 656 _000104_hash NULL
84097 +_000105_hash ceph_get_direct_page_vector 2 41917 _000105_hash NULL
84098 +_000106_hash ceph_msg_new 2 5846 _000106_hash NULL
84099 +_000107_hash ceph_setxattr 4 18913 _000107_hash NULL
84100 +_000108_hash cfi_read_pri 3 24366 _000108_hash NULL
84101 +_000109_hash cgroup_write_string 5 10900 _000109_hash NULL
84102 +_000110_hash cgroup_write_X64 5 54514 _000110_hash NULL
84103 +_000111_hash change_xattr 5 61390 _000111_hash NULL
84104 +_000112_hash check_load_and_stores 2 2143 _000112_hash NULL
84105 +_000113_hash cifs_idmap_key_instantiate 3 54503 _000113_hash NULL
84106 +_000114_hash cifs_security_flags_proc_write 3 5484 _000114_hash NULL
84107 +_000115_hash cifs_setxattr 4 23957 _000115_hash NULL
84108 +_000116_hash cifs_spnego_key_instantiate 3 23588 _000116_hash NULL
84109 +_000117_hash ci_ll_write 4 3740 _000117_hash NULL
84110 +_000118_hash cld_pipe_downcall 3 15058 _000118_hash NULL
84111 +_000119_hash clear_refs_write 3 61904 _000119_hash NULL
84112 +_000120_hash clusterip_proc_write 3 44729 _000120_hash NULL
84113 +_000121_hash cm4040_write 3 58079 _000121_hash NULL
84114 +_000122_hash cm_copy_private_data 2 3649 _000122_hash NULL
84115 +_000123_hash cmm_write 3 2896 _000123_hash NULL
84116 +_000124_hash cm_write 3 36858 _000124_hash NULL
84117 +_000125_hash coda_psdev_write 3 1711 _000125_hash NULL
84118 +_000126_hash codec_reg_read_file 3 36280 _000126_hash NULL
84119 +_000127_hash command_file_write 3 31318 _000127_hash NULL
84120 +_000128_hash command_write 3 58841 _000128_hash NULL
84121 +_000129_hash comm_write 3 44537 _001532_hash NULL nohasharray
84122 +_000130_hash concat_writev 3 21451 _000130_hash NULL
84123 +_000131_hash copy_and_check 3 19089 _000131_hash NULL
84124 +_000132_hash copy_from_user_toio 3 31966 _000132_hash NULL
84125 +_000133_hash copy_items 6 50140 _000133_hash NULL
84126 +_000134_hash copy_macs 4 45534 _000134_hash NULL
84127 +_000135_hash __copy_to_user 3 17551 _000135_hash NULL
84128 +_000136_hash copy_vm86_regs_from_user 3 45340 _000136_hash NULL
84129 +_000137_hash cosa_write 3 1774 _000137_hash NULL
84130 +_000138_hash create_entry 2 33479 _000138_hash NULL
84131 +_000139_hash create_queues 2-3 9088 _000139_hash NULL
84132 +_000141_hash create_xattr 5 54106 _000141_hash NULL
84133 +_000142_hash create_xattr_datum 5 33356 _000142_hash NULL
84134 +_000143_hash csum_partial_copy_fromiovecend 3-4 9957 _000143_hash NULL
84135 +_000145_hash ctrl_out 3-5 8712 _000145_hash NULL
84136 +_000147_hash cx24116_writeregN 4 41975 _000147_hash NULL
84137 +_000148_hash cxacru_cm_get_array 4 4412 _000148_hash NULL
84138 +_000149_hash cxgbi_alloc_big_mem 1 4707 _000149_hash NULL
84139 +_000150_hash dac960_user_command_proc_write 3 3071 _000150_hash NULL
84140 +_000151_hash datablob_format 2 39571 _002156_hash NULL nohasharray
84141 +_000152_hash dccp_feat_clone_sp_val 3 11942 _000152_hash NULL
84142 +_000153_hash dccp_setsockopt_ccid 4 30701 _000153_hash NULL
84143 +_000154_hash dccp_setsockopt_cscov 2 37766 _000154_hash NULL
84144 +_000155_hash dccp_setsockopt_service 4 65336 _000155_hash NULL
84145 +_000156_hash ddb_output_write 3 31902 _000156_hash NULL
84146 +_000157_hash ddebug_proc_write 3 18055 _000157_hash NULL
84147 +_000158_hash dev_config 3 8506 _000158_hash NULL
84148 +_000159_hash device_write 3 45156 _000159_hash NULL
84149 +_000160_hash devm_kzalloc 2 4966 _000160_hash NULL
84150 +_000161_hash devres_alloc 2 551 _000161_hash NULL
84151 +_000162_hash dfs_file_write 3 41196 _000162_hash NULL
84152 +_000163_hash direct_entry 3 38836 _000163_hash NULL
84153 +_000164_hash dispatch_proc_write 3 44320 _000164_hash NULL
84154 +_000165_hash diva_os_copy_from_user 4 7792 _000165_hash NULL
84155 +_000166_hash dlm_alloc_pagevec 1 54296 _000166_hash NULL
84156 +_000167_hash dlmfs_file_read 3 28385 _000167_hash NULL
84157 +_000168_hash dlmfs_file_write 3 6892 _000168_hash NULL
84158 +_000169_hash dm_read 3 15674 _000169_hash NULL
84159 +_000170_hash dm_write 3 2513 _000170_hash NULL
84160 +_000171_hash __dn_setsockopt 5 13060 _000171_hash NULL
84161 +_000172_hash dns_query 3 9676 _000172_hash NULL
84162 +_000173_hash dns_resolver_instantiate 3 63314 _000173_hash NULL
84163 +_000174_hash do_add_counters 3 3992 _000174_hash NULL
84164 +_000175_hash __do_config_autodelink 3 58763 _000175_hash NULL
84165 +_000176_hash do_ip_setsockopt 5 41852 _000176_hash NULL
84166 +_000177_hash do_ipv6_setsockopt 5 18215 _000177_hash NULL
84167 +_000178_hash do_ip_vs_set_ctl 4 48641 _000178_hash NULL
84168 +_000179_hash do_kimage_alloc 3 64827 _000179_hash NULL
84169 +_000180_hash do_register_entry 4 29478 _000180_hash NULL
84170 +_000181_hash do_tty_write 5 44896 _000181_hash NULL
84171 +_000182_hash do_update_counters 4 2259 _000182_hash NULL
84172 +_000183_hash dsp_write 2 46218 _000183_hash NULL
84173 +_000184_hash dup_to_netobj 3 26363 _000184_hash NULL
84174 +_000185_hash dvb_aplay 3 56296 _000185_hash NULL
84175 +_000186_hash dvb_ca_en50221_io_write 3 43533 _000186_hash NULL
84176 +_000187_hash dvbdmx_write 3 19423 _000187_hash NULL
84177 +_000188_hash dvb_play 3 50814 _000188_hash NULL
84178 +_000189_hash dw210x_op_rw 6 39915 _000189_hash NULL
84179 +_000190_hash dwc3_link_state_write 3 12641 _000190_hash NULL
84180 +_000191_hash dwc3_mode_write 3 51997 _000191_hash NULL
84181 +_000192_hash dwc3_testmode_write 3 30516 _000192_hash NULL
84182 +_000193_hash ecryptfs_copy_filename 4 11868 _000193_hash NULL
84183 +_000194_hash ecryptfs_miscdev_write 3 26847 _000194_hash NULL
84184 +_000195_hash ecryptfs_send_miscdev 2 64816 _000195_hash NULL
84185 +_000196_hash efx_tsoh_heap_alloc 2 58545 _000196_hash NULL
84186 +_000197_hash emi26_writememory 4 57908 _000197_hash NULL
84187 +_000198_hash emi62_writememory 4 29731 _000198_hash NULL
84188 +_000199_hash encrypted_instantiate 3 3168 _000199_hash NULL
84189 +_000200_hash encrypted_update 3 13414 _000200_hash NULL
84190 +_000201_hash ep0_write 3 14536 _001328_hash NULL nohasharray
84191 +_000202_hash ep_read 3 58813 _000202_hash NULL
84192 +_000203_hash ep_write 3 59008 _000203_hash NULL
84193 +_000204_hash erst_dbg_write 3 46715 _000204_hash NULL
84194 +_000205_hash esp_alloc_tmp 2-3 40558 _000205_hash NULL
84195 +_000206_hash exofs_read_lookup_dev_table 3 17733 _000206_hash NULL
84196 +_000207_hash ext4_kvmalloc 1 14796 _000207_hash NULL
84197 +_000208_hash ezusb_writememory 4 45976 _000208_hash NULL
84198 +_000209_hash fanotify_write 3 64623 _000209_hash NULL
84199 +_000210_hash fd_copyin 3 56247 _000210_hash NULL
84200 +_000211_hash ffs_epfile_io 3 64886 _000211_hash NULL
84201 +_000212_hash ffs_prepare_buffer 2 59892 _000212_hash NULL
84202 +_000213_hash f_hidg_write 3 7932 _000213_hash NULL
84203 +_000214_hash file_read_actor 4 1401 _000214_hash NULL
84204 +_000215_hash fill_write_buffer 3 3142 _000215_hash NULL
84205 +_000216_hash fl_create 5 56435 _000216_hash NULL
84206 +_000217_hash ftdi_elan_write 3 57309 _000217_hash NULL
84207 +_000218_hash fuse_conn_limit_write 3 30777 _003837_hash NULL nohasharray
84208 +_000219_hash fw_iso_buffer_init 3 54582 _000219_hash NULL
84209 +_000220_hash garmin_write_bulk 3 58191 _000220_hash NULL
84210 +_000221_hash garp_attr_create 3 3883 _000221_hash NULL
84211 +_000222_hash get_arg 3 5694 _000222_hash NULL
84212 +_000223_hash getdqbuf 1 62908 _000223_hash NULL
84213 +_000224_hash get_fdb_entries 3 41916 _000224_hash NULL
84214 +_000225_hash get_indirect_ea 4 51869 _000225_hash NULL
84215 +_000226_hash get_registers 3 26187 _000226_hash NULL
84216 +_000227_hash get_scq 2 10897 _000227_hash NULL
84217 +_000228_hash get_server_iovec 2 16804 _000228_hash NULL
84218 +_000229_hash get_ucode_user 3 38202 _000229_hash NULL
84219 +_000230_hash get_user_cpu_mask 2 14861 _000230_hash NULL
84220 +_000231_hash gfs2_alloc_sort_buffer 1 18275 _000231_hash NULL
84221 +_000232_hash gfs2_glock_nq_m 1 20347 _000232_hash NULL
84222 +_000233_hash gigaset_initcs 2 43753 _000233_hash NULL
84223 +_000234_hash gigaset_initdriver 2 1060 _000234_hash NULL
84224 +_000235_hash gs_alloc_req 2 58883 _000235_hash NULL
84225 +_000236_hash gs_buf_alloc 2 25067 _000236_hash NULL
84226 +_000237_hash gsm_data_alloc 3 42437 _000237_hash NULL
84227 +_000238_hash gss_pipe_downcall 3 23182 _000238_hash NULL
84228 +_000239_hash handle_request 9 10024 _000239_hash NULL
84229 +_000240_hash hash_new 1 62224 _000240_hash NULL
84230 +_000241_hash hashtab_create 3 33769 _000241_hash NULL
84231 +_000242_hash hcd_buffer_alloc 2 27495 _000242_hash NULL
84232 +_000243_hash hci_sock_setsockopt 5 28993 _000243_hash NULL
84233 +_000244_hash heap_init 2 49617 _000244_hash NULL
84234 +_000245_hash hest_ghes_dev_register 1 46766 _000245_hash NULL
84235 +_000246_hash hidraw_get_report 3 45609 _000246_hash NULL
84236 +_000247_hash hidraw_report_event 3 49578 _000509_hash NULL nohasharray
84237 +_000248_hash hidraw_send_report 3 23449 _000248_hash NULL
84238 +_000249_hash hpfs_translate_name 3 41497 _000249_hash NULL
84239 +_000250_hash hysdn_conf_write 3 52145 _000250_hash NULL
84240 +_000251_hash hysdn_log_write 3 48694 _000251_hash NULL
84241 +_000252_hash __i2400mu_send_barker 3 23652 _000252_hash NULL
84242 +_000253_hash i2cdev_read 3 1206 _000253_hash NULL
84243 +_000254_hash i2cdev_write 3 23310 _000254_hash NULL
84244 +_000255_hash i2o_parm_field_get 5 34477 _000255_hash NULL
84245 +_000256_hash i2o_parm_table_get 6 61635 _000256_hash NULL
84246 +_000257_hash ib_copy_from_udata 3 59502 _000257_hash NULL
84247 +_000258_hash ib_ucm_alloc_data 3 36885 _000258_hash NULL
84248 +_000259_hash ib_umad_write 3 47993 _000259_hash NULL
84249 +_000260_hash ib_uverbs_unmarshall_recv 5 12251 _000260_hash NULL
84250 +_000261_hash icn_writecmd 2 38629 _000261_hash NULL
84251 +_000262_hash ide_driver_proc_write 3 32493 _000262_hash NULL
84252 +_000263_hash ide_settings_proc_write 3 35110 _000263_hash NULL
84253 +_000264_hash idetape_chrdev_write 3 53976 _000264_hash NULL
84254 +_000265_hash idmap_pipe_downcall 3 14591 _000265_hash NULL
84255 +_000266_hash ieee80211_build_probe_req 7-5 27660 _000266_hash NULL
84256 +_000267_hash ieee80211_if_write 3 34894 _000267_hash NULL
84257 +_000268_hash if_write 3 51756 _000268_hash NULL
84258 +_000269_hash ilo_write 3 64378 _000269_hash NULL
84259 +_000270_hash ima_write_policy 3 40548 _000270_hash NULL
84260 +_000271_hash init_data_container 1 60709 _000271_hash NULL
84261 +_000272_hash init_send_hfcd 1 34586 _000272_hash NULL
84262 +_000273_hash insert_dent 7 65034 _000273_hash NULL
84263 +_000274_hash interpret_user_input 2 19393 _000274_hash NULL
84264 +_000275_hash int_proc_write 3 39542 _000275_hash NULL
84265 +_000276_hash ioctl_private_iw_point 7 1273 _000276_hash NULL
84266 +_000277_hash iov_iter_copy_from_user 4 31942 _000277_hash NULL
84267 +_000278_hash iov_iter_copy_from_user_atomic 4 56368 _000278_hash NULL
84268 +_000279_hash iowarrior_write 3 18604 _000279_hash NULL
84269 +_000280_hash ipc_alloc 1 1192 _000280_hash NULL
84270 +_000281_hash ipc_rcu_alloc 1 21208 _000281_hash NULL
84271 +_000282_hash ip_options_get_from_user 4 64958 _000282_hash NULL
84272 +_000283_hash ipv6_renew_option 3 38813 _000283_hash NULL
84273 +_000284_hash ip_vs_conn_fill_param_sync 6 29771 _002404_hash NULL nohasharray
84274 +_000285_hash ip_vs_create_timeout_table 2 64478 _000285_hash NULL
84275 +_000286_hash ipw_queue_tx_init 3 49161 _000286_hash NULL
84276 +_000287_hash irda_setsockopt 5 19824 _000287_hash NULL
84277 +_000288_hash irias_new_octseq_value 2 13596 _003296_hash NULL nohasharray
84278 +_000289_hash ir_lirc_transmit_ir 3 64403 _000289_hash NULL
84279 +_000290_hash irnet_ctrl_write 3 24139 _000290_hash NULL
84280 +_000291_hash isdn_add_channels 3 40905 _000291_hash NULL
84281 +_000292_hash isdn_ppp_fill_rq 2 41428 _000292_hash NULL
84282 +_000293_hash isdn_ppp_write 4 29109 _000293_hash NULL
84283 +_000294_hash isdn_read 3 50021 _000294_hash NULL
84284 +_000295_hash isdn_v110_open 3 2418 _000295_hash NULL
84285 +_000296_hash isdn_writebuf_stub 4 52383 _000296_hash NULL
84286 +_000297_hash islpci_mgt_transmit 5 34133 _000297_hash NULL
84287 +_000298_hash iso_callback 3 43208 _000298_hash NULL
84288 +_000299_hash iso_packets_buffer_init 3-4 29061 _000299_hash NULL
84289 +_000300_hash it821x_firmware_command 3 8628 _000300_hash NULL
84290 +_000301_hash ivtv_buf_copy_from_user 4 25502 _000301_hash NULL
84291 +_000302_hash iwch_alloc_fastreg_pbl 2 40153 _000302_hash NULL
84292 +_000303_hash iwl_calib_set 3 34400 _002188_hash NULL nohasharray
84293 +_000304_hash jbd2_journal_init_revoke_table 1 36336 _000304_hash NULL
84294 +_000305_hash jffs2_alloc_full_dirent 1 60179 _001111_hash NULL nohasharray
84295 +_000306_hash journal_init_revoke_table 1 56331 _000306_hash NULL
84296 +_000307_hash kcalloc 1-2 27770 _000307_hash NULL
84297 +_000309_hash keyctl_instantiate_key_common 4 47889 _000309_hash NULL
84298 +_000310_hash keyctl_update_key 3 26061 _000310_hash NULL
84299 +_000311_hash __kfifo_alloc 2-3 22173 _000311_hash NULL
84300 +_000313_hash kfifo_copy_from_user 3 5091 _000313_hash NULL
84301 +_000314_hash kmalloc_node 1 50163 _003293_hash NULL nohasharray
84302 +_000315_hash kmalloc_parameter 1 65279 _000315_hash NULL
84303 +_000316_hash kmem_alloc 1 31920 _000316_hash NULL
84304 +_000317_hash kobj_map 2-3 9566 _000317_hash NULL
84305 +_000319_hash kone_receive 4 4690 _000319_hash NULL
84306 +_000320_hash kone_send 4 63435 _000320_hash NULL
84307 +_000321_hash krealloc 2 14908 _000321_hash NULL
84308 +_000322_hash kvmalloc 1 32646 _000322_hash NULL
84309 +_000323_hash kvm_read_guest_atomic 4 10765 _000323_hash NULL
84310 +_000324_hash kvm_read_guest_cached 4 39666 _000324_hash NULL
84311 +_000325_hash kvm_read_guest_page 5 18074 _000325_hash NULL
84312 +_000326_hash kzalloc 1 54740 _000326_hash NULL
84313 +_000327_hash l2cap_sock_setsockopt 5 50207 _000327_hash NULL
84314 +_000328_hash l2cap_sock_setsockopt_old 4 29346 _000328_hash NULL
84315 +_000329_hash lane2_associate_req 4 45398 _000329_hash NULL
84316 +_000330_hash lbs_debugfs_write 3 48413 _000330_hash NULL
84317 +_000331_hash lcd_write 3 14857 _000331_hash &_000014_hash
84318 +_000332_hash ldm_frag_add 2 5611 _000332_hash NULL
84319 +_000333_hash __lgread 4 31668 _000333_hash NULL
84320 +_000334_hash libipw_alloc_txb 1-3-2 27579 _000334_hash NULL
84321 +_000335_hash link_send_sections_long 4 46556 _000335_hash NULL
84322 +_000336_hash listxattr 3 12769 _000336_hash NULL
84323 +_000337_hash LoadBitmap 2 19658 _000337_hash NULL
84324 +_000338_hash load_msg 2 95 _000338_hash NULL
84325 +_000339_hash lpfc_debugfs_dif_err_write 3 17424 _000339_hash NULL
84326 +_000340_hash lp_write 3 9511 _000340_hash NULL
84327 +_000341_hash mb_cache_create 2 17307 _000341_hash NULL
84328 +_000342_hash mce_write 3 26201 _000342_hash NULL
84329 +_000343_hash mcs7830_get_reg 3 33308 _000343_hash NULL
84330 +_000344_hash mcs7830_set_reg 3 31413 _000344_hash NULL
84331 +_000345_hash memcpy_fromiovec 3 55247 _000345_hash NULL
84332 +_000346_hash memcpy_fromiovecend 3-4 2707 _000346_hash NULL
84333 +_000348_hash mempool_kmalloc 2 53831 _000348_hash NULL
84334 +_000349_hash mempool_resize 2 47983 _001821_hash NULL nohasharray
84335 +_000350_hash mem_rw 3 22085 _000350_hash NULL
84336 +_000351_hash mgmt_control 3 7349 _000351_hash NULL
84337 +_000352_hash mgmt_pending_add 5 46976 _000352_hash NULL
84338 +_000353_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000353_hash NULL
84339 +_000354_hash mmc_alloc_sg 1 21504 _000354_hash NULL
84340 +_000355_hash mmc_send_bus_test 4 18285 _000355_hash NULL
84341 +_000356_hash mmc_send_cxd_data 5 38655 _000356_hash NULL
84342 +_000357_hash module_alloc_update_bounds 1 47205 _000357_hash NULL
84343 +_000358_hash move_addr_to_kernel 2 32673 _000358_hash NULL
84344 +_000359_hash mpi_alloc_limb_space 1 23190 _000359_hash NULL
84345 +_000360_hash mpi_resize 2 44674 _000360_hash NULL
84346 +_000361_hash mptctl_getiocinfo 2 28545 _000361_hash NULL
84347 +_000362_hash mtdchar_readoob 4 31200 _000362_hash NULL
84348 +_000363_hash mtdchar_write 3 56831 _002688_hash NULL nohasharray
84349 +_000364_hash mtdchar_writeoob 4 3393 _000364_hash NULL
84350 +_000365_hash mtd_device_parse_register 5 5024 _000365_hash NULL
84351 +_000366_hash mtf_test_write 3 18844 _000366_hash NULL
84352 +_000367_hash mtrr_write 3 59622 _000367_hash NULL
84353 +_000368_hash musb_test_mode_write 3 33518 _000368_hash NULL
84354 +_000369_hash mwifiex_get_common_rates 3 17131 _000369_hash NULL
84355 +_000370_hash mwifiex_update_curr_bss_params 5 16908 _000370_hash NULL
84356 +_000371_hash nand_bch_init 2-3 16280 _001341_hash NULL nohasharray
84357 +_000373_hash ncp_file_write 3 3813 _000373_hash NULL
84358 +_000374_hash ncp__vol2io 5 4804 _000374_hash NULL
84359 +_000375_hash nes_alloc_fast_reg_page_list 2 33523 _000375_hash NULL
84360 +_000376_hash nfc_targets_found 3 29886 _000376_hash NULL
84361 +_000377_hash nfs4_acl_new 1 49806 _000377_hash NULL
84362 +_000378_hash nfs4_write_cached_acl 4 15070 _000378_hash NULL
84363 +_000379_hash nfsd_cache_update 3 59574 _000379_hash NULL
84364 +_000380_hash nfsd_symlink 6 63442 _000380_hash NULL
84365 +_000381_hash nfs_idmap_get_desc 2-4 42990 _000381_hash NULL
84366 +_000383_hash nfs_readdir_make_qstr 3 12509 _000383_hash NULL
84367 +_000384_hash note_last_dentry 3 12285 _000384_hash NULL
84368 +_000385_hash ntfs_copy_from_user 3-5 15072 _000385_hash NULL
84369 +_000387_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000387_hash NULL
84370 +_000389_hash ntfs_ucstonls 3-5 23097 _000389_hash NULL
84371 +_000390_hash nvme_alloc_iod 1 56027 _000390_hash NULL
84372 +_000391_hash nvram_write 3 3894 _000391_hash NULL
84373 +_000392_hash o2hb_debug_create 4 18744 _000392_hash NULL
84374 +_000393_hash o2net_send_message_vec 4 879 _001792_hash NULL nohasharray
84375 +_000394_hash ocfs2_control_cfu 2 37750 _000394_hash NULL
84376 +_000395_hash oom_adjust_write 3 41116 _000395_hash NULL
84377 +_000396_hash oom_score_adj_write 3 42594 _000396_hash NULL
84378 +_000397_hash opera1_xilinx_rw 5 31453 _000397_hash NULL
84379 +_000398_hash oprofilefs_ulong_from_user 3 57251 _000398_hash NULL
84380 +_000399_hash opticon_write 4 60775 _000399_hash NULL
84381 +_000400_hash orig_node_add_if 2 32833 _000400_hash NULL
84382 +_000401_hash orig_node_del_if 2 28371 _000401_hash NULL
84383 +_000402_hash p9_check_zc_errors 4 15534 _000402_hash NULL
84384 +_000403_hash packet_buffer_init 2 1607 _000403_hash NULL
84385 +_000404_hash packet_setsockopt 5 17662 _000404_hash NULL
84386 +_000405_hash parse_command 2 37079 _000405_hash NULL
84387 +_000406_hash pcbit_writecmd 2 12332 _000406_hash NULL
84388 +_000407_hash pcmcia_replace_cis 3 57066 _000407_hash NULL
84389 +_000408_hash pgctrl_write 3 50453 _000408_hash NULL
84390 +_000409_hash pg_write 3 40766 _000409_hash NULL
84391 +_000410_hash pidlist_allocate 1 64404 _000410_hash NULL
84392 +_000411_hash pipe_iov_copy_from_user 3 23102 _000411_hash NULL
84393 +_000412_hash pipe_iov_copy_to_user 3 3447 _000412_hash NULL
84394 +_000413_hash pkt_add 3 39897 _000413_hash NULL
84395 +_000414_hash pktgen_if_write 3 55628 _000414_hash NULL
84396 +_000415_hash platform_device_add_data 3 310 _000415_hash NULL
84397 +_000416_hash platform_device_add_resources 3 13289 _000416_hash NULL
84398 +_000417_hash pm_qos_power_write 3 52513 _000417_hash NULL
84399 +_000418_hash pnpbios_proc_write 3 19758 _000418_hash NULL
84400 +_000419_hash pool_allocate 3 42012 _000419_hash NULL
84401 +_000420_hash posix_acl_alloc 1 48063 _000420_hash NULL
84402 +_000421_hash ppp_cp_parse_cr 4 5214 _000421_hash NULL
84403 +_000422_hash ppp_write 3 34034 _000422_hash NULL
84404 +_000423_hash pp_read 3 33210 _000423_hash NULL
84405 +_000424_hash pp_write 3 39554 _000424_hash NULL
84406 +_000425_hash printer_req_alloc 2 62687 _001807_hash NULL nohasharray
84407 +_000426_hash printer_write 3 60276 _000426_hash NULL
84408 +_000427_hash prism2_set_genericelement 3 29277 _000427_hash NULL
84409 +_000428_hash __probe_kernel_read 3 61119 _000428_hash NULL
84410 +_000429_hash __probe_kernel_write 3 29842 _000429_hash NULL
84411 +_000430_hash proc_coredump_filter_write 3 25625 _000430_hash NULL
84412 +_000431_hash _proc_do_string 2 6376 _000431_hash NULL
84413 +_000432_hash process_vm_rw_pages 5-6 15954 _000432_hash NULL
84414 +_000434_hash proc_loginuid_write 3 63648 _000434_hash NULL
84415 +_000435_hash proc_pid_attr_write 3 63845 _000435_hash NULL
84416 +_000436_hash proc_scsi_devinfo_write 3 32064 _000436_hash NULL
84417 +_000437_hash proc_scsi_write 3 29142 _000437_hash NULL
84418 +_000438_hash proc_scsi_write_proc 3 267 _000438_hash NULL
84419 +_000439_hash pstore_mkfile 5 50830 _000439_hash NULL
84420 +_000440_hash pti_char_write 3 60960 _000440_hash NULL
84421 +_000441_hash ptrace_writedata 4 45021 _000441_hash NULL
84422 +_000442_hash pt_write 3 40159 _000442_hash NULL
84423 +_000443_hash pvr2_ioread_set_sync_key 3 59882 _000443_hash NULL
84424 +_000444_hash pvr2_stream_buffer_count 2 33719 _000444_hash NULL
84425 +_000445_hash qdisc_class_hash_alloc 1 18262 _000445_hash NULL
84426 +_000446_hash r3964_write 4 57662 _000446_hash NULL
84427 +_000447_hash raw_seticmpfilter 3 6888 _000447_hash NULL
84428 +_000448_hash raw_setsockopt 5 45800 _000448_hash NULL
84429 +_000449_hash rawv6_seticmpfilter 5 12137 _000449_hash NULL
84430 +_000450_hash ray_cs_essid_proc_write 3 17875 _000450_hash NULL
84431 +_000451_hash rbd_add 3 16366 _000451_hash NULL
84432 +_000452_hash rbd_snap_add 4 19678 _000452_hash NULL
84433 +_000453_hash rdma_set_ib_paths 3 45592 _000453_hash NULL
84434 +_000454_hash rds_page_copy_user 4 35691 _000454_hash NULL
84435 +_000455_hash read 3 9397 _000455_hash NULL
84436 +_000456_hash read_buf 2 20469 _000456_hash NULL
84437 +_000457_hash read_cis_cache 4 29735 _000457_hash NULL
84438 +_000458_hash realloc_buffer 2 25816 _000458_hash NULL
84439 +_000459_hash realloc_packet_buffer 2 25569 _000459_hash NULL
84440 +_000460_hash receive_DataRequest 3 9904 _000460_hash NULL
84441 +_000461_hash recent_mt_proc_write 3 8206 _000461_hash NULL
84442 +_000462_hash regmap_access_read_file 3 37223 _000462_hash NULL
84443 +_000463_hash regmap_bulk_write 4 59049 _000463_hash NULL
84444 +_000464_hash regmap_map_read_file 3 37685 _000464_hash NULL
84445 +_000465_hash regset_tls_set 4 18459 _000465_hash NULL
84446 +_000466_hash reg_w_buf 3 27724 _000466_hash NULL
84447 +_000467_hash reg_w_ixbuf 4 34736 _000467_hash NULL
84448 +_000468_hash remote_settings_file_write 3 22987 _000468_hash NULL
84449 +_000469_hash request_key_auth_new 3 38092 _000469_hash NULL
84450 +_000470_hash restore_i387_fxsave 2 17528 _000470_hash NULL
84451 +_000471_hash revalidate 2 19043 _000471_hash NULL
84452 +_000472_hash rfcomm_sock_setsockopt 5 18254 _000472_hash NULL
84453 +_000473_hash rndis_add_response 2 58544 _000473_hash NULL
84454 +_000474_hash rndis_set_oid 4 6547 _000474_hash NULL
84455 +_000475_hash rngapi_reset 3 34366 _002911_hash NULL nohasharray
84456 +_000476_hash roccat_common_receive 4 53407 _000476_hash NULL
84457 +_000477_hash roccat_common_send 4 12284 _000477_hash NULL
84458 +_000478_hash rpc_malloc 2 43573 _000478_hash NULL
84459 +_000479_hash rt2x00debug_write_bbp 3 8212 _000479_hash NULL
84460 +_000480_hash rt2x00debug_write_csr 3 64753 _000480_hash NULL
84461 +_000481_hash rt2x00debug_write_eeprom 3 23091 _000481_hash NULL
84462 +_000482_hash rt2x00debug_write_rf 3 38195 _000482_hash NULL
84463 +_000483_hash rts51x_read_mem 4 26577 _000483_hash NULL
84464 +_000484_hash rts51x_read_status 4 11830 _000484_hash NULL
84465 +_000485_hash rts51x_write_mem 4 17598 _000485_hash NULL
84466 +_000486_hash rw_copy_check_uvector 3 34271 _000486_hash NULL
84467 +_000487_hash rxrpc_request_key 3 27235 _000487_hash NULL
84468 +_000488_hash rxrpc_server_keyring 3 16431 _000488_hash NULL
84469 +_000489_hash savemem 3 58129 _000489_hash NULL
84470 +_000490_hash sb16_copy_from_user 10-7-6 55836 _000490_hash NULL
84471 +_000493_hash sched_autogroup_write 3 10984 _000493_hash NULL
84472 +_000494_hash scsi_mode_select 6 37330 _000494_hash NULL
84473 +_000495_hash scsi_tgt_copy_sense 3 26933 _000495_hash NULL
84474 +_000496_hash sctp_auth_create_key 1 51641 _000496_hash NULL
84475 +_000497_hash sctp_getsockopt_delayed_ack 2 9232 _000497_hash NULL
84476 +_000498_hash sctp_getsockopt_local_addrs 2 25178 _000498_hash NULL
84477 +_000499_hash sctp_make_abort_user 3 29654 _000499_hash NULL
84478 +_000500_hash sctp_setsockopt_active_key 3 43755 _000500_hash NULL
84479 +_000501_hash sctp_setsockopt_adaptation_layer 3 26935 _001925_hash NULL nohasharray
84480 +_000502_hash sctp_setsockopt_associnfo 3 51684 _000502_hash NULL
84481 +_000503_hash sctp_setsockopt_auth_chunk 3 30843 _000503_hash NULL
84482 +_000504_hash sctp_setsockopt_auth_key 3 3793 _000504_hash NULL
84483 +_000505_hash sctp_setsockopt_autoclose 3 5775 _000505_hash NULL
84484 +_000506_hash sctp_setsockopt_bindx 3 49870 _000506_hash NULL
84485 +_000507_hash __sctp_setsockopt_connectx 3 46949 _000507_hash NULL
84486 +_000508_hash sctp_setsockopt_context 3 31091 _000508_hash NULL
84487 +_000509_hash sctp_setsockopt_default_send_param 3 49578 _000509_hash &_000247_hash
84488 +_000510_hash sctp_setsockopt_delayed_ack 3 40129 _000510_hash NULL
84489 +_000511_hash sctp_setsockopt_del_key 3 42304 _002281_hash NULL nohasharray
84490 +_000512_hash sctp_setsockopt_events 3 18862 _000512_hash NULL
84491 +_000513_hash sctp_setsockopt_hmac_ident 3 11687 _000513_hash NULL
84492 +_000514_hash sctp_setsockopt_initmsg 3 1383 _000514_hash NULL
84493 +_000515_hash sctp_setsockopt_maxburst 3 28041 _000515_hash NULL
84494 +_000516_hash sctp_setsockopt_maxseg 3 11829 _000516_hash NULL
84495 +_000517_hash sctp_setsockopt_peer_addr_params 3 734 _000517_hash NULL
84496 +_000518_hash sctp_setsockopt_peer_primary_addr 3 13440 _000518_hash NULL
84497 +_000519_hash sctp_setsockopt_rtoinfo 3 30941 _000519_hash NULL
84498 +_000520_hash security_context_to_sid_core 2 29248 _000520_hash NULL
84499 +_000521_hash sel_commit_bools_write 3 46077 _000521_hash NULL
84500 +_000522_hash sel_write_avc_cache_threshold 3 2256 _000522_hash NULL
84501 +_000523_hash sel_write_bool 3 46996 _000523_hash NULL
84502 +_000524_hash sel_write_checkreqprot 3 60774 _000524_hash NULL
84503 +_000525_hash sel_write_disable 3 10511 _000525_hash NULL
84504 +_000526_hash sel_write_enforce 3 48998 _000526_hash NULL
84505 +_000527_hash sel_write_load 3 63830 _000527_hash NULL
84506 +_000528_hash send_bulk_static_data 3 61932 _000528_hash NULL
84507 +_000529_hash send_control_msg 6 48498 _000529_hash NULL
84508 +_000530_hash set_aoe_iflist 2 42737 _000530_hash NULL
84509 +_000531_hash setkey_unaligned 3 39474 _000531_hash NULL
84510 +_000532_hash set_registers 3 53582 _000532_hash NULL
84511 +_000533_hash setsockopt 5 54539 _000533_hash NULL
84512 +_000534_hash setup_req 3 5848 _000534_hash NULL
84513 +_000535_hash setup_window 7-5-4-2 59178 _000535_hash NULL
84514 +_000536_hash setxattr 4 37006 _000536_hash NULL
84515 +_000537_hash sfq_alloc 1 2861 _000537_hash NULL
84516 +_000538_hash sg_kmalloc 1 50240 _000538_hash NULL
84517 +_000539_hash sgl_map_user_pages 2 30610 _000539_hash NULL
84518 +_000540_hash shash_setkey_unaligned 3 8620 _000540_hash NULL
84519 +_000541_hash shmem_xattr_alloc 2 61190 _000541_hash NULL
84520 +_000542_hash sierra_setup_urb 5 46029 _000542_hash NULL
84521 +_000543_hash simple_transaction_get 3 50633 _000543_hash NULL
84522 +_000544_hash simple_write_to_buffer 2-5 3122 _000544_hash NULL
84523 +_000546_hash sisusb_send_bulk_msg 3 17864 _000546_hash NULL
84524 +_000547_hash skb_add_data 3 48363 _000547_hash NULL
84525 +_000548_hash skb_do_copy_data_nocache 5 12465 _000548_hash NULL
84526 +_000549_hash sl_alloc_bufs 2 50380 _000549_hash NULL
84527 +_000550_hash sl_realloc_bufs 2 64086 _000550_hash NULL
84528 +_000551_hash smk_write_ambient 3 45691 _000551_hash NULL
84529 +_000552_hash smk_write_cipso 3 17989 _000552_hash NULL
84530 +_000553_hash smk_write_direct 3 46363 _000553_hash NULL
84531 +_000554_hash smk_write_doi 3 49621 _000554_hash NULL
84532 +_000555_hash smk_write_load_list 3 52280 _000555_hash NULL
84533 +_000556_hash smk_write_logging 3 2618 _000556_hash NULL
84534 +_000557_hash smk_write_netlbladdr 3 42525 _000557_hash NULL
84535 +_000558_hash smk_write_onlycap 3 14400 _000558_hash NULL
84536 +_000559_hash snd_ctl_elem_user_tlv 3 11695 _000559_hash NULL
84537 +_000560_hash snd_emu10k1_fx8010_read 5 9605 _000560_hash NULL
84538 +_000561_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000561_hash NULL
84539 +_000563_hash snd_gus_dram_poke 4 18525 _000563_hash NULL
84540 +_000564_hash snd_hdsp_playback_copy 5 20676 _000564_hash NULL
84541 +_000565_hash snd_info_entry_write 3 63474 _000565_hash NULL
84542 +_000566_hash snd_korg1212_copy_from 6 36169 _000566_hash NULL
84543 +_000567_hash snd_mem_proc_write 3 9786 _000567_hash NULL
84544 +_000568_hash snd_midi_channel_init_set 1 30092 _000568_hash NULL
84545 +_000569_hash snd_midi_event_new 1 9893 _000750_hash NULL nohasharray
84546 +_000570_hash snd_opl4_mem_proc_write 5 9670 _000570_hash NULL
84547 +_000571_hash snd_pcm_aio_read 3 13900 _000571_hash NULL
84548 +_000572_hash snd_pcm_aio_write 3 28738 _000572_hash NULL
84549 +_000573_hash snd_pcm_oss_write1 3 10872 _000573_hash NULL
84550 +_000574_hash snd_pcm_oss_write2 3 27332 _000574_hash NULL
84551 +_000575_hash snd_rawmidi_kernel_write1 4 56847 _000575_hash NULL
84552 +_000576_hash snd_rme9652_playback_copy 5 20970 _000576_hash NULL
84553 +_000577_hash snd_sb_csp_load_user 3 45190 _000577_hash NULL
84554 +_000578_hash snd_usb_ctl_msg 8 8436 _000578_hash NULL
84555 +_000579_hash sock_bindtodevice 3 50942 _000579_hash NULL
84556 +_000580_hash sock_kmalloc 2 62205 _000580_hash NULL
84557 +_000581_hash spidev_write 3 44510 _000581_hash NULL
84558 +_000582_hash squashfs_read_table 3 16945 _000582_hash NULL
84559 +_000583_hash srpt_alloc_ioctx 2-3 51042 _000583_hash NULL
84560 +_000585_hash srpt_alloc_ioctx_ring 2-4-3 49330 _000585_hash NULL
84561 +_000586_hash st5481_setup_isocpipes 6-4 61340 _000586_hash NULL
84562 +_000587_hash sta_agg_status_write 3 45164 _000587_hash NULL
84563 +_000588_hash svc_setsockopt 5 36876 _000588_hash NULL
84564 +_000589_hash sys_add_key 4 61288 _000589_hash NULL
84565 +_000590_hash sys_modify_ldt 3 18824 _000590_hash NULL
84566 +_000591_hash sys_semtimedop 3 4486 _000591_hash NULL
84567 +_000592_hash sys_setdomainname 2 4373 _000592_hash NULL
84568 +_000593_hash sys_sethostname 2 42962 _000593_hash NULL
84569 +_000594_hash tda10048_writeregbulk 4 11050 _000594_hash NULL
84570 +_000595_hash tipc_log_resize 1 34803 _000595_hash NULL
84571 +_000596_hash tomoyo_write_self 3 45161 _000596_hash NULL
84572 +_000597_hash tower_write 3 8580 _000597_hash NULL
84573 +_000598_hash tpm_write 3 50798 _000598_hash NULL
84574 +_000599_hash trusted_instantiate 3 4710 _000599_hash NULL
84575 +_000600_hash trusted_update 3 12664 _000600_hash NULL
84576 +_000601_hash tt_changes_fill_buffer 3 62649 _000601_hash NULL
84577 +_000602_hash tty_buffer_alloc 2 45437 _000602_hash NULL
84578 +_000603_hash __tun_chr_ioctl 4 22300 _000603_hash NULL
84579 +_000604_hash ubi_more_leb_change_data 4 63534 _000604_hash NULL
84580 +_000605_hash ubi_more_update_data 4 39189 _000605_hash NULL
84581 +_000606_hash ubi_resize_volume 2 50172 _000606_hash NULL
84582 +_000607_hash udf_alloc_i_data 2 35786 _000607_hash NULL
84583 +_000608_hash uea_idma_write 3 64139 _000608_hash NULL
84584 +_000609_hash uea_request 4 47613 _000609_hash NULL
84585 +_000610_hash uea_send_modem_cmd 3 3888 _000610_hash NULL
84586 +_000611_hash uio_write 3 43202 _000611_hash NULL
84587 +_000612_hash um_idi_write 3 18293 _000612_hash NULL
84588 +_000613_hash us122l_ctl_msg 8 13330 _000613_hash NULL
84589 +_000614_hash usb_alloc_urb 1 43436 _000614_hash NULL
84590 +_000615_hash usblp_new_writeurb 2 22894 _000615_hash NULL
84591 +_000616_hash usblp_write 3 23178 _000616_hash NULL
84592 +_000617_hash usbtest_alloc_urb 3-5 34446 _000617_hash NULL
84593 +_000619_hash usbtmc_write 3 64340 _000619_hash NULL
84594 +_000620_hash user_instantiate 3 26131 _000620_hash NULL
84595 +_000621_hash user_update 3 41332 _000621_hash NULL
84596 +_000622_hash uvc_simplify_fraction 3 31303 _000622_hash NULL
84597 +_000623_hash uwb_rc_cmd_done 4 35892 _000623_hash NULL
84598 +_000624_hash uwb_rc_neh_grok_event 3 55799 _000624_hash NULL
84599 +_000625_hash v9fs_alloc_rdir_buf 2 42150 _000625_hash NULL
84600 +_000626_hash __vb2_perform_fileio 3 63033 _000626_hash NULL
84601 +_000627_hash vc_do_resize 3-4 48842 _000627_hash NULL
84602 +_000629_hash vcs_write 3 3910 _000629_hash NULL
84603 +_000630_hash vfd_write 3 14717 _000630_hash NULL
84604 +_000631_hash vga_arb_write 3 36112 _000631_hash NULL
84605 +_000632_hash vga_switcheroo_debugfs_write 3 33984 _000632_hash NULL
84606 +_000633_hash vhci_get_user 3 45039 _000633_hash NULL
84607 +_000634_hash video_proc_write 3 6724 _000634_hash NULL
84608 +_000635_hash vlsi_alloc_ring 3-4 57003 _000635_hash NULL
84609 +_000637_hash __vmalloc 1 61168 _000637_hash NULL
84610 +_000638_hash vmalloc_32 1 1135 _000638_hash NULL
84611 +_000639_hash vmalloc_32_user 1 37519 _000639_hash NULL
84612 +_000640_hash vmalloc_exec 1 36132 _000640_hash NULL
84613 +_000641_hash vmalloc_node 1 58700 _000641_hash NULL
84614 +_000642_hash __vmalloc_node_flags 1 30352 _000642_hash NULL
84615 +_000643_hash vmalloc_user 1 32308 _000643_hash NULL
84616 +_000644_hash vol_cdev_direct_write 3 20751 _000644_hash NULL
84617 +_000645_hash vp_request_msix_vectors 2 28849 _000645_hash NULL
84618 +_000646_hash vring_add_indirect 3-4 20737 _000646_hash NULL
84619 +_000648_hash vring_new_virtqueue 1 9671 _000648_hash NULL
84620 +_000649_hash vxge_os_dma_malloc 2 46184 _000649_hash NULL
84621 +_000650_hash vxge_os_dma_malloc_async 3 56348 _000650_hash NULL
84622 +_000651_hash wdm_write 3 53735 _000651_hash NULL
84623 +_000652_hash wiimote_hid_send 3 48528 _000652_hash NULL
84624 +_000653_hash wl1273_fm_fops_write 3 60621 _000653_hash NULL
84625 +_000654_hash wlc_phy_loadsampletable_nphy 3 64367 _000654_hash NULL
84626 +_000655_hash write 3 62671 _000655_hash NULL
84627 +_000656_hash write_flush 3 50803 _000656_hash NULL
84628 +_000657_hash write_rio 3 54837 _000657_hash NULL
84629 +_000658_hash x25_asy_change_mtu 2 26928 _000658_hash NULL
84630 +_000659_hash xdi_copy_from_user 4 8395 _000659_hash NULL
84631 +_000660_hash xfrm_dst_alloc_copy 3 3034 _000660_hash NULL
84632 +_000661_hash xfrm_user_policy 4 62573 _000661_hash NULL
84633 +_000662_hash xfs_attrmulti_attr_set 4 59346 _000662_hash NULL
84634 +_000663_hash xfs_handle_to_dentry 3 12135 _000663_hash NULL
84635 +_000664_hash __xip_file_write 3-4 2733 _000664_hash NULL
84636 +_000665_hash xprt_rdma_allocate 2 31372 _000665_hash NULL
84637 +_000666_hash zd_usb_iowrite16v_async 3 23984 _000666_hash NULL
84638 +_000667_hash zd_usb_read_fw 4 22049 _000667_hash NULL
84639 +_000668_hash zerocopy_sg_from_iovec 3 11828 _000668_hash NULL
84640 +_000669_hash zoran_write 3 22404 _000669_hash NULL
84641 +_000671_hash acpi_ex_allocate_name_string 2-1 7685 _002855_hash NULL nohasharray
84642 +_000672_hash acpi_os_allocate_zeroed 1 37422 _000672_hash NULL
84643 +_000673_hash acpi_ut_initialize_buffer 2 47143 _002314_hash NULL nohasharray
84644 +_000674_hash ad7879_spi_xfer 3 36311 _000674_hash NULL
84645 +_000675_hash add_new_gdb 3 27643 _000675_hash NULL
84646 +_000676_hash add_numbered_child 5 14273 _000676_hash NULL
84647 +_000677_hash add_res_range 4 21310 _000677_hash NULL
84648 +_000678_hash addtgt 3 54703 _000678_hash NULL
84649 +_000679_hash add_uuid 4 49831 _000679_hash NULL
84650 +_000680_hash afs_cell_alloc 2 24052 _000680_hash NULL
84651 +_000681_hash aggr_recv_addba_req_evt 4 38037 _000681_hash NULL
84652 +_000682_hash agp_create_memory 1 1075 _000682_hash NULL
84653 +_000683_hash agp_create_user_memory 1 62955 _000683_hash NULL
84654 +_000684_hash alg_setsockopt 5 20985 _000684_hash NULL
84655 +_000685_hash alloc_async 1 14208 _000685_hash NULL
84656 +_000686_hash ___alloc_bootmem_nopanic 1 53626 _000686_hash NULL
84657 +_000687_hash alloc_buf 1 34532 _000687_hash NULL
84658 +_000688_hash alloc_chunk 1 49575 _000688_hash NULL
84659 +_000689_hash alloc_context 1 41283 _000689_hash NULL
84660 +_000690_hash alloc_ctrl_packet 1 44667 _000690_hash NULL
84661 +_000691_hash alloc_data_packet 1 46698 _000691_hash NULL
84662 +_000692_hash alloc_dca_provider 2 59670 _000692_hash NULL
84663 +_000693_hash __alloc_dev_table 2 54343 _000693_hash NULL
84664 +_000694_hash alloc_ep 1 17269 _000694_hash NULL
84665 +_000695_hash __alloc_extent_buffer 3 15093 _000695_hash NULL
84666 +_000696_hash alloc_group_attrs 2 9194 _000719_hash NULL nohasharray
84667 +_000697_hash alloc_large_system_hash 2 64490 _000697_hash NULL
84668 +_000698_hash alloc_netdev_mqs 1 30030 _000698_hash NULL
84669 +_000699_hash __alloc_objio_seg 1 7203 _000699_hash NULL
84670 +_000700_hash alloc_ring 2-4 15345 _000700_hash NULL
84671 +_000701_hash alloc_ring 2-4 39151 _000701_hash NULL
84672 +_000704_hash alloc_session 1-2 64171 _000704_hash NULL
84673 +_000708_hash alloc_smp_req 1 51337 _000708_hash NULL
84674 +_000709_hash alloc_smp_resp 1 3566 _000709_hash NULL
84675 +_000710_hash alloc_ts_config 1 45775 _000710_hash NULL
84676 +_000711_hash alloc_upcall 2 62186 _000711_hash NULL
84677 +_000712_hash altera_drscan 2 48698 _000712_hash NULL
84678 +_000713_hash altera_irscan 2 62396 _000713_hash NULL
84679 +_000714_hash altera_set_dr_post 2 54291 _000714_hash NULL
84680 +_000715_hash altera_set_dr_pre 2 64862 _000715_hash NULL
84681 +_000716_hash altera_set_ir_post 2 20948 _000716_hash NULL
84682 +_000717_hash altera_set_ir_pre 2 54103 _000717_hash NULL
84683 +_000718_hash altera_swap_dr 2 50090 _000718_hash NULL
84684 +_000719_hash altera_swap_ir 2 9194 _000719_hash &_000696_hash
84685 +_000720_hash amd_create_gatt_pages 1 20537 _000720_hash NULL
84686 +_000721_hash aoechr_write 3 62883 _001352_hash NULL nohasharray
84687 +_000722_hash applesmc_create_nodes 2 49392 _000722_hash NULL
84688 +_000723_hash array_zalloc 1-2 7519 _000723_hash NULL
84689 +_000725_hash arvo_sysfs_read 6 31617 _000725_hash NULL
84690 +_000726_hash arvo_sysfs_write 6 3311 _000726_hash NULL
84691 +_000727_hash asd_store_update_bios 4 10165 _000727_hash NULL
84692 +_000728_hash ata_host_alloc 2 46094 _000728_hash NULL
84693 +_000729_hash atalk_sendmsg 4 21677 _000729_hash NULL
84694 +_000730_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000730_hash NULL
84695 +_000731_hash ath6kl_mgmt_tx 9 21153 _000731_hash NULL
84696 +_000732_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000732_hash NULL
84697 +_000733_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000733_hash NULL
84698 +_000734_hash ath_descdma_setup 5 12257 _000734_hash NULL
84699 +_000735_hash ath_rx_edma_init 2 65483 _000735_hash NULL
84700 +_000736_hash ati_create_gatt_pages 1 4722 _003185_hash NULL nohasharray
84701 +_000737_hash au0828_init_isoc 2-3 61917 _000737_hash NULL
84702 +_000739_hash audit_init_entry 1 38644 _000739_hash NULL
84703 +_000740_hash ax25_sendmsg 4 62770 _000740_hash NULL
84704 +_000741_hash b1_alloc_card 1 36155 _000741_hash NULL
84705 +_000742_hash b43_nphy_load_samples 3 36481 _000742_hash NULL
84706 +_000743_hash bio_copy_user_iov 4 37660 _000743_hash NULL
84707 +_000744_hash __bio_map_kern 2-3 47379 _000744_hash NULL
84708 +_000746_hash blk_register_region 1-2 51424 _000746_hash NULL
84709 +_000748_hash bm_entry_write 3 28338 _000748_hash NULL
84710 +_000749_hash bm_realloc_pages 2 9431 _000749_hash NULL
84711 +_000750_hash bm_register_write 3 9893 _000750_hash &_000569_hash
84712 +_000751_hash bm_status_write 3 12964 _000751_hash NULL
84713 +_000752_hash br_mdb_rehash 2 42643 _000752_hash NULL
84714 +_000753_hash btrfs_copy_from_user 3-1 43806 _000753_hash NULL
84715 +_000754_hash btrfs_insert_delayed_dir_index 4 63720 _000754_hash NULL
84716 +_000755_hash __btrfs_map_block 3 49839 _000755_hash NULL
84717 +_000756_hash __c4iw_init_resource_fifo 3 8334 _000756_hash NULL
84718 +_000757_hash cache_downcall 3 13666 _000757_hash NULL
84719 +_000758_hash cache_slow_downcall 2 8570 _000758_hash NULL
84720 +_000759_hash ca_extend 2 64541 _000759_hash NULL
84721 +_000760_hash caif_seqpkt_sendmsg 4 22961 _000760_hash NULL
84722 +_000761_hash caif_stream_sendmsg 4 9110 _000761_hash NULL
84723 +_000762_hash carl9170_cmd_buf 3 950 _000762_hash NULL
84724 +_000763_hash cdev_add 2-3 38176 _000763_hash NULL
84725 +_000765_hash cdrom_read_cdda 4 50478 _000765_hash NULL
84726 +_000766_hash ceph_dns_resolve_name 1-2 62488 _000766_hash NULL
84727 +_000767_hash ceph_msgpool_get 2 54258 _000767_hash NULL
84728 +_000768_hash cfg80211_connect_result 4-6 56515 _000768_hash NULL
84729 +_000770_hash cfg80211_disconnected 4 57 _000770_hash NULL
84730 +_000771_hash cfg80211_inform_bss 8 19332 _000771_hash NULL
84731 +_000772_hash cfg80211_inform_bss_frame 4 41078 _000772_hash NULL
84732 +_000773_hash cfg80211_mlme_register_mgmt 5 19852 _000773_hash NULL
84733 +_000774_hash cfg80211_roamed_bss 4-6 50198 _000774_hash NULL
84734 +_000776_hash cifs_readdata_alloc 1 50318 _000776_hash NULL
84735 +_000777_hash cifs_readv_from_socket 3 19109 _000777_hash NULL
84736 +_000778_hash cifs_writedata_alloc 1 32880 _003119_hash NULL nohasharray
84737 +_000779_hash cnic_alloc_dma 3 34641 _000779_hash NULL
84738 +_000780_hash configfs_write_file 3 61621 _000780_hash NULL
84739 +_000781_hash construct_key 3 11329 _000781_hash NULL
84740 +_000782_hash context_alloc 3 24645 _000782_hash NULL
84741 +_000783_hash copy_to_user 3 57835 _000783_hash NULL
84742 +_000784_hash create_attr_set 1 22861 _000784_hash NULL
84743 +_000785_hash create_bounce_buffer 3 39155 _000785_hash NULL
84744 +_000786_hash create_gpadl_header 2 19064 _000786_hash NULL
84745 +_000787_hash _create_sg_bios 4 31244 _000787_hash NULL
84746 +_000788_hash cryptd_alloc_instance 2-3 18048 _000788_hash NULL
84747 +_000790_hash crypto_ahash_setkey 3 55134 _000790_hash NULL
84748 +_000791_hash crypto_alloc_instance2 3 25277 _000791_hash NULL
84749 +_000792_hash crypto_shash_setkey 3 60483 _000792_hash NULL
84750 +_000793_hash cx231xx_init_bulk 3-2 47024 _000793_hash NULL
84751 +_000794_hash cx231xx_init_isoc 2-3 56453 _000794_hash NULL
84752 +_000796_hash cx231xx_init_vbi_isoc 2-3 28053 _000796_hash NULL
84753 +_000798_hash cxgb_alloc_mem 1 24007 _000798_hash NULL
84754 +_000799_hash cxgbi_device_portmap_create 3 25747 _000799_hash NULL
84755 +_000800_hash cxgbi_device_register 1-2 36746 _000800_hash NULL
84756 +_000802_hash __cxio_init_resource_fifo 3 23447 _000802_hash NULL
84757 +_000803_hash dccp_sendmsg 4 56058 _000803_hash NULL
84758 +_000804_hash ddp_make_gl 1 12179 _000804_hash NULL
84759 +_000805_hash depth_write 3 3021 _000805_hash NULL
84760 +_000806_hash dev_irnet_write 3 11398 _000806_hash NULL
84761 +_000807_hash dev_set_alias 3 50084 _000807_hash NULL
84762 +_000808_hash dev_write 3 7708 _000808_hash NULL
84763 +_000809_hash dfs_global_file_write 3 6112 _000809_hash NULL
84764 +_000810_hash dgram_sendmsg 4 45679 _000810_hash NULL
84765 +_000811_hash disconnect 4 32521 _000811_hash NULL
84766 +_000812_hash dma_attach 6-7 50831 _000812_hash NULL
84767 +_000814_hash dn_sendmsg 4 38390 _000814_hash NULL
84768 +_000815_hash do_dccp_setsockopt 5 54377 _003160_hash NULL nohasharray
84769 +_000816_hash do_jffs2_setxattr 5 25910 _000816_hash NULL
84770 +_000817_hash do_msgsnd 4 1387 _000817_hash NULL
84771 +_000818_hash do_raw_setsockopt 5 55215 _000818_hash NULL
84772 +_000819_hash do_readv_writev 4 51849 _000819_hash NULL
84773 +_000820_hash do_sync 1 9604 _000820_hash NULL
84774 +_000821_hash dup_array 3 33551 _000821_hash NULL
84775 +_000822_hash dvb_audio_write 3 51275 _000822_hash NULL
84776 +_000823_hash dvb_ca_en50221_init 4 45718 _000823_hash NULL
84777 +_000824_hash dvb_video_write 3 754 _000824_hash NULL
84778 +_000825_hash econet_sendmsg 4 51430 _000825_hash NULL
84779 +_000826_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000826_hash NULL
84780 +_000827_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000827_hash NULL
84781 +_000828_hash ecryptfs_send_message_locked 2 31801 _000828_hash NULL
84782 +_000829_hash edac_device_alloc_ctl_info 1 5941 _000829_hash NULL
84783 +_000830_hash edac_mc_alloc 1 54846 _000830_hash NULL
84784 +_000831_hash edac_pci_alloc_ctl_info 1 63388 _000831_hash NULL
84785 +_000832_hash efivar_create_sysfs_entry 2 19485 _000832_hash NULL
84786 +_000833_hash em28xx_alloc_isoc 4 46892 _000833_hash NULL
84787 +_000834_hash enable_write 3 30456 _000834_hash NULL
84788 +_000835_hash enclosure_register 3 57412 _000835_hash NULL
84789 +_000836_hash ext4_kvzalloc 1 47605 _000836_hash NULL
84790 +_000837_hash extend_netdev_table 2 31680 _000837_hash NULL
84791 +_000838_hash __feat_register_sp 6 64712 _000838_hash NULL
84792 +_000839_hash __ffs_ep0_read_events 3 48868 _000839_hash NULL
84793 +_000840_hash ffs_ep0_write 3 9438 _000840_hash NULL
84794 +_000841_hash ffs_epfile_read 3 18775 _000841_hash NULL
84795 +_000842_hash ffs_epfile_write 3 48014 _000842_hash NULL
84796 +_000843_hash fib_info_hash_alloc 1 9075 _000843_hash NULL
84797 +_000844_hash fillonedir 3 41746 _000844_hash NULL
84798 +_000845_hash flexcop_device_kmalloc 1 54793 _000845_hash NULL
84799 +_000846_hash frame_alloc 4 15981 _000846_hash NULL
84800 +_000847_hash fw_node_create 2 9559 _000847_hash NULL
84801 +_000848_hash garmin_read_process 3 27509 _000848_hash NULL
84802 +_000849_hash garp_request_join 4 7471 _000849_hash NULL
84803 +_000850_hash get_derived_key 4 61100 _000850_hash NULL
84804 +_000851_hash get_entry 4 16003 _000851_hash NULL
84805 +_000852_hash get_free_de 2 33714 _000852_hash NULL
84806 +_000853_hash get_new_cssid 2 51665 _000853_hash NULL
84807 +_000854_hash getxattr 4 24398 _003728_hash NULL nohasharray
84808 +_000855_hash gspca_dev_probe2 4 59833 _000855_hash NULL
84809 +_000856_hash hcd_alloc_coherent 5 55862 _000856_hash NULL
84810 +_000857_hash hci_sock_sendmsg 4 37420 _000857_hash NULL
84811 +_000858_hash hid_register_field 2-3 4874 _000858_hash NULL
84812 +_000860_hash hid_report_raw_event 4 7024 _000860_hash NULL
84813 +_000861_hash hpi_alloc_control_cache 1 35351 _000861_hash NULL
84814 +_000862_hash hugetlbfs_read_actor 2-5-4 34547 _000862_hash NULL
84815 +_000865_hash hvc_alloc 4 12579 _000865_hash NULL
84816 +_000866_hash __hwahc_dev_set_key 5 46328 _000866_hash NULL
84817 +_000867_hash i2400m_zrealloc_2x 3 54166 _001430_hash NULL nohasharray
84818 +_000868_hash ib_alloc_device 1 26483 _000868_hash NULL
84819 +_000869_hash ib_create_send_mad 5 1196 _000869_hash NULL
84820 +_000870_hash ibmasm_new_command 2 25714 _000870_hash NULL
84821 +_000871_hash ib_send_cm_drep 3 50186 _000871_hash NULL
84822 +_000872_hash ib_send_cm_mra 4 60202 _003875_hash NULL nohasharray
84823 +_000873_hash ib_send_cm_rtu 3 63138 _000873_hash NULL
84824 +_000874_hash ieee80211_key_alloc 3 19065 _000874_hash NULL
84825 +_000875_hash ieee80211_mgmt_tx 9 46860 _000875_hash NULL
84826 +_000876_hash ieee80211_send_probe_req 6-4 6924 _000876_hash NULL
84827 +_000877_hash if_writecmd 2 815 _000877_hash NULL
84828 +_000878_hash init_bch 1-2 64130 _000878_hash NULL
84829 +_000880_hash init_ipath 1 48187 _000880_hash NULL
84830 +_000881_hash init_list_set 2-3 39188 _000881_hash NULL
84831 +_000883_hash init_q 4 132 _000883_hash NULL
84832 +_000884_hash init_state 2 60165 _000884_hash NULL
84833 +_000885_hash init_tag_map 3 57515 _000885_hash NULL
84834 +_000886_hash input_ff_create 2 21240 _000886_hash NULL
84835 +_000887_hash input_mt_init_slots 2 31183 _000887_hash NULL
84836 +_000888_hash interfaces 2 38859 _000888_hash NULL
84837 +_000889_hash ioat2_alloc_ring 2 11172 _000889_hash NULL
84838 +_000890_hash ip_generic_getfrag 3-4 12187 _000890_hash NULL
84839 +_000892_hash ipr_alloc_ucode_buffer 1 40199 _000892_hash NULL
84840 +_000893_hash ip_set_alloc 1 57953 _000893_hash NULL
84841 +_000894_hash ipv6_flowlabel_opt 3 58135 _001125_hash NULL nohasharray
84842 +_000895_hash ipv6_renew_options 5 28867 _000895_hash NULL
84843 +_000896_hash ipxrtr_route_packet 4 54036 _000896_hash NULL
84844 +_000897_hash irda_sendmsg 4 4388 _000897_hash NULL
84845 +_000898_hash irda_sendmsg_dgram 4 38563 _000898_hash NULL
84846 +_000899_hash irda_sendmsg_ultra 4 42047 _000899_hash NULL
84847 +_000900_hash irias_add_octseq_attrib 4 29983 _000900_hash NULL
84848 +_000901_hash irq_alloc_generic_chip 2 26650 _000901_hash NULL
84849 +_000902_hash irq_domain_add_linear 2 29236 _000902_hash NULL
84850 +_000903_hash iscsi_alloc_session 3 49390 _000903_hash NULL
84851 +_000904_hash iscsi_create_conn 2 50425 _000904_hash NULL
84852 +_000905_hash iscsi_create_endpoint 1 15193 _000905_hash NULL
84853 +_000906_hash iscsi_create_iface 5 38510 _000906_hash NULL
84854 +_000907_hash iscsi_decode_text_input 4 58292 _000907_hash NULL
84855 +_000908_hash iscsi_pool_init 2-4 54913 _000908_hash NULL
84856 +_000910_hash iscsit_dump_data_payload 2 38683 _000910_hash NULL
84857 +_000911_hash isdn_write 3 45863 _000911_hash NULL
84858 +_000912_hash isku_receive 4 54130 _000912_hash NULL
84859 +_000913_hash isku_send 4 41542 _000913_hash NULL
84860 +_000914_hash islpci_mgt_transaction 5 23610 _000914_hash NULL
84861 +_000915_hash iso_sched_alloc 1 13377 _002079_hash NULL nohasharray
84862 +_000916_hash ivtv_v4l2_write 3 39226 _000916_hash NULL
84863 +_000917_hash iwl_trans_txq_alloc 3 36147 _000917_hash NULL
84864 +_000918_hash iwmct_fw_parser_init 4 37876 _000918_hash NULL
84865 +_000919_hash iwm_notif_send 6 12295 _000919_hash NULL
84866 +_000920_hash iwm_ntf_calib_res 3 11686 _000920_hash NULL
84867 +_000921_hash iwm_umac_set_config_var 4 17320 _000921_hash NULL
84868 +_000922_hash ixgbe_alloc_q_vector 3-5 45428 _000922_hash NULL
84869 +_000924_hash jbd2_journal_init_revoke 2 51088 _000924_hash NULL
84870 +_000925_hash jffs2_write_dirent 5 37311 _000925_hash NULL
84871 +_000926_hash journal_init_revoke 2 56933 _000926_hash NULL
84872 +_000927_hash keyctl_instantiate_key 3 41855 _000927_hash NULL
84873 +_000928_hash keyctl_instantiate_key_iov 3 16969 _000928_hash NULL
84874 +_000929_hash __kfifo_from_user 3 20399 _000929_hash NULL
84875 +_000930_hash kimage_crash_alloc 3 3233 _000930_hash NULL
84876 +_000931_hash kimage_normal_alloc 3 31140 _000931_hash NULL
84877 +_000932_hash kmem_realloc 2 37489 _000932_hash NULL
84878 +_000933_hash kmem_zalloc 1 11510 _000933_hash NULL
84879 +_000934_hash koneplus_send 4 18226 _000934_hash NULL
84880 +_000935_hash koneplus_sysfs_read 6 42792 _000935_hash NULL
84881 +_000936_hash kovaplus_send 4 10009 _000936_hash NULL
84882 +_000937_hash kvm_read_guest_page_mmu 6 37611 _000937_hash NULL
84883 +_000938_hash kvm_set_irq_routing 3 48704 _000938_hash NULL
84884 +_000939_hash kvm_write_guest_cached 4 11106 _000939_hash NULL
84885 +_000940_hash kvm_write_guest_page 5 63555 _002809_hash NULL nohasharray
84886 +_000941_hash l2cap_skbuff_fromiovec 3-4 35003 _000941_hash NULL
84887 +_000943_hash l2tp_ip_sendmsg 4 50411 _000943_hash NULL
84888 +_000944_hash l2tp_session_create 1 25286 _000944_hash NULL
84889 +_000945_hash lc_create 3 48662 _000945_hash NULL
84890 +_000946_hash leaf_dealloc 3 29566 _000946_hash NULL
84891 +_000947_hash linear_conf 2 23485 _003314_hash NULL nohasharray
84892 +_000948_hash lirc_buffer_init 2-3 53282 _000948_hash NULL
84893 +_000950_hash llc_ui_sendmsg 4 24987 _000950_hash NULL
84894 +_000951_hash lpfc_sli4_queue_alloc 3 62646 _000951_hash NULL
84895 +_000952_hash mce_request_packet 3 1073 _000952_hash NULL
84896 +_000953_hash mdiobus_alloc_size 1 52259 _000953_hash NULL
84897 +_000954_hash media_entity_init 2-4 15870 _001556_hash NULL nohasharray
84898 +_000956_hash memstick_alloc_host 1 142 _000956_hash NULL
84899 +_000957_hash mesh_table_alloc 1 22305 _000957_hash NULL
84900 +_000958_hash mfd_add_devices 4 56753 _000958_hash NULL
84901 +_000959_hash mISDN_sock_sendmsg 4 41035 _000959_hash NULL
84902 +_000960_hash mmc_alloc_host 1 48097 _000960_hash NULL
84903 +_000961_hash mmc_test_alloc_mem 3-2 28102 _000961_hash NULL
84904 +_000962_hash mpi_alloc 1 18094 _000962_hash NULL
84905 +_000963_hash mpihelp_mul_karatsuba_case 5-3 23918 _003873_hash NULL nohasharray
84906 +_000964_hash mpihelp_mul_n 4 16405 _000964_hash NULL
84907 +_000965_hash mpi_set_bit 2 15104 _000965_hash NULL
84908 +_000966_hash mpi_set_highbit 2 37327 _001420_hash NULL nohasharray
84909 +_000967_hash mtd_concat_create 2 14416 _000967_hash NULL
84910 +_000968_hash mvumi_alloc_mem_resource 3 47750 _000968_hash NULL
84911 +_000969_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _000969_hash NULL
84912 +_000970_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _000970_hash NULL
84913 +_000972_hash mwl8k_cmd_set_beacon 4 23110 _000972_hash NULL
84914 +_000973_hash neigh_hash_alloc 1 17595 _000973_hash NULL
84915 +_000974_hash netlink_sendmsg 4 33708 _001172_hash NULL nohasharray
84916 +_000975_hash netxen_alloc_sds_rings 2 13417 _000975_hash NULL
84917 +_000976_hash new_bind_ctl 2 35324 _000976_hash NULL
84918 +_000977_hash new_dir 3 31919 _000977_hash NULL
84919 +_000978_hash new_tape_buffer 2 32866 _000978_hash NULL
84920 +_000979_hash nfc_llcp_build_tlv 3 19536 _000979_hash NULL
84921 +_000980_hash nfc_llcp_send_i_frame 3 59130 _000980_hash NULL
84922 +_000981_hash nfs4_alloc_slots 1 2454 _003914_hash NULL nohasharray
84923 +_000982_hash nfsctl_transaction_write 3 64800 _000982_hash NULL
84924 +_000983_hash nfs_idmap_request_key 3 30208 _000983_hash NULL
84925 +_000984_hash nfs_readdata_alloc 1 9990 _000984_hash NULL
84926 +_000985_hash nfs_writedata_alloc 1 62868 _000985_hash NULL
84927 +_000986_hash nl_pid_hash_zalloc 1 23314 _000986_hash NULL
84928 +_000987_hash nr_sendmsg 4 53656 _000987_hash NULL
84929 +_000988_hash nsm_create_handle 4 38060 _000988_hash NULL
84930 +_000989_hash ntfs_copy_from_user_iovec 3-6 49829 _000989_hash NULL
84931 +_000991_hash ntfs_file_buffered_write 4-6 41442 _000991_hash NULL
84932 +_000993_hash __ntfs_malloc 1 34022 _000993_hash NULL
84933 +_000994_hash nvme_alloc_queue 3 46865 _000994_hash NULL
84934 +_000995_hash ocfs2_acl_from_xattr 2 21604 _000995_hash NULL
84935 +_000996_hash ocfs2_control_message 3 19564 _000996_hash NULL
84936 +_000997_hash opera1_usb_i2c_msgxfer 4 64521 _000997_hash NULL
84937 +_000998_hash _ore_get_io_state 3-5-4 2166 _000998_hash NULL
84938 +_000999_hash orig_hash_add_if 2 53676 _000999_hash NULL
84939 +_001000_hash orig_hash_del_if 2 45080 _001000_hash NULL
84940 +_001001_hash orinoco_set_key 5-7 17878 _001001_hash NULL
84941 +_001003_hash osdmap_set_max_osd 2 57630 _003740_hash NULL nohasharray
84942 +_001004_hash _osd_realloc_seg 3 54352 _001004_hash NULL
84943 +_001005_hash OSDSetBlock 2-4 38986 _001005_hash NULL
84944 +_001007_hash osst_execute 7-6 17607 _001007_hash NULL
84945 +_001008_hash osst_write 3 31581 _001008_hash NULL
84946 +_001009_hash otp_read 2-5-4 10594 _001009_hash NULL
84947 +_001012_hash ovs_vport_alloc 1 33475 _001012_hash NULL
84948 +_001013_hash packet_sendmsg_spkt 4 28885 _001013_hash NULL
84949 +_001014_hash pair_device 4 61175 _001708_hash NULL nohasharray
84950 +_001015_hash pccard_store_cis 6 18176 _001015_hash NULL
84951 +_001016_hash pci_add_cap_save_buffer 3 3426 _001016_hash NULL
84952 +_001017_hash pcnet32_realloc_rx_ring 3 36598 _001017_hash NULL
84953 +_001018_hash pcnet32_realloc_tx_ring 3 38428 _001018_hash NULL
84954 +_001019_hash pcpu_mem_zalloc 1 22948 _001019_hash NULL
84955 +_001020_hash pep_sendmsg 4 62524 _001020_hash NULL
84956 +_001021_hash pfkey_sendmsg 4 47394 _001021_hash NULL
84957 +_001022_hash pidlist_resize 2 496 _001022_hash NULL
84958 +_001023_hash pin_code_reply 4 46510 _001023_hash NULL
84959 +_001024_hash ping_getfrag 3-4 8360 _001024_hash NULL
84960 +_001026_hash pipe_set_size 2 5204 _001026_hash NULL
84961 +_001027_hash pkt_bio_alloc 1 48284 _001027_hash NULL
84962 +_001028_hash platform_create_bundle 4-6 12785 _001028_hash NULL
84963 +_001030_hash play_iframe 3 8219 _001030_hash NULL
84964 +_001031_hash pm8001_store_update_fw 4 55716 _001031_hash NULL
84965 +_001032_hash pmcraid_alloc_sglist 1 9864 _001032_hash NULL
84966 +_001033_hash pn533_dep_link_up 5 7659 _001033_hash NULL
84967 +_001034_hash pnp_alloc 1 24869 _001419_hash NULL nohasharray
84968 +_001035_hash pn_sendmsg 4 12640 _001035_hash NULL
84969 +_001036_hash pppoe_sendmsg 4 48039 _001036_hash NULL
84970 +_001037_hash pppol2tp_sendmsg 4 56420 _001037_hash NULL
84971 +_001038_hash process_vm_rw 3-5 47533 _001038_hash NULL
84972 +_001040_hash process_vm_rw_single_vec 1-2 26213 _001040_hash NULL
84973 +_001042_hash proc_write 3 51003 _001042_hash NULL
84974 +_001043_hash profile_load 3 58267 _001043_hash NULL
84975 +_001044_hash profile_remove 3 8556 _001044_hash NULL
84976 +_001045_hash profile_replace 3 14652 _001045_hash NULL
84977 +_001046_hash pscsi_get_bio 1 56103 _001046_hash NULL
84978 +_001047_hash pyra_send 4 12061 _001047_hash NULL
84979 +_001048_hash qc_capture 3 19298 _001048_hash NULL
84980 +_001049_hash qla4xxx_alloc_work 2 44813 _001049_hash NULL
84981 +_001050_hash qlcnic_alloc_msix_entries 2 46160 _001050_hash NULL
84982 +_001051_hash qlcnic_alloc_sds_rings 2 26795 _001051_hash NULL
84983 +_001052_hash queue_received_packet 5 9657 _001052_hash NULL
84984 +_001053_hash raw_send_hdrinc 4 58803 _001053_hash NULL
84985 +_001054_hash raw_sendmsg 4 23078 _001054_hash &_000022_hash
84986 +_001055_hash rawsock_sendmsg 4 60010 _001055_hash NULL
84987 +_001056_hash rawv6_send_hdrinc 3 35425 _001056_hash NULL
84988 +_001057_hash rb_alloc 1 3102 _001057_hash NULL
84989 +_001058_hash rbd_alloc_coll 1 33678 _001058_hash NULL
84990 +_001059_hash rbd_create_rw_ops 2 4605 _001059_hash NULL
84991 +_001060_hash rds_ib_inc_copy_to_user 3 55007 _001060_hash NULL
84992 +_001061_hash rds_iw_inc_copy_to_user 3 29214 _001061_hash NULL
84993 +_001062_hash rds_message_alloc 1 10517 _001062_hash NULL
84994 +_001063_hash rds_message_copy_from_user 3 45510 _001063_hash NULL
84995 +_001064_hash rds_message_inc_copy_to_user 3 26540 _001064_hash NULL
84996 +_001065_hash redrat3_transmit_ir 3 64244 _001065_hash NULL
84997 +_001066_hash regcache_rbtree_insert_to_block 5 58009 _001066_hash NULL
84998 +_001067_hash _regmap_raw_write 4 42652 _001067_hash NULL
84999 +_001068_hash regmap_register_patch 3 21681 _001068_hash NULL
85000 +_001069_hash relay_alloc_page_array 1 52735 _001069_hash NULL
85001 +_001070_hash remove_uuid 4 64505 _001070_hash NULL
85002 +_001071_hash reshape_ring 2 29147 _001071_hash NULL
85003 +_001072_hash RESIZE_IF_NEEDED 2 56286 _001072_hash NULL
85004 +_001073_hash resize_stripes 2 61650 _001073_hash NULL
85005 +_001074_hash rfcomm_sock_sendmsg 4 37661 _003927_hash NULL nohasharray
85006 +_001075_hash rose_sendmsg 4 20249 _001075_hash NULL
85007 +_001076_hash rxrpc_send_data 5 21553 _001076_hash NULL
85008 +_001077_hash rxrpc_setsockopt 5 50286 _001077_hash NULL
85009 +_001078_hash saa7146_vmalloc_build_pgtable 2 19780 _001078_hash NULL
85010 +_001079_hash saa7164_buffer_alloc_user 2 9627 _001079_hash NULL
85011 +_001081_hash sco_send_frame 3 41815 _001081_hash NULL
85012 +_001082_hash scsi_host_alloc 2 63041 _001082_hash NULL
85013 +_001083_hash scsi_tgt_kspace_exec 8 9522 _001083_hash NULL
85014 +_001084_hash sctp_sendmsg 4 61919 _001084_hash NULL
85015 +_001085_hash sctp_setsockopt 5 44788 _001085_hash NULL
85016 +_001086_hash sctp_setsockopt_connectx 3 6073 _001086_hash NULL
85017 +_001087_hash sctp_setsockopt_connectx_old 3 22631 _001087_hash NULL
85018 +_001088_hash sctp_tsnmap_init 2 36446 _001088_hash NULL
85019 +_001089_hash sctp_user_addto_chunk 2-3 62047 _001089_hash NULL
85020 +_001091_hash security_context_to_sid 2 19839 _001091_hash NULL
85021 +_001092_hash security_context_to_sid_default 2 3492 _003366_hash NULL nohasharray
85022 +_001093_hash security_context_to_sid_force 2 20724 _001093_hash NULL
85023 +_001094_hash selinux_transaction_write 3 59038 _001094_hash NULL
85024 +_001095_hash sel_write_access 3 51704 _001095_hash NULL
85025 +_001096_hash sel_write_create 3 11353 _001096_hash NULL
85026 +_001097_hash sel_write_member 3 28800 _001097_hash NULL
85027 +_001098_hash sel_write_relabel 3 55195 _001098_hash NULL
85028 +_001099_hash sel_write_user 3 45060 _001099_hash NULL
85029 +_001100_hash __seq_open_private 3 40715 _001100_hash NULL
85030 +_001101_hash serverworks_create_gatt_pages 1 46582 _001101_hash NULL
85031 +_001102_hash set_connectable 4 56458 _001102_hash NULL
85032 +_001103_hash set_dev_class 4 39645 _001697_hash NULL nohasharray
85033 +_001104_hash set_discoverable 4 48141 _001104_hash NULL
85034 +_001105_hash setkey 3 14987 _001105_hash NULL
85035 +_001106_hash set_le 4 30581 _001106_hash NULL
85036 +_001107_hash set_link_security 4 4502 _001107_hash NULL
85037 +_001108_hash set_local_name 4 55757 _001108_hash NULL
85038 +_001109_hash set_powered 4 12129 _001109_hash NULL
85039 +_001110_hash set_ssp 4 62411 _001110_hash NULL
85040 +_001111_hash sg_build_sgat 3 60179 _001111_hash &_000305_hash
85041 +_001112_hash sg_read_oxfer 3 51724 _001112_hash NULL
85042 +_001113_hash shmem_xattr_set 4 11843 _001113_hash NULL
85043 +_001114_hash simple_alloc_urb 3 60420 _001114_hash NULL
85044 +_001115_hash sisusb_send_bridge_packet 2 11649 _001115_hash NULL
85045 +_001116_hash sisusb_send_packet 2 20891 _001116_hash NULL
85046 +_001117_hash skb_add_data_nocache 4 4682 _001117_hash NULL
85047 +_001118_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001118_hash NULL
85048 +_001121_hash skb_copy_to_page_nocache 6 58624 _001121_hash NULL
85049 +_001122_hash sk_chk_filter 2 42095 _001122_hash NULL
85050 +_001123_hash skcipher_sendmsg 4 30290 _001123_hash NULL
85051 +_001124_hash sl_change_mtu 2 7396 _001124_hash NULL
85052 +_001125_hash slhc_init 1-2 58135 _001125_hash &_000894_hash
85053 +_001127_hash sm501_create_subdev 3-4 48668 _003678_hash NULL nohasharray
85054 +_001129_hash smk_write_access 3 49561 _001129_hash NULL
85055 +_001130_hash snapshot_write 3 28351 _001130_hash NULL
85056 +_001131_hash snd_ac97_pcm_assign 2 30218 _001131_hash NULL
85057 +_001132_hash snd_card_create 4 64418 _001411_hash NULL nohasharray
85058 +_001133_hash snd_emux_create_port 3 42533 _001133_hash NULL
85059 +_001134_hash snd_gus_dram_write 4 38784 _001134_hash NULL
85060 +_001135_hash snd_midi_channel_alloc_set 1 28153 _001135_hash NULL
85061 +_001136_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001136_hash NULL
85062 +_001137_hash snd_pcm_oss_sync1 2 45298 _001137_hash NULL
85063 +_001138_hash snd_pcm_oss_write 3 38108 _001138_hash NULL
85064 +_001139_hash snd_pcm_plugin_build 5 25505 _001139_hash NULL
85065 +_001140_hash snd_rawmidi_kernel_write 3 25106 _001140_hash NULL
85066 +_001141_hash snd_rawmidi_write 3 28008 _001141_hash NULL
85067 +_001142_hash snd_rme32_playback_copy 5 43732 _001142_hash NULL
85068 +_001143_hash snd_rme96_playback_copy 5 13111 _001143_hash NULL
85069 +_001144_hash snd_seq_device_new 4 31753 _001144_hash NULL
85070 +_001145_hash snd_seq_oss_readq_new 2 14283 _001145_hash NULL
85071 +_001146_hash snd_vx_create 4 40948 _001146_hash NULL
85072 +_001147_hash sock_setsockopt 5 50088 _001147_hash NULL
85073 +_001148_hash sound_write 3 5102 _001148_hash NULL
85074 +_001149_hash _sp2d_alloc 1-3-2 16944 _001149_hash NULL
85075 +_001150_hash spi_alloc_master 2 45223 _001150_hash NULL
85076 +_001151_hash spidev_message 3 5518 _001151_hash NULL
85077 +_001152_hash spi_register_board_info 2 35651 _001152_hash NULL
85078 +_001153_hash squashfs_cache_init 2 41656 _001153_hash NULL
85079 +_001154_hash squashfs_read_data 6 59440 _001154_hash NULL
85080 +_001155_hash srp_alloc_iu 2 44227 _001155_hash NULL
85081 +_001156_hash srp_iu_pool_alloc 2 17920 _001156_hash NULL
85082 +_001157_hash srp_ring_alloc 2 26760 _001157_hash NULL
85083 +_001159_hash start_isoc_chain 2 565 _001159_hash NULL
85084 +_001160_hash stk_prepare_sio_buffers 2 57168 _001160_hash NULL
85085 +_001161_hash store_iwmct_log_level 4 60209 _001161_hash NULL
85086 +_001162_hash store_iwmct_log_level_fw 4 1974 _001162_hash NULL
85087 +_001163_hash st_write 3 16874 _001163_hash NULL
85088 +_001164_hash svc_pool_map_alloc_arrays 2 47181 _001164_hash NULL
85089 +_001165_hash symtab_init 2 61050 _001165_hash NULL
85090 +_001166_hash sys_bind 3 10799 _001166_hash NULL
85091 +_001167_hash sys_connect 3 15291 _003291_hash NULL nohasharray
85092 +_001168_hash sys_flistxattr 3 41407 _001168_hash NULL
85093 +_001169_hash sys_fsetxattr 4 49736 _001169_hash NULL
85094 +_001170_hash sysfs_write_file 3 57116 _001170_hash NULL
85095 +_001171_hash sys_ipc 3 4889 _001171_hash NULL
85096 +_001172_hash sys_keyctl 4 33708 _001172_hash &_000974_hash
85097 +_001173_hash sys_listxattr 3 27833 _001173_hash NULL
85098 +_001174_hash sys_llistxattr 3 4532 _001174_hash NULL
85099 +_001175_hash sys_lsetxattr 4 61177 _001175_hash NULL
85100 +_001176_hash sys_mq_timedsend 3 57661 _001176_hash NULL
85101 +_001177_hash sys_sched_setaffinity 2 32046 _001177_hash NULL
85102 +_001178_hash sys_semop 3 39457 _001178_hash NULL
85103 +_001179_hash sys_sendto 6 20809 _001179_hash NULL
85104 +_001180_hash sys_setxattr 4 37880 _001180_hash NULL
85105 +_001181_hash t4_alloc_mem 1 32342 _001181_hash NULL
85106 +_001182_hash tcf_hash_create 4 54360 _001182_hash NULL
85107 +_001183_hash __team_options_register 3 63941 _001183_hash NULL
85108 +_001184_hash test_unaligned_bulk 3 52333 _001184_hash NULL
85109 +_001185_hash tifm_alloc_adapter 1 10903 _001185_hash NULL
85110 +_001186_hash timeout_write 3 50991 _001186_hash NULL
85111 +_001187_hash tipc_link_send_sections_fast 4 37920 _001187_hash NULL
85112 +_001188_hash tipc_subseq_alloc 1 5957 _001188_hash NULL
85113 +_001189_hash tm6000_read_write_usb 7 50774 _002917_hash NULL nohasharray
85114 +_001190_hash tnode_alloc 1 49407 _001190_hash NULL
85115 +_001191_hash tomoyo_commit_ok 2 20167 _001191_hash NULL
85116 +_001192_hash tomoyo_scan_bprm 2-4 15642 _001192_hash NULL
85117 +_001194_hash tps65910_i2c_write 3 39531 _001194_hash NULL
85118 +_001195_hash ts_write 3 64336 _001195_hash NULL
85119 +_001196_hash ttusb2_msg 4 3100 _001196_hash NULL
85120 +_001197_hash tty_write 3 5494 _001197_hash NULL
85121 +_001198_hash ubi_dbg_check_all_ff 4 59810 _001198_hash NULL
85122 +_001199_hash ubi_dbg_check_write 5 48525 _001199_hash NULL
85123 +_001200_hash ubifs_setxattr 4 59650 _001370_hash NULL nohasharray
85124 +_001201_hash udf_sb_alloc_partition_maps 2 62313 _001201_hash NULL
85125 +_001202_hash udplite_getfrag 3-4 14479 _001202_hash NULL
85126 +_001204_hash ulong_write_file 3 26485 _001204_hash NULL
85127 +_001205_hash unix_dgram_sendmsg 4 45699 _001205_hash NULL
85128 +_001206_hash unix_stream_sendmsg 4 61455 _001206_hash NULL
85129 +_001207_hash unlink_queued 3-4 645 _001207_hash NULL
85130 +_001208_hash update_pmkid 4 2481 _001208_hash NULL
85131 +_001209_hash usb_alloc_coherent 2 65444 _001209_hash NULL
85132 +_001210_hash uvc_alloc_buffers 2-3 9656 _001210_hash NULL
85133 +_001211_hash uvc_alloc_entity 3-4 20836 _001211_hash NULL
85134 +_001212_hash v4l2_ctrl_new 7 38725 _001212_hash NULL
85135 +_001213_hash v4l2_event_subscribe 3 19510 _001213_hash NULL
85136 +_001214_hash vb2_read 3 42703 _001214_hash NULL
85137 +_001215_hash vb2_write 3 31948 _001215_hash NULL
85138 +_001216_hash vc_resize 2-3 3585 _001216_hash NULL
85139 +_001218_hash __vhost_add_used_n 3 26554 _001218_hash NULL
85140 +_001219_hash __videobuf_alloc_vb 1 27062 _001219_hash NULL
85141 +_001220_hash videobuf_dma_init_kernel 3 6963 _001220_hash NULL
85142 +_001221_hash virtqueue_add_buf 3-4 59470 _001221_hash NULL
85143 +_001223_hash vmalloc 1 15464 _001223_hash NULL
85144 +_001224_hash vmalloc_to_sg 2 58354 _001224_hash NULL
85145 +_001225_hash vol_cdev_write 3 40915 _001225_hash NULL
85146 +_001226_hash vxge_device_register 4 7752 _001226_hash NULL
85147 +_001227_hash __vxge_hw_channel_allocate 3 55462 _001227_hash NULL
85148 +_001228_hash vzalloc 1 47421 _001228_hash NULL
85149 +_001229_hash vzalloc_node 1 23424 _001229_hash NULL
85150 +_001230_hash wa_nep_queue 2 8858 _001230_hash NULL
85151 +_001231_hash __wa_xfer_setup_segs 2 56725 _001231_hash NULL
85152 +_001232_hash wiphy_new 2 2482 _001232_hash NULL
85153 +_001233_hash wpan_phy_alloc 1 48056 _001233_hash NULL
85154 +_001234_hash wusb_ccm_mac 7 32199 _001234_hash NULL
85155 +_001235_hash x25_sendmsg 4 12487 _001235_hash NULL
85156 +_001236_hash xfrm_hash_alloc 1 10997 _001236_hash NULL
85157 +_001237_hash _xfs_buf_get_pages 2 46811 _001237_hash NULL
85158 +_001238_hash xfs_da_buf_make 1 55845 _001238_hash NULL
85159 +_001239_hash xfs_da_grow_inode_int 3 21785 _001239_hash NULL
85160 +_001240_hash xfs_dir_cilookup_result 3 64288 _003139_hash NULL nohasharray
85161 +_001241_hash xfs_iext_add_indirect_multi 3 32400 _001241_hash NULL
85162 +_001242_hash xfs_iext_inline_to_direct 2 12384 _001242_hash NULL
85163 +_001243_hash xfs_iroot_realloc 2 46826 _001243_hash NULL
85164 +_001244_hash xhci_alloc_stream_info 3 63902 _001244_hash NULL
85165 +_001245_hash xlog_recover_add_to_trans 4 62839 _001245_hash NULL
85166 +_001246_hash xprt_alloc 2 1475 _001246_hash NULL
85167 +_001247_hash xt_alloc_table_info 1 57903 _001247_hash NULL
85168 +_001248_hash _zd_iowrite32v_async_locked 3 39034 _001248_hash NULL
85169 +_001249_hash zd_usb_iowrite16v 3 49744 _001249_hash NULL
85170 +_001250_hash acpi_ds_build_internal_package_obj 3 58271 _001250_hash NULL
85171 +_001251_hash acpi_system_read_event 3 55362 _001251_hash NULL
85172 +_001252_hash acpi_ut_create_buffer_object 1 42030 _001252_hash NULL
85173 +_001253_hash acpi_ut_create_package_object 1 17594 _001253_hash NULL
85174 +_001254_hash acpi_ut_create_string_object 1 15360 _001254_hash NULL
85175 +_001255_hash ad7879_spi_multi_read 3 8218 _001255_hash NULL
85176 +_001256_hash add_child 4 45201 _001256_hash NULL
85177 +_001257_hash add_port 2 54941 _001257_hash NULL
85178 +_001258_hash adu_read 3 24177 _001258_hash NULL
85179 +_001259_hash afs_cell_create 2 27346 _001259_hash NULL
85180 +_001260_hash agp_generic_alloc_user 1 9470 _001260_hash NULL
85181 +_001261_hash alloc_agpphysmem_i8xx 1 39427 _001261_hash NULL
85182 +_001262_hash allocate_cnodes 1 5329 _001262_hash NULL
85183 +_001263_hash ___alloc_bootmem 1 11410 _001263_hash NULL
85184 +_001264_hash __alloc_bootmem_nopanic 1 65397 _001264_hash NULL
85185 +_001265_hash alloc_bulk_urbs_generic 5 12127 _001265_hash NULL
85186 +_001266_hash alloc_candev 1-2 7776 _001266_hash NULL
85187 +_001268_hash ____alloc_ei_netdev 1 51475 _001268_hash NULL
85188 +_001269_hash alloc_etherdev_mqs 1 36450 _001269_hash NULL
85189 +_001270_hash alloc_extent_buffer 3 52824 _001270_hash NULL
85190 +_001271_hash alloc_fcdev 1 18780 _001271_hash NULL
85191 +_001272_hash alloc_fddidev 1 15382 _001272_hash NULL
85192 +_001273_hash alloc_hippi_dev 1 51320 _001273_hash NULL
85193 +_001274_hash alloc_irdadev 1 19140 _001274_hash NULL
85194 +_001275_hash alloc_ltalkdev 1 38071 _001275_hash NULL
85195 +_001276_hash alloc_one_pg_vec_page 1 10747 _001276_hash NULL
85196 +_001277_hash alloc_orinocodev 1 21371 _001277_hash NULL
85197 +_001279_hash alloc_trdev 1 16399 _001279_hash NULL
85198 +_001280_hash async_setkey 3 35521 _001280_hash NULL
85199 +_001281_hash ata_host_alloc_pinfo 3 17325 _001281_hash NULL
85200 +_001284_hash ath6kl_connect_event 7-9-8 14267 _001284_hash NULL
85201 +_001285_hash ath6kl_fwlog_block_read 3 49836 _001285_hash NULL
85202 +_001286_hash ath6kl_fwlog_read 3 32101 _001286_hash NULL
85203 +_001287_hash ath_rx_init 2 43564 _001287_hash NULL
85204 +_001288_hash ath_tx_init 2 60515 _001288_hash NULL
85205 +_001289_hash atm_get_addr 3 31221 _001289_hash NULL
85206 +_001290_hash av7110_ipack_init 2 46655 _001290_hash NULL
85207 +_001291_hash bdx_rxdb_create 1 46525 _001291_hash NULL
85208 +_001292_hash bdx_tx_db_init 2 41719 _001292_hash NULL
85209 +_001293_hash bio_map_kern 3 64751 _001293_hash NULL
85210 +_001294_hash bits_to_user 3-2 47733 _001294_hash NULL
85211 +_001295_hash __blk_queue_init_tags 2 9778 _001295_hash NULL
85212 +_001296_hash blk_queue_resize_tags 2 28670 _001296_hash NULL
85213 +_001297_hash blk_rq_map_user_iov 5 16772 _001297_hash NULL
85214 +_001298_hash bm_init 2 13529 _001298_hash NULL
85215 +_001299_hash brcmf_alloc_wdev 1 60347 _001299_hash NULL
85216 +_001300_hash btrfs_insert_dir_item 4 59304 _001300_hash NULL
85217 +_001301_hash btrfs_map_block 3 64379 _001301_hash NULL
85218 +_001302_hash c4_add_card 3 54968 _001302_hash NULL
85219 +_001303_hash cache_read 3 24790 _001303_hash NULL
85220 +_001304_hash cache_write 3 13589 _001304_hash NULL
85221 +_001305_hash calc_hmac 3 32010 _001305_hash NULL
85222 +_001306_hash ccid_getsockopt_builtin_ccids 2 53634 _001306_hash NULL
85223 +_001307_hash ceph_copy_page_vector_to_user 4-3 31270 _001307_hash NULL
85224 +_001308_hash ceph_read_dir 3 17005 _001308_hash NULL
85225 +_001309_hash cfg80211_roamed 5-7 32632 _001309_hash NULL
85226 +_001311_hash ci_ll_init 3 12930 _001311_hash NULL
85227 +_001312_hash coda_psdev_read 3 35029 _001312_hash NULL
85228 +_001313_hash construct_key_and_link 4 8321 _001313_hash NULL
85229 +_001314_hash copy_counters_to_user 5 17027 _001824_hash NULL nohasharray
85230 +_001315_hash copy_entries_to_user 1 52367 _001315_hash NULL
85231 +_001316_hash copy_from_buf 4-2 27308 _001316_hash NULL
85232 +_001317_hash copy_oldmem_page 3-1 26164 _001317_hash NULL
85233 +_001318_hash copy_to_user_fromio 3 57432 _001318_hash NULL
85234 +_001319_hash cryptd_hash_setkey 3 42781 _001319_hash NULL
85235 +_001320_hash crypto_authenc_esn_setkey 3 6985 _001320_hash NULL
85236 +_001321_hash crypto_authenc_setkey 3 80 _003311_hash NULL nohasharray
85237 +_001322_hash cx18_copy_buf_to_user 4 22735 _001322_hash NULL
85238 +_001324_hash cxgbi_ddp_reserve 4 30091 _001324_hash NULL
85239 +_001325_hash datablob_hmac_append 3 40038 _001325_hash NULL
85240 +_001326_hash datablob_hmac_verify 4 24786 _001326_hash NULL
85241 +_001327_hash dataflash_read_fact_otp 3-2 33204 _001327_hash NULL
85242 +_001328_hash dataflash_read_user_otp 3-2 14536 _001328_hash &_000201_hash
85243 +_001329_hash dccp_feat_register_sp 5 17914 _001329_hash NULL
85244 +_001330_hash ddb_input_read 3 9743 _001330_hash NULL
85245 +_001331_hash dev_read 3 56369 _001331_hash NULL
85246 +_001332_hash diva_os_copy_to_user 4 48508 _001332_hash NULL
85247 +_001333_hash diva_os_malloc 2 16406 _001333_hash NULL
85248 +_001334_hash dlm_dir_lookup 4 56662 _001334_hash NULL
85249 +_001335_hash dm_vcalloc 1-2 16814 _001335_hash NULL
85250 +_001337_hash do_proc_readlink 3 14096 _001337_hash NULL
85251 +_001338_hash do_readlink 2 43518 _001338_hash NULL
85252 +_001339_hash __do_replace 5 37227 _001339_hash NULL
85253 +_001340_hash do_sigpending 2 9766 _001340_hash NULL
85254 +_001341_hash drbd_setsockopt 5 16280 _001341_hash &_000371_hash
85255 +_001342_hash dsp_buffer_alloc 2 11684 _001342_hash NULL
85256 +_001343_hash dump_midi 3 51040 _001343_hash NULL
85257 +_001344_hash dvb_dmxdev_set_buffer_size 2 55643 _001344_hash NULL
85258 +_001345_hash dvb_dvr_set_buffer_size 2 9840 _001345_hash NULL
85259 +_001346_hash dvb_ringbuffer_pkt_read_user 3-5-2 4303 _001346_hash NULL
85260 +_001348_hash dvb_ringbuffer_read_user 3 56702 _001348_hash NULL
85261 +_001349_hash ecryptfs_filldir 3 6622 _001349_hash NULL
85262 +_001350_hash ecryptfs_readlink 3 40775 _001350_hash NULL
85263 +_001351_hash ecryptfs_send_message 2 18322 _001351_hash NULL
85264 +_001352_hash em28xx_init_isoc 4 62883 _001352_hash &_000721_hash
85265 +_001353_hash et61x251_read 3 25420 _001353_hash NULL
85266 +_001354_hash ext4_add_new_descs 3 19509 _001354_hash NULL
85267 +_001355_hash fat_ioctl_filldir 3 36621 _001355_hash NULL
85268 +_001356_hash fd_copyout 3 59323 _001356_hash NULL
85269 +_001357_hash f_hidg_read 3 6238 _001357_hash NULL
85270 +_001358_hash filldir 3 55137 _001358_hash NULL
85271 +_001359_hash filldir64 3 46469 _001359_hash NULL
85272 +_001360_hash fops_read 3 40672 _001360_hash NULL
85273 +_001361_hash from_buffer 3 18625 _001361_hash NULL
85274 +_001362_hash fsm_init 2 16134 _001362_hash NULL
85275 +_001363_hash get_subdir 3 62581 _001363_hash NULL
85276 +_001364_hash gspca_dev_probe 4 2570 _001364_hash NULL
85277 +_001365_hash handle_received_packet 3 22457 _001365_hash NULL
85278 +_001366_hash hash_setkey 3 48310 _001366_hash NULL
85279 +_001367_hash hdlcdrv_register 2 6792 _001367_hash NULL
85280 +_001368_hash hdpvr_read 3 9273 _001368_hash NULL
85281 +_001369_hash hid_input_report 4 32458 _001369_hash NULL
85282 +_001370_hash hidraw_read 3 59650 _001370_hash &_001200_hash
85283 +_001371_hash HiSax_readstatus 2 15752 _001371_hash NULL
85284 +_001373_hash __hwahc_op_set_gtk 4 42038 _001373_hash NULL
85285 +_001374_hash __hwahc_op_set_ptk 5 36510 _001374_hash NULL
85286 +_001375_hash ib_copy_to_udata 3 27525 _001375_hash NULL
85287 +_001376_hash idetape_chrdev_read 3 2097 _001376_hash NULL
85288 +_001377_hash ieee80211_alloc_hw 1 43829 _001377_hash NULL
85289 +_001378_hash ieee80211_bss_info_update 4 13991 _001378_hash NULL
85290 +_001379_hash ilo_read 3 32531 _001379_hash NULL
85291 +_001380_hash init_map_ipmac 3-4 63896 _001380_hash NULL
85292 +_001382_hash init_tid_tabs 2-4-3 13252 _001382_hash NULL
85293 +_001385_hash iowarrior_read 3 53483 _001385_hash NULL
85294 +_001386_hash ipv6_getsockopt_sticky 5 56711 _001386_hash NULL
85295 +_001387_hash ipwireless_send_packet 4 8328 _001387_hash NULL
85296 +_001388_hash ipx_sendmsg 4 1362 _001388_hash NULL
85297 +_001389_hash iscsi_conn_setup 2 35159 _001389_hash NULL
85298 +_001390_hash iscsi_create_session 3 51647 _001390_hash NULL
85299 +_001391_hash iscsi_host_alloc 2 36671 _001391_hash NULL
85300 +_001392_hash iscsi_session_setup 4-5 196 _001392_hash NULL
85301 +_001394_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _001701_hash NULL nohasharray
85302 +_001395_hash isdn_ppp_read 4 50356 _001395_hash NULL
85303 +_001396_hash isku_sysfs_read 6 58806 _001396_hash NULL
85304 +_001397_hash isku_sysfs_write 6 49767 _001397_hash NULL
85305 +_001398_hash iso_alloc_urb 4-5 45206 _001398_hash NULL
85306 +_001400_hash ivtv_copy_buf_to_user 4 6159 _001400_hash NULL
85307 +_001401_hash iwm_rx_handle 3 24899 _001401_hash NULL
85308 +_001402_hash iwm_wdev_alloc 1 38415 _001402_hash NULL
85309 +_001403_hash jbd2_alloc 1 41359 _001403_hash NULL
85310 +_001404_hash jffs2_do_link 6 42048 _001404_hash NULL
85311 +_001405_hash jffs2_do_unlink 4 62020 _001405_hash NULL
85312 +_001406_hash jffs2_security_setxattr 4 62107 _001406_hash NULL
85313 +_001407_hash jffs2_trusted_setxattr 4 17048 _001407_hash NULL
85314 +_001408_hash jffs2_user_setxattr 4 10182 _001408_hash NULL
85315 +_001409_hash kernel_setsockopt 5 35913 _001409_hash NULL
85316 +_001410_hash keyctl_describe_key 3 36853 _001410_hash NULL
85317 +_001411_hash keyctl_get_security 3 64418 _001411_hash &_001132_hash
85318 +_001412_hash keyring_read 3 13438 _001412_hash NULL
85319 +_001413_hash kfifo_copy_to_user 3 20646 _001413_hash NULL
85320 +_001414_hash kmem_zalloc_large 1 56128 _001414_hash NULL
85321 +_001415_hash kmp_init 2 41373 _001415_hash NULL
85322 +_001416_hash koneplus_sysfs_write 6 35993 _001416_hash NULL
85323 +_001417_hash kvm_clear_guest_page 4 2308 _001417_hash NULL
85324 +_001418_hash kvm_read_nested_guest_page 5 13337 _001418_hash NULL
85325 +_001419_hash l2cap_create_basic_pdu 3 24869 _003830_hash &_001034_hash nohasharray
85326 +_001420_hash l2cap_create_connless_pdu 3 37327 _001420_hash &_000966_hash
85327 +_001421_hash l2cap_create_iframe_pdu 3 51801 _001421_hash NULL
85328 +_001422_hash __lgwrite 4 57669 _001422_hash NULL
85329 +_001423_hash libfc_host_alloc 2 7917 _001423_hash NULL
85330 +_001424_hash llcp_sock_sendmsg 4 1092 _001424_hash NULL
85331 +_001425_hash macvtap_get_user 4 28185 _001425_hash NULL
85332 +_001426_hash mcam_v4l_read 3 36513 _001426_hash NULL
85333 +_001427_hash mce_async_out 3 58056 _001427_hash NULL
85334 +_001428_hash mce_flush_rx_buffer 2 14976 _001428_hash NULL
85335 +_001429_hash mdc800_device_read 3 22896 _001429_hash NULL
85336 +_001430_hash memcpy_toiovec 3 54166 _001430_hash &_000867_hash
85337 +_001431_hash memcpy_toiovecend 3-4 19736 _001431_hash NULL
85338 +_001433_hash mgt_set_varlen 4 60916 _001433_hash NULL
85339 +_001434_hash mlx4_en_create_rx_ring 3 62498 _001434_hash NULL
85340 +_001435_hash mlx4_en_create_tx_ring 4 48501 _001435_hash NULL
85341 +_001436_hash mon_bin_get_event 4 52863 _001436_hash NULL
85342 +_001437_hash mousedev_read 3 47123 _001437_hash NULL
85343 +_001438_hash move_addr_to_user 2 2868 _001438_hash NULL
85344 +_001439_hash mpihelp_mul 5-3 27805 _001439_hash NULL
85345 +_001441_hash mpi_lshift_limbs 2 9337 _001441_hash NULL
85346 +_001442_hash msnd_fifo_alloc 2 23179 _001442_hash NULL
85347 +_001443_hash mtdswap_init 2 55719 _001443_hash NULL
85348 +_001444_hash neigh_hash_grow 2 17283 _001444_hash NULL
85349 +_001445_hash nfs4_realloc_slot_table 2 22859 _001445_hash NULL
85350 +_001446_hash nfs_idmap_get_key 2 39616 _001446_hash NULL
85351 +_001447_hash nsm_get_handle 4 52089 _001447_hash NULL
85352 +_001448_hash ntfs_malloc_nofs 1 49572 _001448_hash NULL
85353 +_001449_hash ntfs_malloc_nofs_nofail 1 63631 _001449_hash NULL
85354 +_001450_hash nvme_create_queue 3 170 _001450_hash NULL
85355 +_001451_hash ocfs2_control_write 3 54737 _001451_hash NULL
85356 +_001452_hash orinoco_add_extscan_result 3 18207 _001452_hash NULL
85357 +_001454_hash override_release 2 52032 _001454_hash NULL
85358 +_001455_hash packet_snd 3 13634 _001455_hash NULL
85359 +_001456_hash pcbit_stat 2 27364 _001456_hash NULL
85360 +_001457_hash pcpu_extend_area_map 2 12589 _001457_hash NULL
85361 +_001458_hash pg_read 3 17276 _001458_hash NULL
85362 +_001459_hash picolcd_debug_eeprom_read 3 14549 _001459_hash NULL
85363 +_001460_hash pkt_alloc_packet_data 1 37928 _001460_hash NULL
85364 +_001461_hash pmcraid_build_passthrough_ioadls 2 62034 _001461_hash NULL
85365 +_001462_hash pms_capture 4 27142 _001462_hash NULL
85366 +_001463_hash posix_clock_register 2 5662 _001463_hash NULL
85367 +_001464_hash printer_read 3 54851 _001464_hash NULL
85368 +_001465_hash __proc_file_read 3 54978 _001465_hash NULL
85369 +_001466_hash pt_read 3 49136 _001466_hash NULL
85370 +_001467_hash put_cmsg 4 36589 _001467_hash NULL
85371 +_001468_hash pvr2_ioread_read 3 10720 _001505_hash NULL nohasharray
85372 +_001469_hash pwc_video_read 3 51735 _001469_hash NULL
85373 +_001470_hash px_raw_event 4 49371 _001470_hash NULL
85374 +_001471_hash qcam_read 3 13977 _001471_hash NULL
85375 +_001472_hash rawv6_sendmsg 4 20080 _001472_hash NULL
85376 +_001473_hash rds_sendmsg 4 40976 _001473_hash NULL
85377 +_001474_hash read_flush 3 43851 _001474_hash NULL
85378 +_001475_hash read_profile 3 27859 _001475_hash NULL
85379 +_001476_hash read_vmcore 3 26501 _001476_hash NULL
85380 +_001477_hash redirected_tty_write 3 65297 _001477_hash NULL
85381 +_001478_hash __register_chrdev 2-3 54223 _001478_hash NULL
85382 +_001480_hash regmap_raw_write 4 53803 _001480_hash NULL
85383 +_001481_hash reiserfs_allocate_list_bitmaps 3 21732 _001481_hash NULL
85384 +_001482_hash reiserfs_resize 2 34377 _001482_hash NULL
85385 +_001483_hash request_key_auth_read 3 24109 _001483_hash NULL
85386 +_001484_hash rfkill_fop_read 3 54711 _001484_hash NULL
85387 +_001485_hash rng_dev_read 3 41581 _001485_hash NULL
85388 +_001486_hash roccat_read 3 41093 _003519_hash NULL nohasharray
85389 +_001487_hash sco_sock_sendmsg 4 62542 _001487_hash NULL
85390 +_001488_hash scsi_register 2 49094 _001488_hash NULL
85391 +_001489_hash sctp_getsockopt_events 2 3607 _001489_hash NULL
85392 +_001490_hash sctp_getsockopt_maxburst 2 42941 _001490_hash NULL
85393 +_001491_hash sctp_getsockopt_maxseg 2 10737 _001491_hash NULL
85394 +_001492_hash sctpprobe_read 3 17741 _001492_hash NULL
85395 +_001493_hash sdhci_alloc_host 2 7509 _001493_hash NULL
85396 +_001494_hash selinux_inode_post_setxattr 4 26037 _001494_hash NULL
85397 +_001495_hash selinux_inode_setsecurity 4 18148 _001495_hash NULL
85398 +_001496_hash selinux_inode_setxattr 4 10708 _001496_hash NULL
85399 +_001497_hash selinux_secctx_to_secid 2 63744 _001497_hash NULL
85400 +_001498_hash selinux_setprocattr 4 55611 _001498_hash NULL
85401 +_001499_hash sel_write_context 3 25726 _002397_hash NULL nohasharray
85402 +_001500_hash seq_copy_in_user 3 18543 _001500_hash NULL
85403 +_001501_hash seq_open_net 4 8968 _001594_hash NULL nohasharray
85404 +_001502_hash seq_open_private 3 61589 _001502_hash NULL
85405 +_001503_hash set_arg 3 42824 _001503_hash NULL
85406 +_001504_hash sg_read 3 25799 _001504_hash NULL
85407 +_001505_hash shash_async_setkey 3 10720 _001505_hash &_001468_hash
85408 +_001506_hash shash_compat_setkey 3 12267 _001506_hash NULL
85409 +_001507_hash shmem_setxattr 4 55867 _001507_hash NULL
85410 +_001508_hash simple_read_from_buffer 2-5 55957 _001508_hash NULL
85411 +_001511_hash sm_checker_extend 2 23615 _001511_hash NULL
85412 +_001512_hash sn9c102_read 3 29305 _001512_hash NULL
85413 +_001513_hash snd_es1938_capture_copy 5 25930 _001513_hash NULL
85414 +_001514_hash snd_gus_dram_peek 4 9062 _001514_hash NULL
85415 +_001515_hash snd_hdsp_capture_copy 5 4011 _001515_hash NULL
85416 +_001516_hash snd_korg1212_copy_to 6 92 _001516_hash NULL
85417 +_001517_hash snd_opl4_mem_proc_read 5 63774 _001517_hash NULL
85418 +_001518_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _001518_hash NULL
85419 +_001519_hash snd_pcm_oss_read1 3 63771 _001519_hash NULL
85420 +_001520_hash snd_rawmidi_kernel_read1 4 36740 _001520_hash NULL
85421 +_001521_hash snd_rme9652_capture_copy 5 10287 _001521_hash NULL
85422 +_001522_hash srp_target_alloc 3 37288 _001522_hash NULL
85423 +_001523_hash stk_allocate_buffers 2 16291 _001523_hash NULL
85424 +_001524_hash store_ifalias 4 35088 _001524_hash NULL
85425 +_001525_hash store_msg 3 56417 _001525_hash NULL
85426 +_001526_hash str_to_user 2 11411 _001526_hash NULL
85427 +_001527_hash subbuf_read_actor 3 2071 _001527_hash NULL
85428 +_001528_hash sys_fgetxattr 4 25166 _001528_hash NULL
85429 +_001529_hash sys_gethostname 2 49698 _001529_hash NULL
85430 +_001530_hash sys_getxattr 4 37418 _001530_hash NULL
85431 +_001531_hash sys_kexec_load 2 14222 _001531_hash NULL
85432 +_001532_hash sys_msgsnd 3 44537 _001532_hash &_000129_hash
85433 +_001533_hash sys_process_vm_readv 3-5 19090 _003125_hash NULL nohasharray
85434 +_001535_hash sys_process_vm_writev 3-5 4928 _001535_hash NULL
85435 +_001537_hash sys_sched_getaffinity 2 60033 _001537_hash NULL
85436 +_001538_hash sys_setsockopt 5 35320 _001538_hash NULL
85437 +_001539_hash t3_init_l2t 1 8261 _001539_hash NULL
85438 +_001540_hash team_options_register 3 20091 _001540_hash NULL
85439 +_001541_hash tipc_send2name 6 16809 _001541_hash NULL
85440 +_001542_hash tipc_send2port 5 63935 _001542_hash NULL
85441 +_001543_hash tipc_send 4 51238 _001543_hash NULL
85442 +_001544_hash tm6000_i2c_recv_regs16 5 2949 _001544_hash NULL
85443 +_001545_hash tm6000_i2c_recv_regs 5 46215 _001545_hash NULL
85444 +_001546_hash tm6000_i2c_send_regs 5 20250 _001546_hash NULL
85445 +_001547_hash tnode_new 3 44757 _003828_hash NULL nohasharray
85446 +_001548_hash tomoyo_read_self 3 33539 _001548_hash NULL
85447 +_001549_hash tomoyo_update_domain 2 5498 _001549_hash NULL
85448 +_001550_hash tomoyo_update_policy 2 40458 _001550_hash NULL
85449 +_001551_hash tpm_read 3 50344 _001551_hash NULL
85450 +_001552_hash TSS_rawhmac 3 17486 _001552_hash NULL
85451 +_001553_hash tt3650_ci_msg 4 57219 _001553_hash NULL
85452 +_001554_hash tun_get_user 3 33178 _001554_hash NULL
85453 +_001555_hash ubi_dbg_dump_flash 4 3870 _001555_hash NULL
85454 +_001556_hash ubi_io_write 4-5 15870 _001556_hash &_000954_hash
85455 +_001558_hash uio_read 3 49300 _001558_hash NULL
85456 +_001559_hash unix_seqpacket_sendmsg 4 27893 _001559_hash NULL
85457 +_001560_hash unlink1 3 63059 _001560_hash NULL
85458 +_001562_hash usb_allocate_stream_buffers 3 8964 _001562_hash NULL
85459 +_001563_hash usbdev_read 3 45114 _001563_hash NULL
85460 +_001564_hash usblp_read 3 57342 _003306_hash NULL nohasharray
85461 +_001565_hash usbtmc_read 3 32377 _001565_hash NULL
85462 +_001566_hash usbvision_v4l2_read 3 34386 _001566_hash NULL
85463 +_001567_hash _usb_writeN_sync 4 31682 _001567_hash NULL
85464 +_001568_hash user_read 3 51881 _001568_hash NULL
85465 +_001569_hash v4l_stk_read 3 39672 _001569_hash NULL
85466 +_001570_hash vcs_read 3 8017 _001570_hash NULL
85467 +_001571_hash vdma_mem_alloc 1 6171 _001571_hash NULL
85468 +_001572_hash venus_create 4 20555 _001572_hash NULL
85469 +_001573_hash venus_link 5 32165 _001573_hash NULL
85470 +_001574_hash venus_lookup 4 8121 _001574_hash NULL
85471 +_001575_hash venus_mkdir 4 8967 _001575_hash NULL
85472 +_001576_hash venus_remove 4 59781 _001576_hash NULL
85473 +_001577_hash venus_rename 4-5 17707 _003279_hash NULL nohasharray
85474 +_001579_hash venus_rmdir 4 45564 _001579_hash NULL
85475 +_001580_hash venus_symlink 4-6 23570 _001580_hash NULL
85476 +_001582_hash vfs_readlink 3 54368 _001582_hash NULL
85477 +_001583_hash vfs_readv 3 38011 _001583_hash NULL
85478 +_001584_hash vfs_writev 3 25278 _001584_hash NULL
85479 +_001585_hash vga_arb_read 3 4886 _001585_hash NULL
85480 +_001586_hash vhci_put_user 4 12604 _001586_hash NULL
85481 +_001587_hash vhost_add_used_n 3 10760 _001587_hash NULL
85482 +_001588_hash __videobuf_copy_to_user 4 15423 _001588_hash NULL
85483 +_001589_hash videobuf_pages_to_sg 2 3708 _001589_hash NULL
85484 +_001590_hash videobuf_vmalloc_to_sg 2 4548 _001590_hash NULL
85485 +_001591_hash virtnet_send_command 5-6 61993 _001591_hash NULL
85486 +_001593_hash vmbus_establish_gpadl 3 4495 _001593_hash NULL
85487 +_001594_hash vol_cdev_read 3 8968 _001594_hash &_001501_hash
85488 +_001595_hash w9966_v4l_read 3 31148 _001595_hash NULL
85489 +_001596_hash wdm_read 3 6549 _001596_hash NULL
85490 +_001597_hash wusb_prf 7 54261 _001597_hash &_000063_hash
85491 +_001598_hash xdi_copy_to_user 4 48900 _001598_hash NULL
85492 +_001599_hash xfs_buf_get_uncached 2 51477 _001599_hash NULL
85493 +_001600_hash xfs_efd_init 3 5463 _001600_hash NULL
85494 +_001601_hash xfs_efi_init 2 5476 _001601_hash NULL
85495 +_001602_hash xfs_iext_realloc_direct 2 20521 _001602_hash NULL
85496 +_001603_hash xfs_iext_realloc_indirect 2 59211 _001603_hash NULL
85497 +_001604_hash xfs_inumbers_fmt 3 12817 _001604_hash NULL
85498 +_001605_hash xlog_recover_add_to_cont_trans 4 44102 _001605_hash NULL
85499 +_001606_hash xz_dec_lzma2_create 2 36353 _002745_hash NULL nohasharray
85500 +_001607_hash _zd_iowrite32v_locked 3 44725 _001607_hash NULL
85501 +_001608_hash aat2870_reg_read_file 3 12221 _001608_hash NULL
85502 +_001609_hash add_sctp_bind_addr 3 12269 _001609_hash NULL
85503 +_001610_hash aes_decrypt_fail_read 3 54815 _001610_hash NULL
85504 +_001611_hash aes_decrypt_interrupt_read 3 19910 _001611_hash NULL
85505 +_001612_hash aes_decrypt_packets_read 3 10155 _001612_hash NULL
85506 +_001613_hash aes_encrypt_fail_read 3 32562 _001613_hash NULL
85507 +_001614_hash aes_encrypt_interrupt_read 3 39919 _001614_hash NULL
85508 +_001615_hash aes_encrypt_packets_read 3 48666 _001615_hash NULL
85509 +_001616_hash afs_cell_lookup 2 8482 _001616_hash NULL
85510 +_001617_hash agp_allocate_memory 2 58761 _001617_hash NULL
85511 +_001618_hash __alloc_bootmem 1 31498 _001618_hash NULL
85512 +_001619_hash __alloc_bootmem_low 1 43423 _003150_hash NULL nohasharray
85513 +_001620_hash __alloc_bootmem_node_nopanic 2 6432 _001620_hash NULL
85514 +_001621_hash alloc_cc770dev 1 48186 _001621_hash NULL
85515 +_001622_hash __alloc_ei_netdev 1 29338 _001622_hash NULL
85516 +_001623_hash __alloc_eip_netdev 1 51549 _001623_hash NULL
85517 +_001624_hash alloc_libipw 1 22708 _001624_hash NULL
85518 +_001625_hash alloc_pg_vec 2 8533 _001625_hash NULL
85519 +_001626_hash alloc_sja1000dev 1 17868 _001626_hash NULL
85520 +_001627_hash alloc_targets 2 8074 _003536_hash NULL nohasharray
85521 +_001630_hash ath6kl_disconnect_timeout_read 3 3650 _001630_hash NULL
85522 +_001631_hash ath6kl_endpoint_stats_read 3 41554 _001631_hash NULL
85523 +_001632_hash ath6kl_fwlog_mask_read 3 2050 _001632_hash NULL
85524 +_001633_hash ath6kl_keepalive_read 3 44303 _001633_hash NULL
85525 +_001634_hash ath6kl_listen_int_read 3 10355 _001634_hash NULL
85526 +_001635_hash ath6kl_lrssi_roam_read 3 61022 _001635_hash NULL
85527 +_001636_hash ath6kl_regdump_read 3 14393 _001636_hash NULL
85528 +_001637_hash ath6kl_regread_read 3 25884 _001637_hash NULL
85529 +_001638_hash ath6kl_regwrite_read 3 48747 _001638_hash NULL
85530 +_001639_hash ath6kl_roam_table_read 3 26166 _001639_hash NULL
85531 +_001640_hash ath9k_debugfs_read_buf 3 25316 _001640_hash NULL
85532 +_001641_hash atk_debugfs_ggrp_read 3 29522 _001641_hash NULL
85533 +_001642_hash b43_debugfs_read 3 24425 _001642_hash NULL
85534 +_001643_hash b43legacy_debugfs_read 3 2473 _001643_hash NULL
85535 +_001644_hash bcm_recvmsg 4 43992 _001644_hash NULL
85536 +_001645_hash bfad_debugfs_read 3 13119 _001645_hash NULL
85537 +_001646_hash bfad_debugfs_read_regrd 3 57830 _001646_hash NULL
85538 +_001647_hash blk_init_tags 1 30592 _001647_hash NULL
85539 +_001648_hash blk_queue_init_tags 2 44355 _002686_hash NULL nohasharray
85540 +_001649_hash blk_rq_map_kern 4 47004 _001649_hash NULL
85541 +_001650_hash bm_entry_read 3 10976 _001650_hash NULL
85542 +_001651_hash bm_status_read 3 19583 _001651_hash NULL
85543 +_001652_hash bnad_debugfs_read 3 50665 _001652_hash NULL
85544 +_001653_hash bnad_debugfs_read_regrd 3 51308 _001653_hash NULL
85545 +_001654_hash btmrvl_curpsmode_read 3 46939 _001654_hash NULL
85546 +_001655_hash btmrvl_gpiogap_read 3 4718 _001655_hash NULL
85547 +_001656_hash btmrvl_hscfgcmd_read 3 56303 _001656_hash NULL
85548 +_001657_hash btmrvl_hscmd_read 3 1614 _001657_hash NULL
85549 +_001658_hash btmrvl_hsmode_read 3 1647 _001658_hash NULL
85550 +_001659_hash btmrvl_hsstate_read 3 920 _001659_hash NULL
85551 +_001660_hash btmrvl_pscmd_read 3 24308 _001660_hash NULL
85552 +_001661_hash btmrvl_psmode_read 3 22395 _001661_hash NULL
85553 +_001662_hash btmrvl_psstate_read 3 50683 _001662_hash NULL
85554 +_001663_hash btmrvl_txdnldready_read 3 413 _001663_hash NULL
85555 +_001664_hash btrfs_add_link 5 9973 _001664_hash NULL
85556 +_001665_hash btrfs_discard_extent 2 38547 _001665_hash NULL
85557 +_001666_hash btrfs_find_create_tree_block 3 55812 _001666_hash NULL
85558 +_001667_hash btrfsic_map_block 2 56751 _001667_hash NULL
85559 +_001668_hash caif_stream_recvmsg 4 13173 _001668_hash NULL
85560 +_001669_hash carl9170_alloc 1 27 _001669_hash NULL
85561 +_001670_hash carl9170_debugfs_read 3 47738 _001670_hash NULL
85562 +_001671_hash cgroup_read_s64 5 19570 _001671_hash NULL
85563 +_001672_hash cgroup_read_u64 5 45532 _001672_hash NULL
85564 +_001673_hash channel_type_read 3 47308 _001673_hash NULL
85565 +_001674_hash codec_list_read_file 3 24910 _001674_hash NULL
85566 +_001675_hash configfs_read_file 3 1683 _001675_hash NULL
85567 +_001676_hash cpuset_common_file_read 5 8800 _001676_hash NULL
85568 +_001677_hash create_subvol 4 2347 _001677_hash NULL
85569 +_001678_hash cx18_copy_mdl_to_user 4 45549 _001678_hash NULL
85570 +_001679_hash dai_list_read_file 3 25421 _001679_hash NULL
85571 +_001680_hash dapm_bias_read_file 3 64715 _001680_hash NULL
85572 +_001681_hash dapm_widget_power_read_file 3 59950 _001754_hash NULL nohasharray
85573 +_001684_hash dbgfs_frame 3 45917 _001684_hash NULL
85574 +_001685_hash dbgfs_state 3 38894 _001685_hash NULL
85575 +_001686_hash debugfs_read 3 62535 _001686_hash NULL
85576 +_001687_hash debug_output 3 18575 _001687_hash NULL
85577 +_001688_hash debug_read 3 19322 _001688_hash NULL
85578 +_001689_hash dfs_file_read 3 18116 _001689_hash NULL
85579 +_001690_hash dma_memcpy_pg_to_iovec 6 1725 _001690_hash NULL
85580 +_001691_hash dma_memcpy_to_iovec 5 12173 _001691_hash NULL
85581 +_001692_hash dma_rx_errors_read 3 52045 _001692_hash NULL
85582 +_001693_hash dma_rx_requested_read 3 65354 _001693_hash NULL
85583 +_001694_hash dma_show_regs 3 35266 _001694_hash NULL
85584 +_001695_hash dma_tx_errors_read 3 46060 _001695_hash NULL
85585 +_001696_hash dma_tx_requested_read 3 16110 _001775_hash NULL nohasharray
85586 +_001697_hash dm_exception_table_init 2 39645 _001697_hash &_001103_hash
85587 +_001698_hash dn_recvmsg 4 17213 _001698_hash NULL
85588 +_001699_hash dns_resolver_read 3 54658 _001699_hash NULL
85589 +_001700_hash do_msgrcv 4 5590 _001700_hash NULL
85590 +_001701_hash driver_state_read 3 17194 _001701_hash &_001394_hash
85591 +_001702_hash dvb_demux_do_ioctl 3 34871 _001702_hash NULL
85592 +_001703_hash dvb_dmxdev_buffer_read 4 20682 _001703_hash NULL
85593 +_001704_hash dvb_dvr_do_ioctl 3 43355 _001704_hash NULL
85594 +_001705_hash econet_recvmsg 4 40978 _001705_hash NULL
85595 +_001706_hash event_calibration_read 3 21083 _001706_hash NULL
85596 +_001707_hash event_heart_beat_read 3 48961 _001707_hash NULL
85597 +_001708_hash event_oom_late_read 3 61175 _001708_hash &_001014_hash
85598 +_001709_hash event_phy_transmit_error_read 3 10471 _001709_hash NULL
85599 +_001710_hash event_rx_mem_empty_read 3 40363 _001710_hash NULL
85600 +_001711_hash event_rx_mismatch_read 3 38518 _001711_hash NULL
85601 +_001712_hash event_rx_pool_read 3 25792 _001712_hash NULL
85602 +_001713_hash event_tx_stuck_read 3 19305 _001713_hash NULL
85603 +_001714_hash excessive_retries_read 3 60425 _001714_hash NULL
85604 +_001715_hash fallback_on_nodma_alloc 2 35332 _001715_hash NULL
85605 +_001716_hash filter_read 3 61692 _001716_hash NULL
85606 +_001717_hash format_devstat_counter 3 32550 _001717_hash NULL
85607 +_001718_hash fragmentation_threshold_read 3 61718 _001718_hash NULL
85608 +_001719_hash fuse_conn_limit_read 3 20084 _001719_hash NULL
85609 +_001720_hash fuse_conn_waiting_read 3 49762 _001720_hash NULL
85610 +_001721_hash generic_readlink 3 32654 _001721_hash NULL
85611 +_001722_hash gpio_power_read 3 36059 _001722_hash NULL
85612 +_001723_hash hash_recvmsg 4 50924 _001723_hash NULL
85613 +_001724_hash ht40allow_map_read 3 55209 _002830_hash NULL nohasharray
85614 +_001725_hash hwflags_read 3 52318 _001725_hash NULL
85615 +_001726_hash hysdn_conf_read 3 42324 _003205_hash NULL nohasharray
85616 +_001727_hash i2400m_rx_stats_read 3 57706 _001727_hash NULL
85617 +_001728_hash i2400m_tx_stats_read 3 28527 _001728_hash NULL
85618 +_001729_hash idmouse_read 3 63374 _001729_hash NULL
85619 +_001730_hash ieee80211_if_read 3 6785 _001730_hash NULL
85620 +_001731_hash ieee80211_rx_bss_info 3 61630 _001731_hash NULL
85621 +_001732_hash ikconfig_read_current 3 1658 _001732_hash NULL
85622 +_001733_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001733_hash NULL
85623 +_001734_hash il3945_ucode_general_stats_read 3 46111 _001734_hash NULL
85624 +_001735_hash il3945_ucode_rx_stats_read 3 3048 _001735_hash NULL
85625 +_001736_hash il3945_ucode_tx_stats_read 3 36016 _001736_hash NULL
85626 +_001737_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001737_hash NULL
85627 +_001738_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001738_hash NULL
85628 +_001739_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001739_hash NULL
85629 +_001740_hash il4965_ucode_general_stats_read 3 56277 _001740_hash NULL
85630 +_001741_hash il4965_ucode_rx_stats_read 3 61948 _001741_hash NULL
85631 +_001742_hash il4965_ucode_tx_stats_read 3 12064 _001742_hash NULL
85632 +_001743_hash il_dbgfs_chain_noise_read 3 38044 _001743_hash NULL
85633 +_001744_hash il_dbgfs_channels_read 3 25005 _001744_hash NULL
85634 +_001745_hash il_dbgfs_disable_ht40_read 3 42386 _001745_hash NULL
85635 +_001746_hash il_dbgfs_fh_reg_read 3 40993 _001746_hash NULL
85636 +_001747_hash il_dbgfs_force_reset_read 3 57517 _001747_hash NULL
85637 +_001748_hash il_dbgfs_interrupt_read 3 3351 _001748_hash NULL
85638 +_001749_hash il_dbgfs_missed_beacon_read 3 59956 _001749_hash NULL
85639 +_001750_hash il_dbgfs_nvm_read 3 12288 _001750_hash NULL
85640 +_001751_hash il_dbgfs_power_save_status_read 3 43165 _001751_hash NULL
85641 +_001752_hash il_dbgfs_qos_read 3 33615 _001752_hash NULL
85642 +_001753_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001753_hash NULL
85643 +_001754_hash il_dbgfs_rxon_flags_read 3 59950 _001754_hash &_001681_hash
85644 +_001755_hash il_dbgfs_rx_queue_read 3 11221 _001755_hash NULL
85645 +_001756_hash il_dbgfs_rx_stats_read 3 15243 _001756_hash NULL
85646 +_001757_hash il_dbgfs_sensitivity_read 3 2370 _001757_hash NULL
85647 +_001758_hash il_dbgfs_sram_read 3 62296 _001758_hash NULL
85648 +_001759_hash il_dbgfs_stations_read 3 21532 _001759_hash NULL
85649 +_001760_hash il_dbgfs_status_read 3 58388 _001760_hash NULL
85650 +_001761_hash il_dbgfs_tx_queue_read 3 55668 _001761_hash NULL
85651 +_001762_hash il_dbgfs_tx_stats_read 3 32913 _001762_hash NULL
85652 +_001763_hash ima_show_htable_value 2 57136 _001763_hash NULL
85653 +_001765_hash ipw_write 3 59807 _001765_hash NULL
85654 +_001766_hash irda_recvmsg_stream 4 35280 _001766_hash NULL
85655 +_001767_hash iscsi_tcp_conn_setup 2 16376 _001767_hash NULL
85656 +_001768_hash isr_cmd_cmplt_read 3 53439 _001768_hash NULL
85657 +_001769_hash isr_commands_read 3 41398 _001769_hash NULL
85658 +_001770_hash isr_decrypt_done_read 3 49490 _001770_hash NULL
85659 +_001771_hash isr_dma0_done_read 3 8574 _001771_hash NULL
85660 +_001772_hash isr_dma1_done_read 3 48159 _001772_hash NULL
85661 +_001773_hash isr_fiqs_read 3 34687 _001773_hash NULL
85662 +_001774_hash isr_host_acknowledges_read 3 54136 _001774_hash NULL
85663 +_001775_hash isr_hw_pm_mode_changes_read 3 16110 _001775_hash &_001696_hash
85664 +_001776_hash isr_irqs_read 3 9181 _001776_hash NULL
85665 +_001777_hash isr_low_rssi_read 3 64789 _001777_hash NULL
85666 +_001778_hash isr_pci_pm_read 3 30271 _001778_hash NULL
85667 +_001779_hash isr_rx_headers_read 3 38325 _001779_hash NULL
85668 +_001780_hash isr_rx_mem_overflow_read 3 43025 _001780_hash NULL
85669 +_001781_hash isr_rx_procs_read 3 31804 _001781_hash NULL
85670 +_001782_hash isr_rx_rdys_read 3 35283 _001782_hash NULL
85671 +_001783_hash isr_tx_exch_complete_read 3 16103 _001783_hash NULL
85672 +_001784_hash isr_tx_procs_read 3 23084 _001784_hash NULL
85673 +_001785_hash isr_wakeups_read 3 49607 _001785_hash NULL
85674 +_001786_hash ivtv_read 3 57796 _001786_hash NULL
85675 +_001787_hash iwl_dbgfs_bt_traffic_read 3 35534 _001787_hash NULL
85676 +_001788_hash iwl_dbgfs_chain_noise_read 3 46355 _001788_hash NULL
85677 +_001789_hash iwl_dbgfs_channels_read 3 6784 _001789_hash NULL
85678 +_001790_hash iwl_dbgfs_current_sleep_command_read 3 2081 _001790_hash NULL
85679 +_001791_hash iwl_dbgfs_disable_ht40_read 3 35761 _001791_hash NULL
85680 +_001792_hash iwl_dbgfs_fh_reg_read 3 879 _001792_hash &_000393_hash
85681 +_001793_hash iwl_dbgfs_force_reset_read 3 62628 _001793_hash NULL
85682 +_001794_hash iwl_dbgfs_interrupt_read 3 23574 _001794_hash NULL
85683 +_001795_hash iwl_dbgfs_log_event_read 3 2107 _001795_hash NULL
85684 +_001796_hash iwl_dbgfs_missed_beacon_read 3 50584 _001796_hash NULL
85685 +_001797_hash iwl_dbgfs_nvm_read 3 23845 _001797_hash NULL
85686 +_001798_hash iwl_dbgfs_plcp_delta_read 3 55407 _001798_hash NULL
85687 +_001799_hash iwl_dbgfs_power_save_status_read 3 54392 _001799_hash NULL
85688 +_001800_hash iwl_dbgfs_protection_mode_read 3 13943 _001800_hash NULL
85689 +_001801_hash iwl_dbgfs_qos_read 3 11753 _001801_hash NULL
85690 +_001802_hash iwl_dbgfs_reply_tx_error_read 3 19205 _001802_hash NULL
85691 +_001803_hash iwl_dbgfs_rx_handlers_read 3 18708 _001803_hash NULL
85692 +_001804_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _001804_hash NULL
85693 +_001805_hash iwl_dbgfs_rxon_flags_read 3 20795 _001805_hash NULL
85694 +_001806_hash iwl_dbgfs_rx_queue_read 3 19943 _001806_hash NULL
85695 +_001807_hash iwl_dbgfs_rx_statistics_read 3 62687 _001807_hash &_000425_hash
85696 +_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _003026_hash NULL nohasharray
85697 +_001809_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001809_hash NULL
85698 +_001810_hash iwl_dbgfs_sram_read 3 44505 _001810_hash NULL
85699 +_001811_hash iwl_dbgfs_stations_read 3 9309 _001811_hash NULL
85700 +_001812_hash iwl_dbgfs_status_read 3 5171 _001812_hash NULL
85701 +_001813_hash iwl_dbgfs_temperature_read 3 29224 _001813_hash NULL
85702 +_001814_hash iwl_dbgfs_thermal_throttling_read 3 38779 _001814_hash NULL
85703 +_001815_hash iwl_dbgfs_traffic_log_read 3 58870 _001815_hash NULL
85704 +_001816_hash iwl_dbgfs_tx_queue_read 3 4635 _001816_hash NULL
85705 +_001817_hash iwl_dbgfs_tx_statistics_read 3 314 _003437_hash NULL nohasharray
85706 +_001818_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _001818_hash NULL
85707 +_001819_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _001819_hash NULL
85708 +_001820_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _001820_hash NULL
85709 +_001821_hash iwl_dbgfs_ucode_tracing_read 3 47983 _001821_hash &_000349_hash
85710 +_001822_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _001822_hash NULL
85711 +_001823_hash iwl_dbgfs_wowlan_sram_read 3 540 _001823_hash NULL
85712 +_001824_hash iwm_if_alloc 1 17027 _001824_hash &_001314_hash
85713 +_001825_hash kernel_readv 3 35617 _001825_hash NULL
85714 +_001826_hash key_algorithm_read 3 57946 _001826_hash NULL
85715 +_001827_hash key_icverrors_read 3 20895 _001827_hash NULL
85716 +_001828_hash key_key_read 3 3241 _001828_hash NULL
85717 +_001829_hash key_replays_read 3 62746 _001829_hash NULL
85718 +_001830_hash key_rx_spec_read 3 12736 _001830_hash NULL
85719 +_001831_hash key_tx_spec_read 3 4862 _001831_hash NULL
85720 +_001832_hash __kfifo_to_user 3 36555 _002199_hash NULL nohasharray
85721 +_001833_hash __kfifo_to_user_r 3 39123 _001833_hash NULL
85722 +_001834_hash kmem_zalloc_greedy 2-3 65268 _001834_hash NULL
85723 +_001836_hash l2cap_chan_send 3 49995 _001836_hash NULL
85724 +_001837_hash l2cap_sar_segment_sdu 3 27701 _001837_hash NULL
85725 +_001838_hash lbs_debugfs_read 3 30721 _001838_hash NULL
85726 +_001839_hash lbs_dev_info 3 51023 _001839_hash NULL
85727 +_001840_hash lbs_host_sleep_read 3 31013 _001840_hash NULL
85728 +_001841_hash lbs_rdbbp_read 3 45805 _001841_hash NULL
85729 +_001842_hash lbs_rdmac_read 3 418 _001842_hash NULL
85730 +_001843_hash lbs_rdrf_read 3 41431 _001843_hash NULL
85731 +_001844_hash lbs_sleepparams_read 3 10840 _001844_hash NULL
85732 +_001845_hash lbs_threshold_read 5 21046 _001845_hash NULL
85733 +_001846_hash libfc_vport_create 2 4415 _001846_hash NULL
85734 +_001847_hash lkdtm_debugfs_read 3 45752 _001847_hash NULL
85735 +_001848_hash llcp_sock_recvmsg 4 13556 _001848_hash NULL
85736 +_001849_hash long_retry_limit_read 3 59766 _001849_hash NULL
85737 +_001850_hash lpfc_debugfs_dif_err_read 3 36303 _001850_hash NULL
85738 +_001851_hash lpfc_debugfs_read 3 16566 _001851_hash NULL
85739 +_001852_hash lpfc_idiag_baracc_read 3 58466 _002447_hash NULL nohasharray
85740 +_001853_hash lpfc_idiag_ctlacc_read 3 33943 _001853_hash NULL
85741 +_001854_hash lpfc_idiag_drbacc_read 3 15948 _001854_hash NULL
85742 +_001855_hash lpfc_idiag_extacc_read 3 48301 _001855_hash NULL
85743 +_001856_hash lpfc_idiag_mbxacc_read 3 28061 _001856_hash NULL
85744 +_001857_hash lpfc_idiag_pcicfg_read 3 50334 _001857_hash NULL
85745 +_001858_hash lpfc_idiag_queacc_read 3 13950 _001858_hash NULL
85746 +_001859_hash lpfc_idiag_queinfo_read 3 55662 _001859_hash NULL
85747 +_001860_hash mac80211_format_buffer 2 41010 _001860_hash NULL
85748 +_001861_hash macvtap_put_user 4 55609 _001861_hash NULL
85749 +_001862_hash macvtap_sendmsg 4 30629 _001862_hash NULL
85750 +_001863_hash mic_calc_failure_read 3 59700 _001863_hash NULL
85751 +_001864_hash mic_rx_pkts_read 3 27972 _001864_hash NULL
85752 +_001865_hash minstrel_stats_read 3 17290 _001865_hash NULL
85753 +_001866_hash mmc_ext_csd_read 3 13205 _001866_hash NULL
85754 +_001867_hash mon_bin_read 3 6841 _001867_hash NULL
85755 +_001868_hash mon_stat_read 3 25238 _001868_hash NULL
85756 +_001870_hash mqueue_read_file 3 6228 _001870_hash NULL
85757 +_001871_hash mwifiex_debug_read 3 53074 _001871_hash NULL
85758 +_001872_hash mwifiex_getlog_read 3 54269 _001872_hash NULL
85759 +_001873_hash mwifiex_info_read 3 53447 _001873_hash NULL
85760 +_001874_hash mwifiex_rdeeprom_read 3 51429 _001874_hash NULL
85761 +_001875_hash mwifiex_regrdwr_read 3 34472 _001875_hash NULL
85762 +_001876_hash nfsd_vfs_read 6 62605 _003003_hash NULL nohasharray
85763 +_001877_hash nfsd_vfs_write 6 54577 _001877_hash NULL
85764 +_001878_hash nfs_idmap_lookup_id 2 10660 _001878_hash NULL
85765 +_001879_hash o2hb_debug_read 3 37851 _001879_hash NULL
85766 +_001880_hash o2net_debug_read 3 52105 _001880_hash NULL
85767 +_001881_hash ocfs2_control_read 3 56405 _001881_hash NULL
85768 +_001882_hash ocfs2_debug_read 3 14507 _001882_hash NULL
85769 +_001883_hash ocfs2_readlink 3 50656 _001883_hash NULL
85770 +_001884_hash oom_adjust_read 3 25127 _001884_hash NULL
85771 +_001885_hash oom_score_adj_read 3 39921 _002116_hash NULL nohasharray
85772 +_001886_hash oprofilefs_str_to_user 3 42182 _001886_hash NULL
85773 +_001887_hash oprofilefs_ulong_to_user 3 11582 _001887_hash NULL
85774 +_001888_hash _osd_req_list_objects 6 4204 _001888_hash NULL
85775 +_001889_hash osd_req_read_kern 5 59990 _001889_hash NULL
85776 +_001890_hash osd_req_write_kern 5 53486 _001890_hash NULL
85777 +_001891_hash p54_init_common 1 23850 _001891_hash NULL
85778 +_001892_hash packet_sendmsg 4 24954 _001892_hash NULL
85779 +_001893_hash page_readlink 3 23346 _001893_hash NULL
85780 +_001894_hash pcf50633_write_block 3 2124 _001894_hash NULL
85781 +_001895_hash platform_list_read_file 3 34734 _001895_hash NULL
85782 +_001896_hash pm860x_bulk_write 3 43875 _001896_hash NULL
85783 +_001897_hash pm_qos_power_read 3 55891 _001897_hash NULL
85784 +_001898_hash pms_read 3 53873 _001898_hash NULL
85785 +_001899_hash port_show_regs 3 5904 _001899_hash NULL
85786 +_001900_hash proc_coredump_filter_read 3 39153 _001900_hash NULL
85787 +_001901_hash proc_fdinfo_read 3 62043 _001901_hash NULL
85788 +_001902_hash proc_info_read 3 63344 _001902_hash NULL
85789 +_001903_hash proc_loginuid_read 3 15631 _001903_hash NULL
85790 +_001904_hash proc_pid_attr_read 3 10173 _001904_hash NULL
85791 +_001905_hash proc_pid_readlink 3 52186 _001905_hash NULL
85792 +_001906_hash proc_read 3 43614 _001906_hash NULL
85793 +_001907_hash proc_self_readlink 3 38094 _001907_hash NULL
85794 +_001908_hash proc_sessionid_read 3 6911 _002038_hash NULL nohasharray
85795 +_001909_hash provide_user_output 3 41105 _001909_hash NULL
85796 +_001910_hash ps_pspoll_max_apturn_read 3 6699 _001910_hash NULL
85797 +_001911_hash ps_pspoll_timeouts_read 3 11776 _001911_hash NULL
85798 +_001912_hash ps_pspoll_utilization_read 3 5361 _001912_hash NULL
85799 +_001913_hash pstore_file_read 3 57288 _001913_hash NULL
85800 +_001914_hash ps_upsd_max_apturn_read 3 19918 _001914_hash NULL
85801 +_001915_hash ps_upsd_max_sptime_read 3 63362 _001915_hash NULL
85802 +_001916_hash ps_upsd_timeouts_read 3 28924 _001916_hash NULL
85803 +_001917_hash ps_upsd_utilization_read 3 51669 _001917_hash NULL
85804 +_001918_hash pvr2_v4l2_read 3 18006 _001918_hash NULL
85805 +_001919_hash pwr_disable_ps_read 3 13176 _001919_hash NULL
85806 +_001920_hash pwr_elp_enter_read 3 5324 _001920_hash NULL
85807 +_001921_hash pwr_enable_ps_read 3 17686 _001921_hash NULL
85808 +_001922_hash pwr_fix_tsf_ps_read 3 26627 _001922_hash NULL
85809 +_001923_hash pwr_missing_bcns_read 3 25824 _001923_hash NULL
85810 +_001924_hash pwr_power_save_off_read 3 18355 _001924_hash NULL
85811 +_001925_hash pwr_ps_enter_read 3 26935 _001925_hash &_000501_hash
85812 +_001926_hash pwr_rcvd_awake_beacons_read 3 50505 _001926_hash NULL
85813 +_001927_hash pwr_rcvd_beacons_read 3 52836 _001927_hash NULL
85814 +_001928_hash pwr_tx_without_ps_read 3 48423 _001928_hash NULL
85815 +_001929_hash pwr_tx_with_ps_read 3 60851 _001929_hash NULL
85816 +_001930_hash pwr_wake_on_host_read 3 26321 _001930_hash NULL
85817 +_001931_hash pwr_wake_on_timer_exp_read 3 22640 _001931_hash NULL
85818 +_001932_hash queues_read 3 24877 _001932_hash NULL
85819 +_001933_hash raw_recvmsg 4 17277 _001933_hash NULL
85820 +_001934_hash rcname_read 3 25919 _001934_hash NULL
85821 +_001935_hash read_4k_modal_eeprom 3 30212 _001935_hash NULL
85822 +_001936_hash read_9287_modal_eeprom 3 59327 _001936_hash NULL
85823 +_001937_hash reada_find_extent 2 63486 _001937_hash NULL
85824 +_001938_hash read_def_modal_eeprom 3 14041 _001938_hash NULL
85825 +_001939_hash read_enabled_file_bool 3 37744 _001939_hash NULL
85826 +_001940_hash read_file_ani 3 23161 _001940_hash NULL
85827 +_001941_hash read_file_antenna 3 13574 _001941_hash NULL
85828 +_001942_hash read_file_base_eeprom 3 42168 _001942_hash NULL
85829 +_001943_hash read_file_beacon 3 32595 _001943_hash NULL
85830 +_001944_hash read_file_blob 3 57406 _001944_hash NULL
85831 +_001945_hash read_file_bool 3 4180 _001945_hash NULL
85832 +_001946_hash read_file_credit_dist_stats 3 54367 _001946_hash NULL
85833 +_001947_hash read_file_debug 3 58256 _001947_hash NULL
85834 +_001948_hash read_file_disable_ani 3 6536 _001948_hash NULL
85835 +_001949_hash read_file_dma 3 9530 _001949_hash NULL
85836 +_001950_hash read_file_dump_nfcal 3 18766 _001950_hash NULL
85837 +_001951_hash read_file_frameerrors 3 64001 _001951_hash NULL
85838 +_001952_hash read_file_interrupt 3 61742 _001959_hash NULL nohasharray
85839 +_001953_hash read_file_misc 3 9948 _001953_hash NULL
85840 +_001954_hash read_file_modal_eeprom 3 39909 _001954_hash NULL
85841 +_001955_hash read_file_queue 3 40895 _001955_hash NULL
85842 +_001956_hash read_file_rcstat 3 22854 _001956_hash NULL
85843 +_001957_hash read_file_recv 3 48232 _001957_hash NULL
85844 +_001958_hash read_file_regidx 3 33370 _001958_hash NULL
85845 +_001959_hash read_file_regval 3 61742 _001959_hash &_001952_hash
85846 +_001960_hash read_file_reset 3 52310 _001960_hash NULL
85847 +_001961_hash read_file_rx_chainmask 3 41605 _001961_hash NULL
85848 +_001962_hash read_file_slot 3 50111 _001962_hash NULL
85849 +_001963_hash read_file_stations 3 35795 _001963_hash NULL
85850 +_001964_hash read_file_tgt_int_stats 3 20697 _001964_hash NULL
85851 +_001965_hash read_file_tgt_rx_stats 3 33944 _001965_hash NULL
85852 +_001966_hash read_file_tgt_stats 3 8959 _001966_hash NULL
85853 +_001967_hash read_file_tgt_tx_stats 3 51847 _001967_hash NULL
85854 +_001968_hash read_file_tx_chainmask 3 3829 _001968_hash NULL
85855 +_001969_hash read_file_war_stats 3 292 _001969_hash NULL
85856 +_001970_hash read_file_xmit 3 21487 _001970_hash NULL
85857 +_001971_hash read_from_oldmem 2 3337 _001971_hash NULL
85858 +_001972_hash read_oldmem 3 55658 _001972_hash NULL
85859 +_001973_hash regmap_name_read_file 3 39379 _001973_hash NULL
85860 +_001974_hash repair_io_failure 4 4815 _001974_hash NULL
85861 +_001975_hash request_key_and_link 4 42693 _001975_hash NULL
85862 +_001976_hash res_counter_read 4 33499 _001976_hash NULL
85863 +_001977_hash retry_count_read 3 52129 _001977_hash NULL
85864 +_001978_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _001978_hash NULL
85865 +_001979_hash rs_sta_dbgfs_scale_table_read 3 40262 _001979_hash NULL
85866 +_001980_hash rs_sta_dbgfs_stats_table_read 3 56573 _001980_hash NULL
85867 +_001981_hash rts_threshold_read 3 44384 _001981_hash NULL
85868 +_001982_hash rx_dropped_read 3 44799 _001982_hash NULL
85869 +_001983_hash rx_fcs_err_read 3 62844 _001983_hash NULL
85870 +_001984_hash rx_hdr_overflow_read 3 64407 _001984_hash NULL
85871 +_001985_hash rx_hw_stuck_read 3 57179 _001985_hash NULL
85872 +_001986_hash rx_out_of_mem_read 3 10157 _001986_hash NULL
85873 +_001987_hash rx_path_reset_read 3 23801 _001987_hash NULL
85874 +_001988_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _001988_hash NULL
85875 +_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _003089_hash NULL nohasharray
85876 +_001990_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _001990_hash NULL
85877 +_001991_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _001991_hash NULL
85878 +_001992_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _001992_hash NULL
85879 +_001993_hash rx_reset_counter_read 3 58001 _001993_hash NULL
85880 +_001994_hash rx_xfr_hint_trig_read 3 40283 _001994_hash NULL
85881 +_001995_hash s5m_bulk_write 3 4833 _001995_hash NULL
85882 +_001996_hash scrub_setup_recheck_block 3-4 56245 _001996_hash NULL
85883 +_001998_hash scsi_adjust_queue_depth 3 12802 _001998_hash NULL
85884 +_001999_hash selinux_inode_notifysecctx 3 36896 _001999_hash NULL
85885 +_002000_hash sel_read_avc_cache_threshold 3 33942 _002000_hash NULL
85886 +_002001_hash sel_read_avc_hash_stats 3 1984 _002001_hash NULL
85887 +_002002_hash sel_read_bool 3 24236 _002002_hash NULL
85888 +_002003_hash sel_read_checkreqprot 3 33068 _002003_hash NULL
85889 +_002004_hash sel_read_class 3 12669 _002541_hash NULL nohasharray
85890 +_002005_hash sel_read_enforce 3 2828 _002005_hash NULL
85891 +_002006_hash sel_read_handle_status 3 56139 _002006_hash NULL
85892 +_002007_hash sel_read_handle_unknown 3 57933 _002007_hash NULL
85893 +_002008_hash sel_read_initcon 3 32362 _002008_hash NULL
85894 +_002009_hash sel_read_mls 3 25369 _002009_hash NULL
85895 +_002010_hash sel_read_perm 3 42302 _002010_hash NULL
85896 +_002011_hash sel_read_policy 3 55947 _002011_hash NULL
85897 +_002012_hash sel_read_policycap 3 28544 _002012_hash NULL
85898 +_002013_hash sel_read_policyvers 3 55 _003257_hash NULL nohasharray
85899 +_002014_hash send_msg 4 37323 _002014_hash NULL
85900 +_002015_hash send_packet 4 52960 _002015_hash NULL
85901 +_002016_hash short_retry_limit_read 3 4687 _002016_hash NULL
85902 +_002017_hash simple_attr_read 3 24738 _002017_hash NULL
85903 +_002018_hash simple_transaction_read 3 17076 _002018_hash NULL
85904 +_002019_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002019_hash NULL
85905 +_002022_hash skb_copy_datagram_iovec 2-4 5806 _002022_hash NULL
85906 +_002024_hash smk_read_ambient 3 61220 _002024_hash NULL
85907 +_002025_hash smk_read_direct 3 15803 _002025_hash NULL
85908 +_002026_hash smk_read_doi 3 30813 _002026_hash NULL
85909 +_002027_hash smk_read_logging 3 37804 _002027_hash NULL
85910 +_002028_hash smk_read_onlycap 3 3855 _002028_hash NULL
85911 +_002029_hash snapshot_read 3 22601 _002029_hash NULL
85912 +_002030_hash snd_cs4281_BA0_read 5 6847 _002030_hash NULL
85913 +_002031_hash snd_cs4281_BA1_read 5 20323 _002031_hash NULL
85914 +_002032_hash snd_cs46xx_io_read 5 45734 _002032_hash NULL
85915 +_002033_hash snd_gus_dram_read 4 56686 _002033_hash NULL
85916 +_002034_hash snd_pcm_oss_read 3 28317 _002034_hash NULL
85917 +_002035_hash snd_rme32_capture_copy 5 39653 _002035_hash NULL
85918 +_002036_hash snd_rme96_capture_copy 5 58484 _002036_hash NULL
85919 +_002037_hash snd_soc_hw_bulk_write_raw 4 14245 _002037_hash NULL
85920 +_002038_hash spi_show_regs 3 6911 _002038_hash &_001908_hash
85921 +_002039_hash sta_agg_status_read 3 14058 _002039_hash NULL
85922 +_002040_hash sta_connected_time_read 3 17435 _002040_hash NULL
85923 +_002041_hash sta_flags_read 3 56710 _002041_hash NULL
85924 +_002042_hash sta_ht_capa_read 3 10366 _002042_hash NULL
85925 +_002043_hash sta_last_seq_ctrl_read 3 19106 _002043_hash NULL
85926 +_002044_hash sta_num_ps_buf_frames_read 3 1488 _002044_hash NULL
85927 +_002045_hash st_read 3 51251 _002045_hash NULL
85928 +_002046_hash supply_map_read_file 3 10608 _002046_hash NULL
85929 +_002047_hash sysfs_read_file 3 42113 _002047_hash NULL
85930 +_002048_hash sys_lgetxattr 4 45531 _002048_hash NULL
85931 +_002049_hash sys_preadv 3 17100 _002049_hash NULL
85932 +_002050_hash sys_pwritev 3 41722 _002050_hash NULL
85933 +_002051_hash sys_readv 3 50664 _002051_hash NULL
85934 +_002052_hash sys_rt_sigpending 2 24961 _002052_hash NULL
85935 +_002053_hash sys_writev 3 28384 _002053_hash NULL
85936 +_002054_hash test_iso_queue 5 62534 _002054_hash NULL
85937 +_002055_hash ts_read 3 44687 _002055_hash NULL
85938 +_002056_hash TSS_authhmac 3 12839 _002056_hash NULL
85939 +_002057_hash TSS_checkhmac1 5 31429 _002057_hash NULL
85940 +_002058_hash TSS_checkhmac2 5-7 40520 _002058_hash NULL
85941 +_002060_hash tt3650_ci_msg_locked 4 8013 _002060_hash NULL
85942 +_002061_hash tun_sendmsg 4 10337 _002061_hash NULL
85943 +_002062_hash tx_internal_desc_overflow_read 3 47300 _002062_hash NULL
85944 +_002063_hash tx_queue_len_read 3 1463 _002063_hash NULL
85945 +_002064_hash tx_queue_status_read 3 44978 _002064_hash NULL
85946 +_002065_hash ubi_io_write_data 4-5 40305 _002065_hash NULL
85947 +_002067_hash uhci_debug_read 3 5911 _002067_hash NULL
85948 +_002068_hash unix_stream_recvmsg 4 35210 _002068_hash NULL
85949 +_002069_hash uvc_debugfs_stats_read 3 56651 _002069_hash NULL
85950 +_002070_hash vhost_add_used_and_signal_n 4 8038 _002070_hash NULL
85951 +_002071_hash vifs_state_read 3 33762 _002071_hash NULL
85952 +_002072_hash vmbus_open 2-3 12154 _002072_hash NULL
85953 +_002074_hash waiters_read 3 40902 _002074_hash NULL
85954 +_002075_hash wep_addr_key_count_read 3 20174 _002075_hash NULL
85955 +_002076_hash wep_decrypt_fail_read 3 58567 _002076_hash NULL
85956 +_002077_hash wep_default_key_count_read 3 43035 _002077_hash NULL
85957 +_002078_hash wep_interrupt_read 3 41492 _002078_hash NULL
85958 +_002079_hash wep_key_not_found_read 3 13377 _002079_hash &_000915_hash
85959 +_002080_hash wep_packets_read 3 18751 _002080_hash NULL
85960 +_002081_hash wl1271_format_buffer 2 20834 _002081_hash NULL
85961 +_002082_hash wm8994_bulk_write 3 13615 _002082_hash NULL
85962 +_002083_hash wusb_prf_256 7 29203 _002083_hash NULL
85963 +_002084_hash wusb_prf_64 7 51065 _002084_hash NULL
85964 +_002085_hash xfs_buf_read_uncached 4 27519 _002085_hash NULL
85965 +_002086_hash xfs_iext_add 3 41422 _002086_hash NULL
85966 +_002087_hash xfs_iext_remove_direct 3 40744 _002087_hash NULL
85967 +_002088_hash xfs_trans_get_efd 3 51148 _002088_hash NULL
85968 +_002089_hash xfs_trans_get_efi 2 7898 _002089_hash NULL
85969 +_002090_hash xlog_get_bp 2 23229 _002090_hash NULL
85970 +_002091_hash xz_dec_init 2 29029 _002091_hash NULL
85971 +_002092_hash aac_change_queue_depth 2 825 _002092_hash NULL
85972 +_002093_hash agp_allocate_memory_wrap 1 16576 _002093_hash NULL
85973 +_002094_hash arcmsr_adjust_disk_queue_depth 2 16756 _002094_hash NULL
85974 +_002095_hash atalk_recvmsg 4 22053 _002095_hash NULL
85975 +_002097_hash atomic_read_file 3 16227 _002097_hash NULL
85976 +_002098_hash ax25_recvmsg 4 64441 _002098_hash NULL
85977 +_002099_hash beacon_interval_read 3 7091 _002099_hash NULL
85978 +_002100_hash btrfs_init_new_buffer 4 55761 _002100_hash NULL
85979 +_002101_hash btrfs_mksubvol 3 39479 _002101_hash NULL
85980 +_002102_hash bt_sock_recvmsg 4 12316 _002102_hash NULL
85981 +_002103_hash bt_sock_stream_recvmsg 4 52518 _002103_hash NULL
85982 +_002104_hash caif_seqpkt_recvmsg 4 32241 _002104_hash NULL
85983 +_002105_hash cpu_type_read 3 36540 _002105_hash NULL
85984 +_002106_hash cx18_read 3 23699 _002106_hash NULL
85985 +_002107_hash dccp_recvmsg 4 16056 _002107_hash NULL
85986 +_002108_hash depth_read 3 31112 _002108_hash NULL
85987 +_002109_hash dfs_global_file_read 3 7787 _002109_hash NULL
85988 +_002110_hash dgram_recvmsg 4 23104 _002110_hash NULL
85989 +_002111_hash dma_skb_copy_datagram_iovec 3-5 21516 _002111_hash NULL
85990 +_002113_hash dtim_interval_read 3 654 _002113_hash NULL
85991 +_002114_hash dynamic_ps_timeout_read 3 10110 _002114_hash NULL
85992 +_002115_hash enable_read 3 2117 _002115_hash NULL
85993 +_002116_hash exofs_read_kern 6 39921 _002116_hash &_001885_hash
85994 +_002117_hash fc_change_queue_depth 2 36841 _002117_hash NULL
85995 +_002118_hash forced_ps_read 3 31685 _002118_hash NULL
85996 +_002119_hash frequency_read 3 64031 _003106_hash NULL nohasharray
85997 +_002120_hash get_alua_req 3 4166 _002120_hash NULL
85998 +_002121_hash get_rdac_req 3 45882 _002121_hash NULL
85999 +_002122_hash hci_sock_recvmsg 4 7072 _002122_hash NULL
86000 +_002123_hash hpsa_change_queue_depth 2 15449 _002123_hash NULL
86001 +_002124_hash hptiop_adjust_disk_queue_depth 2 20122 _002124_hash NULL
86002 +_002125_hash ide_queue_pc_tail 5 11673 _002125_hash NULL
86003 +_002126_hash ide_raw_taskfile 4 42355 _002126_hash NULL
86004 +_002127_hash idetape_queue_rw_tail 3 29562 _002127_hash NULL
86005 +_002128_hash ieee80211_if_read_aid 3 9705 _002128_hash NULL
86006 +_002129_hash ieee80211_if_read_auto_open_plinks 3 38268 _003504_hash NULL nohasharray
86007 +_002130_hash ieee80211_if_read_ave_beacon 3 64924 _002130_hash NULL
86008 +_002131_hash ieee80211_if_read_bssid 3 35161 _002131_hash NULL
86009 +_002132_hash ieee80211_if_read_channel_type 3 23884 _002132_hash NULL
86010 +_002133_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002133_hash NULL
86011 +_002134_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002134_hash NULL
86012 +_002135_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002135_hash NULL
86013 +_002136_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002136_hash NULL
86014 +_002137_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002137_hash NULL
86015 +_002138_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002138_hash NULL
86016 +_002139_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002139_hash NULL
86017 +_002140_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002140_hash NULL
86018 +_002141_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002141_hash NULL
86019 +_002142_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002142_hash NULL
86020 +_002143_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002143_hash NULL
86021 +_002144_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002144_hash NULL
86022 +_002145_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002145_hash NULL
86023 +_002146_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002146_hash NULL
86024 +_002147_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002147_hash NULL
86025 +_002148_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002148_hash NULL
86026 +_002149_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002149_hash NULL
86027 +_002150_hash ieee80211_if_read_drop_unencrypted 3 37053 _002150_hash NULL
86028 +_002151_hash ieee80211_if_read_dtim_count 3 38419 _002151_hash NULL
86029 +_002152_hash ieee80211_if_read_element_ttl 3 18869 _002152_hash NULL
86030 +_002153_hash ieee80211_if_read_estab_plinks 3 32533 _002153_hash NULL
86031 +_002154_hash ieee80211_if_read_flags 3 57470 _002389_hash NULL nohasharray
86032 +_002155_hash ieee80211_if_read_fwded_frames 3 36520 _002155_hash NULL
86033 +_002156_hash ieee80211_if_read_fwded_mcast 3 39571 _002156_hash &_000151_hash
86034 +_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002859_hash NULL nohasharray
86035 +_002158_hash ieee80211_if_read_last_beacon 3 31257 _002158_hash NULL
86036 +_002159_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002159_hash NULL
86037 +_002160_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002160_hash NULL
86038 +_002161_hash ieee80211_if_read_num_sta_authorized 3 56177 _002161_hash NULL
86039 +_002162_hash ieee80211_if_read_num_sta_ps 3 34722 _002162_hash NULL
86040 +_002163_hash ieee80211_if_read_path_refresh_time 3 25545 _002163_hash NULL
86041 +_002164_hash ieee80211_if_read_peer 3 45233 _002164_hash NULL
86042 +_002165_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002165_hash NULL
86043 +_002166_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002166_hash NULL
86044 +_002167_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002167_hash NULL
86045 +_002168_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002168_hash NULL
86046 +_002169_hash ieee80211_if_read_rssi_threshold 3 49260 _002169_hash NULL
86047 +_002170_hash ieee80211_if_read_smps 3 27416 _002170_hash NULL
86048 +_002171_hash ieee80211_if_read_state 3 9813 _002280_hash NULL nohasharray
86049 +_002172_hash ieee80211_if_read_tkip_mic_test 3 19565 _002172_hash NULL
86050 +_002173_hash ieee80211_if_read_tsf 3 16420 _002173_hash NULL
86051 +_002174_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002174_hash NULL
86052 +_002175_hash ieee80211_if_read_uapsd_queues 3 55150 _002175_hash NULL
86053 +_002176_hash ieee80211_rx_mgmt_beacon 3 24430 _002176_hash NULL
86054 +_002177_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002177_hash NULL
86055 +_002178_hash ima_show_htable_violations 3 10619 _002178_hash NULL
86056 +_002179_hash ima_show_measurements_count 3 23536 _002179_hash NULL
86057 +_002180_hash insert_one_name 7 61668 _002180_hash NULL
86058 +_002181_hash ipr_change_queue_depth 2 6431 _002181_hash NULL
86059 +_002182_hash ip_recv_error 3 23109 _002182_hash NULL
86060 +_002183_hash ipv6_recv_error 3 56347 _002183_hash NULL
86061 +_002184_hash ipv6_recv_rxpmtu 3 7142 _002184_hash NULL
86062 +_002185_hash ipx_recvmsg 4 44366 _002185_hash NULL
86063 +_002186_hash irda_recvmsg_dgram 4 32631 _002186_hash NULL
86064 +_002187_hash iscsi_change_queue_depth 2 23416 _002187_hash NULL
86065 +_002188_hash ivtv_read_pos 3 34400 _002188_hash &_000303_hash
86066 +_002189_hash key_conf_hw_key_idx_read 3 25003 _002189_hash NULL
86067 +_002190_hash key_conf_keyidx_read 3 42443 _002190_hash NULL
86068 +_002191_hash key_conf_keylen_read 3 49758 _002191_hash NULL
86069 +_002192_hash key_flags_read 3 25931 _002192_hash NULL
86070 +_002193_hash key_ifindex_read 3 31411 _002193_hash NULL
86071 +_002194_hash key_tx_rx_count_read 3 44742 _002194_hash NULL
86072 +_002195_hash l2cap_sock_sendmsg 4 63427 _002195_hash NULL
86073 +_002196_hash l2tp_ip_recvmsg 4 22681 _002196_hash NULL
86074 +_002197_hash llc_ui_recvmsg 4 3826 _002197_hash NULL
86075 +_002198_hash lpfc_change_queue_depth 2 25905 _002198_hash NULL
86076 +_002199_hash macvtap_do_read 4 36555 _002199_hash &_001832_hash
86077 +_002200_hash megaraid_change_queue_depth 2 64815 _002200_hash NULL
86078 +_002201_hash megasas_change_queue_depth 2 32747 _002201_hash NULL
86079 +_002202_hash mptscsih_change_queue_depth 2 26036 _002202_hash NULL
86080 +_002203_hash NCR_700_change_queue_depth 2 31742 _002203_hash NULL
86081 +_002204_hash netlink_recvmsg 4 61600 _002204_hash NULL
86082 +_002205_hash nfsctl_transaction_read 3 48250 _002205_hash NULL
86083 +_002206_hash nfs_map_group_to_gid 3 15892 _002206_hash NULL
86084 +_002207_hash nfs_map_name_to_uid 3 51132 _002207_hash NULL
86085 +_002208_hash nr_recvmsg 4 12649 _002208_hash NULL
86086 +_002209_hash osd_req_list_collection_objects 5 36664 _002209_hash NULL
86087 +_002210_hash osd_req_list_partition_objects 5 56464 _002210_hash NULL
86088 +_002212_hash packet_recv_error 3 16669 _002212_hash NULL
86089 +_002213_hash packet_recvmsg 4 47700 _002213_hash NULL
86090 +_002214_hash pep_recvmsg 4 19402 _002214_hash NULL
86091 +_002215_hash pfkey_recvmsg 4 53604 _002215_hash NULL
86092 +_002216_hash ping_recvmsg 4 25597 _002216_hash NULL
86093 +_002217_hash pmcraid_change_queue_depth 2 9116 _002217_hash NULL
86094 +_002218_hash pn_recvmsg 4 30887 _002218_hash NULL
86095 +_002219_hash pointer_size_read 3 51863 _002219_hash NULL
86096 +_002220_hash power_read 3 15939 _002220_hash NULL
86097 +_002221_hash pppoe_recvmsg 4 15073 _002221_hash NULL
86098 +_002222_hash pppol2tp_recvmsg 4 57742 _003858_hash NULL nohasharray
86099 +_002223_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002223_hash NULL
86100 +_002224_hash qla2x00_change_queue_depth 2 24742 _002224_hash NULL
86101 +_002225_hash raw_recvmsg 4 52529 _002225_hash NULL
86102 +_002226_hash rawsock_recvmsg 4 12144 _002226_hash NULL
86103 +_002227_hash rawv6_recvmsg 4 30265 _002227_hash NULL
86104 +_002228_hash reada_add_block 2 54247 _002228_hash NULL
86105 +_002229_hash readahead_tree_block 3 36285 _002229_hash NULL
86106 +_002230_hash reada_tree_block_flagged 3 18402 _002230_hash NULL
86107 +_002231_hash read_tree_block 3 841 _002231_hash NULL
86108 +_002232_hash recover_peb 6-7 29238 _002232_hash NULL
86109 +_002234_hash recv_msg 4 48709 _002234_hash NULL
86110 +_002235_hash recv_stream 4 30138 _002235_hash NULL
86111 +_002236_hash _req_append_segment 2 41031 _002236_hash NULL
86112 +_002237_hash request_key_async 4 6990 _002237_hash NULL
86113 +_002238_hash request_key_async_with_auxdata 4 46624 _002238_hash NULL
86114 +_002239_hash request_key_with_auxdata 4 24515 _002239_hash NULL
86115 +_002240_hash rose_recvmsg 4 2368 _002240_hash NULL
86116 +_002241_hash rxrpc_recvmsg 4 26233 _002241_hash NULL
86117 +_002242_hash rx_streaming_always_read 3 49401 _002242_hash NULL
86118 +_002243_hash rx_streaming_interval_read 3 55291 _002243_hash NULL
86119 +_002244_hash sas_change_queue_depth 2 18555 _002244_hash NULL
86120 +_002245_hash scsi_activate_tcq 2 42640 _002245_hash NULL
86121 +_002246_hash scsi_deactivate_tcq 2 47086 _002246_hash NULL
86122 +_002247_hash scsi_execute 5 33596 _002247_hash NULL
86123 +_002248_hash _scsih_adjust_queue_depth 2 1083 _002248_hash NULL
86124 +_002249_hash scsi_init_shared_tag_map 2 59812 _002249_hash NULL
86125 +_002250_hash scsi_track_queue_full 2 44239 _002250_hash NULL
86126 +_002251_hash sctp_recvmsg 4 23265 _002251_hash NULL
86127 +_002252_hash send_stream 4 3397 _002252_hash NULL
86128 +_002253_hash skb_copy_and_csum_datagram_iovec 2 24466 _002253_hash NULL
86129 +_002255_hash snd_gf1_mem_proc_dump 5 16926 _003922_hash NULL nohasharray
86130 +_002256_hash split_scan_timeout_read 3 20029 _002256_hash NULL
86131 +_002257_hash sta_dev_read 3 14782 _002257_hash NULL
86132 +_002258_hash sta_inactive_ms_read 3 25690 _002258_hash NULL
86133 +_002259_hash sta_last_signal_read 3 31818 _002259_hash NULL
86134 +_002260_hash stats_dot11ACKFailureCount_read 3 45558 _002260_hash NULL
86135 +_002261_hash stats_dot11FCSErrorCount_read 3 28154 _002261_hash NULL
86136 +_002262_hash stats_dot11RTSFailureCount_read 3 43948 _002262_hash NULL
86137 +_002263_hash stats_dot11RTSSuccessCount_read 3 33065 _002263_hash NULL
86138 +_002264_hash storvsc_connect_to_vsp 2 22 _002264_hash NULL
86139 +_002265_hash suspend_dtim_interval_read 3 64971 _002265_hash NULL
86140 +_002266_hash sys_msgrcv 3 959 _002266_hash NULL
86141 +_002267_hash tcm_loop_change_queue_depth 2 42454 _002267_hash NULL
86142 +_002268_hash tcp_copy_to_iovec 3 28344 _002268_hash NULL
86143 +_002269_hash tcp_recvmsg 4 31238 _002269_hash NULL
86144 +_002270_hash timeout_read 3 47915 _002270_hash NULL
86145 +_002271_hash total_ps_buffered_read 3 16365 _002271_hash NULL
86146 +_002272_hash tun_put_user 4 59849 _002272_hash NULL
86147 +_002273_hash twa_change_queue_depth 2 48808 _002273_hash NULL
86148 +_002274_hash tw_change_queue_depth 2 11116 _002274_hash NULL
86149 +_002275_hash twl_change_queue_depth 2 41342 _002275_hash NULL
86150 +_002276_hash ubi_eba_write_leb 5-6 19826 _002276_hash NULL
86151 +_002278_hash ubi_eba_write_leb_st 5 27896 _002278_hash NULL
86152 +_002279_hash udp_recvmsg 4 42558 _002279_hash NULL
86153 +_002280_hash udpv6_recvmsg 4 9813 _002280_hash &_002171_hash
86154 +_002281_hash ulong_read_file 3 42304 _002281_hash &_000511_hash
86155 +_002282_hash unix_dgram_recvmsg 4 14952 _002282_hash NULL
86156 +_002283_hash user_power_read 3 39414 _002283_hash NULL
86157 +_002284_hash vcc_recvmsg 4 37198 _002284_hash NULL
86158 +_002285_hash wep_iv_read 3 54744 _002285_hash NULL
86159 +_002286_hash x25_recvmsg 4 42777 _002286_hash NULL
86160 +_002287_hash xfs_iext_insert 3 18667 _003817_hash NULL nohasharray
86161 +_002288_hash xfs_iext_remove 3 50909 _002288_hash NULL
86162 +_002289_hash xlog_find_verify_log_record 2 18870 _002289_hash NULL
86163 +_002290_hash btrfs_alloc_free_block 3 29982 _002290_hash NULL
86164 +_002291_hash cx18_read_pos 3 4683 _002291_hash NULL
86165 +_002292_hash l2cap_sock_recvmsg 4 59886 _002292_hash NULL
86166 +_002293_hash osd_req_list_dev_partitions 4 60027 _002293_hash NULL
86167 +_002294_hash osd_req_list_partition_collections 5 38223 _002294_hash NULL
86168 +_002295_hash osst_do_scsi 4 44410 _002295_hash NULL
86169 +_002296_hash qla2x00_handle_queue_full 2 24365 _002296_hash NULL
86170 +_002297_hash rfcomm_sock_recvmsg 4 22227 _002297_hash NULL
86171 +_002298_hash scsi_execute_req 5 42088 _002298_hash NULL
86172 +_002299_hash _scsih_change_queue_depth 2 26230 _002299_hash NULL
86173 +_002300_hash spi_execute 5 28736 _002300_hash NULL
86174 +_002301_hash submit_inquiry 3 42108 _002301_hash NULL
86175 +_002302_hash tcp_dma_try_early_copy 3 37651 _002302_hash NULL
86176 +_002303_hash tun_do_read 4 50800 _002303_hash NULL
86177 +_002304_hash ubi_eba_atomic_leb_change 5 13041 _002304_hash NULL
86178 +_002305_hash ubi_leb_write 4-5 41691 _002305_hash NULL
86179 +_002307_hash unix_seqpacket_recvmsg 4 23062 _003542_hash NULL nohasharray
86180 +_002308_hash write_leb 5 36957 _002308_hash NULL
86181 +_002309_hash ch_do_scsi 4 31171 _002309_hash NULL
86182 +_002310_hash dbg_leb_write 4-5 20478 _002310_hash NULL
86183 +_002312_hash scsi_mode_sense 5 16835 _002312_hash NULL
86184 +_002313_hash scsi_vpd_inquiry 4 30040 _002313_hash NULL
86185 +_002314_hash ses_recv_diag 4 47143 _002314_hash &_000673_hash
86186 +_002315_hash ses_send_diag 4 64527 _002315_hash NULL
86187 +_002316_hash spi_dv_device_echo_buffer 2-3 39846 _002316_hash NULL
86188 +_002318_hash ubifs_leb_write 4-5 61226 _002318_hash NULL
86189 +_002320_hash ubi_leb_change 4 14899 _002320_hash NULL
86190 +_002321_hash ubi_write 4-5 30809 _002321_hash NULL
86191 +_002322_hash dbg_leb_change 4 19969 _002322_hash NULL
86192 +_002323_hash gluebi_write 3 27905 _002323_hash NULL
86193 +_002324_hash scsi_get_vpd_page 4 51951 _002324_hash NULL
86194 +_002325_hash sd_do_mode_sense 5 11507 _002325_hash NULL
86195 +_002326_hash ubifs_leb_change 4 22399 _002436_hash NULL nohasharray
86196 +_002327_hash ubifs_write_node 5 15088 _002327_hash NULL
86197 +_002328_hash fixup_leb 3 43256 _002328_hash NULL
86198 +_002329_hash recover_head 3 17904 _002329_hash NULL
86199 +_002330_hash alloc_cpu_rmap 1 65363 _002330_hash NULL
86200 +_002331_hash alloc_ebda_hpc 1-2 50046 _002331_hash NULL
86201 +_002333_hash alloc_sched_domains 1 28972 _002333_hash NULL
86202 +_002334_hash amthi_read 4 45831 _002334_hash NULL
86203 +_002335_hash bcm_char_read 3 31750 _002335_hash NULL
86204 +_002336_hash BcmCopySection 5 2035 _002336_hash NULL
86205 +_002337_hash buffer_from_user 3 51826 _002337_hash NULL
86206 +_002338_hash buffer_to_user 3 35439 _002338_hash NULL
86207 +_002339_hash c4iw_init_resource_fifo 3 48090 _002339_hash NULL
86208 +_002340_hash c4iw_init_resource_fifo_random 3 25547 _002340_hash NULL
86209 +_002341_hash card_send_command 3 40757 _002341_hash NULL
86210 +_002342_hash chd_dec_fetch_cdata 3 50926 _002342_hash NULL
86211 +_002343_hash crystalhd_create_dio_pool 2 3427 _002343_hash NULL
86212 +_002344_hash crystalhd_user_data 3 18407 _002344_hash NULL
86213 +_002345_hash cxio_init_resource_fifo 3 28764 _002345_hash NULL
86214 +_002346_hash cxio_init_resource_fifo_random 3 47151 _002346_hash NULL
86215 +_002347_hash do_pages_stat 2 4437 _002347_hash NULL
86216 +_002348_hash do_read_log_to_user 4 3236 _002348_hash NULL
86217 +_002349_hash do_write_log_from_user 3 39362 _002349_hash NULL
86218 +_002350_hash dt3155_read 3 59226 _002350_hash NULL
86219 +_002351_hash easycap_alsa_vmalloc 2 14426 _002351_hash NULL
86220 +_002352_hash evm_read_key 3 54674 _002352_hash NULL
86221 +_002353_hash evm_write_key 3 27715 _002353_hash NULL
86222 +_002354_hash fir16_create 3 5574 _002354_hash NULL
86223 +_002355_hash iio_allocate_device 1 18821 _002355_hash NULL
86224 +_002356_hash __iio_allocate_kfifo 2-3 55738 _002356_hash NULL
86225 +_002358_hash __iio_allocate_sw_ring_buffer 3 4843 _002358_hash NULL
86226 +_002359_hash iio_debugfs_read_reg 3 60908 _002359_hash NULL
86227 +_002360_hash iio_debugfs_write_reg 3 22742 _002360_hash NULL
86228 +_002361_hash iio_event_chrdev_read 3 54757 _002361_hash NULL
86229 +_002362_hash iio_read_first_n_kfifo 2 57910 _002362_hash NULL
86230 +_002363_hash iio_read_first_n_sw_rb 2 51911 _002363_hash NULL
86231 +_002364_hash ioapic_setup_resources 1 35255 _002364_hash NULL
86232 +_002365_hash keymap_store 4 45406 _002365_hash NULL
86233 +_002366_hash kzalloc_node 1 24352 _002366_hash NULL
86234 +_002367_hash line6_alloc_sysex_buffer 4 28225 _002367_hash NULL
86235 +_002368_hash line6_dumpreq_initbuf 3 53123 _002368_hash NULL
86236 +_002369_hash line6_midibuf_init 2 52425 _002369_hash NULL
86237 +_002370_hash lirc_write 3 20604 _002370_hash NULL
86238 +_002371_hash _malloc 1 54077 _002371_hash NULL
86239 +_002372_hash mei_read 3 6507 _002372_hash NULL
86240 +_002373_hash mei_write 3 4005 _002373_hash NULL
86241 +_002374_hash mempool_create_node 1 44715 _002374_hash NULL
86242 +_002375_hash msg_set 3 51725 _002375_hash NULL
86243 +_002376_hash newpart 6 47485 _002376_hash NULL
86244 +_002377_hash OS_kmalloc 1 36909 _002377_hash NULL
86245 +_002378_hash pcpu_alloc_bootmem 2 62074 _002378_hash NULL
86246 +_002379_hash pcpu_get_vm_areas 3 50085 _002379_hash NULL
86247 +_002380_hash resource_from_user 3 30341 _002380_hash NULL
86248 +_002381_hash sca3000_read_data 4 57064 _002381_hash NULL
86249 +_002382_hash sca3000_read_first_n_hw_rb 2 11479 _002382_hash NULL
86250 +_002383_hash send_midi_async 3 57463 _002383_hash NULL
86251 +_002384_hash sep_create_dcb_dmatables_context 6 37551 _002384_hash NULL
86252 +_002385_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002385_hash NULL
86253 +_002386_hash sep_create_msgarea_context 4 33829 _002386_hash NULL
86254 +_002387_hash sep_lli_table_secure_dma 2-3 64042 _002387_hash NULL
86255 +_002389_hash sep_lock_user_pages 2-3 57470 _002389_hash &_002154_hash
86256 +_002391_hash sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 _002391_hash NULL
86257 +_002393_hash sep_read 3 17161 _002393_hash NULL
86258 +_002394_hash TransmitTcb 4 12989 _002394_hash NULL
86259 +_002395_hash ValidateDSDParamsChecksum 3 63654 _002395_hash NULL
86260 +_002396_hash Wb35Reg_BurstWrite 4 62327 _002396_hash NULL
86261 +_002397_hash __alloc_bootmem_low_node 2 25726 _002397_hash &_001499_hash
86262 +_002398_hash __alloc_bootmem_node 2 1992 _002398_hash NULL
86263 +_002399_hash alloc_irq_cpu_rmap 1 28459 _002399_hash NULL
86264 +_002400_hash alloc_ring 2-4 18278 _002400_hash NULL
86265 +_002402_hash c4iw_init_resource 2-3 30393 _002402_hash NULL
86266 +_002404_hash cxio_hal_init_resource 2-7-6 29771 _002404_hash &_000284_hash
86267 +_002407_hash cxio_hal_init_rhdl_resource 1 25104 _002407_hash NULL
86268 +_002408_hash disk_expand_part_tbl 2 30561 _002408_hash NULL
86269 +_002409_hash InterfaceTransmitPacket 3 42058 _002409_hash NULL
86270 +_002410_hash line6_dumpreq_init 3 34473 _002410_hash NULL
86271 +_002411_hash mempool_create 1 29437 _002411_hash NULL
86272 +_002412_hash pcpu_fc_alloc 2 11818 _002412_hash NULL
86273 +_002413_hash pod_alloc_sysex_buffer 3 31651 _002413_hash NULL
86274 +_002414_hash r8712_usbctrl_vendorreq 6 48489 _002414_hash NULL
86275 +_002415_hash r871x_set_wpa_ie 3 7000 _002415_hash NULL
86276 +_002416_hash sys_move_pages 2 42626 _002416_hash NULL
86277 +_002417_hash variax_alloc_sysex_buffer 3 15237 _002417_hash NULL
86278 +_002418_hash vme_user_write 3 15587 _002418_hash NULL
86279 +_002419_hash add_partition 2 55588 _002419_hash NULL
86280 +_002420_hash __alloc_bootmem_node_high 2 65076 _002420_hash NULL
86281 +_002421_hash ceph_msgpool_init 3 33312 _002421_hash NULL
86282 +_002423_hash mempool_create_kmalloc_pool 1 41650 _002423_hash NULL
86283 +_002424_hash mempool_create_page_pool 1 30189 _002424_hash NULL
86284 +_002425_hash mempool_create_slab_pool 1 62907 _002425_hash NULL
86285 +_002426_hash variax_set_raw2 4 32374 _002426_hash NULL
86286 +_002427_hash bioset_create 1 5580 _002427_hash NULL
86287 +_002428_hash bioset_integrity_create 2 62708 _002428_hash NULL
86288 +_002429_hash biovec_create_pools 2 9575 _002429_hash NULL
86289 +_002430_hash i2o_pool_alloc 4 55485 _002430_hash NULL
86290 +_002431_hash prison_create 1 43623 _002431_hash NULL
86291 +_002432_hash unlink_simple 3 47506 _002432_hash NULL
86292 +_002433_hash alloc_ieee80211 1 20063 _002433_hash NULL
86293 +_002434_hash alloc_ieee80211_rsl 1 34564 _002434_hash NULL
86294 +_002435_hash alloc_page_cgroup 1 2919 _002435_hash NULL
86295 +_002436_hash alloc_private 2 22399 _002436_hash &_002326_hash
86296 +_002437_hash alloc_rtllib 1 51136 _002437_hash NULL
86297 +_002438_hash alloc_rx_desc_ring 2 18016 _002438_hash NULL
86298 +_002439_hash alloc_subdevices 2 43300 _002439_hash NULL
86299 +_002440_hash atomic_counters_read 3 48827 _002440_hash NULL
86300 +_002441_hash atomic_stats_read 3 36228 _002441_hash NULL
86301 +_002442_hash capabilities_read 3 58457 _002442_hash NULL
86302 +_002443_hash comedi_read 3 13199 _002443_hash NULL
86303 +_002444_hash comedi_write 3 47926 _002444_hash NULL
86304 +_002445_hash compat_do_arpt_set_ctl 4 12184 _002445_hash NULL
86305 +_002446_hash compat_do_ip6t_set_ctl 4 3184 _002446_hash NULL
86306 +_002447_hash compat_do_ipt_set_ctl 4 58466 _002447_hash &_001852_hash
86307 +_002448_hash compat_filldir 3 32999 _002448_hash NULL
86308 +_002449_hash compat_filldir64 3 35354 _002449_hash NULL
86309 +_002450_hash compat_fillonedir 3 15620 _002450_hash NULL
86310 +_002451_hash compat_rw_copy_check_uvector 3 25242 _002451_hash NULL
86311 +_002452_hash compat_sock_setsockopt 5 23 _002452_hash NULL
86312 +_002453_hash compat_sys_kexec_load 2 35674 _002453_hash NULL
86313 +_002454_hash compat_sys_keyctl 4 9639 _002454_hash NULL
86314 +_002455_hash compat_sys_move_pages 2 5861 _002455_hash NULL
86315 +_002456_hash compat_sys_mq_timedsend 3 31060 _002456_hash NULL
86316 +_002457_hash compat_sys_msgrcv 2 7482 _002457_hash NULL
86317 +_002458_hash compat_sys_msgsnd 2 10738 _002458_hash NULL
86318 +_002459_hash compat_sys_semtimedop 3 3606 _002459_hash NULL
86319 +_002460_hash __copy_in_user 3 34790 _002460_hash NULL
86320 +_002461_hash copy_in_user 3 57502 _002461_hash NULL
86321 +_002462_hash dev_counters_read 3 19216 _002462_hash NULL
86322 +_002463_hash dev_names_read 3 38509 _002463_hash NULL
86323 +_002464_hash do_arpt_set_ctl 4 51053 _002464_hash NULL
86324 +_002465_hash do_ip6t_set_ctl 4 60040 _002465_hash NULL
86325 +_002466_hash do_ipt_set_ctl 4 56238 _002466_hash NULL
86326 +_002467_hash drbd_bm_resize 2 20522 _002467_hash NULL
86327 +_002468_hash driver_names_read 3 60399 _002468_hash NULL
86328 +_002469_hash driver_stats_read 3 8944 _002469_hash NULL
86329 +_002470_hash __earlyonly_bootmem_alloc 2 23824 _002470_hash NULL
86330 +_002471_hash evtchn_read 3 3569 _002471_hash NULL
86331 +_002472_hash ext_sd_execute_read_data 9 48589 _002472_hash NULL
86332 +_002473_hash ext_sd_execute_write_data 9 8175 _002473_hash NULL
86333 +_002474_hash fat_compat_ioctl_filldir 3 36328 _002474_hash NULL
86334 +_002475_hash firmwareUpload 3 32794 _002475_hash NULL
86335 +_002476_hash flash_read 3 57843 _002476_hash NULL
86336 +_002477_hash flash_write 3 62354 _002477_hash NULL
86337 +_002478_hash gather_array 3 56641 _002478_hash NULL
86338 +_002479_hash ghash_async_setkey 3 60001 _002479_hash NULL
86339 +_002480_hash gntdev_alloc_map 2 35145 _002480_hash NULL
86340 +_002481_hash gnttab_map 2 56439 _002481_hash NULL
86341 +_002482_hash gru_alloc_gts 2-3 60056 _003495_hash NULL nohasharray
86342 +_002484_hash handle_eviocgbit 3 44193 _002484_hash NULL
86343 +_002485_hash hid_parse_report 3 51737 _002485_hash NULL
86344 +_002486_hash ieee80211_alloc_txb 1-2 52477 _002486_hash NULL
86345 +_002487_hash ieee80211_wx_set_gen_ie 3 51399 _002487_hash NULL
86346 +_002488_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _002488_hash NULL
86347 +_002489_hash init_cdev 1 8274 _002489_hash NULL
86348 +_002490_hash init_per_cpu 1 17880 _002490_hash NULL
86349 +_002491_hash ipath_create_cq 2 45586 _002491_hash NULL
86350 +_002492_hash ipath_get_base_info 3 7043 _002492_hash NULL
86351 +_002493_hash ipath_init_qp_table 2 25167 _002493_hash NULL
86352 +_002494_hash ipath_resize_cq 2 712 _002494_hash NULL
86353 +_002495_hash ni_gpct_device_construct 5 610 _002495_hash NULL
86354 +_002496_hash options_write 3 47243 _002496_hash NULL
86355 +_002497_hash portcntrs_1_read 3 47253 _002497_hash NULL
86356 +_002498_hash portcntrs_2_read 3 56586 _002498_hash NULL
86357 +_002499_hash portnames_read 3 41958 _002499_hash NULL
86358 +_002500_hash ptc_proc_write 3 12076 _002500_hash NULL
86359 +_002501_hash put_cmsg_compat 4 35937 _002501_hash NULL
86360 +_002502_hash qib_alloc_devdata 2 51819 _002502_hash NULL
86361 +_002503_hash qib_alloc_fast_reg_page_list 2 10507 _002503_hash NULL
86362 +_002504_hash qib_cdev_init 1 34778 _002504_hash NULL
86363 +_002505_hash qib_create_cq 2 27497 _002505_hash NULL
86364 +_002506_hash qib_diag_write 3 62133 _002506_hash NULL
86365 +_002507_hash qib_get_base_info 3 11369 _002507_hash NULL
86366 +_002508_hash qib_resize_cq 2 53090 _002508_hash NULL
86367 +_002509_hash qsfp_1_read 3 21915 _002509_hash NULL
86368 +_002510_hash qsfp_2_read 3 31491 _002510_hash NULL
86369 +_002511_hash queue_reply 3 22416 _002511_hash NULL
86370 +_002512_hash Realloc 2 34961 _002512_hash NULL
86371 +_002513_hash rfc4106_set_key 3 54519 _002513_hash NULL
86372 +_002514_hash rtllib_alloc_txb 1-2 21687 _002514_hash NULL
86373 +_002515_hash rtllib_wx_set_gen_ie 3 59808 _002515_hash NULL
86374 +_002516_hash rts51x_transfer_data_partial 6 5735 _002516_hash NULL
86375 +_002517_hash sparse_early_usemaps_alloc_node 4 9269 _002517_hash NULL
86376 +_002518_hash split 2 11691 _002518_hash NULL
86377 +_002519_hash stats_read_ul 3 32751 _002519_hash NULL
86378 +_002520_hash store_debug_level 3 35652 _002520_hash NULL
86379 +_002521_hash sys32_ipc 3 7238 _002521_hash NULL
86380 +_002522_hash sys32_rt_sigpending 2 25814 _002522_hash NULL
86381 +_002523_hash tunables_read 3 36385 _002523_hash NULL
86382 +_002524_hash tunables_write 3 59563 _002524_hash NULL
86383 +_002525_hash u32_array_read 3 2219 _002525_hash NULL
86384 +_002526_hash usb_buffer_alloc 2 36276 _002526_hash NULL
86385 +_002527_hash xenbus_file_write 3 6282 _002527_hash NULL
86386 +_002528_hash xpc_kmalloc_cacheline_aligned 1 42895 _002528_hash NULL
86387 +_002529_hash xpc_kzalloc_cacheline_aligned 1 65433 _002529_hash NULL
86388 +_002530_hash xsd_read 3 15653 _002530_hash NULL
86389 +_002531_hash compat_do_readv_writev 4 49102 _002531_hash NULL
86390 +_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _003110_hash NULL nohasharray
86391 +_002533_hash compat_process_vm_rw 3-5 22254 _002533_hash NULL
86392 +_002535_hash compat_sys_setsockopt 5 3326 _002535_hash NULL
86393 +_002536_hash ipath_cdev_init 1 37752 _002536_hash NULL
86394 +_002537_hash ms_read_multiple_pages 4-5 8052 _002537_hash NULL
86395 +_002539_hash ms_write_multiple_pages 5-6 10362 _002539_hash NULL
86396 +_002541_hash sparse_mem_maps_populate_node 4 12669 _002541_hash &_002004_hash
86397 +_002542_hash vmemmap_alloc_block 1 43245 _002542_hash NULL
86398 +_002543_hash xd_read_multiple_pages 4-5 11422 _002543_hash NULL
86399 +_002545_hash xd_write_multiple_pages 5-6 53633 _002545_hash NULL
86400 +_002546_hash compat_readv 3 30273 _002546_hash NULL
86401 +_002547_hash compat_sys_process_vm_readv 3-5 15374 _002547_hash NULL
86402 +_002549_hash compat_sys_process_vm_writev 3-5 41194 _002549_hash NULL
86403 +_002551_hash compat_writev 3 60063 _002551_hash NULL
86404 +_002552_hash ms_rw_multi_sector 4-3 7459 _002552_hash NULL
86405 +_002553_hash sparse_early_mem_maps_alloc_node 4 36971 _002553_hash NULL
86406 +_002554_hash vmemmap_alloc_block_buf 1 61126 _002554_hash NULL
86407 +_002555_hash xd_rw 4-3 49020 _002555_hash NULL
86408 +_002556_hash compat_sys_preadv64 3 24283 _002556_hash NULL
86409 +_002557_hash compat_sys_pwritev64 3 51151 _002557_hash NULL
86410 +_002558_hash compat_sys_readv 3 20911 _002558_hash NULL
86411 +_002559_hash compat_sys_writev 3 5784 _002559_hash NULL
86412 +_002560_hash ms_rw 4 17220 _002560_hash NULL
86413 +_002561_hash compat_sys_preadv 3 583 _002561_hash NULL
86414 +_002562_hash compat_sys_pwritev 3 17886 _002562_hash NULL
86415 +_002563_hash alloc_apertures 1 56561 _002563_hash NULL
86416 +_002564_hash bin_uuid 3 28999 _002564_hash NULL
86417 +_002565_hash __copy_from_user_inatomic_nocache 3 49921 _002565_hash NULL
86418 +_002566_hash do_dmabuf_dirty_sou 7 3017 _002566_hash NULL
86419 +_002567_hash do_surface_dirty_sou 7 39678 _002567_hash NULL
86420 +_002568_hash drm_agp_bind_pages 3 56748 _002568_hash NULL
86421 +_002569_hash drm_calloc_large 1-2 65421 _002569_hash NULL
86422 +_002571_hash drm_fb_helper_init 3-4 19044 _002571_hash NULL
86423 +_002573_hash drm_ht_create 2 18853 _002573_hash NULL
86424 +_002574_hash drm_malloc_ab 1-2 16831 _002574_hash NULL
86425 +_002576_hash drm_mode_crtc_set_gamma_size 2 31881 _002576_hash NULL
86426 +_002577_hash drm_plane_init 6 28731 _002577_hash NULL
86427 +_002578_hash drm_property_create 4 51239 _002578_hash NULL
86428 +_002579_hash drm_property_create_blob 2 7414 _002579_hash NULL
86429 +_002580_hash drm_vblank_init 2 11362 _002580_hash NULL
86430 +_002581_hash drm_vmalloc_dma 1 14550 _002581_hash NULL
86431 +_002582_hash fb_alloc_cmap_gfp 2 20792 _002582_hash NULL
86432 +_002583_hash fbcon_prepare_logo 5 6246 _002583_hash NULL
86433 +_002584_hash fb_read 3 33506 _002584_hash NULL
86434 +_002585_hash fb_write 3 46924 _002585_hash NULL
86435 +_002586_hash framebuffer_alloc 1 59145 _002586_hash NULL
86436 +_002587_hash i915_cache_sharing_read 3 24775 _002587_hash NULL
86437 +_002588_hash i915_cache_sharing_write 3 57961 _002588_hash NULL
86438 +_002589_hash i915_max_freq_read 3 20581 _002589_hash NULL
86439 +_002590_hash i915_max_freq_write 3 11350 _002590_hash NULL
86440 +_002591_hash i915_wedged_read 3 35474 _002591_hash NULL
86441 +_002592_hash i915_wedged_write 3 47771 _002592_hash NULL
86442 +_002593_hash p9_client_read 5 19750 _002593_hash NULL
86443 +_002594_hash probe_kernel_write 3 17481 _002594_hash NULL
86444 +_002595_hash sched_feat_write 3 55202 _002595_hash NULL
86445 +_002596_hash sd_alloc_ctl_entry 1 29708 _002596_hash NULL
86446 +_002597_hash tstats_write 3 60432 _002597_hash &_000009_hash
86447 +_002598_hash ttm_bo_fbdev_io 4 9805 _002598_hash NULL
86448 +_002599_hash ttm_bo_io 5 47000 _002599_hash NULL
86449 +_002600_hash ttm_dma_page_pool_free 2 34135 _002600_hash NULL
86450 +_002601_hash ttm_page_pool_free 2 61661 _002601_hash NULL
86451 +_002602_hash vmw_execbuf_process 5 22885 _002602_hash NULL
86452 +_002603_hash vmw_fifo_reserve 2 12141 _002603_hash NULL
86453 +_002604_hash vmw_kms_present 9 38130 _002604_hash NULL
86454 +_002605_hash vmw_kms_readback 6 5727 _002605_hash NULL
86455 +_002606_hash do_dmabuf_dirty_ldu 6 52241 _002606_hash NULL
86456 +_002607_hash drm_mode_create_tv_properties 2 23122 _002607_hash NULL
86457 +_002608_hash drm_property_create_enum 5 29201 _002608_hash NULL
86458 +_002609_hash fast_user_write 5 20494 _002609_hash NULL
86459 +_002610_hash fb_alloc_cmap 2 6554 _002610_hash NULL
86460 +_002611_hash i915_gem_execbuffer_relocate_slow 7 25355 _002611_hash NULL
86461 +_002612_hash kgdb_hex2mem 3 24755 _002612_hash NULL
86462 +_002613_hash ttm_object_device_init 2 10321 _002613_hash NULL
86463 +_002614_hash ttm_object_file_init 2 27804 _002614_hash NULL
86464 +_002615_hash vmw_cursor_update_image 3-4 16332 _002615_hash NULL
86465 +_002617_hash vmw_gmr2_bind 3 21305 _002617_hash NULL
86466 +_002618_hash vmw_cursor_update_dmabuf 3-4 32045 _002618_hash NULL
86467 +_002620_hash vmw_gmr_bind 3 44130 _002620_hash NULL
86468 +_002621_hash vmw_du_crtc_cursor_set 4-5 28479 _002621_hash NULL
86469 +_002622_hash __module_alloc 1 50004 _002622_hash NULL
86470 +_002623_hash module_alloc_update_bounds_rw 1 63233 _002623_hash NULL
86471 +_002624_hash module_alloc_update_bounds_rx 1 58634 _002624_hash NULL
86472 +_002625_hash acpi_system_write_alarm 3 40205 _002625_hash NULL
86473 +_002626_hash create_table 2 16213 _002626_hash NULL
86474 +_002627_hash mem_read 3 57631 _002627_hash NULL
86475 +_002628_hash mem_write 3 22232 _002628_hash NULL
86476 +_002629_hash proc_fault_inject_read 3 36802 _002629_hash NULL
86477 +_002630_hash proc_fault_inject_write 3 21058 _002630_hash NULL
86478 +_002631_hash v9fs_fid_readn 4 60544 _002631_hash NULL
86479 +_002632_hash v9fs_file_read 3 40858 _002632_hash NULL
86480 +_002633_hash __devres_alloc 2 25598 _002633_hash NULL
86481 +_002634_hash alloc_dummy_extent_buffer 2 56374 _002634_hash NULL
86482 +_002635_hash alloc_fdtable 1 17389 _002635_hash NULL
86483 +_002636_hash alloc_large_system_hash 2 22391 _002636_hash NULL
86484 +_002637_hash alloc_ldt 2 21972 _002637_hash NULL
86485 +_002638_hash __alloc_skb 1 23940 _002638_hash NULL
86486 +_002639_hash __ata_change_queue_depth 3 23484 _002639_hash NULL
86487 +_002640_hash btrfs_alloc_free_block 3 8986 _002640_hash NULL
86488 +_002641_hash btrfs_find_device_for_logical 2 44993 _002641_hash NULL
86489 +_002642_hash ccid3_hc_rx_getsockopt 3 62331 _002642_hash NULL
86490 +_002643_hash ccid3_hc_tx_getsockopt 3 16314 _002643_hash NULL
86491 +_002644_hash cifs_readdata_alloc 1 26360 _002644_hash NULL
86492 +_002645_hash cistpl_vers_1 4 15023 _002645_hash NULL
86493 +_002646_hash cmm_read 3 57520 _002646_hash NULL
86494 +_002647_hash cosa_read 3 25966 _002647_hash NULL
86495 +_002648_hash dm_table_create 3 35687 _002648_hash NULL
86496 +_002649_hash dpcm_state_read_file 3 65489 _002649_hash NULL
86497 +_002651_hash edac_mc_alloc 4 3611 _002651_hash NULL
86498 +_002652_hash ep0_read 3 38095 _002652_hash NULL
86499 +_002653_hash event_buffer_read 3 48772 _002765_hash NULL nohasharray
86500 +_002654_hash extend_netdev_table 2 21453 _002654_hash NULL
86501 +_002655_hash extract_entropy_user 3 26952 _003616_hash NULL nohasharray
86502 +_002656_hash fcoe_ctlr_device_add 3 1793 _002656_hash NULL
86503 +_002657_hash fd_do_readv 3 51297 _002657_hash NULL
86504 +_002658_hash fd_do_writev 3 29329 _002658_hash NULL
86505 +_002659_hash ffs_ep0_read 3 2672 _002659_hash NULL
86506 +_002660_hash fill_readbuf 3 32464 _002660_hash NULL
86507 +_002661_hash fw_iso_buffer_alloc 2 13704 _002661_hash NULL
86508 +_002662_hash get_fd_set 1 3866 _002662_hash NULL
86509 +_002663_hash hidraw_report_event 3 20503 _002663_hash NULL
86510 +_002664_hash ieee80211_if_read_ht_opmode 3 29044 _002664_hash NULL
86511 +_002665_hash ieee80211_if_read_num_mcast_sta 3 12419 _002665_hash NULL
86512 +_002666_hash iwl_dbgfs_calib_disabled_read 3 22649 _002666_hash NULL
86513 +_002667_hash iwl_dbgfs_rf_reset_read 3 26512 _002667_hash NULL
86514 +_002668_hash ixgbe_alloc_q_vector 4-6 24439 _002668_hash NULL
86515 +_002670_hash joydev_handle_JSIOCSAXMAP 3 48898 _002836_hash NULL nohasharray
86516 +_002671_hash joydev_handle_JSIOCSBTNMAP 3 15643 _002671_hash NULL
86517 +_002672_hash __kfifo_from_user_r 3 60345 _002672_hash NULL
86518 +_002673_hash kstrtoint_from_user 2 8778 _002673_hash NULL
86519 +_002674_hash kstrtol_from_user 2 10168 _002674_hash NULL
86520 +_002675_hash kstrtoll_from_user 2 19500 _002675_hash NULL
86521 +_002676_hash kstrtos16_from_user 2 28300 _002676_hash NULL
86522 +_002677_hash kstrtos8_from_user 2 58268 _002677_hash NULL
86523 +_002678_hash kstrtou16_from_user 2 54274 _002678_hash NULL
86524 +_002679_hash kstrtou8_from_user 2 55599 _002679_hash NULL
86525 +_002680_hash kstrtouint_from_user 2 10536 _002680_hash NULL
86526 +_002681_hash kstrtoul_from_user 2 64569 _002681_hash NULL
86527 +_002682_hash kstrtoull_from_user 2 63026 _002682_hash NULL
86528 +_002683_hash l2cap_create_iframe_pdu 3 40055 _002683_hash NULL
86529 +_002684_hash l2tp_ip6_recvmsg 4 62874 _002684_hash NULL
86530 +_002685_hash mem_cgroup_read 5 22461 _002685_hash NULL
86531 +_002686_hash nfs_fscache_get_super_cookie 3 44355 _002686_hash &_001648_hash
86532 +_002687_hash nfs_pgarray_set 2 1085 _002687_hash NULL
86533 +_002688_hash ntfs_rl_realloc 3 56831 _002688_hash &_000363_hash
86534 +_002689_hash ntfs_rl_realloc_nofail 3 32173 _002689_hash NULL
86535 +_002690_hash pn533_dep_link_up 5 22154 _002690_hash NULL
86536 +_002691_hash port_fops_write 3 54627 _002691_hash NULL
86537 +_002692_hash ptp_read 4 63251 _002692_hash NULL
86538 +_002693_hash qla4xxx_change_queue_depth 2 1268 _002693_hash NULL
86539 +_002694_hash reqsk_queue_alloc 2 40272 _002694_hash NULL
86540 +_002695_hash resize_info_buffer 2 62889 _002695_hash NULL
86541 +_002696_hash rfkill_fop_write 3 64808 _002696_hash NULL
86542 +_002697_hash rt2x00debug_write_rfcsr 3 41473 _002697_hash NULL
86543 +_002698_hash rvmalloc 1 46873 _002698_hash NULL
86544 +_002699_hash rw_copy_check_uvector 3 45748 _003398_hash NULL nohasharray
86545 +_002700_hash sctp_getsockopt_active_key 2 45483 _002700_hash NULL
86546 +_002701_hash sctp_getsockopt_adaptation_layer 2 45375 _002701_hash NULL
86547 +_002702_hash sctp_getsockopt_assoc_ids 2 9043 _002702_hash NULL
86548 +_002703_hash sctp_getsockopt_associnfo 2 58169 _002703_hash NULL
86549 +_002704_hash sctp_getsockopt_assoc_number 2 6384 _002704_hash NULL
86550 +_002705_hash sctp_getsockopt_auto_asconf 2 46584 _002705_hash NULL
86551 +_002706_hash sctp_getsockopt_context 2 52490 _002706_hash NULL
86552 +_002707_hash sctp_getsockopt_default_send_param 2 63056 _002707_hash NULL
86553 +_002708_hash sctp_getsockopt_disable_fragments 2 12330 _002708_hash NULL
86554 +_002709_hash sctp_getsockopt_fragment_interleave 2 51215 _002709_hash NULL
86555 +_002710_hash sctp_getsockopt_initmsg 2 26042 _002710_hash NULL
86556 +_002711_hash sctp_getsockopt_mappedv4 2 20044 _002711_hash NULL
86557 +_002712_hash sctp_getsockopt_nodelay 2 9560 _002712_hash NULL
86558 +_002713_hash sctp_getsockopt_partial_delivery_point 2 60952 _002713_hash NULL
86559 +_002714_hash sctp_getsockopt_peeloff 2 59190 _002714_hash NULL
86560 +_002715_hash sctp_getsockopt_peer_addr_info 2 6024 _002715_hash NULL
86561 +_002716_hash sctp_getsockopt_peer_addr_params 2 53645 _002716_hash NULL
86562 +_002717_hash sctp_getsockopt_primary_addr 2 24639 _002717_hash NULL
86563 +_002718_hash sctp_getsockopt_rtoinfo 2 62027 _002718_hash NULL
86564 +_002719_hash sctp_getsockopt_sctp_status 2 56540 _002719_hash NULL
86565 +_002720_hash self_check_write 5 50856 _002720_hash NULL
86566 +_002721_hash smk_read_mapped 3 7562 _002721_hash NULL
86567 +_002722_hash smk_set_cipso 3 20379 _002722_hash NULL
86568 +_002723_hash smk_user_access 3 24440 _002723_hash NULL
86569 +_002724_hash smk_write_mapped 3 13519 _002724_hash NULL
86570 +_002725_hash smk_write_rules_list 3 18565 _002725_hash NULL
86571 +_002726_hash snd_mixart_BA0_read 5 45069 _002726_hash NULL
86572 +_002727_hash snd_mixart_BA1_read 5 5082 _002727_hash NULL
86573 +_002728_hash snd_pcm_oss_read2 3 54387 _002728_hash NULL
86574 +_002729_hash syslog_print 2 307 _002729_hash NULL
86575 +_002730_hash tcp_dma_try_early_copy 3 4457 _002730_hash NULL
86576 +_002731_hash tcp_send_rcvq 3 11316 _002731_hash NULL
86577 +_002732_hash tomoyo_init_log 2 61526 _002732_hash NULL
86578 +_002733_hash ubi_dump_flash 4 46381 _002733_hash NULL
86579 +_002734_hash ubi_eba_atomic_leb_change 5 60379 _002734_hash NULL
86580 +_002735_hash ubi_eba_write_leb 5-6 36029 _002735_hash NULL
86581 +_002737_hash ubi_eba_write_leb_st 5 44343 _002737_hash NULL
86582 +_002738_hash ubi_self_check_all_ff 4 41959 _002738_hash NULL
86583 +_002739_hash unix_bind 3 15668 _002739_hash NULL
86584 +_002740_hash usbvision_rvmalloc 1 19655 _002740_hash NULL
86585 +_002742_hash v4l2_ctrl_new 7 24927 _002742_hash NULL
86586 +_002743_hash v4l2_event_subscribe 3 53687 _002743_hash NULL
86587 +_002744_hash v9fs_direct_read 3 45546 _002744_hash NULL
86588 +_002745_hash v9fs_file_readn 4 36353 _002745_hash &_001606_hash
86589 +_002746_hash __videobuf_alloc_vb 1 5665 _002746_hash NULL
86590 +_002747_hash wm8350_write 3 24480 _002747_hash NULL
86591 +_002748_hash xfs_buf_read_uncached 3 42844 _002748_hash NULL
86592 +_002749_hash yurex_write 3 8761 _002749_hash NULL
86593 +_002750_hash alloc_skb 1 55439 _002750_hash NULL
86594 +_002751_hash alloc_skb_fclone 1 3467 _002751_hash NULL
86595 +_002752_hash ata_scsi_change_queue_depth 2 23126 _002752_hash NULL
86596 +_002753_hash ath6kl_disconnect_timeout_write 3 794 _002753_hash NULL
86597 +_002754_hash ath6kl_keepalive_write 3 45600 _002754_hash NULL
86598 +_002755_hash ath6kl_lrssi_roam_write 3 8362 _002755_hash NULL
86599 +_002756_hash ath6kl_regread_write 3 14220 _002756_hash NULL
86600 +_002757_hash core_sys_select 1 47494 _002757_hash NULL
86601 +_002758_hash do_syslog 3 56807 _002758_hash NULL
86602 +_002759_hash expand_fdtable 2 39273 _002759_hash NULL
86603 +_002760_hash fd_execute_cmd 3 1132 _002760_hash NULL
86604 +_002761_hash get_chars 3 40373 _002761_hash NULL
86605 +_002762_hash hid_report_raw_event 4 2762 _002762_hash NULL
86606 +_002763_hash inet_csk_listen_start 2 38233 _002763_hash NULL
86607 +_002764_hash kstrtou32_from_user 2 30361 _002764_hash NULL
86608 +_002765_hash l2cap_segment_sdu 4 48772 _002765_hash &_002653_hash
86609 +_002766_hash __netdev_alloc_skb 2 18595 _002766_hash NULL
86610 +_002767_hash nfs_readdata_alloc 2 65015 _002767_hash NULL
86611 +_002768_hash nfs_writedata_alloc 2 12133 _002768_hash NULL
86612 +_002769_hash ntfs_rl_append 2-4 6037 _002769_hash NULL
86613 +_002771_hash ntfs_rl_insert 2-4 4931 _002771_hash NULL
86614 +_002773_hash ntfs_rl_replace 2-4 14136 _002773_hash NULL
86615 +_002775_hash ntfs_rl_split 2-4 52328 _002775_hash NULL
86616 +_002777_hash port_fops_read 3 49626 _002777_hash NULL
86617 +_002778_hash random_read 3 13815 _002778_hash NULL
86618 +_002779_hash sg_proc_write_adio 3 45704 _002779_hash NULL
86619 +_002780_hash sg_proc_write_dressz 3 46316 _002780_hash NULL
86620 +_002781_hash tcp_sendmsg 4 30296 _002781_hash NULL
86621 +_002782_hash tomoyo_write_log2 2 34318 _002782_hash NULL
86622 +_002783_hash ubi_leb_change 4 10289 _002783_hash NULL
86623 +_002784_hash ubi_leb_write 4-5 5478 _002784_hash NULL
86624 +_002786_hash urandom_read 3 30462 _002786_hash NULL
86625 +_002787_hash v9fs_cached_file_read 3 2514 _002787_hash NULL
86626 +_002788_hash __videobuf_alloc_cached 1 12740 _002788_hash NULL
86627 +_002789_hash __videobuf_alloc_uncached 1 55711 _002789_hash NULL
86628 +_002790_hash wm8350_block_write 3 19727 _002790_hash NULL
86629 +_002791_hash alloc_tx 2 32143 _002791_hash NULL
86630 +_002792_hash alloc_wr 1-2 24635 _002792_hash NULL
86631 +_002794_hash ath6kl_endpoint_stats_write 3 59621 _002794_hash NULL
86632 +_002795_hash ath6kl_fwlog_mask_write 3 24810 _002795_hash NULL
86633 +_002796_hash ath9k_wmi_cmd 4 327 _002796_hash NULL
86634 +_002797_hash atm_alloc_charge 2 19517 _002879_hash NULL nohasharray
86635 +_002798_hash ax25_output 2 22736 _002798_hash NULL
86636 +_002799_hash bcsp_prepare_pkt 3 12961 _002799_hash NULL
86637 +_002800_hash bt_skb_alloc 1 6404 _002800_hash NULL
86638 +_002801_hash capinc_tty_write 3 28539 _002801_hash NULL
86639 +_002802_hash cfpkt_create_pfx 1-2 23594 _002802_hash NULL
86640 +_002804_hash cmd_complete 6 51629 _002804_hash NULL
86641 +_002805_hash cmtp_add_msgpart 4 9252 _002805_hash NULL
86642 +_002806_hash cmtp_send_interopmsg 7 376 _002806_hash NULL
86643 +_002807_hash cxgb3_get_cpl_reply_skb 2 10620 _002807_hash NULL
86644 +_002808_hash dbg_leb_change 4 23555 _002808_hash NULL
86645 +_002809_hash dbg_leb_write 4-5 63555 _002809_hash &_000940_hash
86646 +_002811_hash dccp_listen_start 2 35918 _002811_hash NULL
86647 +_002812_hash __dev_alloc_skb 1 28681 _002812_hash NULL
86648 +_002813_hash diva_os_alloc_message_buffer 1 64568 _002813_hash NULL
86649 +_002814_hash dn_alloc_skb 2 6631 _002814_hash NULL
86650 +_002815_hash do_pselect 1 62061 _002815_hash NULL
86651 +_002816_hash _fc_frame_alloc 1 43568 _002816_hash NULL
86652 +_002817_hash find_skb 2 20431 _002817_hash NULL
86653 +_002818_hash fm_send_cmd 5 39639 _002818_hash NULL
86654 +_002819_hash gem_alloc_skb 2 51715 _002819_hash NULL
86655 +_002820_hash get_packet 3 41914 _002820_hash NULL
86656 +_002821_hash get_packet 3 5747 _002821_hash NULL
86657 +_002822_hash get_packet_pg 4 28023 _002822_hash NULL
86658 +_002823_hash get_skb 2 63008 _002823_hash NULL
86659 +_002824_hash hidp_queue_report 3 1881 _002824_hash NULL
86660 +_002825_hash __hidp_send_ctrl_message 4 28303 _002825_hash NULL
86661 +_002826_hash hycapi_rx_capipkt 3 11602 _002826_hash NULL
86662 +_002827_hash i2400m_net_rx 5 27170 _002827_hash NULL
86663 +_002828_hash igmpv3_newpack 2 35912 _002828_hash NULL
86664 +_002829_hash inet_listen 2 14723 _002829_hash NULL
86665 +_002830_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _002830_hash &_001724_hash
86666 +_002831_hash isdn_ppp_ccp_xmit_reset 6 63297 _002831_hash NULL
86667 +_002832_hash kmsg_read 3 46514 _002832_hash NULL
86668 +_002833_hash _l2_alloc_skb 1 11883 _002833_hash NULL
86669 +_002834_hash l3_alloc_skb 1 32289 _002834_hash NULL
86670 +_002835_hash llc_alloc_frame 4 64366 _002835_hash NULL
86671 +_002836_hash mac_drv_rx_init 2 48898 _002836_hash &_002670_hash
86672 +_002837_hash mgmt_event 4 12810 _002837_hash NULL
86673 +_002838_hash mI_alloc_skb 1 24770 _002838_hash NULL
86674 +_002839_hash nci_skb_alloc 2 49757 _002839_hash NULL
86675 +_002840_hash netdev_alloc_skb 2 62437 _002840_hash NULL
86676 +_002841_hash __netdev_alloc_skb_ip_align 2 55067 _002841_hash NULL
86677 +_002842_hash new_skb 1 21148 _002842_hash NULL
86678 +_002843_hash nfc_alloc_recv_skb 1 10244 _002843_hash NULL
86679 +_002844_hash nfcwilink_skb_alloc 1 16167 _002844_hash NULL
86680 +_002845_hash nfulnl_alloc_skb 2 65207 _002845_hash NULL
86681 +_002846_hash ni65_alloc_mem 3 10664 _002846_hash NULL
86682 +_002847_hash pep_alloc_skb 3 46303 _002847_hash NULL
86683 +_002848_hash pn_raw_send 2 54330 _002848_hash NULL
86684 +_002849_hash __pskb_copy 2 9038 _002849_hash NULL
86685 +_002850_hash refill_pool 2 19477 _002850_hash NULL
86686 +_002851_hash rfcomm_wmalloc 2 58090 _002851_hash NULL
86687 +_002852_hash rx 4 57944 _002852_hash NULL
86688 +_002853_hash sctp_ulpevent_new 1 33377 _002853_hash NULL
86689 +_002854_hash send_command 4 10832 _002854_hash NULL
86690 +_002855_hash skb_copy_expand 2-3 7685 _002855_hash &_000671_hash
86691 +_002857_hash sk_stream_alloc_skb 2 57622 _002857_hash NULL
86692 +_002858_hash sock_alloc_send_pskb 2 21246 _002858_hash NULL
86693 +_002859_hash sock_rmalloc 2 59740 _002859_hash &_002157_hash
86694 +_002860_hash sock_wmalloc 2 16472 _002860_hash NULL
86695 +_002861_hash solos_param_store 4 34755 _002861_hash NULL
86696 +_002862_hash sys_select 1 38827 _002862_hash NULL
86697 +_002863_hash sys_syslog 3 10746 _002863_hash NULL
86698 +_002864_hash t4vf_pktgl_to_skb 2 39005 _002864_hash NULL
86699 +_002865_hash tcp_collapse 5-6 63294 _002865_hash NULL
86700 +_002867_hash tipc_cfg_reply_alloc 1 27606 _002867_hash NULL
86701 +_002868_hash ubifs_leb_change 4 17789 _002868_hash NULL
86702 +_002869_hash ubifs_leb_write 4-5 22679 _002869_hash NULL
86703 +_002871_hash ulog_alloc_skb 1 23427 _002871_hash NULL
86704 +_002872_hash _alloc_mISDN_skb 3 52232 _002872_hash NULL
86705 +_002873_hash ath9k_multi_regread 4 65056 _002873_hash NULL
86706 +_002874_hash ath_rxbuf_alloc 2 24745 _002874_hash NULL
86707 +_002875_hash ax25_send_frame 2 19964 _002875_hash NULL
86708 +_002876_hash bchannel_get_rxbuf 2 37213 _002876_hash NULL
86709 +_002877_hash cfpkt_create 1 18197 _002877_hash NULL
86710 +_002878_hash console_store 4 36007 _002878_hash NULL
86711 +_002879_hash dev_alloc_skb 1 19517 _002879_hash &_002797_hash
86712 +_002880_hash dn_nsp_do_disc 2-6 49474 _002880_hash NULL
86713 +_002882_hash do_write_orph_node 2 64343 _002882_hash NULL
86714 +_002883_hash dsp_cmx_send_member 2 15625 _002883_hash NULL
86715 +_002884_hash fc_frame_alloc 2 1596 _002884_hash NULL
86716 +_002885_hash fc_frame_alloc_fill 2 59394 _002885_hash NULL
86717 +_002886_hash fmc_send_cmd 5 20435 _002886_hash NULL
86718 +_002887_hash hci_send_cmd 3 43810 _002887_hash NULL
86719 +_002888_hash hci_si_event 3 1404 _002888_hash NULL
86720 +_002889_hash hfcpci_empty_bfifo 4 62323 _002889_hash NULL
86721 +_002890_hash hidp_send_ctrl_message 4 43702 _002890_hash NULL
86722 +_002891_hash hysdn_sched_rx 3 60533 _002891_hash NULL
86723 +_002892_hash inet_dccp_listen 2 28565 _002892_hash NULL
86724 +_002893_hash ip6_append_data 4-5 36490 _002893_hash NULL
86725 +_002894_hash __ip_append_data 7-8 36191 _002894_hash NULL
86726 +_002895_hash l1oip_socket_recv 6 56537 _002895_hash NULL
86727 +_002896_hash l2cap_build_cmd 4 48676 _002896_hash NULL
86728 +_002897_hash l2down_create 4 21755 _002897_hash NULL
86729 +_002898_hash l2up_create 3 6430 _002898_hash NULL
86730 +_002899_hash ldisc_receive 4 41516 _002899_hash NULL
86731 +_002902_hash lro_gen_skb 6 2644 _002902_hash NULL
86732 +_002903_hash macvtap_alloc_skb 2-4-3 50629 _002903_hash NULL
86733 +_002906_hash mgmt_device_found 10 14146 _002906_hash NULL
86734 +_002907_hash nci_send_cmd 3 58206 _002907_hash NULL
86735 +_002908_hash netdev_alloc_skb_ip_align 2 40811 _002908_hash NULL
86736 +_002909_hash nfcwilink_send_bts_cmd 3 10802 _002909_hash NULL
86737 +_002910_hash nfqnl_mangle 2 14583 _002910_hash NULL
86738 +_002911_hash p54_alloc_skb 3 34366 _002911_hash &_000475_hash
86739 +_002912_hash packet_alloc_skb 2-5-4 62602 _002912_hash NULL
86740 +_002915_hash pep_indicate 5 38611 _002915_hash NULL
86741 +_002916_hash pep_reply 5 50582 _002916_hash NULL
86742 +_002917_hash pipe_handler_request 5 50774 _002917_hash &_001189_hash
86743 +_002918_hash ql_process_mac_rx_page 4 15543 _002918_hash NULL
86744 +_002919_hash ql_process_mac_rx_skb 4 6689 _002919_hash NULL
86745 +_002920_hash rfcomm_tty_write 3 51603 _002920_hash NULL
86746 +_002921_hash send_mpa_reject 3 7135 _002921_hash NULL
86747 +_002922_hash send_mpa_reply 3 32372 _002922_hash NULL
86748 +_002923_hash set_rxd_buffer_pointer 8 9950 _002923_hash NULL
86749 +_002924_hash sge_rx 3 50594 _002924_hash NULL
86750 +_002925_hash skb_cow_data 2 11565 _002925_hash NULL
86751 +_002926_hash smp_build_cmd 3 45853 _002926_hash NULL
86752 +_002927_hash sock_alloc_send_skb 2 23720 _002927_hash NULL
86753 +_002928_hash sys_pselect6 1 57449 _002928_hash NULL
86754 +_002929_hash tcp_fragment 3 20436 _002929_hash NULL
86755 +_002930_hash teiup_create 3 43201 _002930_hash NULL
86756 +_002931_hash tg3_run_loopback 2 30093 _002931_hash NULL
86757 +_002932_hash tun_alloc_skb 2-4-3 41216 _002932_hash NULL
86758 +_002935_hash ubifs_write_node 5-3 11258 _002935_hash NULL
86759 +_002936_hash use_pool 2 64607 _002936_hash NULL
86760 +_002937_hash vxge_rx_alloc 3 52024 _002937_hash NULL
86761 +_002938_hash add_packet 3 54433 _002938_hash NULL
86762 +_002939_hash add_rx_skb 3 8257 _002939_hash NULL
86763 +_002940_hash ath6kl_buf_alloc 1 57304 _002940_hash NULL
86764 +_002941_hash bat_iv_ogm_aggregate_new 2 2620 _002941_hash NULL
86765 +_002942_hash bnx2fc_process_l2_frame_compl 3 65072 _002942_hash NULL
86766 +_002943_hash brcmu_pkt_buf_get_skb 1 5556 _002943_hash NULL
86767 +_002944_hash br_send_bpdu 3 29669 _002944_hash NULL
86768 +_002945_hash bt_skb_send_alloc 2 6581 _002945_hash NULL
86769 +_002946_hash c4iw_reject_cr 3 28174 _002946_hash NULL
86770 +_002947_hash carl9170_rx_copy_data 2 21656 _002947_hash NULL
86771 +_002948_hash cfpkt_add_body 3 44630 _002948_hash NULL
86772 +_002949_hash cfpkt_append 3 61206 _002949_hash NULL
86773 +_002950_hash cosa_net_setup_rx 2 38594 _002950_hash NULL
86774 +_002951_hash cxgb4_pktgl_to_skb 2 61899 _002951_hash NULL
86775 +_002952_hash dn_alloc_send_pskb 2 4465 _002952_hash NULL
86776 +_002953_hash dn_nsp_return_disc 2 60296 _002953_hash NULL
86777 +_002954_hash dn_nsp_send_disc 2 23469 _002954_hash NULL
86778 +_002955_hash dsp_tone_hw_message 3 17678 _002955_hash NULL
86779 +_002956_hash dvb_net_sec 3 37884 _002956_hash NULL
86780 +_002957_hash e1000_check_copybreak 3 62448 _002957_hash NULL
86781 +_002958_hash fast_rx_path 3 59214 _002958_hash NULL
86782 +_002959_hash fc_fcp_frame_alloc 2 12624 _002959_hash NULL
86783 +_002960_hash fcoe_ctlr_send_keep_alive 3 15308 _002960_hash NULL
86784 +_002961_hash fwnet_incoming_packet 3 40380 _002961_hash NULL
86785 +_002962_hash fwnet_pd_new 4 39947 _002962_hash NULL
86786 +_002963_hash got_frame 2 16028 _002963_hash NULL
86787 +_002964_hash gsm_mux_rx_netchar 3 33336 _002964_hash NULL
86788 +_002965_hash hdlcdev_rx 3 997 _002965_hash NULL
86789 +_002966_hash hdlc_empty_fifo 2 18397 _002966_hash NULL
86790 +_002967_hash hfc_empty_fifo 2 57972 _002967_hash NULL
86791 +_002968_hash hfcpci_empty_fifo 4 2427 _002968_hash NULL
86792 +_002969_hash hfcsusb_rx_frame 3 52745 _002969_hash NULL
86793 +_002970_hash hidp_output_raw_report 3 5629 _002970_hash NULL
86794 +_002971_hash hscx_empty_fifo 2 13360 _002971_hash NULL
86795 +_002972_hash hysdn_rx_netpkt 3 16136 _002972_hash NULL
86796 +_002973_hash ieee80211_fragment 4 33112 _002973_hash NULL
86797 +_002974_hash ieee80211_probereq_get 4-6 29069 _002974_hash NULL
86798 +_002976_hash ieee80211_send_auth 5 24121 _002976_hash NULL
86799 +_002977_hash ieee80211_set_probe_resp 3 10077 _002977_hash NULL
86800 +_002978_hash ieee80211_tdls_mgmt 8 9581 _002978_hash NULL
86801 +_002979_hash ip6_ufo_append_data 5-7-6 4780 _002979_hash NULL
86802 +_002982_hash ip_ufo_append_data 6-8-7 12775 _002982_hash NULL
86803 +_002985_hash ipw_packet_received_skb 2 1230 _002985_hash NULL
86804 +_002986_hash iwch_reject_cr 3 23901 _002986_hash NULL
86805 +_002987_hash iwm_rx_packet_alloc 3 9898 _002987_hash NULL
86806 +_002988_hash ixgb_check_copybreak 3 5847 _002988_hash NULL
86807 +_002989_hash l1oip_socket_parse 4 4507 _002989_hash NULL
86808 +_002990_hash l2cap_send_cmd 4 14548 _002990_hash NULL
86809 +_002991_hash l2tp_ip6_sendmsg 4 7461 _002991_hash NULL
86810 +_002993_hash lowpan_fragment_xmit 3-4 22095 _002993_hash NULL
86811 +_002996_hash mcs_unwrap_fir 3 25733 _002996_hash NULL
86812 +_002997_hash mcs_unwrap_mir 3 9455 _002997_hash NULL
86813 +_002998_hash mld_newpack 2 50950 _002998_hash NULL
86814 +_002999_hash nfc_alloc_send_skb 4 3167 _002999_hash NULL
86815 +_003000_hash p54_download_eeprom 4 43842 _003000_hash NULL
86816 +_003002_hash ppp_tx_cp 5 62044 _003002_hash NULL
86817 +_003003_hash prism2_send_mgmt 4 62605 _003003_hash &_001876_hash
86818 +_003004_hash prism2_sta_send_mgmt 5 43916 _003004_hash NULL
86819 +_003005_hash _queue_data 4 54983 _003005_hash NULL
86820 +_003006_hash read_dma 3 55086 _003006_hash NULL
86821 +_003007_hash read_fifo 3 826 _003007_hash NULL
86822 +_003008_hash receive_copy 3 12216 _003008_hash NULL
86823 +_003009_hash rtl8169_try_rx_copy 3 705 _003009_hash NULL
86824 +_003010_hash _rtl92s_firmware_downloadcode 3 14021 _003010_hash NULL
86825 +_003011_hash rx_data 4 60442 _003011_hash NULL
86826 +_003012_hash sis190_try_rx_copy 3 57069 _003012_hash NULL
86827 +_003013_hash skge_rx_get 3 40598 _003013_hash NULL
86828 +_003014_hash tcp_mark_head_lost 2 35895 _003014_hash NULL
86829 +_003015_hash tcp_match_skb_to_sack 3-4 23568 _003015_hash NULL
86830 +_003017_hash tso_fragment 3 29050 _003017_hash NULL
86831 +_003018_hash tt_response_fill_table 1 57902 _003018_hash NULL
86832 +_003020_hash udpv6_sendmsg 4 22316 _003020_hash NULL
86833 +_003021_hash velocity_rx_copy 2 34583 _003021_hash NULL
86834 +_003022_hash W6692_empty_Bfifo 2 47804 _003022_hash NULL
86835 +_003023_hash zd_mac_rx 3 38296 _003023_hash NULL
86836 +_003024_hash ath6kl_wmi_get_new_buf 1 52304 _003024_hash NULL
86837 +_003025_hash bat_iv_ogm_queue_add 3 30870 _003025_hash NULL
86838 +_003026_hash brcmf_alloc_pkt_and_read 2 63116 _003026_hash &_001808_hash
86839 +_003027_hash brcmf_sdcard_recv_buf 6 38179 _003027_hash NULL
86840 +_003028_hash brcmf_sdcard_rwdata 5 65041 _003028_hash NULL
86841 +_003029_hash brcmf_sdcard_send_buf 6 7713 _003029_hash NULL
86842 +_003030_hash carl9170_handle_mpdu 3 11056 _003030_hash NULL
86843 +_003031_hash cfpkt_add_trail 3 27260 _003031_hash NULL
86844 +_003032_hash cfpkt_pad_trail 2 55511 _003032_hash NULL
86845 +_003033_hash dvb_net_sec_callback 2 28786 _003033_hash NULL
86846 +_003034_hash fwnet_receive_packet 9 50537 _003034_hash NULL
86847 +_003035_hash handle_rx_packet 3 58993 _003035_hash NULL
86848 +_003036_hash HDLC_irq 2 8709 _003036_hash NULL
86849 +_003037_hash hdlc_rpr_irq 2 10240 _003037_hash NULL
86850 +_003043_hash ipwireless_network_packet_received 4 51277 _003043_hash NULL
86851 +_003044_hash l2cap_bredr_sig_cmd 3 49065 _003044_hash NULL
86852 +_003045_hash l2cap_sock_alloc_skb_cb 2 33532 _003045_hash NULL
86853 +_003046_hash llcp_allocate_pdu 3 19866 _003046_hash NULL
86854 +_003047_hash ppp_cp_event 6 2965 _003047_hash NULL
86855 +_003048_hash receive_client_update_packet 3 49104 _003048_hash NULL
86856 +_003049_hash receive_server_sync_packet 3 59021 _003049_hash NULL
86857 +_003050_hash sky2_receive 2 13407 _003050_hash NULL
86858 +_003051_hash tcp_sacktag_walk 5-6 49703 _003051_hash NULL
86859 +_003053_hash tcp_write_xmit 2 64602 _003053_hash NULL
86860 +_003054_hash ath6kl_wmi_add_wow_pattern_cmd 4 12842 _003054_hash NULL
86861 +_003055_hash ath6kl_wmi_beginscan_cmd 8 25462 _003055_hash NULL
86862 +_003056_hash ath6kl_wmi_send_probe_response_cmd 6 31728 _003056_hash NULL
86863 +_003057_hash ath6kl_wmi_set_appie_cmd 5 39266 _003057_hash NULL
86864 +_003058_hash ath6kl_wmi_set_ie_cmd 6 37260 _003058_hash NULL
86865 +_003059_hash ath6kl_wmi_startscan_cmd 8 33674 _003059_hash NULL
86866 +_003060_hash ath6kl_wmi_test_cmd 3 27312 _003060_hash NULL
86867 +_003061_hash brcmf_sdbrcm_membytes 3-5 37324 _003061_hash NULL
86868 +_003063_hash brcmf_sdbrcm_read_control 3 22721 _003063_hash NULL
86869 +_003064_hash brcmf_tx_frame 3 20978 _003064_hash NULL
86870 +_003065_hash __carl9170_rx 3 56784 _003065_hash NULL
86871 +_003066_hash cfpkt_setlen 2 49343 _003066_hash NULL
86872 +_003067_hash hdlc_irq_one 2 3944 _003067_hash NULL
86873 +_003069_hash tcp_push_one 2 48816 _003069_hash NULL
86874 +_003070_hash __tcp_push_pending_frames 2 48148 _003070_hash NULL
86875 +_003071_hash brcmf_sdbrcm_bus_txctl 3 42492 _003071_hash NULL
86876 +_003072_hash carl9170_rx 3 13272 _003072_hash NULL
86877 +_003073_hash carl9170_rx_stream 3 1334 _003073_hash NULL
86878 +_003074_hash tcp_push 3 10680 _003074_hash NULL
86879 +_003075_hash create_log 2 8225 _003075_hash NULL
86880 +_003076_hash expand_files 2 17080 _003076_hash NULL
86881 +_003077_hash iio_device_alloc 1 41440 _003077_hash NULL
86882 +_003078_hash OS_mem_token_alloc 1 14276 _003078_hash NULL
86883 +_003079_hash packet_came 3 18072 _003079_hash NULL
86884 +_003080_hash softsynth_write 3 3455 _003080_hash NULL
86885 +_003081_hash alloc_fd 1 37637 _003081_hash NULL
86886 +_003082_hash sys_dup3 2 33421 _003082_hash NULL
86887 +_003083_hash do_fcntl 3 31468 _003083_hash NULL
86888 +_003084_hash sys_dup2 2 25284 _003084_hash NULL
86889 +_003085_hash sys_fcntl 3 19267 _003085_hash NULL
86890 +_003086_hash sys_fcntl64 3 29031 _003086_hash NULL
86891 +_003087_hash cmpk_message_handle_tx 4 54024 _003087_hash NULL
86892 +_003088_hash comedi_buf_alloc 3 24822 _003088_hash NULL
86893 +_003089_hash compat_rw_copy_check_uvector 3 22001 _003089_hash &_001989_hash
86894 +_003090_hash compat_sys_fcntl64 3 60256 _003090_hash NULL
86895 +_003091_hash evtchn_write 3 43278 _003091_hash NULL
86896 +_003092_hash fw_download_code 3 13249 _003092_hash NULL
86897 +_003093_hash fwSendNullPacket 2 54618 _003093_hash NULL
86898 +_003095_hash ieee80211_authentication_req 3 63973 _003095_hash NULL
86899 +_003097_hash rtllib_authentication_req 3 26713 _003097_hash NULL
86900 +_003098_hash SendTxCommandPacket 3 42901 _003098_hash NULL
86901 +_003099_hash snd_nm256_capture_copy 5 28622 _003099_hash NULL
86902 +_003100_hash snd_nm256_playback_copy 5 38567 _003100_hash NULL
86903 +_003101_hash tomoyo_init_log 2 14806 _003101_hash NULL
86904 +_003102_hash usbdux_attach_common 4 51764 _003271_hash NULL nohasharray
86905 +_003103_hash compat_sys_fcntl 3 15654 _003103_hash NULL
86906 +_003104_hash ieee80211_auth_challenge 3 18810 _003104_hash NULL
86907 +_003105_hash ieee80211_rtl_auth_challenge 3 61897 _003105_hash NULL
86908 +_003106_hash resize_async_buffer 4 64031 _003106_hash &_002119_hash
86909 +_003107_hash rtllib_auth_challenge 3 12493 _003107_hash NULL
86910 +_003108_hash tomoyo_write_log2 2 11732 _003108_hash NULL
86911 +_003109_hash allocate_probes 1 40204 _003109_hash NULL
86912 +_003110_hash alloc_ftrace_hash 1 57431 _003110_hash &_002532_hash
86913 +_003111_hash __alloc_preds 2 9492 _003111_hash NULL
86914 +_003112_hash __alloc_pred_stack 2 26687 _003112_hash NULL
86915 +_003113_hash alloc_sched_domains 1 47756 _003113_hash NULL
86916 +_003114_hash alloc_trace_probe 6 38720 _003114_hash NULL
86917 +_003115_hash alloc_trace_uprobe 3 13870 _003850_hash NULL nohasharray
86918 +_003116_hash arcfb_write 3 8702 _003116_hash NULL
86919 +_003117_hash ath6kl_sdio_alloc_prep_scat_req 2 51986 _003117_hash NULL
86920 +_003118_hash ath6kl_usb_post_recv_transfers 2 32892 _003118_hash NULL
86921 +_003119_hash ath6kl_usb_submit_ctrl_in 6 32880 _003119_hash &_000778_hash
86922 +_003120_hash ath6kl_usb_submit_ctrl_out 6 9978 _003120_hash NULL
86923 +_003121_hash auok190xfb_write 3 37001 _003121_hash NULL
86924 +_003122_hash beacon_interval_write 3 17952 _003122_hash NULL
86925 +_003123_hash blk_dropped_read 3 4168 _003123_hash NULL
86926 +_003124_hash blk_msg_write 3 13655 _003124_hash NULL
86927 +_003125_hash brcmf_usbdev_qinit 2 19090 _003125_hash &_001533_hash
86928 +_003126_hash brcmf_usb_dl_cmd 4 53130 _003126_hash NULL
86929 +_003127_hash broadsheetfb_write 3 39976 _003127_hash NULL
86930 +_003128_hash broadsheet_spiflash_rewrite_sector 2 54864 _003128_hash NULL
86931 +_003129_hash cyttsp_probe 4 1940 _003129_hash NULL
86932 +_003130_hash da9052_group_write 3 4534 _003130_hash NULL
86933 +_003131_hash dccpprobe_read 3 52549 _003131_hash NULL
86934 +_003132_hash drm_property_create_bitmask 5 30195 _003132_hash NULL
86935 +_003133_hash dtim_interval_write 3 30489 _003133_hash NULL
86936 +_003134_hash dynamic_ps_timeout_write 3 37713 _003134_hash NULL
86937 +_003135_hash event_enable_read 3 7074 _003135_hash NULL
86938 +_003136_hash event_enable_write 3 45238 _003136_hash NULL
86939 +_003137_hash event_filter_read 3 23494 _003137_hash NULL
86940 +_003138_hash event_filter_write 3 56609 _003138_hash NULL
86941 +_003139_hash event_id_read 3 64288 _003139_hash &_001240_hash
86942 +_003140_hash f_audio_buffer_alloc 1 41110 _003140_hash NULL
86943 +_003141_hash fb_sys_read 3 13778 _003141_hash NULL
86944 +_003142_hash fb_sys_write 3 33130 _003142_hash NULL
86945 +_003143_hash forced_ps_write 3 37209 _003143_hash NULL
86946 +_003144_hash __fprog_create 2 41263 _003144_hash NULL
86947 +_003145_hash fq_codel_zalloc 1 15378 _003145_hash NULL
86948 +_003146_hash ftrace_pid_write 3 39710 _003146_hash NULL
86949 +_003147_hash ftrace_profile_read 3 21327 _003147_hash NULL
86950 +_003148_hash ftrace_profile_write 3 53327 _003148_hash NULL
86951 +_003149_hash ftrace_write 3 29551 _003149_hash NULL
86952 +_003150_hash gdm_wimax_netif_rx 3 43423 _003150_hash &_001619_hash
86953 +_003151_hash gpio_power_write 3 1991 _003151_hash NULL
86954 +_003152_hash hecubafb_write 3 26942 _003152_hash NULL
86955 +_003153_hash hsc_msg_alloc 1 60990 _003153_hash NULL
86956 +_003154_hash hsc_write 3 55875 _003154_hash NULL
86957 +_003155_hash hsi_alloc_controller 1 41802 _003155_hash NULL
86958 +_003156_hash hsi_register_board_info 2 13820 _003156_hash NULL
86959 +_003157_hash i915_ring_stop_read 3 42549 _003406_hash NULL nohasharray
86960 +_003158_hash i915_ring_stop_write 3 59010 _003158_hash NULL
86961 +_003159_hash ieee802154_alloc_device 1 13767 _003159_hash NULL
86962 +_003160_hash intel_sdvo_write_cmd 4 54377 _003160_hash &_000815_hash
86963 +_003161_hash ivtvfb_write 3 40023 _003161_hash NULL
86964 +_003162_hash metronomefb_write 3 8823 _003162_hash NULL
86965 +_003163_hash mwifiex_usb_submit_rx_urb 2 54558 _003163_hash NULL
86966 +_003164_hash nfc_hci_hcp_message_tx 6 14534 _003164_hash NULL
86967 +_003165_hash nfc_hci_set_param 5 40697 _003165_hash NULL
86968 +_003166_hash nfc_shdlc_alloc_skb 2 12741 _003166_hash NULL
86969 +_003167_hash odev_update 2 50169 _003167_hash NULL
86970 +_003168_hash oz_add_farewell 5 20652 _003168_hash NULL
86971 +_003169_hash oz_cdev_read 3 20659 _003169_hash NULL
86972 +_003170_hash oz_cdev_write 3 33852 _003170_hash NULL
86973 +_003171_hash oz_ep_alloc 2 5587 _003171_hash NULL
86974 +_003172_hash oz_events_read 3 47535 _003172_hash NULL
86975 +_003173_hash pmcraid_copy_sglist 3 38431 _003173_hash NULL
86976 +_003174_hash prctl_set_mm 3 64538 _003174_hash NULL
86977 +_003175_hash ptp_filter_init 2 36780 _003175_hash NULL
86978 +_003176_hash rb_simple_read 3 45972 _003176_hash NULL
86979 +_003177_hash rb_simple_write 3 20890 _003177_hash NULL
86980 +_003178_hash read_file_dfs 3 43145 _003178_hash NULL
86981 +_003179_hash rx_streaming_always_write 3 32357 _003436_hash NULL nohasharray
86982 +_003180_hash rx_streaming_interval_write 3 50120 _003180_hash NULL
86983 +_003181_hash shmem_pread_fast 3 34147 _003181_hash NULL
86984 +_003182_hash shmem_pread_slow 3 3198 _003182_hash NULL
86985 +_003183_hash shmem_pwrite_fast 3 46842 _003183_hash NULL
86986 +_003184_hash shmem_pwrite_slow 3 31741 _003184_hash NULL
86987 +_003185_hash show_header 3 4722 _003185_hash &_000736_hash
86988 +_003186_hash split_scan_timeout_write 3 52128 _003186_hash NULL
86989 +_003187_hash stack_max_size_read 3 1445 _003187_hash NULL
86990 +_003188_hash stack_max_size_write 3 36068 _003188_hash NULL
86991 +_003189_hash subsystem_filter_read 3 62310 _003189_hash NULL
86992 +_003190_hash subsystem_filter_write 3 13022 _003190_hash NULL
86993 +_003191_hash suspend_dtim_interval_write 3 48854 _003191_hash NULL
86994 +_003192_hash system_enable_read 3 25815 _003192_hash NULL
86995 +_003193_hash system_enable_write 3 61396 _003193_hash NULL
86996 +_003194_hash trace_options_core_read 3 47390 _003194_hash NULL
86997 +_003195_hash trace_options_core_write 3 61551 _003195_hash NULL
86998 +_003196_hash trace_options_read 3 11419 _003196_hash NULL
86999 +_003197_hash trace_options_write 3 48275 _003197_hash NULL
87000 +_003198_hash trace_parser_get_init 2 31379 _003198_hash NULL
87001 +_003199_hash traceprobe_probes_write 3 64969 _003199_hash NULL
87002 +_003200_hash trace_seq_to_user 3 65398 _003200_hash NULL
87003 +_003201_hash tracing_buffers_read 3 11124 _003201_hash NULL
87004 +_003202_hash tracing_clock_write 3 27961 _003202_hash NULL
87005 +_003203_hash tracing_cpumask_read 3 7010 _003203_hash NULL
87006 +_003204_hash tracing_ctrl_read 3 46922 _003204_hash NULL
87007 +_003205_hash tracing_ctrl_write 3 42324 _003205_hash &_001726_hash
87008 +_003206_hash tracing_entries_read 3 8345 _003206_hash NULL
87009 +_003207_hash tracing_entries_write 3 60563 _003207_hash NULL
87010 +_003208_hash tracing_max_lat_read 3 8890 _003208_hash NULL
87011 +_003209_hash tracing_max_lat_write 3 8728 _003209_hash NULL
87012 +_003210_hash tracing_read_dyn_info 3 45468 _003210_hash NULL
87013 +_003211_hash tracing_readme_read 3 16493 _003211_hash NULL
87014 +_003212_hash tracing_saved_cmdlines_read 3 21434 _003212_hash NULL
87015 +_003213_hash tracing_set_trace_read 3 44122 _003213_hash NULL
87016 +_003214_hash tracing_set_trace_write 3 57096 _003214_hash NULL
87017 +_003215_hash tracing_stats_read 3 34537 _003215_hash NULL
87018 +_003216_hash tracing_total_entries_read 3 62817 _003216_hash NULL
87019 +_003217_hash tracing_trace_options_write 3 153 _003217_hash NULL
87020 +_003218_hash ttm_put_pages 2 9179 _003218_hash NULL
87021 +_003219_hash udl_prime_create 2 57159 _003219_hash NULL
87022 +_003220_hash ufx_alloc_urb_list 3 10349 _003220_hash NULL
87023 +_003221_hash u_memcpya 2-3 30139 _003221_hash NULL
87024 +_003223_hash viafb_dfph_proc_write 3 49288 _003223_hash NULL
87025 +_003224_hash viafb_dfpl_proc_write 3 627 _003224_hash NULL
87026 +_003225_hash viafb_dvp0_proc_write 3 23023 _003225_hash NULL
87027 +_003226_hash viafb_dvp1_proc_write 3 48864 _003226_hash NULL
87028 +_003227_hash viafb_vt1636_proc_write 3 16018 _003227_hash NULL
87029 +_003228_hash vivi_read 3 23073 _003228_hash NULL
87030 +_003229_hash wl1271_rx_filter_alloc_field 5 46721 _003229_hash NULL
87031 +_003230_hash wl12xx_cmd_build_probe_req 6-8 3098 _003230_hash NULL
87032 +_003232_hash wlcore_alloc_hw 1 7785 _003232_hash NULL
87033 +_003233_hash alloc_and_copy_ftrace_hash 1 29368 _003233_hash NULL
87034 +_003234_hash create_trace_probe 1 20175 _003234_hash NULL
87035 +_003235_hash create_trace_uprobe 1 13184 _003235_hash NULL
87036 +_003236_hash intel_sdvo_set_value 4 2311 _003236_hash NULL
87037 +_003237_hash mmio_read 4 40348 _003237_hash NULL
87038 +_003238_hash nfc_hci_execute_cmd 5 43882 _003238_hash NULL
87039 +_003239_hash nfc_hci_send_event 5 21452 _003239_hash NULL
87040 +_003240_hash nfc_hci_send_response 5 56462 _003240_hash NULL
87041 +_003241_hash picolcd_fb_write 3 2318 _003241_hash NULL
87042 +_003242_hash probes_write 3 29711 _003242_hash NULL
87043 +_003243_hash sys_prctl 4 8766 _003243_hash NULL
87044 +_003244_hash tracing_read_pipe 3 35312 _003244_hash NULL
87045 +_003245_hash brcmf_usb_attach 1-2 44656 _003245_hash NULL
87046 +_003247_hash dlfb_ops_write 3 64150 _003247_hash NULL
87047 +_003248_hash nfc_hci_send_cmd 5 55714 _003248_hash NULL
87048 +_003249_hash ufx_ops_write 3 54848 _003249_hash NULL
87049 +_003250_hash viafb_iga1_odev_proc_write 3 36241 _003250_hash NULL
87050 +_003251_hash viafb_iga2_odev_proc_write 3 2363 _003251_hash NULL
87051 +_003252_hash xenfb_write 3 43412 _003252_hash NULL
87052 +_003253_hash acl_alloc 1 35979 _003253_hash NULL
87053 +_003254_hash acl_alloc_stack_init 1 60630 _003254_hash NULL
87054 +_003255_hash acl_alloc_num 1-2 60778 _003255_hash NULL
87055 +_003257_hash padzero 1 55 _003257_hash &_002013_hash
87056 +_003258_hash __get_vm_area_node 1 55305 _003258_hash NULL
87057 +_003259_hash get_vm_area 1 18080 _003259_hash NULL
87058 +_003260_hash __get_vm_area 1 61599 _003260_hash NULL
87059 +_003261_hash get_vm_area_caller 1 10527 _003261_hash NULL
87060 +_003262_hash __get_vm_area_caller 1 56416 _003302_hash NULL nohasharray
87061 +_003263_hash alloc_vm_area 1 36149 _003263_hash NULL
87062 +_003264_hash __ioremap_caller 1-2 21800 _003264_hash NULL
87063 +_003266_hash vmap 2 15025 _003266_hash NULL
87064 +_003267_hash ioremap_cache 1-2 47189 _003267_hash NULL
87065 +_003269_hash ioremap_nocache 1-2 2439 _003269_hash NULL
87066 +_003271_hash ioremap_prot 1-2 51764 _003271_hash &_003102_hash
87067 +_003273_hash ioremap_wc 1-2 62695 _003273_hash NULL
87068 +_003274_hash acpi_os_ioremap 1-2 49523 _003274_hash NULL
87069 +_003276_hash ca91cx42_alloc_resource 2 10502 _003276_hash NULL
87070 +_003277_hash devm_ioremap_nocache 2-3 2036 _003277_hash NULL
87071 +_003279_hash __einj_error_trigger 1 17707 _003279_hash &_001577_hash
87072 +_003280_hash io_mapping_map_wc 2 19284 _003280_hash NULL
87073 +_003281_hash ioremap 1-2 23172 _003281_hash NULL
87074 +_003283_hash lguest_map 1-2 42008 _003283_hash NULL
87075 +_003285_hash msix_map_region 3 3411 _003285_hash NULL
87076 +_003286_hash pci_iomap 3 47575 _003286_hash NULL
87077 +_003287_hash sfi_map_memory 1-2 5183 _003287_hash NULL
87078 +_003289_hash tsi148_alloc_resource 2 24563 _003289_hash NULL
87079 +_003290_hash vb2_vmalloc_get_userptr 3 31374 _003290_hash NULL
87080 +_003291_hash xlate_dev_mem_ptr 1 15291 _003291_hash &_001167_hash
87081 +_003292_hash a4t_cs_init 3 27734 _003292_hash NULL
87082 +_003293_hash aac_nark_ioremap 2 50163 _003293_hash &_000314_hash
87083 +_003294_hash aac_rkt_ioremap 2 3333 _003294_hash NULL
87084 +_003295_hash aac_rx_ioremap 2 52410 _003295_hash NULL
87085 +_003296_hash aac_sa_ioremap 2 13596 _003296_hash &_000288_hash
87086 +_003297_hash aac_src_ioremap 2 41688 _003297_hash NULL
87087 +_003298_hash aac_srcv_ioremap 2 6659 _003298_hash NULL
87088 +_003299_hash acpi_map 1-2 58725 _003299_hash NULL
87089 +_003301_hash acpi_os_read_memory 1-3 54186 _003301_hash NULL
87090 +_003302_hash acpi_os_write_memory 1-3 56416 _003302_hash &_003262_hash
87091 +_003303_hash c101_run 2 37279 _003303_hash NULL
87092 +_003304_hash ca91cx42_master_set 4 23146 _003304_hash NULL
87093 +_003305_hash check586 2 29914 _003305_hash NULL
87094 +_003306_hash check_mirror 1-2 57342 _003306_hash &_001564_hash
87095 +_003308_hash cru_detect 1 11272 _003308_hash NULL
87096 +_003309_hash cs553x_init_one 3 58886 _003309_hash NULL
87097 +_003310_hash cycx_setup 4 47562 _003310_hash NULL
87098 +_003311_hash DepcaSignature 2 80 _003311_hash &_001321_hash
87099 +_003312_hash devm_ioremap 2-3 29235 _003312_hash NULL
87100 +_003314_hash divasa_remap_pci_bar 3-4 23485 _003314_hash &_000947_hash
87101 +_003316_hash dma_declare_coherent_memory 2-4 14244 _003316_hash NULL
87102 +_003318_hash doc_probe 1 23285 _003318_hash NULL
87103 +_003319_hash DoC_Probe 1 57534 _003319_hash NULL
87104 +_003320_hash ems_pcmcia_add_card 2 62627 _003320_hash NULL
87105 +_003321_hash gdth_init_isa 1 28091 _003321_hash NULL
87106 +_003322_hash gdth_search_isa 1 58595 _003322_hash NULL
87107 +_003323_hash isp1760_register 1-2 628 _003323_hash NULL
87108 +_003325_hash mthca_map_reg 2-3 5664 _003325_hash NULL
87109 +_003327_hash n2_run 3 53459 _003327_hash NULL
87110 +_003328_hash pcim_iomap 3 58334 _003328_hash NULL
87111 +_003329_hash probe_bios 1 17467 _003329_hash NULL
87112 +_003330_hash register_device 2-3 60015 _003330_hash NULL
87113 +_003332_hash remap_pci_mem 1-2 15966 _003332_hash NULL
87114 +_003334_hash rtl_port_map 1-2 2385 _003334_hash NULL
87115 +_003336_hash sfi_map_table 1 5462 _003336_hash NULL
87116 +_003337_hash sriov_enable_migration 2 14889 _003337_hash NULL
87117 +_003338_hash ssb_bus_scan 2 36578 _003338_hash NULL
87118 +_003339_hash ssb_ioremap 2 5228 _003339_hash NULL
87119 +_003340_hash tpm_tis_init 2-3 15304 _003340_hash NULL
87120 +_003342_hash tsi148_master_set 4 14685 _003342_hash NULL
87121 +_003343_hash acpi_os_map_memory 1-2 11161 _003343_hash NULL
87122 +_003345_hash com90xx_found 3 13974 _003345_hash NULL
87123 +_003346_hash dmam_declare_coherent_memory 2-4 43679 _003346_hash NULL
87124 +_003348_hash gdth_isa_probe_one 1 48925 _003348_hash NULL
87125 +_003349_hash sfi_check_table 1 6772 _003349_hash NULL
87126 +_003350_hash sfi_sysfs_install_table 1 51688 _003350_hash NULL
87127 +_003351_hash sriov_enable 2 59689 _003351_hash NULL
87128 +_003352_hash ssb_bus_register 3 65183 _003352_hash NULL
87129 +_003353_hash acpi_ex_system_memory_space_handler 2 31192 _003353_hash NULL
87130 +_003354_hash acpi_tb_check_xsdt 1 21862 _003354_hash NULL
87131 +_003355_hash acpi_tb_install_table 1 12988 _003355_hash NULL
87132 +_003356_hash acpi_tb_parse_root_table 1 53455 _003356_hash NULL
87133 +_003357_hash check_vendor_extension 1 3254 _003357_hash NULL
87134 +_003358_hash pci_enable_sriov 2 35745 _003358_hash NULL
87135 +_003359_hash ssb_bus_pcmciabus_register 3 56020 _003359_hash NULL
87136 +_003360_hash ssb_bus_ssbbus_register 2 2217 _003360_hash NULL
87137 +_003361_hash lpfc_sli_probe_sriov_nr_virtfn 2 26004 _003361_hash NULL
87138 +_003364_hash alloc_vm_area 1 15989 _003364_hash NULL
87139 +_003366_hash efi_ioremap 1-2 3492 _003366_hash &_001092_hash
87140 +_003368_hash init_chip_wc_pat 2 62768 _003368_hash NULL
87141 +_003369_hash io_mapping_create_wc 1-2 1354 _003369_hash NULL
87142 +_003371_hash iommu_map_mmio_space 1 30919 _003371_hash NULL
87143 +_003372_hash arch_gnttab_map_shared 3 41306 _003372_hash NULL
87144 +_003373_hash arch_gnttab_map_status 3 49812 _003373_hash NULL
87145 +_003374_hash intel_render_ring_init_dri 2-3 45446 _003374_hash NULL
87146 +_003376_hash persistent_ram_iomap 1-2 47156 _003376_hash NULL
87147 +_003378_hash sparse_early_usemaps_alloc_pgdat_section 2 62304 _003378_hash NULL
87148 +_003379_hash ttm_bo_ioremap 2-3 31082 _003379_hash NULL
87149 +_003381_hash ttm_bo_kmap_ttm 3 5922 _003381_hash NULL
87150 +_003382_hash atyfb_setup_generic 3 49151 _003382_hash NULL
87151 +_003383_hash do_test 1 15766 _003383_hash NULL
87152 +_003384_hash mga_ioremap 1-2 8571 _003384_hash NULL
87153 +_003386_hash mid_get_vbt_data_r0 2 10876 _003386_hash NULL
87154 +_003387_hash mid_get_vbt_data_r10 2 6308 _003387_hash NULL
87155 +_003388_hash mid_get_vbt_data_r1 2 26170 _003388_hash NULL
87156 +_003389_hash persistent_ram_buffer_map 1-2 11332 _003389_hash NULL
87157 +_003391_hash read_vbt_r0 1 503 _003391_hash NULL
87158 +_003392_hash read_vbt_r10 1 60679 _003392_hash NULL
87159 +_003393_hash tpci200_slot_map_space 2 3848 _003393_hash NULL
87160 +_003394_hash ttm_bo_kmap 2-3 60118 _003394_hash NULL
87161 +_003395_hash persistent_ram_new 1-2 14588 _003395_hash NULL
87162 +_003396_hash mpt_lan_receive_post_turbo 2 13592 _003396_hash NULL
87163 +_003397_hash v4l2_ctrl_new_int_menu 4 41151 _003397_hash NULL
87164 +_003398_hash v4l2_ctrl_new_std 5 45748 _003398_hash &_002699_hash
87165 +_003399_hash v4l2_ctrl_new_std_menu 4 6221 _003399_hash NULL
87166 +_003400_hash xhci_alloc_streams 5 37586 _003400_hash NULL
87167 +_003401_hash cx2341x_ctrl_new_menu 3 49700 _003401_hash NULL
87168 +_003402_hash cx2341x_ctrl_new_std 4 57061 _003402_hash NULL
87169 +_003405_hash _alloc_get_attr_desc 2 470 _003405_hash NULL
87170 +_003406_hash ath6kl_wmi_proc_events_vif 5 42549 _003406_hash &_003157_hash
87171 +_003407_hash bitmap_resize 2 33054 _003407_hash NULL
87172 +_003408_hash bitmap_storage_alloc 2 55077 _003408_hash NULL
87173 +_003411_hash bnx2fc_process_unsol_compl 2 15576 _003411_hash NULL
87174 +_003413_hash btmrvl_sdio_host_to_card 3 12152 _003413_hash NULL
87175 +_003415_hash btrfs_error_discard_extent 2 50444 _003415_hash NULL
87176 +_003416_hash btrfsic_cmp_log_and_dev_bytenr 2 49628 _003416_hash NULL
87177 +_003417_hash c4iw_id_table_alloc 3 48163 _003417_hash NULL
87178 +_003418_hash cache_read_pipefs 3 47615 _003418_hash NULL
87179 +_003419_hash cache_read_procfs 3 52882 _003419_hash NULL
87180 +_003420_hash cache_write_pipefs 3 48270 _003420_hash NULL
87181 +_003421_hash cache_write_procfs 3 22491 _003421_hash NULL
87182 +_003425_hash cfpkt_split 2 47541 _003425_hash NULL
87183 +_003426_hash cgroup_file_read 3 28804 _003426_hash NULL
87184 +_003427_hash cgroup_file_write 3 52417 _003427_hash NULL
87185 +_003428_hash cnic_init_id_tbl 2 41354 _003428_hash NULL
87186 +_003430_hash copy_nodes_to_user 2 63807 _003430_hash NULL
87187 +_003431_hash cp210x_get_config 4 56229 _003431_hash NULL
87188 +_003432_hash cp210x_set_config 4 46447 _003432_hash NULL
87189 +_003433_hash cx18_v4l2_read 3 21196 _003433_hash NULL
87190 +_003434_hash dccp_setsockopt 5 60367 _003434_hash NULL
87191 +_003435_hash ddp_ppod_write_idata 5 25610 _003435_hash NULL
87192 +_003436_hash dispatch_ioctl 2 32357 _003436_hash &_003179_hash
87193 +_003437_hash dn_setsockopt 5 314 _003437_hash &_001817_hash
87194 +_003438_hash dt3155_alloc_coherent 2 58073 _003438_hash NULL
87195 +_003439_hash dvb_ca_write 3 41171 _003439_hash NULL
87196 +_003440_hash dvb_demux_read 3 13981 _003440_hash NULL
87197 +_003441_hash dvb_dmxdev_read_sec 4 7892 _003441_hash NULL
87198 +_003442_hash dvb_dvr_read 3 17073 _003442_hash NULL
87199 +_003443_hash dvb_usercopy 2 14036 _003443_hash NULL
87200 +_003445_hash evdev_do_ioctl 2 24459 _003445_hash NULL
87201 +_003446_hash fc_host_post_vendor_event 3 30903 _003446_hash NULL
87202 +_003447_hash fix_unclean_leb 3 23188 _003447_hash NULL
87203 +_003448_hash fs_devrw_entry 3 11924 _003448_hash NULL
87204 +_003449_hash fuse_conn_congestion_threshold_read 3 51028 _003449_hash NULL
87205 +_003450_hash fuse_conn_congestion_threshold_write 3 43736 _003450_hash NULL
87206 +_003451_hash fuse_conn_max_background_read 3 10855 _003451_hash NULL
87207 +_003452_hash fuse_conn_max_background_write 3 50061 _003452_hash NULL
87208 +_003453_hash fuse_fill_write_pages 4 53682 _003453_hash NULL
87209 +_003454_hash generic_perform_write 3 54832 _003454_hash NULL
87210 +_003455_hash gen_pool_add_virt 4 39913 _003455_hash NULL
87211 +_003456_hash get_info 3 55681 _003456_hash NULL
87212 +_003457_hash get_nodes 3 39012 _003457_hash NULL
87213 +_003458_hash groups_alloc 1 7614 _003458_hash NULL
87214 +_003459_hash hiddev_ioctl 2 36816 _003459_hash NULL
87215 +_003460_hash hidraw_ioctl 2 63658 _003460_hash NULL
87216 +_003461_hash hidraw_write 3 31536 _003461_hash NULL
87217 +_003462_hash ide_core_cp_entry 3 22636 _003462_hash NULL
87218 +_003463_hash ieee80211_amsdu_to_8023s 5 15561 _003463_hash NULL
87219 +_003464_hash ieee80211_if_write_smps 3 35550 _003464_hash NULL
87220 +_003465_hash ieee80211_if_write_tkip_mic_test 3 58748 _003465_hash NULL
87221 +_003466_hash ieee80211_if_write_tsf 3 36077 _003466_hash NULL
87222 +_003467_hash ieee80211_if_write_uapsd_max_sp_len 3 14233 _003467_hash NULL
87223 +_003468_hash ieee80211_if_write_uapsd_queues 3 51526 _003468_hash NULL
87224 +_003469_hash if_spi_host_to_card 4 62890 _003469_hash NULL
87225 +_003470_hash intel_fake_agp_alloc_by_type 1 1 _003470_hash NULL
87226 +_003471_hash int_hardware_entry 3 36833 _003471_hash NULL
87227 +_003472_hash int_hw_irq_en 3 46776 _003472_hash NULL
87228 +_003473_hash int_tasklet_entry 3 52500 _003473_hash NULL
87229 +_003474_hash ip_append_data 5-6 16942 _003474_hash NULL
87230 +_003476_hash ip_make_skb 5-6 13129 _003476_hash NULL
87231 +_003478_hash ip_options_get_alloc 1 7448 _003478_hash NULL
87232 +_003479_hash ip_setsockopt 5 33487 _003479_hash NULL
87233 +_003480_hash ipv6_setsockopt 5 29871 _003480_hash NULL
87234 +_003481_hash iscsi_if_send_reply 7 52219 _003481_hash NULL
87235 +_003482_hash iscsi_offload_mesg 5 58425 _003482_hash NULL
87236 +_003483_hash iscsi_ping_comp_event 5 38263 _003483_hash NULL
87237 +_003484_hash iscsi_post_host_event 4 13473 _003484_hash NULL
87238 +_003485_hash iscsi_recv_pdu 4 16755 _003485_hash NULL
87239 +_003487_hash ivtv_v4l2_read 3 1964 _003487_hash NULL
87240 +_003488_hash joydev_ioctl_common 2 49359 _003488_hash NULL
87241 +_003489_hash lbs_bcnmiss_read 3 8678 _003489_hash NULL
87242 +_003490_hash lbs_failcount_read 3 31063 _003490_hash NULL
87243 +_003491_hash lbs_highrssi_read 3 64089 _003491_hash NULL
87244 +_003492_hash lbs_highsnr_read 3 5931 _003492_hash NULL
87245 +_003493_hash lbs_lowrssi_read 3 32242 _003493_hash NULL
87246 +_003494_hash lbs_lowsnr_read 3 29571 _003494_hash NULL
87247 +_003495_hash load_module 2 60056 _003495_hash &_002482_hash
87248 +_003496_hash logger_read 3 59607 _003496_hash NULL
87249 +_003497_hash mem_swapout_entry 3 32586 _003497_hash NULL
87250 +_003498_hash mlx4_init_icm_table 4-5 2151 _003498_hash NULL
87251 +_003501_hash mon_bin_ioctl 3 2771 _003501_hash NULL
87252 +_003502_hash mpi_set_buffer 3 65294 _003502_hash NULL
87253 +_003503_hash __mptctl_ioctl 2 15875 _003503_hash NULL
87254 +_003504_hash mthca_alloc_icm_table 3-4 38268 _003504_hash &_002129_hash
87255 +_003506_hash mthca_alloc_init 2 21754 _003506_hash NULL
87256 +_003507_hash mthca_array_init 2 39987 _003507_hash NULL
87257 +_003508_hash mthca_buf_alloc 2 35861 _003508_hash NULL
87258 +_003509_hash mthca_setup_cmd_doorbells 2 53954 _003509_hash NULL
87259 +_003510_hash __netlink_change_ngroups 2 46156 _003510_hash NULL
87260 +_003511_hash netlink_kernel_create 3 18110 _003511_hash NULL
87261 +_003512_hash netpoll_send_udp 3 58955 _003512_hash NULL
87262 +_003513_hash netxen_nic_map_indirect_address_128M 2 42257 _003513_hash NULL
87263 +_003514_hash nfs4_alloc_pages 1 48426 _003514_hash NULL
87264 +_003515_hash nfsd_read 5 19568 _003515_hash NULL
87265 +_003516_hash nfsd_read_file 6 62241 _003516_hash NULL
87266 +_003517_hash nfsd_write 6 54809 _003517_hash NULL
87267 +_003519_hash nvme_map_user_pages 3-4 41093 _003519_hash &_001486_hash
87268 +_003523_hash osd_req_read_sg_kern 5 6378 _003523_hash NULL
87269 +_003524_hash osd_req_write_sg_kern 5 10514 _003524_hash NULL
87270 +_003525_hash osst_read 3 40237 _003525_hash NULL
87271 +_003526_hash p54_parse_rssical 3 64493 _003526_hash NULL
87272 +_003527_hash p9_client_zc_rpc 7 14345 _003527_hash NULL
87273 +_003528_hash pcpu_alloc_alloc_info 1-2 45813 _003528_hash NULL
87274 +_003530_hash prism2_info_hostscanresults 3 39657 _003530_hash NULL
87275 +_003531_hash prism2_info_scanresults 3 59729 _003531_hash NULL
87276 +_003532_hash proc_file_read 3 53905 _003532_hash NULL
87277 +_003533_hash pskb_expand_head 2-3 42881 _003533_hash NULL
87278 +_003535_hash qla4xxx_post_aen_work 3 46953 _003535_hash NULL
87279 +_003536_hash qla4xxx_post_ping_evt_work 4 8074 _003536_hash &_001627_hash
87280 +_003537_hash rawv6_setsockopt 5 56165 _003537_hash NULL
87281 +_003538_hash rds_message_map_pages 2 31487 _003538_hash NULL
87282 +_003539_hash read_flush_pipefs 3 20171 _003539_hash NULL
87283 +_003540_hash read_flush_procfs 3 27642 _003540_hash NULL
87284 +_003541_hash receive_packet 2 12367 _003541_hash NULL
87285 +_003542_hash reiserfs_add_entry 4 23062 _003542_hash &_002307_hash
87286 +_003543_hash rsc_mgr_init 3 16299 _003543_hash NULL
87287 +_003544_hash rtsx_read_cfg_seq 3-5 48139 _003544_hash NULL
87288 +_003546_hash rtsx_write_cfg_seq 3-5 27485 _003546_hash NULL
87289 +_003548_hash rxrpc_client_sendmsg 5 23236 _003548_hash NULL
87290 +_003549_hash rxrpc_kernel_send_data 3 60083 _003549_hash NULL
87291 +_003550_hash rxrpc_server_sendmsg 4 37331 _003550_hash NULL
87292 +_003551_hash scsi_dispatch_cmd_entry 3 49848 _003551_hash NULL
87293 +_003552_hash scsi_nl_send_vendor_msg 5 16394 _003552_hash NULL
87294 +_003553_hash sctp_datamsg_from_user 4 55342 _003553_hash NULL
87295 +_003554_hash sctp_make_chunk 4 12986 _003554_hash NULL
87296 +_003555_hash sctp_tsnmap_grow 2 32784 _003555_hash NULL
87297 +_003556_hash sep_prepare_input_dma_table 2-3 2009 _003556_hash NULL
87298 +_003558_hash sep_prepare_input_output_dma_table 4-3-2 63429 _003558_hash NULL
87299 +_003559_hash set_fd_set 1 35249 _003559_hash NULL
87300 +_003563_hash sisusbcon_do_font_op 9 52271 _003563_hash NULL
87301 +_003564_hash sisusb_write_mem_bulk 4 29678 _003564_hash NULL
87302 +_003565_hash smk_write_access2 3 19170 _003565_hash NULL
87303 +_003566_hash smk_write_cipso2 3 1021 _003566_hash NULL
87304 +_003567_hash smk_write_load2 3 52155 _003567_hash NULL
87305 +_003568_hash smk_write_load 3 26829 _003568_hash NULL
87306 +_003569_hash smk_write_load_self2 3 591 _003569_hash NULL
87307 +_003570_hash smk_write_load_self 3 7958 _003570_hash NULL
87308 +_003571_hash snd_pcm_plugin_alloc 2 12580 _003571_hash NULL
87309 +_003572_hash snd_rawmidi_kernel_read 3 4328 _003572_hash NULL
87310 +_003573_hash snd_rawmidi_read 3 56337 _003573_hash NULL
87311 +_003576_hash spidev_ioctl 2 12846 _003576_hash NULL
87312 +_003577_hash squashfs_read_fragment_index_table 4 2506 _003577_hash NULL
87313 +_003578_hash squashfs_read_id_index_table 4 61961 _003578_hash NULL
87314 +_003579_hash squashfs_read_inode_lookup_table 4 64739 _003579_hash NULL
87315 +_003582_hash swap_cgroup_swapon 2 13614 _003582_hash NULL
87316 +_003583_hash timeradd_entry 3 49850 _003583_hash NULL
87317 +_003584_hash tipc_buf_acquire 1 60437 _003584_hash NULL
87318 +_003585_hash tty_buffer_find 2 2443 _003585_hash NULL
87319 +_003586_hash ubifs_wbuf_write_nolock 3 64946 _003586_hash NULL
87320 +_003588_hash usblp_ioctl 2 30203 _003588_hash NULL
87321 +_003589_hash vgacon_adjust_height 2 28124 _003589_hash NULL
87322 +_003590_hash vhci_read 3 47878 _003590_hash NULL
87323 +_003591_hash vhci_write 3 2224 _003591_hash NULL
87324 +_003592_hash __videobuf_copy_stream 4 44769 _003592_hash NULL
87325 +_003593_hash videobuf_read_one 3 31637 _003593_hash NULL
87326 +_003594_hash video_usercopy 2 62151 _003594_hash NULL
87327 +_003595_hash vme_user_read 3 55338 _003595_hash NULL
87328 +_003596_hash __vxge_hw_blockpool_malloc 2 5786 _003596_hash NULL
87329 +_003597_hash write_adapter_mem 3 3234 _003597_hash NULL
87330 +_003598_hash write_flush_pipefs 3 2021 _003598_hash NULL
87331 +_003599_hash write_flush_procfs 3 44011 _003599_hash NULL
87332 +_003600_hash xfs_buf_associate_memory 3 17915 _003600_hash NULL
87333 +_003601_hash xfs_dir2_leaf_getdents 3 23841 _003601_hash NULL
87334 +_003602_hash xfs_idata_realloc 2 26199 _003602_hash NULL
87335 +_003603_hash xfs_iformat_local 4 49472 _003603_hash NULL
87336 +_003604_hash xip_file_read 3 58592 _003604_hash NULL
87337 +_003605_hash afs_send_simple_reply 3 63940 _003605_hash NULL
87338 +_003606_hash audit_expand 2 2098 _003606_hash NULL
87339 +_003607_hash bnx2i_send_nl_mesg 4 53353 _003607_hash NULL
87340 +_003608_hash __btrfs_buffered_write 3 35311 _003608_hash NULL
87341 +_003609_hash bttv_read 3 11432 _003609_hash NULL
87342 +_003610_hash ceph_parse_server_name 2 60318 _003610_hash NULL
87343 +_003611_hash ddp_clear_map 4 46152 _003611_hash NULL
87344 +_003612_hash ddp_set_map 4 751 _003612_hash NULL
87345 +_003613_hash do_arpt_get_ctl 4 49526 _003613_hash NULL
87346 +_003614_hash do_ip6t_get_ctl 4 47808 _003614_hash NULL
87347 +_003615_hash do_ipt_get_ctl 4 33897 _003615_hash NULL
87348 +_003616_hash do_trimming 3 26952 _003616_hash &_002655_hash
87349 +_003617_hash dvb_ca_en50221_io_ioctl 2 26490 _003617_hash NULL
87350 +_003618_hash dvb_demux_ioctl 2 42733 _003618_hash NULL
87351 +_003619_hash dvb_dvr_ioctl 2 49182 _003619_hash NULL
87352 +_003620_hash dvb_generic_ioctl 2 21810 _003620_hash NULL
87353 +_003621_hash dvb_net_ioctl 2 61559 _003621_hash NULL
87354 +_003622_hash enlarge_skb 2 44248 _003622_hash NULL
87355 +_003623_hash evdev_ioctl_handler 2 21705 _003623_hash NULL
87356 +_003624_hash fuse_perform_write 4 18457 _003624_hash NULL
87357 +_003625_hash fw_device_op_ioctl 2 11595 _003625_hash NULL
87358 +_003626_hash generic_file_buffered_write 4 25464 _003626_hash NULL
87359 +_003627_hash gen_pool_add 3 21776 _003627_hash NULL
87360 +_003628_hash ieee80211_skb_resize 3 50211 _003628_hash NULL
87361 +_003629_hash ip_options_get 4 56538 _003629_hash NULL
87362 +_003630_hash ip_send_reply 5 19987 _003630_hash NULL
87363 +_003631_hash __iscsi_complete_pdu 4 10726 _003631_hash NULL
87364 +_003632_hash iscsi_nop_out_rsp 4 51117 _003632_hash NULL
87365 +_003633_hash joydev_ioctl 2 33343 _003633_hash NULL
87366 +_003634_hash mlx4_init_cmpt_table 3 11569 _003634_hash NULL
87367 +_003635_hash mptctl_ioctl 2 12355 _003635_hash NULL
87368 +_003636_hash mthca_alloc_cq_buf 3 46512 _003636_hash NULL
87369 +_003637_hash named_prepare_buf 2 24532 _003637_hash NULL
87370 +_003638_hash netlink_change_ngroups 2 16457 _003638_hash NULL
87371 +_003639_hash netxen_nic_hw_read_wx_128M 2 26858 _003639_hash NULL
87372 +_003640_hash netxen_nic_hw_write_wx_128M 2 33488 _003640_hash NULL
87373 +_003642_hash osd_req_add_get_attr_list 3 49278 _003642_hash NULL
87374 +_003643_hash pcpu_build_alloc_info 1-3-2 41443 _003643_hash NULL
87375 +_003646_hash ping_sendmsg 4 3782 _003646_hash NULL
87376 +_003647_hash __pskb_pull_tail 2 60287 _003647_hash NULL
87377 +_003648_hash raid5_resize 2 63306 _003648_hash NULL
87378 +_003649_hash rxrpc_sendmsg 4 29049 _003649_hash NULL
87379 +_003650_hash sctp_make_abort 3 34459 _003650_hash NULL
87380 +_003651_hash sctp_make_asconf 3 4078 _003651_hash NULL
87381 +_003652_hash sctp_make_asconf_ack 3 31726 _003652_hash NULL
87382 +_003653_hash sctp_make_datafrag_empty 3 34737 _003653_hash NULL
87383 +_003654_hash sctp_make_fwdtsn 3 53265 _003654_hash NULL
87384 +_003655_hash sctp_make_heartbeat_ack 4 34411 _003655_hash NULL
87385 +_003656_hash sctp_make_init 4 58401 _003656_hash NULL
87386 +_003657_hash sctp_make_init_ack 4 3335 _003657_hash NULL
87387 +_003658_hash sctp_make_op_error_space 3 5528 _003658_hash NULL
87388 +_003659_hash sctp_tsnmap_mark 2 35929 _003659_hash NULL
87389 +_003663_hash sisusb_clear_vram 2-3 57466 _003663_hash NULL
87390 +_003665_hash sisusb_copy_memory 4 35016 _003665_hash NULL
87391 +_003666_hash sisusb_write 3 44834 _003666_hash NULL
87392 +_003667_hash __skb_cow 2 39254 _003667_hash NULL
87393 +_003668_hash skb_pad 2 17302 _003668_hash NULL
87394 +_003669_hash skb_realloc_headroom 2 19516 _003669_hash NULL
87395 +_003670_hash snd_pcm_plug_alloc 2 42339 _003670_hash NULL
87396 +_003673_hash subdev_ioctl 2 28417 _003673_hash NULL
87397 +_003674_hash sys_get_mempolicy 3 30379 _003674_hash NULL
87398 +_003675_hash sys_init_module 2 36047 _003675_hash NULL
87399 +_003676_hash sys_mbind 5 7990 _003676_hash NULL
87400 +_003677_hash sys_migrate_pages 2 39825 _003677_hash NULL
87401 +_003678_hash sys_setgroups 1 48668 _003678_hash &_001127_hash
87402 +_003679_hash sys_setgroups16 1 48882 _003679_hash NULL
87403 +_003680_hash sys_set_mempolicy 3 32608 _003680_hash NULL
87404 +_003681_hash tipc_msg_build 4 12326 _003681_hash NULL
87405 +_003682_hash __tty_buffer_request_room 2 27700 _003682_hash NULL
87406 +_003683_hash ubifs_recover_leb 3 60639 _003683_hash NULL
87407 +_003684_hash udp_sendmsg 4 4492 _003684_hash NULL
87408 +_003685_hash udp_setsockopt 5 25985 _003685_hash NULL
87409 +_003686_hash udpv6_setsockopt 5 18487 _003686_hash NULL
87410 +_003687_hash uvc_v4l2_ioctl 2 8411 _003687_hash NULL
87411 +_003688_hash videobuf_read_stream 3 14956 _003688_hash NULL
87412 +_003689_hash video_ioctl2 2 21380 _003689_hash NULL
87413 +_003690_hash video_read 3 28148 _003690_hash NULL
87414 +_003691_hash write_head 4 30481 _003691_hash NULL
87415 +_003692_hash write_node 4 33121 _003692_hash NULL
87416 +_003693_hash write_pbl 4 59583 _003693_hash NULL
87417 +_003694_hash xfs_dir2_block_to_sf 3 37868 _003694_hash NULL
87418 +_003695_hash xfs_dir2_sf_addname_hard 3 54254 _003695_hash NULL
87419 +_003696_hash xfs_readdir 3 41200 _003696_hash NULL
87420 +_003697_hash xlog_bread_offset 3 60030 _003697_hash NULL
87421 +_003698_hash zr364xx_read 3 2354 _003698_hash NULL
87422 +_003699_hash au0828_v4l2_read 3 40220 _003699_hash NULL
87423 +_003700_hash audit_log_n_hex 3 45617 _003700_hash NULL
87424 +_003701_hash audit_log_n_string 3 31705 _003701_hash NULL
87425 +_003702_hash beiscsi_process_async_pdu 7 39834 _003702_hash NULL
87426 +_003703_hash __btrfs_direct_write 4 22273 _003703_hash NULL
87427 +_003704_hash btrfs_file_aio_write 4 21520 _003704_hash NULL
87428 +_003705_hash cx231xx_v4l2_read 3 55014 _003705_hash NULL
87429 +_003706_hash cx25821_video_ioctl 2 30188 _003706_hash NULL
87430 +_003707_hash em28xx_v4l2_read 3 16701 _003707_hash NULL
87431 +_003708_hash evdev_ioctl 2 22371 _003708_hash NULL
87432 +_003709_hash fuse_file_aio_write 4 46399 _003709_hash NULL
87433 +_003710_hash iscsi_complete_pdu 4 48372 _003710_hash NULL
87434 +_003711_hash isdn_ppp_skb_push 2 5236 _003711_hash NULL
87435 +_003712_hash ivtv_v4l2_ioctl 2 16915 _003712_hash NULL
87436 +_003713_hash mpeg_read 3 6708 _003713_hash NULL
87437 +_003714_hash mthca_alloc_resize_buf 3 60394 _003714_hash NULL
87438 +_003715_hash mthca_init_cq 2 60011 _003715_hash NULL
87439 +_003716_hash named_distribute 4 48544 _003716_hash NULL
87440 +_003717_hash __nf_nat_mangle_tcp_packet 5-7 8190 _003717_hash NULL
87441 +_003719_hash nf_nat_mangle_udp_packet 5-7 13321 _003719_hash NULL
87442 +_003722_hash pcpu_embed_first_chunk 1-3-2 24224 _003722_hash NULL
87443 +_003724_hash pcpu_page_first_chunk 1 20712 _003724_hash NULL
87444 +_003725_hash pd_video_read 3 24510 _003725_hash NULL
87445 +_003726_hash pskb_may_pull 2 22546 _003726_hash NULL
87446 +_003727_hash __pskb_pull 2 42602 _003727_hash NULL
87447 +_003728_hash pvr2_v4l2_ioctl 2 24398 _003728_hash &_000854_hash
87448 +_003729_hash sctp_abort_pkt_new 5 55218 _003729_hash NULL
87449 +_003730_hash sctp_make_abort_violation 4 27959 _003730_hash NULL
87450 +_003731_hash sctp_make_op_error 5-6 7057 _003731_hash NULL
87451 +_003734_hash sisusbcon_bmove 5-7-6 21873 _003734_hash NULL
87452 +_003737_hash sisusbcon_clear 3-5-4 64329 _003737_hash NULL
87453 +_003740_hash sisusbcon_putcs 3 57630 _003740_hash &_001003_hash
87454 +_003741_hash sisusbcon_scroll 5-3-2 31315 _003741_hash NULL
87455 +_003742_hash sisusbcon_scroll_area 3-4 25899 _003742_hash NULL
87456 +_003744_hash skb_cow 2 26138 _003744_hash NULL
87457 +_003745_hash skb_cow_head 2 52495 _003745_hash NULL
87458 +_003746_hash skb_make_writable 2 24783 _003746_hash NULL
87459 +_003747_hash skb_padto 2 50759 _003747_hash NULL
87460 +_003748_hash solo_enc_read 3 33553 _003748_hash NULL
87461 +_003749_hash solo_v4l2_read 3 59247 _003749_hash NULL
87462 +_003750_hash timblogiw_read 3 48305 _003750_hash NULL
87463 +_003751_hash tipc_multicast 5 49144 _003751_hash NULL
87464 +_003752_hash tipc_port_recv_sections 4 42890 _003752_hash NULL
87465 +_003753_hash tipc_port_reject_sections 5 55229 _003753_hash NULL
87466 +_003754_hash tm6000_read 3 4151 _003754_hash NULL
87467 +_003755_hash trim_bitmaps 3 24158 _003755_hash NULL
87468 +_003756_hash trim_no_bitmap 3 22524 _003756_hash NULL
87469 +_003757_hash tty_buffer_request_room 2 23228 _003757_hash NULL
87470 +_003758_hash tty_insert_flip_string_fixed_flag 4 37428 _003758_hash NULL
87471 +_003759_hash tty_insert_flip_string_flags 4 30969 _003759_hash NULL
87472 +_003760_hash tty_prepare_flip_string 3 39955 _003760_hash NULL
87473 +_003761_hash tty_prepare_flip_string_flags 4 59240 _003761_hash NULL
87474 +_003762_hash ubifs_recover_log_leb 3 12079 _003762_hash NULL
87475 +_003763_hash vbi_read 3 63673 _003763_hash NULL
87476 +_003764_hash xfs_file_buffered_aio_write 4 11492 _003764_hash NULL
87477 +_003765_hash xlog_do_recovery_pass 3 21618 _003765_hash NULL
87478 +_003766_hash zoran_ioctl 2 30465 _003766_hash NULL
87479 +_003767_hash audit_log_n_untrustedstring 3 9548 _003767_hash NULL
87480 +_003768_hash bla_is_backbone_gw 3 53001 _003768_hash NULL
87481 +_003769_hash btrfs_trim_block_group 3 28963 _003769_hash NULL
87482 +_003770_hash check_header 2 56930 _003770_hash NULL
87483 +_003771_hash check_management_packet 3 976 _003771_hash NULL
87484 +_003772_hash check_unicast_packet 2 62217 _003772_hash NULL
87485 +_003773_hash cx18_v4l2_ioctl 2 46647 _003773_hash NULL
87486 +_003774_hash dccp_manip_pkt 2 30229 _003774_hash NULL
87487 +_003775_hash dma_push_rx 2 39973 _003775_hash NULL
87488 +_003776_hash ftdi_process_packet 5 45005 _003776_hash NULL
87489 +_003777_hash gre_manip_pkt 2 38785 _003777_hash NULL
87490 +_003778_hash handle_response 5 55951 _003778_hash NULL
87491 +_003779_hash handle_response_icmp 7 39574 _003779_hash NULL
87492 +_003780_hash help 4 14971 _003780_hash NULL
87493 +_003781_hash icmp_manip_pkt 2 48801 _003781_hash NULL
87494 +_003782_hash interface_rx 4 20404 _003782_hash NULL
87495 +_003783_hash ip4ip6_err 5 36772 _003783_hash NULL
87496 +_003784_hash ip6ip6_err 5 18308 _003784_hash NULL
87497 +_003785_hash ip_vs_icmp_xmit 4 59624 _003785_hash NULL
87498 +_003786_hash ip_vs_icmp_xmit_v6 4 20464 _003786_hash NULL
87499 +_003787_hash iscsi_iser_recv 4 41948 _003787_hash NULL
87500 +_003788_hash l2tp_xmit_skb 3 42672 _003788_hash NULL
87501 +_003789_hash mangle_packet 6-8 27864 _003789_hash NULL
87502 +_003791_hash manip_pkt 3 7741 _003791_hash NULL
87503 +_003792_hash mthca_resize_cq 2 19333 _003792_hash NULL
87504 +_003793_hash my_skb_head_push 2 58297 _003793_hash NULL
87505 +_003794_hash nf_nat_mangle_tcp_packet 5-7 8643 _003794_hash NULL
87506 +_003797_hash pskb_network_may_pull 2 35336 _003797_hash NULL
87507 +_003798_hash pskb_pull 2 65005 _003798_hash NULL
87508 +_003799_hash replay_log_leb 3 18704 _003799_hash NULL
87509 +_003800_hash sctp_manip_pkt 2 40620 _003800_hash NULL
87510 +_003801_hash sctp_sf_abort_violation 6 38380 _003801_hash NULL
87511 +_003806_hash skb_gro_header_slow 2 34958 _003806_hash NULL
87512 +_003807_hash tcf_csum_skb_nextlayer 3 64025 _003807_hash NULL
87513 +_003808_hash tcp_manip_pkt 2 14202 _003808_hash NULL
87514 +_003809_hash tty_audit_log 8 47280 _003809_hash NULL
87515 +_003810_hash tty_insert_flip_string 3 34042 _003810_hash NULL
87516 +_003811_hash udplite_manip_pkt 2 62433 _003811_hash NULL
87517 +_003812_hash udp_manip_pkt 2 50770 _003812_hash NULL
87518 +_003813_hash xfs_file_aio_write 4 33234 _003813_hash NULL
87519 +_003814_hash xlog_do_log_recovery 3 17550 _003814_hash NULL
87520 +_003815_hash afs_extract_data 5 50261 _003815_hash NULL
87521 +_003816_hash aircable_process_packet 5 46639 _003816_hash NULL
87522 +_003817_hash edge_tty_recv 4 18667 _003817_hash &_002287_hash
87523 +_003818_hash gigaset_if_receive 3 4861 _003818_hash NULL
87524 +_003819_hash gsm_dlci_data 3 14155 _003819_hash NULL
87525 +_003820_hash ifx_spi_insert_flip_string 3 51752 _003820_hash NULL
87526 +_003821_hash ip_nat_sdp_port 6 52938 _003821_hash NULL
87527 +_003822_hash ip_nat_sip_expect 7 45693 _003822_hash NULL
87528 +_003823_hash ipwireless_tty_received 3 49154 _003823_hash NULL
87529 +_003824_hash iser_rcv_completion 2 8048 _003824_hash NULL
87530 +_003825_hash mangle_sdp_packet 9 36279 _003825_hash NULL
87531 +_003826_hash map_addr 6 4666 _003826_hash NULL
87532 +_003827_hash nf_nat_ftp 5 47948 _003827_hash NULL
87533 +_003828_hash pty_write 3 44757 _003828_hash &_001547_hash
87534 +_003829_hash push_rx 3 28939 _003829_hash NULL
87535 +_003830_hash put_data_to_circ_buf 3 24869 _003830_hash &_001419_hash
87536 +_003831_hash rds_tcp_data_recv 3 53476 _003831_hash NULL
87537 +_003832_hash send_to_tty 3 45141 _003832_hash NULL
87538 +_003834_hash tcf_csum_ipv4_icmp 3 9258 _003834_hash NULL
87539 +_003835_hash tcf_csum_ipv4_igmp 3 60446 _003835_hash NULL
87540 +_003836_hash tcf_csum_ipv4_tcp 4 39713 _003836_hash NULL
87541 +_003837_hash tcf_csum_ipv4_udp 4 30777 _003837_hash &_000218_hash
87542 +_003838_hash tcf_csum_ipv6_icmp 4 11738 _003838_hash NULL
87543 +_003839_hash tcf_csum_ipv6_tcp 4 54877 _003839_hash NULL
87544 +_003840_hash tcf_csum_ipv6_udp 4 25241 _003840_hash NULL
87545 +_003841_hash ti_recv 4 22027 _003841_hash NULL
87546 +_003842_hash xlog_do_recover 3 59789 _003842_hash NULL
87547 +_003843_hash ip_nat_sdp_media 8 23386 _003843_hash NULL
87548 +_003844_hash lock_loop 1 61681 _003844_hash NULL
87549 +_003845_hash max3107_handlerx 2 58978 _003845_hash NULL
87550 +_003846_hash process_rcvd_data 3 6679 _003846_hash NULL
87551 +_003847_hash alloc_mr 1 45935 _003847_hash NULL
87552 +_003848_hash compat_core_sys_select 1 65285 _003848_hash NULL
87553 +_003849_hash compat_dccp_setsockopt 5 51263 _003849_hash NULL
87554 +_003850_hash compat_ip_setsockopt 5 13870 _003850_hash &_003115_hash
87555 +_003851_hash compat_ipv6_setsockopt 5 20468 _003851_hash NULL
87556 +_003852_hash compat_mpctl_ioctl 2 45671 _003852_hash NULL
87557 +_003853_hash compat_raw_setsockopt 5 30634 _003853_hash NULL
87558 +_003854_hash compat_rawv6_setsockopt 5 4967 _003854_hash NULL
87559 +_003855_hash compat_sys_get_mempolicy 3 31109 _003855_hash NULL
87560 +_003856_hash compat_sys_mbind 5 36256 _003856_hash NULL
87561 +_003857_hash compat_sys_migrate_pages 2 3157 _003857_hash NULL
87562 +_003858_hash compat_sys_set_mempolicy 3 57742 _003858_hash &_002222_hash
87563 +_003859_hash evdev_ioctl_compat 2 13851 _003859_hash NULL
87564 +_003860_hash fw_device_op_compat_ioctl 2 42804 _003860_hash NULL
87565 +_003861_hash gnttab_expand 1 15817 _003861_hash NULL
87566 +_003862_hash hiddev_compat_ioctl 2 41255 _003862_hash NULL
87567 +_003863_hash joydev_compat_ioctl 2 8765 _003863_hash NULL
87568 +_003864_hash mon_bin_compat_ioctl 3 50234 _003864_hash NULL
87569 +_003865_hash spidev_compat_ioctl 2 63778 _003865_hash NULL
87570 +_003866_hash uvc_v4l2_compat_ioctl32 2 8375 _003866_hash NULL
87571 +_003867_hash xlbd_reserve_minors 1-2 18365 _003867_hash NULL
87572 +_003868_hash compat_sys_select 1 16131 _003868_hash NULL
87573 +_003869_hash compat_udp_setsockopt 5 38840 _003869_hash NULL
87574 +_003870_hash compat_udpv6_setsockopt 5 42981 _003870_hash NULL
87575 +_003871_hash do_compat_pselect 1 10398 _003871_hash NULL
87576 +_003872_hash get_free_entries 1 46030 _003872_hash NULL
87577 +_003873_hash ipath_reg_phys_mr 3 23918 _003873_hash &_000963_hash
87578 +_003874_hash qib_alloc_fast_reg_mr 2 12526 _003874_hash NULL
87579 +_003875_hash qib_reg_phys_mr 3 60202 _003875_hash &_000872_hash
87580 +_003876_hash compat_sys_pselect6 1 14105 _003876_hash NULL
87581 +_003877_hash gnttab_alloc_grant_references 1 18240 _003877_hash NULL
87582 +_003878_hash alc_auto_create_extra_outs 2 18975 _003878_hash NULL
87583 +_003879_hash _alloc_cdb_cont 2 23609 _003879_hash NULL
87584 +_003880_hash _alloc_set_attr_list 4 48991 _003880_hash NULL
87585 +_003881_hash __btrfs_free_reserved_extent 2 31207 _003881_hash NULL
87586 +_003882_hash btrfsic_create_link_to_next_block 4 58246 _003882_hash NULL
87587 +_003883_hash diva_alloc_dma_map 2 23798 _003883_hash NULL
87588 +_003884_hash diva_xdi_write 4 63975 _003884_hash NULL
87589 +_003885_hash gsm_control_reply 4 53333 _003885_hash NULL
87590 +_003886_hash iwm_ntf_rx_packet 3 60452 _003886_hash NULL
87591 +_003887_hash macvtap_recvmsg 4 63949 _003887_hash NULL
87592 +_003890_hash smp_send_cmd 3 512 _003890_hash NULL
87593 +_003891_hash tun_recvmsg 4 48463 _003891_hash NULL
87594 +_003892_hash um_idi_read 3 850 _003892_hash NULL
87595 +_003893_hash _add_sg_continuation_descriptor 3 54721 _003893_hash NULL
87596 +_003894_hash btrfs_free_and_pin_reserved_extent 2 53016 _003894_hash NULL
87597 +_003895_hash btrfs_free_reserved_extent 2 9867 _003895_hash NULL
87598 +_003896_hash diva_init_dma_map 3 58336 _003896_hash NULL
87599 +_003897_hash divas_write 3 63901 _003897_hash NULL
87600 +_003898_hash gsm_control_message 4 18209 _003898_hash NULL
87601 +_003899_hash gsm_control_modem 3 55303 _003899_hash NULL
87602 +_003900_hash gsm_control_rls 3 3353 _003900_hash NULL
87603 +_003901_hash osd_req_read_sg 5 47905 _003901_hash NULL
87604 +_003902_hash osd_req_write_sg 5 50908 _003902_hash NULL
87605 +_003903_hash agp_remap 2 30665 _003903_hash NULL
87606 +_003904_hash alloc_arraycache 2 47505 _003904_hash NULL
87607 +_003905_hash drm_buffer_alloc 2 44405 _003905_hash NULL
87608 +_003906_hash drm_ioctl 2 42813 _003906_hash NULL
87609 +_003907_hash fbcon_do_set_font 2-3 4079 _003907_hash NULL
87610 +_003909_hash slabinfo_write 3 18600 _003909_hash NULL
87611 +_003910_hash do_tune_cpucache 2 14828 _003910_hash NULL
87612 +_003911_hash drm_compat_ioctl 2 51717 _003911_hash NULL
87613 +_003912_hash i915_compat_ioctl 2 3656 _003912_hash NULL
87614 +_003913_hash ath6kl_usb_bmi_read 3 48745 _003913_hash NULL
87615 +_003914_hash ath6kl_usb_bmi_write 3 2454 _003914_hash &_000981_hash
87616 +_003915_hash ath6kl_usb_ctrl_msg_exchange 4 33327 _003915_hash NULL
87617 +_003916_hash mga_compat_ioctl 2 52170 _003916_hash NULL
87618 +_003918_hash netlink_send 5 38434 _003918_hash NULL
87619 +_003919_hash nouveau_compat_ioctl 2 28305 _003919_hash NULL
87620 +_003920_hash persistent_ram_vmap 1-2 709 _003920_hash NULL
87621 +_003922_hash psb_unlocked_ioctl 2 16926 _003922_hash &_002255_hash
87622 +_003923_hash r128_compat_ioctl 2 39250 _003923_hash NULL
87623 +_003924_hash radeon_compat_ioctl 2 59150 _003924_hash NULL
87624 +_003925_hash radeon_kms_compat_ioctl 2 51371 _003925_hash NULL
87625 +_003926_hash viacam_read 3 54526 _003926_hash NULL
87626 +_003927_hash vmw_framebuffer_dmabuf_dirty 6 37661 _003927_hash &_001074_hash
87627 +_003928_hash vmw_framebuffer_surface_dirty 6 48132 _003928_hash NULL
87628 +_003929_hash vmw_unlocked_ioctl 2 19212 _003929_hash NULL
87629 +_003930_hash wl1251_cmd_template_set 4 6172 _003930_hash NULL
87630 +_003931_hash cma_create_area 2 38642 _003931_hash NULL
87631 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
87632 new file mode 100644
87633 index 0000000..244559e
87634 --- /dev/null
87635 +++ b/tools/gcc/size_overflow_plugin.c
87636 @@ -0,0 +1,1879 @@
87637 +/*
87638 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
87639 + * Licensed under the GPL v2, or (at your option) v3
87640 + *
87641 + * Homepage:
87642 + * http://www.grsecurity.net/~ephox/overflow_plugin/
87643 + *
87644 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
87645 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
87646 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
87647 + *
87648 + * Usage:
87649 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
87650 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
87651 + */
87652 +
87653 +#include "gcc-plugin.h"
87654 +#include "config.h"
87655 +#include "system.h"
87656 +#include "coretypes.h"
87657 +#include "tree.h"
87658 +#include "tree-pass.h"
87659 +#include "intl.h"
87660 +#include "plugin-version.h"
87661 +#include "tm.h"
87662 +#include "toplev.h"
87663 +#include "function.h"
87664 +#include "tree-flow.h"
87665 +#include "plugin.h"
87666 +#include "gimple.h"
87667 +#include "c-common.h"
87668 +#include "diagnostic.h"
87669 +#include "cfgloop.h"
87670 +
87671 +#if BUILDING_GCC_VERSION >= 4007
87672 +#include "c-tree.h"
87673 +#else
87674 +#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
87675 +#endif
87676 +
87677 +struct size_overflow_hash {
87678 + const struct size_overflow_hash * const next;
87679 + const char * const name;
87680 + const unsigned int param;
87681 +};
87682 +
87683 +#include "size_overflow_hash.h"
87684 +
87685 +enum marked {
87686 + MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
87687 +};
87688 +
87689 +#define __unused __attribute__((__unused__))
87690 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
87691 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
87692 +#define BEFORE_STMT true
87693 +#define AFTER_STMT false
87694 +#define CREATE_NEW_VAR NULL_TREE
87695 +#define CODES_LIMIT 32
87696 +#define MAX_PARAM 32
87697 +#define MY_STMT GF_PLF_1
87698 +#define NO_CAST_CHECK GF_PLF_2
87699 +
87700 +#if BUILDING_GCC_VERSION == 4005
87701 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
87702 +#endif
87703 +
87704 +int plugin_is_GPL_compatible;
87705 +void debug_gimple_stmt(gimple gs);
87706 +
87707 +static tree expand(struct pointer_set_t *visited, tree lhs);
87708 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs);
87709 +static tree report_size_overflow_decl;
87710 +static const_tree const_char_ptr_type_node;
87711 +static unsigned int handle_function(void);
87712 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
87713 +static tree get_size_overflow_type(gimple stmt, const_tree node);
87714 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3);
87715 +
87716 +static struct plugin_info size_overflow_plugin_info = {
87717 + .version = "20120930beta",
87718 + .help = "no-size-overflow\tturn off size overflow checking\n",
87719 +};
87720 +
87721 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
87722 +{
87723 + unsigned int arg_count;
87724 + enum tree_code code = TREE_CODE(*node);
87725 +
87726 + switch (code) {
87727 + case FUNCTION_DECL:
87728 + arg_count = type_num_arguments(TREE_TYPE(*node));
87729 + break;
87730 + case FUNCTION_TYPE:
87731 + case METHOD_TYPE:
87732 + arg_count = type_num_arguments(*node);
87733 + break;
87734 + default:
87735 + *no_add_attrs = true;
87736 + error("%s: %qE attribute only applies to functions", __func__, name);
87737 + return NULL_TREE;
87738 + }
87739 +
87740 + for (; args; args = TREE_CHAIN(args)) {
87741 + tree position = TREE_VALUE(args);
87742 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
87743 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
87744 + *no_add_attrs = true;
87745 + }
87746 + }
87747 + return NULL_TREE;
87748 +}
87749 +
87750 +static const char* get_asm_name(tree node)
87751 +{
87752 + return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node));
87753 +}
87754 +
87755 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
87756 +{
87757 + unsigned int arg_count, arg_num;
87758 + enum tree_code code = TREE_CODE(*node);
87759 +
87760 + switch (code) {
87761 + case FUNCTION_DECL:
87762 + arg_count = type_num_arguments(TREE_TYPE(*node));
87763 + break;
87764 + case FUNCTION_TYPE:
87765 + case METHOD_TYPE:
87766 + arg_count = type_num_arguments(*node);
87767 + break;
87768 + case FIELD_DECL:
87769 + arg_num = TREE_INT_CST_LOW(TREE_VALUE(args));
87770 + if (arg_num != 0) {
87771 + *no_add_attrs = true;
87772 + error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name);
87773 + }
87774 + return NULL_TREE;
87775 + default:
87776 + *no_add_attrs = true;
87777 + error("%qE attribute only applies to functions", name);
87778 + return NULL_TREE;
87779 + }
87780 +
87781 + for (; args; args = TREE_CHAIN(args)) {
87782 + tree position = TREE_VALUE(args);
87783 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) > arg_count ) {
87784 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
87785 + *no_add_attrs = true;
87786 + }
87787 + }
87788 + return NULL_TREE;
87789 +}
87790 +
87791 +static struct attribute_spec size_overflow_attr = {
87792 + .name = "size_overflow",
87793 + .min_length = 1,
87794 + .max_length = -1,
87795 + .decl_required = true,
87796 + .type_required = false,
87797 + .function_type_required = false,
87798 + .handler = handle_size_overflow_attribute,
87799 +#if BUILDING_GCC_VERSION >= 4007
87800 + .affects_type_identity = false
87801 +#endif
87802 +};
87803 +
87804 +static struct attribute_spec intentional_overflow_attr = {
87805 + .name = "intentional_overflow",
87806 + .min_length = 1,
87807 + .max_length = -1,
87808 + .decl_required = true,
87809 + .type_required = false,
87810 + .function_type_required = false,
87811 + .handler = handle_intentional_overflow_attribute,
87812 +#if BUILDING_GCC_VERSION >= 4007
87813 + .affects_type_identity = false
87814 +#endif
87815 +};
87816 +
87817 +static void register_attributes(void __unused *event_data, void __unused *data)
87818 +{
87819 + register_attribute(&size_overflow_attr);
87820 + register_attribute(&intentional_overflow_attr);
87821 +}
87822 +
87823 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
87824 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
87825 +{
87826 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
87827 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
87828 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
87829 +
87830 + unsigned int m = 0x57559429;
87831 + unsigned int n = 0x5052acdb;
87832 + const unsigned int *key4 = (const unsigned int *)key;
87833 + unsigned int h = len;
87834 + unsigned int k = len + seed + n;
87835 + unsigned long long p;
87836 +
87837 + while (len >= 8) {
87838 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
87839 + len -= 8;
87840 + }
87841 + if (len >= 4) {
87842 + cwmixb(key4[0]) key4 += 1;
87843 + len -= 4;
87844 + }
87845 + if (len)
87846 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
87847 + cwmixb(h ^ (k + n));
87848 + return k ^ h;
87849 +
87850 +#undef cwfold
87851 +#undef cwmixa
87852 +#undef cwmixb
87853 +}
87854 +
87855 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
87856 +{
87857 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
87858 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
87859 + return fn ^ codes;
87860 +}
87861 +
87862 +static inline tree get_original_function_decl(tree fndecl)
87863 +{
87864 + if (DECL_ABSTRACT_ORIGIN(fndecl))
87865 + return DECL_ABSTRACT_ORIGIN(fndecl);
87866 + return fndecl;
87867 +}
87868 +
87869 +static inline gimple get_def_stmt(const_tree node)
87870 +{
87871 + gcc_assert(node != NULL_TREE);
87872 + gcc_assert(TREE_CODE(node) == SSA_NAME);
87873 + return SSA_NAME_DEF_STMT(node);
87874 +}
87875 +
87876 +static unsigned char get_tree_code(const_tree type)
87877 +{
87878 + switch (TREE_CODE(type)) {
87879 + case ARRAY_TYPE:
87880 + return 0;
87881 + case BOOLEAN_TYPE:
87882 + return 1;
87883 + case ENUMERAL_TYPE:
87884 + return 2;
87885 + case FUNCTION_TYPE:
87886 + return 3;
87887 + case INTEGER_TYPE:
87888 + return 4;
87889 + case POINTER_TYPE:
87890 + return 5;
87891 + case RECORD_TYPE:
87892 + return 6;
87893 + case UNION_TYPE:
87894 + return 7;
87895 + case VOID_TYPE:
87896 + return 8;
87897 + case REAL_TYPE:
87898 + return 9;
87899 + case VECTOR_TYPE:
87900 + return 10;
87901 + case REFERENCE_TYPE:
87902 + return 11;
87903 + case OFFSET_TYPE:
87904 + return 12;
87905 + case COMPLEX_TYPE:
87906 + return 13;
87907 + default:
87908 + debug_tree((tree)type);
87909 + gcc_unreachable();
87910 + }
87911 +}
87912 +
87913 +static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
87914 +{
87915 + gcc_assert(type != NULL_TREE);
87916 +
87917 + while (type && len < CODES_LIMIT) {
87918 + tree_codes[len] = get_tree_code(type);
87919 + len++;
87920 + type = TREE_TYPE(type);
87921 + }
87922 + return len;
87923 +}
87924 +
87925 +static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
87926 +{
87927 + const_tree arg, result, arg_field, type = TREE_TYPE(fndecl);
87928 + enum tree_code code = TREE_CODE(type);
87929 + size_t len = 0;
87930 +
87931 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
87932 +
87933 + arg = TYPE_ARG_TYPES(type);
87934 + // skip builtins __builtin_constant_p
87935 + if (!arg && DECL_BUILT_IN(fndecl))
87936 + return 0;
87937 +
87938 + if (TREE_CODE_CLASS(code) == tcc_type)
87939 + result = type;
87940 + else
87941 + result = DECL_RESULT(fndecl);
87942 +
87943 + gcc_assert(result != NULL_TREE);
87944 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
87945 +
87946 + if (arg == NULL_TREE) {
87947 + gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON));
87948 + arg_field = DECL_ARGUMENT_FLD(fndecl);
87949 + if (arg_field == NULL_TREE)
87950 + return 0;
87951 + arg = TREE_TYPE(arg_field);
87952 + len = add_type_codes(arg, tree_codes, len);
87953 + gcc_assert(len != 0);
87954 + return len;
87955 + }
87956 +
87957 + gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST);
87958 + while (arg && len < CODES_LIMIT) {
87959 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
87960 + arg = TREE_CHAIN(arg);
87961 + }
87962 +
87963 + gcc_assert(len != 0);
87964 + return len;
87965 +}
87966 +
87967 +static const struct size_overflow_hash *get_function_hash(tree fndecl)
87968 +{
87969 + unsigned int hash;
87970 + const struct size_overflow_hash *entry;
87971 + unsigned char tree_codes[CODES_LIMIT];
87972 + size_t len;
87973 + const char *func_name = get_asm_name(fndecl);
87974 +
87975 + len = get_function_decl(fndecl, tree_codes);
87976 + if (len == 0)
87977 + return NULL;
87978 +
87979 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
87980 +
87981 + entry = size_overflow_hash[hash];
87982 + while (entry) {
87983 + if (!strcmp(entry->name, func_name))
87984 + return entry;
87985 + entry = entry->next;
87986 + }
87987 +
87988 + return NULL;
87989 +}
87990 +
87991 +static void check_arg_type(const_tree arg)
87992 +{
87993 + const_tree type = TREE_TYPE(arg);
87994 + enum tree_code code = TREE_CODE(type);
87995 +
87996 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
87997 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
87998 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
87999 +}
88000 +
88001 +static int find_arg_number(const_tree arg, tree func)
88002 +{
88003 + tree var;
88004 + unsigned int argnum = 1;
88005 +
88006 + if (TREE_CODE(arg) == SSA_NAME)
88007 + arg = SSA_NAME_VAR(arg);
88008 +
88009 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
88010 + if (strcmp(NAME(arg), NAME(var))) {
88011 + argnum++;
88012 + continue;
88013 + }
88014 + check_arg_type(var);
88015 + return argnum;
88016 + }
88017 + gcc_unreachable();
88018 +}
88019 +
88020 +static tree create_new_var(tree type)
88021 +{
88022 + tree new_var = create_tmp_var(type, "cicus");
88023 +
88024 + add_referenced_var(new_var);
88025 + mark_sym_for_renaming(new_var);
88026 + return new_var;
88027 +}
88028 +
88029 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
88030 +{
88031 + gimple assign;
88032 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88033 + tree type = TREE_TYPE(rhs1);
88034 + tree lhs = create_new_var(type);
88035 +
88036 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
88037 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
88038 +
88039 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
88040 + update_stmt(assign);
88041 + gimple_set_plf(assign, MY_STMT, true);
88042 + return assign;
88043 +}
88044 +
88045 +static bool is_bool(const_tree node)
88046 +{
88047 + const_tree type;
88048 +
88049 + if (node == NULL_TREE)
88050 + return false;
88051 +
88052 + type = TREE_TYPE(node);
88053 + if (!INTEGRAL_TYPE_P(type))
88054 + return false;
88055 + if (TREE_CODE(type) == BOOLEAN_TYPE)
88056 + return true;
88057 + if (TYPE_PRECISION(type) == 1)
88058 + return true;
88059 + return false;
88060 +}
88061 +
88062 +static tree cast_a_tree(tree type, tree var)
88063 +{
88064 + gcc_assert(type != NULL_TREE);
88065 + gcc_assert(var != NULL_TREE);
88066 + gcc_assert(fold_convertible_p(type, var));
88067 +
88068 + return fold_convert(type, var);
88069 +}
88070 +
88071 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before)
88072 +{
88073 + gimple assign;
88074 +
88075 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
88076 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
88077 + gcc_unreachable();
88078 +
88079 + if (lhs == CREATE_NEW_VAR)
88080 + lhs = create_new_var(dst_type);
88081 +
88082 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
88083 +
88084 + if (!gsi_end_p(*gsi)) {
88085 + location_t loc = gimple_location(gsi_stmt(*gsi));
88086 + gimple_set_location(assign, loc);
88087 + }
88088 +
88089 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
88090 +
88091 + if (before)
88092 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
88093 + else
88094 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
88095 + update_stmt(assign);
88096 + gimple_set_plf(assign, MY_STMT, true);
88097 +
88098 + return assign;
88099 +}
88100 +
88101 +static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
88102 +{
88103 + const_gimple assign;
88104 + gimple_stmt_iterator gsi;
88105 +
88106 + if (new_rhs1 == NULL_TREE)
88107 + return NULL_TREE;
88108 +
88109 + if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
88110 + gsi = gsi_for_stmt(stmt);
88111 + assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
88112 + return gimple_get_lhs(assign);
88113 + }
88114 + return new_rhs1;
88115 +}
88116 +
88117 +static tree follow_overflow_type_and_dup(struct pointer_set_t *visited, gimple stmt, const_tree node, tree new_rhs1, tree new_rhs2, tree new_rhs3)
88118 +{
88119 + tree size_overflow_type = get_size_overflow_type(stmt, node);
88120 +
88121 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88122 +
88123 + if (new_rhs2 != NULL_TREE)
88124 + new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
88125 +
88126 + if (new_rhs3 != NULL_TREE)
88127 + new_rhs3 = cast_to_new_size_overflow_type(stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
88128 +
88129 + return dup_assign(visited, stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
88130 +}
88131 +
88132 +
88133 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
88134 +{
88135 + tree size_overflow_type, lhs;
88136 + gimple stmt;
88137 + gimple_stmt_iterator gsi;
88138 +
88139 + if (rhs1 == NULL_TREE) {
88140 + debug_gimple_stmt(oldstmt);
88141 + error("%s: rhs1 is NULL_TREE", __func__);
88142 + gcc_unreachable();
88143 + }
88144 +
88145 + if (gimple_code(oldstmt) == GIMPLE_ASM)
88146 + lhs = rhs1;
88147 + else
88148 + lhs = gimple_get_lhs(oldstmt);
88149 +
88150 + gsi = gsi_for_stmt(oldstmt);
88151 + pointer_set_insert(visited, oldstmt);
88152 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
88153 + basic_block next_bb, cur_bb;
88154 + const_edge e;
88155 +
88156 + gcc_assert(before == false);
88157 + gcc_assert(stmt_can_throw_internal(oldstmt));
88158 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
88159 + gcc_assert(!gsi_end_p(gsi));
88160 +
88161 + cur_bb = gimple_bb(oldstmt);
88162 + next_bb = cur_bb->next_bb;
88163 + e = find_edge(cur_bb, next_bb);
88164 + gcc_assert(e != NULL);
88165 + gcc_assert(e->flags & EDGE_FALLTHRU);
88166 +
88167 + gsi = gsi_after_labels(next_bb);
88168 + gcc_assert(!gsi_end_p(gsi));
88169 +
88170 + before = true;
88171 + oldstmt = gsi_stmt(gsi);
88172 + }
88173 +
88174 + size_overflow_type = get_size_overflow_type(oldstmt, lhs);
88175 +
88176 + stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
88177 + gimple_set_plf(stmt, MY_STMT, true);
88178 + return gimple_get_lhs(stmt);
88179 +}
88180 +
88181 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
88182 +{
88183 + gimple stmt;
88184 + gimple_stmt_iterator gsi;
88185 + tree new_var, lhs = gimple_get_lhs(oldstmt);
88186 +
88187 + if (gimple_plf(oldstmt, MY_STMT))
88188 + return lhs;
88189 +
88190 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
88191 + rhs1 = gimple_assign_rhs1(oldstmt);
88192 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
88193 + }
88194 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
88195 + rhs2 = gimple_assign_rhs2(oldstmt);
88196 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
88197 + }
88198 +
88199 + stmt = gimple_copy(oldstmt);
88200 + gimple_set_location(stmt, gimple_location(oldstmt));
88201 + gimple_set_plf(stmt, MY_STMT, true);
88202 +
88203 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
88204 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
88205 +
88206 + if (is_bool(lhs))
88207 + new_var = SSA_NAME_VAR(lhs);
88208 + else
88209 + new_var = create_new_var(size_overflow_type);
88210 + new_var = make_ssa_name(new_var, stmt);
88211 + gimple_set_lhs(stmt, new_var);
88212 +
88213 + if (rhs1 != NULL_TREE) {
88214 + if (!gimple_assign_cast_p(oldstmt))
88215 + rhs1 = cast_a_tree(size_overflow_type, rhs1);
88216 + gimple_assign_set_rhs1(stmt, rhs1);
88217 + }
88218 +
88219 + if (rhs2 != NULL_TREE)
88220 + gimple_assign_set_rhs2(stmt, rhs2);
88221 +#if BUILDING_GCC_VERSION >= 4007
88222 + if (rhs3 != NULL_TREE)
88223 + gimple_assign_set_rhs3(stmt, rhs3);
88224 +#endif
88225 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
88226 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
88227 +
88228 + gsi = gsi_for_stmt(oldstmt);
88229 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
88230 + update_stmt(stmt);
88231 + pointer_set_insert(visited, oldstmt);
88232 + return gimple_get_lhs(stmt);
88233 +}
88234 +
88235 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
88236 +{
88237 + basic_block bb;
88238 + gimple phi;
88239 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
88240 +
88241 + bb = gsi_bb(gsi);
88242 +
88243 + phi = create_phi_node(result, bb);
88244 + gsi = gsi_last(phi_nodes(bb));
88245 + gsi_remove(&gsi, false);
88246 +
88247 + gsi = gsi_for_stmt(oldstmt);
88248 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
88249 + gimple_set_bb(phi, bb);
88250 + gimple_set_plf(phi, MY_STMT, true);
88251 + return phi;
88252 +}
88253 +
88254 +static basic_block create_a_first_bb(void)
88255 +{
88256 + basic_block first_bb;
88257 +
88258 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88259 + if (dom_info_available_p(CDI_DOMINATORS))
88260 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
88261 + return first_bb;
88262 +}
88263 +
88264 +static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
88265 +{
88266 + basic_block bb;
88267 + const_gimple newstmt;
88268 + gimple_stmt_iterator gsi;
88269 + bool before = BEFORE_STMT;
88270 +
88271 + if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
88272 + gsi = gsi_for_stmt(get_def_stmt(arg));
88273 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
88274 + return gimple_get_lhs(newstmt);
88275 + }
88276 +
88277 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
88278 + gsi = gsi_after_labels(bb);
88279 + if (bb->index == 0) {
88280 + bb = create_a_first_bb();
88281 + gsi = gsi_start_bb(bb);
88282 + }
88283 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
88284 + return gimple_get_lhs(newstmt);
88285 +}
88286 +
88287 +static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs)
88288 +{
88289 + gimple newstmt;
88290 + gimple_stmt_iterator gsi;
88291 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
88292 + gimple def_newstmt = get_def_stmt(new_rhs);
88293 +
88294 + gsi_insert = gsi_insert_after;
88295 + gsi = gsi_for_stmt(def_newstmt);
88296 +
88297 + switch (gimple_code(get_def_stmt(arg))) {
88298 + case GIMPLE_PHI:
88299 + newstmt = gimple_build_assign(new_var, new_rhs);
88300 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
88301 + gsi_insert = gsi_insert_before;
88302 + break;
88303 + case GIMPLE_ASM:
88304 + case GIMPLE_CALL:
88305 + newstmt = gimple_build_assign(new_var, new_rhs);
88306 + break;
88307 + case GIMPLE_ASSIGN:
88308 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
88309 + break;
88310 + default:
88311 + /* unknown gimple_code (handle_build_new_phi_arg) */
88312 + gcc_unreachable();
88313 + }
88314 +
88315 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
88316 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
88317 + gimple_set_plf(newstmt, MY_STMT, true);
88318 + update_stmt(newstmt);
88319 + return newstmt;
88320 +}
88321 +
88322 +static tree build_new_phi_arg(struct pointer_set_t *visited, tree size_overflow_type, tree arg, tree new_var)
88323 +{
88324 + const_gimple newstmt;
88325 + gimple def_stmt;
88326 + tree new_rhs;
88327 +
88328 + new_rhs = expand(visited, arg);
88329 + if (new_rhs == NULL_TREE)
88330 + return NULL_TREE;
88331 +
88332 + def_stmt = get_def_stmt(new_rhs);
88333 + if (gimple_code(def_stmt) == GIMPLE_NOP)
88334 + return NULL_TREE;
88335 + new_rhs = cast_to_new_size_overflow_type(def_stmt, new_rhs, size_overflow_type, AFTER_STMT);
88336 +
88337 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
88338 + return gimple_get_lhs(newstmt);
88339 +}
88340 +
88341 +static tree build_new_phi(struct pointer_set_t *visited, tree orig_result)
88342 +{
88343 + gimple phi, oldstmt = get_def_stmt(orig_result);
88344 + tree new_result, size_overflow_type;
88345 + unsigned int i;
88346 + unsigned int n = gimple_phi_num_args(oldstmt);
88347 +
88348 + size_overflow_type = get_size_overflow_type(oldstmt, orig_result);
88349 +
88350 + new_result = create_new_var(size_overflow_type);
88351 +
88352 + pointer_set_insert(visited, oldstmt);
88353 + phi = overflow_create_phi_node(oldstmt, new_result);
88354 + for (i = 0; i < n; i++) {
88355 + tree arg, lhs;
88356 +
88357 + arg = gimple_phi_arg_def(oldstmt, i);
88358 + if (is_gimple_constant(arg))
88359 + arg = cast_a_tree(size_overflow_type, arg);
88360 + lhs = build_new_phi_arg(visited, size_overflow_type, arg, new_result);
88361 + if (lhs == NULL_TREE)
88362 + lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_result, i);
88363 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
88364 + }
88365 +
88366 + update_stmt(phi);
88367 + return gimple_phi_result(phi);
88368 +}
88369 +
88370 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
88371 +{
88372 + const_gimple assign;
88373 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88374 + tree origtype = TREE_TYPE(orig_rhs);
88375 +
88376 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
88377 +
88378 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
88379 + return gimple_get_lhs(assign);
88380 +}
88381 +
88382 +static void change_rhs1(gimple stmt, tree new_rhs1)
88383 +{
88384 + tree assign_rhs;
88385 + const_tree rhs = gimple_assign_rhs1(stmt);
88386 +
88387 + assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
88388 + gimple_assign_set_rhs1(stmt, assign_rhs);
88389 + update_stmt(stmt);
88390 +}
88391 +
88392 +static bool check_mode_type(const_gimple stmt)
88393 +{
88394 + const_tree lhs = gimple_get_lhs(stmt);
88395 + const_tree lhs_type = TREE_TYPE(lhs);
88396 + const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
88397 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
88398 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
88399 +
88400 + if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
88401 + return false;
88402 +
88403 + if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
88404 + return false;
88405 +
88406 + return true;
88407 +}
88408 +
88409 +static bool check_undefined_integer_operation(const_gimple stmt)
88410 +{
88411 + const_gimple def_stmt;
88412 + const_tree lhs = gimple_get_lhs(stmt);
88413 + const_tree rhs1 = gimple_assign_rhs1(stmt);
88414 + const_tree rhs1_type = TREE_TYPE(rhs1);
88415 + const_tree lhs_type = TREE_TYPE(lhs);
88416 +
88417 + if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
88418 + return false;
88419 +
88420 + def_stmt = get_def_stmt(rhs1);
88421 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
88422 + return false;
88423 +
88424 + if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
88425 + return false;
88426 + return true;
88427 +}
88428 +
88429 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
88430 +{
88431 + const_tree rhs1, lhs, rhs1_type, lhs_type;
88432 + enum machine_mode lhs_mode, rhs_mode;
88433 + gimple def_stmt = get_def_stmt(no_const_rhs);
88434 +
88435 + if (!gimple_assign_cast_p(def_stmt))
88436 + return false;
88437 +
88438 + rhs1 = gimple_assign_rhs1(def_stmt);
88439 + lhs = gimple_get_lhs(def_stmt);
88440 + rhs1_type = TREE_TYPE(rhs1);
88441 + lhs_type = TREE_TYPE(lhs);
88442 + rhs_mode = TYPE_MODE(rhs1_type);
88443 + lhs_mode = TYPE_MODE(lhs_type);
88444 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
88445 + return false;
88446 +
88447 + return true;
88448 +}
88449 +
88450 +static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
88451 +{
88452 + tree size_overflow_type, lhs = gimple_get_lhs(stmt);
88453 + tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
88454 + const_tree rhs1_type = TREE_TYPE(rhs1);
88455 + const_tree lhs_type = TREE_TYPE(lhs);
88456 +
88457 + new_rhs1 = expand(visited, rhs1);
88458 +
88459 + if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
88460 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88461 +
88462 + if (gimple_plf(stmt, MY_STMT))
88463 + return lhs;
88464 +
88465 + if (gimple_plf(stmt, NO_CAST_CHECK))
88466 + return follow_overflow_type_and_dup(visited, stmt, rhs1, new_rhs1, NULL_TREE, NULL_TREE);
88467 +
88468 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
88469 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
88470 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88471 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
88472 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88473 + }
88474 +
88475 + if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
88476 + return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
88477 +
88478 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
88479 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88480 +
88481 + change_rhs1(stmt, new_rhs1);
88482 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
88483 +
88484 + rhs1 = gimple_assign_rhs1(stmt);
88485 + rhs1_type = TREE_TYPE(rhs1);
88486 + if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
88487 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
88488 +
88489 + if (!check_mode_type(stmt))
88490 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88491 +
88492 + size_overflow_type = get_size_overflow_type(stmt, lhs);
88493 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
88494 +
88495 + check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, BEFORE_STMT);
88496 +
88497 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88498 +}
88499 +
88500 +static tree handle_unary_ops(struct pointer_set_t *visited, tree lhs)
88501 +{
88502 + gimple def_stmt = get_def_stmt(lhs);
88503 + tree rhs1 = gimple_assign_rhs1(def_stmt);
88504 +
88505 + if (is_gimple_constant(rhs1))
88506 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
88507 +
88508 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
88509 + switch (TREE_CODE(rhs1)) {
88510 + case SSA_NAME:
88511 + return handle_unary_rhs(visited, def_stmt);
88512 + case ARRAY_REF:
88513 + case BIT_FIELD_REF:
88514 + case ADDR_EXPR:
88515 + case COMPONENT_REF:
88516 + case INDIRECT_REF:
88517 +#if BUILDING_GCC_VERSION >= 4006
88518 + case MEM_REF:
88519 +#endif
88520 + case PARM_DECL:
88521 + case TARGET_MEM_REF:
88522 + case VAR_DECL:
88523 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
88524 +
88525 + default:
88526 + debug_gimple_stmt(def_stmt);
88527 + debug_tree(rhs1);
88528 + gcc_unreachable();
88529 + }
88530 +}
88531 +
88532 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
88533 +{
88534 + gimple cond_stmt;
88535 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
88536 +
88537 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
88538 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
88539 + update_stmt(cond_stmt);
88540 +}
88541 +
88542 +static tree create_string_param(tree string)
88543 +{
88544 + tree i_type, a_type;
88545 + const int length = TREE_STRING_LENGTH(string);
88546 +
88547 + gcc_assert(length > 0);
88548 +
88549 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
88550 + a_type = build_array_type(char_type_node, i_type);
88551 +
88552 + TREE_TYPE(string) = a_type;
88553 + TREE_CONSTANT(string) = 1;
88554 + TREE_READONLY(string) = 1;
88555 +
88556 + return build1(ADDR_EXPR, ptr_type_node, string);
88557 +}
88558 +
88559 +static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
88560 +{
88561 + gimple func_stmt;
88562 + const_gimple def_stmt;
88563 + const_tree loc_line;
88564 + tree loc_file, ssa_name, current_func;
88565 + expanded_location xloc;
88566 + char ssa_name_buf[256];
88567 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
88568 +
88569 + def_stmt = get_def_stmt(arg);
88570 + xloc = expand_location(gimple_location(def_stmt));
88571 +
88572 + if (!gimple_has_location(def_stmt)) {
88573 + xloc = expand_location(gimple_location(stmt));
88574 + if (!gimple_has_location(stmt))
88575 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
88576 + }
88577 +
88578 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
88579 +
88580 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
88581 + loc_file = create_string_param(loc_file);
88582 +
88583 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
88584 + current_func = create_string_param(current_func);
88585 +
88586 + snprintf(ssa_name_buf, 256, "%s_%u (%s)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max");
88587 + ssa_name = build_string(256, ssa_name_buf);
88588 + ssa_name = create_string_param(ssa_name);
88589 +
88590 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
88591 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
88592 +
88593 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
88594 +}
88595 +
88596 +static void __unused print_the_code_insertions(const_gimple stmt)
88597 +{
88598 + location_t loc = gimple_location(stmt);
88599 +
88600 + inform(loc, "Integer size_overflow check applied here.");
88601 +}
88602 +
88603 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
88604 +{
88605 + basic_block cond_bb, join_bb, bb_true;
88606 + edge e;
88607 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
88608 +
88609 + cond_bb = gimple_bb(stmt);
88610 + if (before)
88611 + gsi_prev(&gsi);
88612 + if (gsi_end_p(gsi))
88613 + e = split_block_after_labels(cond_bb);
88614 + else
88615 + e = split_block(cond_bb, gsi_stmt(gsi));
88616 + cond_bb = e->src;
88617 + join_bb = e->dest;
88618 + e->flags = EDGE_FALSE_VALUE;
88619 + e->probability = REG_BR_PROB_BASE;
88620 +
88621 + bb_true = create_empty_bb(cond_bb);
88622 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
88623 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
88624 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
88625 +
88626 + if (dom_info_available_p(CDI_DOMINATORS)) {
88627 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
88628 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
88629 + }
88630 +
88631 + if (current_loops != NULL) {
88632 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
88633 + add_bb_to_loop(bb_true, cond_bb->loop_father);
88634 + }
88635 +
88636 + insert_cond(cond_bb, arg, cond_code, type_value);
88637 + insert_cond_result(bb_true, stmt, arg, min);
88638 +
88639 +// print_the_code_insertions(stmt);
88640 +}
88641 +
88642 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
88643 +{
88644 + const_tree rhs_type = TREE_TYPE(rhs);
88645 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
88646 +
88647 + gcc_assert(rhs_type != NULL_TREE);
88648 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
88649 + return;
88650 +
88651 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
88652 +
88653 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
88654 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
88655 +
88656 + gcc_assert(!TREE_OVERFLOW(type_max));
88657 +
88658 + cast_rhs_type = TREE_TYPE(cast_rhs);
88659 + type_max_type = TREE_TYPE(type_max);
88660 + type_min_type = TREE_TYPE(type_min);
88661 + gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
88662 + gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
88663 +
88664 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
88665 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
88666 +}
88667 +
88668 +static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs)
88669 +{
88670 + gimple change_rhs_def_stmt;
88671 + tree lhs = gimple_get_lhs(def_stmt);
88672 + tree lhs_type = TREE_TYPE(lhs);
88673 + tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
88674 + tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
88675 +
88676 + if (change_rhs == NULL_TREE)
88677 + return get_size_overflow_type(def_stmt, lhs);
88678 +
88679 + change_rhs_def_stmt = get_def_stmt(change_rhs);
88680 +
88681 + if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
88682 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88683 +
88684 + if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
88685 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88686 +
88687 + if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
88688 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
88689 +
88690 + if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
88691 + debug_gimple_stmt(def_stmt);
88692 + gcc_unreachable();
88693 + }
88694 +
88695 + return get_size_overflow_type(def_stmt, lhs);
88696 +}
88697 +
88698 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
88699 +{
88700 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
88701 + return false;
88702 + if (!is_gimple_constant(rhs))
88703 + return false;
88704 + return true;
88705 +}
88706 +
88707 +static tree get_cast_def_stmt_rhs(const_tree new_rhs)
88708 +{
88709 + gimple def_stmt;
88710 +
88711 + def_stmt = get_def_stmt(new_rhs);
88712 + // get_size_overflow_type
88713 + if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
88714 + gcc_assert(gimple_assign_cast_p(def_stmt));
88715 + return gimple_assign_rhs1(def_stmt);
88716 +}
88717 +
88718 +static tree cast_to_int_TI_type_and_check(gimple stmt, tree new_rhs)
88719 +{
88720 + gimple_stmt_iterator gsi;
88721 + const_gimple cast_stmt;
88722 + gimple def_stmt;
88723 + enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
88724 +
88725 + if (mode != TImode && mode != DImode) {
88726 + def_stmt = get_def_stmt(new_rhs);
88727 + gcc_assert(gimple_assign_cast_p(def_stmt));
88728 + new_rhs = gimple_assign_rhs1(def_stmt);
88729 + mode = TYPE_MODE(TREE_TYPE(new_rhs));
88730 + }
88731 +
88732 + gcc_assert(mode == TImode || mode == DImode);
88733 +
88734 + if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
88735 + return new_rhs;
88736 +
88737 + gsi = gsi_for_stmt(stmt);
88738 + cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
88739 + new_rhs = gimple_get_lhs(cast_stmt);
88740 +
88741 + if (mode == DImode)
88742 + return new_rhs;
88743 +
88744 + check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, BEFORE_STMT);
88745 +
88746 + return new_rhs;
88747 +}
88748 +
88749 +static bool is_an_integer_trunction(const_gimple stmt)
88750 +{
88751 + gimple rhs1_def_stmt, rhs2_def_stmt;
88752 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
88753 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
88754 + const_tree rhs1 = gimple_assign_rhs1(stmt);
88755 + const_tree rhs2 = gimple_assign_rhs2(stmt);
88756 + enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
88757 + enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
88758 +
88759 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
88760 + return false;
88761 +
88762 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
88763 +
88764 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
88765 + return false;
88766 +
88767 + rhs1_def_stmt = get_def_stmt(rhs1);
88768 + rhs2_def_stmt = get_def_stmt(rhs2);
88769 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
88770 + return false;
88771 +
88772 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
88773 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
88774 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
88775 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
88776 + if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
88777 + return false;
88778 +
88779 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
88780 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
88781 + return true;
88782 +}
88783 +
88784 +static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
88785 +{
88786 + tree new_rhs1, new_rhs2;
88787 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
88788 + tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
88789 + gimple assign, stmt = get_def_stmt(lhs);
88790 + tree rhs1 = gimple_assign_rhs1(stmt);
88791 + tree rhs2 = gimple_assign_rhs2(stmt);
88792 +
88793 + if (!is_an_integer_trunction(stmt))
88794 + return NULL_TREE;
88795 +
88796 + new_rhs1 = expand(visited, rhs1);
88797 + new_rhs2 = expand(visited, rhs2);
88798 +
88799 + new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
88800 + new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
88801 +
88802 + new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
88803 + new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
88804 +
88805 + if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
88806 + new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs1_def_stmt_rhs1);
88807 + new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs2_def_stmt_rhs1);
88808 + }
88809 +
88810 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
88811 + new_lhs = gimple_get_lhs(assign);
88812 + check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
88813 +
88814 + return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
88815 +}
88816 +
88817 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
88818 +{
88819 + const_gimple def_stmt;
88820 +
88821 + if (TREE_CODE(rhs) != SSA_NAME)
88822 + return false;
88823 +
88824 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
88825 + return false;
88826 +
88827 + def_stmt = get_def_stmt(rhs);
88828 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
88829 + return false;
88830 +
88831 + return true;
88832 +}
88833 +
88834 +static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2)
88835 +{
88836 + tree new_rhs, size_overflow_type, orig_rhs;
88837 + void (*gimple_assign_set_rhs)(gimple, tree);
88838 + tree rhs1 = gimple_assign_rhs1(stmt);
88839 + tree rhs2 = gimple_assign_rhs2(stmt);
88840 + tree lhs = gimple_get_lhs(stmt);
88841 +
88842 + if (change_rhs == NULL_TREE)
88843 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88844 +
88845 + if (new_rhs2 == NULL_TREE) {
88846 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1);
88847 + new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
88848 + orig_rhs = rhs1;
88849 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
88850 + } else {
88851 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2);
88852 + new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
88853 + orig_rhs = rhs2;
88854 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
88855 + }
88856 +
88857 + change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT);
88858 +
88859 + if (check_overflow)
88860 + check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, BEFORE_STMT);
88861 +
88862 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
88863 + gimple_assign_set_rhs(stmt, new_rhs);
88864 + update_stmt(stmt);
88865 +
88866 + return create_assign(visited, stmt, lhs, AFTER_STMT);
88867 +}
88868 +
88869 +static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs)
88870 +{
88871 + tree rhs1, rhs2, new_lhs;
88872 + gimple def_stmt = get_def_stmt(lhs);
88873 + tree new_rhs1 = NULL_TREE;
88874 + tree new_rhs2 = NULL_TREE;
88875 +
88876 + rhs1 = gimple_assign_rhs1(def_stmt);
88877 + rhs2 = gimple_assign_rhs2(def_stmt);
88878 +
88879 + /* no DImode/TImode division in the 32/64 bit kernel */
88880 + switch (gimple_assign_rhs_code(def_stmt)) {
88881 + case RDIV_EXPR:
88882 + case TRUNC_DIV_EXPR:
88883 + case CEIL_DIV_EXPR:
88884 + case FLOOR_DIV_EXPR:
88885 + case ROUND_DIV_EXPR:
88886 + case TRUNC_MOD_EXPR:
88887 + case CEIL_MOD_EXPR:
88888 + case FLOOR_MOD_EXPR:
88889 + case ROUND_MOD_EXPR:
88890 + case EXACT_DIV_EXPR:
88891 + case POINTER_PLUS_EXPR:
88892 + case BIT_AND_EXPR:
88893 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
88894 + default:
88895 + break;
88896 + }
88897 +
88898 + new_lhs = handle_integer_truncation(visited, lhs);
88899 + if (new_lhs != NULL_TREE)
88900 + return new_lhs;
88901 +
88902 + if (TREE_CODE(rhs1) == SSA_NAME)
88903 + new_rhs1 = expand(visited, rhs1);
88904 + if (TREE_CODE(rhs2) == SSA_NAME)
88905 + new_rhs2 = expand(visited, rhs2);
88906 +
88907 + if (is_a_neg_overflow(def_stmt, rhs2))
88908 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
88909 + if (is_a_neg_overflow(def_stmt, rhs1))
88910 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
88911 +
88912 + if (is_a_constant_overflow(def_stmt, rhs2))
88913 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE);
88914 + if (is_a_constant_overflow(def_stmt, rhs1))
88915 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2);
88916 +
88917 + return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
88918 +}
88919 +
88920 +#if BUILDING_GCC_VERSION >= 4007
88921 +static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs)
88922 +{
88923 + if (is_gimple_constant(rhs))
88924 + return cast_a_tree(size_overflow_type, rhs);
88925 + if (TREE_CODE(rhs) != SSA_NAME)
88926 + return NULL_TREE;
88927 + return expand(visited, rhs);
88928 +}
88929 +
88930 +static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs)
88931 +{
88932 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
88933 + gimple def_stmt = get_def_stmt(lhs);
88934 +
88935 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
88936 +
88937 + rhs1 = gimple_assign_rhs1(def_stmt);
88938 + rhs2 = gimple_assign_rhs2(def_stmt);
88939 + rhs3 = gimple_assign_rhs3(def_stmt);
88940 + new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1);
88941 + new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
88942 + new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
88943 +
88944 + return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
88945 +}
88946 +#endif
88947 +
88948 +static tree get_size_overflow_type(gimple stmt, const_tree node)
88949 +{
88950 + const_tree type;
88951 +
88952 + gcc_assert(node != NULL_TREE);
88953 +
88954 + type = TREE_TYPE(node);
88955 +
88956 + if (gimple_plf(stmt, MY_STMT))
88957 + return TREE_TYPE(node);
88958 +
88959 + switch (TYPE_MODE(type)) {
88960 + case QImode:
88961 + return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
88962 + case HImode:
88963 + return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
88964 + case SImode:
88965 + return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
88966 + case DImode:
88967 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
88968 + return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
88969 + return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
88970 + default:
88971 + debug_tree((tree)node);
88972 + error("%s: unsupported gcc configuration.", __func__);
88973 + gcc_unreachable();
88974 + }
88975 +}
88976 +
88977 +static tree expand_visited(gimple def_stmt)
88978 +{
88979 + const_gimple next_stmt;
88980 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
88981 +
88982 + gsi_next(&gsi);
88983 + next_stmt = gsi_stmt(gsi);
88984 +
88985 + gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
88986 +
88987 + switch (gimple_code(next_stmt)) {
88988 + case GIMPLE_ASSIGN:
88989 + return gimple_get_lhs(next_stmt);
88990 + case GIMPLE_PHI:
88991 + return gimple_phi_result(next_stmt);
88992 + case GIMPLE_CALL:
88993 + return gimple_call_lhs(next_stmt);
88994 + default:
88995 + return NULL_TREE;
88996 + }
88997 +}
88998 +
88999 +static tree expand(struct pointer_set_t *visited, tree lhs)
89000 +{
89001 + gimple def_stmt;
89002 + enum tree_code code = TREE_CODE(TREE_TYPE(lhs));
89003 +
89004 + if (is_gimple_constant(lhs))
89005 + return NULL_TREE;
89006 +
89007 + if (TREE_CODE(lhs) == ADDR_EXPR)
89008 + return NULL_TREE;
89009 +
89010 + if (code == REAL_TYPE)
89011 + return NULL_TREE;
89012 +
89013 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
89014 +
89015 +
89016 + def_stmt = get_def_stmt(lhs);
89017 +
89018 + if (!def_stmt)
89019 + return NULL_TREE;
89020 +
89021 + if (gimple_plf(def_stmt, MY_STMT))
89022 + return lhs;
89023 +
89024 + if (pointer_set_contains(visited, def_stmt))
89025 + return expand_visited(def_stmt);
89026 +
89027 + switch (gimple_code(def_stmt)) {
89028 + case GIMPLE_NOP:
89029 + return NULL_TREE;
89030 + case GIMPLE_PHI:
89031 + return build_new_phi(visited, lhs);
89032 + case GIMPLE_CALL:
89033 + case GIMPLE_ASM:
89034 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
89035 + case GIMPLE_ASSIGN:
89036 + switch (gimple_num_ops(def_stmt)) {
89037 + case 2:
89038 + return handle_unary_ops(visited, lhs);
89039 + case 3:
89040 + return handle_binary_ops(visited, lhs);
89041 +#if BUILDING_GCC_VERSION >= 4007
89042 + case 4:
89043 + return handle_ternary_ops(visited, lhs);
89044 +#endif
89045 + }
89046 + default:
89047 + debug_gimple_stmt(def_stmt);
89048 + error("%s: unknown gimple code", __func__);
89049 + gcc_unreachable();
89050 + }
89051 +}
89052 +
89053 +static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg)
89054 +{
89055 + const_gimple assign;
89056 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
89057 + tree origtype = TREE_TYPE(origarg);
89058 +
89059 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
89060 +
89061 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
89062 +
89063 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
89064 + update_stmt(stmt);
89065 +}
89066 +
89067 +static bool get_function_arg(unsigned int* argnum, const_tree fndecl)
89068 +{
89069 + const char *origid;
89070 + tree arg;
89071 + const_tree origarg;
89072 +
89073 + if (!DECL_ABSTRACT_ORIGIN(fndecl))
89074 + return true;
89075 +
89076 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
89077 + while (origarg && *argnum) {
89078 + (*argnum)--;
89079 + origarg = TREE_CHAIN(origarg);
89080 + }
89081 +
89082 + gcc_assert(*argnum == 0);
89083 +
89084 + gcc_assert(origarg != NULL_TREE);
89085 + origid = NAME(origarg);
89086 + *argnum = 0;
89087 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
89088 + if (!strcmp(origid, NAME(arg)))
89089 + return true;
89090 + (*argnum)++;
89091 + }
89092 + return false;
89093 +}
89094 +
89095 +static bool skip_types(const_tree var)
89096 +{
89097 + switch (TREE_CODE(var)) {
89098 + case ADDR_EXPR:
89099 +#if BUILDING_GCC_VERSION >= 4006
89100 + case MEM_REF:
89101 +#endif
89102 + case ARRAY_REF:
89103 + case BIT_FIELD_REF:
89104 + case INDIRECT_REF:
89105 + case TARGET_MEM_REF:
89106 + case VAR_DECL:
89107 + return true;
89108 + default:
89109 + break;
89110 + }
89111 + return false;
89112 +}
89113 +
89114 +static bool walk_phi(struct pointer_set_t *visited, const_tree result)
89115 +{
89116 + gimple phi = get_def_stmt(result);
89117 + unsigned int i, n = gimple_phi_num_args(phi);
89118 +
89119 + if (!phi)
89120 + return false;
89121 +
89122 + pointer_set_insert(visited, phi);
89123 + for (i = 0; i < n; i++) {
89124 + const_tree arg = gimple_phi_arg_def(phi, i);
89125 + if (pre_expand(visited, arg))
89126 + return true;
89127 + }
89128 + return false;
89129 +}
89130 +
89131 +static bool walk_unary_ops(struct pointer_set_t *visited, const_tree lhs)
89132 +{
89133 + gimple def_stmt = get_def_stmt(lhs);
89134 + const_tree rhs;
89135 +
89136 + if (!def_stmt)
89137 + return false;
89138 +
89139 + rhs = gimple_assign_rhs1(def_stmt);
89140 + if (pre_expand(visited, rhs))
89141 + return true;
89142 + return false;
89143 +}
89144 +
89145 +static bool walk_binary_ops(struct pointer_set_t *visited, const_tree lhs)
89146 +{
89147 + bool rhs1_found, rhs2_found;
89148 + gimple def_stmt = get_def_stmt(lhs);
89149 + const_tree rhs1, rhs2;
89150 +
89151 + if (!def_stmt)
89152 + return false;
89153 +
89154 + rhs1 = gimple_assign_rhs1(def_stmt);
89155 + rhs2 = gimple_assign_rhs2(def_stmt);
89156 + rhs1_found = pre_expand(visited, rhs1);
89157 + rhs2_found = pre_expand(visited, rhs2);
89158 +
89159 + return rhs1_found || rhs2_found;
89160 +}
89161 +
89162 +static const_tree search_field_decl(const_tree comp_ref)
89163 +{
89164 + const_tree field = NULL_TREE;
89165 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
89166 +
89167 + for (i = 0; i < len; i++) {
89168 + field = TREE_OPERAND(comp_ref, i);
89169 + if (TREE_CODE(field) == FIELD_DECL)
89170 + break;
89171 + }
89172 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
89173 + return field;
89174 +}
89175 +
89176 +static enum marked mark_status(const_tree fndecl, unsigned int argnum)
89177 +{
89178 + const_tree attr, p;
89179 +
89180 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
89181 + if (!attr || !TREE_VALUE(attr))
89182 + return MARKED_NO;
89183 +
89184 + p = TREE_VALUE(attr);
89185 + if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
89186 + return MARKED_NOT_INTENTIONAL;
89187 +
89188 + do {
89189 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
89190 + return MARKED_YES;
89191 + p = TREE_CHAIN(p);
89192 + } while (p);
89193 +
89194 + return MARKED_NO;
89195 +}
89196 +
89197 +static void print_missing_msg(tree func, unsigned int argnum)
89198 +{
89199 + unsigned int new_hash;
89200 + size_t len;
89201 + unsigned char tree_codes[CODES_LIMIT];
89202 + location_t loc = DECL_SOURCE_LOCATION(func);
89203 + const char *curfunc = get_asm_name(func);
89204 +
89205 + len = get_function_decl(func, tree_codes);
89206 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
89207 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
89208 +}
89209 +
89210 +static unsigned int search_missing_attribute(const_tree arg)
89211 +{
89212 + const_tree type = TREE_TYPE(arg);
89213 + tree func = get_original_function_decl(current_function_decl);
89214 + unsigned int argnum;
89215 + const struct size_overflow_hash *hash;
89216 +
89217 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
89218 +
89219 + if (TREE_CODE(type) == POINTER_TYPE)
89220 + return 0;
89221 +
89222 + argnum = find_arg_number(arg, func);
89223 + if (argnum == 0)
89224 + return 0;
89225 +
89226 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
89227 + return argnum;
89228 +
89229 + hash = get_function_hash(func);
89230 + if (!hash || !(hash->param & (1U << argnum))) {
89231 + print_missing_msg(func, argnum);
89232 + return 0;
89233 + }
89234 + return argnum;
89235 +}
89236 +
89237 +static bool is_already_marked(const_tree lhs)
89238 +{
89239 + unsigned int argnum;
89240 + const_tree fndecl;
89241 +
89242 + argnum = search_missing_attribute(lhs);
89243 + fndecl = get_original_function_decl(current_function_decl);
89244 + if (argnum && mark_status(fndecl, argnum) == MARKED_YES)
89245 + return true;
89246 + return false;
89247 +}
89248 +
89249 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs)
89250 +{
89251 + const_gimple def_stmt;
89252 +
89253 + if (is_gimple_constant(lhs))
89254 + return false;
89255 +
89256 + if (skip_types(lhs))
89257 + return false;
89258 +
89259 + if (TREE_CODE(lhs) == PARM_DECL)
89260 + return is_already_marked(lhs);
89261 +
89262 + if (TREE_CODE(lhs) == COMPONENT_REF) {
89263 + const_tree field, attr;
89264 +
89265 + field = search_field_decl(lhs);
89266 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
89267 + if (!attr || !TREE_VALUE(attr))
89268 + return false;
89269 + return true;
89270 + }
89271 +
89272 + def_stmt = get_def_stmt(lhs);
89273 +
89274 + if (!def_stmt)
89275 + return false;
89276 +
89277 + if (pointer_set_contains(visited, def_stmt))
89278 + return false;
89279 +
89280 + switch (gimple_code(def_stmt)) {
89281 + case GIMPLE_NOP:
89282 + if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL)
89283 + return is_already_marked(lhs);
89284 + return false;
89285 + case GIMPLE_PHI:
89286 + return walk_phi(visited, lhs);
89287 + case GIMPLE_CALL:
89288 + case GIMPLE_ASM:
89289 + return false;
89290 + case GIMPLE_ASSIGN:
89291 + switch (gimple_num_ops(def_stmt)) {
89292 + case 2:
89293 + return walk_unary_ops(visited, lhs);
89294 + case 3:
89295 + return walk_binary_ops(visited, lhs);
89296 + }
89297 + default:
89298 + debug_gimple_stmt((gimple)def_stmt);
89299 + error("%s: unknown gimple code", __func__);
89300 + gcc_unreachable();
89301 + }
89302 +}
89303 +
89304 +static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum)
89305 +{
89306 + struct pointer_set_t *visited;
89307 + bool is_found;
89308 + enum marked is_marked;
89309 + location_t loc;
89310 +
89311 + visited = pointer_set_create();
89312 + is_found = pre_expand(visited, arg);
89313 + pointer_set_destroy(visited);
89314 +
89315 + is_marked = mark_status(fndecl, argnum + 1);
89316 + if ((is_found && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL)
89317 + return true;
89318 +
89319 + if (is_found) {
89320 + loc = DECL_SOURCE_LOCATION(fndecl);
89321 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1);
89322 + return true;
89323 + }
89324 + return false;
89325 +}
89326 +
89327 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
89328 +{
89329 + struct pointer_set_t *visited;
89330 + tree arg, newarg;
89331 + bool match;
89332 +
89333 + match = get_function_arg(&argnum, fndecl);
89334 + if (!match)
89335 + return;
89336 + gcc_assert(gimple_call_num_args(stmt) > argnum);
89337 + arg = gimple_call_arg(stmt, argnum);
89338 + if (arg == NULL_TREE)
89339 + return;
89340 +
89341 + if (is_gimple_constant(arg))
89342 + return;
89343 +
89344 + if (search_attributes(fndecl, arg, argnum))
89345 + return;
89346 +
89347 + if (TREE_CODE(arg) != SSA_NAME)
89348 + return;
89349 +
89350 + check_arg_type(arg);
89351 +
89352 + visited = pointer_set_create();
89353 + newarg = expand(visited, arg);
89354 + pointer_set_destroy(visited);
89355 +
89356 + if (newarg == NULL_TREE)
89357 + return;
89358 +
89359 + change_function_arg(stmt, arg, argnum, newarg);
89360 +
89361 + check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, BEFORE_STMT);
89362 +}
89363 +
89364 +static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl)
89365 +{
89366 + tree p = TREE_VALUE(attr);
89367 + do {
89368 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
89369 + p = TREE_CHAIN(p);
89370 + } while (p);
89371 +}
89372 +
89373 +static void handle_function_by_hash(gimple stmt, tree fndecl)
89374 +{
89375 + tree orig_fndecl;
89376 + unsigned int num;
89377 + const struct size_overflow_hash *hash;
89378 +
89379 + orig_fndecl = get_original_function_decl(fndecl);
89380 + if (C_DECL_IMPLICIT(orig_fndecl))
89381 + return;
89382 + hash = get_function_hash(orig_fndecl);
89383 + if (!hash)
89384 + return;
89385 +
89386 + for (num = 1; num <= MAX_PARAM; num++)
89387 + if (hash->param & (1U << num))
89388 + handle_function_arg(stmt, fndecl, num - 1);
89389 +}
89390 +
89391 +static void set_plf_false(void)
89392 +{
89393 + basic_block bb;
89394 +
89395 + FOR_ALL_BB(bb) {
89396 + gimple_stmt_iterator si;
89397 +
89398 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
89399 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
89400 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
89401 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
89402 + }
89403 +}
89404 +
89405 +static unsigned int handle_function(void)
89406 +{
89407 + basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
89408 +
89409 + set_plf_false();
89410 +
89411 + do {
89412 + gimple_stmt_iterator gsi;
89413 + next = bb->next_bb;
89414 +
89415 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
89416 + tree fndecl, attr;
89417 + gimple stmt = gsi_stmt(gsi);
89418 +
89419 + if (!(is_gimple_call(stmt)))
89420 + continue;
89421 + fndecl = gimple_call_fndecl(stmt);
89422 + if (fndecl == NULL_TREE)
89423 + continue;
89424 + if (gimple_call_num_args(stmt) == 0)
89425 + continue;
89426 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
89427 + if (!attr || !TREE_VALUE(attr))
89428 + handle_function_by_hash(stmt, fndecl);
89429 + else
89430 + handle_function_by_attribute(stmt, attr, fndecl);
89431 + gsi = gsi_for_stmt(stmt);
89432 + next = gimple_bb(stmt)->next_bb;
89433 + }
89434 + bb = next;
89435 + } while (bb);
89436 + return 0;
89437 +}
89438 +
89439 +static struct gimple_opt_pass size_overflow_pass = {
89440 + .pass = {
89441 + .type = GIMPLE_PASS,
89442 + .name = "size_overflow",
89443 + .gate = NULL,
89444 + .execute = handle_function,
89445 + .sub = NULL,
89446 + .next = NULL,
89447 + .static_pass_number = 0,
89448 + .tv_id = TV_NONE,
89449 + .properties_required = PROP_cfg | PROP_referenced_vars,
89450 + .properties_provided = 0,
89451 + .properties_destroyed = 0,
89452 + .todo_flags_start = 0,
89453 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
89454 + }
89455 +};
89456 +
89457 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
89458 +{
89459 + tree fntype;
89460 +
89461 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
89462 +
89463 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
89464 + fntype = build_function_type_list(void_type_node,
89465 + const_char_ptr_type_node,
89466 + unsigned_type_node,
89467 + const_char_ptr_type_node,
89468 + const_char_ptr_type_node,
89469 + NULL_TREE);
89470 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
89471 +
89472 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
89473 + TREE_PUBLIC(report_size_overflow_decl) = 1;
89474 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
89475 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
89476 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
89477 +}
89478 +
89479 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
89480 +{
89481 + int i;
89482 + const char * const plugin_name = plugin_info->base_name;
89483 + const int argc = plugin_info->argc;
89484 + const struct plugin_argument * const argv = plugin_info->argv;
89485 + bool enable = true;
89486 +
89487 + struct register_pass_info size_overflow_pass_info = {
89488 + .pass = &size_overflow_pass.pass,
89489 + .reference_pass_name = "ssa",
89490 + .ref_pass_instance_number = 1,
89491 + .pos_op = PASS_POS_INSERT_AFTER
89492 + };
89493 +
89494 + if (!plugin_default_version_check(version, &gcc_version)) {
89495 + error(G_("incompatible gcc/plugin versions"));
89496 + return 1;
89497 + }
89498 +
89499 + for (i = 0; i < argc; ++i) {
89500 + if (!strcmp(argv[i].key, "no-size-overflow")) {
89501 + enable = false;
89502 + continue;
89503 + }
89504 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89505 + }
89506 +
89507 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
89508 + if (enable) {
89509 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
89510 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
89511 + }
89512 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
89513 +
89514 + return 0;
89515 +}
89516 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
89517 new file mode 100644
89518 index 0000000..38d2014
89519 --- /dev/null
89520 +++ b/tools/gcc/stackleak_plugin.c
89521 @@ -0,0 +1,313 @@
89522 +/*
89523 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
89524 + * Licensed under the GPL v2
89525 + *
89526 + * Note: the choice of the license means that the compilation process is
89527 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
89528 + * but for the kernel it doesn't matter since it doesn't link against
89529 + * any of the gcc libraries
89530 + *
89531 + * gcc plugin to help implement various PaX features
89532 + *
89533 + * - track lowest stack pointer
89534 + *
89535 + * TODO:
89536 + * - initialize all local variables
89537 + *
89538 + * BUGS:
89539 + * - none known
89540 + */
89541 +#include "gcc-plugin.h"
89542 +#include "config.h"
89543 +#include "system.h"
89544 +#include "coretypes.h"
89545 +#include "tree.h"
89546 +#include "tree-pass.h"
89547 +#include "flags.h"
89548 +#include "intl.h"
89549 +#include "toplev.h"
89550 +#include "plugin.h"
89551 +//#include "expr.h" where are you...
89552 +#include "diagnostic.h"
89553 +#include "plugin-version.h"
89554 +#include "tm.h"
89555 +#include "function.h"
89556 +#include "basic-block.h"
89557 +#include "gimple.h"
89558 +#include "rtl.h"
89559 +#include "emit-rtl.h"
89560 +
89561 +extern void print_gimple_stmt(FILE *, gimple, int, int);
89562 +
89563 +int plugin_is_GPL_compatible;
89564 +
89565 +static int track_frame_size = -1;
89566 +static const char track_function[] = "pax_track_stack";
89567 +static const char check_function[] = "pax_check_alloca";
89568 +static bool init_locals;
89569 +
89570 +static struct plugin_info stackleak_plugin_info = {
89571 + .version = "201203140940",
89572 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
89573 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
89574 +};
89575 +
89576 +static bool gate_stackleak_track_stack(void);
89577 +static unsigned int execute_stackleak_tree_instrument(void);
89578 +static unsigned int execute_stackleak_final(void);
89579 +
89580 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
89581 + .pass = {
89582 + .type = GIMPLE_PASS,
89583 + .name = "stackleak_tree_instrument",
89584 + .gate = gate_stackleak_track_stack,
89585 + .execute = execute_stackleak_tree_instrument,
89586 + .sub = NULL,
89587 + .next = NULL,
89588 + .static_pass_number = 0,
89589 + .tv_id = TV_NONE,
89590 + .properties_required = PROP_gimple_leh | PROP_cfg,
89591 + .properties_provided = 0,
89592 + .properties_destroyed = 0,
89593 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
89594 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
89595 + }
89596 +};
89597 +
89598 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
89599 + .pass = {
89600 + .type = RTL_PASS,
89601 + .name = "stackleak_final",
89602 + .gate = gate_stackleak_track_stack,
89603 + .execute = execute_stackleak_final,
89604 + .sub = NULL,
89605 + .next = NULL,
89606 + .static_pass_number = 0,
89607 + .tv_id = TV_NONE,
89608 + .properties_required = 0,
89609 + .properties_provided = 0,
89610 + .properties_destroyed = 0,
89611 + .todo_flags_start = 0,
89612 + .todo_flags_finish = TODO_dump_func
89613 + }
89614 +};
89615 +
89616 +static bool gate_stackleak_track_stack(void)
89617 +{
89618 + return track_frame_size >= 0;
89619 +}
89620 +
89621 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
89622 +{
89623 + gimple check_alloca;
89624 + tree fntype, fndecl, alloca_size;
89625 +
89626 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
89627 + fndecl = build_fn_decl(check_function, fntype);
89628 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
89629 +
89630 + // insert call to void pax_check_alloca(unsigned long size)
89631 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
89632 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
89633 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
89634 +}
89635 +
89636 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
89637 +{
89638 + gimple track_stack;
89639 + tree fntype, fndecl;
89640 +
89641 + fntype = build_function_type_list(void_type_node, NULL_TREE);
89642 + fndecl = build_fn_decl(track_function, fntype);
89643 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
89644 +
89645 + // insert call to void pax_track_stack(void)
89646 + track_stack = gimple_build_call(fndecl, 0);
89647 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
89648 +}
89649 +
89650 +#if BUILDING_GCC_VERSION == 4005
89651 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
89652 +{
89653 + tree fndecl;
89654 +
89655 + if (!is_gimple_call(stmt))
89656 + return false;
89657 + fndecl = gimple_call_fndecl(stmt);
89658 + if (!fndecl)
89659 + return false;
89660 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
89661 + return false;
89662 +// print_node(stderr, "pax", fndecl, 4);
89663 + return DECL_FUNCTION_CODE(fndecl) == code;
89664 +}
89665 +#endif
89666 +
89667 +static bool is_alloca(gimple stmt)
89668 +{
89669 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
89670 + return true;
89671 +
89672 +#if BUILDING_GCC_VERSION >= 4007
89673 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
89674 + return true;
89675 +#endif
89676 +
89677 + return false;
89678 +}
89679 +
89680 +static unsigned int execute_stackleak_tree_instrument(void)
89681 +{
89682 + basic_block bb, entry_bb;
89683 + bool prologue_instrumented = false, is_leaf = true;
89684 +
89685 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
89686 +
89687 + // 1. loop through BBs and GIMPLE statements
89688 + FOR_EACH_BB(bb) {
89689 + gimple_stmt_iterator gsi;
89690 +
89691 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
89692 + gimple stmt;
89693 +
89694 + stmt = gsi_stmt(gsi);
89695 +
89696 + if (is_gimple_call(stmt))
89697 + is_leaf = false;
89698 +
89699 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
89700 + if (!is_alloca(stmt))
89701 + continue;
89702 +
89703 + // 2. insert stack overflow check before each __builtin_alloca call
89704 + stackleak_check_alloca(&gsi);
89705 +
89706 + // 3. insert track call after each __builtin_alloca call
89707 + stackleak_add_instrumentation(&gsi);
89708 + if (bb == entry_bb)
89709 + prologue_instrumented = true;
89710 + }
89711 + }
89712 +
89713 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
89714 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
89715 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
89716 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
89717 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
89718 + return 0;
89719 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
89720 + return 0;
89721 +
89722 + // 4. insert track call at the beginning
89723 + if (!prologue_instrumented) {
89724 + gimple_stmt_iterator gsi;
89725 +
89726 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
89727 + if (dom_info_available_p(CDI_DOMINATORS))
89728 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
89729 + gsi = gsi_start_bb(bb);
89730 + stackleak_add_instrumentation(&gsi);
89731 + }
89732 +
89733 + return 0;
89734 +}
89735 +
89736 +static unsigned int execute_stackleak_final(void)
89737 +{
89738 + rtx insn;
89739 +
89740 + if (cfun->calls_alloca)
89741 + return 0;
89742 +
89743 + // keep calls only if function frame is big enough
89744 + if (get_frame_size() >= track_frame_size)
89745 + return 0;
89746 +
89747 + // 1. find pax_track_stack calls
89748 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
89749 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
89750 + rtx body;
89751 +
89752 + if (!CALL_P(insn))
89753 + continue;
89754 + body = PATTERN(insn);
89755 + if (GET_CODE(body) != CALL)
89756 + continue;
89757 + body = XEXP(body, 0);
89758 + if (GET_CODE(body) != MEM)
89759 + continue;
89760 + body = XEXP(body, 0);
89761 + if (GET_CODE(body) != SYMBOL_REF)
89762 + continue;
89763 + if (strcmp(XSTR(body, 0), track_function))
89764 + continue;
89765 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89766 + // 2. delete call
89767 + insn = delete_insn_and_edges(insn);
89768 +#if BUILDING_GCC_VERSION >= 4007
89769 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
89770 + insn = delete_insn_and_edges(insn);
89771 +#endif
89772 + }
89773 +
89774 +// print_simple_rtl(stderr, get_insns());
89775 +// print_rtl(stderr, get_insns());
89776 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89777 +
89778 + return 0;
89779 +}
89780 +
89781 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
89782 +{
89783 + const char * const plugin_name = plugin_info->base_name;
89784 + const int argc = plugin_info->argc;
89785 + const struct plugin_argument * const argv = plugin_info->argv;
89786 + int i;
89787 + struct register_pass_info stackleak_tree_instrument_pass_info = {
89788 + .pass = &stackleak_tree_instrument_pass.pass,
89789 +// .reference_pass_name = "tree_profile",
89790 + .reference_pass_name = "optimized",
89791 + .ref_pass_instance_number = 1,
89792 + .pos_op = PASS_POS_INSERT_BEFORE
89793 + };
89794 + struct register_pass_info stackleak_final_pass_info = {
89795 + .pass = &stackleak_final_rtl_opt_pass.pass,
89796 + .reference_pass_name = "final",
89797 + .ref_pass_instance_number = 1,
89798 + .pos_op = PASS_POS_INSERT_BEFORE
89799 + };
89800 +
89801 + if (!plugin_default_version_check(version, &gcc_version)) {
89802 + error(G_("incompatible gcc/plugin versions"));
89803 + return 1;
89804 + }
89805 +
89806 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
89807 +
89808 + for (i = 0; i < argc; ++i) {
89809 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
89810 + if (!argv[i].value) {
89811 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89812 + continue;
89813 + }
89814 + track_frame_size = atoi(argv[i].value);
89815 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
89816 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89817 + continue;
89818 + }
89819 + if (!strcmp(argv[i].key, "initialize-locals")) {
89820 + if (argv[i].value) {
89821 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89822 + continue;
89823 + }
89824 + init_locals = true;
89825 + continue;
89826 + }
89827 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89828 + }
89829 +
89830 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
89831 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
89832 +
89833 + return 0;
89834 +}
89835 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
89836 index 6789d78..4afd019e 100644
89837 --- a/tools/perf/util/include/asm/alternative-asm.h
89838 +++ b/tools/perf/util/include/asm/alternative-asm.h
89839 @@ -5,4 +5,7 @@
89840
89841 #define altinstruction_entry #
89842
89843 + .macro pax_force_retaddr rip=0, reload=0
89844 + .endm
89845 +
89846 #endif
89847 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
89848 index af0f22f..9a7d479 100644
89849 --- a/usr/gen_init_cpio.c
89850 +++ b/usr/gen_init_cpio.c
89851 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
89852 int retval;
89853 int rc = -1;
89854 int namesize;
89855 - int i;
89856 + unsigned int i;
89857
89858 mode |= S_IFREG;
89859
89860 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
89861 *env_var = *expanded = '\0';
89862 strncat(env_var, start + 2, end - start - 2);
89863 strncat(expanded, new_location, start - new_location);
89864 - strncat(expanded, getenv(env_var), PATH_MAX);
89865 - strncat(expanded, end + 1, PATH_MAX);
89866 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
89867 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
89868 strncpy(new_location, expanded, PATH_MAX);
89869 + new_location[PATH_MAX] = 0;
89870 } else
89871 break;
89872 }
89873 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
89874 index 44ee712..a01c4b8 100644
89875 --- a/virt/kvm/kvm_main.c
89876 +++ b/virt/kvm/kvm_main.c
89877 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
89878
89879 static cpumask_var_t cpus_hardware_enabled;
89880 static int kvm_usage_count = 0;
89881 -static atomic_t hardware_enable_failed;
89882 +static atomic_unchecked_t hardware_enable_failed;
89883
89884 struct kmem_cache *kvm_vcpu_cache;
89885 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
89886 @@ -703,7 +703,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
89887 /* We can read the guest memory with __xxx_user() later on. */
89888 if (user_alloc &&
89889 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
89890 - !access_ok(VERIFY_WRITE,
89891 + !__access_ok(VERIFY_WRITE,
89892 (void __user *)(unsigned long)mem->userspace_addr,
89893 mem->memory_size)))
89894 goto out;
89895 @@ -2291,7 +2291,7 @@ static void hardware_enable_nolock(void *junk)
89896
89897 if (r) {
89898 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
89899 - atomic_inc(&hardware_enable_failed);
89900 + atomic_inc_unchecked(&hardware_enable_failed);
89901 printk(KERN_INFO "kvm: enabling virtualization on "
89902 "CPU%d failed\n", cpu);
89903 }
89904 @@ -2345,10 +2345,10 @@ static int hardware_enable_all(void)
89905
89906 kvm_usage_count++;
89907 if (kvm_usage_count == 1) {
89908 - atomic_set(&hardware_enable_failed, 0);
89909 + atomic_set_unchecked(&hardware_enable_failed, 0);
89910 on_each_cpu(hardware_enable_nolock, NULL, 1);
89911
89912 - if (atomic_read(&hardware_enable_failed)) {
89913 + if (atomic_read_unchecked(&hardware_enable_failed)) {
89914 hardware_disable_all_nolock();
89915 r = -EBUSY;
89916 }
89917 @@ -2709,7 +2709,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
89918 kvm_arch_vcpu_put(vcpu);
89919 }
89920
89921 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89922 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89923 struct module *module)
89924 {
89925 int r;
89926 @@ -2772,7 +2772,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89927 if (!vcpu_align)
89928 vcpu_align = __alignof__(struct kvm_vcpu);
89929 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
89930 - 0, NULL);
89931 + SLAB_USERCOPY, NULL);
89932 if (!kvm_vcpu_cache) {
89933 r = -ENOMEM;
89934 goto out_free_3;
89935 @@ -2782,9 +2782,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
89936 if (r)
89937 goto out_free;
89938
89939 - kvm_chardev_ops.owner = module;
89940 - kvm_vm_fops.owner = module;
89941 - kvm_vcpu_fops.owner = module;
89942 + pax_open_kernel();
89943 + *(void **)&kvm_chardev_ops.owner = module;
89944 + *(void **)&kvm_vm_fops.owner = module;
89945 + *(void **)&kvm_vcpu_fops.owner = module;
89946 + pax_close_kernel();
89947
89948 r = misc_register(&kvm_dev);
89949 if (r) {