]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.4.6-201207281434.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.6-201207281434.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..781c7ad 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 @@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126 -linux
127 +lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131 @@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135 -media
136 mconf
137 +mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144 +mkpiggy
145 mkprep
146 mkregtable
147 mktables
148 @@ -188,6 +207,7 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152 +parse-events*
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156 @@ -197,6 +217,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160 +pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164 @@ -207,6 +228,7 @@ r300_reg_safe.h
165 r420_reg_safe.h
166 r600_reg_safe.h
167 recordmcount
168 +regdb.c
169 relocs
170 rlim_names.h
171 rn50_reg_safe.h
172 @@ -216,7 +238,9 @@ series
173 setup
174 setup.bin
175 setup.elf
176 +size_overflow_hash.h
177 sImage
178 +slabinfo
179 sm_tbl*
180 split-include
181 syscalltab.h
182 @@ -227,6 +251,7 @@ tftpboot.img
183 timeconst.h
184 times.h*
185 trix_boot.h
186 +user_constants.h
187 utsrelease.h*
188 vdso-syms.lds
189 vdso.lds
190 @@ -238,13 +263,17 @@ vdso32.lds
191 vdso32.so.dbg
192 vdso64.lds
193 vdso64.so.dbg
194 +vdsox32.lds
195 +vdsox32-syms.lds
196 version.h*
197 vmImage
198 vmlinux
199 vmlinux-*
200 vmlinux.aout
201 vmlinux.bin.all
202 +vmlinux.bin.bz2
203 vmlinux.lds
204 +vmlinux.relocs
205 vmlinuz
206 voffset.h
207 vsyscall.lds
208 @@ -252,9 +281,11 @@ vsyscall_32.lds
209 wanxlfw.inc
210 uImage
211 unifdef
212 +utsrelease.h
213 wakeup.bin
214 wakeup.elf
215 wakeup.lds
216 zImage*
217 zconf.hash.c
218 +zconf.lex.c
219 zoffset.h
220 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
221 index c1601e5..08557ce 100644
222 --- a/Documentation/kernel-parameters.txt
223 +++ b/Documentation/kernel-parameters.txt
224 @@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
225 the specified number of seconds. This is to be used if
226 your oopses keep scrolling off the screen.
227
228 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
229 + virtualization environments that don't cope well with the
230 + expand down segment used by UDEREF on X86-32 or the frequent
231 + page table updates on X86-64.
232 +
233 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
234 +
235 pcbit= [HW,ISDN]
236
237 pcd. [PARIDE]
238 diff --git a/Makefile b/Makefile
239 index 5d0edcb..f69ee4c 100644
240 --- a/Makefile
241 +++ b/Makefile
242 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
243
244 HOSTCC = gcc
245 HOSTCXX = g++
246 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
247 -HOSTCXXFLAGS = -O2
248 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
249 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
250 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
251
252 # Decide whether to build built-in, modular, or both.
253 # Normally, just do built-in.
254 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
255 # Rules shared between *config targets and build targets
256
257 # Basic helpers built in scripts/
258 -PHONY += scripts_basic
259 -scripts_basic:
260 +PHONY += scripts_basic gcc-plugins
261 +scripts_basic: gcc-plugins
262 $(Q)$(MAKE) $(build)=scripts/basic
263 $(Q)rm -f .tmp_quiet_recordmcount
264
265 @@ -564,6 +565,60 @@ else
266 KBUILD_CFLAGS += -O2
267 endif
268
269 +ifndef DISABLE_PAX_PLUGINS
270 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
271 +ifneq ($(PLUGINCC),)
272 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
273 +ifndef CONFIG_UML
274 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
275 +endif
276 +endif
277 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
278 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
279 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
280 +endif
281 +ifdef CONFIG_KALLOCSTAT_PLUGIN
282 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
283 +endif
284 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
285 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
286 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
287 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
288 +endif
289 +ifdef CONFIG_CHECKER_PLUGIN
290 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
291 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
292 +endif
293 +endif
294 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
295 +ifdef CONFIG_PAX_SIZE_OVERFLOW
296 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
297 +endif
298 +ifdef CONFIG_PAX_LATENT_ENTROPY
299 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
300 +endif
301 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
302 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
303 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
304 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
305 +export PLUGINCC CONSTIFY_PLUGIN
306 +ifeq ($(KBUILD_EXTMOD),)
307 +gcc-plugins:
308 + $(Q)$(MAKE) $(build)=tools/gcc
309 +else
310 +gcc-plugins: ;
311 +endif
312 +else
313 +gcc-plugins:
314 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
315 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
316 +else
317 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
318 +endif
319 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
320 +endif
321 +endif
322 +
323 include $(srctree)/arch/$(SRCARCH)/Makefile
324
325 ifneq ($(CONFIG_FRAME_WARN),0)
326 @@ -708,7 +763,7 @@ export mod_strip_cmd
327
328
329 ifeq ($(KBUILD_EXTMOD),)
330 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
331 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
332
333 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
334 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
335 @@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
336
337 # The actual objects are generated when descending,
338 # make sure no implicit rule kicks in
339 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
342
343 # Handle descending into subdirectories listed in $(vmlinux-dirs)
344 @@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
345 # Error messages still appears in the original language
346
347 PHONY += $(vmlinux-dirs)
348 -$(vmlinux-dirs): prepare scripts
349 +$(vmlinux-dirs): gcc-plugins prepare scripts
350 $(Q)$(MAKE) $(build)=$@
351
352 # Store (new) KERNELRELASE string in include/config/kernel.release
353 @@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
354 $(Q)$(MAKE) $(build)=.
355
356 # All the preparing..
357 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
358 prepare: prepare0
359
360 # Generate some files
361 @@ -1092,6 +1150,8 @@ all: modules
362 # using awk while concatenating to the final file.
363
364 PHONY += modules
365 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
368 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
369 @$(kecho) ' Building modules, stage 2.';
370 @@ -1107,7 +1167,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
371
372 # Target to prepare building external modules
373 PHONY += modules_prepare
374 -modules_prepare: prepare scripts
375 +modules_prepare: gcc-plugins prepare scripts
376
377 # Target to install modules
378 PHONY += modules_install
379 @@ -1166,7 +1226,7 @@ CLEAN_FILES += vmlinux System.map \
380 MRPROPER_DIRS += include/config usr/include include/generated \
381 arch/*/include/generated
382 MRPROPER_FILES += .config .config.old .version .old_version \
383 - include/linux/version.h \
384 + include/linux/version.h tools/gcc/size_overflow_hash.h\
385 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
386
387 # clean - Delete most, but leave enough to build external modules
388 @@ -1204,6 +1264,7 @@ distclean: mrproper
389 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
390 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
391 -o -name '.*.rej' \
392 + -o -name '.*.rej' -o -name '*.so' \
393 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
394 -type f -print | xargs rm -f
395
396 @@ -1364,6 +1425,8 @@ PHONY += $(module-dirs) modules
397 $(module-dirs): crmodverdir $(objtree)/Module.symvers
398 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
399
400 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 modules: $(module-dirs)
403 @$(kecho) ' Building modules, stage 2.';
404 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
405 @@ -1490,17 +1553,21 @@ else
406 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
407 endif
408
409 -%.s: %.c prepare scripts FORCE
410 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
411 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
412 +%.s: %.c gcc-plugins prepare scripts FORCE
413 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
414 %.i: %.c prepare scripts FORCE
415 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
416 -%.o: %.c prepare scripts FORCE
417 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
418 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
419 +%.o: %.c gcc-plugins prepare scripts FORCE
420 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
421 %.lst: %.c prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423 -%.s: %.S prepare scripts FORCE
424 +%.s: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 -%.o: %.S prepare scripts FORCE
427 +%.o: %.S gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.symtypes: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431 @@ -1510,11 +1577,15 @@ endif
432 $(cmd_crmodverdir)
433 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
434 $(build)=$(build-dir)
435 -%/: prepare scripts FORCE
436 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438 +%/: gcc-plugins prepare scripts FORCE
439 $(cmd_crmodverdir)
440 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
441 $(build)=$(build-dir)
442 -%.ko: prepare scripts FORCE
443 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
444 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
445 +%.ko: gcc-plugins prepare scripts FORCE
446 $(cmd_crmodverdir)
447 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
448 $(build)=$(build-dir) $(@:.ko=.o)
449 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
450 index 3bb7ffe..347a54c 100644
451 --- a/arch/alpha/include/asm/atomic.h
452 +++ b/arch/alpha/include/asm/atomic.h
453 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
454 #define atomic_dec(v) atomic_sub(1,(v))
455 #define atomic64_dec(v) atomic64_sub(1,(v))
456
457 +#define atomic64_read_unchecked(v) atomic64_read(v)
458 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
459 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
460 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
461 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
462 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
463 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
464 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
465 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
466 +
467 #define smp_mb__before_atomic_dec() smp_mb()
468 #define smp_mb__after_atomic_dec() smp_mb()
469 #define smp_mb__before_atomic_inc() smp_mb()
470 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
471 index ad368a9..fbe0f25 100644
472 --- a/arch/alpha/include/asm/cache.h
473 +++ b/arch/alpha/include/asm/cache.h
474 @@ -4,19 +4,19 @@
475 #ifndef __ARCH_ALPHA_CACHE_H
476 #define __ARCH_ALPHA_CACHE_H
477
478 +#include <linux/const.h>
479
480 /* Bytes per L1 (data) cache line. */
481 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
482 -# define L1_CACHE_BYTES 64
483 # define L1_CACHE_SHIFT 6
484 #else
485 /* Both EV4 and EV5 are write-through, read-allocate,
486 direct-mapped, physical.
487 */
488 -# define L1_CACHE_BYTES 32
489 # define L1_CACHE_SHIFT 5
490 #endif
491
492 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
493 #define SMP_CACHE_BYTES L1_CACHE_BYTES
494
495 #endif
496 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
497 index 968d999..d36b2df 100644
498 --- a/arch/alpha/include/asm/elf.h
499 +++ b/arch/alpha/include/asm/elf.h
500 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
501
502 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
503
504 +#ifdef CONFIG_PAX_ASLR
505 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
506 +
507 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
508 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
509 +#endif
510 +
511 /* $0 is set by ld.so to a pointer to a function which might be
512 registered using atexit. This provides a mean for the dynamic
513 linker to call DT_FINI functions for shared libraries that have
514 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
515 index bc2a0da..8ad11ee 100644
516 --- a/arch/alpha/include/asm/pgalloc.h
517 +++ b/arch/alpha/include/asm/pgalloc.h
518 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
519 pgd_set(pgd, pmd);
520 }
521
522 +static inline void
523 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
524 +{
525 + pgd_populate(mm, pgd, pmd);
526 +}
527 +
528 extern pgd_t *pgd_alloc(struct mm_struct *mm);
529
530 static inline void
531 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
532 index 81a4342..348b927 100644
533 --- a/arch/alpha/include/asm/pgtable.h
534 +++ b/arch/alpha/include/asm/pgtable.h
535 @@ -102,6 +102,17 @@ struct vm_area_struct;
536 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
537 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
538 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
539 +
540 +#ifdef CONFIG_PAX_PAGEEXEC
541 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
542 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
543 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
544 +#else
545 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
546 +# define PAGE_COPY_NOEXEC PAGE_COPY
547 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
548 +#endif
549 +
550 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
551
552 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
553 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
554 index 2fd00b7..cfd5069 100644
555 --- a/arch/alpha/kernel/module.c
556 +++ b/arch/alpha/kernel/module.c
557 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
558
559 /* The small sections were sorted to the end of the segment.
560 The following should definitely cover them. */
561 - gp = (u64)me->module_core + me->core_size - 0x8000;
562 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
563 got = sechdrs[me->arch.gotsecindex].sh_addr;
564
565 for (i = 0; i < n; i++) {
566 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
567 index 49ee319..9ee7d14 100644
568 --- a/arch/alpha/kernel/osf_sys.c
569 +++ b/arch/alpha/kernel/osf_sys.c
570 @@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
571 /* At this point: (!vma || addr < vma->vm_end). */
572 if (limit - len < addr)
573 return -ENOMEM;
574 - if (!vma || addr + len <= vma->vm_start)
575 + if (check_heap_stack_gap(vma, addr, len))
576 return addr;
577 addr = vma->vm_end;
578 vma = vma->vm_next;
579 @@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
580 merely specific addresses, but regions of memory -- perhaps
581 this feature should be incorporated into all ports? */
582
583 +#ifdef CONFIG_PAX_RANDMMAP
584 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
585 +#endif
586 +
587 if (addr) {
588 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
589 if (addr != (unsigned long) -ENOMEM)
590 @@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
591 }
592
593 /* Next, try allocating at TASK_UNMAPPED_BASE. */
594 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
595 - len, limit);
596 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
597 +
598 if (addr != (unsigned long) -ENOMEM)
599 return addr;
600
601 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
602 index 5eecab1..609abc0 100644
603 --- a/arch/alpha/mm/fault.c
604 +++ b/arch/alpha/mm/fault.c
605 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
606 __reload_thread(pcb);
607 }
608
609 +#ifdef CONFIG_PAX_PAGEEXEC
610 +/*
611 + * PaX: decide what to do with offenders (regs->pc = fault address)
612 + *
613 + * returns 1 when task should be killed
614 + * 2 when patched PLT trampoline was detected
615 + * 3 when unpatched PLT trampoline was detected
616 + */
617 +static int pax_handle_fetch_fault(struct pt_regs *regs)
618 +{
619 +
620 +#ifdef CONFIG_PAX_EMUPLT
621 + int err;
622 +
623 + do { /* PaX: patched PLT emulation #1 */
624 + unsigned int ldah, ldq, jmp;
625 +
626 + err = get_user(ldah, (unsigned int *)regs->pc);
627 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
628 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
629 +
630 + if (err)
631 + break;
632 +
633 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
634 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
635 + jmp == 0x6BFB0000U)
636 + {
637 + unsigned long r27, addr;
638 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
639 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
640 +
641 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
642 + err = get_user(r27, (unsigned long *)addr);
643 + if (err)
644 + break;
645 +
646 + regs->r27 = r27;
647 + regs->pc = r27;
648 + return 2;
649 + }
650 + } while (0);
651 +
652 + do { /* PaX: patched PLT emulation #2 */
653 + unsigned int ldah, lda, br;
654 +
655 + err = get_user(ldah, (unsigned int *)regs->pc);
656 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
657 + err |= get_user(br, (unsigned int *)(regs->pc+8));
658 +
659 + if (err)
660 + break;
661 +
662 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
663 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
664 + (br & 0xFFE00000U) == 0xC3E00000U)
665 + {
666 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
667 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
668 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
669 +
670 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
671 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
672 + return 2;
673 + }
674 + } while (0);
675 +
676 + do { /* PaX: unpatched PLT emulation */
677 + unsigned int br;
678 +
679 + err = get_user(br, (unsigned int *)regs->pc);
680 +
681 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
682 + unsigned int br2, ldq, nop, jmp;
683 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
684 +
685 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
686 + err = get_user(br2, (unsigned int *)addr);
687 + err |= get_user(ldq, (unsigned int *)(addr+4));
688 + err |= get_user(nop, (unsigned int *)(addr+8));
689 + err |= get_user(jmp, (unsigned int *)(addr+12));
690 + err |= get_user(resolver, (unsigned long *)(addr+16));
691 +
692 + if (err)
693 + break;
694 +
695 + if (br2 == 0xC3600000U &&
696 + ldq == 0xA77B000CU &&
697 + nop == 0x47FF041FU &&
698 + jmp == 0x6B7B0000U)
699 + {
700 + regs->r28 = regs->pc+4;
701 + regs->r27 = addr+16;
702 + regs->pc = resolver;
703 + return 3;
704 + }
705 + }
706 + } while (0);
707 +#endif
708 +
709 + return 1;
710 +}
711 +
712 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
713 +{
714 + unsigned long i;
715 +
716 + printk(KERN_ERR "PAX: bytes at PC: ");
717 + for (i = 0; i < 5; i++) {
718 + unsigned int c;
719 + if (get_user(c, (unsigned int *)pc+i))
720 + printk(KERN_CONT "???????? ");
721 + else
722 + printk(KERN_CONT "%08x ", c);
723 + }
724 + printk("\n");
725 +}
726 +#endif
727
728 /*
729 * This routine handles page faults. It determines the address,
730 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
731 good_area:
732 si_code = SEGV_ACCERR;
733 if (cause < 0) {
734 - if (!(vma->vm_flags & VM_EXEC))
735 + if (!(vma->vm_flags & VM_EXEC)) {
736 +
737 +#ifdef CONFIG_PAX_PAGEEXEC
738 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
739 + goto bad_area;
740 +
741 + up_read(&mm->mmap_sem);
742 + switch (pax_handle_fetch_fault(regs)) {
743 +
744 +#ifdef CONFIG_PAX_EMUPLT
745 + case 2:
746 + case 3:
747 + return;
748 +#endif
749 +
750 + }
751 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
752 + do_group_exit(SIGKILL);
753 +#else
754 goto bad_area;
755 +#endif
756 +
757 + }
758 } else if (!cause) {
759 /* Allow reads even for write-only mappings */
760 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
761 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
762 index 68374ba..cff7196 100644
763 --- a/arch/arm/include/asm/atomic.h
764 +++ b/arch/arm/include/asm/atomic.h
765 @@ -17,17 +17,35 @@
766 #include <asm/barrier.h>
767 #include <asm/cmpxchg.h>
768
769 +#ifdef CONFIG_GENERIC_ATOMIC64
770 +#include <asm-generic/atomic64.h>
771 +#endif
772 +
773 #define ATOMIC_INIT(i) { (i) }
774
775 #ifdef __KERNEL__
776
777 +#define _ASM_EXTABLE(from, to) \
778 +" .pushsection __ex_table,\"a\"\n"\
779 +" .align 3\n" \
780 +" .long " #from ", " #to"\n" \
781 +" .popsection"
782 +
783 /*
784 * On ARM, ordinary assignment (str instruction) doesn't clear the local
785 * strex/ldrex monitor on some implementations. The reason we can use it for
786 * atomic_set() is the clrex or dummy strex done on every exception return.
787 */
788 #define atomic_read(v) (*(volatile int *)&(v)->counter)
789 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
790 +{
791 + return v->counter;
792 +}
793 #define atomic_set(v,i) (((v)->counter) = (i))
794 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
795 +{
796 + v->counter = i;
797 +}
798
799 #if __LINUX_ARM_ARCH__ >= 6
800
801 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
802 int result;
803
804 __asm__ __volatile__("@ atomic_add\n"
805 +"1: ldrex %1, [%3]\n"
806 +" adds %0, %1, %4\n"
807 +
808 +#ifdef CONFIG_PAX_REFCOUNT
809 +" bvc 3f\n"
810 +"2: bkpt 0xf103\n"
811 +"3:\n"
812 +#endif
813 +
814 +" strex %1, %0, [%3]\n"
815 +" teq %1, #0\n"
816 +" bne 1b"
817 +
818 +#ifdef CONFIG_PAX_REFCOUNT
819 +"\n4:\n"
820 + _ASM_EXTABLE(2b, 4b)
821 +#endif
822 +
823 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
824 + : "r" (&v->counter), "Ir" (i)
825 + : "cc");
826 +}
827 +
828 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
829 +{
830 + unsigned long tmp;
831 + int result;
832 +
833 + __asm__ __volatile__("@ atomic_add_unchecked\n"
834 "1: ldrex %0, [%3]\n"
835 " add %0, %0, %4\n"
836 " strex %1, %0, [%3]\n"
837 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
838 smp_mb();
839
840 __asm__ __volatile__("@ atomic_add_return\n"
841 +"1: ldrex %1, [%3]\n"
842 +" adds %0, %1, %4\n"
843 +
844 +#ifdef CONFIG_PAX_REFCOUNT
845 +" bvc 3f\n"
846 +" mov %0, %1\n"
847 +"2: bkpt 0xf103\n"
848 +"3:\n"
849 +#endif
850 +
851 +" strex %1, %0, [%3]\n"
852 +" teq %1, #0\n"
853 +" bne 1b"
854 +
855 +#ifdef CONFIG_PAX_REFCOUNT
856 +"\n4:\n"
857 + _ASM_EXTABLE(2b, 4b)
858 +#endif
859 +
860 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
861 + : "r" (&v->counter), "Ir" (i)
862 + : "cc");
863 +
864 + smp_mb();
865 +
866 + return result;
867 +}
868 +
869 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
870 +{
871 + unsigned long tmp;
872 + int result;
873 +
874 + smp_mb();
875 +
876 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
877 "1: ldrex %0, [%3]\n"
878 " add %0, %0, %4\n"
879 " strex %1, %0, [%3]\n"
880 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
881 int result;
882
883 __asm__ __volatile__("@ atomic_sub\n"
884 +"1: ldrex %1, [%3]\n"
885 +" subs %0, %1, %4\n"
886 +
887 +#ifdef CONFIG_PAX_REFCOUNT
888 +" bvc 3f\n"
889 +"2: bkpt 0xf103\n"
890 +"3:\n"
891 +#endif
892 +
893 +" strex %1, %0, [%3]\n"
894 +" teq %1, #0\n"
895 +" bne 1b"
896 +
897 +#ifdef CONFIG_PAX_REFCOUNT
898 +"\n4:\n"
899 + _ASM_EXTABLE(2b, 4b)
900 +#endif
901 +
902 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
903 + : "r" (&v->counter), "Ir" (i)
904 + : "cc");
905 +}
906 +
907 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
908 +{
909 + unsigned long tmp;
910 + int result;
911 +
912 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
913 "1: ldrex %0, [%3]\n"
914 " sub %0, %0, %4\n"
915 " strex %1, %0, [%3]\n"
916 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
917 smp_mb();
918
919 __asm__ __volatile__("@ atomic_sub_return\n"
920 -"1: ldrex %0, [%3]\n"
921 -" sub %0, %0, %4\n"
922 +"1: ldrex %1, [%3]\n"
923 +" sub %0, %1, %4\n"
924 +
925 +#ifdef CONFIG_PAX_REFCOUNT
926 +" bvc 3f\n"
927 +" mov %0, %1\n"
928 +"2: bkpt 0xf103\n"
929 +"3:\n"
930 +#endif
931 +
932 " strex %1, %0, [%3]\n"
933 " teq %1, #0\n"
934 " bne 1b"
935 +
936 +#ifdef CONFIG_PAX_REFCOUNT
937 +"\n4:\n"
938 + _ASM_EXTABLE(2b, 4b)
939 +#endif
940 +
941 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 : "r" (&v->counter), "Ir" (i)
943 : "cc");
944 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
945 return oldval;
946 }
947
948 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
949 +{
950 + unsigned long oldval, res;
951 +
952 + smp_mb();
953 +
954 + do {
955 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
956 + "ldrex %1, [%3]\n"
957 + "mov %0, #0\n"
958 + "teq %1, %4\n"
959 + "strexeq %0, %5, [%3]\n"
960 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
961 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
962 + : "cc");
963 + } while (res);
964 +
965 + smp_mb();
966 +
967 + return oldval;
968 +}
969 +
970 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 {
972 unsigned long tmp, tmp2;
973 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
974
975 return val;
976 }
977 +
978 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
979 +{
980 + return atomic_add_return(i, v);
981 +}
982 +
983 #define atomic_add(i, v) (void) atomic_add_return(i, v)
984 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
985 +{
986 + (void) atomic_add_return(i, v);
987 +}
988
989 static inline int atomic_sub_return(int i, atomic_t *v)
990 {
991 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
992 return val;
993 }
994 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
995 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
996 +{
997 + (void) atomic_sub_return(i, v);
998 +}
999
1000 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1001 {
1002 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1003 return ret;
1004 }
1005
1006 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1007 +{
1008 + return atomic_cmpxchg(v, old, new);
1009 +}
1010 +
1011 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1012 {
1013 unsigned long flags;
1014 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1015 #endif /* __LINUX_ARM_ARCH__ */
1016
1017 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1018 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1019 +{
1020 + return xchg(&v->counter, new);
1021 +}
1022
1023 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1024 {
1025 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1026 }
1027
1028 #define atomic_inc(v) atomic_add(1, v)
1029 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1030 +{
1031 + atomic_add_unchecked(1, v);
1032 +}
1033 #define atomic_dec(v) atomic_sub(1, v)
1034 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1035 +{
1036 + atomic_sub_unchecked(1, v);
1037 +}
1038
1039 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1040 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1041 +{
1042 + return atomic_add_return_unchecked(1, v) == 0;
1043 +}
1044 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1045 #define atomic_inc_return(v) (atomic_add_return(1, v))
1046 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1047 +{
1048 + return atomic_add_return_unchecked(1, v);
1049 +}
1050 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1051 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1052
1053 @@ -241,6 +428,14 @@ typedef struct {
1054 u64 __aligned(8) counter;
1055 } atomic64_t;
1056
1057 +#ifdef CONFIG_PAX_REFCOUNT
1058 +typedef struct {
1059 + u64 __aligned(8) counter;
1060 +} atomic64_unchecked_t;
1061 +#else
1062 +typedef atomic64_t atomic64_unchecked_t;
1063 +#endif
1064 +
1065 #define ATOMIC64_INIT(i) { (i) }
1066
1067 static inline u64 atomic64_read(atomic64_t *v)
1068 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1069 return result;
1070 }
1071
1072 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1073 +{
1074 + u64 result;
1075 +
1076 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1077 +" ldrexd %0, %H0, [%1]"
1078 + : "=&r" (result)
1079 + : "r" (&v->counter), "Qo" (v->counter)
1080 + );
1081 +
1082 + return result;
1083 +}
1084 +
1085 static inline void atomic64_set(atomic64_t *v, u64 i)
1086 {
1087 u64 tmp;
1088 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1089 : "cc");
1090 }
1091
1092 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1093 +{
1094 + u64 tmp;
1095 +
1096 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1097 +"1: ldrexd %0, %H0, [%2]\n"
1098 +" strexd %0, %3, %H3, [%2]\n"
1099 +" teq %0, #0\n"
1100 +" bne 1b"
1101 + : "=&r" (tmp), "=Qo" (v->counter)
1102 + : "r" (&v->counter), "r" (i)
1103 + : "cc");
1104 +}
1105 +
1106 static inline void atomic64_add(u64 i, atomic64_t *v)
1107 {
1108 u64 result;
1109 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1110 __asm__ __volatile__("@ atomic64_add\n"
1111 "1: ldrexd %0, %H0, [%3]\n"
1112 " adds %0, %0, %4\n"
1113 +" adcs %H0, %H0, %H4\n"
1114 +
1115 +#ifdef CONFIG_PAX_REFCOUNT
1116 +" bvc 3f\n"
1117 +"2: bkpt 0xf103\n"
1118 +"3:\n"
1119 +#endif
1120 +
1121 +" strexd %1, %0, %H0, [%3]\n"
1122 +" teq %1, #0\n"
1123 +" bne 1b"
1124 +
1125 +#ifdef CONFIG_PAX_REFCOUNT
1126 +"\n4:\n"
1127 + _ASM_EXTABLE(2b, 4b)
1128 +#endif
1129 +
1130 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131 + : "r" (&v->counter), "r" (i)
1132 + : "cc");
1133 +}
1134 +
1135 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1136 +{
1137 + u64 result;
1138 + unsigned long tmp;
1139 +
1140 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1141 +"1: ldrexd %0, %H0, [%3]\n"
1142 +" adds %0, %0, %4\n"
1143 " adc %H0, %H0, %H4\n"
1144 " strexd %1, %0, %H0, [%3]\n"
1145 " teq %1, #0\n"
1146 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1147
1148 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1149 {
1150 - u64 result;
1151 - unsigned long tmp;
1152 + u64 result, tmp;
1153
1154 smp_mb();
1155
1156 __asm__ __volatile__("@ atomic64_add_return\n"
1157 +"1: ldrexd %1, %H1, [%3]\n"
1158 +" adds %0, %1, %4\n"
1159 +" adcs %H0, %H1, %H4\n"
1160 +
1161 +#ifdef CONFIG_PAX_REFCOUNT
1162 +" bvc 3f\n"
1163 +" mov %0, %1\n"
1164 +" mov %H0, %H1\n"
1165 +"2: bkpt 0xf103\n"
1166 +"3:\n"
1167 +#endif
1168 +
1169 +" strexd %1, %0, %H0, [%3]\n"
1170 +" teq %1, #0\n"
1171 +" bne 1b"
1172 +
1173 +#ifdef CONFIG_PAX_REFCOUNT
1174 +"\n4:\n"
1175 + _ASM_EXTABLE(2b, 4b)
1176 +#endif
1177 +
1178 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1179 + : "r" (&v->counter), "r" (i)
1180 + : "cc");
1181 +
1182 + smp_mb();
1183 +
1184 + return result;
1185 +}
1186 +
1187 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1188 +{
1189 + u64 result;
1190 + unsigned long tmp;
1191 +
1192 + smp_mb();
1193 +
1194 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1195 "1: ldrexd %0, %H0, [%3]\n"
1196 " adds %0, %0, %4\n"
1197 " adc %H0, %H0, %H4\n"
1198 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1199 __asm__ __volatile__("@ atomic64_sub\n"
1200 "1: ldrexd %0, %H0, [%3]\n"
1201 " subs %0, %0, %4\n"
1202 +" sbcs %H0, %H0, %H4\n"
1203 +
1204 +#ifdef CONFIG_PAX_REFCOUNT
1205 +" bvc 3f\n"
1206 +"2: bkpt 0xf103\n"
1207 +"3:\n"
1208 +#endif
1209 +
1210 +" strexd %1, %0, %H0, [%3]\n"
1211 +" teq %1, #0\n"
1212 +" bne 1b"
1213 +
1214 +#ifdef CONFIG_PAX_REFCOUNT
1215 +"\n4:\n"
1216 + _ASM_EXTABLE(2b, 4b)
1217 +#endif
1218 +
1219 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1220 + : "r" (&v->counter), "r" (i)
1221 + : "cc");
1222 +}
1223 +
1224 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1225 +{
1226 + u64 result;
1227 + unsigned long tmp;
1228 +
1229 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1230 +"1: ldrexd %0, %H0, [%3]\n"
1231 +" subs %0, %0, %4\n"
1232 " sbc %H0, %H0, %H4\n"
1233 " strexd %1, %0, %H0, [%3]\n"
1234 " teq %1, #0\n"
1235 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1236
1237 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1238 {
1239 - u64 result;
1240 - unsigned long tmp;
1241 + u64 result, tmp;
1242
1243 smp_mb();
1244
1245 __asm__ __volatile__("@ atomic64_sub_return\n"
1246 -"1: ldrexd %0, %H0, [%3]\n"
1247 -" subs %0, %0, %4\n"
1248 -" sbc %H0, %H0, %H4\n"
1249 +"1: ldrexd %1, %H1, [%3]\n"
1250 +" subs %0, %1, %4\n"
1251 +" sbc %H0, %H1, %H4\n"
1252 +
1253 +#ifdef CONFIG_PAX_REFCOUNT
1254 +" bvc 3f\n"
1255 +" mov %0, %1\n"
1256 +" mov %H0, %H1\n"
1257 +"2: bkpt 0xf103\n"
1258 +"3:\n"
1259 +#endif
1260 +
1261 " strexd %1, %0, %H0, [%3]\n"
1262 " teq %1, #0\n"
1263 " bne 1b"
1264 +
1265 +#ifdef CONFIG_PAX_REFCOUNT
1266 +"\n4:\n"
1267 + _ASM_EXTABLE(2b, 4b)
1268 +#endif
1269 +
1270 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1271 : "r" (&v->counter), "r" (i)
1272 : "cc");
1273 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1274 return oldval;
1275 }
1276
1277 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1278 +{
1279 + u64 oldval;
1280 + unsigned long res;
1281 +
1282 + smp_mb();
1283 +
1284 + do {
1285 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1286 + "ldrexd %1, %H1, [%3]\n"
1287 + "mov %0, #0\n"
1288 + "teq %1, %4\n"
1289 + "teqeq %H1, %H4\n"
1290 + "strexdeq %0, %5, %H5, [%3]"
1291 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1292 + : "r" (&ptr->counter), "r" (old), "r" (new)
1293 + : "cc");
1294 + } while (res);
1295 +
1296 + smp_mb();
1297 +
1298 + return oldval;
1299 +}
1300 +
1301 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1302 {
1303 u64 result;
1304 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1305
1306 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1307 {
1308 - u64 result;
1309 - unsigned long tmp;
1310 + u64 result, tmp;
1311
1312 smp_mb();
1313
1314 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1315 -"1: ldrexd %0, %H0, [%3]\n"
1316 -" subs %0, %0, #1\n"
1317 -" sbc %H0, %H0, #0\n"
1318 +"1: ldrexd %1, %H1, [%3]\n"
1319 +" subs %0, %1, #1\n"
1320 +" sbc %H0, %H1, #0\n"
1321 +
1322 +#ifdef CONFIG_PAX_REFCOUNT
1323 +" bvc 3f\n"
1324 +" mov %0, %1\n"
1325 +" mov %H0, %H1\n"
1326 +"2: bkpt 0xf103\n"
1327 +"3:\n"
1328 +#endif
1329 +
1330 " teq %H0, #0\n"
1331 -" bmi 2f\n"
1332 +" bmi 4f\n"
1333 " strexd %1, %0, %H0, [%3]\n"
1334 " teq %1, #0\n"
1335 " bne 1b\n"
1336 -"2:"
1337 +"4:\n"
1338 +
1339 +#ifdef CONFIG_PAX_REFCOUNT
1340 + _ASM_EXTABLE(2b, 4b)
1341 +#endif
1342 +
1343 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1344 : "r" (&v->counter)
1345 : "cc");
1346 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1347 " teq %0, %5\n"
1348 " teqeq %H0, %H5\n"
1349 " moveq %1, #0\n"
1350 -" beq 2f\n"
1351 +" beq 4f\n"
1352 " adds %0, %0, %6\n"
1353 " adc %H0, %H0, %H6\n"
1354 +
1355 +#ifdef CONFIG_PAX_REFCOUNT
1356 +" bvc 3f\n"
1357 +"2: bkpt 0xf103\n"
1358 +"3:\n"
1359 +#endif
1360 +
1361 " strexd %2, %0, %H0, [%4]\n"
1362 " teq %2, #0\n"
1363 " bne 1b\n"
1364 -"2:"
1365 +"4:\n"
1366 +
1367 +#ifdef CONFIG_PAX_REFCOUNT
1368 + _ASM_EXTABLE(2b, 4b)
1369 +#endif
1370 +
1371 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1372 : "r" (&v->counter), "r" (u), "r" (a)
1373 : "cc");
1374 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1375
1376 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1377 #define atomic64_inc(v) atomic64_add(1LL, (v))
1378 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1379 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1380 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1381 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1382 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1383 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1384 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1385 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1386 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1387 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1388 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1389 index 75fe66b..2255c86 100644
1390 --- a/arch/arm/include/asm/cache.h
1391 +++ b/arch/arm/include/asm/cache.h
1392 @@ -4,8 +4,10 @@
1393 #ifndef __ASMARM_CACHE_H
1394 #define __ASMARM_CACHE_H
1395
1396 +#include <linux/const.h>
1397 +
1398 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1399 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1400 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1401
1402 /*
1403 * Memory returned by kmalloc() may be used for DMA, so we must make
1404 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1405 index 1252a26..9dc17b5 100644
1406 --- a/arch/arm/include/asm/cacheflush.h
1407 +++ b/arch/arm/include/asm/cacheflush.h
1408 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1409 void (*dma_unmap_area)(const void *, size_t, int);
1410
1411 void (*dma_flush_range)(const void *, const void *);
1412 -};
1413 +} __no_const;
1414
1415 /*
1416 * Select the calling method
1417 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1418 index d41d7cb..9bea5e0 100644
1419 --- a/arch/arm/include/asm/cmpxchg.h
1420 +++ b/arch/arm/include/asm/cmpxchg.h
1421 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1422
1423 #define xchg(ptr,x) \
1424 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1425 +#define xchg_unchecked(ptr,x) \
1426 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1427
1428 #include <asm-generic/cmpxchg-local.h>
1429
1430 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1431 index 38050b1..9d90e8b 100644
1432 --- a/arch/arm/include/asm/elf.h
1433 +++ b/arch/arm/include/asm/elf.h
1434 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1435 the loader. We need to make sure that it is out of the way of the program
1436 that it will "exec", and that there is sufficient room for the brk. */
1437
1438 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1439 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1440 +
1441 +#ifdef CONFIG_PAX_ASLR
1442 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1443 +
1444 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1445 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1446 +#endif
1447
1448 /* When the program starts, a1 contains a pointer to a function to be
1449 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1450 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1451 extern void elf_set_personality(const struct elf32_hdr *);
1452 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1453
1454 -struct mm_struct;
1455 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1456 -#define arch_randomize_brk arch_randomize_brk
1457 -
1458 #endif
1459 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1460 index e51b1e8..32a3113 100644
1461 --- a/arch/arm/include/asm/kmap_types.h
1462 +++ b/arch/arm/include/asm/kmap_types.h
1463 @@ -21,6 +21,7 @@ enum km_type {
1464 KM_L1_CACHE,
1465 KM_L2_CACHE,
1466 KM_KDB,
1467 + KM_CLEARPAGE,
1468 KM_TYPE_NR
1469 };
1470
1471 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1472 index 53426c6..c7baff3 100644
1473 --- a/arch/arm/include/asm/outercache.h
1474 +++ b/arch/arm/include/asm/outercache.h
1475 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1476 #endif
1477 void (*set_debug)(unsigned long);
1478 void (*resume)(void);
1479 -};
1480 +} __no_const;
1481
1482 #ifdef CONFIG_OUTER_CACHE
1483
1484 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1485 index 5838361..da6e813 100644
1486 --- a/arch/arm/include/asm/page.h
1487 +++ b/arch/arm/include/asm/page.h
1488 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1489 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1490 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1491 unsigned long vaddr, struct vm_area_struct *vma);
1492 -};
1493 +} __no_const;
1494
1495 #ifdef MULTI_USER
1496 extern struct cpu_user_fns cpu_user;
1497 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1498 index 943504f..bf8d667 100644
1499 --- a/arch/arm/include/asm/pgalloc.h
1500 +++ b/arch/arm/include/asm/pgalloc.h
1501 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1502 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1503 }
1504
1505 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1506 +{
1507 + pud_populate(mm, pud, pmd);
1508 +}
1509 +
1510 #else /* !CONFIG_ARM_LPAE */
1511
1512 /*
1513 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1514 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1515 #define pmd_free(mm, pmd) do { } while (0)
1516 #define pud_populate(mm,pmd,pte) BUG()
1517 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1518
1519 #endif /* CONFIG_ARM_LPAE */
1520
1521 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1522 index 0f04d84..2be5648 100644
1523 --- a/arch/arm/include/asm/thread_info.h
1524 +++ b/arch/arm/include/asm/thread_info.h
1525 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1526 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1527 #define TIF_SYSCALL_TRACE 8
1528 #define TIF_SYSCALL_AUDIT 9
1529 +
1530 +/* within 8 bits of TIF_SYSCALL_TRACE
1531 + to meet flexible second operand requirements
1532 +*/
1533 +#define TIF_GRSEC_SETXID 10
1534 +
1535 #define TIF_POLLING_NRFLAG 16
1536 #define TIF_USING_IWMMXT 17
1537 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1538 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1539 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1540 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1541 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1542 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1543
1544 /* Checks for any syscall work in entry-common.S */
1545 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1546 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1547 + _TIF_GRSEC_SETXID)
1548
1549 /*
1550 * Change these and you break ASM code in entry-common.S
1551 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1552 index 71f6536..602f279 100644
1553 --- a/arch/arm/include/asm/uaccess.h
1554 +++ b/arch/arm/include/asm/uaccess.h
1555 @@ -22,6 +22,8 @@
1556 #define VERIFY_READ 0
1557 #define VERIFY_WRITE 1
1558
1559 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1560 +
1561 /*
1562 * The exception table consists of pairs of addresses: the first is the
1563 * address of an instruction that is allowed to fault, and the second is
1564 @@ -387,8 +389,23 @@ do { \
1565
1566
1567 #ifdef CONFIG_MMU
1568 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1569 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1570 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1571 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1572 +
1573 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1574 +{
1575 + if (!__builtin_constant_p(n))
1576 + check_object_size(to, n, false);
1577 + return ___copy_from_user(to, from, n);
1578 +}
1579 +
1580 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1581 +{
1582 + if (!__builtin_constant_p(n))
1583 + check_object_size(from, n, true);
1584 + return ___copy_to_user(to, from, n);
1585 +}
1586 +
1587 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1588 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1589 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1590 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1591
1592 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1593 {
1594 + if ((long)n < 0)
1595 + return n;
1596 +
1597 if (access_ok(VERIFY_READ, from, n))
1598 n = __copy_from_user(to, from, n);
1599 else /* security hole - plug it */
1600 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1601
1602 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1603 {
1604 + if ((long)n < 0)
1605 + return n;
1606 +
1607 if (access_ok(VERIFY_WRITE, to, n))
1608 n = __copy_to_user(to, from, n);
1609 return n;
1610 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1611 index b57c75e..ed2d6b2 100644
1612 --- a/arch/arm/kernel/armksyms.c
1613 +++ b/arch/arm/kernel/armksyms.c
1614 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1615 #ifdef CONFIG_MMU
1616 EXPORT_SYMBOL(copy_page);
1617
1618 -EXPORT_SYMBOL(__copy_from_user);
1619 -EXPORT_SYMBOL(__copy_to_user);
1620 +EXPORT_SYMBOL(___copy_from_user);
1621 +EXPORT_SYMBOL(___copy_to_user);
1622 EXPORT_SYMBOL(__clear_user);
1623
1624 EXPORT_SYMBOL(__get_user_1);
1625 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1626 index 2b7b017..c380fa2 100644
1627 --- a/arch/arm/kernel/process.c
1628 +++ b/arch/arm/kernel/process.c
1629 @@ -28,7 +28,6 @@
1630 #include <linux/tick.h>
1631 #include <linux/utsname.h>
1632 #include <linux/uaccess.h>
1633 -#include <linux/random.h>
1634 #include <linux/hw_breakpoint.h>
1635 #include <linux/cpuidle.h>
1636
1637 @@ -275,9 +274,10 @@ void machine_power_off(void)
1638 machine_shutdown();
1639 if (pm_power_off)
1640 pm_power_off();
1641 + BUG();
1642 }
1643
1644 -void machine_restart(char *cmd)
1645 +__noreturn void machine_restart(char *cmd)
1646 {
1647 machine_shutdown();
1648
1649 @@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1650 return 0;
1651 }
1652
1653 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1654 -{
1655 - unsigned long range_end = mm->brk + 0x02000000;
1656 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1657 -}
1658 -
1659 #ifdef CONFIG_MMU
1660 /*
1661 * The vectors page is always readable from user space for the
1662 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1663 index 9650c14..ae30cdd 100644
1664 --- a/arch/arm/kernel/ptrace.c
1665 +++ b/arch/arm/kernel/ptrace.c
1666 @@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1667 return ret;
1668 }
1669
1670 +#ifdef CONFIG_GRKERNSEC_SETXID
1671 +extern void gr_delayed_cred_worker(void);
1672 +#endif
1673 +
1674 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1675 {
1676 unsigned long ip;
1677
1678 +#ifdef CONFIG_GRKERNSEC_SETXID
1679 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1680 + gr_delayed_cred_worker();
1681 +#endif
1682 +
1683 if (why)
1684 audit_syscall_exit(regs);
1685 else
1686 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1687 index ebfac78..cbea9c0 100644
1688 --- a/arch/arm/kernel/setup.c
1689 +++ b/arch/arm/kernel/setup.c
1690 @@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1691 struct cpu_tlb_fns cpu_tlb __read_mostly;
1692 #endif
1693 #ifdef MULTI_USER
1694 -struct cpu_user_fns cpu_user __read_mostly;
1695 +struct cpu_user_fns cpu_user __read_only;
1696 #endif
1697 #ifdef MULTI_CACHE
1698 -struct cpu_cache_fns cpu_cache __read_mostly;
1699 +struct cpu_cache_fns cpu_cache __read_only;
1700 #endif
1701 #ifdef CONFIG_OUTER_CACHE
1702 -struct outer_cache_fns outer_cache __read_mostly;
1703 +struct outer_cache_fns outer_cache __read_only;
1704 EXPORT_SYMBOL(outer_cache);
1705 #endif
1706
1707 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1708 index 63d402f..db1d714 100644
1709 --- a/arch/arm/kernel/traps.c
1710 +++ b/arch/arm/kernel/traps.c
1711 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1712
1713 static DEFINE_RAW_SPINLOCK(die_lock);
1714
1715 +extern void gr_handle_kernel_exploit(void);
1716 +
1717 /*
1718 * This function is protected against re-entrancy.
1719 */
1720 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1721 panic("Fatal exception in interrupt");
1722 if (panic_on_oops)
1723 panic("Fatal exception");
1724 +
1725 + gr_handle_kernel_exploit();
1726 +
1727 if (ret != NOTIFY_STOP)
1728 do_exit(SIGSEGV);
1729 }
1730 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1731 index 66a477a..bee61d3 100644
1732 --- a/arch/arm/lib/copy_from_user.S
1733 +++ b/arch/arm/lib/copy_from_user.S
1734 @@ -16,7 +16,7 @@
1735 /*
1736 * Prototype:
1737 *
1738 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1739 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1740 *
1741 * Purpose:
1742 *
1743 @@ -84,11 +84,11 @@
1744
1745 .text
1746
1747 -ENTRY(__copy_from_user)
1748 +ENTRY(___copy_from_user)
1749
1750 #include "copy_template.S"
1751
1752 -ENDPROC(__copy_from_user)
1753 +ENDPROC(___copy_from_user)
1754
1755 .pushsection .fixup,"ax"
1756 .align 0
1757 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1758 index 6ee2f67..d1cce76 100644
1759 --- a/arch/arm/lib/copy_page.S
1760 +++ b/arch/arm/lib/copy_page.S
1761 @@ -10,6 +10,7 @@
1762 * ASM optimised string functions
1763 */
1764 #include <linux/linkage.h>
1765 +#include <linux/const.h>
1766 #include <asm/assembler.h>
1767 #include <asm/asm-offsets.h>
1768 #include <asm/cache.h>
1769 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1770 index d066df6..df28194 100644
1771 --- a/arch/arm/lib/copy_to_user.S
1772 +++ b/arch/arm/lib/copy_to_user.S
1773 @@ -16,7 +16,7 @@
1774 /*
1775 * Prototype:
1776 *
1777 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1778 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1779 *
1780 * Purpose:
1781 *
1782 @@ -88,11 +88,11 @@
1783 .text
1784
1785 ENTRY(__copy_to_user_std)
1786 -WEAK(__copy_to_user)
1787 +WEAK(___copy_to_user)
1788
1789 #include "copy_template.S"
1790
1791 -ENDPROC(__copy_to_user)
1792 +ENDPROC(___copy_to_user)
1793 ENDPROC(__copy_to_user_std)
1794
1795 .pushsection .fixup,"ax"
1796 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1797 index 5c908b1..e712687 100644
1798 --- a/arch/arm/lib/uaccess.S
1799 +++ b/arch/arm/lib/uaccess.S
1800 @@ -20,7 +20,7 @@
1801
1802 #define PAGE_SHIFT 12
1803
1804 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1805 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1806 * Purpose : copy a block to user memory from kernel memory
1807 * Params : to - user memory
1808 * : from - kernel memory
1809 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1810 sub r2, r2, ip
1811 b .Lc2u_dest_aligned
1812
1813 -ENTRY(__copy_to_user)
1814 +ENTRY(___copy_to_user)
1815 stmfd sp!, {r2, r4 - r7, lr}
1816 cmp r2, #4
1817 blt .Lc2u_not_enough
1818 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1819 ldrgtb r3, [r1], #0
1820 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1821 b .Lc2u_finished
1822 -ENDPROC(__copy_to_user)
1823 +ENDPROC(___copy_to_user)
1824
1825 .pushsection .fixup,"ax"
1826 .align 0
1827 9001: ldmfd sp!, {r0, r4 - r7, pc}
1828 .popsection
1829
1830 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1831 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1832 * Purpose : copy a block from user memory to kernel memory
1833 * Params : to - kernel memory
1834 * : from - user memory
1835 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1836 sub r2, r2, ip
1837 b .Lcfu_dest_aligned
1838
1839 -ENTRY(__copy_from_user)
1840 +ENTRY(___copy_from_user)
1841 stmfd sp!, {r0, r2, r4 - r7, lr}
1842 cmp r2, #4
1843 blt .Lcfu_not_enough
1844 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1845 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1846 strgtb r3, [r0], #1
1847 b .Lcfu_finished
1848 -ENDPROC(__copy_from_user)
1849 +ENDPROC(___copy_from_user)
1850
1851 .pushsection .fixup,"ax"
1852 .align 0
1853 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1854 index 025f742..8432b08 100644
1855 --- a/arch/arm/lib/uaccess_with_memcpy.c
1856 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1857 @@ -104,7 +104,7 @@ out:
1858 }
1859
1860 unsigned long
1861 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1862 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1863 {
1864 /*
1865 * This test is stubbed out of the main function above to keep
1866 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1867 index 518091c..eae9a76 100644
1868 --- a/arch/arm/mach-omap2/board-n8x0.c
1869 +++ b/arch/arm/mach-omap2/board-n8x0.c
1870 @@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1871 }
1872 #endif
1873
1874 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1875 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1876 .late_init = n8x0_menelaus_late_init,
1877 };
1878
1879 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1880 index 5bb4835..4760f68 100644
1881 --- a/arch/arm/mm/fault.c
1882 +++ b/arch/arm/mm/fault.c
1883 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1884 }
1885 #endif
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 + if (fsr & FSR_LNX_PF) {
1889 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1890 + do_group_exit(SIGKILL);
1891 + }
1892 +#endif
1893 +
1894 tsk->thread.address = addr;
1895 tsk->thread.error_code = fsr;
1896 tsk->thread.trap_no = 14;
1897 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1898 }
1899 #endif /* CONFIG_MMU */
1900
1901 +#ifdef CONFIG_PAX_PAGEEXEC
1902 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1903 +{
1904 + long i;
1905 +
1906 + printk(KERN_ERR "PAX: bytes at PC: ");
1907 + for (i = 0; i < 20; i++) {
1908 + unsigned char c;
1909 + if (get_user(c, (__force unsigned char __user *)pc+i))
1910 + printk(KERN_CONT "?? ");
1911 + else
1912 + printk(KERN_CONT "%02x ", c);
1913 + }
1914 + printk("\n");
1915 +
1916 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1917 + for (i = -1; i < 20; i++) {
1918 + unsigned long c;
1919 + if (get_user(c, (__force unsigned long __user *)sp+i))
1920 + printk(KERN_CONT "???????? ");
1921 + else
1922 + printk(KERN_CONT "%08lx ", c);
1923 + }
1924 + printk("\n");
1925 +}
1926 +#endif
1927 +
1928 /*
1929 * First Level Translation Fault Handler
1930 *
1931 @@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1932 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1933 struct siginfo info;
1934
1935 +#ifdef CONFIG_PAX_REFCOUNT
1936 + if (fsr_fs(ifsr) == 2) {
1937 + unsigned int bkpt;
1938 +
1939 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1940 + current->thread.error_code = ifsr;
1941 + current->thread.trap_no = 0;
1942 + pax_report_refcount_overflow(regs);
1943 + fixup_exception(regs);
1944 + return;
1945 + }
1946 + }
1947 +#endif
1948 +
1949 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1950 return;
1951
1952 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1953 index ce8cb19..3ec539d 100644
1954 --- a/arch/arm/mm/mmap.c
1955 +++ b/arch/arm/mm/mmap.c
1956 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1957 if (len > TASK_SIZE)
1958 return -ENOMEM;
1959
1960 +#ifdef CONFIG_PAX_RANDMMAP
1961 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1962 +#endif
1963 +
1964 if (addr) {
1965 if (do_align)
1966 addr = COLOUR_ALIGN(addr, pgoff);
1967 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1968 addr = PAGE_ALIGN(addr);
1969
1970 vma = find_vma(mm, addr);
1971 - if (TASK_SIZE - len >= addr &&
1972 - (!vma || addr + len <= vma->vm_start))
1973 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1974 return addr;
1975 }
1976 if (len > mm->cached_hole_size) {
1977 - start_addr = addr = mm->free_area_cache;
1978 + start_addr = addr = mm->free_area_cache;
1979 } else {
1980 - start_addr = addr = mm->mmap_base;
1981 - mm->cached_hole_size = 0;
1982 + start_addr = addr = mm->mmap_base;
1983 + mm->cached_hole_size = 0;
1984 }
1985
1986 full_search:
1987 @@ -124,14 +127,14 @@ full_search:
1988 * Start a new search - just in case we missed
1989 * some holes.
1990 */
1991 - if (start_addr != TASK_UNMAPPED_BASE) {
1992 - start_addr = addr = TASK_UNMAPPED_BASE;
1993 + if (start_addr != mm->mmap_base) {
1994 + start_addr = addr = mm->mmap_base;
1995 mm->cached_hole_size = 0;
1996 goto full_search;
1997 }
1998 return -ENOMEM;
1999 }
2000 - if (!vma || addr + len <= vma->vm_start) {
2001 + if (check_heap_stack_gap(vma, addr, len)) {
2002 /*
2003 * Remember the place where we stopped the search:
2004 */
2005 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2006
2007 if (mmap_is_legacy()) {
2008 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2009 +
2010 +#ifdef CONFIG_PAX_RANDMMAP
2011 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2012 + mm->mmap_base += mm->delta_mmap;
2013 +#endif
2014 +
2015 mm->get_unmapped_area = arch_get_unmapped_area;
2016 mm->unmap_area = arch_unmap_area;
2017 } else {
2018 mm->mmap_base = mmap_base(random_factor);
2019 +
2020 +#ifdef CONFIG_PAX_RANDMMAP
2021 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2022 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2023 +#endif
2024 +
2025 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2026 mm->unmap_area = arch_unmap_area_topdown;
2027 }
2028 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2029 index fd556f7..af2e7d2 100644
2030 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2031 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2032 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2033 value in bridge_virt_base */
2034 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2035 const int win);
2036 -};
2037 +} __no_const;
2038
2039 /*
2040 * Information needed to setup one address mapping.
2041 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2042 index 71a6827..e7fbc23 100644
2043 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2044 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2045 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
2046 int (*started)(unsigned ch);
2047 int (*flush)(unsigned ch);
2048 int (*stop)(unsigned ch);
2049 -};
2050 +} __no_const;
2051
2052 extern void *samsung_dmadev_get_ops(void);
2053 extern void *s3c_dma_get_ops(void);
2054 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2055 index 5f28cae..3d23723 100644
2056 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2057 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2058 @@ -14,7 +14,7 @@
2059 struct s5p_ehci_platdata {
2060 int (*phy_init)(struct platform_device *pdev, int type);
2061 int (*phy_exit)(struct platform_device *pdev, int type);
2062 -};
2063 +} __no_const;
2064
2065 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2066
2067 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2068 index c3a58a1..78fbf54 100644
2069 --- a/arch/avr32/include/asm/cache.h
2070 +++ b/arch/avr32/include/asm/cache.h
2071 @@ -1,8 +1,10 @@
2072 #ifndef __ASM_AVR32_CACHE_H
2073 #define __ASM_AVR32_CACHE_H
2074
2075 +#include <linux/const.h>
2076 +
2077 #define L1_CACHE_SHIFT 5
2078 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2079 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2080
2081 /*
2082 * Memory returned by kmalloc() may be used for DMA, so we must make
2083 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2084 index 3b3159b..425ea94 100644
2085 --- a/arch/avr32/include/asm/elf.h
2086 +++ b/arch/avr32/include/asm/elf.h
2087 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2088 the loader. We need to make sure that it is out of the way of the program
2089 that it will "exec", and that there is sufficient room for the brk. */
2090
2091 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2092 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2093
2094 +#ifdef CONFIG_PAX_ASLR
2095 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2096 +
2097 +#define PAX_DELTA_MMAP_LEN 15
2098 +#define PAX_DELTA_STACK_LEN 15
2099 +#endif
2100
2101 /* This yields a mask that user programs can use to figure out what
2102 instruction set this CPU supports. This could be done in user space,
2103 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2104 index b7f5c68..556135c 100644
2105 --- a/arch/avr32/include/asm/kmap_types.h
2106 +++ b/arch/avr32/include/asm/kmap_types.h
2107 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2108 D(11) KM_IRQ1,
2109 D(12) KM_SOFTIRQ0,
2110 D(13) KM_SOFTIRQ1,
2111 -D(14) KM_TYPE_NR
2112 +D(14) KM_CLEARPAGE,
2113 +D(15) KM_TYPE_NR
2114 };
2115
2116 #undef D
2117 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2118 index f7040a1..db9f300 100644
2119 --- a/arch/avr32/mm/fault.c
2120 +++ b/arch/avr32/mm/fault.c
2121 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2122
2123 int exception_trace = 1;
2124
2125 +#ifdef CONFIG_PAX_PAGEEXEC
2126 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2127 +{
2128 + unsigned long i;
2129 +
2130 + printk(KERN_ERR "PAX: bytes at PC: ");
2131 + for (i = 0; i < 20; i++) {
2132 + unsigned char c;
2133 + if (get_user(c, (unsigned char *)pc+i))
2134 + printk(KERN_CONT "???????? ");
2135 + else
2136 + printk(KERN_CONT "%02x ", c);
2137 + }
2138 + printk("\n");
2139 +}
2140 +#endif
2141 +
2142 /*
2143 * This routine handles page faults. It determines the address and the
2144 * problem, and then passes it off to one of the appropriate routines.
2145 @@ -156,6 +173,16 @@ bad_area:
2146 up_read(&mm->mmap_sem);
2147
2148 if (user_mode(regs)) {
2149 +
2150 +#ifdef CONFIG_PAX_PAGEEXEC
2151 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2152 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2153 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2154 + do_group_exit(SIGKILL);
2155 + }
2156 + }
2157 +#endif
2158 +
2159 if (exception_trace && printk_ratelimit())
2160 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2161 "sp %08lx ecr %lu\n",
2162 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2163 index 568885a..f8008df 100644
2164 --- a/arch/blackfin/include/asm/cache.h
2165 +++ b/arch/blackfin/include/asm/cache.h
2166 @@ -7,6 +7,7 @@
2167 #ifndef __ARCH_BLACKFIN_CACHE_H
2168 #define __ARCH_BLACKFIN_CACHE_H
2169
2170 +#include <linux/const.h>
2171 #include <linux/linkage.h> /* for asmlinkage */
2172
2173 /*
2174 @@ -14,7 +15,7 @@
2175 * Blackfin loads 32 bytes for cache
2176 */
2177 #define L1_CACHE_SHIFT 5
2178 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2179 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2180 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2181
2182 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2183 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2184 index aea2718..3639a60 100644
2185 --- a/arch/cris/include/arch-v10/arch/cache.h
2186 +++ b/arch/cris/include/arch-v10/arch/cache.h
2187 @@ -1,8 +1,9 @@
2188 #ifndef _ASM_ARCH_CACHE_H
2189 #define _ASM_ARCH_CACHE_H
2190
2191 +#include <linux/const.h>
2192 /* Etrax 100LX have 32-byte cache-lines. */
2193 -#define L1_CACHE_BYTES 32
2194 #define L1_CACHE_SHIFT 5
2195 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2196
2197 #endif /* _ASM_ARCH_CACHE_H */
2198 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2199 index 1de779f..336fad3 100644
2200 --- a/arch/cris/include/arch-v32/arch/cache.h
2201 +++ b/arch/cris/include/arch-v32/arch/cache.h
2202 @@ -1,11 +1,12 @@
2203 #ifndef _ASM_CRIS_ARCH_CACHE_H
2204 #define _ASM_CRIS_ARCH_CACHE_H
2205
2206 +#include <linux/const.h>
2207 #include <arch/hwregs/dma.h>
2208
2209 /* A cache-line is 32 bytes. */
2210 -#define L1_CACHE_BYTES 32
2211 #define L1_CACHE_SHIFT 5
2212 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2213
2214 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2215
2216 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2217 index b86329d..6709906 100644
2218 --- a/arch/frv/include/asm/atomic.h
2219 +++ b/arch/frv/include/asm/atomic.h
2220 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2221 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2222 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2223
2224 +#define atomic64_read_unchecked(v) atomic64_read(v)
2225 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2226 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2227 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2228 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2229 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2230 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2231 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2232 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2233 +
2234 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2235 {
2236 int c, old;
2237 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2238 index 2797163..c2a401d 100644
2239 --- a/arch/frv/include/asm/cache.h
2240 +++ b/arch/frv/include/asm/cache.h
2241 @@ -12,10 +12,11 @@
2242 #ifndef __ASM_CACHE_H
2243 #define __ASM_CACHE_H
2244
2245 +#include <linux/const.h>
2246
2247 /* bytes per L1 cache line */
2248 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2249 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2250 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2251
2252 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2253 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2254 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2255 index f8e16b2..c73ff79 100644
2256 --- a/arch/frv/include/asm/kmap_types.h
2257 +++ b/arch/frv/include/asm/kmap_types.h
2258 @@ -23,6 +23,7 @@ enum km_type {
2259 KM_IRQ1,
2260 KM_SOFTIRQ0,
2261 KM_SOFTIRQ1,
2262 + KM_CLEARPAGE,
2263 KM_TYPE_NR
2264 };
2265
2266 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2267 index 385fd30..6c3d97e 100644
2268 --- a/arch/frv/mm/elf-fdpic.c
2269 +++ b/arch/frv/mm/elf-fdpic.c
2270 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2271 if (addr) {
2272 addr = PAGE_ALIGN(addr);
2273 vma = find_vma(current->mm, addr);
2274 - if (TASK_SIZE - len >= addr &&
2275 - (!vma || addr + len <= vma->vm_start))
2276 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2277 goto success;
2278 }
2279
2280 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2281 for (; vma; vma = vma->vm_next) {
2282 if (addr > limit)
2283 break;
2284 - if (addr + len <= vma->vm_start)
2285 + if (check_heap_stack_gap(vma, addr, len))
2286 goto success;
2287 addr = vma->vm_end;
2288 }
2289 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2290 for (; vma; vma = vma->vm_next) {
2291 if (addr > limit)
2292 break;
2293 - if (addr + len <= vma->vm_start)
2294 + if (check_heap_stack_gap(vma, addr, len))
2295 goto success;
2296 addr = vma->vm_end;
2297 }
2298 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2299 index c635028..6d9445a 100644
2300 --- a/arch/h8300/include/asm/cache.h
2301 +++ b/arch/h8300/include/asm/cache.h
2302 @@ -1,8 +1,10 @@
2303 #ifndef __ARCH_H8300_CACHE_H
2304 #define __ARCH_H8300_CACHE_H
2305
2306 +#include <linux/const.h>
2307 +
2308 /* bytes per L1 cache line */
2309 -#define L1_CACHE_BYTES 4
2310 +#define L1_CACHE_BYTES _AC(4,UL)
2311
2312 /* m68k-elf-gcc 2.95.2 doesn't like these */
2313
2314 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2315 index 0f01de2..d37d309 100644
2316 --- a/arch/hexagon/include/asm/cache.h
2317 +++ b/arch/hexagon/include/asm/cache.h
2318 @@ -21,9 +21,11 @@
2319 #ifndef __ASM_CACHE_H
2320 #define __ASM_CACHE_H
2321
2322 +#include <linux/const.h>
2323 +
2324 /* Bytes per L1 cache line */
2325 -#define L1_CACHE_SHIFT (5)
2326 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2327 +#define L1_CACHE_SHIFT 5
2328 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2329
2330 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2331 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2332 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2333 index 7d91166..88ab87e 100644
2334 --- a/arch/ia64/include/asm/atomic.h
2335 +++ b/arch/ia64/include/asm/atomic.h
2336 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2337 #define atomic64_inc(v) atomic64_add(1, (v))
2338 #define atomic64_dec(v) atomic64_sub(1, (v))
2339
2340 +#define atomic64_read_unchecked(v) atomic64_read(v)
2341 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2342 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2343 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2344 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2345 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2346 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2347 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2348 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2349 +
2350 /* Atomic operations are already serializing */
2351 #define smp_mb__before_atomic_dec() barrier()
2352 #define smp_mb__after_atomic_dec() barrier()
2353 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2354 index 988254a..e1ee885 100644
2355 --- a/arch/ia64/include/asm/cache.h
2356 +++ b/arch/ia64/include/asm/cache.h
2357 @@ -1,6 +1,7 @@
2358 #ifndef _ASM_IA64_CACHE_H
2359 #define _ASM_IA64_CACHE_H
2360
2361 +#include <linux/const.h>
2362
2363 /*
2364 * Copyright (C) 1998-2000 Hewlett-Packard Co
2365 @@ -9,7 +10,7 @@
2366
2367 /* Bytes per L1 (data) cache line. */
2368 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2369 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2370 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2371
2372 #ifdef CONFIG_SMP
2373 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2374 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2375 index b5298eb..67c6e62 100644
2376 --- a/arch/ia64/include/asm/elf.h
2377 +++ b/arch/ia64/include/asm/elf.h
2378 @@ -42,6 +42,13 @@
2379 */
2380 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2381
2382 +#ifdef CONFIG_PAX_ASLR
2383 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2384 +
2385 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2386 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2387 +#endif
2388 +
2389 #define PT_IA_64_UNWIND 0x70000001
2390
2391 /* IA-64 relocations: */
2392 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2393 index 96a8d92..617a1cf 100644
2394 --- a/arch/ia64/include/asm/pgalloc.h
2395 +++ b/arch/ia64/include/asm/pgalloc.h
2396 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2397 pgd_val(*pgd_entry) = __pa(pud);
2398 }
2399
2400 +static inline void
2401 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2402 +{
2403 + pgd_populate(mm, pgd_entry, pud);
2404 +}
2405 +
2406 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2407 {
2408 return quicklist_alloc(0, GFP_KERNEL, NULL);
2409 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2410 pud_val(*pud_entry) = __pa(pmd);
2411 }
2412
2413 +static inline void
2414 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2415 +{
2416 + pud_populate(mm, pud_entry, pmd);
2417 +}
2418 +
2419 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2420 {
2421 return quicklist_alloc(0, GFP_KERNEL, NULL);
2422 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2423 index 815810c..d60bd4c 100644
2424 --- a/arch/ia64/include/asm/pgtable.h
2425 +++ b/arch/ia64/include/asm/pgtable.h
2426 @@ -12,7 +12,7 @@
2427 * David Mosberger-Tang <davidm@hpl.hp.com>
2428 */
2429
2430 -
2431 +#include <linux/const.h>
2432 #include <asm/mman.h>
2433 #include <asm/page.h>
2434 #include <asm/processor.h>
2435 @@ -142,6 +142,17 @@
2436 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2437 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2438 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2439 +
2440 +#ifdef CONFIG_PAX_PAGEEXEC
2441 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2442 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2443 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2444 +#else
2445 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2446 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2447 +# define PAGE_COPY_NOEXEC PAGE_COPY
2448 +#endif
2449 +
2450 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2451 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2452 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2453 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2454 index 54ff557..70c88b7 100644
2455 --- a/arch/ia64/include/asm/spinlock.h
2456 +++ b/arch/ia64/include/asm/spinlock.h
2457 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2458 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2459
2460 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2461 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2462 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2463 }
2464
2465 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2466 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2467 index 449c8c0..432a3d2 100644
2468 --- a/arch/ia64/include/asm/uaccess.h
2469 +++ b/arch/ia64/include/asm/uaccess.h
2470 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2471 const void *__cu_from = (from); \
2472 long __cu_len = (n); \
2473 \
2474 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2475 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2476 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2477 __cu_len; \
2478 })
2479 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2480 long __cu_len = (n); \
2481 \
2482 __chk_user_ptr(__cu_from); \
2483 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2484 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2485 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2486 __cu_len; \
2487 })
2488 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2489 index 24603be..948052d 100644
2490 --- a/arch/ia64/kernel/module.c
2491 +++ b/arch/ia64/kernel/module.c
2492 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2493 void
2494 module_free (struct module *mod, void *module_region)
2495 {
2496 - if (mod && mod->arch.init_unw_table &&
2497 - module_region == mod->module_init) {
2498 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2499 unw_remove_unwind_table(mod->arch.init_unw_table);
2500 mod->arch.init_unw_table = NULL;
2501 }
2502 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2503 }
2504
2505 static inline int
2506 +in_init_rx (const struct module *mod, uint64_t addr)
2507 +{
2508 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2509 +}
2510 +
2511 +static inline int
2512 +in_init_rw (const struct module *mod, uint64_t addr)
2513 +{
2514 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2515 +}
2516 +
2517 +static inline int
2518 in_init (const struct module *mod, uint64_t addr)
2519 {
2520 - return addr - (uint64_t) mod->module_init < mod->init_size;
2521 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2522 +}
2523 +
2524 +static inline int
2525 +in_core_rx (const struct module *mod, uint64_t addr)
2526 +{
2527 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2528 +}
2529 +
2530 +static inline int
2531 +in_core_rw (const struct module *mod, uint64_t addr)
2532 +{
2533 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2534 }
2535
2536 static inline int
2537 in_core (const struct module *mod, uint64_t addr)
2538 {
2539 - return addr - (uint64_t) mod->module_core < mod->core_size;
2540 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2541 }
2542
2543 static inline int
2544 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2545 break;
2546
2547 case RV_BDREL:
2548 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2549 + if (in_init_rx(mod, val))
2550 + val -= (uint64_t) mod->module_init_rx;
2551 + else if (in_init_rw(mod, val))
2552 + val -= (uint64_t) mod->module_init_rw;
2553 + else if (in_core_rx(mod, val))
2554 + val -= (uint64_t) mod->module_core_rx;
2555 + else if (in_core_rw(mod, val))
2556 + val -= (uint64_t) mod->module_core_rw;
2557 break;
2558
2559 case RV_LTV:
2560 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2561 * addresses have been selected...
2562 */
2563 uint64_t gp;
2564 - if (mod->core_size > MAX_LTOFF)
2565 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2566 /*
2567 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2568 * at the end of the module.
2569 */
2570 - gp = mod->core_size - MAX_LTOFF / 2;
2571 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2572 else
2573 - gp = mod->core_size / 2;
2574 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2575 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2576 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2577 mod->arch.gp = gp;
2578 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2579 }
2580 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2581 index 609d500..7dde2a8 100644
2582 --- a/arch/ia64/kernel/sys_ia64.c
2583 +++ b/arch/ia64/kernel/sys_ia64.c
2584 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2585 if (REGION_NUMBER(addr) == RGN_HPAGE)
2586 addr = 0;
2587 #endif
2588 +
2589 +#ifdef CONFIG_PAX_RANDMMAP
2590 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2591 + addr = mm->free_area_cache;
2592 + else
2593 +#endif
2594 +
2595 if (!addr)
2596 addr = mm->free_area_cache;
2597
2598 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2599 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2600 /* At this point: (!vma || addr < vma->vm_end). */
2601 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2602 - if (start_addr != TASK_UNMAPPED_BASE) {
2603 + if (start_addr != mm->mmap_base) {
2604 /* Start a new search --- just in case we missed some holes. */
2605 - addr = TASK_UNMAPPED_BASE;
2606 + addr = mm->mmap_base;
2607 goto full_search;
2608 }
2609 return -ENOMEM;
2610 }
2611 - if (!vma || addr + len <= vma->vm_start) {
2612 + if (check_heap_stack_gap(vma, addr, len)) {
2613 /* Remember the address where we stopped this search: */
2614 mm->free_area_cache = addr + len;
2615 return addr;
2616 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2617 index 0ccb28f..8992469 100644
2618 --- a/arch/ia64/kernel/vmlinux.lds.S
2619 +++ b/arch/ia64/kernel/vmlinux.lds.S
2620 @@ -198,7 +198,7 @@ SECTIONS {
2621 /* Per-cpu data: */
2622 . = ALIGN(PERCPU_PAGE_SIZE);
2623 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2624 - __phys_per_cpu_start = __per_cpu_load;
2625 + __phys_per_cpu_start = per_cpu_load;
2626 /*
2627 * ensure percpu data fits
2628 * into percpu page size
2629 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2630 index 02d29c2..ea893df 100644
2631 --- a/arch/ia64/mm/fault.c
2632 +++ b/arch/ia64/mm/fault.c
2633 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2634 return pte_present(pte);
2635 }
2636
2637 +#ifdef CONFIG_PAX_PAGEEXEC
2638 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2639 +{
2640 + unsigned long i;
2641 +
2642 + printk(KERN_ERR "PAX: bytes at PC: ");
2643 + for (i = 0; i < 8; i++) {
2644 + unsigned int c;
2645 + if (get_user(c, (unsigned int *)pc+i))
2646 + printk(KERN_CONT "???????? ");
2647 + else
2648 + printk(KERN_CONT "%08x ", c);
2649 + }
2650 + printk("\n");
2651 +}
2652 +#endif
2653 +
2654 void __kprobes
2655 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2656 {
2657 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2658 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2659 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2660
2661 - if ((vma->vm_flags & mask) != mask)
2662 + if ((vma->vm_flags & mask) != mask) {
2663 +
2664 +#ifdef CONFIG_PAX_PAGEEXEC
2665 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2666 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2667 + goto bad_area;
2668 +
2669 + up_read(&mm->mmap_sem);
2670 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2671 + do_group_exit(SIGKILL);
2672 + }
2673 +#endif
2674 +
2675 goto bad_area;
2676
2677 + }
2678 +
2679 /*
2680 * If for any reason at all we couldn't handle the fault, make
2681 * sure we exit gracefully rather than endlessly redo the
2682 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2683 index 5ca674b..e0e1b70 100644
2684 --- a/arch/ia64/mm/hugetlbpage.c
2685 +++ b/arch/ia64/mm/hugetlbpage.c
2686 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2687 /* At this point: (!vmm || addr < vmm->vm_end). */
2688 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2689 return -ENOMEM;
2690 - if (!vmm || (addr + len) <= vmm->vm_start)
2691 + if (check_heap_stack_gap(vmm, addr, len))
2692 return addr;
2693 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2694 }
2695 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2696 index 0eab454..bd794f2 100644
2697 --- a/arch/ia64/mm/init.c
2698 +++ b/arch/ia64/mm/init.c
2699 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2700 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2701 vma->vm_end = vma->vm_start + PAGE_SIZE;
2702 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2703 +
2704 +#ifdef CONFIG_PAX_PAGEEXEC
2705 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2706 + vma->vm_flags &= ~VM_EXEC;
2707 +
2708 +#ifdef CONFIG_PAX_MPROTECT
2709 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2710 + vma->vm_flags &= ~VM_MAYEXEC;
2711 +#endif
2712 +
2713 + }
2714 +#endif
2715 +
2716 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2717 down_write(&current->mm->mmap_sem);
2718 if (insert_vm_struct(current->mm, vma)) {
2719 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2720 index 40b3ee9..8c2c112 100644
2721 --- a/arch/m32r/include/asm/cache.h
2722 +++ b/arch/m32r/include/asm/cache.h
2723 @@ -1,8 +1,10 @@
2724 #ifndef _ASM_M32R_CACHE_H
2725 #define _ASM_M32R_CACHE_H
2726
2727 +#include <linux/const.h>
2728 +
2729 /* L1 cache line size */
2730 #define L1_CACHE_SHIFT 4
2731 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2732 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2733
2734 #endif /* _ASM_M32R_CACHE_H */
2735 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2736 index 82abd15..d95ae5d 100644
2737 --- a/arch/m32r/lib/usercopy.c
2738 +++ b/arch/m32r/lib/usercopy.c
2739 @@ -14,6 +14,9 @@
2740 unsigned long
2741 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2742 {
2743 + if ((long)n < 0)
2744 + return n;
2745 +
2746 prefetch(from);
2747 if (access_ok(VERIFY_WRITE, to, n))
2748 __copy_user(to,from,n);
2749 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2750 unsigned long
2751 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2752 {
2753 + if ((long)n < 0)
2754 + return n;
2755 +
2756 prefetchw(to);
2757 if (access_ok(VERIFY_READ, from, n))
2758 __copy_user_zeroing(to,from,n);
2759 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2760 index 0395c51..5f26031 100644
2761 --- a/arch/m68k/include/asm/cache.h
2762 +++ b/arch/m68k/include/asm/cache.h
2763 @@ -4,9 +4,11 @@
2764 #ifndef __ARCH_M68K_CACHE_H
2765 #define __ARCH_M68K_CACHE_H
2766
2767 +#include <linux/const.h>
2768 +
2769 /* bytes per L1 cache line */
2770 #define L1_CACHE_SHIFT 4
2771 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2772 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2773
2774 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2775
2776 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2777 index 4efe96a..60e8699 100644
2778 --- a/arch/microblaze/include/asm/cache.h
2779 +++ b/arch/microblaze/include/asm/cache.h
2780 @@ -13,11 +13,12 @@
2781 #ifndef _ASM_MICROBLAZE_CACHE_H
2782 #define _ASM_MICROBLAZE_CACHE_H
2783
2784 +#include <linux/const.h>
2785 #include <asm/registers.h>
2786
2787 #define L1_CACHE_SHIFT 5
2788 /* word-granular cache in microblaze */
2789 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2790 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2791
2792 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2793
2794 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2795 index 3f4c5cb..3439c6e 100644
2796 --- a/arch/mips/include/asm/atomic.h
2797 +++ b/arch/mips/include/asm/atomic.h
2798 @@ -21,6 +21,10 @@
2799 #include <asm/cmpxchg.h>
2800 #include <asm/war.h>
2801
2802 +#ifdef CONFIG_GENERIC_ATOMIC64
2803 +#include <asm-generic/atomic64.h>
2804 +#endif
2805 +
2806 #define ATOMIC_INIT(i) { (i) }
2807
2808 /*
2809 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2810 */
2811 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2812
2813 +#define atomic64_read_unchecked(v) atomic64_read(v)
2814 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2815 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2816 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2817 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2818 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2819 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2820 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2821 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2822 +
2823 #endif /* CONFIG_64BIT */
2824
2825 /*
2826 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2827 index b4db69f..8f3b093 100644
2828 --- a/arch/mips/include/asm/cache.h
2829 +++ b/arch/mips/include/asm/cache.h
2830 @@ -9,10 +9,11 @@
2831 #ifndef _ASM_CACHE_H
2832 #define _ASM_CACHE_H
2833
2834 +#include <linux/const.h>
2835 #include <kmalloc.h>
2836
2837 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2838 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2839 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2840
2841 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2842 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2843 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2844 index 455c0ac..ad65fbe 100644
2845 --- a/arch/mips/include/asm/elf.h
2846 +++ b/arch/mips/include/asm/elf.h
2847 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2848 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2849 #endif
2850
2851 +#ifdef CONFIG_PAX_ASLR
2852 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2853 +
2854 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2855 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2856 +#endif
2857 +
2858 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2859 struct linux_binprm;
2860 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2861 int uses_interp);
2862
2863 -struct mm_struct;
2864 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2865 -#define arch_randomize_brk arch_randomize_brk
2866 -
2867 #endif /* _ASM_ELF_H */
2868 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2869 index c1f6afa..38cc6e9 100644
2870 --- a/arch/mips/include/asm/exec.h
2871 +++ b/arch/mips/include/asm/exec.h
2872 @@ -12,6 +12,6 @@
2873 #ifndef _ASM_EXEC_H
2874 #define _ASM_EXEC_H
2875
2876 -extern unsigned long arch_align_stack(unsigned long sp);
2877 +#define arch_align_stack(x) ((x) & ~0xfUL)
2878
2879 #endif /* _ASM_EXEC_H */
2880 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2881 index da9bd7d..91aa7ab 100644
2882 --- a/arch/mips/include/asm/page.h
2883 +++ b/arch/mips/include/asm/page.h
2884 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2885 #ifdef CONFIG_CPU_MIPS32
2886 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2887 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2888 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2889 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2890 #else
2891 typedef struct { unsigned long long pte; } pte_t;
2892 #define pte_val(x) ((x).pte)
2893 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2894 index 881d18b..cea38bc 100644
2895 --- a/arch/mips/include/asm/pgalloc.h
2896 +++ b/arch/mips/include/asm/pgalloc.h
2897 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2898 {
2899 set_pud(pud, __pud((unsigned long)pmd));
2900 }
2901 +
2902 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2903 +{
2904 + pud_populate(mm, pud, pmd);
2905 +}
2906 #endif
2907
2908 /*
2909 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2910 index 0d85d8e..ec71487 100644
2911 --- a/arch/mips/include/asm/thread_info.h
2912 +++ b/arch/mips/include/asm/thread_info.h
2913 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2914 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2915 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2916 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2917 +/* li takes a 32bit immediate */
2918 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2919 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2920
2921 #ifdef CONFIG_MIPS32_O32
2922 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2923 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2924 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2925 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2926 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2927 +
2928 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do in syscall_trace_leave() */
2931 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2932 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2933
2934 /* work to do on interrupt/exception return */
2935 #define _TIF_WORK_MASK (0x0000ffef & \
2936 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2937 /* work to do on any return to u-space */
2938 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2939 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2940
2941 #endif /* __KERNEL__ */
2942
2943 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2944 index 9fdd8bc..4bd7f1a 100644
2945 --- a/arch/mips/kernel/binfmt_elfn32.c
2946 +++ b/arch/mips/kernel/binfmt_elfn32.c
2947 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2948 #undef ELF_ET_DYN_BASE
2949 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2950
2951 +#ifdef CONFIG_PAX_ASLR
2952 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2953 +
2954 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2955 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2956 +#endif
2957 +
2958 #include <asm/processor.h>
2959 #include <linux/module.h>
2960 #include <linux/elfcore.h>
2961 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2962 index ff44823..97f8906 100644
2963 --- a/arch/mips/kernel/binfmt_elfo32.c
2964 +++ b/arch/mips/kernel/binfmt_elfo32.c
2965 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2966 #undef ELF_ET_DYN_BASE
2967 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2968
2969 +#ifdef CONFIG_PAX_ASLR
2970 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2971 +
2972 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2973 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2974 +#endif
2975 +
2976 #include <asm/processor.h>
2977
2978 /*
2979 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2980 index e9a5fd7..378809a 100644
2981 --- a/arch/mips/kernel/process.c
2982 +++ b/arch/mips/kernel/process.c
2983 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2984 out:
2985 return pc;
2986 }
2987 -
2988 -/*
2989 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2990 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2991 - */
2992 -unsigned long arch_align_stack(unsigned long sp)
2993 -{
2994 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2995 - sp -= get_random_int() & ~PAGE_MASK;
2996 -
2997 - return sp & ALMASK;
2998 -}
2999 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3000 index 7c24c29..e2f1981 100644
3001 --- a/arch/mips/kernel/ptrace.c
3002 +++ b/arch/mips/kernel/ptrace.c
3003 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
3004 return arch;
3005 }
3006
3007 +#ifdef CONFIG_GRKERNSEC_SETXID
3008 +extern void gr_delayed_cred_worker(void);
3009 +#endif
3010 +
3011 /*
3012 * Notification of system call entry/exit
3013 * - triggered by current->work.syscall_trace
3014 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3015 /* do the secure computing check first */
3016 secure_computing(regs->regs[2]);
3017
3018 +#ifdef CONFIG_GRKERNSEC_SETXID
3019 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020 + gr_delayed_cred_worker();
3021 +#endif
3022 +
3023 if (!(current->ptrace & PT_PTRACED))
3024 goto out;
3025
3026 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3027 index a632bc1..0b77c7c 100644
3028 --- a/arch/mips/kernel/scall32-o32.S
3029 +++ b/arch/mips/kernel/scall32-o32.S
3030 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3031
3032 stack_done:
3033 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3034 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3035 + li t1, _TIF_SYSCALL_WORK
3036 and t0, t1
3037 bnez t0, syscall_trace_entry # -> yes
3038
3039 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3040 index 3b5a5e9..e1ee86d 100644
3041 --- a/arch/mips/kernel/scall64-64.S
3042 +++ b/arch/mips/kernel/scall64-64.S
3043 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3044
3045 sd a3, PT_R26(sp) # save a3 for syscall restarting
3046
3047 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3048 + li t1, _TIF_SYSCALL_WORK
3049 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3050 and t0, t1, t0
3051 bnez t0, syscall_trace_entry
3052 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3053 index 6be6f70..1859577 100644
3054 --- a/arch/mips/kernel/scall64-n32.S
3055 +++ b/arch/mips/kernel/scall64-n32.S
3056 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3057
3058 sd a3, PT_R26(sp) # save a3 for syscall restarting
3059
3060 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3061 + li t1, _TIF_SYSCALL_WORK
3062 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3063 and t0, t1, t0
3064 bnez t0, n32_syscall_trace_entry
3065 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3066 index 5422855..74e63a3 100644
3067 --- a/arch/mips/kernel/scall64-o32.S
3068 +++ b/arch/mips/kernel/scall64-o32.S
3069 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3070 PTR 4b, bad_stack
3071 .previous
3072
3073 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3074 + li t1, _TIF_SYSCALL_WORK
3075 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3076 and t0, t1, t0
3077 bnez t0, trace_a_syscall
3078 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3079 index c14f6df..537e729 100644
3080 --- a/arch/mips/mm/fault.c
3081 +++ b/arch/mips/mm/fault.c
3082 @@ -27,6 +27,23 @@
3083 #include <asm/highmem.h> /* For VMALLOC_END */
3084 #include <linux/kdebug.h>
3085
3086 +#ifdef CONFIG_PAX_PAGEEXEC
3087 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3088 +{
3089 + unsigned long i;
3090 +
3091 + printk(KERN_ERR "PAX: bytes at PC: ");
3092 + for (i = 0; i < 5; i++) {
3093 + unsigned int c;
3094 + if (get_user(c, (unsigned int *)pc+i))
3095 + printk(KERN_CONT "???????? ");
3096 + else
3097 + printk(KERN_CONT "%08x ", c);
3098 + }
3099 + printk("\n");
3100 +}
3101 +#endif
3102 +
3103 /*
3104 * This routine handles page faults. It determines the address,
3105 * and the problem, and then passes it off to one of the appropriate
3106 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3107 index 302d779..7d35bf8 100644
3108 --- a/arch/mips/mm/mmap.c
3109 +++ b/arch/mips/mm/mmap.c
3110 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 do_color_align = 1;
3112
3113 /* requesting a specific address */
3114 +
3115 +#ifdef CONFIG_PAX_RANDMMAP
3116 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3117 +#endif
3118 +
3119 if (addr) {
3120 if (do_color_align)
3121 addr = COLOUR_ALIGN(addr, pgoff);
3122 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3123 addr = PAGE_ALIGN(addr);
3124
3125 vma = find_vma(mm, addr);
3126 - if (TASK_SIZE - len >= addr &&
3127 - (!vma || addr + len <= vma->vm_start))
3128 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3129 return addr;
3130 }
3131
3132 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3133 /* At this point: (!vma || addr < vma->vm_end). */
3134 if (TASK_SIZE - len < addr)
3135 return -ENOMEM;
3136 - if (!vma || addr + len <= vma->vm_start)
3137 + if (check_heap_stack_gap(vmm, addr, len))
3138 return addr;
3139 addr = vma->vm_end;
3140 if (do_color_align)
3141 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3142 /* make sure it can fit in the remaining address space */
3143 if (likely(addr > len)) {
3144 vma = find_vma(mm, addr - len);
3145 - if (!vma || addr <= vma->vm_start) {
3146 + if (check_heap_stack_gap(vmm, addr - len, len))
3147 /* cache the address as a hint for next time */
3148 return mm->free_area_cache = addr - len;
3149 }
3150 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3151 * return with success:
3152 */
3153 vma = find_vma(mm, addr);
3154 - if (likely(!vma || addr + len <= vma->vm_start)) {
3155 + if (check_heap_stack_gap(vmm, addr, len)) {
3156 /* cache the address as a hint for next time */
3157 return mm->free_area_cache = addr;
3158 }
3159 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3160 mm->unmap_area = arch_unmap_area_topdown;
3161 }
3162 }
3163 -
3164 -static inline unsigned long brk_rnd(void)
3165 -{
3166 - unsigned long rnd = get_random_int();
3167 -
3168 - rnd = rnd << PAGE_SHIFT;
3169 - /* 8MB for 32bit, 256MB for 64bit */
3170 - if (TASK_IS_32BIT_ADDR)
3171 - rnd = rnd & 0x7ffffful;
3172 - else
3173 - rnd = rnd & 0xffffffful;
3174 -
3175 - return rnd;
3176 -}
3177 -
3178 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3179 -{
3180 - unsigned long base = mm->brk;
3181 - unsigned long ret;
3182 -
3183 - ret = PAGE_ALIGN(base + brk_rnd());
3184 -
3185 - if (ret < mm->brk)
3186 - return mm->brk;
3187 -
3188 - return ret;
3189 -}
3190 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3191 index 967d144..db12197 100644
3192 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3193 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3194 @@ -11,12 +11,14 @@
3195 #ifndef _ASM_PROC_CACHE_H
3196 #define _ASM_PROC_CACHE_H
3197
3198 +#include <linux/const.h>
3199 +
3200 /* L1 cache */
3201
3202 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3203 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3204 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3205 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3206 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3207 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3208
3209 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3210 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3211 index bcb5df2..84fabd2 100644
3212 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3213 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3214 @@ -16,13 +16,15 @@
3215 #ifndef _ASM_PROC_CACHE_H
3216 #define _ASM_PROC_CACHE_H
3217
3218 +#include <linux/const.h>
3219 +
3220 /*
3221 * L1 cache
3222 */
3223 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3224 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3225 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3226 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3227 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3228 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3229
3230 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3231 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3232 index 4ce7a01..449202a 100644
3233 --- a/arch/openrisc/include/asm/cache.h
3234 +++ b/arch/openrisc/include/asm/cache.h
3235 @@ -19,11 +19,13 @@
3236 #ifndef __ASM_OPENRISC_CACHE_H
3237 #define __ASM_OPENRISC_CACHE_H
3238
3239 +#include <linux/const.h>
3240 +
3241 /* FIXME: How can we replace these with values from the CPU...
3242 * they shouldn't be hard-coded!
3243 */
3244
3245 -#define L1_CACHE_BYTES 16
3246 #define L1_CACHE_SHIFT 4
3247 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3248
3249 #endif /* __ASM_OPENRISC_CACHE_H */
3250 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3251 index 6c6defc..d30653d 100644
3252 --- a/arch/parisc/include/asm/atomic.h
3253 +++ b/arch/parisc/include/asm/atomic.h
3254 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3255
3256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3257
3258 +#define atomic64_read_unchecked(v) atomic64_read(v)
3259 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3260 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3261 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3262 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3263 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3264 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3265 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3266 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3267 +
3268 #endif /* !CONFIG_64BIT */
3269
3270
3271 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3272 index 47f11c7..3420df2 100644
3273 --- a/arch/parisc/include/asm/cache.h
3274 +++ b/arch/parisc/include/asm/cache.h
3275 @@ -5,6 +5,7 @@
3276 #ifndef __ARCH_PARISC_CACHE_H
3277 #define __ARCH_PARISC_CACHE_H
3278
3279 +#include <linux/const.h>
3280
3281 /*
3282 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3283 @@ -15,13 +16,13 @@
3284 * just ruin performance.
3285 */
3286 #ifdef CONFIG_PA20
3287 -#define L1_CACHE_BYTES 64
3288 #define L1_CACHE_SHIFT 6
3289 #else
3290 -#define L1_CACHE_BYTES 32
3291 #define L1_CACHE_SHIFT 5
3292 #endif
3293
3294 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3295 +
3296 #ifndef __ASSEMBLY__
3297
3298 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3299 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3300 index 19f6cb1..6c78cf2 100644
3301 --- a/arch/parisc/include/asm/elf.h
3302 +++ b/arch/parisc/include/asm/elf.h
3303 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3304
3305 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3306
3307 +#ifdef CONFIG_PAX_ASLR
3308 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3309 +
3310 +#define PAX_DELTA_MMAP_LEN 16
3311 +#define PAX_DELTA_STACK_LEN 16
3312 +#endif
3313 +
3314 /* This yields a mask that user programs can use to figure out what
3315 instruction set this CPU supports. This could be done in user space,
3316 but it's not easy, and we've already done it here. */
3317 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3318 index fc987a1..6e068ef 100644
3319 --- a/arch/parisc/include/asm/pgalloc.h
3320 +++ b/arch/parisc/include/asm/pgalloc.h
3321 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3323 }
3324
3325 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3326 +{
3327 + pgd_populate(mm, pgd, pmd);
3328 +}
3329 +
3330 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3331 {
3332 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3333 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3334 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3335 #define pmd_free(mm, x) do { } while (0)
3336 #define pgd_populate(mm, pmd, pte) BUG()
3337 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3338
3339 #endif
3340
3341 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3342 index ee99f23..802b0a1 100644
3343 --- a/arch/parisc/include/asm/pgtable.h
3344 +++ b/arch/parisc/include/asm/pgtable.h
3345 @@ -212,6 +212,17 @@ struct vm_area_struct;
3346 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3347 #define PAGE_COPY PAGE_EXECREAD
3348 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3349 +
3350 +#ifdef CONFIG_PAX_PAGEEXEC
3351 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3352 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3353 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3354 +#else
3355 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3356 +# define PAGE_COPY_NOEXEC PAGE_COPY
3357 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3358 +#endif
3359 +
3360 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3361 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3362 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3363 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3364 index 9ac0660..6ed15c4 100644
3365 --- a/arch/parisc/include/asm/uaccess.h
3366 +++ b/arch/parisc/include/asm/uaccess.h
3367 @@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3368 const void __user *from,
3369 unsigned long n)
3370 {
3371 - int sz = __compiletime_object_size(to);
3372 + size_t sz = __compiletime_object_size(to);
3373 int ret = -EFAULT;
3374
3375 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3376 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3377 ret = __copy_from_user(to, from, n);
3378 else
3379 copy_from_user_overflow();
3380 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3381 index 5e34ccf..672bc9c 100644
3382 --- a/arch/parisc/kernel/module.c
3383 +++ b/arch/parisc/kernel/module.c
3384 @@ -98,16 +98,38 @@
3385
3386 /* three functions to determine where in the module core
3387 * or init pieces the location is */
3388 +static inline int in_init_rx(struct module *me, void *loc)
3389 +{
3390 + return (loc >= me->module_init_rx &&
3391 + loc < (me->module_init_rx + me->init_size_rx));
3392 +}
3393 +
3394 +static inline int in_init_rw(struct module *me, void *loc)
3395 +{
3396 + return (loc >= me->module_init_rw &&
3397 + loc < (me->module_init_rw + me->init_size_rw));
3398 +}
3399 +
3400 static inline int in_init(struct module *me, void *loc)
3401 {
3402 - return (loc >= me->module_init &&
3403 - loc <= (me->module_init + me->init_size));
3404 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3405 +}
3406 +
3407 +static inline int in_core_rx(struct module *me, void *loc)
3408 +{
3409 + return (loc >= me->module_core_rx &&
3410 + loc < (me->module_core_rx + me->core_size_rx));
3411 +}
3412 +
3413 +static inline int in_core_rw(struct module *me, void *loc)
3414 +{
3415 + return (loc >= me->module_core_rw &&
3416 + loc < (me->module_core_rw + me->core_size_rw));
3417 }
3418
3419 static inline int in_core(struct module *me, void *loc)
3420 {
3421 - return (loc >= me->module_core &&
3422 - loc <= (me->module_core + me->core_size));
3423 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3424 }
3425
3426 static inline int in_local(struct module *me, void *loc)
3427 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3428 }
3429
3430 /* align things a bit */
3431 - me->core_size = ALIGN(me->core_size, 16);
3432 - me->arch.got_offset = me->core_size;
3433 - me->core_size += gots * sizeof(struct got_entry);
3434 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3435 + me->arch.got_offset = me->core_size_rw;
3436 + me->core_size_rw += gots * sizeof(struct got_entry);
3437
3438 - me->core_size = ALIGN(me->core_size, 16);
3439 - me->arch.fdesc_offset = me->core_size;
3440 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3441 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3442 + me->arch.fdesc_offset = me->core_size_rw;
3443 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3444
3445 me->arch.got_max = gots;
3446 me->arch.fdesc_max = fdescs;
3447 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3448
3449 BUG_ON(value == 0);
3450
3451 - got = me->module_core + me->arch.got_offset;
3452 + got = me->module_core_rw + me->arch.got_offset;
3453 for (i = 0; got[i].addr; i++)
3454 if (got[i].addr == value)
3455 goto out;
3456 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3457 #ifdef CONFIG_64BIT
3458 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3459 {
3460 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3461 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3462
3463 if (!value) {
3464 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3465 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3466
3467 /* Create new one */
3468 fdesc->addr = value;
3469 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3470 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3471 return (Elf_Addr)fdesc;
3472 }
3473 #endif /* CONFIG_64BIT */
3474 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3475
3476 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3477 end = table + sechdrs[me->arch.unwind_section].sh_size;
3478 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3479 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3480
3481 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3482 me->arch.unwind_section, table, end, gp);
3483 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3484 index c9b9322..02d8940 100644
3485 --- a/arch/parisc/kernel/sys_parisc.c
3486 +++ b/arch/parisc/kernel/sys_parisc.c
3487 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3488 /* At this point: (!vma || addr < vma->vm_end). */
3489 if (TASK_SIZE - len < addr)
3490 return -ENOMEM;
3491 - if (!vma || addr + len <= vma->vm_start)
3492 + if (check_heap_stack_gap(vma, addr, len))
3493 return addr;
3494 addr = vma->vm_end;
3495 }
3496 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3497 /* At this point: (!vma || addr < vma->vm_end). */
3498 if (TASK_SIZE - len < addr)
3499 return -ENOMEM;
3500 - if (!vma || addr + len <= vma->vm_start)
3501 + if (check_heap_stack_gap(vma, addr, len))
3502 return addr;
3503 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3504 if (addr < vma->vm_end) /* handle wraparound */
3505 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3506 if (flags & MAP_FIXED)
3507 return addr;
3508 if (!addr)
3509 - addr = TASK_UNMAPPED_BASE;
3510 + addr = current->mm->mmap_base;
3511
3512 if (filp) {
3513 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3514 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3515 index 45ba99f..8e22c33 100644
3516 --- a/arch/parisc/kernel/traps.c
3517 +++ b/arch/parisc/kernel/traps.c
3518 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3519
3520 down_read(&current->mm->mmap_sem);
3521 vma = find_vma(current->mm,regs->iaoq[0]);
3522 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3523 - && (vma->vm_flags & VM_EXEC)) {
3524 -
3525 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3526 fault_address = regs->iaoq[0];
3527 fault_space = regs->iasq[0];
3528
3529 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3530 index 18162ce..94de376 100644
3531 --- a/arch/parisc/mm/fault.c
3532 +++ b/arch/parisc/mm/fault.c
3533 @@ -15,6 +15,7 @@
3534 #include <linux/sched.h>
3535 #include <linux/interrupt.h>
3536 #include <linux/module.h>
3537 +#include <linux/unistd.h>
3538
3539 #include <asm/uaccess.h>
3540 #include <asm/traps.h>
3541 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3542 static unsigned long
3543 parisc_acctyp(unsigned long code, unsigned int inst)
3544 {
3545 - if (code == 6 || code == 16)
3546 + if (code == 6 || code == 7 || code == 16)
3547 return VM_EXEC;
3548
3549 switch (inst & 0xf0000000) {
3550 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3551 }
3552 #endif
3553
3554 +#ifdef CONFIG_PAX_PAGEEXEC
3555 +/*
3556 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3557 + *
3558 + * returns 1 when task should be killed
3559 + * 2 when rt_sigreturn trampoline was detected
3560 + * 3 when unpatched PLT trampoline was detected
3561 + */
3562 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3563 +{
3564 +
3565 +#ifdef CONFIG_PAX_EMUPLT
3566 + int err;
3567 +
3568 + do { /* PaX: unpatched PLT emulation */
3569 + unsigned int bl, depwi;
3570 +
3571 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3572 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3573 +
3574 + if (err)
3575 + break;
3576 +
3577 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3578 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3579 +
3580 + err = get_user(ldw, (unsigned int *)addr);
3581 + err |= get_user(bv, (unsigned int *)(addr+4));
3582 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3583 +
3584 + if (err)
3585 + break;
3586 +
3587 + if (ldw == 0x0E801096U &&
3588 + bv == 0xEAC0C000U &&
3589 + ldw2 == 0x0E881095U)
3590 + {
3591 + unsigned int resolver, map;
3592 +
3593 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3594 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3595 + if (err)
3596 + break;
3597 +
3598 + regs->gr[20] = instruction_pointer(regs)+8;
3599 + regs->gr[21] = map;
3600 + regs->gr[22] = resolver;
3601 + regs->iaoq[0] = resolver | 3UL;
3602 + regs->iaoq[1] = regs->iaoq[0] + 4;
3603 + return 3;
3604 + }
3605 + }
3606 + } while (0);
3607 +#endif
3608 +
3609 +#ifdef CONFIG_PAX_EMUTRAMP
3610 +
3611 +#ifndef CONFIG_PAX_EMUSIGRT
3612 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3613 + return 1;
3614 +#endif
3615 +
3616 + do { /* PaX: rt_sigreturn emulation */
3617 + unsigned int ldi1, ldi2, bel, nop;
3618 +
3619 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3620 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3621 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3622 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3623 +
3624 + if (err)
3625 + break;
3626 +
3627 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3628 + ldi2 == 0x3414015AU &&
3629 + bel == 0xE4008200U &&
3630 + nop == 0x08000240U)
3631 + {
3632 + regs->gr[25] = (ldi1 & 2) >> 1;
3633 + regs->gr[20] = __NR_rt_sigreturn;
3634 + regs->gr[31] = regs->iaoq[1] + 16;
3635 + regs->sr[0] = regs->iasq[1];
3636 + regs->iaoq[0] = 0x100UL;
3637 + regs->iaoq[1] = regs->iaoq[0] + 4;
3638 + regs->iasq[0] = regs->sr[2];
3639 + regs->iasq[1] = regs->sr[2];
3640 + return 2;
3641 + }
3642 + } while (0);
3643 +#endif
3644 +
3645 + return 1;
3646 +}
3647 +
3648 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3649 +{
3650 + unsigned long i;
3651 +
3652 + printk(KERN_ERR "PAX: bytes at PC: ");
3653 + for (i = 0; i < 5; i++) {
3654 + unsigned int c;
3655 + if (get_user(c, (unsigned int *)pc+i))
3656 + printk(KERN_CONT "???????? ");
3657 + else
3658 + printk(KERN_CONT "%08x ", c);
3659 + }
3660 + printk("\n");
3661 +}
3662 +#endif
3663 +
3664 int fixup_exception(struct pt_regs *regs)
3665 {
3666 const struct exception_table_entry *fix;
3667 @@ -192,8 +303,33 @@ good_area:
3668
3669 acc_type = parisc_acctyp(code,regs->iir);
3670
3671 - if ((vma->vm_flags & acc_type) != acc_type)
3672 + if ((vma->vm_flags & acc_type) != acc_type) {
3673 +
3674 +#ifdef CONFIG_PAX_PAGEEXEC
3675 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3676 + (address & ~3UL) == instruction_pointer(regs))
3677 + {
3678 + up_read(&mm->mmap_sem);
3679 + switch (pax_handle_fetch_fault(regs)) {
3680 +
3681 +#ifdef CONFIG_PAX_EMUPLT
3682 + case 3:
3683 + return;
3684 +#endif
3685 +
3686 +#ifdef CONFIG_PAX_EMUTRAMP
3687 + case 2:
3688 + return;
3689 +#endif
3690 +
3691 + }
3692 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3693 + do_group_exit(SIGKILL);
3694 + }
3695 +#endif
3696 +
3697 goto bad_area;
3698 + }
3699
3700 /*
3701 * If for any reason at all we couldn't handle the fault, make
3702 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3703 index da29032..f76c24c 100644
3704 --- a/arch/powerpc/include/asm/atomic.h
3705 +++ b/arch/powerpc/include/asm/atomic.h
3706 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3707 return t1;
3708 }
3709
3710 +#define atomic64_read_unchecked(v) atomic64_read(v)
3711 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3712 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3713 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3714 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3715 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3716 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3717 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3718 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3719 +
3720 #endif /* __powerpc64__ */
3721
3722 #endif /* __KERNEL__ */
3723 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3724 index 9e495c9..b6878e5 100644
3725 --- a/arch/powerpc/include/asm/cache.h
3726 +++ b/arch/powerpc/include/asm/cache.h
3727 @@ -3,6 +3,7 @@
3728
3729 #ifdef __KERNEL__
3730
3731 +#include <linux/const.h>
3732
3733 /* bytes per L1 cache line */
3734 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3735 @@ -22,7 +23,7 @@
3736 #define L1_CACHE_SHIFT 7
3737 #endif
3738
3739 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3740 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3741
3742 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3743
3744 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3745 index 3bf9cca..e7457d0 100644
3746 --- a/arch/powerpc/include/asm/elf.h
3747 +++ b/arch/powerpc/include/asm/elf.h
3748 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3749 the loader. We need to make sure that it is out of the way of the program
3750 that it will "exec", and that there is sufficient room for the brk. */
3751
3752 -extern unsigned long randomize_et_dyn(unsigned long base);
3753 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3754 +#define ELF_ET_DYN_BASE (0x20000000)
3755 +
3756 +#ifdef CONFIG_PAX_ASLR
3757 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3758 +
3759 +#ifdef __powerpc64__
3760 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3761 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3762 +#else
3763 +#define PAX_DELTA_MMAP_LEN 15
3764 +#define PAX_DELTA_STACK_LEN 15
3765 +#endif
3766 +#endif
3767
3768 /*
3769 * Our registers are always unsigned longs, whether we're a 32 bit
3770 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3771 (0x7ff >> (PAGE_SHIFT - 12)) : \
3772 (0x3ffff >> (PAGE_SHIFT - 12)))
3773
3774 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3775 -#define arch_randomize_brk arch_randomize_brk
3776 -
3777 #endif /* __KERNEL__ */
3778
3779 /*
3780 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3781 index 8196e9c..d83a9f3 100644
3782 --- a/arch/powerpc/include/asm/exec.h
3783 +++ b/arch/powerpc/include/asm/exec.h
3784 @@ -4,6 +4,6 @@
3785 #ifndef _ASM_POWERPC_EXEC_H
3786 #define _ASM_POWERPC_EXEC_H
3787
3788 -extern unsigned long arch_align_stack(unsigned long sp);
3789 +#define arch_align_stack(x) ((x) & ~0xfUL)
3790
3791 #endif /* _ASM_POWERPC_EXEC_H */
3792 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3793 index bca8fdc..61e9580 100644
3794 --- a/arch/powerpc/include/asm/kmap_types.h
3795 +++ b/arch/powerpc/include/asm/kmap_types.h
3796 @@ -27,6 +27,7 @@ enum km_type {
3797 KM_PPC_SYNC_PAGE,
3798 KM_PPC_SYNC_ICACHE,
3799 KM_KDB,
3800 + KM_CLEARPAGE,
3801 KM_TYPE_NR
3802 };
3803
3804 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3805 index d4a7f64..451de1c 100644
3806 --- a/arch/powerpc/include/asm/mman.h
3807 +++ b/arch/powerpc/include/asm/mman.h
3808 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3809 }
3810 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3811
3812 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3813 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3814 {
3815 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3816 }
3817 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3818 index f072e97..b436dee 100644
3819 --- a/arch/powerpc/include/asm/page.h
3820 +++ b/arch/powerpc/include/asm/page.h
3821 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3822 * and needs to be executable. This means the whole heap ends
3823 * up being executable.
3824 */
3825 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3826 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3827 +#define VM_DATA_DEFAULT_FLAGS32 \
3828 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3829 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3830
3831 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3832 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3833 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3834 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3835 #endif
3836
3837 +#define ktla_ktva(addr) (addr)
3838 +#define ktva_ktla(addr) (addr)
3839 +
3840 /*
3841 * Use the top bit of the higher-level page table entries to indicate whether
3842 * the entries we point to contain hugepages. This works because we know that
3843 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3844 index fed85e6..da5c71b 100644
3845 --- a/arch/powerpc/include/asm/page_64.h
3846 +++ b/arch/powerpc/include/asm/page_64.h
3847 @@ -146,15 +146,18 @@ do { \
3848 * stack by default, so in the absence of a PT_GNU_STACK program header
3849 * we turn execute permission off.
3850 */
3851 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3852 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3853 +#define VM_STACK_DEFAULT_FLAGS32 \
3854 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3855 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3856
3857 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3858 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3859
3860 +#ifndef CONFIG_PAX_PAGEEXEC
3861 #define VM_STACK_DEFAULT_FLAGS \
3862 (is_32bit_task() ? \
3863 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3864 +#endif
3865
3866 #include <asm-generic/getorder.h>
3867
3868 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3869 index 292725c..f87ae14 100644
3870 --- a/arch/powerpc/include/asm/pgalloc-64.h
3871 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3872 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3873 #ifndef CONFIG_PPC_64K_PAGES
3874
3875 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3876 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3877
3878 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3879 {
3880 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 pud_set(pud, (unsigned long)pmd);
3882 }
3883
3884 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3885 +{
3886 + pud_populate(mm, pud, pmd);
3887 +}
3888 +
3889 #define pmd_populate(mm, pmd, pte_page) \
3890 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3891 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3892 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3893 #else /* CONFIG_PPC_64K_PAGES */
3894
3895 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3896 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3897
3898 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3899 pte_t *pte)
3900 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3901 index 2e0e411..7899c68 100644
3902 --- a/arch/powerpc/include/asm/pgtable.h
3903 +++ b/arch/powerpc/include/asm/pgtable.h
3904 @@ -2,6 +2,7 @@
3905 #define _ASM_POWERPC_PGTABLE_H
3906 #ifdef __KERNEL__
3907
3908 +#include <linux/const.h>
3909 #ifndef __ASSEMBLY__
3910 #include <asm/processor.h> /* For TASK_SIZE */
3911 #include <asm/mmu.h>
3912 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3913 index 4aad413..85d86bf 100644
3914 --- a/arch/powerpc/include/asm/pte-hash32.h
3915 +++ b/arch/powerpc/include/asm/pte-hash32.h
3916 @@ -21,6 +21,7 @@
3917 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3918 #define _PAGE_USER 0x004 /* usermode access allowed */
3919 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3920 +#define _PAGE_EXEC _PAGE_GUARDED
3921 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3922 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3923 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3924 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3925 index 9d7f0fb..a28fe69 100644
3926 --- a/arch/powerpc/include/asm/reg.h
3927 +++ b/arch/powerpc/include/asm/reg.h
3928 @@ -212,6 +212,7 @@
3929 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3930 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3931 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3932 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3933 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3934 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3935 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3936 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3937 index 4a741c7..c8162227b 100644
3938 --- a/arch/powerpc/include/asm/thread_info.h
3939 +++ b/arch/powerpc/include/asm/thread_info.h
3940 @@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3941 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3942 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3943 #define TIF_SINGLESTEP 8 /* singlestepping active */
3944 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3945 #define TIF_SECCOMP 10 /* secure computing */
3946 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3947 #define TIF_NOERROR 12 /* Force successful syscall return */
3948 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3949 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3950 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3951 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3952 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3953
3954 /* as above, but as bit values */
3955 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3956 @@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3957 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3958 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3959 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3960 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3961 +
3962 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3963 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3964 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3965 + _TIF_GRSEC_SETXID)
3966
3967 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3968 _TIF_NOTIFY_RESUME)
3969 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3970 index bd0fb84..a42a14b 100644
3971 --- a/arch/powerpc/include/asm/uaccess.h
3972 +++ b/arch/powerpc/include/asm/uaccess.h
3973 @@ -13,6 +13,8 @@
3974 #define VERIFY_READ 0
3975 #define VERIFY_WRITE 1
3976
3977 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3978 +
3979 /*
3980 * The fs value determines whether argument validity checking should be
3981 * performed or not. If get_fs() == USER_DS, checking is performed, with
3982 @@ -327,52 +329,6 @@ do { \
3983 extern unsigned long __copy_tofrom_user(void __user *to,
3984 const void __user *from, unsigned long size);
3985
3986 -#ifndef __powerpc64__
3987 -
3988 -static inline unsigned long copy_from_user(void *to,
3989 - const void __user *from, unsigned long n)
3990 -{
3991 - unsigned long over;
3992 -
3993 - if (access_ok(VERIFY_READ, from, n))
3994 - return __copy_tofrom_user((__force void __user *)to, from, n);
3995 - if ((unsigned long)from < TASK_SIZE) {
3996 - over = (unsigned long)from + n - TASK_SIZE;
3997 - return __copy_tofrom_user((__force void __user *)to, from,
3998 - n - over) + over;
3999 - }
4000 - return n;
4001 -}
4002 -
4003 -static inline unsigned long copy_to_user(void __user *to,
4004 - const void *from, unsigned long n)
4005 -{
4006 - unsigned long over;
4007 -
4008 - if (access_ok(VERIFY_WRITE, to, n))
4009 - return __copy_tofrom_user(to, (__force void __user *)from, n);
4010 - if ((unsigned long)to < TASK_SIZE) {
4011 - over = (unsigned long)to + n - TASK_SIZE;
4012 - return __copy_tofrom_user(to, (__force void __user *)from,
4013 - n - over) + over;
4014 - }
4015 - return n;
4016 -}
4017 -
4018 -#else /* __powerpc64__ */
4019 -
4020 -#define __copy_in_user(to, from, size) \
4021 - __copy_tofrom_user((to), (from), (size))
4022 -
4023 -extern unsigned long copy_from_user(void *to, const void __user *from,
4024 - unsigned long n);
4025 -extern unsigned long copy_to_user(void __user *to, const void *from,
4026 - unsigned long n);
4027 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
4028 - unsigned long n);
4029 -
4030 -#endif /* __powerpc64__ */
4031 -
4032 static inline unsigned long __copy_from_user_inatomic(void *to,
4033 const void __user *from, unsigned long n)
4034 {
4035 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4036 if (ret == 0)
4037 return 0;
4038 }
4039 +
4040 + if (!__builtin_constant_p(n))
4041 + check_object_size(to, n, false);
4042 +
4043 return __copy_tofrom_user((__force void __user *)to, from, n);
4044 }
4045
4046 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4047 if (ret == 0)
4048 return 0;
4049 }
4050 +
4051 + if (!__builtin_constant_p(n))
4052 + check_object_size(from, n, true);
4053 +
4054 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4055 }
4056
4057 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4058 return __copy_to_user_inatomic(to, from, size);
4059 }
4060
4061 +#ifndef __powerpc64__
4062 +
4063 +static inline unsigned long __must_check copy_from_user(void *to,
4064 + const void __user *from, unsigned long n)
4065 +{
4066 + unsigned long over;
4067 +
4068 + if ((long)n < 0)
4069 + return n;
4070 +
4071 + if (access_ok(VERIFY_READ, from, n)) {
4072 + if (!__builtin_constant_p(n))
4073 + check_object_size(to, n, false);
4074 + return __copy_tofrom_user((__force void __user *)to, from, n);
4075 + }
4076 + if ((unsigned long)from < TASK_SIZE) {
4077 + over = (unsigned long)from + n - TASK_SIZE;
4078 + if (!__builtin_constant_p(n - over))
4079 + check_object_size(to, n - over, false);
4080 + return __copy_tofrom_user((__force void __user *)to, from,
4081 + n - over) + over;
4082 + }
4083 + return n;
4084 +}
4085 +
4086 +static inline unsigned long __must_check copy_to_user(void __user *to,
4087 + const void *from, unsigned long n)
4088 +{
4089 + unsigned long over;
4090 +
4091 + if ((long)n < 0)
4092 + return n;
4093 +
4094 + if (access_ok(VERIFY_WRITE, to, n)) {
4095 + if (!__builtin_constant_p(n))
4096 + check_object_size(from, n, true);
4097 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4098 + }
4099 + if ((unsigned long)to < TASK_SIZE) {
4100 + over = (unsigned long)to + n - TASK_SIZE;
4101 + if (!__builtin_constant_p(n - over))
4102 + check_object_size(from, n - over, true);
4103 + return __copy_tofrom_user(to, (__force void __user *)from,
4104 + n - over) + over;
4105 + }
4106 + return n;
4107 +}
4108 +
4109 +#else /* __powerpc64__ */
4110 +
4111 +#define __copy_in_user(to, from, size) \
4112 + __copy_tofrom_user((to), (from), (size))
4113 +
4114 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4115 +{
4116 + if ((long)n < 0 || n > INT_MAX)
4117 + return n;
4118 +
4119 + if (!__builtin_constant_p(n))
4120 + check_object_size(to, n, false);
4121 +
4122 + if (likely(access_ok(VERIFY_READ, from, n)))
4123 + n = __copy_from_user(to, from, n);
4124 + else
4125 + memset(to, 0, n);
4126 + return n;
4127 +}
4128 +
4129 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4130 +{
4131 + if ((long)n < 0 || n > INT_MAX)
4132 + return n;
4133 +
4134 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4135 + if (!__builtin_constant_p(n))
4136 + check_object_size(from, n, true);
4137 + n = __copy_to_user(to, from, n);
4138 + }
4139 + return n;
4140 +}
4141 +
4142 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4143 + unsigned long n);
4144 +
4145 +#endif /* __powerpc64__ */
4146 +
4147 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4148
4149 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4150 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4151 index 7215cc2..a9730c1 100644
4152 --- a/arch/powerpc/kernel/exceptions-64e.S
4153 +++ b/arch/powerpc/kernel/exceptions-64e.S
4154 @@ -661,6 +661,7 @@ storage_fault_common:
4155 std r14,_DAR(r1)
4156 std r15,_DSISR(r1)
4157 addi r3,r1,STACK_FRAME_OVERHEAD
4158 + bl .save_nvgprs
4159 mr r4,r14
4160 mr r5,r15
4161 ld r14,PACA_EXGEN+EX_R14(r13)
4162 @@ -669,8 +670,7 @@ storage_fault_common:
4163 cmpdi r3,0
4164 bne- 1f
4165 b .ret_from_except_lite
4166 -1: bl .save_nvgprs
4167 - mr r5,r3
4168 +1: mr r5,r3
4169 addi r3,r1,STACK_FRAME_OVERHEAD
4170 ld r4,_DAR(r1)
4171 bl .bad_page_fault
4172 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4173 index 8f880bc..c5bd2f3 100644
4174 --- a/arch/powerpc/kernel/exceptions-64s.S
4175 +++ b/arch/powerpc/kernel/exceptions-64s.S
4176 @@ -890,10 +890,10 @@ handle_page_fault:
4177 11: ld r4,_DAR(r1)
4178 ld r5,_DSISR(r1)
4179 addi r3,r1,STACK_FRAME_OVERHEAD
4180 + bl .save_nvgprs
4181 bl .do_page_fault
4182 cmpdi r3,0
4183 beq+ 12f
4184 - bl .save_nvgprs
4185 mr r5,r3
4186 addi r3,r1,STACK_FRAME_OVERHEAD
4187 lwz r4,_DAR(r1)
4188 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4189 index 2e3200c..72095ce 100644
4190 --- a/arch/powerpc/kernel/module_32.c
4191 +++ b/arch/powerpc/kernel/module_32.c
4192 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4193 me->arch.core_plt_section = i;
4194 }
4195 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4196 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4197 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4198 return -ENOEXEC;
4199 }
4200
4201 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4202
4203 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4204 /* Init, or core PLT? */
4205 - if (location >= mod->module_core
4206 - && location < mod->module_core + mod->core_size)
4207 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4208 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4209 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4210 - else
4211 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4212 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4213 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4214 + else {
4215 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4216 + return ~0UL;
4217 + }
4218
4219 /* Find this entry, or if that fails, the next avail. entry */
4220 while (entry->jump[0]) {
4221 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4222 index 4937c96..70714b7 100644
4223 --- a/arch/powerpc/kernel/process.c
4224 +++ b/arch/powerpc/kernel/process.c
4225 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4226 * Lookup NIP late so we have the best change of getting the
4227 * above info out without failing
4228 */
4229 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4230 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4231 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4232 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4233 #endif
4234 show_stack(current, (unsigned long *) regs->gpr[1]);
4235 if (!user_mode(regs))
4236 @@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4237 newsp = stack[0];
4238 ip = stack[STACK_FRAME_LR_SAVE];
4239 if (!firstframe || ip != lr) {
4240 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4241 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4242 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4243 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4244 - printk(" (%pS)",
4245 + printk(" (%pA)",
4246 (void *)current->ret_stack[curr_frame].ret);
4247 curr_frame--;
4248 }
4249 @@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4250 struct pt_regs *regs = (struct pt_regs *)
4251 (sp + STACK_FRAME_OVERHEAD);
4252 lr = regs->link;
4253 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4254 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4255 regs->trap, (void *)regs->nip, (void *)lr);
4256 firstframe = 1;
4257 }
4258 @@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4259 }
4260
4261 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4262 -
4263 -unsigned long arch_align_stack(unsigned long sp)
4264 -{
4265 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4266 - sp -= get_random_int() & ~PAGE_MASK;
4267 - return sp & ~0xf;
4268 -}
4269 -
4270 -static inline unsigned long brk_rnd(void)
4271 -{
4272 - unsigned long rnd = 0;
4273 -
4274 - /* 8MB for 32bit, 1GB for 64bit */
4275 - if (is_32bit_task())
4276 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4277 - else
4278 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4279 -
4280 - return rnd << PAGE_SHIFT;
4281 -}
4282 -
4283 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4284 -{
4285 - unsigned long base = mm->brk;
4286 - unsigned long ret;
4287 -
4288 -#ifdef CONFIG_PPC_STD_MMU_64
4289 - /*
4290 - * If we are using 1TB segments and we are allowed to randomise
4291 - * the heap, we can put it above 1TB so it is backed by a 1TB
4292 - * segment. Otherwise the heap will be in the bottom 1TB
4293 - * which always uses 256MB segments and this may result in a
4294 - * performance penalty.
4295 - */
4296 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4297 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4298 -#endif
4299 -
4300 - ret = PAGE_ALIGN(base + brk_rnd());
4301 -
4302 - if (ret < mm->brk)
4303 - return mm->brk;
4304 -
4305 - return ret;
4306 -}
4307 -
4308 -unsigned long randomize_et_dyn(unsigned long base)
4309 -{
4310 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4311 -
4312 - if (ret < base)
4313 - return base;
4314 -
4315 - return ret;
4316 -}
4317 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4318 index 8d8e028..c2aeb50 100644
4319 --- a/arch/powerpc/kernel/ptrace.c
4320 +++ b/arch/powerpc/kernel/ptrace.c
4321 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4322 return ret;
4323 }
4324
4325 +#ifdef CONFIG_GRKERNSEC_SETXID
4326 +extern void gr_delayed_cred_worker(void);
4327 +#endif
4328 +
4329 /*
4330 * We must return the syscall number to actually look up in the table.
4331 * This can be -1L to skip running any syscall at all.
4332 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4333
4334 secure_computing(regs->gpr[0]);
4335
4336 +#ifdef CONFIG_GRKERNSEC_SETXID
4337 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4338 + gr_delayed_cred_worker();
4339 +#endif
4340 +
4341 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4342 tracehook_report_syscall_entry(regs))
4343 /*
4344 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4345 {
4346 int step;
4347
4348 +#ifdef CONFIG_GRKERNSEC_SETXID
4349 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4350 + gr_delayed_cred_worker();
4351 +#endif
4352 +
4353 audit_syscall_exit(regs);
4354
4355 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4356 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4357 index 45eb998..0cb36bc 100644
4358 --- a/arch/powerpc/kernel/signal_32.c
4359 +++ b/arch/powerpc/kernel/signal_32.c
4360 @@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4361 /* Save user registers on the stack */
4362 frame = &rt_sf->uc.uc_mcontext;
4363 addr = frame;
4364 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4365 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4366 if (save_user_regs(regs, frame, 0, 1))
4367 goto badframe;
4368 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4369 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4370 index 2692efd..6673d2e 100644
4371 --- a/arch/powerpc/kernel/signal_64.c
4372 +++ b/arch/powerpc/kernel/signal_64.c
4373 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4374 current->thread.fpscr.val = 0;
4375
4376 /* Set up to return from userspace. */
4377 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4378 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4379 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4380 } else {
4381 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4382 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4383 index 1589723..cefe690 100644
4384 --- a/arch/powerpc/kernel/traps.c
4385 +++ b/arch/powerpc/kernel/traps.c
4386 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4387 return flags;
4388 }
4389
4390 +extern void gr_handle_kernel_exploit(void);
4391 +
4392 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4393 int signr)
4394 {
4395 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4396 panic("Fatal exception in interrupt");
4397 if (panic_on_oops)
4398 panic("Fatal exception");
4399 +
4400 + gr_handle_kernel_exploit();
4401 +
4402 do_exit(signr);
4403 }
4404
4405 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4406 index 9eb5b9b..e45498a 100644
4407 --- a/arch/powerpc/kernel/vdso.c
4408 +++ b/arch/powerpc/kernel/vdso.c
4409 @@ -34,6 +34,7 @@
4410 #include <asm/firmware.h>
4411 #include <asm/vdso.h>
4412 #include <asm/vdso_datapage.h>
4413 +#include <asm/mman.h>
4414
4415 #include "setup.h"
4416
4417 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4418 vdso_base = VDSO32_MBASE;
4419 #endif
4420
4421 - current->mm->context.vdso_base = 0;
4422 + current->mm->context.vdso_base = ~0UL;
4423
4424 /* vDSO has a problem and was disabled, just don't "enable" it for the
4425 * process
4426 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4427 vdso_base = get_unmapped_area(NULL, vdso_base,
4428 (vdso_pages << PAGE_SHIFT) +
4429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4430 - 0, 0);
4431 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4432 if (IS_ERR_VALUE(vdso_base)) {
4433 rc = vdso_base;
4434 goto fail_mmapsem;
4435 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4436 index 5eea6f3..5d10396 100644
4437 --- a/arch/powerpc/lib/usercopy_64.c
4438 +++ b/arch/powerpc/lib/usercopy_64.c
4439 @@ -9,22 +9,6 @@
4440 #include <linux/module.h>
4441 #include <asm/uaccess.h>
4442
4443 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4444 -{
4445 - if (likely(access_ok(VERIFY_READ, from, n)))
4446 - n = __copy_from_user(to, from, n);
4447 - else
4448 - memset(to, 0, n);
4449 - return n;
4450 -}
4451 -
4452 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4453 -{
4454 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4455 - n = __copy_to_user(to, from, n);
4456 - return n;
4457 -}
4458 -
4459 unsigned long copy_in_user(void __user *to, const void __user *from,
4460 unsigned long n)
4461 {
4462 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4463 return n;
4464 }
4465
4466 -EXPORT_SYMBOL(copy_from_user);
4467 -EXPORT_SYMBOL(copy_to_user);
4468 EXPORT_SYMBOL(copy_in_user);
4469
4470 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4471 index 08ffcf5..a0ab912 100644
4472 --- a/arch/powerpc/mm/fault.c
4473 +++ b/arch/powerpc/mm/fault.c
4474 @@ -32,6 +32,10 @@
4475 #include <linux/perf_event.h>
4476 #include <linux/magic.h>
4477 #include <linux/ratelimit.h>
4478 +#include <linux/slab.h>
4479 +#include <linux/pagemap.h>
4480 +#include <linux/compiler.h>
4481 +#include <linux/unistd.h>
4482
4483 #include <asm/firmware.h>
4484 #include <asm/page.h>
4485 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4486 }
4487 #endif
4488
4489 +#ifdef CONFIG_PAX_PAGEEXEC
4490 +/*
4491 + * PaX: decide what to do with offenders (regs->nip = fault address)
4492 + *
4493 + * returns 1 when task should be killed
4494 + */
4495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4496 +{
4497 + return 1;
4498 +}
4499 +
4500 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4501 +{
4502 + unsigned long i;
4503 +
4504 + printk(KERN_ERR "PAX: bytes at PC: ");
4505 + for (i = 0; i < 5; i++) {
4506 + unsigned int c;
4507 + if (get_user(c, (unsigned int __user *)pc+i))
4508 + printk(KERN_CONT "???????? ");
4509 + else
4510 + printk(KERN_CONT "%08x ", c);
4511 + }
4512 + printk("\n");
4513 +}
4514 +#endif
4515 +
4516 /*
4517 * Check whether the instruction at regs->nip is a store using
4518 * an update addressing form which will update r1.
4519 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4520 * indicate errors in DSISR but can validly be set in SRR1.
4521 */
4522 if (trap == 0x400)
4523 - error_code &= 0x48200000;
4524 + error_code &= 0x58200000;
4525 else
4526 is_write = error_code & DSISR_ISSTORE;
4527 #else
4528 @@ -366,7 +397,7 @@ good_area:
4529 * "undefined". Of those that can be set, this is the only
4530 * one which seems bad.
4531 */
4532 - if (error_code & 0x10000000)
4533 + if (error_code & DSISR_GUARDED)
4534 /* Guarded storage error. */
4535 goto bad_area;
4536 #endif /* CONFIG_8xx */
4537 @@ -381,7 +412,7 @@ good_area:
4538 * processors use the same I/D cache coherency mechanism
4539 * as embedded.
4540 */
4541 - if (error_code & DSISR_PROTFAULT)
4542 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4543 goto bad_area;
4544 #endif /* CONFIG_PPC_STD_MMU */
4545
4546 @@ -463,6 +494,23 @@ bad_area:
4547 bad_area_nosemaphore:
4548 /* User mode accesses cause a SIGSEGV */
4549 if (user_mode(regs)) {
4550 +
4551 +#ifdef CONFIG_PAX_PAGEEXEC
4552 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4553 +#ifdef CONFIG_PPC_STD_MMU
4554 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4555 +#else
4556 + if (is_exec && regs->nip == address) {
4557 +#endif
4558 + switch (pax_handle_fetch_fault(regs)) {
4559 + }
4560 +
4561 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4562 + do_group_exit(SIGKILL);
4563 + }
4564 + }
4565 +#endif
4566 +
4567 _exception(SIGSEGV, regs, code, address);
4568 return 0;
4569 }
4570 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4571 index 67a42ed..1c7210c 100644
4572 --- a/arch/powerpc/mm/mmap_64.c
4573 +++ b/arch/powerpc/mm/mmap_64.c
4574 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4575 */
4576 if (mmap_is_legacy()) {
4577 mm->mmap_base = TASK_UNMAPPED_BASE;
4578 +
4579 +#ifdef CONFIG_PAX_RANDMMAP
4580 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4581 + mm->mmap_base += mm->delta_mmap;
4582 +#endif
4583 +
4584 mm->get_unmapped_area = arch_get_unmapped_area;
4585 mm->unmap_area = arch_unmap_area;
4586 } else {
4587 mm->mmap_base = mmap_base();
4588 +
4589 +#ifdef CONFIG_PAX_RANDMMAP
4590 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4591 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4592 +#endif
4593 +
4594 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4595 mm->unmap_area = arch_unmap_area_topdown;
4596 }
4597 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4598 index 73709f7..6b90313 100644
4599 --- a/arch/powerpc/mm/slice.c
4600 +++ b/arch/powerpc/mm/slice.c
4601 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4602 if ((mm->task_size - len) < addr)
4603 return 0;
4604 vma = find_vma(mm, addr);
4605 - return (!vma || (addr + len) <= vma->vm_start);
4606 + return check_heap_stack_gap(vma, addr, len);
4607 }
4608
4609 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4610 @@ -256,7 +256,7 @@ full_search:
4611 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4612 continue;
4613 }
4614 - if (!vma || addr + len <= vma->vm_start) {
4615 + if (check_heap_stack_gap(vma, addr, len)) {
4616 /*
4617 * Remember the place where we stopped the search:
4618 */
4619 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4620 }
4621 }
4622
4623 - addr = mm->mmap_base;
4624 - while (addr > len) {
4625 + if (mm->mmap_base < len)
4626 + addr = -ENOMEM;
4627 + else
4628 + addr = mm->mmap_base - len;
4629 +
4630 + while (!IS_ERR_VALUE(addr)) {
4631 /* Go down by chunk size */
4632 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4633 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4634
4635 /* Check for hit with different page size */
4636 mask = slice_range_to_mask(addr, len);
4637 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4638 * return with success:
4639 */
4640 vma = find_vma(mm, addr);
4641 - if (!vma || (addr + len) <= vma->vm_start) {
4642 + if (check_heap_stack_gap(vma, addr, len)) {
4643 /* remember the address as a hint for next time */
4644 if (use_cache)
4645 mm->free_area_cache = addr;
4646 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4647 mm->cached_hole_size = vma->vm_start - addr;
4648
4649 /* try just below the current vma->vm_start */
4650 - addr = vma->vm_start;
4651 + addr = skip_heap_stack_gap(vma, len);
4652 }
4653
4654 /*
4655 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4656 if (fixed && addr > (mm->task_size - len))
4657 return -EINVAL;
4658
4659 +#ifdef CONFIG_PAX_RANDMMAP
4660 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4661 + addr = 0;
4662 +#endif
4663 +
4664 /* If hint, make sure it matches our alignment restrictions */
4665 if (!fixed && addr) {
4666 addr = _ALIGN_UP(addr, 1ul << pshift);
4667 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4668 index 748347b..81bc6c7 100644
4669 --- a/arch/s390/include/asm/atomic.h
4670 +++ b/arch/s390/include/asm/atomic.h
4671 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4672 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4673 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4674
4675 +#define atomic64_read_unchecked(v) atomic64_read(v)
4676 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4677 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4678 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4679 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4680 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4681 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4682 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4683 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4684 +
4685 #define smp_mb__before_atomic_dec() smp_mb()
4686 #define smp_mb__after_atomic_dec() smp_mb()
4687 #define smp_mb__before_atomic_inc() smp_mb()
4688 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4689 index 2a30d5a..5e5586f 100644
4690 --- a/arch/s390/include/asm/cache.h
4691 +++ b/arch/s390/include/asm/cache.h
4692 @@ -11,8 +11,10 @@
4693 #ifndef __ARCH_S390_CACHE_H
4694 #define __ARCH_S390_CACHE_H
4695
4696 -#define L1_CACHE_BYTES 256
4697 +#include <linux/const.h>
4698 +
4699 #define L1_CACHE_SHIFT 8
4700 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4701 #define NET_SKB_PAD 32
4702
4703 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4704 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4705 index c4ee39f..352881b 100644
4706 --- a/arch/s390/include/asm/elf.h
4707 +++ b/arch/s390/include/asm/elf.h
4708 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4709 the loader. We need to make sure that it is out of the way of the program
4710 that it will "exec", and that there is sufficient room for the brk. */
4711
4712 -extern unsigned long randomize_et_dyn(unsigned long base);
4713 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4714 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4715 +
4716 +#ifdef CONFIG_PAX_ASLR
4717 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4718 +
4719 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4720 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4721 +#endif
4722
4723 /* This yields a mask that user programs can use to figure out what
4724 instruction set this CPU supports. */
4725 @@ -210,7 +216,4 @@ struct linux_binprm;
4726 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4727 int arch_setup_additional_pages(struct linux_binprm *, int);
4728
4729 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4730 -#define arch_randomize_brk arch_randomize_brk
4731 -
4732 #endif
4733 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4734 index c4a93d6..4d2a9b4 100644
4735 --- a/arch/s390/include/asm/exec.h
4736 +++ b/arch/s390/include/asm/exec.h
4737 @@ -7,6 +7,6 @@
4738 #ifndef __ASM_EXEC_H
4739 #define __ASM_EXEC_H
4740
4741 -extern unsigned long arch_align_stack(unsigned long sp);
4742 +#define arch_align_stack(x) ((x) & ~0xfUL)
4743
4744 #endif /* __ASM_EXEC_H */
4745 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4746 index 8f2cada..43072c1 100644
4747 --- a/arch/s390/include/asm/uaccess.h
4748 +++ b/arch/s390/include/asm/uaccess.h
4749 @@ -236,6 +236,10 @@ static inline unsigned long __must_check
4750 copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 might_fault();
4753 +
4754 + if ((long)n < 0)
4755 + return n;
4756 +
4757 if (access_ok(VERIFY_WRITE, to, n))
4758 n = __copy_to_user(to, from, n);
4759 return n;
4760 @@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4761 static inline unsigned long __must_check
4762 __copy_from_user(void *to, const void __user *from, unsigned long n)
4763 {
4764 + if ((long)n < 0)
4765 + return n;
4766 +
4767 if (__builtin_constant_p(n) && (n <= 256))
4768 return uaccess.copy_from_user_small(n, from, to);
4769 else
4770 @@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4771 static inline unsigned long __must_check
4772 copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774 - unsigned int sz = __compiletime_object_size(to);
4775 + size_t sz = __compiletime_object_size(to);
4776
4777 might_fault();
4778 - if (unlikely(sz != -1 && sz < n)) {
4779 +
4780 + if ((long)n < 0)
4781 + return n;
4782 +
4783 + if (unlikely(sz != (size_t)-1 && sz < n)) {
4784 copy_from_user_overflow();
4785 return n;
4786 }
4787 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4788 index dfcb343..eda788a 100644
4789 --- a/arch/s390/kernel/module.c
4790 +++ b/arch/s390/kernel/module.c
4791 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4792
4793 /* Increase core size by size of got & plt and set start
4794 offsets for got and plt. */
4795 - me->core_size = ALIGN(me->core_size, 4);
4796 - me->arch.got_offset = me->core_size;
4797 - me->core_size += me->arch.got_size;
4798 - me->arch.plt_offset = me->core_size;
4799 - me->core_size += me->arch.plt_size;
4800 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4801 + me->arch.got_offset = me->core_size_rw;
4802 + me->core_size_rw += me->arch.got_size;
4803 + me->arch.plt_offset = me->core_size_rx;
4804 + me->core_size_rx += me->arch.plt_size;
4805 return 0;
4806 }
4807
4808 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 if (info->got_initialized == 0) {
4810 Elf_Addr *gotent;
4811
4812 - gotent = me->module_core + me->arch.got_offset +
4813 + gotent = me->module_core_rw + me->arch.got_offset +
4814 info->got_offset;
4815 *gotent = val;
4816 info->got_initialized = 1;
4817 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4818 else if (r_type == R_390_GOTENT ||
4819 r_type == R_390_GOTPLTENT)
4820 *(unsigned int *) loc =
4821 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4822 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4823 else if (r_type == R_390_GOT64 ||
4824 r_type == R_390_GOTPLT64)
4825 *(unsigned long *) loc = val;
4826 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4827 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4828 if (info->plt_initialized == 0) {
4829 unsigned int *ip;
4830 - ip = me->module_core + me->arch.plt_offset +
4831 + ip = me->module_core_rx + me->arch.plt_offset +
4832 info->plt_offset;
4833 #ifndef CONFIG_64BIT
4834 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4835 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4836 val - loc + 0xffffUL < 0x1ffffeUL) ||
4837 (r_type == R_390_PLT32DBL &&
4838 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4839 - val = (Elf_Addr) me->module_core +
4840 + val = (Elf_Addr) me->module_core_rx +
4841 me->arch.plt_offset +
4842 info->plt_offset;
4843 val += rela->r_addend - loc;
4844 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4845 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4846 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4847 val = val + rela->r_addend -
4848 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4849 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4850 if (r_type == R_390_GOTOFF16)
4851 *(unsigned short *) loc = val;
4852 else if (r_type == R_390_GOTOFF32)
4853 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4854 break;
4855 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4856 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4857 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4858 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4859 rela->r_addend - loc;
4860 if (r_type == R_390_GOTPC)
4861 *(unsigned int *) loc = val;
4862 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4863 index 60055ce..ee4b252 100644
4864 --- a/arch/s390/kernel/process.c
4865 +++ b/arch/s390/kernel/process.c
4866 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4867 }
4868 return 0;
4869 }
4870 -
4871 -unsigned long arch_align_stack(unsigned long sp)
4872 -{
4873 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4874 - sp -= get_random_int() & ~PAGE_MASK;
4875 - return sp & ~0xf;
4876 -}
4877 -
4878 -static inline unsigned long brk_rnd(void)
4879 -{
4880 - /* 8MB for 32bit, 1GB for 64bit */
4881 - if (is_32bit_task())
4882 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4883 - else
4884 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4885 -}
4886 -
4887 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4888 -{
4889 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4890 -
4891 - if (ret < mm->brk)
4892 - return mm->brk;
4893 - return ret;
4894 -}
4895 -
4896 -unsigned long randomize_et_dyn(unsigned long base)
4897 -{
4898 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4899 -
4900 - if (!(current->flags & PF_RANDOMIZE))
4901 - return base;
4902 - if (ret < base)
4903 - return base;
4904 - return ret;
4905 -}
4906 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4907 index 2857c48..d047481 100644
4908 --- a/arch/s390/mm/mmap.c
4909 +++ b/arch/s390/mm/mmap.c
4910 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4911 */
4912 if (mmap_is_legacy()) {
4913 mm->mmap_base = TASK_UNMAPPED_BASE;
4914 +
4915 +#ifdef CONFIG_PAX_RANDMMAP
4916 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4917 + mm->mmap_base += mm->delta_mmap;
4918 +#endif
4919 +
4920 mm->get_unmapped_area = arch_get_unmapped_area;
4921 mm->unmap_area = arch_unmap_area;
4922 } else {
4923 mm->mmap_base = mmap_base();
4924 +
4925 +#ifdef CONFIG_PAX_RANDMMAP
4926 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4927 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4928 +#endif
4929 +
4930 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4931 mm->unmap_area = arch_unmap_area_topdown;
4932 }
4933 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4934 */
4935 if (mmap_is_legacy()) {
4936 mm->mmap_base = TASK_UNMAPPED_BASE;
4937 +
4938 +#ifdef CONFIG_PAX_RANDMMAP
4939 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4940 + mm->mmap_base += mm->delta_mmap;
4941 +#endif
4942 +
4943 mm->get_unmapped_area = s390_get_unmapped_area;
4944 mm->unmap_area = arch_unmap_area;
4945 } else {
4946 mm->mmap_base = mmap_base();
4947 +
4948 +#ifdef CONFIG_PAX_RANDMMAP
4949 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4950 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4951 +#endif
4952 +
4953 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4954 mm->unmap_area = arch_unmap_area_topdown;
4955 }
4956 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4957 index ae3d59f..f65f075 100644
4958 --- a/arch/score/include/asm/cache.h
4959 +++ b/arch/score/include/asm/cache.h
4960 @@ -1,7 +1,9 @@
4961 #ifndef _ASM_SCORE_CACHE_H
4962 #define _ASM_SCORE_CACHE_H
4963
4964 +#include <linux/const.h>
4965 +
4966 #define L1_CACHE_SHIFT 4
4967 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4968 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4969
4970 #endif /* _ASM_SCORE_CACHE_H */
4971 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4972 index f9f3cd5..58ff438 100644
4973 --- a/arch/score/include/asm/exec.h
4974 +++ b/arch/score/include/asm/exec.h
4975 @@ -1,6 +1,6 @@
4976 #ifndef _ASM_SCORE_EXEC_H
4977 #define _ASM_SCORE_EXEC_H
4978
4979 -extern unsigned long arch_align_stack(unsigned long sp);
4980 +#define arch_align_stack(x) (x)
4981
4982 #endif /* _ASM_SCORE_EXEC_H */
4983 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4984 index 2707023..1c2a3b7 100644
4985 --- a/arch/score/kernel/process.c
4986 +++ b/arch/score/kernel/process.c
4987 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4988
4989 return task_pt_regs(task)->cp0_epc;
4990 }
4991 -
4992 -unsigned long arch_align_stack(unsigned long sp)
4993 -{
4994 - return sp;
4995 -}
4996 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4997 index ef9e555..331bd29 100644
4998 --- a/arch/sh/include/asm/cache.h
4999 +++ b/arch/sh/include/asm/cache.h
5000 @@ -9,10 +9,11 @@
5001 #define __ASM_SH_CACHE_H
5002 #ifdef __KERNEL__
5003
5004 +#include <linux/const.h>
5005 #include <linux/init.h>
5006 #include <cpu/cache.h>
5007
5008 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5009 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5010
5011 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5012
5013 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5014 index afeb710..d1d1289 100644
5015 --- a/arch/sh/mm/mmap.c
5016 +++ b/arch/sh/mm/mmap.c
5017 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5018 addr = PAGE_ALIGN(addr);
5019
5020 vma = find_vma(mm, addr);
5021 - if (TASK_SIZE - len >= addr &&
5022 - (!vma || addr + len <= vma->vm_start))
5023 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5024 return addr;
5025 }
5026
5027 @@ -106,7 +105,7 @@ full_search:
5028 }
5029 return -ENOMEM;
5030 }
5031 - if (likely(!vma || addr + len <= vma->vm_start)) {
5032 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5033 /*
5034 * Remember the place where we stopped the search:
5035 */
5036 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5037 addr = PAGE_ALIGN(addr);
5038
5039 vma = find_vma(mm, addr);
5040 - if (TASK_SIZE - len >= addr &&
5041 - (!vma || addr + len <= vma->vm_start))
5042 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5043 return addr;
5044 }
5045
5046 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5047 /* make sure it can fit in the remaining address space */
5048 if (likely(addr > len)) {
5049 vma = find_vma(mm, addr-len);
5050 - if (!vma || addr <= vma->vm_start) {
5051 + if (check_heap_stack_gap(vma, addr - len, len)) {
5052 /* remember the address as a hint for next time */
5053 return (mm->free_area_cache = addr-len);
5054 }
5055 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5056 if (unlikely(mm->mmap_base < len))
5057 goto bottomup;
5058
5059 - addr = mm->mmap_base-len;
5060 - if (do_colour_align)
5061 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5062 + addr = mm->mmap_base - len;
5063
5064 do {
5065 + if (do_colour_align)
5066 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5067 /*
5068 * Lookup failure means no vma is above this address,
5069 * else if new region fits below vma->vm_start,
5070 * return with success:
5071 */
5072 vma = find_vma(mm, addr);
5073 - if (likely(!vma || addr+len <= vma->vm_start)) {
5074 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5075 /* remember the address as a hint for next time */
5076 return (mm->free_area_cache = addr);
5077 }
5078 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5079 mm->cached_hole_size = vma->vm_start - addr;
5080
5081 /* try just below the current vma->vm_start */
5082 - addr = vma->vm_start-len;
5083 - if (do_colour_align)
5084 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5085 - } while (likely(len < vma->vm_start));
5086 + addr = skip_heap_stack_gap(vma, len);
5087 + } while (!IS_ERR_VALUE(addr));
5088
5089 bottomup:
5090 /*
5091 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5092 index eddcfb3..b117d90 100644
5093 --- a/arch/sparc/Makefile
5094 +++ b/arch/sparc/Makefile
5095 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5096 # Export what is needed by arch/sparc/boot/Makefile
5097 export VMLINUX_INIT VMLINUX_MAIN
5098 VMLINUX_INIT := $(head-y) $(init-y)
5099 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5100 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5101 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5102 VMLINUX_MAIN += $(drivers-y) $(net-y)
5103
5104 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5105 index ce35a1c..2e7b8f9 100644
5106 --- a/arch/sparc/include/asm/atomic_64.h
5107 +++ b/arch/sparc/include/asm/atomic_64.h
5108 @@ -14,18 +14,40 @@
5109 #define ATOMIC64_INIT(i) { (i) }
5110
5111 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5112 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5113 +{
5114 + return v->counter;
5115 +}
5116 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5117 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5118 +{
5119 + return v->counter;
5120 +}
5121
5122 #define atomic_set(v, i) (((v)->counter) = i)
5123 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5124 +{
5125 + v->counter = i;
5126 +}
5127 #define atomic64_set(v, i) (((v)->counter) = i)
5128 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5129 +{
5130 + v->counter = i;
5131 +}
5132
5133 extern void atomic_add(int, atomic_t *);
5134 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5135 extern void atomic64_add(long, atomic64_t *);
5136 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5137 extern void atomic_sub(int, atomic_t *);
5138 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5139 extern void atomic64_sub(long, atomic64_t *);
5140 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5141
5142 extern int atomic_add_ret(int, atomic_t *);
5143 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5144 extern long atomic64_add_ret(long, atomic64_t *);
5145 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5146 extern int atomic_sub_ret(int, atomic_t *);
5147 extern long atomic64_sub_ret(long, atomic64_t *);
5148
5149 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5150 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5151
5152 #define atomic_inc_return(v) atomic_add_ret(1, v)
5153 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5154 +{
5155 + return atomic_add_ret_unchecked(1, v);
5156 +}
5157 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5158 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5159 +{
5160 + return atomic64_add_ret_unchecked(1, v);
5161 +}
5162
5163 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5164 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5165
5166 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5167 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5168 +{
5169 + return atomic_add_ret_unchecked(i, v);
5170 +}
5171 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5172 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5173 +{
5174 + return atomic64_add_ret_unchecked(i, v);
5175 +}
5176
5177 /*
5178 * atomic_inc_and_test - increment and test
5179 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5180 * other cases.
5181 */
5182 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5183 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5184 +{
5185 + return atomic_inc_return_unchecked(v) == 0;
5186 +}
5187 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5188
5189 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5190 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5191 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5192
5193 #define atomic_inc(v) atomic_add(1, v)
5194 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5195 +{
5196 + atomic_add_unchecked(1, v);
5197 +}
5198 #define atomic64_inc(v) atomic64_add(1, v)
5199 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5200 +{
5201 + atomic64_add_unchecked(1, v);
5202 +}
5203
5204 #define atomic_dec(v) atomic_sub(1, v)
5205 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5206 +{
5207 + atomic_sub_unchecked(1, v);
5208 +}
5209 #define atomic64_dec(v) atomic64_sub(1, v)
5210 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5211 +{
5212 + atomic64_sub_unchecked(1, v);
5213 +}
5214
5215 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5216 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5217
5218 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5219 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5220 +{
5221 + return cmpxchg(&v->counter, old, new);
5222 +}
5223 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5224 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5225 +{
5226 + return xchg(&v->counter, new);
5227 +}
5228
5229 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5230 {
5231 - int c, old;
5232 + int c, old, new;
5233 c = atomic_read(v);
5234 for (;;) {
5235 - if (unlikely(c == (u)))
5236 + if (unlikely(c == u))
5237 break;
5238 - old = atomic_cmpxchg((v), c, c + (a));
5239 +
5240 + asm volatile("addcc %2, %0, %0\n"
5241 +
5242 +#ifdef CONFIG_PAX_REFCOUNT
5243 + "tvs %%icc, 6\n"
5244 +#endif
5245 +
5246 + : "=r" (new)
5247 + : "0" (c), "ir" (a)
5248 + : "cc");
5249 +
5250 + old = atomic_cmpxchg(v, c, new);
5251 if (likely(old == c))
5252 break;
5253 c = old;
5254 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5255 #define atomic64_cmpxchg(v, o, n) \
5256 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5257 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5258 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5259 +{
5260 + return xchg(&v->counter, new);
5261 +}
5262
5263 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5264 {
5265 - long c, old;
5266 + long c, old, new;
5267 c = atomic64_read(v);
5268 for (;;) {
5269 - if (unlikely(c == (u)))
5270 + if (unlikely(c == u))
5271 break;
5272 - old = atomic64_cmpxchg((v), c, c + (a));
5273 +
5274 + asm volatile("addcc %2, %0, %0\n"
5275 +
5276 +#ifdef CONFIG_PAX_REFCOUNT
5277 + "tvs %%xcc, 6\n"
5278 +#endif
5279 +
5280 + : "=r" (new)
5281 + : "0" (c), "ir" (a)
5282 + : "cc");
5283 +
5284 + old = atomic64_cmpxchg(v, c, new);
5285 if (likely(old == c))
5286 break;
5287 c = old;
5288 }
5289 - return c != (u);
5290 + return c != u;
5291 }
5292
5293 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5294 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5295 index 69358b5..9d0d492 100644
5296 --- a/arch/sparc/include/asm/cache.h
5297 +++ b/arch/sparc/include/asm/cache.h
5298 @@ -7,10 +7,12 @@
5299 #ifndef _SPARC_CACHE_H
5300 #define _SPARC_CACHE_H
5301
5302 +#include <linux/const.h>
5303 +
5304 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5305
5306 #define L1_CACHE_SHIFT 5
5307 -#define L1_CACHE_BYTES 32
5308 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5309
5310 #ifdef CONFIG_SPARC32
5311 #define SMP_CACHE_BYTES_SHIFT 5
5312 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5313 index 4269ca6..e3da77f 100644
5314 --- a/arch/sparc/include/asm/elf_32.h
5315 +++ b/arch/sparc/include/asm/elf_32.h
5316 @@ -114,6 +114,13 @@ typedef struct {
5317
5318 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5319
5320 +#ifdef CONFIG_PAX_ASLR
5321 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5322 +
5323 +#define PAX_DELTA_MMAP_LEN 16
5324 +#define PAX_DELTA_STACK_LEN 16
5325 +#endif
5326 +
5327 /* This yields a mask that user programs can use to figure out what
5328 instruction set this cpu supports. This can NOT be done in userspace
5329 on Sparc. */
5330 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5331 index 7df8b7f..4946269 100644
5332 --- a/arch/sparc/include/asm/elf_64.h
5333 +++ b/arch/sparc/include/asm/elf_64.h
5334 @@ -180,6 +180,13 @@ typedef struct {
5335 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5336 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5337
5338 +#ifdef CONFIG_PAX_ASLR
5339 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5340 +
5341 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5342 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5343 +#endif
5344 +
5345 extern unsigned long sparc64_elf_hwcap;
5346 #define ELF_HWCAP sparc64_elf_hwcap
5347
5348 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5349 index ca2b344..c6084f89 100644
5350 --- a/arch/sparc/include/asm/pgalloc_32.h
5351 +++ b/arch/sparc/include/asm/pgalloc_32.h
5352 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5353 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5354 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5355 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5356 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5357
5358 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5359 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5360 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5361 index 40b2d7a..22a665b 100644
5362 --- a/arch/sparc/include/asm/pgalloc_64.h
5363 +++ b/arch/sparc/include/asm/pgalloc_64.h
5364 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5365 }
5366
5367 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5368 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5371 {
5372 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5373 index 3d71018..48a11c5 100644
5374 --- a/arch/sparc/include/asm/pgtable_32.h
5375 +++ b/arch/sparc/include/asm/pgtable_32.h
5376 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5377 BTFIXUPDEF_INT(page_none)
5378 BTFIXUPDEF_INT(page_copy)
5379 BTFIXUPDEF_INT(page_readonly)
5380 +
5381 +#ifdef CONFIG_PAX_PAGEEXEC
5382 +BTFIXUPDEF_INT(page_shared_noexec)
5383 +BTFIXUPDEF_INT(page_copy_noexec)
5384 +BTFIXUPDEF_INT(page_readonly_noexec)
5385 +#endif
5386 +
5387 BTFIXUPDEF_INT(page_kernel)
5388
5389 #define PMD_SHIFT SUN4C_PMD_SHIFT
5390 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5391 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5392 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5393
5394 +#ifdef CONFIG_PAX_PAGEEXEC
5395 +extern pgprot_t PAGE_SHARED_NOEXEC;
5396 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5397 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5398 +#else
5399 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5400 +# define PAGE_COPY_NOEXEC PAGE_COPY
5401 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5402 +#endif
5403 +
5404 extern unsigned long page_kernel;
5405
5406 #ifdef MODULE
5407 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5408 index f6ae2b2..b03ffc7 100644
5409 --- a/arch/sparc/include/asm/pgtsrmmu.h
5410 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5411 @@ -115,6 +115,13 @@
5412 SRMMU_EXEC | SRMMU_REF)
5413 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5414 SRMMU_EXEC | SRMMU_REF)
5415 +
5416 +#ifdef CONFIG_PAX_PAGEEXEC
5417 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5418 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5419 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5420 +#endif
5421 +
5422 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5423 SRMMU_DIRTY | SRMMU_REF)
5424
5425 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5426 index 9689176..63c18ea 100644
5427 --- a/arch/sparc/include/asm/spinlock_64.h
5428 +++ b/arch/sparc/include/asm/spinlock_64.h
5429 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5430
5431 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5432
5433 -static void inline arch_read_lock(arch_rwlock_t *lock)
5434 +static inline void arch_read_lock(arch_rwlock_t *lock)
5435 {
5436 unsigned long tmp1, tmp2;
5437
5438 __asm__ __volatile__ (
5439 "1: ldsw [%2], %0\n"
5440 " brlz,pn %0, 2f\n"
5441 -"4: add %0, 1, %1\n"
5442 +"4: addcc %0, 1, %1\n"
5443 +
5444 +#ifdef CONFIG_PAX_REFCOUNT
5445 +" tvs %%icc, 6\n"
5446 +#endif
5447 +
5448 " cas [%2], %0, %1\n"
5449 " cmp %0, %1\n"
5450 " bne,pn %%icc, 1b\n"
5451 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5452 " .previous"
5453 : "=&r" (tmp1), "=&r" (tmp2)
5454 : "r" (lock)
5455 - : "memory");
5456 + : "memory", "cc");
5457 }
5458
5459 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5460 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5461 {
5462 int tmp1, tmp2;
5463
5464 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5465 "1: ldsw [%2], %0\n"
5466 " brlz,a,pn %0, 2f\n"
5467 " mov 0, %0\n"
5468 -" add %0, 1, %1\n"
5469 +" addcc %0, 1, %1\n"
5470 +
5471 +#ifdef CONFIG_PAX_REFCOUNT
5472 +" tvs %%icc, 6\n"
5473 +#endif
5474 +
5475 " cas [%2], %0, %1\n"
5476 " cmp %0, %1\n"
5477 " bne,pn %%icc, 1b\n"
5478 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 return tmp1;
5480 }
5481
5482 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5483 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5484 {
5485 unsigned long tmp1, tmp2;
5486
5487 __asm__ __volatile__(
5488 "1: lduw [%2], %0\n"
5489 -" sub %0, 1, %1\n"
5490 +" subcc %0, 1, %1\n"
5491 +
5492 +#ifdef CONFIG_PAX_REFCOUNT
5493 +" tvs %%icc, 6\n"
5494 +#endif
5495 +
5496 " cas [%2], %0, %1\n"
5497 " cmp %0, %1\n"
5498 " bne,pn %%xcc, 1b\n"
5499 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5500 : "memory");
5501 }
5502
5503 -static void inline arch_write_lock(arch_rwlock_t *lock)
5504 +static inline void arch_write_lock(arch_rwlock_t *lock)
5505 {
5506 unsigned long mask, tmp1, tmp2;
5507
5508 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5509 : "memory");
5510 }
5511
5512 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5513 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5514 {
5515 __asm__ __volatile__(
5516 " stw %%g0, [%0]"
5517 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5518 : "memory");
5519 }
5520
5521 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5522 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5523 {
5524 unsigned long mask, tmp1, tmp2, result;
5525
5526 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5527 index c2a1080..21ed218 100644
5528 --- a/arch/sparc/include/asm/thread_info_32.h
5529 +++ b/arch/sparc/include/asm/thread_info_32.h
5530 @@ -50,6 +50,8 @@ struct thread_info {
5531 unsigned long w_saved;
5532
5533 struct restart_block restart_block;
5534 +
5535 + unsigned long lowest_stack;
5536 };
5537
5538 /*
5539 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5540 index 01d057f..13a7d2f 100644
5541 --- a/arch/sparc/include/asm/thread_info_64.h
5542 +++ b/arch/sparc/include/asm/thread_info_64.h
5543 @@ -63,6 +63,8 @@ struct thread_info {
5544 struct pt_regs *kern_una_regs;
5545 unsigned int kern_una_insn;
5546
5547 + unsigned long lowest_stack;
5548 +
5549 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5550 };
5551
5552 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5553 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5554 /* flag bit 6 is available */
5555 #define TIF_32BIT 7 /* 32-bit binary */
5556 -/* flag bit 8 is available */
5557 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5558 #define TIF_SECCOMP 9 /* secure computing */
5559 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5560 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5561 +
5562 /* NOTE: Thread flags >= 12 should be ones we have no interest
5563 * in using in assembly, else we can't use the mask as
5564 * an immediate value in instructions such as andcc.
5565 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5566 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5567 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5568 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5569 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5570
5571 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5572 _TIF_DO_NOTIFY_RESUME_MASK | \
5573 _TIF_NEED_RESCHED)
5574 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5575
5576 +#define _TIF_WORK_SYSCALL \
5577 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5578 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5579 +
5580 +
5581 /*
5582 * Thread-synchronous status.
5583 *
5584 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5585 index e88fbe5..96b0ce5 100644
5586 --- a/arch/sparc/include/asm/uaccess.h
5587 +++ b/arch/sparc/include/asm/uaccess.h
5588 @@ -1,5 +1,13 @@
5589 #ifndef ___ASM_SPARC_UACCESS_H
5590 #define ___ASM_SPARC_UACCESS_H
5591 +
5592 +#ifdef __KERNEL__
5593 +#ifndef __ASSEMBLY__
5594 +#include <linux/types.h>
5595 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5596 +#endif
5597 +#endif
5598 +
5599 #if defined(__sparc__) && defined(__arch64__)
5600 #include <asm/uaccess_64.h>
5601 #else
5602 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5603 index 8303ac4..07f333d 100644
5604 --- a/arch/sparc/include/asm/uaccess_32.h
5605 +++ b/arch/sparc/include/asm/uaccess_32.h
5606 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5607
5608 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5609 {
5610 - if (n && __access_ok((unsigned long) to, n))
5611 + if ((long)n < 0)
5612 + return n;
5613 +
5614 + if (n && __access_ok((unsigned long) to, n)) {
5615 + if (!__builtin_constant_p(n))
5616 + check_object_size(from, n, true);
5617 return __copy_user(to, (__force void __user *) from, n);
5618 - else
5619 + } else
5620 return n;
5621 }
5622
5623 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5624 {
5625 + if ((long)n < 0)
5626 + return n;
5627 +
5628 + if (!__builtin_constant_p(n))
5629 + check_object_size(from, n, true);
5630 +
5631 return __copy_user(to, (__force void __user *) from, n);
5632 }
5633
5634 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5635 {
5636 - if (n && __access_ok((unsigned long) from, n))
5637 + if ((long)n < 0)
5638 + return n;
5639 +
5640 + if (n && __access_ok((unsigned long) from, n)) {
5641 + if (!__builtin_constant_p(n))
5642 + check_object_size(to, n, false);
5643 return __copy_user((__force void __user *) to, from, n);
5644 - else
5645 + } else
5646 return n;
5647 }
5648
5649 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5650 {
5651 + if ((long)n < 0)
5652 + return n;
5653 +
5654 return __copy_user((__force void __user *) to, from, n);
5655 }
5656
5657 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5658 index a1091afb..380228e 100644
5659 --- a/arch/sparc/include/asm/uaccess_64.h
5660 +++ b/arch/sparc/include/asm/uaccess_64.h
5661 @@ -10,6 +10,7 @@
5662 #include <linux/compiler.h>
5663 #include <linux/string.h>
5664 #include <linux/thread_info.h>
5665 +#include <linux/kernel.h>
5666 #include <asm/asi.h>
5667 #include <asm/spitfire.h>
5668 #include <asm-generic/uaccess-unaligned.h>
5669 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5670 static inline unsigned long __must_check
5671 copy_from_user(void *to, const void __user *from, unsigned long size)
5672 {
5673 - unsigned long ret = ___copy_from_user(to, from, size);
5674 + unsigned long ret;
5675
5676 + if ((long)size < 0 || size > INT_MAX)
5677 + return size;
5678 +
5679 + if (!__builtin_constant_p(size))
5680 + check_object_size(to, size, false);
5681 +
5682 + ret = ___copy_from_user(to, from, size);
5683 if (unlikely(ret))
5684 ret = copy_from_user_fixup(to, from, size);
5685
5686 @@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5687 static inline unsigned long __must_check
5688 copy_to_user(void __user *to, const void *from, unsigned long size)
5689 {
5690 - unsigned long ret = ___copy_to_user(to, from, size);
5691 + unsigned long ret;
5692
5693 + if ((long)size < 0 || size > INT_MAX)
5694 + return size;
5695 +
5696 + if (!__builtin_constant_p(size))
5697 + check_object_size(from, size, true);
5698 +
5699 + ret = ___copy_to_user(to, from, size);
5700 if (unlikely(ret))
5701 ret = copy_to_user_fixup(to, from, size);
5702 return ret;
5703 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5704 index cb85458..e063f17 100644
5705 --- a/arch/sparc/kernel/Makefile
5706 +++ b/arch/sparc/kernel/Makefile
5707 @@ -3,7 +3,7 @@
5708 #
5709
5710 asflags-y := -ansi
5711 -ccflags-y := -Werror
5712 +#ccflags-y := -Werror
5713
5714 extra-y := head_$(BITS).o
5715 extra-y += init_task.o
5716 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5717 index efa0754..74b03fe 100644
5718 --- a/arch/sparc/kernel/process_32.c
5719 +++ b/arch/sparc/kernel/process_32.c
5720 @@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5721 rw->ins[4], rw->ins[5],
5722 rw->ins[6],
5723 rw->ins[7]);
5724 - printk("%pS\n", (void *) rw->ins[7]);
5725 + printk("%pA\n", (void *) rw->ins[7]);
5726 rw = (struct reg_window32 *) rw->ins[6];
5727 }
5728 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5729 @@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5730
5731 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5732 r->psr, r->pc, r->npc, r->y, print_tainted());
5733 - printk("PC: <%pS>\n", (void *) r->pc);
5734 + printk("PC: <%pA>\n", (void *) r->pc);
5735 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5736 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5737 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5738 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5739 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5740 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5741 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5742 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5743
5744 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5745 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5746 @@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5747 rw = (struct reg_window32 *) fp;
5748 pc = rw->ins[7];
5749 printk("[%08lx : ", pc);
5750 - printk("%pS ] ", (void *) pc);
5751 + printk("%pA ] ", (void *) pc);
5752 fp = rw->ins[6];
5753 } while (++count < 16);
5754 printk("\n");
5755 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5756 index aff0c72..9067b39 100644
5757 --- a/arch/sparc/kernel/process_64.c
5758 +++ b/arch/sparc/kernel/process_64.c
5759 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5760 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5761 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5762 if (regs->tstate & TSTATE_PRIV)
5763 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5764 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5765 }
5766
5767 void show_regs(struct pt_regs *regs)
5768 {
5769 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5770 regs->tpc, regs->tnpc, regs->y, print_tainted());
5771 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5772 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5773 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5774 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5775 regs->u_regs[3]);
5776 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5777 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5778 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5779 regs->u_regs[15]);
5780 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5781 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5782 show_regwindow(regs);
5783 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5784 }
5785 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5786 ((tp && tp->task) ? tp->task->pid : -1));
5787
5788 if (gp->tstate & TSTATE_PRIV) {
5789 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5790 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5791 (void *) gp->tpc,
5792 (void *) gp->o7,
5793 (void *) gp->i7,
5794 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5795 index 6f97c07..b1300ec 100644
5796 --- a/arch/sparc/kernel/ptrace_64.c
5797 +++ b/arch/sparc/kernel/ptrace_64.c
5798 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5799 return ret;
5800 }
5801
5802 +#ifdef CONFIG_GRKERNSEC_SETXID
5803 +extern void gr_delayed_cred_worker(void);
5804 +#endif
5805 +
5806 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5807 {
5808 int ret = 0;
5809 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5810 /* do the secure computing check first */
5811 secure_computing(regs->u_regs[UREG_G1]);
5812
5813 +#ifdef CONFIG_GRKERNSEC_SETXID
5814 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5815 + gr_delayed_cred_worker();
5816 +#endif
5817 +
5818 if (test_thread_flag(TIF_SYSCALL_TRACE))
5819 ret = tracehook_report_syscall_entry(regs);
5820
5821 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5822
5823 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5824 {
5825 +#ifdef CONFIG_GRKERNSEC_SETXID
5826 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5827 + gr_delayed_cred_worker();
5828 +#endif
5829 +
5830 audit_syscall_exit(regs);
5831
5832 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5833 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5834 index 42b282f..28ce9f2 100644
5835 --- a/arch/sparc/kernel/sys_sparc_32.c
5836 +++ b/arch/sparc/kernel/sys_sparc_32.c
5837 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5838 if (ARCH_SUN4C && len > 0x20000000)
5839 return -ENOMEM;
5840 if (!addr)
5841 - addr = TASK_UNMAPPED_BASE;
5842 + addr = current->mm->mmap_base;
5843
5844 if (flags & MAP_SHARED)
5845 addr = COLOUR_ALIGN(addr);
5846 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5847 }
5848 if (TASK_SIZE - PAGE_SIZE - len < addr)
5849 return -ENOMEM;
5850 - if (!vmm || addr + len <= vmm->vm_start)
5851 + if (check_heap_stack_gap(vmm, addr, len))
5852 return addr;
5853 addr = vmm->vm_end;
5854 if (flags & MAP_SHARED)
5855 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5856 index 3ee51f1..2ba4913 100644
5857 --- a/arch/sparc/kernel/sys_sparc_64.c
5858 +++ b/arch/sparc/kernel/sys_sparc_64.c
5859 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5860 /* We do not accept a shared mapping if it would violate
5861 * cache aliasing constraints.
5862 */
5863 - if ((flags & MAP_SHARED) &&
5864 + if ((filp || (flags & MAP_SHARED)) &&
5865 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5866 return -EINVAL;
5867 return addr;
5868 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5869 if (filp || (flags & MAP_SHARED))
5870 do_color_align = 1;
5871
5872 +#ifdef CONFIG_PAX_RANDMMAP
5873 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5874 +#endif
5875 +
5876 if (addr) {
5877 if (do_color_align)
5878 addr = COLOUR_ALIGN(addr, pgoff);
5879 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5880 addr = PAGE_ALIGN(addr);
5881
5882 vma = find_vma(mm, addr);
5883 - if (task_size - len >= addr &&
5884 - (!vma || addr + len <= vma->vm_start))
5885 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5886 return addr;
5887 }
5888
5889 if (len > mm->cached_hole_size) {
5890 - start_addr = addr = mm->free_area_cache;
5891 + start_addr = addr = mm->free_area_cache;
5892 } else {
5893 - start_addr = addr = TASK_UNMAPPED_BASE;
5894 + start_addr = addr = mm->mmap_base;
5895 mm->cached_hole_size = 0;
5896 }
5897
5898 @@ -174,14 +177,14 @@ full_search:
5899 vma = find_vma(mm, VA_EXCLUDE_END);
5900 }
5901 if (unlikely(task_size < addr)) {
5902 - if (start_addr != TASK_UNMAPPED_BASE) {
5903 - start_addr = addr = TASK_UNMAPPED_BASE;
5904 + if (start_addr != mm->mmap_base) {
5905 + start_addr = addr = mm->mmap_base;
5906 mm->cached_hole_size = 0;
5907 goto full_search;
5908 }
5909 return -ENOMEM;
5910 }
5911 - if (likely(!vma || addr + len <= vma->vm_start)) {
5912 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5913 /*
5914 * Remember the place where we stopped the search:
5915 */
5916 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5917 /* We do not accept a shared mapping if it would violate
5918 * cache aliasing constraints.
5919 */
5920 - if ((flags & MAP_SHARED) &&
5921 + if ((filp || (flags & MAP_SHARED)) &&
5922 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5923 return -EINVAL;
5924 return addr;
5925 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5926 addr = PAGE_ALIGN(addr);
5927
5928 vma = find_vma(mm, addr);
5929 - if (task_size - len >= addr &&
5930 - (!vma || addr + len <= vma->vm_start))
5931 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5932 return addr;
5933 }
5934
5935 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939 - if (!vma || addr <= vma->vm_start) {
5940 + if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948 - addr = mm->mmap_base-len;
5949 - if (do_color_align)
5950 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5951 + addr = mm->mmap_base - len;
5952
5953 do {
5954 + if (do_color_align)
5955 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 /*
5957 * Lookup failure means no vma is above this address,
5958 * else if new region fits below vma->vm_start,
5959 * return with success:
5960 */
5961 vma = find_vma(mm, addr);
5962 - if (likely(!vma || addr+len <= vma->vm_start)) {
5963 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5964 /* remember the address as a hint for next time */
5965 return (mm->free_area_cache = addr);
5966 }
5967 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5968 mm->cached_hole_size = vma->vm_start - addr;
5969
5970 /* try just below the current vma->vm_start */
5971 - addr = vma->vm_start-len;
5972 - if (do_color_align)
5973 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5974 - } while (likely(len < vma->vm_start));
5975 + addr = skip_heap_stack_gap(vma, len);
5976 + } while (!IS_ERR_VALUE(addr));
5977
5978 bottomup:
5979 /*
5980 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5981 gap == RLIM_INFINITY ||
5982 sysctl_legacy_va_layout) {
5983 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5984 +
5985 +#ifdef CONFIG_PAX_RANDMMAP
5986 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5987 + mm->mmap_base += mm->delta_mmap;
5988 +#endif
5989 +
5990 mm->get_unmapped_area = arch_get_unmapped_area;
5991 mm->unmap_area = arch_unmap_area;
5992 } else {
5993 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5994 gap = (task_size / 6 * 5);
5995
5996 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5997 +
5998 +#ifdef CONFIG_PAX_RANDMMAP
5999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6000 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6001 +#endif
6002 +
6003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6004 mm->unmap_area = arch_unmap_area_topdown;
6005 }
6006 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6007 index 1d7e274..b39c527 100644
6008 --- a/arch/sparc/kernel/syscalls.S
6009 +++ b/arch/sparc/kernel/syscalls.S
6010 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6011 #endif
6012 .align 32
6013 1: ldx [%g6 + TI_FLAGS], %l5
6014 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6015 + andcc %l5, _TIF_WORK_SYSCALL, %g0
6016 be,pt %icc, rtrap
6017 nop
6018 call syscall_trace_leave
6019 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
6020
6021 srl %i5, 0, %o5 ! IEU1
6022 srl %i2, 0, %o2 ! IEU0 Group
6023 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6024 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6025 bne,pn %icc, linux_syscall_trace32 ! CTI
6026 mov %i0, %l5 ! IEU1
6027 call %l7 ! CTI Group brk forced
6028 @@ -202,7 +202,7 @@ linux_sparc_syscall:
6029
6030 mov %i3, %o3 ! IEU1
6031 mov %i4, %o4 ! IEU0 Group
6032 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6033 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6034 bne,pn %icc, linux_syscall_trace ! CTI Group
6035 mov %i0, %l5 ! IEU0
6036 2: call %l7 ! CTI Group brk forced
6037 @@ -226,7 +226,7 @@ ret_sys_call:
6038
6039 cmp %o0, -ERESTART_RESTARTBLOCK
6040 bgeu,pn %xcc, 1f
6041 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6042 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6043 80:
6044 /* System call success, clear Carry condition code. */
6045 andn %g3, %g2, %g3
6046 @@ -241,7 +241,7 @@ ret_sys_call:
6047 /* System call failure, set Carry condition code.
6048 * Also, get abs(errno) to return to the process.
6049 */
6050 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6051 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6052 sub %g0, %o0, %o0
6053 or %g3, %g2, %g3
6054 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6055 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6056 index d2de213..6b22bc3 100644
6057 --- a/arch/sparc/kernel/traps_32.c
6058 +++ b/arch/sparc/kernel/traps_32.c
6059 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6060 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6061 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6062
6063 +extern void gr_handle_kernel_exploit(void);
6064 +
6065 void die_if_kernel(char *str, struct pt_regs *regs)
6066 {
6067 static int die_counter;
6068 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6069 count++ < 30 &&
6070 (((unsigned long) rw) >= PAGE_OFFSET) &&
6071 !(((unsigned long) rw) & 0x7)) {
6072 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6073 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6074 (void *) rw->ins[7]);
6075 rw = (struct reg_window32 *)rw->ins[6];
6076 }
6077 }
6078 printk("Instruction DUMP:");
6079 instruction_dump ((unsigned long *) regs->pc);
6080 - if(regs->psr & PSR_PS)
6081 + if(regs->psr & PSR_PS) {
6082 + gr_handle_kernel_exploit();
6083 do_exit(SIGKILL);
6084 + }
6085 do_exit(SIGSEGV);
6086 }
6087
6088 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6089 index c72fdf5..743a344 100644
6090 --- a/arch/sparc/kernel/traps_64.c
6091 +++ b/arch/sparc/kernel/traps_64.c
6092 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6093 i + 1,
6094 p->trapstack[i].tstate, p->trapstack[i].tpc,
6095 p->trapstack[i].tnpc, p->trapstack[i].tt);
6096 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6097 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6098 }
6099 }
6100
6101 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6102
6103 lvl -= 0x100;
6104 if (regs->tstate & TSTATE_PRIV) {
6105 +
6106 +#ifdef CONFIG_PAX_REFCOUNT
6107 + if (lvl == 6)
6108 + pax_report_refcount_overflow(regs);
6109 +#endif
6110 +
6111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6112 die_if_kernel(buffer, regs);
6113 }
6114 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6115 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6116 {
6117 char buffer[32];
6118 -
6119 +
6120 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6121 0, lvl, SIGTRAP) == NOTIFY_STOP)
6122 return;
6123
6124 +#ifdef CONFIG_PAX_REFCOUNT
6125 + if (lvl == 6)
6126 + pax_report_refcount_overflow(regs);
6127 +#endif
6128 +
6129 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6130
6131 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6132 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6133 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6134 printk("%s" "ERROR(%d): ",
6135 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6136 - printk("TPC<%pS>\n", (void *) regs->tpc);
6137 + printk("TPC<%pA>\n", (void *) regs->tpc);
6138 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6139 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6140 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6141 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6142 smp_processor_id(),
6143 (type & 0x1) ? 'I' : 'D',
6144 regs->tpc);
6145 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6146 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6147 panic("Irrecoverable Cheetah+ parity error.");
6148 }
6149
6150 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6151 smp_processor_id(),
6152 (type & 0x1) ? 'I' : 'D',
6153 regs->tpc);
6154 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6155 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6156 }
6157
6158 struct sun4v_error_entry {
6159 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6160
6161 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6162 regs->tpc, tl);
6163 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6164 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6165 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6166 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6167 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6168 (void *) regs->u_regs[UREG_I7]);
6169 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6170 "pte[%lx] error[%lx]\n",
6171 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6172
6173 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6174 regs->tpc, tl);
6175 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6176 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6177 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6178 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6179 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6180 (void *) regs->u_regs[UREG_I7]);
6181 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6182 "pte[%lx] error[%lx]\n",
6183 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6184 fp = (unsigned long)sf->fp + STACK_BIAS;
6185 }
6186
6187 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6188 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6189 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6190 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6191 int index = tsk->curr_ret_stack;
6192 if (tsk->ret_stack && index >= graph) {
6193 pc = tsk->ret_stack[index - graph].ret;
6194 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6195 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6196 graph++;
6197 }
6198 }
6199 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6200 return (struct reg_window *) (fp + STACK_BIAS);
6201 }
6202
6203 +extern void gr_handle_kernel_exploit(void);
6204 +
6205 void die_if_kernel(char *str, struct pt_regs *regs)
6206 {
6207 static int die_counter;
6208 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6209 while (rw &&
6210 count++ < 30 &&
6211 kstack_valid(tp, (unsigned long) rw)) {
6212 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6213 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6214 (void *) rw->ins[7]);
6215
6216 rw = kernel_stack_up(rw);
6217 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6218 }
6219 user_instruction_dump ((unsigned int __user *) regs->tpc);
6220 }
6221 - if (regs->tstate & TSTATE_PRIV)
6222 + if (regs->tstate & TSTATE_PRIV) {
6223 + gr_handle_kernel_exploit();
6224 do_exit(SIGKILL);
6225 + }
6226 do_exit(SIGSEGV);
6227 }
6228 EXPORT_SYMBOL(die_if_kernel);
6229 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6230 index dae85bc..af1e19d 100644
6231 --- a/arch/sparc/kernel/unaligned_64.c
6232 +++ b/arch/sparc/kernel/unaligned_64.c
6233 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6234 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6235
6236 if (__ratelimit(&ratelimit)) {
6237 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6238 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6239 regs->tpc, (void *) regs->tpc);
6240 }
6241 }
6242 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6243 index a3fc437..fea9957 100644
6244 --- a/arch/sparc/lib/Makefile
6245 +++ b/arch/sparc/lib/Makefile
6246 @@ -2,7 +2,7 @@
6247 #
6248
6249 asflags-y := -ansi -DST_DIV0=0x02
6250 -ccflags-y := -Werror
6251 +#ccflags-y := -Werror
6252
6253 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6254 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6255 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6256 index 59186e0..f747d7a 100644
6257 --- a/arch/sparc/lib/atomic_64.S
6258 +++ b/arch/sparc/lib/atomic_64.S
6259 @@ -18,7 +18,12 @@
6260 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6261 BACKOFF_SETUP(%o2)
6262 1: lduw [%o1], %g1
6263 - add %g1, %o0, %g7
6264 + addcc %g1, %o0, %g7
6265 +
6266 +#ifdef CONFIG_PAX_REFCOUNT
6267 + tvs %icc, 6
6268 +#endif
6269 +
6270 cas [%o1], %g1, %g7
6271 cmp %g1, %g7
6272 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6273 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6274 2: BACKOFF_SPIN(%o2, %o3, 1b)
6275 .size atomic_add, .-atomic_add
6276
6277 + .globl atomic_add_unchecked
6278 + .type atomic_add_unchecked,#function
6279 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6280 + BACKOFF_SETUP(%o2)
6281 +1: lduw [%o1], %g1
6282 + add %g1, %o0, %g7
6283 + cas [%o1], %g1, %g7
6284 + cmp %g1, %g7
6285 + bne,pn %icc, 2f
6286 + nop
6287 + retl
6288 + nop
6289 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6290 + .size atomic_add_unchecked, .-atomic_add_unchecked
6291 +
6292 .globl atomic_sub
6293 .type atomic_sub,#function
6294 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6295 BACKOFF_SETUP(%o2)
6296 1: lduw [%o1], %g1
6297 - sub %g1, %o0, %g7
6298 + subcc %g1, %o0, %g7
6299 +
6300 +#ifdef CONFIG_PAX_REFCOUNT
6301 + tvs %icc, 6
6302 +#endif
6303 +
6304 cas [%o1], %g1, %g7
6305 cmp %g1, %g7
6306 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6307 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6308 2: BACKOFF_SPIN(%o2, %o3, 1b)
6309 .size atomic_sub, .-atomic_sub
6310
6311 + .globl atomic_sub_unchecked
6312 + .type atomic_sub_unchecked,#function
6313 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6314 + BACKOFF_SETUP(%o2)
6315 +1: lduw [%o1], %g1
6316 + sub %g1, %o0, %g7
6317 + cas [%o1], %g1, %g7
6318 + cmp %g1, %g7
6319 + bne,pn %icc, 2f
6320 + nop
6321 + retl
6322 + nop
6323 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6324 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6325 +
6326 .globl atomic_add_ret
6327 .type atomic_add_ret,#function
6328 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6329 BACKOFF_SETUP(%o2)
6330 1: lduw [%o1], %g1
6331 - add %g1, %o0, %g7
6332 + addcc %g1, %o0, %g7
6333 +
6334 +#ifdef CONFIG_PAX_REFCOUNT
6335 + tvs %icc, 6
6336 +#endif
6337 +
6338 cas [%o1], %g1, %g7
6339 cmp %g1, %g7
6340 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6341 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6342 2: BACKOFF_SPIN(%o2, %o3, 1b)
6343 .size atomic_add_ret, .-atomic_add_ret
6344
6345 + .globl atomic_add_ret_unchecked
6346 + .type atomic_add_ret_unchecked,#function
6347 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6348 + BACKOFF_SETUP(%o2)
6349 +1: lduw [%o1], %g1
6350 + addcc %g1, %o0, %g7
6351 + cas [%o1], %g1, %g7
6352 + cmp %g1, %g7
6353 + bne,pn %icc, 2f
6354 + add %g7, %o0, %g7
6355 + sra %g7, 0, %o0
6356 + retl
6357 + nop
6358 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6359 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6360 +
6361 .globl atomic_sub_ret
6362 .type atomic_sub_ret,#function
6363 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6364 BACKOFF_SETUP(%o2)
6365 1: lduw [%o1], %g1
6366 - sub %g1, %o0, %g7
6367 + subcc %g1, %o0, %g7
6368 +
6369 +#ifdef CONFIG_PAX_REFCOUNT
6370 + tvs %icc, 6
6371 +#endif
6372 +
6373 cas [%o1], %g1, %g7
6374 cmp %g1, %g7
6375 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6376 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6377 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6378 BACKOFF_SETUP(%o2)
6379 1: ldx [%o1], %g1
6380 - add %g1, %o0, %g7
6381 + addcc %g1, %o0, %g7
6382 +
6383 +#ifdef CONFIG_PAX_REFCOUNT
6384 + tvs %xcc, 6
6385 +#endif
6386 +
6387 casx [%o1], %g1, %g7
6388 cmp %g1, %g7
6389 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6390 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6391 2: BACKOFF_SPIN(%o2, %o3, 1b)
6392 .size atomic64_add, .-atomic64_add
6393
6394 + .globl atomic64_add_unchecked
6395 + .type atomic64_add_unchecked,#function
6396 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6397 + BACKOFF_SETUP(%o2)
6398 +1: ldx [%o1], %g1
6399 + addcc %g1, %o0, %g7
6400 + casx [%o1], %g1, %g7
6401 + cmp %g1, %g7
6402 + bne,pn %xcc, 2f
6403 + nop
6404 + retl
6405 + nop
6406 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6407 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6408 +
6409 .globl atomic64_sub
6410 .type atomic64_sub,#function
6411 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6412 BACKOFF_SETUP(%o2)
6413 1: ldx [%o1], %g1
6414 - sub %g1, %o0, %g7
6415 + subcc %g1, %o0, %g7
6416 +
6417 +#ifdef CONFIG_PAX_REFCOUNT
6418 + tvs %xcc, 6
6419 +#endif
6420 +
6421 casx [%o1], %g1, %g7
6422 cmp %g1, %g7
6423 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6424 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6425 2: BACKOFF_SPIN(%o2, %o3, 1b)
6426 .size atomic64_sub, .-atomic64_sub
6427
6428 + .globl atomic64_sub_unchecked
6429 + .type atomic64_sub_unchecked,#function
6430 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6431 + BACKOFF_SETUP(%o2)
6432 +1: ldx [%o1], %g1
6433 + subcc %g1, %o0, %g7
6434 + casx [%o1], %g1, %g7
6435 + cmp %g1, %g7
6436 + bne,pn %xcc, 2f
6437 + nop
6438 + retl
6439 + nop
6440 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6441 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6442 +
6443 .globl atomic64_add_ret
6444 .type atomic64_add_ret,#function
6445 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6446 BACKOFF_SETUP(%o2)
6447 1: ldx [%o1], %g1
6448 - add %g1, %o0, %g7
6449 + addcc %g1, %o0, %g7
6450 +
6451 +#ifdef CONFIG_PAX_REFCOUNT
6452 + tvs %xcc, 6
6453 +#endif
6454 +
6455 casx [%o1], %g1, %g7
6456 cmp %g1, %g7
6457 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6458 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6459 2: BACKOFF_SPIN(%o2, %o3, 1b)
6460 .size atomic64_add_ret, .-atomic64_add_ret
6461
6462 + .globl atomic64_add_ret_unchecked
6463 + .type atomic64_add_ret_unchecked,#function
6464 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6465 + BACKOFF_SETUP(%o2)
6466 +1: ldx [%o1], %g1
6467 + addcc %g1, %o0, %g7
6468 + casx [%o1], %g1, %g7
6469 + cmp %g1, %g7
6470 + bne,pn %xcc, 2f
6471 + add %g7, %o0, %g7
6472 + mov %g7, %o0
6473 + retl
6474 + nop
6475 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6476 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6477 +
6478 .globl atomic64_sub_ret
6479 .type atomic64_sub_ret,#function
6480 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: ldx [%o1], %g1
6483 - sub %g1, %o0, %g7
6484 + subcc %g1, %o0, %g7
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + tvs %xcc, 6
6488 +#endif
6489 +
6490 casx [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6493 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6494 index f73c224..662af10 100644
6495 --- a/arch/sparc/lib/ksyms.c
6496 +++ b/arch/sparc/lib/ksyms.c
6497 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6498
6499 /* Atomic counter implementation. */
6500 EXPORT_SYMBOL(atomic_add);
6501 +EXPORT_SYMBOL(atomic_add_unchecked);
6502 EXPORT_SYMBOL(atomic_add_ret);
6503 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6504 EXPORT_SYMBOL(atomic_sub);
6505 +EXPORT_SYMBOL(atomic_sub_unchecked);
6506 EXPORT_SYMBOL(atomic_sub_ret);
6507 EXPORT_SYMBOL(atomic64_add);
6508 +EXPORT_SYMBOL(atomic64_add_unchecked);
6509 EXPORT_SYMBOL(atomic64_add_ret);
6510 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6511 EXPORT_SYMBOL(atomic64_sub);
6512 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6513 EXPORT_SYMBOL(atomic64_sub_ret);
6514
6515 /* Atomic bit operations. */
6516 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6517 index 301421c..e2535d1 100644
6518 --- a/arch/sparc/mm/Makefile
6519 +++ b/arch/sparc/mm/Makefile
6520 @@ -2,7 +2,7 @@
6521 #
6522
6523 asflags-y := -ansi
6524 -ccflags-y := -Werror
6525 +#ccflags-y := -Werror
6526
6527 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6528 obj-y += fault_$(BITS).o
6529 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6530 index df3155a..eb708b8 100644
6531 --- a/arch/sparc/mm/fault_32.c
6532 +++ b/arch/sparc/mm/fault_32.c
6533 @@ -21,6 +21,9 @@
6534 #include <linux/perf_event.h>
6535 #include <linux/interrupt.h>
6536 #include <linux/kdebug.h>
6537 +#include <linux/slab.h>
6538 +#include <linux/pagemap.h>
6539 +#include <linux/compiler.h>
6540
6541 #include <asm/page.h>
6542 #include <asm/pgtable.h>
6543 @@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6544 return safe_compute_effective_address(regs, insn);
6545 }
6546
6547 +#ifdef CONFIG_PAX_PAGEEXEC
6548 +#ifdef CONFIG_PAX_DLRESOLVE
6549 +static void pax_emuplt_close(struct vm_area_struct *vma)
6550 +{
6551 + vma->vm_mm->call_dl_resolve = 0UL;
6552 +}
6553 +
6554 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6555 +{
6556 + unsigned int *kaddr;
6557 +
6558 + vmf->page = alloc_page(GFP_HIGHUSER);
6559 + if (!vmf->page)
6560 + return VM_FAULT_OOM;
6561 +
6562 + kaddr = kmap(vmf->page);
6563 + memset(kaddr, 0, PAGE_SIZE);
6564 + kaddr[0] = 0x9DE3BFA8U; /* save */
6565 + flush_dcache_page(vmf->page);
6566 + kunmap(vmf->page);
6567 + return VM_FAULT_MAJOR;
6568 +}
6569 +
6570 +static const struct vm_operations_struct pax_vm_ops = {
6571 + .close = pax_emuplt_close,
6572 + .fault = pax_emuplt_fault
6573 +};
6574 +
6575 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6576 +{
6577 + int ret;
6578 +
6579 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6580 + vma->vm_mm = current->mm;
6581 + vma->vm_start = addr;
6582 + vma->vm_end = addr + PAGE_SIZE;
6583 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6584 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6585 + vma->vm_ops = &pax_vm_ops;
6586 +
6587 + ret = insert_vm_struct(current->mm, vma);
6588 + if (ret)
6589 + return ret;
6590 +
6591 + ++current->mm->total_vm;
6592 + return 0;
6593 +}
6594 +#endif
6595 +
6596 +/*
6597 + * PaX: decide what to do with offenders (regs->pc = fault address)
6598 + *
6599 + * returns 1 when task should be killed
6600 + * 2 when patched PLT trampoline was detected
6601 + * 3 when unpatched PLT trampoline was detected
6602 + */
6603 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6604 +{
6605 +
6606 +#ifdef CONFIG_PAX_EMUPLT
6607 + int err;
6608 +
6609 + do { /* PaX: patched PLT emulation #1 */
6610 + unsigned int sethi1, sethi2, jmpl;
6611 +
6612 + err = get_user(sethi1, (unsigned int *)regs->pc);
6613 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6614 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6615 +
6616 + if (err)
6617 + break;
6618 +
6619 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6620 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6621 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6622 + {
6623 + unsigned int addr;
6624 +
6625 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6626 + addr = regs->u_regs[UREG_G1];
6627 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6628 + regs->pc = addr;
6629 + regs->npc = addr+4;
6630 + return 2;
6631 + }
6632 + } while (0);
6633 +
6634 + { /* PaX: patched PLT emulation #2 */
6635 + unsigned int ba;
6636 +
6637 + err = get_user(ba, (unsigned int *)regs->pc);
6638 +
6639 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6640 + unsigned int addr;
6641 +
6642 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6643 + regs->pc = addr;
6644 + regs->npc = addr+4;
6645 + return 2;
6646 + }
6647 + }
6648 +
6649 + do { /* PaX: patched PLT emulation #3 */
6650 + unsigned int sethi, jmpl, nop;
6651 +
6652 + err = get_user(sethi, (unsigned int *)regs->pc);
6653 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6654 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6655 +
6656 + if (err)
6657 + break;
6658 +
6659 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6660 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6661 + nop == 0x01000000U)
6662 + {
6663 + unsigned int addr;
6664 +
6665 + addr = (sethi & 0x003FFFFFU) << 10;
6666 + regs->u_regs[UREG_G1] = addr;
6667 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6668 + regs->pc = addr;
6669 + regs->npc = addr+4;
6670 + return 2;
6671 + }
6672 + } while (0);
6673 +
6674 + do { /* PaX: unpatched PLT emulation step 1 */
6675 + unsigned int sethi, ba, nop;
6676 +
6677 + err = get_user(sethi, (unsigned int *)regs->pc);
6678 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6679 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6680 +
6681 + if (err)
6682 + break;
6683 +
6684 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6685 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6686 + nop == 0x01000000U)
6687 + {
6688 + unsigned int addr, save, call;
6689 +
6690 + if ((ba & 0xFFC00000U) == 0x30800000U)
6691 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6692 + else
6693 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6694 +
6695 + err = get_user(save, (unsigned int *)addr);
6696 + err |= get_user(call, (unsigned int *)(addr+4));
6697 + err |= get_user(nop, (unsigned int *)(addr+8));
6698 + if (err)
6699 + break;
6700 +
6701 +#ifdef CONFIG_PAX_DLRESOLVE
6702 + if (save == 0x9DE3BFA8U &&
6703 + (call & 0xC0000000U) == 0x40000000U &&
6704 + nop == 0x01000000U)
6705 + {
6706 + struct vm_area_struct *vma;
6707 + unsigned long call_dl_resolve;
6708 +
6709 + down_read(&current->mm->mmap_sem);
6710 + call_dl_resolve = current->mm->call_dl_resolve;
6711 + up_read(&current->mm->mmap_sem);
6712 + if (likely(call_dl_resolve))
6713 + goto emulate;
6714 +
6715 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6716 +
6717 + down_write(&current->mm->mmap_sem);
6718 + if (current->mm->call_dl_resolve) {
6719 + call_dl_resolve = current->mm->call_dl_resolve;
6720 + up_write(&current->mm->mmap_sem);
6721 + if (vma)
6722 + kmem_cache_free(vm_area_cachep, vma);
6723 + goto emulate;
6724 + }
6725 +
6726 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6727 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6728 + up_write(&current->mm->mmap_sem);
6729 + if (vma)
6730 + kmem_cache_free(vm_area_cachep, vma);
6731 + return 1;
6732 + }
6733 +
6734 + if (pax_insert_vma(vma, call_dl_resolve)) {
6735 + up_write(&current->mm->mmap_sem);
6736 + kmem_cache_free(vm_area_cachep, vma);
6737 + return 1;
6738 + }
6739 +
6740 + current->mm->call_dl_resolve = call_dl_resolve;
6741 + up_write(&current->mm->mmap_sem);
6742 +
6743 +emulate:
6744 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6745 + regs->pc = call_dl_resolve;
6746 + regs->npc = addr+4;
6747 + return 3;
6748 + }
6749 +#endif
6750 +
6751 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6752 + if ((save & 0xFFC00000U) == 0x05000000U &&
6753 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6754 + nop == 0x01000000U)
6755 + {
6756 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6757 + regs->u_regs[UREG_G2] = addr + 4;
6758 + addr = (save & 0x003FFFFFU) << 10;
6759 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6760 + regs->pc = addr;
6761 + regs->npc = addr+4;
6762 + return 3;
6763 + }
6764 + }
6765 + } while (0);
6766 +
6767 + do { /* PaX: unpatched PLT emulation step 2 */
6768 + unsigned int save, call, nop;
6769 +
6770 + err = get_user(save, (unsigned int *)(regs->pc-4));
6771 + err |= get_user(call, (unsigned int *)regs->pc);
6772 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6773 + if (err)
6774 + break;
6775 +
6776 + if (save == 0x9DE3BFA8U &&
6777 + (call & 0xC0000000U) == 0x40000000U &&
6778 + nop == 0x01000000U)
6779 + {
6780 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6781 +
6782 + regs->u_regs[UREG_RETPC] = regs->pc;
6783 + regs->pc = dl_resolve;
6784 + regs->npc = dl_resolve+4;
6785 + return 3;
6786 + }
6787 + } while (0);
6788 +#endif
6789 +
6790 + return 1;
6791 +}
6792 +
6793 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6794 +{
6795 + unsigned long i;
6796 +
6797 + printk(KERN_ERR "PAX: bytes at PC: ");
6798 + for (i = 0; i < 8; i++) {
6799 + unsigned int c;
6800 + if (get_user(c, (unsigned int *)pc+i))
6801 + printk(KERN_CONT "???????? ");
6802 + else
6803 + printk(KERN_CONT "%08x ", c);
6804 + }
6805 + printk("\n");
6806 +}
6807 +#endif
6808 +
6809 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6810 int text_fault)
6811 {
6812 @@ -282,6 +547,24 @@ good_area:
6813 if(!(vma->vm_flags & VM_WRITE))
6814 goto bad_area;
6815 } else {
6816 +
6817 +#ifdef CONFIG_PAX_PAGEEXEC
6818 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6819 + up_read(&mm->mmap_sem);
6820 + switch (pax_handle_fetch_fault(regs)) {
6821 +
6822 +#ifdef CONFIG_PAX_EMUPLT
6823 + case 2:
6824 + case 3:
6825 + return;
6826 +#endif
6827 +
6828 + }
6829 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6830 + do_group_exit(SIGKILL);
6831 + }
6832 +#endif
6833 +
6834 /* Allow reads even for write-only mappings */
6835 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6836 goto bad_area;
6837 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6838 index 1fe0429..aee2e87 100644
6839 --- a/arch/sparc/mm/fault_64.c
6840 +++ b/arch/sparc/mm/fault_64.c
6841 @@ -21,6 +21,9 @@
6842 #include <linux/kprobes.h>
6843 #include <linux/kdebug.h>
6844 #include <linux/percpu.h>
6845 +#include <linux/slab.h>
6846 +#include <linux/pagemap.h>
6847 +#include <linux/compiler.h>
6848
6849 #include <asm/page.h>
6850 #include <asm/pgtable.h>
6851 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6852 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6853 regs->tpc);
6854 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6855 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6856 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6857 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6858 dump_stack();
6859 unhandled_fault(regs->tpc, current, regs);
6860 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6861 show_regs(regs);
6862 }
6863
6864 +#ifdef CONFIG_PAX_PAGEEXEC
6865 +#ifdef CONFIG_PAX_DLRESOLVE
6866 +static void pax_emuplt_close(struct vm_area_struct *vma)
6867 +{
6868 + vma->vm_mm->call_dl_resolve = 0UL;
6869 +}
6870 +
6871 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6872 +{
6873 + unsigned int *kaddr;
6874 +
6875 + vmf->page = alloc_page(GFP_HIGHUSER);
6876 + if (!vmf->page)
6877 + return VM_FAULT_OOM;
6878 +
6879 + kaddr = kmap(vmf->page);
6880 + memset(kaddr, 0, PAGE_SIZE);
6881 + kaddr[0] = 0x9DE3BFA8U; /* save */
6882 + flush_dcache_page(vmf->page);
6883 + kunmap(vmf->page);
6884 + return VM_FAULT_MAJOR;
6885 +}
6886 +
6887 +static const struct vm_operations_struct pax_vm_ops = {
6888 + .close = pax_emuplt_close,
6889 + .fault = pax_emuplt_fault
6890 +};
6891 +
6892 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6893 +{
6894 + int ret;
6895 +
6896 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6897 + vma->vm_mm = current->mm;
6898 + vma->vm_start = addr;
6899 + vma->vm_end = addr + PAGE_SIZE;
6900 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6901 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6902 + vma->vm_ops = &pax_vm_ops;
6903 +
6904 + ret = insert_vm_struct(current->mm, vma);
6905 + if (ret)
6906 + return ret;
6907 +
6908 + ++current->mm->total_vm;
6909 + return 0;
6910 +}
6911 +#endif
6912 +
6913 +/*
6914 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6915 + *
6916 + * returns 1 when task should be killed
6917 + * 2 when patched PLT trampoline was detected
6918 + * 3 when unpatched PLT trampoline was detected
6919 + */
6920 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6921 +{
6922 +
6923 +#ifdef CONFIG_PAX_EMUPLT
6924 + int err;
6925 +
6926 + do { /* PaX: patched PLT emulation #1 */
6927 + unsigned int sethi1, sethi2, jmpl;
6928 +
6929 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6930 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6931 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6932 +
6933 + if (err)
6934 + break;
6935 +
6936 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6937 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6938 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6939 + {
6940 + unsigned long addr;
6941 +
6942 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6943 + addr = regs->u_regs[UREG_G1];
6944 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6945 +
6946 + if (test_thread_flag(TIF_32BIT))
6947 + addr &= 0xFFFFFFFFUL;
6948 +
6949 + regs->tpc = addr;
6950 + regs->tnpc = addr+4;
6951 + return 2;
6952 + }
6953 + } while (0);
6954 +
6955 + { /* PaX: patched PLT emulation #2 */
6956 + unsigned int ba;
6957 +
6958 + err = get_user(ba, (unsigned int *)regs->tpc);
6959 +
6960 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961 + unsigned long addr;
6962 +
6963 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6964 +
6965 + if (test_thread_flag(TIF_32BIT))
6966 + addr &= 0xFFFFFFFFUL;
6967 +
6968 + regs->tpc = addr;
6969 + regs->tnpc = addr+4;
6970 + return 2;
6971 + }
6972 + }
6973 +
6974 + do { /* PaX: patched PLT emulation #3 */
6975 + unsigned int sethi, jmpl, nop;
6976 +
6977 + err = get_user(sethi, (unsigned int *)regs->tpc);
6978 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6979 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6980 +
6981 + if (err)
6982 + break;
6983 +
6984 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6985 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6986 + nop == 0x01000000U)
6987 + {
6988 + unsigned long addr;
6989 +
6990 + addr = (sethi & 0x003FFFFFU) << 10;
6991 + regs->u_regs[UREG_G1] = addr;
6992 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6993 +
6994 + if (test_thread_flag(TIF_32BIT))
6995 + addr &= 0xFFFFFFFFUL;
6996 +
6997 + regs->tpc = addr;
6998 + regs->tnpc = addr+4;
6999 + return 2;
7000 + }
7001 + } while (0);
7002 +
7003 + do { /* PaX: patched PLT emulation #4 */
7004 + unsigned int sethi, mov1, call, mov2;
7005 +
7006 + err = get_user(sethi, (unsigned int *)regs->tpc);
7007 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7008 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7009 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7010 +
7011 + if (err)
7012 + break;
7013 +
7014 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7015 + mov1 == 0x8210000FU &&
7016 + (call & 0xC0000000U) == 0x40000000U &&
7017 + mov2 == 0x9E100001U)
7018 + {
7019 + unsigned long addr;
7020 +
7021 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7022 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7023 +
7024 + if (test_thread_flag(TIF_32BIT))
7025 + addr &= 0xFFFFFFFFUL;
7026 +
7027 + regs->tpc = addr;
7028 + regs->tnpc = addr+4;
7029 + return 2;
7030 + }
7031 + } while (0);
7032 +
7033 + do { /* PaX: patched PLT emulation #5 */
7034 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7035 +
7036 + err = get_user(sethi, (unsigned int *)regs->tpc);
7037 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7038 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7039 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7040 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7041 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7042 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7043 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7044 +
7045 + if (err)
7046 + break;
7047 +
7048 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7049 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7050 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7051 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7052 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7053 + sllx == 0x83287020U &&
7054 + jmpl == 0x81C04005U &&
7055 + nop == 0x01000000U)
7056 + {
7057 + unsigned long addr;
7058 +
7059 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7060 + regs->u_regs[UREG_G1] <<= 32;
7061 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7062 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7063 + regs->tpc = addr;
7064 + regs->tnpc = addr+4;
7065 + return 2;
7066 + }
7067 + } while (0);
7068 +
7069 + do { /* PaX: patched PLT emulation #6 */
7070 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7071 +
7072 + err = get_user(sethi, (unsigned int *)regs->tpc);
7073 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7074 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7075 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7076 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7077 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7078 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7079 +
7080 + if (err)
7081 + break;
7082 +
7083 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7084 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7085 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7086 + sllx == 0x83287020U &&
7087 + (or & 0xFFFFE000U) == 0x8A116000U &&
7088 + jmpl == 0x81C04005U &&
7089 + nop == 0x01000000U)
7090 + {
7091 + unsigned long addr;
7092 +
7093 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7094 + regs->u_regs[UREG_G1] <<= 32;
7095 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7096 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7097 + regs->tpc = addr;
7098 + regs->tnpc = addr+4;
7099 + return 2;
7100 + }
7101 + } while (0);
7102 +
7103 + do { /* PaX: unpatched PLT emulation step 1 */
7104 + unsigned int sethi, ba, nop;
7105 +
7106 + err = get_user(sethi, (unsigned int *)regs->tpc);
7107 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7108 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7109 +
7110 + if (err)
7111 + break;
7112 +
7113 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7114 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7115 + nop == 0x01000000U)
7116 + {
7117 + unsigned long addr;
7118 + unsigned int save, call;
7119 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7120 +
7121 + if ((ba & 0xFFC00000U) == 0x30800000U)
7122 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7123 + else
7124 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7125 +
7126 + if (test_thread_flag(TIF_32BIT))
7127 + addr &= 0xFFFFFFFFUL;
7128 +
7129 + err = get_user(save, (unsigned int *)addr);
7130 + err |= get_user(call, (unsigned int *)(addr+4));
7131 + err |= get_user(nop, (unsigned int *)(addr+8));
7132 + if (err)
7133 + break;
7134 +
7135 +#ifdef CONFIG_PAX_DLRESOLVE
7136 + if (save == 0x9DE3BFA8U &&
7137 + (call & 0xC0000000U) == 0x40000000U &&
7138 + nop == 0x01000000U)
7139 + {
7140 + struct vm_area_struct *vma;
7141 + unsigned long call_dl_resolve;
7142 +
7143 + down_read(&current->mm->mmap_sem);
7144 + call_dl_resolve = current->mm->call_dl_resolve;
7145 + up_read(&current->mm->mmap_sem);
7146 + if (likely(call_dl_resolve))
7147 + goto emulate;
7148 +
7149 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7150 +
7151 + down_write(&current->mm->mmap_sem);
7152 + if (current->mm->call_dl_resolve) {
7153 + call_dl_resolve = current->mm->call_dl_resolve;
7154 + up_write(&current->mm->mmap_sem);
7155 + if (vma)
7156 + kmem_cache_free(vm_area_cachep, vma);
7157 + goto emulate;
7158 + }
7159 +
7160 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7161 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7162 + up_write(&current->mm->mmap_sem);
7163 + if (vma)
7164 + kmem_cache_free(vm_area_cachep, vma);
7165 + return 1;
7166 + }
7167 +
7168 + if (pax_insert_vma(vma, call_dl_resolve)) {
7169 + up_write(&current->mm->mmap_sem);
7170 + kmem_cache_free(vm_area_cachep, vma);
7171 + return 1;
7172 + }
7173 +
7174 + current->mm->call_dl_resolve = call_dl_resolve;
7175 + up_write(&current->mm->mmap_sem);
7176 +
7177 +emulate:
7178 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7179 + regs->tpc = call_dl_resolve;
7180 + regs->tnpc = addr+4;
7181 + return 3;
7182 + }
7183 +#endif
7184 +
7185 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7186 + if ((save & 0xFFC00000U) == 0x05000000U &&
7187 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7188 + nop == 0x01000000U)
7189 + {
7190 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7191 + regs->u_regs[UREG_G2] = addr + 4;
7192 + addr = (save & 0x003FFFFFU) << 10;
7193 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7194 +
7195 + if (test_thread_flag(TIF_32BIT))
7196 + addr &= 0xFFFFFFFFUL;
7197 +
7198 + regs->tpc = addr;
7199 + regs->tnpc = addr+4;
7200 + return 3;
7201 + }
7202 +
7203 + /* PaX: 64-bit PLT stub */
7204 + err = get_user(sethi1, (unsigned int *)addr);
7205 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7206 + err |= get_user(or1, (unsigned int *)(addr+8));
7207 + err |= get_user(or2, (unsigned int *)(addr+12));
7208 + err |= get_user(sllx, (unsigned int *)(addr+16));
7209 + err |= get_user(add, (unsigned int *)(addr+20));
7210 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7211 + err |= get_user(nop, (unsigned int *)(addr+28));
7212 + if (err)
7213 + break;
7214 +
7215 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7216 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7217 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7218 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7219 + sllx == 0x89293020U &&
7220 + add == 0x8A010005U &&
7221 + jmpl == 0x89C14000U &&
7222 + nop == 0x01000000U)
7223 + {
7224 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7225 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7226 + regs->u_regs[UREG_G4] <<= 32;
7227 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7228 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7229 + regs->u_regs[UREG_G4] = addr + 24;
7230 + addr = regs->u_regs[UREG_G5];
7231 + regs->tpc = addr;
7232 + regs->tnpc = addr+4;
7233 + return 3;
7234 + }
7235 + }
7236 + } while (0);
7237 +
7238 +#ifdef CONFIG_PAX_DLRESOLVE
7239 + do { /* PaX: unpatched PLT emulation step 2 */
7240 + unsigned int save, call, nop;
7241 +
7242 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7243 + err |= get_user(call, (unsigned int *)regs->tpc);
7244 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7245 + if (err)
7246 + break;
7247 +
7248 + if (save == 0x9DE3BFA8U &&
7249 + (call & 0xC0000000U) == 0x40000000U &&
7250 + nop == 0x01000000U)
7251 + {
7252 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7253 +
7254 + if (test_thread_flag(TIF_32BIT))
7255 + dl_resolve &= 0xFFFFFFFFUL;
7256 +
7257 + regs->u_regs[UREG_RETPC] = regs->tpc;
7258 + regs->tpc = dl_resolve;
7259 + regs->tnpc = dl_resolve+4;
7260 + return 3;
7261 + }
7262 + } while (0);
7263 +#endif
7264 +
7265 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7266 + unsigned int sethi, ba, nop;
7267 +
7268 + err = get_user(sethi, (unsigned int *)regs->tpc);
7269 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7270 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7271 +
7272 + if (err)
7273 + break;
7274 +
7275 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7276 + (ba & 0xFFF00000U) == 0x30600000U &&
7277 + nop == 0x01000000U)
7278 + {
7279 + unsigned long addr;
7280 +
7281 + addr = (sethi & 0x003FFFFFU) << 10;
7282 + regs->u_regs[UREG_G1] = addr;
7283 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7284 +
7285 + if (test_thread_flag(TIF_32BIT))
7286 + addr &= 0xFFFFFFFFUL;
7287 +
7288 + regs->tpc = addr;
7289 + regs->tnpc = addr+4;
7290 + return 2;
7291 + }
7292 + } while (0);
7293 +
7294 +#endif
7295 +
7296 + return 1;
7297 +}
7298 +
7299 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7300 +{
7301 + unsigned long i;
7302 +
7303 + printk(KERN_ERR "PAX: bytes at PC: ");
7304 + for (i = 0; i < 8; i++) {
7305 + unsigned int c;
7306 + if (get_user(c, (unsigned int *)pc+i))
7307 + printk(KERN_CONT "???????? ");
7308 + else
7309 + printk(KERN_CONT "%08x ", c);
7310 + }
7311 + printk("\n");
7312 +}
7313 +#endif
7314 +
7315 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7316 {
7317 struct mm_struct *mm = current->mm;
7318 @@ -343,6 +797,29 @@ retry:
7319 if (!vma)
7320 goto bad_area;
7321
7322 +#ifdef CONFIG_PAX_PAGEEXEC
7323 + /* PaX: detect ITLB misses on non-exec pages */
7324 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7325 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7326 + {
7327 + if (address != regs->tpc)
7328 + goto good_area;
7329 +
7330 + up_read(&mm->mmap_sem);
7331 + switch (pax_handle_fetch_fault(regs)) {
7332 +
7333 +#ifdef CONFIG_PAX_EMUPLT
7334 + case 2:
7335 + case 3:
7336 + return;
7337 +#endif
7338 +
7339 + }
7340 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7341 + do_group_exit(SIGKILL);
7342 + }
7343 +#endif
7344 +
7345 /* Pure DTLB misses do not tell us whether the fault causing
7346 * load/store/atomic was a write or not, it only says that there
7347 * was no match. So in such a case we (carefully) read the
7348 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7349 index 07e1453..0a7d9e9 100644
7350 --- a/arch/sparc/mm/hugetlbpage.c
7351 +++ b/arch/sparc/mm/hugetlbpage.c
7352 @@ -67,7 +67,7 @@ full_search:
7353 }
7354 return -ENOMEM;
7355 }
7356 - if (likely(!vma || addr + len <= vma->vm_start)) {
7357 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7358 /*
7359 * Remember the place where we stopped the search:
7360 */
7361 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7362 /* make sure it can fit in the remaining address space */
7363 if (likely(addr > len)) {
7364 vma = find_vma(mm, addr-len);
7365 - if (!vma || addr <= vma->vm_start) {
7366 + if (check_heap_stack_gap(vma, addr - len, len)) {
7367 /* remember the address as a hint for next time */
7368 return (mm->free_area_cache = addr-len);
7369 }
7370 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7371 if (unlikely(mm->mmap_base < len))
7372 goto bottomup;
7373
7374 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7375 + addr = mm->mmap_base - len;
7376
7377 do {
7378 + addr &= HPAGE_MASK;
7379 /*
7380 * Lookup failure means no vma is above this address,
7381 * else if new region fits below vma->vm_start,
7382 * return with success:
7383 */
7384 vma = find_vma(mm, addr);
7385 - if (likely(!vma || addr+len <= vma->vm_start)) {
7386 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7387 /* remember the address as a hint for next time */
7388 return (mm->free_area_cache = addr);
7389 }
7390 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7391 mm->cached_hole_size = vma->vm_start - addr;
7392
7393 /* try just below the current vma->vm_start */
7394 - addr = (vma->vm_start-len) & HPAGE_MASK;
7395 - } while (likely(len < vma->vm_start));
7396 + addr = skip_heap_stack_gap(vma, len);
7397 + } while (!IS_ERR_VALUE(addr));
7398
7399 bottomup:
7400 /*
7401 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7402 if (addr) {
7403 addr = ALIGN(addr, HPAGE_SIZE);
7404 vma = find_vma(mm, addr);
7405 - if (task_size - len >= addr &&
7406 - (!vma || addr + len <= vma->vm_start))
7407 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7408 return addr;
7409 }
7410 if (mm->get_unmapped_area == arch_get_unmapped_area)
7411 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7412 index c5f9021..7591bae 100644
7413 --- a/arch/sparc/mm/init_32.c
7414 +++ b/arch/sparc/mm/init_32.c
7415 @@ -315,6 +315,9 @@ extern void device_scan(void);
7416 pgprot_t PAGE_SHARED __read_mostly;
7417 EXPORT_SYMBOL(PAGE_SHARED);
7418
7419 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7420 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7421 +
7422 void __init paging_init(void)
7423 {
7424 switch(sparc_cpu_model) {
7425 @@ -343,17 +346,17 @@ void __init paging_init(void)
7426
7427 /* Initialize the protection map with non-constant, MMU dependent values. */
7428 protection_map[0] = PAGE_NONE;
7429 - protection_map[1] = PAGE_READONLY;
7430 - protection_map[2] = PAGE_COPY;
7431 - protection_map[3] = PAGE_COPY;
7432 + protection_map[1] = PAGE_READONLY_NOEXEC;
7433 + protection_map[2] = PAGE_COPY_NOEXEC;
7434 + protection_map[3] = PAGE_COPY_NOEXEC;
7435 protection_map[4] = PAGE_READONLY;
7436 protection_map[5] = PAGE_READONLY;
7437 protection_map[6] = PAGE_COPY;
7438 protection_map[7] = PAGE_COPY;
7439 protection_map[8] = PAGE_NONE;
7440 - protection_map[9] = PAGE_READONLY;
7441 - protection_map[10] = PAGE_SHARED;
7442 - protection_map[11] = PAGE_SHARED;
7443 + protection_map[9] = PAGE_READONLY_NOEXEC;
7444 + protection_map[10] = PAGE_SHARED_NOEXEC;
7445 + protection_map[11] = PAGE_SHARED_NOEXEC;
7446 protection_map[12] = PAGE_READONLY;
7447 protection_map[13] = PAGE_READONLY;
7448 protection_map[14] = PAGE_SHARED;
7449 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7450 index cbef74e..c38fead 100644
7451 --- a/arch/sparc/mm/srmmu.c
7452 +++ b/arch/sparc/mm/srmmu.c
7453 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7454 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7455 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7456 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7457 +
7458 +#ifdef CONFIG_PAX_PAGEEXEC
7459 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7460 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7461 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7462 +#endif
7463 +
7464 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7465 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7466
7467 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7468 index f4500c6..889656c 100644
7469 --- a/arch/tile/include/asm/atomic_64.h
7470 +++ b/arch/tile/include/asm/atomic_64.h
7471 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7472
7473 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7474
7475 +#define atomic64_read_unchecked(v) atomic64_read(v)
7476 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7477 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7478 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7479 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7480 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7481 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7482 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7483 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7484 +
7485 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7486 #define smp_mb__before_atomic_dec() smp_mb()
7487 #define smp_mb__after_atomic_dec() smp_mb()
7488 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7489 index 392e533..536b092 100644
7490 --- a/arch/tile/include/asm/cache.h
7491 +++ b/arch/tile/include/asm/cache.h
7492 @@ -15,11 +15,12 @@
7493 #ifndef _ASM_TILE_CACHE_H
7494 #define _ASM_TILE_CACHE_H
7495
7496 +#include <linux/const.h>
7497 #include <arch/chip.h>
7498
7499 /* bytes per L1 data cache line */
7500 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7501 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7502 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7503
7504 /* bytes per L2 cache line */
7505 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7506 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7507 index ef34d2c..d6ce60c 100644
7508 --- a/arch/tile/include/asm/uaccess.h
7509 +++ b/arch/tile/include/asm/uaccess.h
7510 @@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7511 const void __user *from,
7512 unsigned long n)
7513 {
7514 - int sz = __compiletime_object_size(to);
7515 + size_t sz = __compiletime_object_size(to);
7516
7517 - if (likely(sz == -1 || sz >= n))
7518 + if (likely(sz == (size_t)-1 || sz >= n))
7519 n = _copy_from_user(to, from, n);
7520 else
7521 copy_from_user_overflow();
7522 diff --git a/arch/um/Makefile b/arch/um/Makefile
7523 index 55c0661..86ad413 100644
7524 --- a/arch/um/Makefile
7525 +++ b/arch/um/Makefile
7526 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7527 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7528 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7529
7530 +ifdef CONSTIFY_PLUGIN
7531 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7532 +endif
7533 +
7534 #This will adjust *FLAGS accordingly to the platform.
7535 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7536
7537 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7538 index 19e1bdd..3665b77 100644
7539 --- a/arch/um/include/asm/cache.h
7540 +++ b/arch/um/include/asm/cache.h
7541 @@ -1,6 +1,7 @@
7542 #ifndef __UM_CACHE_H
7543 #define __UM_CACHE_H
7544
7545 +#include <linux/const.h>
7546
7547 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7548 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7549 @@ -12,6 +13,6 @@
7550 # define L1_CACHE_SHIFT 5
7551 #endif
7552
7553 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7554 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7555
7556 #endif
7557 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7558 index 6c03acd..a5e0215 100644
7559 --- a/arch/um/include/asm/kmap_types.h
7560 +++ b/arch/um/include/asm/kmap_types.h
7561 @@ -23,6 +23,7 @@ enum km_type {
7562 KM_IRQ1,
7563 KM_SOFTIRQ0,
7564 KM_SOFTIRQ1,
7565 + KM_CLEARPAGE,
7566 KM_TYPE_NR
7567 };
7568
7569 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7570 index 7cfc3ce..cbd1a58 100644
7571 --- a/arch/um/include/asm/page.h
7572 +++ b/arch/um/include/asm/page.h
7573 @@ -14,6 +14,9 @@
7574 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7575 #define PAGE_MASK (~(PAGE_SIZE-1))
7576
7577 +#define ktla_ktva(addr) (addr)
7578 +#define ktva_ktla(addr) (addr)
7579 +
7580 #ifndef __ASSEMBLY__
7581
7582 struct page;
7583 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7584 index 0032f92..cd151e0 100644
7585 --- a/arch/um/include/asm/pgtable-3level.h
7586 +++ b/arch/um/include/asm/pgtable-3level.h
7587 @@ -58,6 +58,7 @@
7588 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7589 #define pud_populate(mm, pud, pmd) \
7590 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7591 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7592
7593 #ifdef CONFIG_64BIT
7594 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7595 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7596 index 2b73ded..804f540 100644
7597 --- a/arch/um/kernel/process.c
7598 +++ b/arch/um/kernel/process.c
7599 @@ -404,22 +404,6 @@ int singlestepping(void * t)
7600 return 2;
7601 }
7602
7603 -/*
7604 - * Only x86 and x86_64 have an arch_align_stack().
7605 - * All other arches have "#define arch_align_stack(x) (x)"
7606 - * in their asm/system.h
7607 - * As this is included in UML from asm-um/system-generic.h,
7608 - * we can use it to behave as the subarch does.
7609 - */
7610 -#ifndef arch_align_stack
7611 -unsigned long arch_align_stack(unsigned long sp)
7612 -{
7613 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7614 - sp -= get_random_int() % 8192;
7615 - return sp & ~0xf;
7616 -}
7617 -#endif
7618 -
7619 unsigned long get_wchan(struct task_struct *p)
7620 {
7621 unsigned long stack_page, sp, ip;
7622 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7623 index ad8f795..2c7eec6 100644
7624 --- a/arch/unicore32/include/asm/cache.h
7625 +++ b/arch/unicore32/include/asm/cache.h
7626 @@ -12,8 +12,10 @@
7627 #ifndef __UNICORE_CACHE_H__
7628 #define __UNICORE_CACHE_H__
7629
7630 -#define L1_CACHE_SHIFT (5)
7631 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7632 +#include <linux/const.h>
7633 +
7634 +#define L1_CACHE_SHIFT 5
7635 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7636
7637 /*
7638 * Memory returned by kmalloc() may be used for DMA, so we must make
7639 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7640 index c9866b0..fe53aef 100644
7641 --- a/arch/x86/Kconfig
7642 +++ b/arch/x86/Kconfig
7643 @@ -229,7 +229,7 @@ config X86_HT
7644
7645 config X86_32_LAZY_GS
7646 def_bool y
7647 - depends on X86_32 && !CC_STACKPROTECTOR
7648 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7649
7650 config ARCH_HWEIGHT_CFLAGS
7651 string
7652 @@ -1042,7 +1042,7 @@ choice
7653
7654 config NOHIGHMEM
7655 bool "off"
7656 - depends on !X86_NUMAQ
7657 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7658 ---help---
7659 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7660 However, the address space of 32-bit x86 processors is only 4
7661 @@ -1079,7 +1079,7 @@ config NOHIGHMEM
7662
7663 config HIGHMEM4G
7664 bool "4GB"
7665 - depends on !X86_NUMAQ
7666 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7667 ---help---
7668 Select this if you have a 32-bit processor and between 1 and 4
7669 gigabytes of physical RAM.
7670 @@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7671 hex
7672 default 0xB0000000 if VMSPLIT_3G_OPT
7673 default 0x80000000 if VMSPLIT_2G
7674 - default 0x78000000 if VMSPLIT_2G_OPT
7675 + default 0x70000000 if VMSPLIT_2G_OPT
7676 default 0x40000000 if VMSPLIT_1G
7677 default 0xC0000000
7678 depends on X86_32
7679 @@ -1523,6 +1523,7 @@ config SECCOMP
7680
7681 config CC_STACKPROTECTOR
7682 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7683 + depends on X86_64 || !PAX_MEMORY_UDEREF
7684 ---help---
7685 This option turns on the -fstack-protector GCC feature. This
7686 feature puts, at the beginning of functions, a canary value on
7687 @@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7688 config PHYSICAL_START
7689 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7690 default "0x1000000"
7691 + range 0x400000 0x40000000
7692 ---help---
7693 This gives the physical address where the kernel is loaded.
7694
7695 @@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7696 config PHYSICAL_ALIGN
7697 hex "Alignment value to which kernel should be aligned" if X86_32
7698 default "0x1000000"
7699 + range 0x400000 0x1000000 if PAX_KERNEXEC
7700 range 0x2000 0x1000000
7701 ---help---
7702 This value puts the alignment restrictions on physical address
7703 @@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7704 Say N if you want to disable CPU hotplug.
7705
7706 config COMPAT_VDSO
7707 - def_bool y
7708 + def_bool n
7709 prompt "Compat VDSO support"
7710 depends on X86_32 || IA32_EMULATION
7711 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7712 ---help---
7713 Map the 32-bit VDSO to the predictable old-style address too.
7714
7715 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7716 index 706e12e..62e4feb 100644
7717 --- a/arch/x86/Kconfig.cpu
7718 +++ b/arch/x86/Kconfig.cpu
7719 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7720
7721 config X86_F00F_BUG
7722 def_bool y
7723 - depends on M586MMX || M586TSC || M586 || M486 || M386
7724 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7725
7726 config X86_INVD_BUG
7727 def_bool y
7728 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7729
7730 config X86_ALIGNMENT_16
7731 def_bool y
7732 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7733 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7734
7735 config X86_INTEL_USERCOPY
7736 def_bool y
7737 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7738 # generates cmov.
7739 config X86_CMOV
7740 def_bool y
7741 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7742 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7743
7744 config X86_MINIMUM_CPU_FAMILY
7745 int
7746 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7747 index e46c214..ab62fd1 100644
7748 --- a/arch/x86/Kconfig.debug
7749 +++ b/arch/x86/Kconfig.debug
7750 @@ -84,7 +84,7 @@ config X86_PTDUMP
7751 config DEBUG_RODATA
7752 bool "Write protect kernel read-only data structures"
7753 default y
7754 - depends on DEBUG_KERNEL
7755 + depends on DEBUG_KERNEL && BROKEN
7756 ---help---
7757 Mark the kernel read-only data as write-protected in the pagetables,
7758 in order to catch accidental (and incorrect) writes to such const
7759 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7760
7761 config DEBUG_SET_MODULE_RONX
7762 bool "Set loadable kernel module data as NX and text as RO"
7763 - depends on MODULES
7764 + depends on MODULES && BROKEN
7765 ---help---
7766 This option helps catch unintended modifications to loadable
7767 kernel module's text and read-only data. It also prevents execution
7768 @@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7769
7770 config DEBUG_STRICT_USER_COPY_CHECKS
7771 bool "Strict copy size checks"
7772 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7773 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7774 ---help---
7775 Enabling this option turns a certain set of sanity checks for user
7776 copy operations into compile time failures.
7777 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7778 index b1c611e..2c1a823 100644
7779 --- a/arch/x86/Makefile
7780 +++ b/arch/x86/Makefile
7781 @@ -46,6 +46,7 @@ else
7782 UTS_MACHINE := x86_64
7783 CHECKFLAGS += -D__x86_64__ -m64
7784
7785 + biarch := $(call cc-option,-m64)
7786 KBUILD_AFLAGS += -m64
7787 KBUILD_CFLAGS += -m64
7788
7789 @@ -222,3 +223,12 @@ define archhelp
7790 echo ' FDARGS="..." arguments for the booted kernel'
7791 echo ' FDINITRD=file initrd for the booted kernel'
7792 endef
7793 +
7794 +define OLD_LD
7795 +
7796 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7797 +*** Please upgrade your binutils to 2.18 or newer
7798 +endef
7799 +
7800 +archprepare:
7801 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7802 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7803 index 5a747dd..ff7b12c 100644
7804 --- a/arch/x86/boot/Makefile
7805 +++ b/arch/x86/boot/Makefile
7806 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7807 $(call cc-option, -fno-stack-protector) \
7808 $(call cc-option, -mpreferred-stack-boundary=2)
7809 KBUILD_CFLAGS += $(call cc-option, -m32)
7810 +ifdef CONSTIFY_PLUGIN
7811 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7812 +endif
7813 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7814 GCOV_PROFILE := n
7815
7816 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7817 index 878e4b9..20537ab 100644
7818 --- a/arch/x86/boot/bitops.h
7819 +++ b/arch/x86/boot/bitops.h
7820 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7821 u8 v;
7822 const u32 *p = (const u32 *)addr;
7823
7824 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7825 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7826 return v;
7827 }
7828
7829 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7830
7831 static inline void set_bit(int nr, void *addr)
7832 {
7833 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7834 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7835 }
7836
7837 #endif /* BOOT_BITOPS_H */
7838 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7839 index 18997e5..83d9c67 100644
7840 --- a/arch/x86/boot/boot.h
7841 +++ b/arch/x86/boot/boot.h
7842 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7843 static inline u16 ds(void)
7844 {
7845 u16 seg;
7846 - asm("movw %%ds,%0" : "=rm" (seg));
7847 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7848 return seg;
7849 }
7850
7851 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7852 static inline int memcmp(const void *s1, const void *s2, size_t len)
7853 {
7854 u8 diff;
7855 - asm("repe; cmpsb; setnz %0"
7856 + asm volatile("repe; cmpsb; setnz %0"
7857 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7858 return diff;
7859 }
7860 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7861 index e398bb5..3a382ca 100644
7862 --- a/arch/x86/boot/compressed/Makefile
7863 +++ b/arch/x86/boot/compressed/Makefile
7864 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7865 KBUILD_CFLAGS += $(cflags-y)
7866 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7867 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7868 +ifdef CONSTIFY_PLUGIN
7869 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7870 +endif
7871
7872 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7873 GCOV_PROFILE := n
7874 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7875 index 0cdfc0d..6e79437 100644
7876 --- a/arch/x86/boot/compressed/eboot.c
7877 +++ b/arch/x86/boot/compressed/eboot.c
7878 @@ -122,7 +122,6 @@ again:
7879 *addr = max_addr;
7880 }
7881
7882 -free_pool:
7883 efi_call_phys1(sys_table->boottime->free_pool, map);
7884
7885 fail:
7886 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7887 if (i == map_size / desc_size)
7888 status = EFI_NOT_FOUND;
7889
7890 -free_pool:
7891 efi_call_phys1(sys_table->boottime->free_pool, map);
7892 fail:
7893 return status;
7894 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7895 index c85e3ac..6f5aa80 100644
7896 --- a/arch/x86/boot/compressed/head_32.S
7897 +++ b/arch/x86/boot/compressed/head_32.S
7898 @@ -106,7 +106,7 @@ preferred_addr:
7899 notl %eax
7900 andl %eax, %ebx
7901 #else
7902 - movl $LOAD_PHYSICAL_ADDR, %ebx
7903 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7904 #endif
7905
7906 /* Target address to relocate to for decompression */
7907 @@ -192,7 +192,7 @@ relocated:
7908 * and where it was actually loaded.
7909 */
7910 movl %ebp, %ebx
7911 - subl $LOAD_PHYSICAL_ADDR, %ebx
7912 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7913 jz 2f /* Nothing to be done if loaded at compiled addr. */
7914 /*
7915 * Process relocations.
7916 @@ -200,8 +200,7 @@ relocated:
7917
7918 1: subl $4, %edi
7919 movl (%edi), %ecx
7920 - testl %ecx, %ecx
7921 - jz 2f
7922 + jecxz 2f
7923 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7924 jmp 1b
7925 2:
7926 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7927 index 87e03a1..0d94c76 100644
7928 --- a/arch/x86/boot/compressed/head_64.S
7929 +++ b/arch/x86/boot/compressed/head_64.S
7930 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7931 notl %eax
7932 andl %eax, %ebx
7933 #else
7934 - movl $LOAD_PHYSICAL_ADDR, %ebx
7935 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7936 #endif
7937
7938 /* Target address to relocate to for decompression */
7939 @@ -263,7 +263,7 @@ preferred_addr:
7940 notq %rax
7941 andq %rax, %rbp
7942 #else
7943 - movq $LOAD_PHYSICAL_ADDR, %rbp
7944 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7945 #endif
7946
7947 /* Target address to relocate to for decompression */
7948 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7949 index 7116dcb..d9ae1d7 100644
7950 --- a/arch/x86/boot/compressed/misc.c
7951 +++ b/arch/x86/boot/compressed/misc.c
7952 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7953 case PT_LOAD:
7954 #ifdef CONFIG_RELOCATABLE
7955 dest = output;
7956 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7957 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7958 #else
7959 dest = (void *)(phdr->p_paddr);
7960 #endif
7961 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7962 error("Destination address too large");
7963 #endif
7964 #ifndef CONFIG_RELOCATABLE
7965 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7966 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7967 error("Wrong destination address");
7968 #endif
7969
7970 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7971 index 4d3ff03..e4972ff 100644
7972 --- a/arch/x86/boot/cpucheck.c
7973 +++ b/arch/x86/boot/cpucheck.c
7974 @@ -74,7 +74,7 @@ static int has_fpu(void)
7975 u16 fcw = -1, fsw = -1;
7976 u32 cr0;
7977
7978 - asm("movl %%cr0,%0" : "=r" (cr0));
7979 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7980 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7981 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7982 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7983 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7984 {
7985 u32 f0, f1;
7986
7987 - asm("pushfl ; "
7988 + asm volatile("pushfl ; "
7989 "pushfl ; "
7990 "popl %0 ; "
7991 "movl %0,%1 ; "
7992 @@ -115,7 +115,7 @@ static void get_flags(void)
7993 set_bit(X86_FEATURE_FPU, cpu.flags);
7994
7995 if (has_eflag(X86_EFLAGS_ID)) {
7996 - asm("cpuid"
7997 + asm volatile("cpuid"
7998 : "=a" (max_intel_level),
7999 "=b" (cpu_vendor[0]),
8000 "=d" (cpu_vendor[1]),
8001 @@ -124,7 +124,7 @@ static void get_flags(void)
8002
8003 if (max_intel_level >= 0x00000001 &&
8004 max_intel_level <= 0x0000ffff) {
8005 - asm("cpuid"
8006 + asm volatile("cpuid"
8007 : "=a" (tfms),
8008 "=c" (cpu.flags[4]),
8009 "=d" (cpu.flags[0])
8010 @@ -136,7 +136,7 @@ static void get_flags(void)
8011 cpu.model += ((tfms >> 16) & 0xf) << 4;
8012 }
8013
8014 - asm("cpuid"
8015 + asm volatile("cpuid"
8016 : "=a" (max_amd_level)
8017 : "a" (0x80000000)
8018 : "ebx", "ecx", "edx");
8019 @@ -144,7 +144,7 @@ static void get_flags(void)
8020 if (max_amd_level >= 0x80000001 &&
8021 max_amd_level <= 0x8000ffff) {
8022 u32 eax = 0x80000001;
8023 - asm("cpuid"
8024 + asm volatile("cpuid"
8025 : "+a" (eax),
8026 "=c" (cpu.flags[6]),
8027 "=d" (cpu.flags[1])
8028 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8029 u32 ecx = MSR_K7_HWCR;
8030 u32 eax, edx;
8031
8032 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8033 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8034 eax &= ~(1 << 15);
8035 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8036 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8037
8038 get_flags(); /* Make sure it really did something */
8039 err = check_flags();
8040 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8041 u32 ecx = MSR_VIA_FCR;
8042 u32 eax, edx;
8043
8044 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8045 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8046 eax |= (1<<1)|(1<<7);
8047 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8048 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8049
8050 set_bit(X86_FEATURE_CX8, cpu.flags);
8051 err = check_flags();
8052 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8053 u32 eax, edx;
8054 u32 level = 1;
8055
8056 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8057 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8058 - asm("cpuid"
8059 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8060 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8061 + asm volatile("cpuid"
8062 : "+a" (level), "=d" (cpu.flags[0])
8063 : : "ecx", "ebx");
8064 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8065 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8066
8067 err = check_flags();
8068 }
8069 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8070 index f1bbeeb..aff09cb 100644
8071 --- a/arch/x86/boot/header.S
8072 +++ b/arch/x86/boot/header.S
8073 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8074 # single linked list of
8075 # struct setup_data
8076
8077 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8078 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8079
8080 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8081 #define VO_INIT_SIZE (VO__end - VO__text)
8082 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8083 index db75d07..8e6d0af 100644
8084 --- a/arch/x86/boot/memory.c
8085 +++ b/arch/x86/boot/memory.c
8086 @@ -19,7 +19,7 @@
8087
8088 static int detect_memory_e820(void)
8089 {
8090 - int count = 0;
8091 + unsigned int count = 0;
8092 struct biosregs ireg, oreg;
8093 struct e820entry *desc = boot_params.e820_map;
8094 static struct e820entry buf; /* static so it is zeroed */
8095 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8096 index 11e8c6e..fdbb1ed 100644
8097 --- a/arch/x86/boot/video-vesa.c
8098 +++ b/arch/x86/boot/video-vesa.c
8099 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8100
8101 boot_params.screen_info.vesapm_seg = oreg.es;
8102 boot_params.screen_info.vesapm_off = oreg.di;
8103 + boot_params.screen_info.vesapm_size = oreg.cx;
8104 }
8105
8106 /*
8107 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8108 index 43eda28..5ab5fdb 100644
8109 --- a/arch/x86/boot/video.c
8110 +++ b/arch/x86/boot/video.c
8111 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8112 static unsigned int get_entry(void)
8113 {
8114 char entry_buf[4];
8115 - int i, len = 0;
8116 + unsigned int i, len = 0;
8117 int key;
8118 unsigned int v;
8119
8120 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8121 index 5b577d5..3c1fed4 100644
8122 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8123 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8124 @@ -8,6 +8,8 @@
8125 * including this sentence is retained in full.
8126 */
8127
8128 +#include <asm/alternative-asm.h>
8129 +
8130 .extern crypto_ft_tab
8131 .extern crypto_it_tab
8132 .extern crypto_fl_tab
8133 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8134 je B192; \
8135 leaq 32(r9),r9;
8136
8137 +#define ret pax_force_retaddr 0, 1; ret
8138 +
8139 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8140 movq r1,r2; \
8141 movq r3,r4; \
8142 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8143 index 3470624..201259d 100644
8144 --- a/arch/x86/crypto/aesni-intel_asm.S
8145 +++ b/arch/x86/crypto/aesni-intel_asm.S
8146 @@ -31,6 +31,7 @@
8147
8148 #include <linux/linkage.h>
8149 #include <asm/inst.h>
8150 +#include <asm/alternative-asm.h>
8151
8152 #ifdef __x86_64__
8153 .data
8154 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8155 pop %r14
8156 pop %r13
8157 pop %r12
8158 + pax_force_retaddr 0, 1
8159 ret
8160 +ENDPROC(aesni_gcm_dec)
8161
8162
8163 /*****************************************************************************
8164 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8165 pop %r14
8166 pop %r13
8167 pop %r12
8168 + pax_force_retaddr 0, 1
8169 ret
8170 +ENDPROC(aesni_gcm_enc)
8171
8172 #endif
8173
8174 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8175 pxor %xmm1, %xmm0
8176 movaps %xmm0, (TKEYP)
8177 add $0x10, TKEYP
8178 + pax_force_retaddr_bts
8179 ret
8180
8181 .align 4
8182 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8183 shufps $0b01001110, %xmm2, %xmm1
8184 movaps %xmm1, 0x10(TKEYP)
8185 add $0x20, TKEYP
8186 + pax_force_retaddr_bts
8187 ret
8188
8189 .align 4
8190 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8191
8192 movaps %xmm0, (TKEYP)
8193 add $0x10, TKEYP
8194 + pax_force_retaddr_bts
8195 ret
8196
8197 .align 4
8198 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8199 pxor %xmm1, %xmm2
8200 movaps %xmm2, (TKEYP)
8201 add $0x10, TKEYP
8202 + pax_force_retaddr_bts
8203 ret
8204
8205 /*
8206 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8207 #ifndef __x86_64__
8208 popl KEYP
8209 #endif
8210 + pax_force_retaddr 0, 1
8211 ret
8212 +ENDPROC(aesni_set_key)
8213
8214 /*
8215 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8216 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8217 popl KLEN
8218 popl KEYP
8219 #endif
8220 + pax_force_retaddr 0, 1
8221 ret
8222 +ENDPROC(aesni_enc)
8223
8224 /*
8225 * _aesni_enc1: internal ABI
8226 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8227 AESENC KEY STATE
8228 movaps 0x70(TKEYP), KEY
8229 AESENCLAST KEY STATE
8230 + pax_force_retaddr_bts
8231 ret
8232
8233 /*
8234 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8235 AESENCLAST KEY STATE2
8236 AESENCLAST KEY STATE3
8237 AESENCLAST KEY STATE4
8238 + pax_force_retaddr_bts
8239 ret
8240
8241 /*
8242 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8243 popl KLEN
8244 popl KEYP
8245 #endif
8246 + pax_force_retaddr 0, 1
8247 ret
8248 +ENDPROC(aesni_dec)
8249
8250 /*
8251 * _aesni_dec1: internal ABI
8252 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8253 AESDEC KEY STATE
8254 movaps 0x70(TKEYP), KEY
8255 AESDECLAST KEY STATE
8256 + pax_force_retaddr_bts
8257 ret
8258
8259 /*
8260 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8261 AESDECLAST KEY STATE2
8262 AESDECLAST KEY STATE3
8263 AESDECLAST KEY STATE4
8264 + pax_force_retaddr_bts
8265 ret
8266
8267 /*
8268 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8269 popl KEYP
8270 popl LEN
8271 #endif
8272 + pax_force_retaddr 0, 1
8273 ret
8274 +ENDPROC(aesni_ecb_enc)
8275
8276 /*
8277 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8278 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8279 popl KEYP
8280 popl LEN
8281 #endif
8282 + pax_force_retaddr 0, 1
8283 ret
8284 +ENDPROC(aesni_ecb_dec)
8285
8286 /*
8287 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8288 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8289 popl LEN
8290 popl IVP
8291 #endif
8292 + pax_force_retaddr 0, 1
8293 ret
8294 +ENDPROC(aesni_cbc_enc)
8295
8296 /*
8297 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8298 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8299 popl LEN
8300 popl IVP
8301 #endif
8302 + pax_force_retaddr 0, 1
8303 ret
8304 +ENDPROC(aesni_cbc_dec)
8305
8306 #ifdef __x86_64__
8307 .align 16
8308 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
8309 mov $1, TCTR_LOW
8310 MOVQ_R64_XMM TCTR_LOW INC
8311 MOVQ_R64_XMM CTR TCTR_LOW
8312 + pax_force_retaddr_bts
8313 ret
8314
8315 /*
8316 @@ -2554,6 +2582,7 @@ _aesni_inc:
8317 .Linc_low:
8318 movaps CTR, IV
8319 PSHUFB_XMM BSWAP_MASK IV
8320 + pax_force_retaddr_bts
8321 ret
8322
8323 /*
8324 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8325 .Lctr_enc_ret:
8326 movups IV, (IVP)
8327 .Lctr_enc_just_ret:
8328 + pax_force_retaddr 0, 1
8329 ret
8330 +ENDPROC(aesni_ctr_enc)
8331 #endif
8332 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8333 index 391d245..67f35c2 100644
8334 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8335 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8336 @@ -20,6 +20,8 @@
8337 *
8338 */
8339
8340 +#include <asm/alternative-asm.h>
8341 +
8342 .file "blowfish-x86_64-asm.S"
8343 .text
8344
8345 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8346 jnz __enc_xor;
8347
8348 write_block();
8349 + pax_force_retaddr 0, 1
8350 ret;
8351 __enc_xor:
8352 xor_block();
8353 + pax_force_retaddr 0, 1
8354 ret;
8355
8356 .align 8
8357 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8358
8359 movq %r11, %rbp;
8360
8361 + pax_force_retaddr 0, 1
8362 ret;
8363
8364 /**********************************************************************
8365 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8366
8367 popq %rbx;
8368 popq %rbp;
8369 + pax_force_retaddr 0, 1
8370 ret;
8371
8372 __enc_xor4:
8373 @@ -349,6 +355,7 @@ __enc_xor4:
8374
8375 popq %rbx;
8376 popq %rbp;
8377 + pax_force_retaddr 0, 1
8378 ret;
8379
8380 .align 8
8381 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8382 popq %rbx;
8383 popq %rbp;
8384
8385 + pax_force_retaddr 0, 1
8386 ret;
8387
8388 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8389 index 0b33743..7a56206 100644
8390 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8391 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8392 @@ -20,6 +20,8 @@
8393 *
8394 */
8395
8396 +#include <asm/alternative-asm.h>
8397 +
8398 .file "camellia-x86_64-asm_64.S"
8399 .text
8400
8401 @@ -229,12 +231,14 @@ __enc_done:
8402 enc_outunpack(mov, RT1);
8403
8404 movq RRBP, %rbp;
8405 + pax_force_retaddr 0, 1
8406 ret;
8407
8408 __enc_xor:
8409 enc_outunpack(xor, RT1);
8410
8411 movq RRBP, %rbp;
8412 + pax_force_retaddr 0, 1
8413 ret;
8414
8415 .global camellia_dec_blk;
8416 @@ -275,6 +279,7 @@ __dec_rounds16:
8417 dec_outunpack();
8418
8419 movq RRBP, %rbp;
8420 + pax_force_retaddr 0, 1
8421 ret;
8422
8423 /**********************************************************************
8424 @@ -468,6 +473,7 @@ __enc2_done:
8425
8426 movq RRBP, %rbp;
8427 popq %rbx;
8428 + pax_force_retaddr 0, 1
8429 ret;
8430
8431 __enc2_xor:
8432 @@ -475,6 +481,7 @@ __enc2_xor:
8433
8434 movq RRBP, %rbp;
8435 popq %rbx;
8436 + pax_force_retaddr 0, 1
8437 ret;
8438
8439 .global camellia_dec_blk_2way;
8440 @@ -517,4 +524,5 @@ __dec2_rounds16:
8441
8442 movq RRBP, %rbp;
8443 movq RXOR, %rbx;
8444 + pax_force_retaddr 0, 1
8445 ret;
8446 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8447 index 6214a9b..1f4fc9a 100644
8448 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8449 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8450 @@ -1,3 +1,5 @@
8451 +#include <asm/alternative-asm.h>
8452 +
8453 # enter ECRYPT_encrypt_bytes
8454 .text
8455 .p2align 5
8456 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8457 add %r11,%rsp
8458 mov %rdi,%rax
8459 mov %rsi,%rdx
8460 + pax_force_retaddr 0, 1
8461 ret
8462 # bytesatleast65:
8463 ._bytesatleast65:
8464 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8465 add %r11,%rsp
8466 mov %rdi,%rax
8467 mov %rsi,%rdx
8468 + pax_force_retaddr
8469 ret
8470 # enter ECRYPT_ivsetup
8471 .text
8472 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8473 add %r11,%rsp
8474 mov %rdi,%rax
8475 mov %rsi,%rdx
8476 + pax_force_retaddr
8477 ret
8478 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8479 index 3ee1ff0..cbc568b 100644
8480 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8481 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8482 @@ -24,6 +24,8 @@
8483 *
8484 */
8485
8486 +#include <asm/alternative-asm.h>
8487 +
8488 .file "serpent-sse2-x86_64-asm_64.S"
8489 .text
8490
8491 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8492 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8493 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8494
8495 + pax_force_retaddr
8496 ret;
8497
8498 __enc_xor8:
8499 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8500 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8501
8502 + pax_force_retaddr
8503 ret;
8504
8505 .align 8
8506 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8507 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8508 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8509
8510 + pax_force_retaddr
8511 ret;
8512 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8513 index b2c2f57..8470cab 100644
8514 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8515 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8516 @@ -28,6 +28,8 @@
8517 * (at your option) any later version.
8518 */
8519
8520 +#include <asm/alternative-asm.h>
8521 +
8522 #define CTX %rdi // arg1
8523 #define BUF %rsi // arg2
8524 #define CNT %rdx // arg3
8525 @@ -104,6 +106,7 @@
8526 pop %r12
8527 pop %rbp
8528 pop %rbx
8529 + pax_force_retaddr 0, 1
8530 ret
8531
8532 .size \name, .-\name
8533 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8534 index 5b012a2..36d5364 100644
8535 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8536 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8537 @@ -20,6 +20,8 @@
8538 *
8539 */
8540
8541 +#include <asm/alternative-asm.h>
8542 +
8543 .file "twofish-x86_64-asm-3way.S"
8544 .text
8545
8546 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8547 popq %r13;
8548 popq %r14;
8549 popq %r15;
8550 + pax_force_retaddr 0, 1
8551 ret;
8552
8553 __enc_xor3:
8554 @@ -271,6 +274,7 @@ __enc_xor3:
8555 popq %r13;
8556 popq %r14;
8557 popq %r15;
8558 + pax_force_retaddr 0, 1
8559 ret;
8560
8561 .global twofish_dec_blk_3way
8562 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8563 popq %r13;
8564 popq %r14;
8565 popq %r15;
8566 + pax_force_retaddr 0, 1
8567 ret;
8568
8569 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8570 index 7bcf3fc..f53832f 100644
8571 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8572 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8573 @@ -21,6 +21,7 @@
8574 .text
8575
8576 #include <asm/asm-offsets.h>
8577 +#include <asm/alternative-asm.h>
8578
8579 #define a_offset 0
8580 #define b_offset 4
8581 @@ -268,6 +269,7 @@ twofish_enc_blk:
8582
8583 popq R1
8584 movq $1,%rax
8585 + pax_force_retaddr 0, 1
8586 ret
8587
8588 twofish_dec_blk:
8589 @@ -319,4 +321,5 @@ twofish_dec_blk:
8590
8591 popq R1
8592 movq $1,%rax
8593 + pax_force_retaddr 0, 1
8594 ret
8595 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8596 index 07b3a68..bd2a388 100644
8597 --- a/arch/x86/ia32/ia32_aout.c
8598 +++ b/arch/x86/ia32/ia32_aout.c
8599 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8600 unsigned long dump_start, dump_size;
8601 struct user32 dump;
8602
8603 + memset(&dump, 0, sizeof(dump));
8604 +
8605 fs = get_fs();
8606 set_fs(KERNEL_DS);
8607 has_dumped = 1;
8608 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8609 index 4f5bfac..e1ef0d3 100644
8610 --- a/arch/x86/ia32/ia32_signal.c
8611 +++ b/arch/x86/ia32/ia32_signal.c
8612 @@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8613 }
8614 seg = get_fs();
8615 set_fs(KERNEL_DS);
8616 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8617 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8618 set_fs(seg);
8619 if (ret >= 0 && uoss_ptr) {
8620 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8621 @@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8622 */
8623 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8624 size_t frame_size,
8625 - void **fpstate)
8626 + void __user **fpstate)
8627 {
8628 unsigned long sp;
8629
8630 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8631
8632 if (used_math()) {
8633 sp = sp - sig_xstate_ia32_size;
8634 - *fpstate = (struct _fpstate_ia32 *) sp;
8635 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8636 if (save_i387_xstate_ia32(*fpstate) < 0)
8637 return (void __user *) -1L;
8638 }
8639 @@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8640 sp -= frame_size;
8641 /* Align the stack pointer according to the i386 ABI,
8642 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8643 - sp = ((sp + 4) & -16ul) - 4;
8644 + sp = ((sp - 12) & -16ul) - 4;
8645 return (void __user *) sp;
8646 }
8647
8648 @@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8649 * These are actually not used anymore, but left because some
8650 * gdb versions depend on them as a marker.
8651 */
8652 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8653 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8654 } put_user_catch(err);
8655
8656 if (err)
8657 @@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8658 0xb8,
8659 __NR_ia32_rt_sigreturn,
8660 0x80cd,
8661 - 0,
8662 + 0
8663 };
8664
8665 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8666 @@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8667
8668 if (ka->sa.sa_flags & SA_RESTORER)
8669 restorer = ka->sa.sa_restorer;
8670 + else if (current->mm->context.vdso)
8671 + /* Return stub is in 32bit vsyscall page */
8672 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8673 else
8674 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8675 - rt_sigreturn);
8676 + restorer = &frame->retcode;
8677 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8678
8679 /*
8680 * Not actually used anymore, but left because some gdb
8681 * versions need it.
8682 */
8683 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8684 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8685 } put_user_catch(err);
8686
8687 if (err)
8688 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8689 index e3e7340..05ed805 100644
8690 --- a/arch/x86/ia32/ia32entry.S
8691 +++ b/arch/x86/ia32/ia32entry.S
8692 @@ -13,8 +13,10 @@
8693 #include <asm/thread_info.h>
8694 #include <asm/segment.h>
8695 #include <asm/irqflags.h>
8696 +#include <asm/pgtable.h>
8697 #include <linux/linkage.h>
8698 #include <linux/err.h>
8699 +#include <asm/alternative-asm.h>
8700
8701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8702 #include <linux/elf-em.h>
8703 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8704 ENDPROC(native_irq_enable_sysexit)
8705 #endif
8706
8707 + .macro pax_enter_kernel_user
8708 + pax_set_fptr_mask
8709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8710 + call pax_enter_kernel_user
8711 +#endif
8712 + .endm
8713 +
8714 + .macro pax_exit_kernel_user
8715 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8716 + call pax_exit_kernel_user
8717 +#endif
8718 +#ifdef CONFIG_PAX_RANDKSTACK
8719 + pushq %rax
8720 + pushq %r11
8721 + call pax_randomize_kstack
8722 + popq %r11
8723 + popq %rax
8724 +#endif
8725 + .endm
8726 +
8727 +.macro pax_erase_kstack
8728 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8729 + call pax_erase_kstack
8730 +#endif
8731 +.endm
8732 +
8733 /*
8734 * 32bit SYSENTER instruction entry.
8735 *
8736 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8737 CFI_REGISTER rsp,rbp
8738 SWAPGS_UNSAFE_STACK
8739 movq PER_CPU_VAR(kernel_stack), %rsp
8740 - addq $(KERNEL_STACK_OFFSET),%rsp
8741 - /*
8742 - * No need to follow this irqs on/off section: the syscall
8743 - * disabled irqs, here we enable it straight after entry:
8744 - */
8745 - ENABLE_INTERRUPTS(CLBR_NONE)
8746 movl %ebp,%ebp /* zero extension */
8747 pushq_cfi $__USER32_DS
8748 /*CFI_REL_OFFSET ss,0*/
8749 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8750 CFI_REL_OFFSET rsp,0
8751 pushfq_cfi
8752 /*CFI_REL_OFFSET rflags,0*/
8753 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8754 - CFI_REGISTER rip,r10
8755 + orl $X86_EFLAGS_IF,(%rsp)
8756 + GET_THREAD_INFO(%r11)
8757 + movl TI_sysenter_return(%r11), %r11d
8758 + CFI_REGISTER rip,r11
8759 pushq_cfi $__USER32_CS
8760 /*CFI_REL_OFFSET cs,0*/
8761 movl %eax, %eax
8762 - pushq_cfi %r10
8763 + pushq_cfi %r11
8764 CFI_REL_OFFSET rip,0
8765 pushq_cfi %rax
8766 cld
8767 SAVE_ARGS 0,1,0
8768 + pax_enter_kernel_user
8769 + /*
8770 + * No need to follow this irqs on/off section: the syscall
8771 + * disabled irqs, here we enable it straight after entry:
8772 + */
8773 + ENABLE_INTERRUPTS(CLBR_NONE)
8774 /* no need to do an access_ok check here because rbp has been
8775 32bit zero extended */
8776 +
8777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8778 + mov $PAX_USER_SHADOW_BASE,%r11
8779 + add %r11,%rbp
8780 +#endif
8781 +
8782 1: movl (%rbp),%ebp
8783 .section __ex_table,"a"
8784 .quad 1b,ia32_badarg
8785 .previous
8786 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8787 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8788 + GET_THREAD_INFO(%r11)
8789 + orl $TS_COMPAT,TI_status(%r11)
8790 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8791 CFI_REMEMBER_STATE
8792 jnz sysenter_tracesys
8793 cmpq $(IA32_NR_syscalls-1),%rax
8794 @@ -160,12 +197,15 @@ sysenter_do_call:
8795 sysenter_dispatch:
8796 call *ia32_sys_call_table(,%rax,8)
8797 movq %rax,RAX-ARGOFFSET(%rsp)
8798 + GET_THREAD_INFO(%r11)
8799 DISABLE_INTERRUPTS(CLBR_NONE)
8800 TRACE_IRQS_OFF
8801 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8802 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8803 jnz sysexit_audit
8804 sysexit_from_sys_call:
8805 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8806 + pax_exit_kernel_user
8807 + pax_erase_kstack
8808 + andl $~TS_COMPAT,TI_status(%r11)
8809 /* clear IF, that popfq doesn't enable interrupts early */
8810 andl $~0x200,EFLAGS-R11(%rsp)
8811 movl RIP-R11(%rsp),%edx /* User %eip */
8812 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8813 movl %eax,%esi /* 2nd arg: syscall number */
8814 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8815 call __audit_syscall_entry
8816 +
8817 + pax_erase_kstack
8818 +
8819 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8820 cmpq $(IA32_NR_syscalls-1),%rax
8821 ja ia32_badsys
8822 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8823 .endm
8824
8825 .macro auditsys_exit exit
8826 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8827 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8828 jnz ia32_ret_from_sys_call
8829 TRACE_IRQS_ON
8830 sti
8831 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8832 1: setbe %al /* 1 if error, 0 if not */
8833 movzbl %al,%edi /* zero-extend that into %edi */
8834 call __audit_syscall_exit
8835 + GET_THREAD_INFO(%r11)
8836 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8837 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8838 cli
8839 TRACE_IRQS_OFF
8840 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8841 + testl %edi,TI_flags(%r11)
8842 jz \exit
8843 CLEAR_RREGS -ARGOFFSET
8844 jmp int_with_check
8845 @@ -235,7 +279,7 @@ sysexit_audit:
8846
8847 sysenter_tracesys:
8848 #ifdef CONFIG_AUDITSYSCALL
8849 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8850 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8851 jz sysenter_auditsys
8852 #endif
8853 SAVE_REST
8854 @@ -243,6 +287,9 @@ sysenter_tracesys:
8855 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8856 movq %rsp,%rdi /* &pt_regs -> arg1 */
8857 call syscall_trace_enter
8858 +
8859 + pax_erase_kstack
8860 +
8861 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8862 RESTORE_REST
8863 cmpq $(IA32_NR_syscalls-1),%rax
8864 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8865 ENTRY(ia32_cstar_target)
8866 CFI_STARTPROC32 simple
8867 CFI_SIGNAL_FRAME
8868 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8869 + CFI_DEF_CFA rsp,0
8870 CFI_REGISTER rip,rcx
8871 /*CFI_REGISTER rflags,r11*/
8872 SWAPGS_UNSAFE_STACK
8873 movl %esp,%r8d
8874 CFI_REGISTER rsp,r8
8875 movq PER_CPU_VAR(kernel_stack),%rsp
8876 + SAVE_ARGS 8*6,0,0
8877 + pax_enter_kernel_user
8878 /*
8879 * No need to follow this irqs on/off section: the syscall
8880 * disabled irqs and here we enable it straight after entry:
8881 */
8882 ENABLE_INTERRUPTS(CLBR_NONE)
8883 - SAVE_ARGS 8,0,0
8884 movl %eax,%eax /* zero extension */
8885 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8886 movq %rcx,RIP-ARGOFFSET(%rsp)
8887 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8888 /* no need to do an access_ok check here because r8 has been
8889 32bit zero extended */
8890 /* hardware stack frame is complete now */
8891 +
8892 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8893 + mov $PAX_USER_SHADOW_BASE,%r11
8894 + add %r11,%r8
8895 +#endif
8896 +
8897 1: movl (%r8),%r9d
8898 .section __ex_table,"a"
8899 .quad 1b,ia32_badarg
8900 .previous
8901 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8902 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8903 + GET_THREAD_INFO(%r11)
8904 + orl $TS_COMPAT,TI_status(%r11)
8905 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8906 CFI_REMEMBER_STATE
8907 jnz cstar_tracesys
8908 cmpq $IA32_NR_syscalls-1,%rax
8909 @@ -317,12 +372,15 @@ cstar_do_call:
8910 cstar_dispatch:
8911 call *ia32_sys_call_table(,%rax,8)
8912 movq %rax,RAX-ARGOFFSET(%rsp)
8913 + GET_THREAD_INFO(%r11)
8914 DISABLE_INTERRUPTS(CLBR_NONE)
8915 TRACE_IRQS_OFF
8916 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8917 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8918 jnz sysretl_audit
8919 sysretl_from_sys_call:
8920 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8921 + pax_exit_kernel_user
8922 + pax_erase_kstack
8923 + andl $~TS_COMPAT,TI_status(%r11)
8924 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8925 movl RIP-ARGOFFSET(%rsp),%ecx
8926 CFI_REGISTER rip,rcx
8927 @@ -350,7 +408,7 @@ sysretl_audit:
8928
8929 cstar_tracesys:
8930 #ifdef CONFIG_AUDITSYSCALL
8931 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8932 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8933 jz cstar_auditsys
8934 #endif
8935 xchgl %r9d,%ebp
8936 @@ -359,6 +417,9 @@ cstar_tracesys:
8937 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8938 movq %rsp,%rdi /* &pt_regs -> arg1 */
8939 call syscall_trace_enter
8940 +
8941 + pax_erase_kstack
8942 +
8943 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8944 RESTORE_REST
8945 xchgl %ebp,%r9d
8946 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8947 CFI_REL_OFFSET rip,RIP-RIP
8948 PARAVIRT_ADJUST_EXCEPTION_FRAME
8949 SWAPGS
8950 - /*
8951 - * No need to follow this irqs on/off section: the syscall
8952 - * disabled irqs and here we enable it straight after entry:
8953 - */
8954 - ENABLE_INTERRUPTS(CLBR_NONE)
8955 movl %eax,%eax
8956 pushq_cfi %rax
8957 cld
8958 /* note the registers are not zero extended to the sf.
8959 this could be a problem. */
8960 SAVE_ARGS 0,1,0
8961 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8962 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8963 + pax_enter_kernel_user
8964 + /*
8965 + * No need to follow this irqs on/off section: the syscall
8966 + * disabled irqs and here we enable it straight after entry:
8967 + */
8968 + ENABLE_INTERRUPTS(CLBR_NONE)
8969 + GET_THREAD_INFO(%r11)
8970 + orl $TS_COMPAT,TI_status(%r11)
8971 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8972 jnz ia32_tracesys
8973 cmpq $(IA32_NR_syscalls-1),%rax
8974 ja ia32_badsys
8975 @@ -435,6 +498,9 @@ ia32_tracesys:
8976 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8977 movq %rsp,%rdi /* &pt_regs -> arg1 */
8978 call syscall_trace_enter
8979 +
8980 + pax_erase_kstack
8981 +
8982 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8983 RESTORE_REST
8984 cmpq $(IA32_NR_syscalls-1),%rax
8985 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8986 index aec2202..f76174e 100644
8987 --- a/arch/x86/ia32/sys_ia32.c
8988 +++ b/arch/x86/ia32/sys_ia32.c
8989 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8990 */
8991 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8992 {
8993 - typeof(ubuf->st_uid) uid = 0;
8994 - typeof(ubuf->st_gid) gid = 0;
8995 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8996 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8997 SET_UID(uid, stat->uid);
8998 SET_GID(gid, stat->gid);
8999 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9000 @@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9001 return alarm_setitimer(seconds);
9002 }
9003
9004 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9005 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9006 int options)
9007 {
9008 return compat_sys_wait4(pid, stat_addr, options, NULL);
9009 @@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9010 mm_segment_t old_fs = get_fs();
9011
9012 set_fs(KERNEL_DS);
9013 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9014 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9015 set_fs(old_fs);
9016 if (put_compat_timespec(&t, interval))
9017 return -EFAULT;
9018 @@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9019 mm_segment_t old_fs = get_fs();
9020
9021 set_fs(KERNEL_DS);
9022 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9023 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9024 set_fs(old_fs);
9025 if (!ret) {
9026 switch (_NSIG_WORDS) {
9027 @@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9028 if (copy_siginfo_from_user32(&info, uinfo))
9029 return -EFAULT;
9030 set_fs(KERNEL_DS);
9031 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9032 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9033 set_fs(old_fs);
9034 return ret;
9035 }
9036 @@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9037 return -EFAULT;
9038
9039 set_fs(KERNEL_DS);
9040 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9041 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9042 count);
9043 set_fs(old_fs);
9044
9045 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9046 index 952bd01..7692c6f 100644
9047 --- a/arch/x86/include/asm/alternative-asm.h
9048 +++ b/arch/x86/include/asm/alternative-asm.h
9049 @@ -15,6 +15,45 @@
9050 .endm
9051 #endif
9052
9053 +#ifdef KERNEXEC_PLUGIN
9054 + .macro pax_force_retaddr_bts rip=0
9055 + btsq $63,\rip(%rsp)
9056 + .endm
9057 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9058 + .macro pax_force_retaddr rip=0, reload=0
9059 + btsq $63,\rip(%rsp)
9060 + .endm
9061 + .macro pax_force_fptr ptr
9062 + btsq $63,\ptr
9063 + .endm
9064 + .macro pax_set_fptr_mask
9065 + .endm
9066 +#endif
9067 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9068 + .macro pax_force_retaddr rip=0, reload=0
9069 + .if \reload
9070 + pax_set_fptr_mask
9071 + .endif
9072 + orq %r10,\rip(%rsp)
9073 + .endm
9074 + .macro pax_force_fptr ptr
9075 + orq %r10,\ptr
9076 + .endm
9077 + .macro pax_set_fptr_mask
9078 + movabs $0x8000000000000000,%r10
9079 + .endm
9080 +#endif
9081 +#else
9082 + .macro pax_force_retaddr rip=0, reload=0
9083 + .endm
9084 + .macro pax_force_fptr ptr
9085 + .endm
9086 + .macro pax_force_retaddr_bts rip=0
9087 + .endm
9088 + .macro pax_set_fptr_mask
9089 + .endm
9090 +#endif
9091 +
9092 .macro altinstruction_entry orig alt feature orig_len alt_len
9093 .long \orig - .
9094 .long \alt - .
9095 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9096 index 49331be..9706065 100644
9097 --- a/arch/x86/include/asm/alternative.h
9098 +++ b/arch/x86/include/asm/alternative.h
9099 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9100 ".section .discard,\"aw\",@progbits\n" \
9101 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9102 ".previous\n" \
9103 - ".section .altinstr_replacement, \"ax\"\n" \
9104 + ".section .altinstr_replacement, \"a\"\n" \
9105 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9106 ".previous"
9107
9108 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9109 index d854101..f6ea947 100644
9110 --- a/arch/x86/include/asm/apic.h
9111 +++ b/arch/x86/include/asm/apic.h
9112 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9113
9114 #ifdef CONFIG_X86_LOCAL_APIC
9115
9116 -extern unsigned int apic_verbosity;
9117 +extern int apic_verbosity;
9118 extern int local_apic_timer_c2_ok;
9119
9120 extern int disable_apic;
9121 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9122 index 20370c6..a2eb9b0 100644
9123 --- a/arch/x86/include/asm/apm.h
9124 +++ b/arch/x86/include/asm/apm.h
9125 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9126 __asm__ __volatile__(APM_DO_ZERO_SEGS
9127 "pushl %%edi\n\t"
9128 "pushl %%ebp\n\t"
9129 - "lcall *%%cs:apm_bios_entry\n\t"
9130 + "lcall *%%ss:apm_bios_entry\n\t"
9131 "setc %%al\n\t"
9132 "popl %%ebp\n\t"
9133 "popl %%edi\n\t"
9134 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9135 __asm__ __volatile__(APM_DO_ZERO_SEGS
9136 "pushl %%edi\n\t"
9137 "pushl %%ebp\n\t"
9138 - "lcall *%%cs:apm_bios_entry\n\t"
9139 + "lcall *%%ss:apm_bios_entry\n\t"
9140 "setc %%bl\n\t"
9141 "popl %%ebp\n\t"
9142 "popl %%edi\n\t"
9143 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9144 index 58cb6d4..a4b806c 100644
9145 --- a/arch/x86/include/asm/atomic.h
9146 +++ b/arch/x86/include/asm/atomic.h
9147 @@ -22,7 +22,18 @@
9148 */
9149 static inline int atomic_read(const atomic_t *v)
9150 {
9151 - return (*(volatile int *)&(v)->counter);
9152 + return (*(volatile const int *)&(v)->counter);
9153 +}
9154 +
9155 +/**
9156 + * atomic_read_unchecked - read atomic variable
9157 + * @v: pointer of type atomic_unchecked_t
9158 + *
9159 + * Atomically reads the value of @v.
9160 + */
9161 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9162 +{
9163 + return (*(volatile const int *)&(v)->counter);
9164 }
9165
9166 /**
9167 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9168 }
9169
9170 /**
9171 + * atomic_set_unchecked - set atomic variable
9172 + * @v: pointer of type atomic_unchecked_t
9173 + * @i: required value
9174 + *
9175 + * Atomically sets the value of @v to @i.
9176 + */
9177 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9178 +{
9179 + v->counter = i;
9180 +}
9181 +
9182 +/**
9183 * atomic_add - add integer to atomic variable
9184 * @i: integer value to add
9185 * @v: pointer of type atomic_t
9186 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9187 */
9188 static inline void atomic_add(int i, atomic_t *v)
9189 {
9190 - asm volatile(LOCK_PREFIX "addl %1,%0"
9191 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9192 +
9193 +#ifdef CONFIG_PAX_REFCOUNT
9194 + "jno 0f\n"
9195 + LOCK_PREFIX "subl %1,%0\n"
9196 + "int $4\n0:\n"
9197 + _ASM_EXTABLE(0b, 0b)
9198 +#endif
9199 +
9200 + : "+m" (v->counter)
9201 + : "ir" (i));
9202 +}
9203 +
9204 +/**
9205 + * atomic_add_unchecked - add integer to atomic variable
9206 + * @i: integer value to add
9207 + * @v: pointer of type atomic_unchecked_t
9208 + *
9209 + * Atomically adds @i to @v.
9210 + */
9211 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9212 +{
9213 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9214 : "+m" (v->counter)
9215 : "ir" (i));
9216 }
9217 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9218 */
9219 static inline void atomic_sub(int i, atomic_t *v)
9220 {
9221 - asm volatile(LOCK_PREFIX "subl %1,%0"
9222 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9223 +
9224 +#ifdef CONFIG_PAX_REFCOUNT
9225 + "jno 0f\n"
9226 + LOCK_PREFIX "addl %1,%0\n"
9227 + "int $4\n0:\n"
9228 + _ASM_EXTABLE(0b, 0b)
9229 +#endif
9230 +
9231 + : "+m" (v->counter)
9232 + : "ir" (i));
9233 +}
9234 +
9235 +/**
9236 + * atomic_sub_unchecked - subtract integer from atomic variable
9237 + * @i: integer value to subtract
9238 + * @v: pointer of type atomic_unchecked_t
9239 + *
9240 + * Atomically subtracts @i from @v.
9241 + */
9242 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9243 +{
9244 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9245 : "+m" (v->counter)
9246 : "ir" (i));
9247 }
9248 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9249 {
9250 unsigned char c;
9251
9252 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9253 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9254 +
9255 +#ifdef CONFIG_PAX_REFCOUNT
9256 + "jno 0f\n"
9257 + LOCK_PREFIX "addl %2,%0\n"
9258 + "int $4\n0:\n"
9259 + _ASM_EXTABLE(0b, 0b)
9260 +#endif
9261 +
9262 + "sete %1\n"
9263 : "+m" (v->counter), "=qm" (c)
9264 : "ir" (i) : "memory");
9265 return c;
9266 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9267 */
9268 static inline void atomic_inc(atomic_t *v)
9269 {
9270 - asm volatile(LOCK_PREFIX "incl %0"
9271 + asm volatile(LOCK_PREFIX "incl %0\n"
9272 +
9273 +#ifdef CONFIG_PAX_REFCOUNT
9274 + "jno 0f\n"
9275 + LOCK_PREFIX "decl %0\n"
9276 + "int $4\n0:\n"
9277 + _ASM_EXTABLE(0b, 0b)
9278 +#endif
9279 +
9280 + : "+m" (v->counter));
9281 +}
9282 +
9283 +/**
9284 + * atomic_inc_unchecked - increment atomic variable
9285 + * @v: pointer of type atomic_unchecked_t
9286 + *
9287 + * Atomically increments @v by 1.
9288 + */
9289 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9290 +{
9291 + asm volatile(LOCK_PREFIX "incl %0\n"
9292 : "+m" (v->counter));
9293 }
9294
9295 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9296 */
9297 static inline void atomic_dec(atomic_t *v)
9298 {
9299 - asm volatile(LOCK_PREFIX "decl %0"
9300 + asm volatile(LOCK_PREFIX "decl %0\n"
9301 +
9302 +#ifdef CONFIG_PAX_REFCOUNT
9303 + "jno 0f\n"
9304 + LOCK_PREFIX "incl %0\n"
9305 + "int $4\n0:\n"
9306 + _ASM_EXTABLE(0b, 0b)
9307 +#endif
9308 +
9309 + : "+m" (v->counter));
9310 +}
9311 +
9312 +/**
9313 + * atomic_dec_unchecked - decrement atomic variable
9314 + * @v: pointer of type atomic_unchecked_t
9315 + *
9316 + * Atomically decrements @v by 1.
9317 + */
9318 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9319 +{
9320 + asm volatile(LOCK_PREFIX "decl %0\n"
9321 : "+m" (v->counter));
9322 }
9323
9324 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9325 {
9326 unsigned char c;
9327
9328 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9329 + asm volatile(LOCK_PREFIX "decl %0\n"
9330 +
9331 +#ifdef CONFIG_PAX_REFCOUNT
9332 + "jno 0f\n"
9333 + LOCK_PREFIX "incl %0\n"
9334 + "int $4\n0:\n"
9335 + _ASM_EXTABLE(0b, 0b)
9336 +#endif
9337 +
9338 + "sete %1\n"
9339 : "+m" (v->counter), "=qm" (c)
9340 : : "memory");
9341 return c != 0;
9342 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9343 {
9344 unsigned char c;
9345
9346 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9347 + asm volatile(LOCK_PREFIX "incl %0\n"
9348 +
9349 +#ifdef CONFIG_PAX_REFCOUNT
9350 + "jno 0f\n"
9351 + LOCK_PREFIX "decl %0\n"
9352 + "int $4\n0:\n"
9353 + _ASM_EXTABLE(0b, 0b)
9354 +#endif
9355 +
9356 + "sete %1\n"
9357 + : "+m" (v->counter), "=qm" (c)
9358 + : : "memory");
9359 + return c != 0;
9360 +}
9361 +
9362 +/**
9363 + * atomic_inc_and_test_unchecked - increment and test
9364 + * @v: pointer of type atomic_unchecked_t
9365 + *
9366 + * Atomically increments @v by 1
9367 + * and returns true if the result is zero, or false for all
9368 + * other cases.
9369 + */
9370 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9371 +{
9372 + unsigned char c;
9373 +
9374 + asm volatile(LOCK_PREFIX "incl %0\n"
9375 + "sete %1\n"
9376 : "+m" (v->counter), "=qm" (c)
9377 : : "memory");
9378 return c != 0;
9379 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9380 {
9381 unsigned char c;
9382
9383 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9384 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9385 +
9386 +#ifdef CONFIG_PAX_REFCOUNT
9387 + "jno 0f\n"
9388 + LOCK_PREFIX "subl %2,%0\n"
9389 + "int $4\n0:\n"
9390 + _ASM_EXTABLE(0b, 0b)
9391 +#endif
9392 +
9393 + "sets %1\n"
9394 : "+m" (v->counter), "=qm" (c)
9395 : "ir" (i) : "memory");
9396 return c;
9397 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9398 goto no_xadd;
9399 #endif
9400 /* Modern 486+ processor */
9401 - return i + xadd(&v->counter, i);
9402 + return i + xadd_check_overflow(&v->counter, i);
9403
9404 #ifdef CONFIG_M386
9405 no_xadd: /* Legacy 386 processor */
9406 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9407 }
9408
9409 /**
9410 + * atomic_add_return_unchecked - add integer and return
9411 + * @i: integer value to add
9412 + * @v: pointer of type atomic_unchecked_t
9413 + *
9414 + * Atomically adds @i to @v and returns @i + @v
9415 + */
9416 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9417 +{
9418 +#ifdef CONFIG_M386
9419 + int __i;
9420 + unsigned long flags;
9421 + if (unlikely(boot_cpu_data.x86 <= 3))
9422 + goto no_xadd;
9423 +#endif
9424 + /* Modern 486+ processor */
9425 + return i + xadd(&v->counter, i);
9426 +
9427 +#ifdef CONFIG_M386
9428 +no_xadd: /* Legacy 386 processor */
9429 + raw_local_irq_save(flags);
9430 + __i = atomic_read_unchecked(v);
9431 + atomic_set_unchecked(v, i + __i);
9432 + raw_local_irq_restore(flags);
9433 + return i + __i;
9434 +#endif
9435 +}
9436 +
9437 +/**
9438 * atomic_sub_return - subtract integer and return
9439 * @v: pointer of type atomic_t
9440 * @i: integer value to subtract
9441 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9442 }
9443
9444 #define atomic_inc_return(v) (atomic_add_return(1, v))
9445 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9446 +{
9447 + return atomic_add_return_unchecked(1, v);
9448 +}
9449 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9450
9451 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9452 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9453 return cmpxchg(&v->counter, old, new);
9454 }
9455
9456 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9457 +{
9458 + return cmpxchg(&v->counter, old, new);
9459 +}
9460 +
9461 static inline int atomic_xchg(atomic_t *v, int new)
9462 {
9463 return xchg(&v->counter, new);
9464 }
9465
9466 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9467 +{
9468 + return xchg(&v->counter, new);
9469 +}
9470 +
9471 /**
9472 * __atomic_add_unless - add unless the number is already a given value
9473 * @v: pointer of type atomic_t
9474 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9475 */
9476 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9477 {
9478 - int c, old;
9479 + int c, old, new;
9480 c = atomic_read(v);
9481 for (;;) {
9482 - if (unlikely(c == (u)))
9483 + if (unlikely(c == u))
9484 break;
9485 - old = atomic_cmpxchg((v), c, c + (a));
9486 +
9487 + asm volatile("addl %2,%0\n"
9488 +
9489 +#ifdef CONFIG_PAX_REFCOUNT
9490 + "jno 0f\n"
9491 + "subl %2,%0\n"
9492 + "int $4\n0:\n"
9493 + _ASM_EXTABLE(0b, 0b)
9494 +#endif
9495 +
9496 + : "=r" (new)
9497 + : "0" (c), "ir" (a));
9498 +
9499 + old = atomic_cmpxchg(v, c, new);
9500 if (likely(old == c))
9501 break;
9502 c = old;
9503 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9504 return c;
9505 }
9506
9507 +/**
9508 + * atomic_inc_not_zero_hint - increment if not null
9509 + * @v: pointer of type atomic_t
9510 + * @hint: probable value of the atomic before the increment
9511 + *
9512 + * This version of atomic_inc_not_zero() gives a hint of probable
9513 + * value of the atomic. This helps processor to not read the memory
9514 + * before doing the atomic read/modify/write cycle, lowering
9515 + * number of bus transactions on some arches.
9516 + *
9517 + * Returns: 0 if increment was not done, 1 otherwise.
9518 + */
9519 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9520 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9521 +{
9522 + int val, c = hint, new;
9523 +
9524 + /* sanity test, should be removed by compiler if hint is a constant */
9525 + if (!hint)
9526 + return __atomic_add_unless(v, 1, 0);
9527 +
9528 + do {
9529 + asm volatile("incl %0\n"
9530 +
9531 +#ifdef CONFIG_PAX_REFCOUNT
9532 + "jno 0f\n"
9533 + "decl %0\n"
9534 + "int $4\n0:\n"
9535 + _ASM_EXTABLE(0b, 0b)
9536 +#endif
9537 +
9538 + : "=r" (new)
9539 + : "0" (c));
9540 +
9541 + val = atomic_cmpxchg(v, c, new);
9542 + if (val == c)
9543 + return 1;
9544 + c = val;
9545 + } while (c);
9546 +
9547 + return 0;
9548 +}
9549
9550 /*
9551 * atomic_dec_if_positive - decrement by 1 if old value positive
9552 @@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9553 #endif
9554
9555 /* These are x86-specific, used by some header files */
9556 -#define atomic_clear_mask(mask, addr) \
9557 - asm volatile(LOCK_PREFIX "andl %0,%1" \
9558 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
9559 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9560 +{
9561 + asm volatile(LOCK_PREFIX "andl %1,%0"
9562 + : "+m" (v->counter)
9563 + : "r" (~(mask))
9564 + : "memory");
9565 +}
9566
9567 -#define atomic_set_mask(mask, addr) \
9568 - asm volatile(LOCK_PREFIX "orl %0,%1" \
9569 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9570 - : "memory")
9571 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9572 +{
9573 + asm volatile(LOCK_PREFIX "andl %1,%0"
9574 + : "+m" (v->counter)
9575 + : "r" (~(mask))
9576 + : "memory");
9577 +}
9578 +
9579 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9580 +{
9581 + asm volatile(LOCK_PREFIX "orl %1,%0"
9582 + : "+m" (v->counter)
9583 + : "r" (mask)
9584 + : "memory");
9585 +}
9586 +
9587 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9588 +{
9589 + asm volatile(LOCK_PREFIX "orl %1,%0"
9590 + : "+m" (v->counter)
9591 + : "r" (mask)
9592 + : "memory");
9593 +}
9594
9595 /* Atomic operations are already serializing on x86 */
9596 #define smp_mb__before_atomic_dec() barrier()
9597 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9598 index 1981199..36b9dfb 100644
9599 --- a/arch/x86/include/asm/atomic64_32.h
9600 +++ b/arch/x86/include/asm/atomic64_32.h
9601 @@ -12,6 +12,14 @@ typedef struct {
9602 u64 __aligned(8) counter;
9603 } atomic64_t;
9604
9605 +#ifdef CONFIG_PAX_REFCOUNT
9606 +typedef struct {
9607 + u64 __aligned(8) counter;
9608 +} atomic64_unchecked_t;
9609 +#else
9610 +typedef atomic64_t atomic64_unchecked_t;
9611 +#endif
9612 +
9613 #define ATOMIC64_INIT(val) { (val) }
9614
9615 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9616 @@ -37,21 +45,31 @@ typedef struct {
9617 ATOMIC64_DECL_ONE(sym##_386)
9618
9619 ATOMIC64_DECL_ONE(add_386);
9620 +ATOMIC64_DECL_ONE(add_unchecked_386);
9621 ATOMIC64_DECL_ONE(sub_386);
9622 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9623 ATOMIC64_DECL_ONE(inc_386);
9624 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9625 ATOMIC64_DECL_ONE(dec_386);
9626 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9627 #endif
9628
9629 #define alternative_atomic64(f, out, in...) \
9630 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9631
9632 ATOMIC64_DECL(read);
9633 +ATOMIC64_DECL(read_unchecked);
9634 ATOMIC64_DECL(set);
9635 +ATOMIC64_DECL(set_unchecked);
9636 ATOMIC64_DECL(xchg);
9637 ATOMIC64_DECL(add_return);
9638 +ATOMIC64_DECL(add_return_unchecked);
9639 ATOMIC64_DECL(sub_return);
9640 +ATOMIC64_DECL(sub_return_unchecked);
9641 ATOMIC64_DECL(inc_return);
9642 +ATOMIC64_DECL(inc_return_unchecked);
9643 ATOMIC64_DECL(dec_return);
9644 +ATOMIC64_DECL(dec_return_unchecked);
9645 ATOMIC64_DECL(dec_if_positive);
9646 ATOMIC64_DECL(inc_not_zero);
9647 ATOMIC64_DECL(add_unless);
9648 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9649 }
9650
9651 /**
9652 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9653 + * @p: pointer to type atomic64_unchecked_t
9654 + * @o: expected value
9655 + * @n: new value
9656 + *
9657 + * Atomically sets @v to @n if it was equal to @o and returns
9658 + * the old value.
9659 + */
9660 +
9661 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9662 +{
9663 + return cmpxchg64(&v->counter, o, n);
9664 +}
9665 +
9666 +/**
9667 * atomic64_xchg - xchg atomic64 variable
9668 * @v: pointer to type atomic64_t
9669 * @n: value to assign
9670 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9671 }
9672
9673 /**
9674 + * atomic64_set_unchecked - set atomic64 variable
9675 + * @v: pointer to type atomic64_unchecked_t
9676 + * @n: value to assign
9677 + *
9678 + * Atomically sets the value of @v to @n.
9679 + */
9680 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9681 +{
9682 + unsigned high = (unsigned)(i >> 32);
9683 + unsigned low = (unsigned)i;
9684 + alternative_atomic64(set, /* no output */,
9685 + "S" (v), "b" (low), "c" (high)
9686 + : "eax", "edx", "memory");
9687 +}
9688 +
9689 +/**
9690 * atomic64_read - read atomic64 variable
9691 * @v: pointer to type atomic64_t
9692 *
9693 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9694 }
9695
9696 /**
9697 + * atomic64_read_unchecked - read atomic64 variable
9698 + * @v: pointer to type atomic64_unchecked_t
9699 + *
9700 + * Atomically reads the value of @v and returns it.
9701 + */
9702 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9703 +{
9704 + long long r;
9705 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9706 + return r;
9707 + }
9708 +
9709 +/**
9710 * atomic64_add_return - add and return
9711 * @i: integer value to add
9712 * @v: pointer to type atomic64_t
9713 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9714 return i;
9715 }
9716
9717 +/**
9718 + * atomic64_add_return_unchecked - add and return
9719 + * @i: integer value to add
9720 + * @v: pointer to type atomic64_unchecked_t
9721 + *
9722 + * Atomically adds @i to @v and returns @i + *@v
9723 + */
9724 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9725 +{
9726 + alternative_atomic64(add_return_unchecked,
9727 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9728 + ASM_NO_INPUT_CLOBBER("memory"));
9729 + return i;
9730 +}
9731 +
9732 /*
9733 * Other variants with different arithmetic operators:
9734 */
9735 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9736 return a;
9737 }
9738
9739 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9740 +{
9741 + long long a;
9742 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9743 + "S" (v) : "memory", "ecx");
9744 + return a;
9745 +}
9746 +
9747 static inline long long atomic64_dec_return(atomic64_t *v)
9748 {
9749 long long a;
9750 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9751 }
9752
9753 /**
9754 + * atomic64_add_unchecked - add integer to atomic64 variable
9755 + * @i: integer value to add
9756 + * @v: pointer to type atomic64_unchecked_t
9757 + *
9758 + * Atomically adds @i to @v.
9759 + */
9760 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9761 +{
9762 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9763 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9764 + ASM_NO_INPUT_CLOBBER("memory"));
9765 + return i;
9766 +}
9767 +
9768 +/**
9769 * atomic64_sub - subtract the atomic64 variable
9770 * @i: integer value to subtract
9771 * @v: pointer to type atomic64_t
9772 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9773 index 0e1cbfc..5623683 100644
9774 --- a/arch/x86/include/asm/atomic64_64.h
9775 +++ b/arch/x86/include/asm/atomic64_64.h
9776 @@ -18,7 +18,19 @@
9777 */
9778 static inline long atomic64_read(const atomic64_t *v)
9779 {
9780 - return (*(volatile long *)&(v)->counter);
9781 + return (*(volatile const long *)&(v)->counter);
9782 +}
9783 +
9784 +/**
9785 + * atomic64_read_unchecked - read atomic64 variable
9786 + * @v: pointer of type atomic64_unchecked_t
9787 + *
9788 + * Atomically reads the value of @v.
9789 + * Doesn't imply a read memory barrier.
9790 + */
9791 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9792 +{
9793 + return (*(volatile const long *)&(v)->counter);
9794 }
9795
9796 /**
9797 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9798 }
9799
9800 /**
9801 + * atomic64_set_unchecked - set atomic64 variable
9802 + * @v: pointer to type atomic64_unchecked_t
9803 + * @i: required value
9804 + *
9805 + * Atomically sets the value of @v to @i.
9806 + */
9807 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9808 +{
9809 + v->counter = i;
9810 +}
9811 +
9812 +/**
9813 * atomic64_add - add integer to atomic64 variable
9814 * @i: integer value to add
9815 * @v: pointer to type atomic64_t
9816 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9817 */
9818 static inline void atomic64_add(long i, atomic64_t *v)
9819 {
9820 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9821 +
9822 +#ifdef CONFIG_PAX_REFCOUNT
9823 + "jno 0f\n"
9824 + LOCK_PREFIX "subq %1,%0\n"
9825 + "int $4\n0:\n"
9826 + _ASM_EXTABLE(0b, 0b)
9827 +#endif
9828 +
9829 + : "=m" (v->counter)
9830 + : "er" (i), "m" (v->counter));
9831 +}
9832 +
9833 +/**
9834 + * atomic64_add_unchecked - add integer to atomic64 variable
9835 + * @i: integer value to add
9836 + * @v: pointer to type atomic64_unchecked_t
9837 + *
9838 + * Atomically adds @i to @v.
9839 + */
9840 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9841 +{
9842 asm volatile(LOCK_PREFIX "addq %1,%0"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9846 */
9847 static inline void atomic64_sub(long i, atomic64_t *v)
9848 {
9849 - asm volatile(LOCK_PREFIX "subq %1,%0"
9850 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9851 +
9852 +#ifdef CONFIG_PAX_REFCOUNT
9853 + "jno 0f\n"
9854 + LOCK_PREFIX "addq %1,%0\n"
9855 + "int $4\n0:\n"
9856 + _ASM_EXTABLE(0b, 0b)
9857 +#endif
9858 +
9859 + : "=m" (v->counter)
9860 + : "er" (i), "m" (v->counter));
9861 +}
9862 +
9863 +/**
9864 + * atomic64_sub_unchecked - subtract the atomic64 variable
9865 + * @i: integer value to subtract
9866 + * @v: pointer to type atomic64_unchecked_t
9867 + *
9868 + * Atomically subtracts @i from @v.
9869 + */
9870 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9871 +{
9872 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9873 : "=m" (v->counter)
9874 : "er" (i), "m" (v->counter));
9875 }
9876 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9877 {
9878 unsigned char c;
9879
9880 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9881 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9882 +
9883 +#ifdef CONFIG_PAX_REFCOUNT
9884 + "jno 0f\n"
9885 + LOCK_PREFIX "addq %2,%0\n"
9886 + "int $4\n0:\n"
9887 + _ASM_EXTABLE(0b, 0b)
9888 +#endif
9889 +
9890 + "sete %1\n"
9891 : "=m" (v->counter), "=qm" (c)
9892 : "er" (i), "m" (v->counter) : "memory");
9893 return c;
9894 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9895 */
9896 static inline void atomic64_inc(atomic64_t *v)
9897 {
9898 + asm volatile(LOCK_PREFIX "incq %0\n"
9899 +
9900 +#ifdef CONFIG_PAX_REFCOUNT
9901 + "jno 0f\n"
9902 + LOCK_PREFIX "decq %0\n"
9903 + "int $4\n0:\n"
9904 + _ASM_EXTABLE(0b, 0b)
9905 +#endif
9906 +
9907 + : "=m" (v->counter)
9908 + : "m" (v->counter));
9909 +}
9910 +
9911 +/**
9912 + * atomic64_inc_unchecked - increment atomic64 variable
9913 + * @v: pointer to type atomic64_unchecked_t
9914 + *
9915 + * Atomically increments @v by 1.
9916 + */
9917 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9918 +{
9919 asm volatile(LOCK_PREFIX "incq %0"
9920 : "=m" (v->counter)
9921 : "m" (v->counter));
9922 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9923 */
9924 static inline void atomic64_dec(atomic64_t *v)
9925 {
9926 - asm volatile(LOCK_PREFIX "decq %0"
9927 + asm volatile(LOCK_PREFIX "decq %0\n"
9928 +
9929 +#ifdef CONFIG_PAX_REFCOUNT
9930 + "jno 0f\n"
9931 + LOCK_PREFIX "incq %0\n"
9932 + "int $4\n0:\n"
9933 + _ASM_EXTABLE(0b, 0b)
9934 +#endif
9935 +
9936 + : "=m" (v->counter)
9937 + : "m" (v->counter));
9938 +}
9939 +
9940 +/**
9941 + * atomic64_dec_unchecked - decrement atomic64 variable
9942 + * @v: pointer to type atomic64_t
9943 + *
9944 + * Atomically decrements @v by 1.
9945 + */
9946 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9947 +{
9948 + asm volatile(LOCK_PREFIX "decq %0\n"
9949 : "=m" (v->counter)
9950 : "m" (v->counter));
9951 }
9952 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9953 {
9954 unsigned char c;
9955
9956 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9957 + asm volatile(LOCK_PREFIX "decq %0\n"
9958 +
9959 +#ifdef CONFIG_PAX_REFCOUNT
9960 + "jno 0f\n"
9961 + LOCK_PREFIX "incq %0\n"
9962 + "int $4\n0:\n"
9963 + _ASM_EXTABLE(0b, 0b)
9964 +#endif
9965 +
9966 + "sete %1\n"
9967 : "=m" (v->counter), "=qm" (c)
9968 : "m" (v->counter) : "memory");
9969 return c != 0;
9970 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9971 {
9972 unsigned char c;
9973
9974 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9975 + asm volatile(LOCK_PREFIX "incq %0\n"
9976 +
9977 +#ifdef CONFIG_PAX_REFCOUNT
9978 + "jno 0f\n"
9979 + LOCK_PREFIX "decq %0\n"
9980 + "int $4\n0:\n"
9981 + _ASM_EXTABLE(0b, 0b)
9982 +#endif
9983 +
9984 + "sete %1\n"
9985 : "=m" (v->counter), "=qm" (c)
9986 : "m" (v->counter) : "memory");
9987 return c != 0;
9988 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9989 {
9990 unsigned char c;
9991
9992 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9993 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9994 +
9995 +#ifdef CONFIG_PAX_REFCOUNT
9996 + "jno 0f\n"
9997 + LOCK_PREFIX "subq %2,%0\n"
9998 + "int $4\n0:\n"
9999 + _ASM_EXTABLE(0b, 0b)
10000 +#endif
10001 +
10002 + "sets %1\n"
10003 : "=m" (v->counter), "=qm" (c)
10004 : "er" (i), "m" (v->counter) : "memory");
10005 return c;
10006 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10007 */
10008 static inline long atomic64_add_return(long i, atomic64_t *v)
10009 {
10010 + return i + xadd_check_overflow(&v->counter, i);
10011 +}
10012 +
10013 +/**
10014 + * atomic64_add_return_unchecked - add and return
10015 + * @i: integer value to add
10016 + * @v: pointer to type atomic64_unchecked_t
10017 + *
10018 + * Atomically adds @i to @v and returns @i + @v
10019 + */
10020 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10021 +{
10022 return i + xadd(&v->counter, i);
10023 }
10024
10025 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10026 }
10027
10028 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10029 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10030 +{
10031 + return atomic64_add_return_unchecked(1, v);
10032 +}
10033 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10034
10035 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10036 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10037 return cmpxchg(&v->counter, old, new);
10038 }
10039
10040 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10041 +{
10042 + return cmpxchg(&v->counter, old, new);
10043 +}
10044 +
10045 static inline long atomic64_xchg(atomic64_t *v, long new)
10046 {
10047 return xchg(&v->counter, new);
10048 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10049 */
10050 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10051 {
10052 - long c, old;
10053 + long c, old, new;
10054 c = atomic64_read(v);
10055 for (;;) {
10056 - if (unlikely(c == (u)))
10057 + if (unlikely(c == u))
10058 break;
10059 - old = atomic64_cmpxchg((v), c, c + (a));
10060 +
10061 + asm volatile("add %2,%0\n"
10062 +
10063 +#ifdef CONFIG_PAX_REFCOUNT
10064 + "jno 0f\n"
10065 + "sub %2,%0\n"
10066 + "int $4\n0:\n"
10067 + _ASM_EXTABLE(0b, 0b)
10068 +#endif
10069 +
10070 + : "=r" (new)
10071 + : "0" (c), "ir" (a));
10072 +
10073 + old = atomic64_cmpxchg(v, c, new);
10074 if (likely(old == c))
10075 break;
10076 c = old;
10077 }
10078 - return c != (u);
10079 + return c != u;
10080 }
10081
10082 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10083 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10084 index b97596e..9bd48b06 100644
10085 --- a/arch/x86/include/asm/bitops.h
10086 +++ b/arch/x86/include/asm/bitops.h
10087 @@ -38,7 +38,7 @@
10088 * a mask operation on a byte.
10089 */
10090 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10091 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10092 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10093 #define CONST_MASK(nr) (1 << ((nr) & 7))
10094
10095 /**
10096 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10097 index 5e1a2ee..c9f9533 100644
10098 --- a/arch/x86/include/asm/boot.h
10099 +++ b/arch/x86/include/asm/boot.h
10100 @@ -11,10 +11,15 @@
10101 #include <asm/pgtable_types.h>
10102
10103 /* Physical address where kernel should be loaded. */
10104 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10105 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10106 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10107 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10108
10109 +#ifndef __ASSEMBLY__
10110 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10111 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10112 +#endif
10113 +
10114 /* Minimum kernel alignment, as a power of two */
10115 #ifdef CONFIG_X86_64
10116 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10117 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10118 index 48f99f1..d78ebf9 100644
10119 --- a/arch/x86/include/asm/cache.h
10120 +++ b/arch/x86/include/asm/cache.h
10121 @@ -5,12 +5,13 @@
10122
10123 /* L1 cache line size */
10124 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10125 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10126 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10127
10128 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10129 +#define __read_only __attribute__((__section__(".data..read_only")))
10130
10131 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10132 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10133 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10134
10135 #ifdef CONFIG_X86_VSMP
10136 #ifdef CONFIG_SMP
10137 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10138 index 9863ee3..4a1f8e1 100644
10139 --- a/arch/x86/include/asm/cacheflush.h
10140 +++ b/arch/x86/include/asm/cacheflush.h
10141 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10142 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10143
10144 if (pg_flags == _PGMT_DEFAULT)
10145 - return -1;
10146 + return ~0UL;
10147 else if (pg_flags == _PGMT_WC)
10148 return _PAGE_CACHE_WC;
10149 else if (pg_flags == _PGMT_UC_MINUS)
10150 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10151 index 46fc474..b02b0f9 100644
10152 --- a/arch/x86/include/asm/checksum_32.h
10153 +++ b/arch/x86/include/asm/checksum_32.h
10154 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10155 int len, __wsum sum,
10156 int *src_err_ptr, int *dst_err_ptr);
10157
10158 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10159 + int len, __wsum sum,
10160 + int *src_err_ptr, int *dst_err_ptr);
10161 +
10162 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10163 + int len, __wsum sum,
10164 + int *src_err_ptr, int *dst_err_ptr);
10165 +
10166 /*
10167 * Note: when you get a NULL pointer exception here this means someone
10168 * passed in an incorrect kernel address to one of these functions.
10169 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10170 int *err_ptr)
10171 {
10172 might_sleep();
10173 - return csum_partial_copy_generic((__force void *)src, dst,
10174 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10175 len, sum, err_ptr, NULL);
10176 }
10177
10178 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10179 {
10180 might_sleep();
10181 if (access_ok(VERIFY_WRITE, dst, len))
10182 - return csum_partial_copy_generic(src, (__force void *)dst,
10183 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10184 len, sum, NULL, err_ptr);
10185
10186 if (len)
10187 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10188 index 99480e5..d81165b 100644
10189 --- a/arch/x86/include/asm/cmpxchg.h
10190 +++ b/arch/x86/include/asm/cmpxchg.h
10191 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10192 __compiletime_error("Bad argument size for cmpxchg");
10193 extern void __xadd_wrong_size(void)
10194 __compiletime_error("Bad argument size for xadd");
10195 +extern void __xadd_check_overflow_wrong_size(void)
10196 + __compiletime_error("Bad argument size for xadd_check_overflow");
10197 extern void __add_wrong_size(void)
10198 __compiletime_error("Bad argument size for add");
10199 +extern void __add_check_overflow_wrong_size(void)
10200 + __compiletime_error("Bad argument size for add_check_overflow");
10201
10202 /*
10203 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10204 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10205 __ret; \
10206 })
10207
10208 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10209 + ({ \
10210 + __typeof__ (*(ptr)) __ret = (arg); \
10211 + switch (sizeof(*(ptr))) { \
10212 + case __X86_CASE_L: \
10213 + asm volatile (lock #op "l %0, %1\n" \
10214 + "jno 0f\n" \
10215 + "mov %0,%1\n" \
10216 + "int $4\n0:\n" \
10217 + _ASM_EXTABLE(0b, 0b) \
10218 + : "+r" (__ret), "+m" (*(ptr)) \
10219 + : : "memory", "cc"); \
10220 + break; \
10221 + case __X86_CASE_Q: \
10222 + asm volatile (lock #op "q %q0, %1\n" \
10223 + "jno 0f\n" \
10224 + "mov %0,%1\n" \
10225 + "int $4\n0:\n" \
10226 + _ASM_EXTABLE(0b, 0b) \
10227 + : "+r" (__ret), "+m" (*(ptr)) \
10228 + : : "memory", "cc"); \
10229 + break; \
10230 + default: \
10231 + __ ## op ## _check_overflow_wrong_size(); \
10232 + } \
10233 + __ret; \
10234 + })
10235 +
10236 /*
10237 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10238 * Since this is generally used to protect other memory information, we
10239 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10240 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10241 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10242
10243 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10244 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10245 +
10246 #define __add(ptr, inc, lock) \
10247 ({ \
10248 __typeof__ (*(ptr)) __ret = (inc); \
10249 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10250 index f91e80f..7f9bd27 100644
10251 --- a/arch/x86/include/asm/cpufeature.h
10252 +++ b/arch/x86/include/asm/cpufeature.h
10253 @@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10254 ".section .discard,\"aw\",@progbits\n"
10255 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10256 ".previous\n"
10257 - ".section .altinstr_replacement,\"ax\"\n"
10258 + ".section .altinstr_replacement,\"a\"\n"
10259 "3: movb $1,%0\n"
10260 "4:\n"
10261 ".previous\n"
10262 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10263 index e95822d..a90010e 100644
10264 --- a/arch/x86/include/asm/desc.h
10265 +++ b/arch/x86/include/asm/desc.h
10266 @@ -4,6 +4,7 @@
10267 #include <asm/desc_defs.h>
10268 #include <asm/ldt.h>
10269 #include <asm/mmu.h>
10270 +#include <asm/pgtable.h>
10271
10272 #include <linux/smp.h>
10273
10274 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10275
10276 desc->type = (info->read_exec_only ^ 1) << 1;
10277 desc->type |= info->contents << 2;
10278 + desc->type |= info->seg_not_present ^ 1;
10279
10280 desc->s = 1;
10281 desc->dpl = 0x3;
10282 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10283 }
10284
10285 extern struct desc_ptr idt_descr;
10286 -extern gate_desc idt_table[];
10287 extern struct desc_ptr nmi_idt_descr;
10288 -extern gate_desc nmi_idt_table[];
10289 -
10290 -struct gdt_page {
10291 - struct desc_struct gdt[GDT_ENTRIES];
10292 -} __attribute__((aligned(PAGE_SIZE)));
10293 -
10294 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10295 +extern gate_desc idt_table[256];
10296 +extern gate_desc nmi_idt_table[256];
10297
10298 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10299 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10300 {
10301 - return per_cpu(gdt_page, cpu).gdt;
10302 + return cpu_gdt_table[cpu];
10303 }
10304
10305 #ifdef CONFIG_X86_64
10306 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10307 unsigned long base, unsigned dpl, unsigned flags,
10308 unsigned short seg)
10309 {
10310 - gate->a = (seg << 16) | (base & 0xffff);
10311 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10312 + gate->gate.offset_low = base;
10313 + gate->gate.seg = seg;
10314 + gate->gate.reserved = 0;
10315 + gate->gate.type = type;
10316 + gate->gate.s = 0;
10317 + gate->gate.dpl = dpl;
10318 + gate->gate.p = 1;
10319 + gate->gate.offset_high = base >> 16;
10320 }
10321
10322 #endif
10323 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10324
10325 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10326 {
10327 + pax_open_kernel();
10328 memcpy(&idt[entry], gate, sizeof(*gate));
10329 + pax_close_kernel();
10330 }
10331
10332 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10333 {
10334 + pax_open_kernel();
10335 memcpy(&ldt[entry], desc, 8);
10336 + pax_close_kernel();
10337 }
10338
10339 static inline void
10340 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10341 default: size = sizeof(*gdt); break;
10342 }
10343
10344 + pax_open_kernel();
10345 memcpy(&gdt[entry], desc, size);
10346 + pax_close_kernel();
10347 }
10348
10349 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10350 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10351
10352 static inline void native_load_tr_desc(void)
10353 {
10354 + pax_open_kernel();
10355 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10356 + pax_close_kernel();
10357 }
10358
10359 static inline void native_load_gdt(const struct desc_ptr *dtr)
10360 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10361 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10362 unsigned int i;
10363
10364 + pax_open_kernel();
10365 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10366 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10367 + pax_close_kernel();
10368 }
10369
10370 #define _LDT_empty(info) \
10371 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10372 }
10373
10374 #ifdef CONFIG_X86_64
10375 -static inline void set_nmi_gate(int gate, void *addr)
10376 +static inline void set_nmi_gate(int gate, const void *addr)
10377 {
10378 gate_desc s;
10379
10380 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10381 }
10382 #endif
10383
10384 -static inline void _set_gate(int gate, unsigned type, void *addr,
10385 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10386 unsigned dpl, unsigned ist, unsigned seg)
10387 {
10388 gate_desc s;
10389 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10390 * Pentium F0 0F bugfix can have resulted in the mapped
10391 * IDT being write-protected.
10392 */
10393 -static inline void set_intr_gate(unsigned int n, void *addr)
10394 +static inline void set_intr_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10398 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10399 /*
10400 * This routine sets up an interrupt gate at directory privilege level 3.
10401 */
10402 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10403 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10404 {
10405 BUG_ON((unsigned)n > 0xFF);
10406 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10407 }
10408
10409 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10410 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10411 {
10412 BUG_ON((unsigned)n > 0xFF);
10413 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10414 }
10415
10416 -static inline void set_trap_gate(unsigned int n, void *addr)
10417 +static inline void set_trap_gate(unsigned int n, const void *addr)
10418 {
10419 BUG_ON((unsigned)n > 0xFF);
10420 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10421 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10422 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10423 {
10424 BUG_ON((unsigned)n > 0xFF);
10425 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10426 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10427 }
10428
10429 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10430 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10431 {
10432 BUG_ON((unsigned)n > 0xFF);
10433 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10434 }
10435
10436 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10437 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10438 {
10439 BUG_ON((unsigned)n > 0xFF);
10440 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10441 }
10442
10443 +#ifdef CONFIG_X86_32
10444 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10445 +{
10446 + struct desc_struct d;
10447 +
10448 + if (likely(limit))
10449 + limit = (limit - 1UL) >> PAGE_SHIFT;
10450 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10451 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10452 +}
10453 +#endif
10454 +
10455 #endif /* _ASM_X86_DESC_H */
10456 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10457 index 278441f..b95a174 100644
10458 --- a/arch/x86/include/asm/desc_defs.h
10459 +++ b/arch/x86/include/asm/desc_defs.h
10460 @@ -31,6 +31,12 @@ struct desc_struct {
10461 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10462 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10463 };
10464 + struct {
10465 + u16 offset_low;
10466 + u16 seg;
10467 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10468 + unsigned offset_high: 16;
10469 + } gate;
10470 };
10471 } __attribute__((packed));
10472
10473 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10474 index 3778256..c5d4fce 100644
10475 --- a/arch/x86/include/asm/e820.h
10476 +++ b/arch/x86/include/asm/e820.h
10477 @@ -69,7 +69,7 @@ struct e820map {
10478 #define ISA_START_ADDRESS 0xa0000
10479 #define ISA_END_ADDRESS 0x100000
10480
10481 -#define BIOS_BEGIN 0x000a0000
10482 +#define BIOS_BEGIN 0x000c0000
10483 #define BIOS_END 0x00100000
10484
10485 #define BIOS_ROM_BASE 0xffe00000
10486 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10487 index 5939f44..f8845f6 100644
10488 --- a/arch/x86/include/asm/elf.h
10489 +++ b/arch/x86/include/asm/elf.h
10490 @@ -243,7 +243,25 @@ extern int force_personality32;
10491 the loader. We need to make sure that it is out of the way of the program
10492 that it will "exec", and that there is sufficient room for the brk. */
10493
10494 +#ifdef CONFIG_PAX_SEGMEXEC
10495 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10496 +#else
10497 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10498 +#endif
10499 +
10500 +#ifdef CONFIG_PAX_ASLR
10501 +#ifdef CONFIG_X86_32
10502 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10503 +
10504 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10505 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10506 +#else
10507 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10508 +
10509 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10510 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10511 +#endif
10512 +#endif
10513
10514 /* This yields a mask that user programs can use to figure out what
10515 instruction set this CPU supports. This could be done in user space,
10516 @@ -296,16 +314,12 @@ do { \
10517
10518 #define ARCH_DLINFO \
10519 do { \
10520 - if (vdso_enabled) \
10521 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10522 - (unsigned long)current->mm->context.vdso); \
10523 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10524 } while (0)
10525
10526 #define ARCH_DLINFO_X32 \
10527 do { \
10528 - if (vdso_enabled) \
10529 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10530 - (unsigned long)current->mm->context.vdso); \
10531 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10532 } while (0)
10533
10534 #define AT_SYSINFO 32
10535 @@ -320,7 +334,7 @@ else \
10536
10537 #endif /* !CONFIG_X86_32 */
10538
10539 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10540 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10541
10542 #define VDSO_ENTRY \
10543 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10544 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10545 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10546 #define compat_arch_setup_additional_pages syscall32_setup_pages
10547
10548 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10549 -#define arch_randomize_brk arch_randomize_brk
10550 -
10551 /*
10552 * True on X86_32 or when emulating IA32 on X86_64
10553 */
10554 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10555 index cc70c1c..d96d011 100644
10556 --- a/arch/x86/include/asm/emergency-restart.h
10557 +++ b/arch/x86/include/asm/emergency-restart.h
10558 @@ -15,6 +15,6 @@ enum reboot_type {
10559
10560 extern enum reboot_type reboot_type;
10561
10562 -extern void machine_emergency_restart(void);
10563 +extern void machine_emergency_restart(void) __noreturn;
10564
10565 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10566 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10567 index 4fa8815..71b121a 100644
10568 --- a/arch/x86/include/asm/fpu-internal.h
10569 +++ b/arch/x86/include/asm/fpu-internal.h
10570 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10571 {
10572 int err;
10573
10574 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10575 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10576 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10577 +#endif
10578 +
10579 /* See comment in fxsave() below. */
10580 #ifdef CONFIG_AS_FXSAVEQ
10581 asm volatile("1: fxrstorq %[fx]\n\t"
10582 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10583 {
10584 int err;
10585
10586 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10587 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10588 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10589 +#endif
10590 +
10591 /*
10592 * Clear the bytes not touched by the fxsave and reserved
10593 * for the SW usage.
10594 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10595 "emms\n\t" /* clear stack tags */
10596 "fildl %P[addr]", /* set F?P to defined value */
10597 X86_FEATURE_FXSAVE_LEAK,
10598 - [addr] "m" (tsk->thread.fpu.has_fpu));
10599 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10600
10601 return fpu_restore_checking(&tsk->thread.fpu);
10602 }
10603 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10604 index 71ecbcb..bac10b7 100644
10605 --- a/arch/x86/include/asm/futex.h
10606 +++ b/arch/x86/include/asm/futex.h
10607 @@ -11,16 +11,18 @@
10608 #include <asm/processor.h>
10609
10610 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10611 + typecheck(u32 __user *, uaddr); \
10612 asm volatile("1:\t" insn "\n" \
10613 "2:\t.section .fixup,\"ax\"\n" \
10614 "3:\tmov\t%3, %1\n" \
10615 "\tjmp\t2b\n" \
10616 "\t.previous\n" \
10617 _ASM_EXTABLE(1b, 3b) \
10618 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10619 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10620 : "i" (-EFAULT), "0" (oparg), "1" (0))
10621
10622 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10623 + typecheck(u32 __user *, uaddr); \
10624 asm volatile("1:\tmovl %2, %0\n" \
10625 "\tmovl\t%0, %3\n" \
10626 "\t" insn "\n" \
10627 @@ -33,7 +35,7 @@
10628 _ASM_EXTABLE(1b, 4b) \
10629 _ASM_EXTABLE(2b, 4b) \
10630 : "=&a" (oldval), "=&r" (ret), \
10631 - "+m" (*uaddr), "=&r" (tem) \
10632 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10633 : "r" (oparg), "i" (-EFAULT), "1" (0))
10634
10635 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10636 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10637
10638 switch (op) {
10639 case FUTEX_OP_SET:
10640 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10641 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10642 break;
10643 case FUTEX_OP_ADD:
10644 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10645 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10646 uaddr, oparg);
10647 break;
10648 case FUTEX_OP_OR:
10649 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10650 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10651 return -EFAULT;
10652
10653 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10654 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10655 "2:\t.section .fixup, \"ax\"\n"
10656 "3:\tmov %3, %0\n"
10657 "\tjmp 2b\n"
10658 "\t.previous\n"
10659 _ASM_EXTABLE(1b, 3b)
10660 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10661 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10662 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10663 : "memory"
10664 );
10665 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10666 index eb92a6e..b98b2f4 100644
10667 --- a/arch/x86/include/asm/hw_irq.h
10668 +++ b/arch/x86/include/asm/hw_irq.h
10669 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10670 extern void enable_IO_APIC(void);
10671
10672 /* Statistics */
10673 -extern atomic_t irq_err_count;
10674 -extern atomic_t irq_mis_count;
10675 +extern atomic_unchecked_t irq_err_count;
10676 +extern atomic_unchecked_t irq_mis_count;
10677
10678 /* EISA */
10679 extern void eisa_set_level_irq(unsigned int irq);
10680 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10681 index d8e8eef..99f81ae 100644
10682 --- a/arch/x86/include/asm/io.h
10683 +++ b/arch/x86/include/asm/io.h
10684 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10685
10686 #include <linux/vmalloc.h>
10687
10688 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10689 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10690 +{
10691 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10692 +}
10693 +
10694 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10695 +{
10696 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10697 +}
10698 +
10699 /*
10700 * Convert a virtual cached pointer to an uncached pointer
10701 */
10702 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10703 index bba3cf8..06bc8da 100644
10704 --- a/arch/x86/include/asm/irqflags.h
10705 +++ b/arch/x86/include/asm/irqflags.h
10706 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10707 sti; \
10708 sysexit
10709
10710 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10711 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10712 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10713 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10714 +
10715 #else
10716 #define INTERRUPT_RETURN iret
10717 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10718 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10719 index 5478825..839e88c 100644
10720 --- a/arch/x86/include/asm/kprobes.h
10721 +++ b/arch/x86/include/asm/kprobes.h
10722 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10723 #define RELATIVEJUMP_SIZE 5
10724 #define RELATIVECALL_OPCODE 0xe8
10725 #define RELATIVE_ADDR_SIZE 4
10726 -#define MAX_STACK_SIZE 64
10727 -#define MIN_STACK_SIZE(ADDR) \
10728 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10729 - THREAD_SIZE - (unsigned long)(ADDR))) \
10730 - ? (MAX_STACK_SIZE) \
10731 - : (((unsigned long)current_thread_info()) + \
10732 - THREAD_SIZE - (unsigned long)(ADDR)))
10733 +#define MAX_STACK_SIZE 64UL
10734 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10735
10736 #define flush_insn_slot(p) do { } while (0)
10737
10738 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10739 index e216ba0..453f6ec 100644
10740 --- a/arch/x86/include/asm/kvm_host.h
10741 +++ b/arch/x86/include/asm/kvm_host.h
10742 @@ -679,7 +679,7 @@ struct kvm_x86_ops {
10743 int (*check_intercept)(struct kvm_vcpu *vcpu,
10744 struct x86_instruction_info *info,
10745 enum x86_intercept_stage stage);
10746 -};
10747 +} __do_const;
10748
10749 struct kvm_arch_async_pf {
10750 u32 token;
10751 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10752 index c8bed0d..e5721fa 100644
10753 --- a/arch/x86/include/asm/local.h
10754 +++ b/arch/x86/include/asm/local.h
10755 @@ -17,26 +17,58 @@ typedef struct {
10756
10757 static inline void local_inc(local_t *l)
10758 {
10759 - asm volatile(_ASM_INC "%0"
10760 + asm volatile(_ASM_INC "%0\n"
10761 +
10762 +#ifdef CONFIG_PAX_REFCOUNT
10763 + "jno 0f\n"
10764 + _ASM_DEC "%0\n"
10765 + "int $4\n0:\n"
10766 + _ASM_EXTABLE(0b, 0b)
10767 +#endif
10768 +
10769 : "+m" (l->a.counter));
10770 }
10771
10772 static inline void local_dec(local_t *l)
10773 {
10774 - asm volatile(_ASM_DEC "%0"
10775 + asm volatile(_ASM_DEC "%0\n"
10776 +
10777 +#ifdef CONFIG_PAX_REFCOUNT
10778 + "jno 0f\n"
10779 + _ASM_INC "%0\n"
10780 + "int $4\n0:\n"
10781 + _ASM_EXTABLE(0b, 0b)
10782 +#endif
10783 +
10784 : "+m" (l->a.counter));
10785 }
10786
10787 static inline void local_add(long i, local_t *l)
10788 {
10789 - asm volatile(_ASM_ADD "%1,%0"
10790 + asm volatile(_ASM_ADD "%1,%0\n"
10791 +
10792 +#ifdef CONFIG_PAX_REFCOUNT
10793 + "jno 0f\n"
10794 + _ASM_SUB "%1,%0\n"
10795 + "int $4\n0:\n"
10796 + _ASM_EXTABLE(0b, 0b)
10797 +#endif
10798 +
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802
10803 static inline void local_sub(long i, local_t *l)
10804 {
10805 - asm volatile(_ASM_SUB "%1,%0"
10806 + asm volatile(_ASM_SUB "%1,%0\n"
10807 +
10808 +#ifdef CONFIG_PAX_REFCOUNT
10809 + "jno 0f\n"
10810 + _ASM_ADD "%1,%0\n"
10811 + "int $4\n0:\n"
10812 + _ASM_EXTABLE(0b, 0b)
10813 +#endif
10814 +
10815 : "+m" (l->a.counter)
10816 : "ir" (i));
10817 }
10818 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10819 {
10820 unsigned char c;
10821
10822 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10823 + asm volatile(_ASM_SUB "%2,%0\n"
10824 +
10825 +#ifdef CONFIG_PAX_REFCOUNT
10826 + "jno 0f\n"
10827 + _ASM_ADD "%2,%0\n"
10828 + "int $4\n0:\n"
10829 + _ASM_EXTABLE(0b, 0b)
10830 +#endif
10831 +
10832 + "sete %1\n"
10833 : "+m" (l->a.counter), "=qm" (c)
10834 : "ir" (i) : "memory");
10835 return c;
10836 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10837 {
10838 unsigned char c;
10839
10840 - asm volatile(_ASM_DEC "%0; sete %1"
10841 + asm volatile(_ASM_DEC "%0\n"
10842 +
10843 +#ifdef CONFIG_PAX_REFCOUNT
10844 + "jno 0f\n"
10845 + _ASM_INC "%0\n"
10846 + "int $4\n0:\n"
10847 + _ASM_EXTABLE(0b, 0b)
10848 +#endif
10849 +
10850 + "sete %1\n"
10851 : "+m" (l->a.counter), "=qm" (c)
10852 : : "memory");
10853 return c != 0;
10854 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10855 {
10856 unsigned char c;
10857
10858 - asm volatile(_ASM_INC "%0; sete %1"
10859 + asm volatile(_ASM_INC "%0\n"
10860 +
10861 +#ifdef CONFIG_PAX_REFCOUNT
10862 + "jno 0f\n"
10863 + _ASM_DEC "%0\n"
10864 + "int $4\n0:\n"
10865 + _ASM_EXTABLE(0b, 0b)
10866 +#endif
10867 +
10868 + "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : : "memory");
10871 return c != 0;
10872 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10873 {
10874 unsigned char c;
10875
10876 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10877 + asm volatile(_ASM_ADD "%2,%0\n"
10878 +
10879 +#ifdef CONFIG_PAX_REFCOUNT
10880 + "jno 0f\n"
10881 + _ASM_SUB "%2,%0\n"
10882 + "int $4\n0:\n"
10883 + _ASM_EXTABLE(0b, 0b)
10884 +#endif
10885 +
10886 + "sets %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : "ir" (i) : "memory");
10889 return c;
10890 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10891 #endif
10892 /* Modern 486+ processor */
10893 __i = i;
10894 - asm volatile(_ASM_XADD "%0, %1;"
10895 + asm volatile(_ASM_XADD "%0, %1\n"
10896 +
10897 +#ifdef CONFIG_PAX_REFCOUNT
10898 + "jno 0f\n"
10899 + _ASM_MOV "%0,%1\n"
10900 + "int $4\n0:\n"
10901 + _ASM_EXTABLE(0b, 0b)
10902 +#endif
10903 +
10904 : "+r" (i), "+m" (l->a.counter)
10905 : : "memory");
10906 return i + __i;
10907 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10908 index 593e51d..fa69c9a 100644
10909 --- a/arch/x86/include/asm/mman.h
10910 +++ b/arch/x86/include/asm/mman.h
10911 @@ -5,4 +5,14 @@
10912
10913 #include <asm-generic/mman.h>
10914
10915 +#ifdef __KERNEL__
10916 +#ifndef __ASSEMBLY__
10917 +#ifdef CONFIG_X86_32
10918 +#define arch_mmap_check i386_mmap_check
10919 +int i386_mmap_check(unsigned long addr, unsigned long len,
10920 + unsigned long flags);
10921 +#endif
10922 +#endif
10923 +#endif
10924 +
10925 #endif /* _ASM_X86_MMAN_H */
10926 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10927 index 5f55e69..e20bfb1 100644
10928 --- a/arch/x86/include/asm/mmu.h
10929 +++ b/arch/x86/include/asm/mmu.h
10930 @@ -9,7 +9,7 @@
10931 * we put the segment information here.
10932 */
10933 typedef struct {
10934 - void *ldt;
10935 + struct desc_struct *ldt;
10936 int size;
10937
10938 #ifdef CONFIG_X86_64
10939 @@ -18,7 +18,19 @@ typedef struct {
10940 #endif
10941
10942 struct mutex lock;
10943 - void *vdso;
10944 + unsigned long vdso;
10945 +
10946 +#ifdef CONFIG_X86_32
10947 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10948 + unsigned long user_cs_base;
10949 + unsigned long user_cs_limit;
10950 +
10951 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10952 + cpumask_t cpu_user_cs_mask;
10953 +#endif
10954 +
10955 +#endif
10956 +#endif
10957 } mm_context_t;
10958
10959 #ifdef CONFIG_SMP
10960 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10961 index 6902152..da4283a 100644
10962 --- a/arch/x86/include/asm/mmu_context.h
10963 +++ b/arch/x86/include/asm/mmu_context.h
10964 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10965
10966 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10967 {
10968 +
10969 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10970 + unsigned int i;
10971 + pgd_t *pgd;
10972 +
10973 + pax_open_kernel();
10974 + pgd = get_cpu_pgd(smp_processor_id());
10975 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10976 + set_pgd_batched(pgd+i, native_make_pgd(0));
10977 + pax_close_kernel();
10978 +#endif
10979 +
10980 #ifdef CONFIG_SMP
10981 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10982 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10983 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10984 struct task_struct *tsk)
10985 {
10986 unsigned cpu = smp_processor_id();
10987 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10988 + int tlbstate = TLBSTATE_OK;
10989 +#endif
10990
10991 if (likely(prev != next)) {
10992 #ifdef CONFIG_SMP
10993 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10994 + tlbstate = percpu_read(cpu_tlbstate.state);
10995 +#endif
10996 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10997 percpu_write(cpu_tlbstate.active_mm, next);
10998 #endif
10999 cpumask_set_cpu(cpu, mm_cpumask(next));
11000
11001 /* Re-load page tables */
11002 +#ifdef CONFIG_PAX_PER_CPU_PGD
11003 + pax_open_kernel();
11004 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11005 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11006 + pax_close_kernel();
11007 + load_cr3(get_cpu_pgd(cpu));
11008 +#else
11009 load_cr3(next->pgd);
11010 +#endif
11011
11012 /* stop flush ipis for the previous mm */
11013 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11014 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11015 */
11016 if (unlikely(prev->context.ldt != next->context.ldt))
11017 load_LDT_nolock(&next->context);
11018 - }
11019 +
11020 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11021 + if (!(__supported_pte_mask & _PAGE_NX)) {
11022 + smp_mb__before_clear_bit();
11023 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11024 + smp_mb__after_clear_bit();
11025 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11026 + }
11027 +#endif
11028 +
11029 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11030 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11031 + prev->context.user_cs_limit != next->context.user_cs_limit))
11032 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11033 #ifdef CONFIG_SMP
11034 + else if (unlikely(tlbstate != TLBSTATE_OK))
11035 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11036 +#endif
11037 +#endif
11038 +
11039 + }
11040 else {
11041 +
11042 +#ifdef CONFIG_PAX_PER_CPU_PGD
11043 + pax_open_kernel();
11044 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11045 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11046 + pax_close_kernel();
11047 + load_cr3(get_cpu_pgd(cpu));
11048 +#endif
11049 +
11050 +#ifdef CONFIG_SMP
11051 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11052 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11053
11054 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11055 * tlb flush IPI delivery. We must reload CR3
11056 * to make sure to use no freed page tables.
11057 */
11058 +
11059 +#ifndef CONFIG_PAX_PER_CPU_PGD
11060 load_cr3(next->pgd);
11061 +#endif
11062 +
11063 load_LDT_nolock(&next->context);
11064 +
11065 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11066 + if (!(__supported_pte_mask & _PAGE_NX))
11067 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11068 +#endif
11069 +
11070 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11071 +#ifdef CONFIG_PAX_PAGEEXEC
11072 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11073 +#endif
11074 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11075 +#endif
11076 +
11077 }
11078 +#endif
11079 }
11080 -#endif
11081 }
11082
11083 #define activate_mm(prev, next) \
11084 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11085 index 9eae775..c914fea 100644
11086 --- a/arch/x86/include/asm/module.h
11087 +++ b/arch/x86/include/asm/module.h
11088 @@ -5,6 +5,7 @@
11089
11090 #ifdef CONFIG_X86_64
11091 /* X86_64 does not define MODULE_PROC_FAMILY */
11092 +#define MODULE_PROC_FAMILY ""
11093 #elif defined CONFIG_M386
11094 #define MODULE_PROC_FAMILY "386 "
11095 #elif defined CONFIG_M486
11096 @@ -59,8 +60,20 @@
11097 #error unknown processor family
11098 #endif
11099
11100 -#ifdef CONFIG_X86_32
11101 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11102 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11103 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11104 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11105 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11106 +#else
11107 +#define MODULE_PAX_KERNEXEC ""
11108 #endif
11109
11110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11111 +#define MODULE_PAX_UDEREF "UDEREF "
11112 +#else
11113 +#define MODULE_PAX_UDEREF ""
11114 +#endif
11115 +
11116 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11117 +
11118 #endif /* _ASM_X86_MODULE_H */
11119 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11120 index 7639dbf..e08a58c 100644
11121 --- a/arch/x86/include/asm/page_64_types.h
11122 +++ b/arch/x86/include/asm/page_64_types.h
11123 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11124
11125 /* duplicated to the one in bootmem.h */
11126 extern unsigned long max_pfn;
11127 -extern unsigned long phys_base;
11128 +extern const unsigned long phys_base;
11129
11130 extern unsigned long __phys_addr(unsigned long);
11131 #define __phys_reloc_hide(x) (x)
11132 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11133 index aa0f913..0c5bc6a 100644
11134 --- a/arch/x86/include/asm/paravirt.h
11135 +++ b/arch/x86/include/asm/paravirt.h
11136 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11137 val);
11138 }
11139
11140 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11141 +{
11142 + pgdval_t val = native_pgd_val(pgd);
11143 +
11144 + if (sizeof(pgdval_t) > sizeof(long))
11145 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11146 + val, (u64)val >> 32);
11147 + else
11148 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11149 + val);
11150 +}
11151 +
11152 static inline void pgd_clear(pgd_t *pgdp)
11153 {
11154 set_pgd(pgdp, __pgd(0));
11155 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11156 pv_mmu_ops.set_fixmap(idx, phys, flags);
11157 }
11158
11159 +#ifdef CONFIG_PAX_KERNEXEC
11160 +static inline unsigned long pax_open_kernel(void)
11161 +{
11162 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11163 +}
11164 +
11165 +static inline unsigned long pax_close_kernel(void)
11166 +{
11167 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11168 +}
11169 +#else
11170 +static inline unsigned long pax_open_kernel(void) { return 0; }
11171 +static inline unsigned long pax_close_kernel(void) { return 0; }
11172 +#endif
11173 +
11174 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11175
11176 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11177 @@ -965,7 +992,7 @@ extern void default_banner(void);
11178
11179 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11180 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11181 -#define PARA_INDIRECT(addr) *%cs:addr
11182 +#define PARA_INDIRECT(addr) *%ss:addr
11183 #endif
11184
11185 #define INTERRUPT_RETURN \
11186 @@ -1042,6 +1069,21 @@ extern void default_banner(void);
11187 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11188 CLBR_NONE, \
11189 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11190 +
11191 +#define GET_CR0_INTO_RDI \
11192 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11193 + mov %rax,%rdi
11194 +
11195 +#define SET_RDI_INTO_CR0 \
11196 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11197 +
11198 +#define GET_CR3_INTO_RDI \
11199 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11200 + mov %rax,%rdi
11201 +
11202 +#define SET_RDI_INTO_CR3 \
11203 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11204 +
11205 #endif /* CONFIG_X86_32 */
11206
11207 #endif /* __ASSEMBLY__ */
11208 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11209 index 8e8b9a4..f07d725 100644
11210 --- a/arch/x86/include/asm/paravirt_types.h
11211 +++ b/arch/x86/include/asm/paravirt_types.h
11212 @@ -84,20 +84,20 @@ struct pv_init_ops {
11213 */
11214 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11215 unsigned long addr, unsigned len);
11216 -};
11217 +} __no_const;
11218
11219
11220 struct pv_lazy_ops {
11221 /* Set deferred update mode, used for batching operations. */
11222 void (*enter)(void);
11223 void (*leave)(void);
11224 -};
11225 +} __no_const;
11226
11227 struct pv_time_ops {
11228 unsigned long long (*sched_clock)(void);
11229 unsigned long long (*steal_clock)(int cpu);
11230 unsigned long (*get_tsc_khz)(void);
11231 -};
11232 +} __no_const;
11233
11234 struct pv_cpu_ops {
11235 /* hooks for various privileged instructions */
11236 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11237
11238 void (*start_context_switch)(struct task_struct *prev);
11239 void (*end_context_switch)(struct task_struct *next);
11240 -};
11241 +} __no_const;
11242
11243 struct pv_irq_ops {
11244 /*
11245 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11246 unsigned long start_eip,
11247 unsigned long start_esp);
11248 #endif
11249 -};
11250 +} __no_const;
11251
11252 struct pv_mmu_ops {
11253 unsigned long (*read_cr2)(void);
11254 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11255 struct paravirt_callee_save make_pud;
11256
11257 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11258 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11259 #endif /* PAGETABLE_LEVELS == 4 */
11260 #endif /* PAGETABLE_LEVELS >= 3 */
11261
11262 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11263 an mfn. We can tell which is which from the index. */
11264 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11265 phys_addr_t phys, pgprot_t flags);
11266 +
11267 +#ifdef CONFIG_PAX_KERNEXEC
11268 + unsigned long (*pax_open_kernel)(void);
11269 + unsigned long (*pax_close_kernel)(void);
11270 +#endif
11271 +
11272 };
11273
11274 struct arch_spinlock;
11275 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11276 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11277 int (*spin_trylock)(struct arch_spinlock *lock);
11278 void (*spin_unlock)(struct arch_spinlock *lock);
11279 -};
11280 +} __no_const;
11281
11282 /* This contains all the paravirt structures: we get a convenient
11283 * number for each function using the offset which we use to indicate
11284 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11285 index b4389a4..7024269 100644
11286 --- a/arch/x86/include/asm/pgalloc.h
11287 +++ b/arch/x86/include/asm/pgalloc.h
11288 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11289 pmd_t *pmd, pte_t *pte)
11290 {
11291 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11292 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11293 +}
11294 +
11295 +static inline void pmd_populate_user(struct mm_struct *mm,
11296 + pmd_t *pmd, pte_t *pte)
11297 +{
11298 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11299 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11300 }
11301
11302 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11303
11304 #ifdef CONFIG_X86_PAE
11305 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11306 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11307 +{
11308 + pud_populate(mm, pudp, pmd);
11309 +}
11310 #else /* !CONFIG_X86_PAE */
11311 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11312 {
11313 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11314 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11315 }
11316 +
11317 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11318 +{
11319 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11320 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11321 +}
11322 #endif /* CONFIG_X86_PAE */
11323
11324 #if PAGETABLE_LEVELS > 3
11325 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11326 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11327 }
11328
11329 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11330 +{
11331 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11332 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11333 +}
11334 +
11335 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11336 {
11337 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11338 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11339 index 98391db..8f6984e 100644
11340 --- a/arch/x86/include/asm/pgtable-2level.h
11341 +++ b/arch/x86/include/asm/pgtable-2level.h
11342 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11343
11344 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11345 {
11346 + pax_open_kernel();
11347 *pmdp = pmd;
11348 + pax_close_kernel();
11349 }
11350
11351 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11352 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11353 index cb00ccc..17e9054 100644
11354 --- a/arch/x86/include/asm/pgtable-3level.h
11355 +++ b/arch/x86/include/asm/pgtable-3level.h
11356 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11357
11358 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11359 {
11360 + pax_open_kernel();
11361 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11362 + pax_close_kernel();
11363 }
11364
11365 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11366 {
11367 + pax_open_kernel();
11368 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11369 + pax_close_kernel();
11370 }
11371
11372 /*
11373 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11374 index 49afb3f..91a8c63 100644
11375 --- a/arch/x86/include/asm/pgtable.h
11376 +++ b/arch/x86/include/asm/pgtable.h
11377 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11378
11379 #ifndef __PAGETABLE_PUD_FOLDED
11380 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11381 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11382 #define pgd_clear(pgd) native_pgd_clear(pgd)
11383 #endif
11384
11385 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11386
11387 #define arch_end_context_switch(prev) do {} while(0)
11388
11389 +#define pax_open_kernel() native_pax_open_kernel()
11390 +#define pax_close_kernel() native_pax_close_kernel()
11391 #endif /* CONFIG_PARAVIRT */
11392
11393 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11394 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11395 +
11396 +#ifdef CONFIG_PAX_KERNEXEC
11397 +static inline unsigned long native_pax_open_kernel(void)
11398 +{
11399 + unsigned long cr0;
11400 +
11401 + preempt_disable();
11402 + barrier();
11403 + cr0 = read_cr0() ^ X86_CR0_WP;
11404 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11405 + write_cr0(cr0);
11406 + return cr0 ^ X86_CR0_WP;
11407 +}
11408 +
11409 +static inline unsigned long native_pax_close_kernel(void)
11410 +{
11411 + unsigned long cr0;
11412 +
11413 + cr0 = read_cr0() ^ X86_CR0_WP;
11414 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11415 + write_cr0(cr0);
11416 + barrier();
11417 + preempt_enable_no_resched();
11418 + return cr0 ^ X86_CR0_WP;
11419 +}
11420 +#else
11421 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11422 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11423 +#endif
11424 +
11425 /*
11426 * The following only work if pte_present() is true.
11427 * Undefined behaviour if not..
11428 */
11429 +static inline int pte_user(pte_t pte)
11430 +{
11431 + return pte_val(pte) & _PAGE_USER;
11432 +}
11433 +
11434 static inline int pte_dirty(pte_t pte)
11435 {
11436 return pte_flags(pte) & _PAGE_DIRTY;
11437 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11438 return pte_clear_flags(pte, _PAGE_RW);
11439 }
11440
11441 +static inline pte_t pte_mkread(pte_t pte)
11442 +{
11443 + return __pte(pte_val(pte) | _PAGE_USER);
11444 +}
11445 +
11446 static inline pte_t pte_mkexec(pte_t pte)
11447 {
11448 - return pte_clear_flags(pte, _PAGE_NX);
11449 +#ifdef CONFIG_X86_PAE
11450 + if (__supported_pte_mask & _PAGE_NX)
11451 + return pte_clear_flags(pte, _PAGE_NX);
11452 + else
11453 +#endif
11454 + return pte_set_flags(pte, _PAGE_USER);
11455 +}
11456 +
11457 +static inline pte_t pte_exprotect(pte_t pte)
11458 +{
11459 +#ifdef CONFIG_X86_PAE
11460 + if (__supported_pte_mask & _PAGE_NX)
11461 + return pte_set_flags(pte, _PAGE_NX);
11462 + else
11463 +#endif
11464 + return pte_clear_flags(pte, _PAGE_USER);
11465 }
11466
11467 static inline pte_t pte_mkdirty(pte_t pte)
11468 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11469 #endif
11470
11471 #ifndef __ASSEMBLY__
11472 +
11473 +#ifdef CONFIG_PAX_PER_CPU_PGD
11474 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11475 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11476 +{
11477 + return cpu_pgd[cpu];
11478 +}
11479 +#endif
11480 +
11481 #include <linux/mm_types.h>
11482
11483 static inline int pte_none(pte_t pte)
11484 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11485
11486 static inline int pgd_bad(pgd_t pgd)
11487 {
11488 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11489 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11490 }
11491
11492 static inline int pgd_none(pgd_t pgd)
11493 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11494 * pgd_offset() returns a (pgd_t *)
11495 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11496 */
11497 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11498 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11499 +
11500 +#ifdef CONFIG_PAX_PER_CPU_PGD
11501 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11502 +#endif
11503 +
11504 /*
11505 * a shortcut which implies the use of the kernel's pgd, instead
11506 * of a process's
11507 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11508 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11509 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11510
11511 +#ifdef CONFIG_X86_32
11512 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11513 +#else
11514 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11515 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11516 +
11517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11518 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11519 +#else
11520 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11521 +#endif
11522 +
11523 +#endif
11524 +
11525 #ifndef __ASSEMBLY__
11526
11527 extern int direct_gbpages;
11528 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11529 * dst and src can be on the same page, but the range must not overlap,
11530 * and must not cross a page boundary.
11531 */
11532 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11533 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11534 {
11535 - memcpy(dst, src, count * sizeof(pgd_t));
11536 + pax_open_kernel();
11537 + while (count--)
11538 + *dst++ = *src++;
11539 + pax_close_kernel();
11540 }
11541
11542 +#ifdef CONFIG_PAX_PER_CPU_PGD
11543 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11544 +#endif
11545 +
11546 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11547 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11548 +#else
11549 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11550 +#endif
11551
11552 #include <asm-generic/pgtable.h>
11553 #endif /* __ASSEMBLY__ */
11554 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11555 index 0c92113..34a77c6 100644
11556 --- a/arch/x86/include/asm/pgtable_32.h
11557 +++ b/arch/x86/include/asm/pgtable_32.h
11558 @@ -25,9 +25,6 @@
11559 struct mm_struct;
11560 struct vm_area_struct;
11561
11562 -extern pgd_t swapper_pg_dir[1024];
11563 -extern pgd_t initial_page_table[1024];
11564 -
11565 static inline void pgtable_cache_init(void) { }
11566 static inline void check_pgt_cache(void) { }
11567 void paging_init(void);
11568 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11569 # include <asm/pgtable-2level.h>
11570 #endif
11571
11572 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11573 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11574 +#ifdef CONFIG_X86_PAE
11575 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11576 +#endif
11577 +
11578 #if defined(CONFIG_HIGHPTE)
11579 #define pte_offset_map(dir, address) \
11580 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11581 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11582 /* Clear a kernel PTE and flush it from the TLB */
11583 #define kpte_clear_flush(ptep, vaddr) \
11584 do { \
11585 + pax_open_kernel(); \
11586 pte_clear(&init_mm, (vaddr), (ptep)); \
11587 + pax_close_kernel(); \
11588 __flush_tlb_one((vaddr)); \
11589 } while (0)
11590
11591 @@ -74,6 +79,9 @@ do { \
11592
11593 #endif /* !__ASSEMBLY__ */
11594
11595 +#define HAVE_ARCH_UNMAPPED_AREA
11596 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11597 +
11598 /*
11599 * kern_addr_valid() is (1) for FLATMEM and (0) for
11600 * SPARSEMEM and DISCONTIGMEM
11601 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11602 index ed5903b..c7fe163 100644
11603 --- a/arch/x86/include/asm/pgtable_32_types.h
11604 +++ b/arch/x86/include/asm/pgtable_32_types.h
11605 @@ -8,7 +8,7 @@
11606 */
11607 #ifdef CONFIG_X86_PAE
11608 # include <asm/pgtable-3level_types.h>
11609 -# define PMD_SIZE (1UL << PMD_SHIFT)
11610 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11611 # define PMD_MASK (~(PMD_SIZE - 1))
11612 #else
11613 # include <asm/pgtable-2level_types.h>
11614 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11615 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11616 #endif
11617
11618 +#ifdef CONFIG_PAX_KERNEXEC
11619 +#ifndef __ASSEMBLY__
11620 +extern unsigned char MODULES_EXEC_VADDR[];
11621 +extern unsigned char MODULES_EXEC_END[];
11622 +#endif
11623 +#include <asm/boot.h>
11624 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11625 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11626 +#else
11627 +#define ktla_ktva(addr) (addr)
11628 +#define ktva_ktla(addr) (addr)
11629 +#endif
11630 +
11631 #define MODULES_VADDR VMALLOC_START
11632 #define MODULES_END VMALLOC_END
11633 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11634 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11635 index 975f709..9f779c9 100644
11636 --- a/arch/x86/include/asm/pgtable_64.h
11637 +++ b/arch/x86/include/asm/pgtable_64.h
11638 @@ -16,10 +16,14 @@
11639
11640 extern pud_t level3_kernel_pgt[512];
11641 extern pud_t level3_ident_pgt[512];
11642 +extern pud_t level3_vmalloc_start_pgt[512];
11643 +extern pud_t level3_vmalloc_end_pgt[512];
11644 +extern pud_t level3_vmemmap_pgt[512];
11645 +extern pud_t level2_vmemmap_pgt[512];
11646 extern pmd_t level2_kernel_pgt[512];
11647 extern pmd_t level2_fixmap_pgt[512];
11648 -extern pmd_t level2_ident_pgt[512];
11649 -extern pgd_t init_level4_pgt[];
11650 +extern pmd_t level2_ident_pgt[512*2];
11651 +extern pgd_t init_level4_pgt[512];
11652
11653 #define swapper_pg_dir init_level4_pgt
11654
11655 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11656
11657 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11658 {
11659 + pax_open_kernel();
11660 *pmdp = pmd;
11661 + pax_close_kernel();
11662 }
11663
11664 static inline void native_pmd_clear(pmd_t *pmd)
11665 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11666
11667 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11668 {
11669 + pax_open_kernel();
11670 *pudp = pud;
11671 + pax_close_kernel();
11672 }
11673
11674 static inline void native_pud_clear(pud_t *pud)
11675 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11676
11677 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11678 {
11679 + pax_open_kernel();
11680 + *pgdp = pgd;
11681 + pax_close_kernel();
11682 +}
11683 +
11684 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11685 +{
11686 *pgdp = pgd;
11687 }
11688
11689 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11690 index 766ea16..5b96cb3 100644
11691 --- a/arch/x86/include/asm/pgtable_64_types.h
11692 +++ b/arch/x86/include/asm/pgtable_64_types.h
11693 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11694 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11695 #define MODULES_END _AC(0xffffffffff000000, UL)
11696 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11697 +#define MODULES_EXEC_VADDR MODULES_VADDR
11698 +#define MODULES_EXEC_END MODULES_END
11699 +
11700 +#define ktla_ktva(addr) (addr)
11701 +#define ktva_ktla(addr) (addr)
11702
11703 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11704 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11705 index 013286a..8b42f4f 100644
11706 --- a/arch/x86/include/asm/pgtable_types.h
11707 +++ b/arch/x86/include/asm/pgtable_types.h
11708 @@ -16,13 +16,12 @@
11709 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11710 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11711 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11712 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11713 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11714 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11715 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11716 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11717 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11718 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11719 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11720 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11721 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11722 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11723
11724 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11725 @@ -40,7 +39,6 @@
11726 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11727 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11728 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11729 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11730 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11731 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11732 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11733 @@ -57,8 +55,10 @@
11734
11735 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11736 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11737 -#else
11738 +#elif defined(CONFIG_KMEMCHECK)
11739 #define _PAGE_NX (_AT(pteval_t, 0))
11740 +#else
11741 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11742 #endif
11743
11744 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11745 @@ -96,6 +96,9 @@
11746 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11747 _PAGE_ACCESSED)
11748
11749 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11750 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11751 +
11752 #define __PAGE_KERNEL_EXEC \
11753 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11754 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11755 @@ -106,7 +109,7 @@
11756 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11757 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11758 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11759 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11760 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11761 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11762 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11763 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11764 @@ -168,8 +171,8 @@
11765 * bits are combined, this will alow user to access the high address mapped
11766 * VDSO in the presence of CONFIG_COMPAT_VDSO
11767 */
11768 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11769 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11770 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11771 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11772 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11773 #endif
11774
11775 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11776 {
11777 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11778 }
11779 +#endif
11780
11781 +#if PAGETABLE_LEVELS == 3
11782 +#include <asm-generic/pgtable-nopud.h>
11783 +#endif
11784 +
11785 +#if PAGETABLE_LEVELS == 2
11786 +#include <asm-generic/pgtable-nopmd.h>
11787 +#endif
11788 +
11789 +#ifndef __ASSEMBLY__
11790 #if PAGETABLE_LEVELS > 3
11791 typedef struct { pudval_t pud; } pud_t;
11792
11793 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11794 return pud.pud;
11795 }
11796 #else
11797 -#include <asm-generic/pgtable-nopud.h>
11798 -
11799 static inline pudval_t native_pud_val(pud_t pud)
11800 {
11801 return native_pgd_val(pud.pgd);
11802 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11803 return pmd.pmd;
11804 }
11805 #else
11806 -#include <asm-generic/pgtable-nopmd.h>
11807 -
11808 static inline pmdval_t native_pmd_val(pmd_t pmd)
11809 {
11810 return native_pgd_val(pmd.pud.pgd);
11811 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11812
11813 extern pteval_t __supported_pte_mask;
11814 extern void set_nx(void);
11815 -extern int nx_enabled;
11816
11817 #define pgprot_writecombine pgprot_writecombine
11818 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11819 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11820 index 4fa7dcc..764e33a 100644
11821 --- a/arch/x86/include/asm/processor.h
11822 +++ b/arch/x86/include/asm/processor.h
11823 @@ -276,7 +276,7 @@ struct tss_struct {
11824
11825 } ____cacheline_aligned;
11826
11827 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11828 +extern struct tss_struct init_tss[NR_CPUS];
11829
11830 /*
11831 * Save the original ist values for checking stack pointers during debugging
11832 @@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11833 */
11834 #define TASK_SIZE PAGE_OFFSET
11835 #define TASK_SIZE_MAX TASK_SIZE
11836 +
11837 +#ifdef CONFIG_PAX_SEGMEXEC
11838 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11839 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11840 +#else
11841 #define STACK_TOP TASK_SIZE
11842 -#define STACK_TOP_MAX STACK_TOP
11843 +#endif
11844 +
11845 +#define STACK_TOP_MAX TASK_SIZE
11846
11847 #define INIT_THREAD { \
11848 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11849 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11850 .vm86_info = NULL, \
11851 .sysenter_cs = __KERNEL_CS, \
11852 .io_bitmap_ptr = NULL, \
11853 @@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11854 */
11855 #define INIT_TSS { \
11856 .x86_tss = { \
11857 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11858 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11859 .ss0 = __KERNEL_DS, \
11860 .ss1 = __KERNEL_CS, \
11861 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11862 @@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11863 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11864
11865 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11866 -#define KSTK_TOP(info) \
11867 -({ \
11868 - unsigned long *__ptr = (unsigned long *)(info); \
11869 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11870 -})
11871 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11872
11873 /*
11874 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11875 @@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11876 #define task_pt_regs(task) \
11877 ({ \
11878 struct pt_regs *__regs__; \
11879 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11880 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11881 __regs__ - 1; \
11882 })
11883
11884 @@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11885 /*
11886 * User space process size. 47bits minus one guard page.
11887 */
11888 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11889 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11890
11891 /* This decides where the kernel will search for a free chunk of vm
11892 * space during mmap's.
11893 */
11894 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11895 - 0xc0000000 : 0xFFFFe000)
11896 + 0xc0000000 : 0xFFFFf000)
11897
11898 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11899 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11900 @@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11901 #define STACK_TOP_MAX TASK_SIZE_MAX
11902
11903 #define INIT_THREAD { \
11904 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11905 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11906 }
11907
11908 #define INIT_TSS { \
11909 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11910 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11911 }
11912
11913 /*
11914 @@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11915 */
11916 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11917
11918 +#ifdef CONFIG_PAX_SEGMEXEC
11919 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11920 +#endif
11921 +
11922 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11923
11924 /* Get/set a process' ability to use the timestamp counter instruction */
11925 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11926
11927 void cpu_idle_wait(void);
11928
11929 -extern unsigned long arch_align_stack(unsigned long sp);
11930 +#define arch_align_stack(x) ((x) & ~0xfUL)
11931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11932
11933 void default_idle(void);
11934 bool set_pm_idle_to_default(void);
11935
11936 -void stop_this_cpu(void *dummy);
11937 +void stop_this_cpu(void *dummy) __noreturn;
11938
11939 #endif /* _ASM_X86_PROCESSOR_H */
11940 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11941 index dcfde52..dbfea06 100644
11942 --- a/arch/x86/include/asm/ptrace.h
11943 +++ b/arch/x86/include/asm/ptrace.h
11944 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11945 }
11946
11947 /*
11948 - * user_mode_vm(regs) determines whether a register set came from user mode.
11949 + * user_mode(regs) determines whether a register set came from user mode.
11950 * This is true if V8086 mode was enabled OR if the register set was from
11951 * protected mode with RPL-3 CS value. This tricky test checks that with
11952 * one comparison. Many places in the kernel can bypass this full check
11953 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11954 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11955 + * be used.
11956 */
11957 -static inline int user_mode(struct pt_regs *regs)
11958 +static inline int user_mode_novm(struct pt_regs *regs)
11959 {
11960 #ifdef CONFIG_X86_32
11961 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11962 #else
11963 - return !!(regs->cs & 3);
11964 + return !!(regs->cs & SEGMENT_RPL_MASK);
11965 #endif
11966 }
11967
11968 -static inline int user_mode_vm(struct pt_regs *regs)
11969 +static inline int user_mode(struct pt_regs *regs)
11970 {
11971 #ifdef CONFIG_X86_32
11972 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11973 USER_RPL;
11974 #else
11975 - return user_mode(regs);
11976 + return user_mode_novm(regs);
11977 #endif
11978 }
11979
11980 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11981 #ifdef CONFIG_X86_64
11982 static inline bool user_64bit_mode(struct pt_regs *regs)
11983 {
11984 + unsigned long cs = regs->cs & 0xffff;
11985 #ifndef CONFIG_PARAVIRT
11986 /*
11987 * On non-paravirt systems, this is the only long mode CPL 3
11988 * selector. We do not allow long mode selectors in the LDT.
11989 */
11990 - return regs->cs == __USER_CS;
11991 + return cs == __USER_CS;
11992 #else
11993 /* Headers are too twisted for this to go in paravirt.h. */
11994 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11995 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11996 #endif
11997 }
11998 #endif
11999 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12000 index 92f29706..a79cbbb 100644
12001 --- a/arch/x86/include/asm/reboot.h
12002 +++ b/arch/x86/include/asm/reboot.h
12003 @@ -6,19 +6,19 @@
12004 struct pt_regs;
12005
12006 struct machine_ops {
12007 - void (*restart)(char *cmd);
12008 - void (*halt)(void);
12009 - void (*power_off)(void);
12010 + void (* __noreturn restart)(char *cmd);
12011 + void (* __noreturn halt)(void);
12012 + void (* __noreturn power_off)(void);
12013 void (*shutdown)(void);
12014 void (*crash_shutdown)(struct pt_regs *);
12015 - void (*emergency_restart)(void);
12016 -};
12017 + void (* __noreturn emergency_restart)(void);
12018 +} __no_const;
12019
12020 extern struct machine_ops machine_ops;
12021
12022 void native_machine_crash_shutdown(struct pt_regs *regs);
12023 void native_machine_shutdown(void);
12024 -void machine_real_restart(unsigned int type);
12025 +void machine_real_restart(unsigned int type) __noreturn;
12026 /* These must match dispatch_table in reboot_32.S */
12027 #define MRR_BIOS 0
12028 #define MRR_APM 1
12029 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12030 index 2dbe4a7..ce1db00 100644
12031 --- a/arch/x86/include/asm/rwsem.h
12032 +++ b/arch/x86/include/asm/rwsem.h
12033 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12034 {
12035 asm volatile("# beginning down_read\n\t"
12036 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12037 +
12038 +#ifdef CONFIG_PAX_REFCOUNT
12039 + "jno 0f\n"
12040 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12041 + "int $4\n0:\n"
12042 + _ASM_EXTABLE(0b, 0b)
12043 +#endif
12044 +
12045 /* adds 0x00000001 */
12046 " jns 1f\n"
12047 " call call_rwsem_down_read_failed\n"
12048 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12049 "1:\n\t"
12050 " mov %1,%2\n\t"
12051 " add %3,%2\n\t"
12052 +
12053 +#ifdef CONFIG_PAX_REFCOUNT
12054 + "jno 0f\n"
12055 + "sub %3,%2\n"
12056 + "int $4\n0:\n"
12057 + _ASM_EXTABLE(0b, 0b)
12058 +#endif
12059 +
12060 " jle 2f\n\t"
12061 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12062 " jnz 1b\n\t"
12063 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12064 long tmp;
12065 asm volatile("# beginning down_write\n\t"
12066 LOCK_PREFIX " xadd %1,(%2)\n\t"
12067 +
12068 +#ifdef CONFIG_PAX_REFCOUNT
12069 + "jno 0f\n"
12070 + "mov %1,(%2)\n"
12071 + "int $4\n0:\n"
12072 + _ASM_EXTABLE(0b, 0b)
12073 +#endif
12074 +
12075 /* adds 0xffff0001, returns the old value */
12076 " test %1,%1\n\t"
12077 /* was the count 0 before? */
12078 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12079 long tmp;
12080 asm volatile("# beginning __up_read\n\t"
12081 LOCK_PREFIX " xadd %1,(%2)\n\t"
12082 +
12083 +#ifdef CONFIG_PAX_REFCOUNT
12084 + "jno 0f\n"
12085 + "mov %1,(%2)\n"
12086 + "int $4\n0:\n"
12087 + _ASM_EXTABLE(0b, 0b)
12088 +#endif
12089 +
12090 /* subtracts 1, returns the old value */
12091 " jns 1f\n\t"
12092 " call call_rwsem_wake\n" /* expects old value in %edx */
12093 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12094 long tmp;
12095 asm volatile("# beginning __up_write\n\t"
12096 LOCK_PREFIX " xadd %1,(%2)\n\t"
12097 +
12098 +#ifdef CONFIG_PAX_REFCOUNT
12099 + "jno 0f\n"
12100 + "mov %1,(%2)\n"
12101 + "int $4\n0:\n"
12102 + _ASM_EXTABLE(0b, 0b)
12103 +#endif
12104 +
12105 /* subtracts 0xffff0001, returns the old value */
12106 " jns 1f\n\t"
12107 " call call_rwsem_wake\n" /* expects old value in %edx */
12108 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12109 {
12110 asm volatile("# beginning __downgrade_write\n\t"
12111 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12112 +
12113 +#ifdef CONFIG_PAX_REFCOUNT
12114 + "jno 0f\n"
12115 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12116 + "int $4\n0:\n"
12117 + _ASM_EXTABLE(0b, 0b)
12118 +#endif
12119 +
12120 /*
12121 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12122 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12123 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12124 */
12125 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12126 {
12127 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12128 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12129 +
12130 +#ifdef CONFIG_PAX_REFCOUNT
12131 + "jno 0f\n"
12132 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12133 + "int $4\n0:\n"
12134 + _ASM_EXTABLE(0b, 0b)
12135 +#endif
12136 +
12137 : "+m" (sem->count)
12138 : "er" (delta));
12139 }
12140 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12141 */
12142 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12143 {
12144 - return delta + xadd(&sem->count, delta);
12145 + return delta + xadd_check_overflow(&sem->count, delta);
12146 }
12147
12148 #endif /* __KERNEL__ */
12149 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12150 index 1654662..5af4157 100644
12151 --- a/arch/x86/include/asm/segment.h
12152 +++ b/arch/x86/include/asm/segment.h
12153 @@ -64,10 +64,15 @@
12154 * 26 - ESPFIX small SS
12155 * 27 - per-cpu [ offset to per-cpu data area ]
12156 * 28 - stack_canary-20 [ for stack protector ]
12157 - * 29 - unused
12158 - * 30 - unused
12159 + * 29 - PCI BIOS CS
12160 + * 30 - PCI BIOS DS
12161 * 31 - TSS for double fault handler
12162 */
12163 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12164 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12165 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12166 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12167 +
12168 #define GDT_ENTRY_TLS_MIN 6
12169 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12170
12171 @@ -79,6 +84,8 @@
12172
12173 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12174
12175 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12176 +
12177 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12178
12179 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12180 @@ -104,6 +111,12 @@
12181 #define __KERNEL_STACK_CANARY 0
12182 #endif
12183
12184 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12185 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12186 +
12187 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12188 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12189 +
12190 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12191
12192 /*
12193 @@ -141,7 +154,7 @@
12194 */
12195
12196 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12197 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12198 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12199
12200
12201 #else
12202 @@ -165,6 +178,8 @@
12203 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12204 #define __USER32_DS __USER_DS
12205
12206 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12207 +
12208 #define GDT_ENTRY_TSS 8 /* needs two entries */
12209 #define GDT_ENTRY_LDT 10 /* needs two entries */
12210 #define GDT_ENTRY_TLS_MIN 12
12211 @@ -185,6 +200,7 @@
12212 #endif
12213
12214 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12215 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12216 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12217 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12218 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12219 @@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12220 {
12221 unsigned long __limit;
12222 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12223 - return __limit + 1;
12224 + return __limit;
12225 }
12226
12227 #endif /* !__ASSEMBLY__ */
12228 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12229 index 0434c40..1714bf0 100644
12230 --- a/arch/x86/include/asm/smp.h
12231 +++ b/arch/x86/include/asm/smp.h
12232 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12233 /* cpus sharing the last level cache: */
12234 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12235 DECLARE_PER_CPU(u16, cpu_llc_id);
12236 -DECLARE_PER_CPU(int, cpu_number);
12237 +DECLARE_PER_CPU(unsigned int, cpu_number);
12238
12239 static inline struct cpumask *cpu_sibling_mask(int cpu)
12240 {
12241 @@ -77,7 +77,7 @@ struct smp_ops {
12242
12243 void (*send_call_func_ipi)(const struct cpumask *mask);
12244 void (*send_call_func_single_ipi)(int cpu);
12245 -};
12246 +} __no_const;
12247
12248 /* Globals due to paravirt */
12249 extern void set_cpu_sibling_map(int cpu);
12250 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12251 extern int safe_smp_processor_id(void);
12252
12253 #elif defined(CONFIG_X86_64_SMP)
12254 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12255 -
12256 -#define stack_smp_processor_id() \
12257 -({ \
12258 - struct thread_info *ti; \
12259 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12260 - ti->cpu; \
12261 -})
12262 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12263 +#define stack_smp_processor_id() raw_smp_processor_id()
12264 #define safe_smp_processor_id() smp_processor_id()
12265
12266 #endif
12267 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12268 index 76bfa2c..12d3fe7 100644
12269 --- a/arch/x86/include/asm/spinlock.h
12270 +++ b/arch/x86/include/asm/spinlock.h
12271 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12272 static inline void arch_read_lock(arch_rwlock_t *rw)
12273 {
12274 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12275 +
12276 +#ifdef CONFIG_PAX_REFCOUNT
12277 + "jno 0f\n"
12278 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12279 + "int $4\n0:\n"
12280 + _ASM_EXTABLE(0b, 0b)
12281 +#endif
12282 +
12283 "jns 1f\n"
12284 "call __read_lock_failed\n\t"
12285 "1:\n"
12286 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12287 static inline void arch_write_lock(arch_rwlock_t *rw)
12288 {
12289 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12290 +
12291 +#ifdef CONFIG_PAX_REFCOUNT
12292 + "jno 0f\n"
12293 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12294 + "int $4\n0:\n"
12295 + _ASM_EXTABLE(0b, 0b)
12296 +#endif
12297 +
12298 "jz 1f\n"
12299 "call __write_lock_failed\n\t"
12300 "1:\n"
12301 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12302
12303 static inline void arch_read_unlock(arch_rwlock_t *rw)
12304 {
12305 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12306 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12307 +
12308 +#ifdef CONFIG_PAX_REFCOUNT
12309 + "jno 0f\n"
12310 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12311 + "int $4\n0:\n"
12312 + _ASM_EXTABLE(0b, 0b)
12313 +#endif
12314 +
12315 :"+m" (rw->lock) : : "memory");
12316 }
12317
12318 static inline void arch_write_unlock(arch_rwlock_t *rw)
12319 {
12320 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12321 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12322 +
12323 +#ifdef CONFIG_PAX_REFCOUNT
12324 + "jno 0f\n"
12325 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12326 + "int $4\n0:\n"
12327 + _ASM_EXTABLE(0b, 0b)
12328 +#endif
12329 +
12330 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12331 }
12332
12333 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12334 index b5d9533..41655fa 100644
12335 --- a/arch/x86/include/asm/stackprotector.h
12336 +++ b/arch/x86/include/asm/stackprotector.h
12337 @@ -47,7 +47,7 @@
12338 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12339 */
12340 #define GDT_STACK_CANARY_INIT \
12341 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12342 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12343
12344 /*
12345 * Initialize the stackprotector canary value.
12346 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12347
12348 static inline void load_stack_canary_segment(void)
12349 {
12350 -#ifdef CONFIG_X86_32
12351 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12352 asm volatile ("mov %0, %%gs" : : "r" (0));
12353 #endif
12354 }
12355 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12356 index 70bbe39..4ae2bd4 100644
12357 --- a/arch/x86/include/asm/stacktrace.h
12358 +++ b/arch/x86/include/asm/stacktrace.h
12359 @@ -11,28 +11,20 @@
12360
12361 extern int kstack_depth_to_print;
12362
12363 -struct thread_info;
12364 +struct task_struct;
12365 struct stacktrace_ops;
12366
12367 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12368 - unsigned long *stack,
12369 - unsigned long bp,
12370 - const struct stacktrace_ops *ops,
12371 - void *data,
12372 - unsigned long *end,
12373 - int *graph);
12374 +typedef unsigned long walk_stack_t(struct task_struct *task,
12375 + void *stack_start,
12376 + unsigned long *stack,
12377 + unsigned long bp,
12378 + const struct stacktrace_ops *ops,
12379 + void *data,
12380 + unsigned long *end,
12381 + int *graph);
12382
12383 -extern unsigned long
12384 -print_context_stack(struct thread_info *tinfo,
12385 - unsigned long *stack, unsigned long bp,
12386 - const struct stacktrace_ops *ops, void *data,
12387 - unsigned long *end, int *graph);
12388 -
12389 -extern unsigned long
12390 -print_context_stack_bp(struct thread_info *tinfo,
12391 - unsigned long *stack, unsigned long bp,
12392 - const struct stacktrace_ops *ops, void *data,
12393 - unsigned long *end, int *graph);
12394 +extern walk_stack_t print_context_stack;
12395 +extern walk_stack_t print_context_stack_bp;
12396
12397 /* Generic stack tracer with callbacks */
12398
12399 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12400 void (*address)(void *data, unsigned long address, int reliable);
12401 /* On negative return stop dumping */
12402 int (*stack)(void *data, char *name);
12403 - walk_stack_t walk_stack;
12404 + walk_stack_t *walk_stack;
12405 };
12406
12407 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12408 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12409 index 4ec45b3..a4f0a8a 100644
12410 --- a/arch/x86/include/asm/switch_to.h
12411 +++ b/arch/x86/include/asm/switch_to.h
12412 @@ -108,7 +108,7 @@ do { \
12413 "call __switch_to\n\t" \
12414 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12415 __switch_canary \
12416 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12417 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12418 "movq %%rax,%%rdi\n\t" \
12419 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12420 "jnz ret_from_fork\n\t" \
12421 @@ -119,7 +119,7 @@ do { \
12422 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12423 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12424 [_tif_fork] "i" (_TIF_FORK), \
12425 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12426 + [thread_info] "m" (current_tinfo), \
12427 [current_task] "m" (current_task) \
12428 __switch_canary_iparam \
12429 : "memory", "cc" __EXTRA_CLOBBER)
12430 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12431 index 3fda9db4..4ca1c61 100644
12432 --- a/arch/x86/include/asm/sys_ia32.h
12433 +++ b/arch/x86/include/asm/sys_ia32.h
12434 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12435 struct old_sigaction32 __user *);
12436 asmlinkage long sys32_alarm(unsigned int);
12437
12438 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12439 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12440 asmlinkage long sys32_sysfs(int, u32, u32);
12441
12442 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12443 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12444 index ad6df8c..5e0cf6e 100644
12445 --- a/arch/x86/include/asm/thread_info.h
12446 +++ b/arch/x86/include/asm/thread_info.h
12447 @@ -10,6 +10,7 @@
12448 #include <linux/compiler.h>
12449 #include <asm/page.h>
12450 #include <asm/types.h>
12451 +#include <asm/percpu.h>
12452
12453 /*
12454 * low level task data that entry.S needs immediate access to
12455 @@ -24,7 +25,6 @@ struct exec_domain;
12456 #include <linux/atomic.h>
12457
12458 struct thread_info {
12459 - struct task_struct *task; /* main task structure */
12460 struct exec_domain *exec_domain; /* execution domain */
12461 __u32 flags; /* low level flags */
12462 __u32 status; /* thread synchronous flags */
12463 @@ -34,19 +34,13 @@ struct thread_info {
12464 mm_segment_t addr_limit;
12465 struct restart_block restart_block;
12466 void __user *sysenter_return;
12467 -#ifdef CONFIG_X86_32
12468 - unsigned long previous_esp; /* ESP of the previous stack in
12469 - case of nested (IRQ) stacks
12470 - */
12471 - __u8 supervisor_stack[0];
12472 -#endif
12473 + unsigned long lowest_stack;
12474 unsigned int sig_on_uaccess_error:1;
12475 unsigned int uaccess_err:1; /* uaccess failed */
12476 };
12477
12478 -#define INIT_THREAD_INFO(tsk) \
12479 +#define INIT_THREAD_INFO \
12480 { \
12481 - .task = &tsk, \
12482 .exec_domain = &default_exec_domain, \
12483 .flags = 0, \
12484 .cpu = 0, \
12485 @@ -57,7 +51,7 @@ struct thread_info {
12486 }, \
12487 }
12488
12489 -#define init_thread_info (init_thread_union.thread_info)
12490 +#define init_thread_info (init_thread_union.stack)
12491 #define init_stack (init_thread_union.stack)
12492
12493 #else /* !__ASSEMBLY__ */
12494 @@ -97,6 +91,7 @@ struct thread_info {
12495 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12496 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12497 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12498 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12499
12500 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12501 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12502 @@ -120,16 +115,18 @@ struct thread_info {
12503 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12504 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12505 #define _TIF_X32 (1 << TIF_X32)
12506 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12507
12508 /* work to do in syscall_trace_enter() */
12509 #define _TIF_WORK_SYSCALL_ENTRY \
12510 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12511 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12512 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12513 + _TIF_GRSEC_SETXID)
12514
12515 /* work to do in syscall_trace_leave() */
12516 #define _TIF_WORK_SYSCALL_EXIT \
12517 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12518 - _TIF_SYSCALL_TRACEPOINT)
12519 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12520
12521 /* work to do on interrupt/exception return */
12522 #define _TIF_WORK_MASK \
12523 @@ -139,7 +136,8 @@ struct thread_info {
12524
12525 /* work to do on any return to user space */
12526 #define _TIF_ALLWORK_MASK \
12527 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12528 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12529 + _TIF_GRSEC_SETXID)
12530
12531 /* Only used for 64 bit */
12532 #define _TIF_DO_NOTIFY_MASK \
12533 @@ -173,45 +171,40 @@ struct thread_info {
12534 ret; \
12535 })
12536
12537 -#ifdef CONFIG_X86_32
12538 -
12539 -#define STACK_WARN (THREAD_SIZE/8)
12540 -/*
12541 - * macros/functions for gaining access to the thread information structure
12542 - *
12543 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12544 - */
12545 -#ifndef __ASSEMBLY__
12546 -
12547 -
12548 -/* how to get the current stack pointer from C */
12549 -register unsigned long current_stack_pointer asm("esp") __used;
12550 -
12551 -/* how to get the thread information struct from C */
12552 -static inline struct thread_info *current_thread_info(void)
12553 -{
12554 - return (struct thread_info *)
12555 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12556 -}
12557 -
12558 -#else /* !__ASSEMBLY__ */
12559 -
12560 +#ifdef __ASSEMBLY__
12561 /* how to get the thread information struct from ASM */
12562 #define GET_THREAD_INFO(reg) \
12563 - movl $-THREAD_SIZE, reg; \
12564 - andl %esp, reg
12565 + mov PER_CPU_VAR(current_tinfo), reg
12566
12567 /* use this one if reg already contains %esp */
12568 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12569 - andl $-THREAD_SIZE, reg
12570 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12571 +#else
12572 +/* how to get the thread information struct from C */
12573 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12574 +
12575 +static __always_inline struct thread_info *current_thread_info(void)
12576 +{
12577 + return percpu_read_stable(current_tinfo);
12578 +}
12579 +#endif
12580 +
12581 +#ifdef CONFIG_X86_32
12582 +
12583 +#define STACK_WARN (THREAD_SIZE/8)
12584 +/*
12585 + * macros/functions for gaining access to the thread information structure
12586 + *
12587 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12588 + */
12589 +#ifndef __ASSEMBLY__
12590 +
12591 +/* how to get the current stack pointer from C */
12592 +register unsigned long current_stack_pointer asm("esp") __used;
12593
12594 #endif
12595
12596 #else /* X86_32 */
12597
12598 -#include <asm/percpu.h>
12599 -#define KERNEL_STACK_OFFSET (5*8)
12600 -
12601 /*
12602 * macros/functions for gaining access to the thread information structure
12603 * preempt_count needs to be 1 initially, until the scheduler is functional.
12604 @@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12605 #ifndef __ASSEMBLY__
12606 DECLARE_PER_CPU(unsigned long, kernel_stack);
12607
12608 -static inline struct thread_info *current_thread_info(void)
12609 -{
12610 - struct thread_info *ti;
12611 - ti = (void *)(percpu_read_stable(kernel_stack) +
12612 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12613 - return ti;
12614 -}
12615 -
12616 -#else /* !__ASSEMBLY__ */
12617 -
12618 -/* how to get the thread information struct from ASM */
12619 -#define GET_THREAD_INFO(reg) \
12620 - movq PER_CPU_VAR(kernel_stack),reg ; \
12621 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12622 -
12623 -/*
12624 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12625 - * a certain register (to be used in assembler memory operands).
12626 - */
12627 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12628 -
12629 +/* how to get the current stack pointer from C */
12630 +register unsigned long current_stack_pointer asm("rsp") __used;
12631 #endif
12632
12633 #endif /* !X86_32 */
12634 @@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12635 extern void free_thread_info(struct thread_info *ti);
12636 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12637 #define arch_task_cache_init arch_task_cache_init
12638 +
12639 +#define __HAVE_THREAD_FUNCTIONS
12640 +#define task_thread_info(task) (&(task)->tinfo)
12641 +#define task_stack_page(task) ((task)->stack)
12642 +#define setup_thread_stack(p, org) do {} while (0)
12643 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12644 +
12645 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12646 +extern struct task_struct *alloc_task_struct_node(int node);
12647 +extern void free_task_struct(struct task_struct *);
12648 +
12649 #endif
12650 #endif /* _ASM_X86_THREAD_INFO_H */
12651 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12652 index e054459..14bc8a7 100644
12653 --- a/arch/x86/include/asm/uaccess.h
12654 +++ b/arch/x86/include/asm/uaccess.h
12655 @@ -7,12 +7,15 @@
12656 #include <linux/compiler.h>
12657 #include <linux/thread_info.h>
12658 #include <linux/string.h>
12659 +#include <linux/sched.h>
12660 #include <asm/asm.h>
12661 #include <asm/page.h>
12662
12663 #define VERIFY_READ 0
12664 #define VERIFY_WRITE 1
12665
12666 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12667 +
12668 /*
12669 * The fs value determines whether argument validity checking should be
12670 * performed or not. If get_fs() == USER_DS, checking is performed, with
12671 @@ -28,7 +31,12 @@
12672
12673 #define get_ds() (KERNEL_DS)
12674 #define get_fs() (current_thread_info()->addr_limit)
12675 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12676 +void __set_fs(mm_segment_t x);
12677 +void set_fs(mm_segment_t x);
12678 +#else
12679 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12680 +#endif
12681
12682 #define segment_eq(a, b) ((a).seg == (b).seg)
12683
12684 @@ -76,7 +84,33 @@
12685 * checks that the pointer is in the user space range - after calling
12686 * this function, memory access functions may still return -EFAULT.
12687 */
12688 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12689 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12690 +#define access_ok(type, addr, size) \
12691 +({ \
12692 + long __size = size; \
12693 + unsigned long __addr = (unsigned long)addr; \
12694 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12695 + unsigned long __end_ao = __addr + __size - 1; \
12696 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12697 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12698 + while(__addr_ao <= __end_ao) { \
12699 + char __c_ao; \
12700 + __addr_ao += PAGE_SIZE; \
12701 + if (__size > PAGE_SIZE) \
12702 + cond_resched(); \
12703 + if (__get_user(__c_ao, (char __user *)__addr)) \
12704 + break; \
12705 + if (type != VERIFY_WRITE) { \
12706 + __addr = __addr_ao; \
12707 + continue; \
12708 + } \
12709 + if (__put_user(__c_ao, (char __user *)__addr)) \
12710 + break; \
12711 + __addr = __addr_ao; \
12712 + } \
12713 + } \
12714 + __ret_ao; \
12715 +})
12716
12717 /*
12718 * The exception table consists of pairs of addresses: the first is the
12719 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12720 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12721 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12722
12723 -
12724 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12725 +#define __copyuser_seg "gs;"
12726 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12727 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12728 +#else
12729 +#define __copyuser_seg
12730 +#define __COPYUSER_SET_ES
12731 +#define __COPYUSER_RESTORE_ES
12732 +#endif
12733
12734 #ifdef CONFIG_X86_32
12735 #define __put_user_asm_u64(x, addr, err, errret) \
12736 - asm volatile("1: movl %%eax,0(%2)\n" \
12737 - "2: movl %%edx,4(%2)\n" \
12738 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12739 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12740 "3:\n" \
12741 ".section .fixup,\"ax\"\n" \
12742 "4: movl %3,%0\n" \
12743 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12744 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12745
12746 #define __put_user_asm_ex_u64(x, addr) \
12747 - asm volatile("1: movl %%eax,0(%1)\n" \
12748 - "2: movl %%edx,4(%1)\n" \
12749 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12750 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12751 "3:\n" \
12752 _ASM_EXTABLE(1b, 2b - 1b) \
12753 _ASM_EXTABLE(2b, 3b - 2b) \
12754 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12755 __typeof__(*(ptr)) __pu_val; \
12756 __chk_user_ptr(ptr); \
12757 might_fault(); \
12758 - __pu_val = x; \
12759 + __pu_val = (x); \
12760 switch (sizeof(*(ptr))) { \
12761 case 1: \
12762 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12763 @@ -373,7 +415,7 @@ do { \
12764 } while (0)
12765
12766 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12767 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12768 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12769 "2:\n" \
12770 ".section .fixup,\"ax\"\n" \
12771 "3: mov %3,%0\n" \
12772 @@ -381,7 +423,7 @@ do { \
12773 " jmp 2b\n" \
12774 ".previous\n" \
12775 _ASM_EXTABLE(1b, 3b) \
12776 - : "=r" (err), ltype(x) \
12777 + : "=r" (err), ltype (x) \
12778 : "m" (__m(addr)), "i" (errret), "0" (err))
12779
12780 #define __get_user_size_ex(x, ptr, size) \
12781 @@ -406,7 +448,7 @@ do { \
12782 } while (0)
12783
12784 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12785 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12786 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12787 "2:\n" \
12788 _ASM_EXTABLE(1b, 2b - 1b) \
12789 : ltype(x) : "m" (__m(addr)))
12790 @@ -423,13 +465,24 @@ do { \
12791 int __gu_err; \
12792 unsigned long __gu_val; \
12793 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12794 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12795 + (x) = (__typeof__(*(ptr)))__gu_val; \
12796 __gu_err; \
12797 })
12798
12799 /* FIXME: this hack is definitely wrong -AK */
12800 struct __large_struct { unsigned long buf[100]; };
12801 -#define __m(x) (*(struct __large_struct __user *)(x))
12802 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12803 +#define ____m(x) \
12804 +({ \
12805 + unsigned long ____x = (unsigned long)(x); \
12806 + if (____x < PAX_USER_SHADOW_BASE) \
12807 + ____x += PAX_USER_SHADOW_BASE; \
12808 + (void __user *)____x; \
12809 +})
12810 +#else
12811 +#define ____m(x) (x)
12812 +#endif
12813 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12814
12815 /*
12816 * Tell gcc we read from memory instead of writing: this is because
12817 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12818 * aliasing issues.
12819 */
12820 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12821 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12822 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12823 "2:\n" \
12824 ".section .fixup,\"ax\"\n" \
12825 "3: mov %3,%0\n" \
12826 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12827 ".previous\n" \
12828 _ASM_EXTABLE(1b, 3b) \
12829 : "=r"(err) \
12830 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12831 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12832
12833 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12834 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12835 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12836 "2:\n" \
12837 _ASM_EXTABLE(1b, 2b - 1b) \
12838 : : ltype(x), "m" (__m(addr)))
12839 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12840 * On error, the variable @x is set to zero.
12841 */
12842
12843 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12844 +#define __get_user(x, ptr) get_user((x), (ptr))
12845 +#else
12846 #define __get_user(x, ptr) \
12847 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12848 +#endif
12849
12850 /**
12851 * __put_user: - Write a simple value into user space, with less checking.
12852 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12853 * Returns zero on success, or -EFAULT on error.
12854 */
12855
12856 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12857 +#define __put_user(x, ptr) put_user((x), (ptr))
12858 +#else
12859 #define __put_user(x, ptr) \
12860 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12861 +#endif
12862
12863 #define __get_user_unaligned __get_user
12864 #define __put_user_unaligned __put_user
12865 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12866 #define get_user_ex(x, ptr) do { \
12867 unsigned long __gue_val; \
12868 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12869 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12870 + (x) = (__typeof__(*(ptr)))__gue_val; \
12871 } while (0)
12872
12873 #ifdef CONFIG_X86_WP_WORKS_OK
12874 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12875 index 8084bc7..3d6ec37 100644
12876 --- a/arch/x86/include/asm/uaccess_32.h
12877 +++ b/arch/x86/include/asm/uaccess_32.h
12878 @@ -11,15 +11,15 @@
12879 #include <asm/page.h>
12880
12881 unsigned long __must_check __copy_to_user_ll
12882 - (void __user *to, const void *from, unsigned long n);
12883 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12884 unsigned long __must_check __copy_from_user_ll
12885 - (void *to, const void __user *from, unsigned long n);
12886 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12887 unsigned long __must_check __copy_from_user_ll_nozero
12888 - (void *to, const void __user *from, unsigned long n);
12889 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll_nocache
12891 - (void *to, const void __user *from, unsigned long n);
12892 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12894 - (void *to, const void __user *from, unsigned long n);
12895 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896
12897 /**
12898 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12899 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900 static __always_inline unsigned long __must_check
12901 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12902 {
12903 + if ((long)n < 0)
12904 + return n;
12905 +
12906 if (__builtin_constant_p(n)) {
12907 unsigned long ret;
12908
12909 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12910 return ret;
12911 }
12912 }
12913 + if (!__builtin_constant_p(n))
12914 + check_object_size(from, n, true);
12915 return __copy_to_user_ll(to, from, n);
12916 }
12917
12918 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12919 __copy_to_user(void __user *to, const void *from, unsigned long n)
12920 {
12921 might_fault();
12922 +
12923 return __copy_to_user_inatomic(to, from, n);
12924 }
12925
12926 static __always_inline unsigned long
12927 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12928 {
12929 + if ((long)n < 0)
12930 + return n;
12931 +
12932 /* Avoid zeroing the tail if the copy fails..
12933 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12934 * but as the zeroing behaviour is only significant when n is not
12935 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12936 __copy_from_user(void *to, const void __user *from, unsigned long n)
12937 {
12938 might_fault();
12939 +
12940 + if ((long)n < 0)
12941 + return n;
12942 +
12943 if (__builtin_constant_p(n)) {
12944 unsigned long ret;
12945
12946 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12947 return ret;
12948 }
12949 }
12950 + if (!__builtin_constant_p(n))
12951 + check_object_size(to, n, false);
12952 return __copy_from_user_ll(to, from, n);
12953 }
12954
12955 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12956 const void __user *from, unsigned long n)
12957 {
12958 might_fault();
12959 +
12960 + if ((long)n < 0)
12961 + return n;
12962 +
12963 if (__builtin_constant_p(n)) {
12964 unsigned long ret;
12965
12966 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12967 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12968 unsigned long n)
12969 {
12970 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12971 + if ((long)n < 0)
12972 + return n;
12973 +
12974 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12975 }
12976
12977 -unsigned long __must_check copy_to_user(void __user *to,
12978 - const void *from, unsigned long n);
12979 -unsigned long __must_check _copy_from_user(void *to,
12980 - const void __user *from,
12981 - unsigned long n);
12982 -
12983 +extern void copy_to_user_overflow(void)
12984 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12985 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12986 +#else
12987 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12988 +#endif
12989 +;
12990
12991 extern void copy_from_user_overflow(void)
12992 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12993 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12994 #endif
12995 ;
12996
12997 -static inline unsigned long __must_check copy_from_user(void *to,
12998 - const void __user *from,
12999 - unsigned long n)
13000 +/**
13001 + * copy_to_user: - Copy a block of data into user space.
13002 + * @to: Destination address, in user space.
13003 + * @from: Source address, in kernel space.
13004 + * @n: Number of bytes to copy.
13005 + *
13006 + * Context: User context only. This function may sleep.
13007 + *
13008 + * Copy data from kernel space to user space.
13009 + *
13010 + * Returns number of bytes that could not be copied.
13011 + * On success, this will be zero.
13012 + */
13013 +static inline unsigned long __must_check
13014 +copy_to_user(void __user *to, const void *from, unsigned long n)
13015 {
13016 - int sz = __compiletime_object_size(to);
13017 + size_t sz = __compiletime_object_size(from);
13018
13019 - if (likely(sz == -1 || sz >= n))
13020 - n = _copy_from_user(to, from, n);
13021 - else
13022 + if (unlikely(sz != (size_t)-1 && sz < n))
13023 + copy_to_user_overflow();
13024 + else if (access_ok(VERIFY_WRITE, to, n))
13025 + n = __copy_to_user(to, from, n);
13026 + return n;
13027 +}
13028 +
13029 +/**
13030 + * copy_from_user: - Copy a block of data from user space.
13031 + * @to: Destination address, in kernel space.
13032 + * @from: Source address, in user space.
13033 + * @n: Number of bytes to copy.
13034 + *
13035 + * Context: User context only. This function may sleep.
13036 + *
13037 + * Copy data from user space to kernel space.
13038 + *
13039 + * Returns number of bytes that could not be copied.
13040 + * On success, this will be zero.
13041 + *
13042 + * If some data could not be copied, this function will pad the copied
13043 + * data to the requested size using zero bytes.
13044 + */
13045 +static inline unsigned long __must_check
13046 +copy_from_user(void *to, const void __user *from, unsigned long n)
13047 +{
13048 + size_t sz = __compiletime_object_size(to);
13049 +
13050 + if (unlikely(sz != (size_t)-1 && sz < n))
13051 copy_from_user_overflow();
13052 -
13053 + else if (access_ok(VERIFY_READ, from, n))
13054 + n = __copy_from_user(to, from, n);
13055 + else if ((long)n > 0) {
13056 + if (!__builtin_constant_p(n))
13057 + check_object_size(to, n, false);
13058 + memset(to, 0, n);
13059 + }
13060 return n;
13061 }
13062
13063 @@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13064 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13065
13066 long strnlen_user(const char __user *str, long n);
13067 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13068 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13069 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13070 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13071
13072 #endif /* _ASM_X86_UACCESS_32_H */
13073 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13074 index fcd4b6f..ef04f8f 100644
13075 --- a/arch/x86/include/asm/uaccess_64.h
13076 +++ b/arch/x86/include/asm/uaccess_64.h
13077 @@ -10,6 +10,9 @@
13078 #include <asm/alternative.h>
13079 #include <asm/cpufeature.h>
13080 #include <asm/page.h>
13081 +#include <asm/pgtable.h>
13082 +
13083 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13084
13085 /*
13086 * Copy To/From Userspace
13087 @@ -17,12 +20,14 @@
13088
13089 /* Handles exceptions in both to and from, but doesn't do access_ok */
13090 __must_check unsigned long
13091 -copy_user_generic_string(void *to, const void *from, unsigned len);
13092 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13093 __must_check unsigned long
13094 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13095 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13096
13097 static __always_inline __must_check unsigned long
13098 -copy_user_generic(void *to, const void *from, unsigned len)
13099 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13100 +static __always_inline __must_check unsigned long
13101 +copy_user_generic(void *to, const void *from, unsigned long len)
13102 {
13103 unsigned ret;
13104
13105 @@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13106 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13107 "=d" (len)),
13108 "1" (to), "2" (from), "3" (len)
13109 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13110 + : "memory", "rcx", "r8", "r9", "r11");
13111 return ret;
13112 }
13113
13114 +static __always_inline __must_check unsigned long
13115 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13116 +static __always_inline __must_check unsigned long
13117 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13118 __must_check unsigned long
13119 -_copy_to_user(void __user *to, const void *from, unsigned len);
13120 -__must_check unsigned long
13121 -_copy_from_user(void *to, const void __user *from, unsigned len);
13122 -__must_check unsigned long
13123 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13124 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13125 +
13126 +extern void copy_to_user_overflow(void)
13127 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13128 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13129 +#else
13130 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13131 +#endif
13132 +;
13133 +
13134 +extern void copy_from_user_overflow(void)
13135 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13136 + __compiletime_error("copy_from_user() buffer size is not provably correct")
13137 +#else
13138 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
13139 +#endif
13140 +;
13141
13142 static inline unsigned long __must_check copy_from_user(void *to,
13143 const void __user *from,
13144 unsigned long n)
13145 {
13146 - int sz = __compiletime_object_size(to);
13147 -
13148 might_fault();
13149 - if (likely(sz == -1 || sz >= n))
13150 - n = _copy_from_user(to, from, n);
13151 -#ifdef CONFIG_DEBUG_VM
13152 - else
13153 - WARN(1, "Buffer overflow detected!\n");
13154 -#endif
13155 +
13156 + if (access_ok(VERIFY_READ, from, n))
13157 + n = __copy_from_user(to, from, n);
13158 + else if (n < INT_MAX) {
13159 + if (!__builtin_constant_p(n))
13160 + check_object_size(to, n, false);
13161 + memset(to, 0, n);
13162 + }
13163 return n;
13164 }
13165
13166 static __always_inline __must_check
13167 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13168 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13169 {
13170 might_fault();
13171
13172 - return _copy_to_user(dst, src, size);
13173 + if (access_ok(VERIFY_WRITE, dst, size))
13174 + size = __copy_to_user(dst, src, size);
13175 + return size;
13176 }
13177
13178 static __always_inline __must_check
13179 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13180 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13181 {
13182 - int ret = 0;
13183 + size_t sz = __compiletime_object_size(dst);
13184 + unsigned ret = 0;
13185
13186 might_fault();
13187 - if (!__builtin_constant_p(size))
13188 - return copy_user_generic(dst, (__force void *)src, size);
13189 +
13190 + if (size > INT_MAX)
13191 + return size;
13192 +
13193 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13194 + if (!__access_ok(VERIFY_READ, src, size))
13195 + return size;
13196 +#endif
13197 +
13198 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13199 + copy_from_user_overflow();
13200 + return size;
13201 + }
13202 +
13203 + if (!__builtin_constant_p(size)) {
13204 + check_object_size(dst, size, false);
13205 +
13206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13207 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13208 + src += PAX_USER_SHADOW_BASE;
13209 +#endif
13210 +
13211 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13212 + }
13213 switch (size) {
13214 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13215 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13216 ret, "b", "b", "=q", 1);
13217 return ret;
13218 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13219 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13220 ret, "w", "w", "=r", 2);
13221 return ret;
13222 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13223 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13224 ret, "l", "k", "=r", 4);
13225 return ret;
13226 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13227 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13228 ret, "q", "", "=r", 8);
13229 return ret;
13230 case 10:
13231 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13232 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13233 ret, "q", "", "=r", 10);
13234 if (unlikely(ret))
13235 return ret;
13236 __get_user_asm(*(u16 *)(8 + (char *)dst),
13237 - (u16 __user *)(8 + (char __user *)src),
13238 + (const u16 __user *)(8 + (const char __user *)src),
13239 ret, "w", "w", "=r", 2);
13240 return ret;
13241 case 16:
13242 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13243 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13244 ret, "q", "", "=r", 16);
13245 if (unlikely(ret))
13246 return ret;
13247 __get_user_asm(*(u64 *)(8 + (char *)dst),
13248 - (u64 __user *)(8 + (char __user *)src),
13249 + (const u64 __user *)(8 + (const char __user *)src),
13250 ret, "q", "", "=r", 8);
13251 return ret;
13252 default:
13253 - return copy_user_generic(dst, (__force void *)src, size);
13254 +
13255 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13256 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13257 + src += PAX_USER_SHADOW_BASE;
13258 +#endif
13259 +
13260 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13261 }
13262 }
13263
13264 static __always_inline __must_check
13265 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13266 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13267 {
13268 - int ret = 0;
13269 + size_t sz = __compiletime_object_size(src);
13270 + unsigned ret = 0;
13271
13272 might_fault();
13273 - if (!__builtin_constant_p(size))
13274 - return copy_user_generic((__force void *)dst, src, size);
13275 +
13276 + if (size > INT_MAX)
13277 + return size;
13278 +
13279 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13280 + if (!__access_ok(VERIFY_WRITE, dst, size))
13281 + return size;
13282 +#endif
13283 +
13284 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13285 + copy_to_user_overflow();
13286 + return size;
13287 + }
13288 +
13289 + if (!__builtin_constant_p(size)) {
13290 + check_object_size(src, size, true);
13291 +
13292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13293 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13294 + dst += PAX_USER_SHADOW_BASE;
13295 +#endif
13296 +
13297 + return copy_user_generic((__force_kernel void *)dst, src, size);
13298 + }
13299 switch (size) {
13300 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13301 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13302 ret, "b", "b", "iq", 1);
13303 return ret;
13304 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13305 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13306 ret, "w", "w", "ir", 2);
13307 return ret;
13308 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13309 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13310 ret, "l", "k", "ir", 4);
13311 return ret;
13312 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13313 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13314 ret, "q", "", "er", 8);
13315 return ret;
13316 case 10:
13317 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13318 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13319 ret, "q", "", "er", 10);
13320 if (unlikely(ret))
13321 return ret;
13322 asm("":::"memory");
13323 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13324 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13325 ret, "w", "w", "ir", 2);
13326 return ret;
13327 case 16:
13328 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13329 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13330 ret, "q", "", "er", 16);
13331 if (unlikely(ret))
13332 return ret;
13333 asm("":::"memory");
13334 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13335 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13336 ret, "q", "", "er", 8);
13337 return ret;
13338 default:
13339 - return copy_user_generic((__force void *)dst, src, size);
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13343 + dst += PAX_USER_SHADOW_BASE;
13344 +#endif
13345 +
13346 + return copy_user_generic((__force_kernel void *)dst, src, size);
13347 }
13348 }
13349
13350 static __always_inline __must_check
13351 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13352 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13353 {
13354 - int ret = 0;
13355 + unsigned ret = 0;
13356
13357 might_fault();
13358 - if (!__builtin_constant_p(size))
13359 - return copy_user_generic((__force void *)dst,
13360 - (__force void *)src, size);
13361 +
13362 + if (size > INT_MAX)
13363 + return size;
13364 +
13365 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13366 + if (!__access_ok(VERIFY_READ, src, size))
13367 + return size;
13368 + if (!__access_ok(VERIFY_WRITE, dst, size))
13369 + return size;
13370 +#endif
13371 +
13372 + if (!__builtin_constant_p(size)) {
13373 +
13374 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13375 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13376 + src += PAX_USER_SHADOW_BASE;
13377 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13378 + dst += PAX_USER_SHADOW_BASE;
13379 +#endif
13380 +
13381 + return copy_user_generic((__force_kernel void *)dst,
13382 + (__force_kernel const void *)src, size);
13383 + }
13384 switch (size) {
13385 case 1: {
13386 u8 tmp;
13387 - __get_user_asm(tmp, (u8 __user *)src,
13388 + __get_user_asm(tmp, (const u8 __user *)src,
13389 ret, "b", "b", "=q", 1);
13390 if (likely(!ret))
13391 __put_user_asm(tmp, (u8 __user *)dst,
13392 @@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13393 }
13394 case 2: {
13395 u16 tmp;
13396 - __get_user_asm(tmp, (u16 __user *)src,
13397 + __get_user_asm(tmp, (const u16 __user *)src,
13398 ret, "w", "w", "=r", 2);
13399 if (likely(!ret))
13400 __put_user_asm(tmp, (u16 __user *)dst,
13401 @@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13402
13403 case 4: {
13404 u32 tmp;
13405 - __get_user_asm(tmp, (u32 __user *)src,
13406 + __get_user_asm(tmp, (const u32 __user *)src,
13407 ret, "l", "k", "=r", 4);
13408 if (likely(!ret))
13409 __put_user_asm(tmp, (u32 __user *)dst,
13410 @@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13411 }
13412 case 8: {
13413 u64 tmp;
13414 - __get_user_asm(tmp, (u64 __user *)src,
13415 + __get_user_asm(tmp, (const u64 __user *)src,
13416 ret, "q", "", "=r", 8);
13417 if (likely(!ret))
13418 __put_user_asm(tmp, (u64 __user *)dst,
13419 @@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13420 return ret;
13421 }
13422 default:
13423 - return copy_user_generic((__force void *)dst,
13424 - (__force void *)src, size);
13425 +
13426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13427 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13428 + src += PAX_USER_SHADOW_BASE;
13429 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13430 + dst += PAX_USER_SHADOW_BASE;
13431 +#endif
13432 +
13433 + return copy_user_generic((__force_kernel void *)dst,
13434 + (__force_kernel const void *)src, size);
13435 }
13436 }
13437
13438 __must_check long strnlen_user(const char __user *str, long n);
13439 __must_check long __strnlen_user(const char __user *str, long n);
13440 __must_check long strlen_user(const char __user *str);
13441 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13442 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13443 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13444 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13445
13446 static __must_check __always_inline int
13447 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13448 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13449 {
13450 - return copy_user_generic(dst, (__force const void *)src, size);
13451 + if (size > INT_MAX)
13452 + return size;
13453 +
13454 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13455 + if (!__access_ok(VERIFY_READ, src, size))
13456 + return size;
13457 +
13458 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13459 + src += PAX_USER_SHADOW_BASE;
13460 +#endif
13461 +
13462 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13463 }
13464
13465 -static __must_check __always_inline int
13466 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13467 +static __must_check __always_inline unsigned long
13468 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13469 {
13470 - return copy_user_generic((__force void *)dst, src, size);
13471 + if (size > INT_MAX)
13472 + return size;
13473 +
13474 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13475 + if (!__access_ok(VERIFY_WRITE, dst, size))
13476 + return size;
13477 +
13478 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13479 + dst += PAX_USER_SHADOW_BASE;
13480 +#endif
13481 +
13482 + return copy_user_generic((__force_kernel void *)dst, src, size);
13483 }
13484
13485 -extern long __copy_user_nocache(void *dst, const void __user *src,
13486 - unsigned size, int zerorest);
13487 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13488 + unsigned long size, int zerorest) __size_overflow(3);
13489
13490 -static inline int
13491 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13492 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13493 {
13494 might_sleep();
13495 +
13496 + if (size > INT_MAX)
13497 + return size;
13498 +
13499 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13500 + if (!__access_ok(VERIFY_READ, src, size))
13501 + return size;
13502 +#endif
13503 +
13504 return __copy_user_nocache(dst, src, size, 1);
13505 }
13506
13507 -static inline int
13508 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509 - unsigned size)
13510 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13511 + unsigned long size)
13512 {
13513 + if (size > INT_MAX)
13514 + return size;
13515 +
13516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13517 + if (!__access_ok(VERIFY_READ, src, size))
13518 + return size;
13519 +#endif
13520 +
13521 return __copy_user_nocache(dst, src, size, 0);
13522 }
13523
13524 -unsigned long
13525 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13526 +extern unsigned long
13527 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13528
13529 #endif /* _ASM_X86_UACCESS_64_H */
13530 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13531 index bb05228..d763d5b 100644
13532 --- a/arch/x86/include/asm/vdso.h
13533 +++ b/arch/x86/include/asm/vdso.h
13534 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13535 #define VDSO32_SYMBOL(base, name) \
13536 ({ \
13537 extern const char VDSO32_##name[]; \
13538 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13539 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13540 })
13541 #endif
13542
13543 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13544 index 764b66a..ad3cfc8 100644
13545 --- a/arch/x86/include/asm/x86_init.h
13546 +++ b/arch/x86/include/asm/x86_init.h
13547 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13548 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13549 void (*find_smp_config)(void);
13550 void (*get_smp_config)(unsigned int early);
13551 -};
13552 +} __no_const;
13553
13554 /**
13555 * struct x86_init_resources - platform specific resource related ops
13556 @@ -43,7 +43,7 @@ struct x86_init_resources {
13557 void (*probe_roms)(void);
13558 void (*reserve_resources)(void);
13559 char *(*memory_setup)(void);
13560 -};
13561 +} __no_const;
13562
13563 /**
13564 * struct x86_init_irqs - platform specific interrupt setup
13565 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13566 void (*pre_vector_init)(void);
13567 void (*intr_init)(void);
13568 void (*trap_init)(void);
13569 -};
13570 +} __no_const;
13571
13572 /**
13573 * struct x86_init_oem - oem platform specific customizing functions
13574 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13575 struct x86_init_oem {
13576 void (*arch_setup)(void);
13577 void (*banner)(void);
13578 -};
13579 +} __no_const;
13580
13581 /**
13582 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13583 @@ -77,7 +77,7 @@ struct x86_init_oem {
13584 */
13585 struct x86_init_mapping {
13586 void (*pagetable_reserve)(u64 start, u64 end);
13587 -};
13588 +} __no_const;
13589
13590 /**
13591 * struct x86_init_paging - platform specific paging functions
13592 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13593 struct x86_init_paging {
13594 void (*pagetable_setup_start)(pgd_t *base);
13595 void (*pagetable_setup_done)(pgd_t *base);
13596 -};
13597 +} __no_const;
13598
13599 /**
13600 * struct x86_init_timers - platform specific timer setup
13601 @@ -102,7 +102,7 @@ struct x86_init_timers {
13602 void (*tsc_pre_init)(void);
13603 void (*timer_init)(void);
13604 void (*wallclock_init)(void);
13605 -};
13606 +} __no_const;
13607
13608 /**
13609 * struct x86_init_iommu - platform specific iommu setup
13610 @@ -110,7 +110,7 @@ struct x86_init_timers {
13611 */
13612 struct x86_init_iommu {
13613 int (*iommu_init)(void);
13614 -};
13615 +} __no_const;
13616
13617 /**
13618 * struct x86_init_pci - platform specific pci init functions
13619 @@ -124,7 +124,7 @@ struct x86_init_pci {
13620 int (*init)(void);
13621 void (*init_irq)(void);
13622 void (*fixup_irqs)(void);
13623 -};
13624 +} __no_const;
13625
13626 /**
13627 * struct x86_init_ops - functions for platform specific setup
13628 @@ -140,7 +140,7 @@ struct x86_init_ops {
13629 struct x86_init_timers timers;
13630 struct x86_init_iommu iommu;
13631 struct x86_init_pci pci;
13632 -};
13633 +} __no_const;
13634
13635 /**
13636 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13637 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13638 void (*setup_percpu_clockev)(void);
13639 void (*early_percpu_clock_init)(void);
13640 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13641 -};
13642 +} __no_const;
13643
13644 /**
13645 * struct x86_platform_ops - platform specific runtime functions
13646 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13647 int (*i8042_detect)(void);
13648 void (*save_sched_clock_state)(void);
13649 void (*restore_sched_clock_state)(void);
13650 -};
13651 +} __no_const;
13652
13653 struct pci_dev;
13654
13655 @@ -186,7 +186,7 @@ struct x86_msi_ops {
13656 void (*teardown_msi_irq)(unsigned int irq);
13657 void (*teardown_msi_irqs)(struct pci_dev *dev);
13658 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13659 -};
13660 +} __no_const;
13661
13662 extern struct x86_init_ops x86_init;
13663 extern struct x86_cpuinit_ops x86_cpuinit;
13664 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13665 index c6ce245..ffbdab7 100644
13666 --- a/arch/x86/include/asm/xsave.h
13667 +++ b/arch/x86/include/asm/xsave.h
13668 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13669 {
13670 int err;
13671
13672 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13673 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13674 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13675 +#endif
13676 +
13677 /*
13678 * Clear the xsave header first, so that reserved fields are
13679 * initialized to zero.
13680 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13681 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13682 {
13683 int err;
13684 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13685 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13686 u32 lmask = mask;
13687 u32 hmask = mask >> 32;
13688
13689 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13690 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13691 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13692 +#endif
13693 +
13694 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13695 "2:\n"
13696 ".section .fixup,\"ax\"\n"
13697 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13698 index 6a564ac..9b1340c 100644
13699 --- a/arch/x86/kernel/acpi/realmode/Makefile
13700 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13701 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13702 $(call cc-option, -fno-stack-protector) \
13703 $(call cc-option, -mpreferred-stack-boundary=2)
13704 KBUILD_CFLAGS += $(call cc-option, -m32)
13705 +ifdef CONSTIFY_PLUGIN
13706 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13707 +endif
13708 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13709 GCOV_PROFILE := n
13710
13711 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13712 index b4fd836..4358fe3 100644
13713 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13714 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13715 @@ -108,6 +108,9 @@ wakeup_code:
13716 /* Do any other stuff... */
13717
13718 #ifndef CONFIG_64BIT
13719 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13720 + call verify_cpu
13721 +
13722 /* This could also be done in C code... */
13723 movl pmode_cr3, %eax
13724 movl %eax, %cr3
13725 @@ -131,6 +134,7 @@ wakeup_code:
13726 movl pmode_cr0, %eax
13727 movl %eax, %cr0
13728 jmp pmode_return
13729 +# include "../../verify_cpu.S"
13730 #else
13731 pushw $0
13732 pushw trampoline_segment
13733 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13734 index 146a49c..1b5338b 100644
13735 --- a/arch/x86/kernel/acpi/sleep.c
13736 +++ b/arch/x86/kernel/acpi/sleep.c
13737 @@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13738 header->trampoline_segment = trampoline_address() >> 4;
13739 #ifdef CONFIG_SMP
13740 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13741 +
13742 + pax_open_kernel();
13743 early_gdt_descr.address =
13744 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13745 + pax_close_kernel();
13746 +
13747 initial_gs = per_cpu_offset(smp_processor_id());
13748 #endif
13749 initial_code = (unsigned long)wakeup_long64;
13750 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13751 index 7261083..5c12053 100644
13752 --- a/arch/x86/kernel/acpi/wakeup_32.S
13753 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13754 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13755 # and restore the stack ... but you need gdt for this to work
13756 movl saved_context_esp, %esp
13757
13758 - movl %cs:saved_magic, %eax
13759 - cmpl $0x12345678, %eax
13760 + cmpl $0x12345678, saved_magic
13761 jne bogus_magic
13762
13763 # jump to place where we left off
13764 - movl saved_eip, %eax
13765 - jmp *%eax
13766 + jmp *(saved_eip)
13767
13768 bogus_magic:
13769 jmp bogus_magic
13770 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13771 index 1f84794..e23f862 100644
13772 --- a/arch/x86/kernel/alternative.c
13773 +++ b/arch/x86/kernel/alternative.c
13774 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13775 */
13776 for (a = start; a < end; a++) {
13777 instr = (u8 *)&a->instr_offset + a->instr_offset;
13778 +
13779 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13780 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13781 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13782 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13783 +#endif
13784 +
13785 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13786 BUG_ON(a->replacementlen > a->instrlen);
13787 BUG_ON(a->instrlen > sizeof(insnbuf));
13788 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13789 for (poff = start; poff < end; poff++) {
13790 u8 *ptr = (u8 *)poff + *poff;
13791
13792 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13793 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13794 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13795 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13796 +#endif
13797 +
13798 if (!*poff || ptr < text || ptr >= text_end)
13799 continue;
13800 /* turn DS segment override prefix into lock prefix */
13801 - if (*ptr == 0x3e)
13802 + if (*ktla_ktva(ptr) == 0x3e)
13803 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13804 };
13805 mutex_unlock(&text_mutex);
13806 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13807 for (poff = start; poff < end; poff++) {
13808 u8 *ptr = (u8 *)poff + *poff;
13809
13810 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13811 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13812 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13813 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13814 +#endif
13815 +
13816 if (!*poff || ptr < text || ptr >= text_end)
13817 continue;
13818 /* turn lock prefix into DS segment override prefix */
13819 - if (*ptr == 0xf0)
13820 + if (*ktla_ktva(ptr) == 0xf0)
13821 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13822 };
13823 mutex_unlock(&text_mutex);
13824 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13825
13826 BUG_ON(p->len > MAX_PATCH_LEN);
13827 /* prep the buffer with the original instructions */
13828 - memcpy(insnbuf, p->instr, p->len);
13829 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13830 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13831 (unsigned long)p->instr, p->len);
13832
13833 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13834 if (smp_alt_once)
13835 free_init_pages("SMP alternatives",
13836 (unsigned long)__smp_locks,
13837 - (unsigned long)__smp_locks_end);
13838 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13839
13840 restart_nmi();
13841 }
13842 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13843 * instructions. And on the local CPU you need to be protected again NMI or MCE
13844 * handlers seeing an inconsistent instruction while you patch.
13845 */
13846 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13847 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13848 size_t len)
13849 {
13850 unsigned long flags;
13851 local_irq_save(flags);
13852 - memcpy(addr, opcode, len);
13853 +
13854 + pax_open_kernel();
13855 + memcpy(ktla_ktva(addr), opcode, len);
13856 sync_core();
13857 + pax_close_kernel();
13858 +
13859 local_irq_restore(flags);
13860 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13861 that causes hangs on some VIA CPUs. */
13862 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13863 */
13864 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13865 {
13866 - unsigned long flags;
13867 - char *vaddr;
13868 + unsigned char *vaddr = ktla_ktva(addr);
13869 struct page *pages[2];
13870 - int i;
13871 + size_t i;
13872
13873 if (!core_kernel_text((unsigned long)addr)) {
13874 - pages[0] = vmalloc_to_page(addr);
13875 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13876 + pages[0] = vmalloc_to_page(vaddr);
13877 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13878 } else {
13879 - pages[0] = virt_to_page(addr);
13880 + pages[0] = virt_to_page(vaddr);
13881 WARN_ON(!PageReserved(pages[0]));
13882 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13883 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13884 }
13885 BUG_ON(!pages[0]);
13886 - local_irq_save(flags);
13887 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13888 - if (pages[1])
13889 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13890 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13891 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13892 - clear_fixmap(FIX_TEXT_POKE0);
13893 - if (pages[1])
13894 - clear_fixmap(FIX_TEXT_POKE1);
13895 - local_flush_tlb();
13896 - sync_core();
13897 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13898 - that causes hangs on some VIA CPUs. */
13899 + text_poke_early(addr, opcode, len);
13900 for (i = 0; i < len; i++)
13901 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13902 - local_irq_restore(flags);
13903 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13904 return addr;
13905 }
13906
13907 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13908 index edc2448..553e7c5 100644
13909 --- a/arch/x86/kernel/apic/apic.c
13910 +++ b/arch/x86/kernel/apic/apic.c
13911 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13912 /*
13913 * Debug level, exported for io_apic.c
13914 */
13915 -unsigned int apic_verbosity;
13916 +int apic_verbosity;
13917
13918 int pic_mode;
13919
13920 @@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13921 apic_write(APIC_ESR, 0);
13922 v1 = apic_read(APIC_ESR);
13923 ack_APIC_irq();
13924 - atomic_inc(&irq_err_count);
13925 + atomic_inc_unchecked(&irq_err_count);
13926
13927 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13928 smp_processor_id(), v0 , v1);
13929 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13930 index e88300d..cd5a87a 100644
13931 --- a/arch/x86/kernel/apic/io_apic.c
13932 +++ b/arch/x86/kernel/apic/io_apic.c
13933 @@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13934
13935 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13936 {
13937 - io_apic_ops = *ops;
13938 + pax_open_kernel();
13939 + memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13940 + pax_close_kernel();
13941 }
13942
13943 /*
13944 @@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13945 }
13946 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13947
13948 -void lock_vector_lock(void)
13949 +void lock_vector_lock(void) __acquires(vector_lock)
13950 {
13951 /* Used to the online set of cpus does not change
13952 * during assign_irq_vector.
13953 @@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13954 raw_spin_lock(&vector_lock);
13955 }
13956
13957 -void unlock_vector_lock(void)
13958 +void unlock_vector_lock(void) __releases(vector_lock)
13959 {
13960 raw_spin_unlock(&vector_lock);
13961 }
13962 @@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13963 ack_APIC_irq();
13964 }
13965
13966 -atomic_t irq_mis_count;
13967 +atomic_unchecked_t irq_mis_count;
13968
13969 #ifdef CONFIG_GENERIC_PENDING_IRQ
13970 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13971 @@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13972 * at the cpu.
13973 */
13974 if (!(v & (1 << (i & 0x1f)))) {
13975 - atomic_inc(&irq_mis_count);
13976 + atomic_inc_unchecked(&irq_mis_count);
13977
13978 eoi_ioapic_irq(irq, cfg);
13979 }
13980 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13981 index 459e78c..f037006 100644
13982 --- a/arch/x86/kernel/apm_32.c
13983 +++ b/arch/x86/kernel/apm_32.c
13984 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13985 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13986 * even though they are called in protected mode.
13987 */
13988 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13989 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13990 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13991
13992 static const char driver_version[] = "1.16ac"; /* no spaces */
13993 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13994 BUG_ON(cpu != 0);
13995 gdt = get_cpu_gdt_table(cpu);
13996 save_desc_40 = gdt[0x40 / 8];
13997 +
13998 + pax_open_kernel();
13999 gdt[0x40 / 8] = bad_bios_desc;
14000 + pax_close_kernel();
14001
14002 apm_irq_save(flags);
14003 APM_DO_SAVE_SEGS;
14004 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14005 &call->esi);
14006 APM_DO_RESTORE_SEGS;
14007 apm_irq_restore(flags);
14008 +
14009 + pax_open_kernel();
14010 gdt[0x40 / 8] = save_desc_40;
14011 + pax_close_kernel();
14012 +
14013 put_cpu();
14014
14015 return call->eax & 0xff;
14016 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14017 BUG_ON(cpu != 0);
14018 gdt = get_cpu_gdt_table(cpu);
14019 save_desc_40 = gdt[0x40 / 8];
14020 +
14021 + pax_open_kernel();
14022 gdt[0x40 / 8] = bad_bios_desc;
14023 + pax_close_kernel();
14024
14025 apm_irq_save(flags);
14026 APM_DO_SAVE_SEGS;
14027 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14028 &call->eax);
14029 APM_DO_RESTORE_SEGS;
14030 apm_irq_restore(flags);
14031 +
14032 + pax_open_kernel();
14033 gdt[0x40 / 8] = save_desc_40;
14034 + pax_close_kernel();
14035 +
14036 put_cpu();
14037 return error;
14038 }
14039 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14040 * code to that CPU.
14041 */
14042 gdt = get_cpu_gdt_table(0);
14043 +
14044 + pax_open_kernel();
14045 set_desc_base(&gdt[APM_CS >> 3],
14046 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14047 set_desc_base(&gdt[APM_CS_16 >> 3],
14048 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14049 set_desc_base(&gdt[APM_DS >> 3],
14050 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14051 + pax_close_kernel();
14052
14053 proc_create("apm", 0, NULL, &apm_file_ops);
14054
14055 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14056 index 68de2dc..1f3c720 100644
14057 --- a/arch/x86/kernel/asm-offsets.c
14058 +++ b/arch/x86/kernel/asm-offsets.c
14059 @@ -33,6 +33,8 @@ void common(void) {
14060 OFFSET(TI_status, thread_info, status);
14061 OFFSET(TI_addr_limit, thread_info, addr_limit);
14062 OFFSET(TI_preempt_count, thread_info, preempt_count);
14063 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14064 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14065
14066 BLANK();
14067 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14068 @@ -53,8 +55,26 @@ void common(void) {
14069 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14070 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14071 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14072 +
14073 +#ifdef CONFIG_PAX_KERNEXEC
14074 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14075 #endif
14076
14077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14078 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14079 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14080 +#ifdef CONFIG_X86_64
14081 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14082 +#endif
14083 +#endif
14084 +
14085 +#endif
14086 +
14087 + BLANK();
14088 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14089 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14090 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14091 +
14092 #ifdef CONFIG_XEN
14093 BLANK();
14094 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14095 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14096 index 1b4754f..fbb4227 100644
14097 --- a/arch/x86/kernel/asm-offsets_64.c
14098 +++ b/arch/x86/kernel/asm-offsets_64.c
14099 @@ -76,6 +76,7 @@ int main(void)
14100 BLANK();
14101 #undef ENTRY
14102
14103 + DEFINE(TSS_size, sizeof(struct tss_struct));
14104 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14105 BLANK();
14106
14107 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14108 index 6ab6aa2..8f71507 100644
14109 --- a/arch/x86/kernel/cpu/Makefile
14110 +++ b/arch/x86/kernel/cpu/Makefile
14111 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14112 CFLAGS_REMOVE_perf_event.o = -pg
14113 endif
14114
14115 -# Make sure load_percpu_segment has no stackprotector
14116 -nostackp := $(call cc-option, -fno-stack-protector)
14117 -CFLAGS_common.o := $(nostackp)
14118 -
14119 obj-y := intel_cacheinfo.o scattered.o topology.o
14120 obj-y += proc.o capflags.o powerflags.o common.o
14121 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14122 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14123 index 146bb62..ac9c74a 100644
14124 --- a/arch/x86/kernel/cpu/amd.c
14125 +++ b/arch/x86/kernel/cpu/amd.c
14126 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14127 unsigned int size)
14128 {
14129 /* AMD errata T13 (order #21922) */
14130 - if ((c->x86 == 6)) {
14131 + if (c->x86 == 6) {
14132 /* Duron Rev A0 */
14133 if (c->x86_model == 3 && c->x86_mask == 0)
14134 size = 64;
14135 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14136 index cf79302..b1b28ae 100644
14137 --- a/arch/x86/kernel/cpu/common.c
14138 +++ b/arch/x86/kernel/cpu/common.c
14139 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14140
14141 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14142
14143 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14144 -#ifdef CONFIG_X86_64
14145 - /*
14146 - * We need valid kernel segments for data and code in long mode too
14147 - * IRET will check the segment types kkeil 2000/10/28
14148 - * Also sysret mandates a special GDT layout
14149 - *
14150 - * TLS descriptors are currently at a different place compared to i386.
14151 - * Hopefully nobody expects them at a fixed place (Wine?)
14152 - */
14153 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14154 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14155 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14156 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14157 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14158 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14159 -#else
14160 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14161 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14162 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14163 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14164 - /*
14165 - * Segments used for calling PnP BIOS have byte granularity.
14166 - * They code segments and data segments have fixed 64k limits,
14167 - * the transfer segment sizes are set at run time.
14168 - */
14169 - /* 32-bit code */
14170 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14171 - /* 16-bit code */
14172 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14173 - /* 16-bit data */
14174 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14175 - /* 16-bit data */
14176 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14177 - /* 16-bit data */
14178 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14179 - /*
14180 - * The APM segments have byte granularity and their bases
14181 - * are set at run time. All have 64k limits.
14182 - */
14183 - /* 32-bit code */
14184 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14185 - /* 16-bit code */
14186 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14187 - /* data */
14188 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14189 -
14190 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14191 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14192 - GDT_STACK_CANARY_INIT
14193 -#endif
14194 -} };
14195 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14196 -
14197 static int __init x86_xsave_setup(char *s)
14198 {
14199 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14200 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14201 {
14202 struct desc_ptr gdt_descr;
14203
14204 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14205 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14206 gdt_descr.size = GDT_SIZE - 1;
14207 load_gdt(&gdt_descr);
14208 /* Reload the per-cpu base */
14209 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14210 /* Filter out anything that depends on CPUID levels we don't have */
14211 filter_cpuid_features(c, true);
14212
14213 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14214 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14215 +#endif
14216 +
14217 /* If the model name is still unset, do table lookup. */
14218 if (!c->x86_model_id[0]) {
14219 const char *p;
14220 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14221 }
14222 __setup("clearcpuid=", setup_disablecpuid);
14223
14224 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14225 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14226 +
14227 #ifdef CONFIG_X86_64
14228 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14229 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14230 - (unsigned long) nmi_idt_table };
14231 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14232
14233 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14234 irq_stack_union) __aligned(PAGE_SIZE);
14235 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14236 EXPORT_PER_CPU_SYMBOL(current_task);
14237
14238 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14239 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14240 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14241 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14242
14243 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14244 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14245 {
14246 memset(regs, 0, sizeof(struct pt_regs));
14247 regs->fs = __KERNEL_PERCPU;
14248 - regs->gs = __KERNEL_STACK_CANARY;
14249 + savesegment(gs, regs->gs);
14250
14251 return regs;
14252 }
14253 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14254 int i;
14255
14256 cpu = stack_smp_processor_id();
14257 - t = &per_cpu(init_tss, cpu);
14258 + t = init_tss + cpu;
14259 oist = &per_cpu(orig_ist, cpu);
14260
14261 #ifdef CONFIG_NUMA
14262 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14263 switch_to_new_gdt(cpu);
14264 loadsegment(fs, 0);
14265
14266 - load_idt((const struct desc_ptr *)&idt_descr);
14267 + load_idt(&idt_descr);
14268
14269 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14270 syscall_init();
14271 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14272 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14273 barrier();
14274
14275 - x86_configure_nx();
14276 if (cpu != 0)
14277 enable_x2apic();
14278
14279 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14280 {
14281 int cpu = smp_processor_id();
14282 struct task_struct *curr = current;
14283 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14284 + struct tss_struct *t = init_tss + cpu;
14285 struct thread_struct *thread = &curr->thread;
14286
14287 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14288 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14289 index 3e6ff6c..54b4992 100644
14290 --- a/arch/x86/kernel/cpu/intel.c
14291 +++ b/arch/x86/kernel/cpu/intel.c
14292 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14293 * Update the IDT descriptor and reload the IDT so that
14294 * it uses the read-only mapped virtual address.
14295 */
14296 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14297 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14298 load_idt(&idt_descr);
14299 }
14300 #endif
14301 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14302 index 61604ae..98250a5 100644
14303 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14304 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14305 @@ -42,6 +42,7 @@
14306 #include <asm/processor.h>
14307 #include <asm/mce.h>
14308 #include <asm/msr.h>
14309 +#include <asm/local.h>
14310
14311 #include "mce-internal.h"
14312
14313 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14314 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14315 m->cs, m->ip);
14316
14317 - if (m->cs == __KERNEL_CS)
14318 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14319 print_symbol("{%s}", m->ip);
14320 pr_cont("\n");
14321 }
14322 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14323
14324 #define PANIC_TIMEOUT 5 /* 5 seconds */
14325
14326 -static atomic_t mce_paniced;
14327 +static atomic_unchecked_t mce_paniced;
14328
14329 static int fake_panic;
14330 -static atomic_t mce_fake_paniced;
14331 +static atomic_unchecked_t mce_fake_paniced;
14332
14333 /* Panic in progress. Enable interrupts and wait for final IPI */
14334 static void wait_for_panic(void)
14335 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14336 /*
14337 * Make sure only one CPU runs in machine check panic
14338 */
14339 - if (atomic_inc_return(&mce_paniced) > 1)
14340 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14341 wait_for_panic();
14342 barrier();
14343
14344 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14345 console_verbose();
14346 } else {
14347 /* Don't log too much for fake panic */
14348 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14349 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14350 return;
14351 }
14352 /* First print corrected ones that are still unlogged */
14353 @@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14354 * might have been modified by someone else.
14355 */
14356 rmb();
14357 - if (atomic_read(&mce_paniced))
14358 + if (atomic_read_unchecked(&mce_paniced))
14359 wait_for_panic();
14360 if (!monarch_timeout)
14361 goto out;
14362 @@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14363 }
14364
14365 /* Call the installed machine check handler for this CPU setup. */
14366 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14367 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14368 unexpected_machine_check;
14369
14370 /*
14371 @@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14372 return;
14373 }
14374
14375 + pax_open_kernel();
14376 machine_check_vector = do_machine_check;
14377 + pax_close_kernel();
14378
14379 __mcheck_cpu_init_generic();
14380 __mcheck_cpu_init_vendor(c);
14381 @@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14382 */
14383
14384 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14385 -static int mce_chrdev_open_count; /* #times opened */
14386 +static local_t mce_chrdev_open_count; /* #times opened */
14387 static int mce_chrdev_open_exclu; /* already open exclusive? */
14388
14389 static int mce_chrdev_open(struct inode *inode, struct file *file)
14390 @@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14391 spin_lock(&mce_chrdev_state_lock);
14392
14393 if (mce_chrdev_open_exclu ||
14394 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14395 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14396 spin_unlock(&mce_chrdev_state_lock);
14397
14398 return -EBUSY;
14399 @@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14400
14401 if (file->f_flags & O_EXCL)
14402 mce_chrdev_open_exclu = 1;
14403 - mce_chrdev_open_count++;
14404 + local_inc(&mce_chrdev_open_count);
14405
14406 spin_unlock(&mce_chrdev_state_lock);
14407
14408 @@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14409 {
14410 spin_lock(&mce_chrdev_state_lock);
14411
14412 - mce_chrdev_open_count--;
14413 + local_dec(&mce_chrdev_open_count);
14414 mce_chrdev_open_exclu = 0;
14415
14416 spin_unlock(&mce_chrdev_state_lock);
14417 @@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14418 static void mce_reset(void)
14419 {
14420 cpu_missing = 0;
14421 - atomic_set(&mce_fake_paniced, 0);
14422 + atomic_set_unchecked(&mce_fake_paniced, 0);
14423 atomic_set(&mce_executing, 0);
14424 atomic_set(&mce_callin, 0);
14425 atomic_set(&global_nwo, 0);
14426 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14427 index 2d5454c..51987eb 100644
14428 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14429 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14430 @@ -11,6 +11,7 @@
14431 #include <asm/processor.h>
14432 #include <asm/mce.h>
14433 #include <asm/msr.h>
14434 +#include <asm/pgtable.h>
14435
14436 /* By default disabled */
14437 int mce_p5_enabled __read_mostly;
14438 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14439 if (!cpu_has(c, X86_FEATURE_MCE))
14440 return;
14441
14442 + pax_open_kernel();
14443 machine_check_vector = pentium_machine_check;
14444 + pax_close_kernel();
14445 /* Make sure the vector pointer is visible before we enable MCEs: */
14446 wmb();
14447
14448 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14449 index 2d7998f..17c9de1 100644
14450 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14451 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14452 @@ -10,6 +10,7 @@
14453 #include <asm/processor.h>
14454 #include <asm/mce.h>
14455 #include <asm/msr.h>
14456 +#include <asm/pgtable.h>
14457
14458 /* Machine check handler for WinChip C6: */
14459 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14460 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14461 {
14462 u32 lo, hi;
14463
14464 + pax_open_kernel();
14465 machine_check_vector = winchip_machine_check;
14466 + pax_close_kernel();
14467 /* Make sure the vector pointer is visible before we enable MCEs: */
14468 wmb();
14469
14470 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14471 index 6b96110..0da73eb 100644
14472 --- a/arch/x86/kernel/cpu/mtrr/main.c
14473 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14474 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14475 u64 size_or_mask, size_and_mask;
14476 static bool mtrr_aps_delayed_init;
14477
14478 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14479 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14480
14481 const struct mtrr_ops *mtrr_if;
14482
14483 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14484 index df5e41f..816c719 100644
14485 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14486 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14487 @@ -25,7 +25,7 @@ struct mtrr_ops {
14488 int (*validate_add_page)(unsigned long base, unsigned long size,
14489 unsigned int type);
14490 int (*have_wrcomb)(void);
14491 -};
14492 +} __do_const;
14493
14494 extern int generic_get_free_region(unsigned long base, unsigned long size,
14495 int replace_reg);
14496 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14497 index bb8e034..fb9020b 100644
14498 --- a/arch/x86/kernel/cpu/perf_event.c
14499 +++ b/arch/x86/kernel/cpu/perf_event.c
14500 @@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14501 break;
14502
14503 perf_callchain_store(entry, frame.return_address);
14504 - fp = frame.next_frame;
14505 + fp = (const void __force_user *)frame.next_frame;
14506 }
14507 }
14508
14509 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14510 index 13ad899..f642b9a 100644
14511 --- a/arch/x86/kernel/crash.c
14512 +++ b/arch/x86/kernel/crash.c
14513 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14514 {
14515 #ifdef CONFIG_X86_32
14516 struct pt_regs fixed_regs;
14517 -#endif
14518
14519 -#ifdef CONFIG_X86_32
14520 - if (!user_mode_vm(regs)) {
14521 + if (!user_mode(regs)) {
14522 crash_fixup_ss_esp(&fixed_regs, regs);
14523 regs = &fixed_regs;
14524 }
14525 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14526 index 37250fe..bf2ec74 100644
14527 --- a/arch/x86/kernel/doublefault_32.c
14528 +++ b/arch/x86/kernel/doublefault_32.c
14529 @@ -11,7 +11,7 @@
14530
14531 #define DOUBLEFAULT_STACKSIZE (1024)
14532 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14533 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14534 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14535
14536 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14537
14538 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14539 unsigned long gdt, tss;
14540
14541 store_gdt(&gdt_desc);
14542 - gdt = gdt_desc.address;
14543 + gdt = (unsigned long)gdt_desc.address;
14544
14545 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14546
14547 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14548 /* 0x2 bit is always set */
14549 .flags = X86_EFLAGS_SF | 0x2,
14550 .sp = STACK_START,
14551 - .es = __USER_DS,
14552 + .es = __KERNEL_DS,
14553 .cs = __KERNEL_CS,
14554 .ss = __KERNEL_DS,
14555 - .ds = __USER_DS,
14556 + .ds = __KERNEL_DS,
14557 .fs = __KERNEL_PERCPU,
14558
14559 .__cr3 = __pa_nodebug(swapper_pg_dir),
14560 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14561 index 1b81839..0b4e7b0 100644
14562 --- a/arch/x86/kernel/dumpstack.c
14563 +++ b/arch/x86/kernel/dumpstack.c
14564 @@ -2,6 +2,9 @@
14565 * Copyright (C) 1991, 1992 Linus Torvalds
14566 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14567 */
14568 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14569 +#define __INCLUDED_BY_HIDESYM 1
14570 +#endif
14571 #include <linux/kallsyms.h>
14572 #include <linux/kprobes.h>
14573 #include <linux/uaccess.h>
14574 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14575 static void
14576 print_ftrace_graph_addr(unsigned long addr, void *data,
14577 const struct stacktrace_ops *ops,
14578 - struct thread_info *tinfo, int *graph)
14579 + struct task_struct *task, int *graph)
14580 {
14581 - struct task_struct *task;
14582 unsigned long ret_addr;
14583 int index;
14584
14585 if (addr != (unsigned long)return_to_handler)
14586 return;
14587
14588 - task = tinfo->task;
14589 index = task->curr_ret_stack;
14590
14591 if (!task->ret_stack || index < *graph)
14592 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14593 static inline void
14594 print_ftrace_graph_addr(unsigned long addr, void *data,
14595 const struct stacktrace_ops *ops,
14596 - struct thread_info *tinfo, int *graph)
14597 + struct task_struct *task, int *graph)
14598 { }
14599 #endif
14600
14601 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14602 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14603 */
14604
14605 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14606 - void *p, unsigned int size, void *end)
14607 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14608 {
14609 - void *t = tinfo;
14610 if (end) {
14611 if (p < end && p >= (end-THREAD_SIZE))
14612 return 1;
14613 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14614 }
14615
14616 unsigned long
14617 -print_context_stack(struct thread_info *tinfo,
14618 +print_context_stack(struct task_struct *task, void *stack_start,
14619 unsigned long *stack, unsigned long bp,
14620 const struct stacktrace_ops *ops, void *data,
14621 unsigned long *end, int *graph)
14622 {
14623 struct stack_frame *frame = (struct stack_frame *)bp;
14624
14625 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14626 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14627 unsigned long addr;
14628
14629 addr = *stack;
14630 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14631 } else {
14632 ops->address(data, addr, 0);
14633 }
14634 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14635 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14636 }
14637 stack++;
14638 }
14639 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14640 EXPORT_SYMBOL_GPL(print_context_stack);
14641
14642 unsigned long
14643 -print_context_stack_bp(struct thread_info *tinfo,
14644 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14645 unsigned long *stack, unsigned long bp,
14646 const struct stacktrace_ops *ops, void *data,
14647 unsigned long *end, int *graph)
14648 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14649 struct stack_frame *frame = (struct stack_frame *)bp;
14650 unsigned long *ret_addr = &frame->return_address;
14651
14652 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14653 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14654 unsigned long addr = *ret_addr;
14655
14656 if (!__kernel_text_address(addr))
14657 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14658 ops->address(data, addr, 1);
14659 frame = frame->next_frame;
14660 ret_addr = &frame->return_address;
14661 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14662 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14663 }
14664
14665 return (unsigned long)frame;
14666 @@ -189,7 +188,7 @@ void dump_stack(void)
14667
14668 bp = stack_frame(current, NULL);
14669 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14670 - current->pid, current->comm, print_tainted(),
14671 + task_pid_nr(current), current->comm, print_tainted(),
14672 init_utsname()->release,
14673 (int)strcspn(init_utsname()->version, " "),
14674 init_utsname()->version);
14675 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14676 }
14677 EXPORT_SYMBOL_GPL(oops_begin);
14678
14679 +extern void gr_handle_kernel_exploit(void);
14680 +
14681 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14682 {
14683 if (regs && kexec_should_crash(current))
14684 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 panic("Fatal exception in interrupt");
14686 if (panic_on_oops)
14687 panic("Fatal exception");
14688 - do_exit(signr);
14689 +
14690 + gr_handle_kernel_exploit();
14691 +
14692 + do_group_exit(signr);
14693 }
14694
14695 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14696 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14697
14698 show_registers(regs);
14699 #ifdef CONFIG_X86_32
14700 - if (user_mode_vm(regs)) {
14701 + if (user_mode(regs)) {
14702 sp = regs->sp;
14703 ss = regs->ss & 0xffff;
14704 } else {
14705 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14706 unsigned long flags = oops_begin();
14707 int sig = SIGSEGV;
14708
14709 - if (!user_mode_vm(regs))
14710 + if (!user_mode(regs))
14711 report_bug(regs->ip, regs);
14712
14713 if (__die(str, regs, err))
14714 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14715 index 88ec912..e95e935 100644
14716 --- a/arch/x86/kernel/dumpstack_32.c
14717 +++ b/arch/x86/kernel/dumpstack_32.c
14718 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14719 bp = stack_frame(task, regs);
14720
14721 for (;;) {
14722 - struct thread_info *context;
14723 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14724
14725 - context = (struct thread_info *)
14726 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14727 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14728 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14729
14730 - stack = (unsigned long *)context->previous_esp;
14731 - if (!stack)
14732 + if (stack_start == task_stack_page(task))
14733 break;
14734 + stack = *(unsigned long **)stack_start;
14735 if (ops->stack(data, "IRQ") < 0)
14736 break;
14737 touch_nmi_watchdog();
14738 @@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14739 int i;
14740
14741 print_modules();
14742 - __show_regs(regs, !user_mode_vm(regs));
14743 + __show_regs(regs, !user_mode(regs));
14744
14745 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14746 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14747 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14748 * When in-kernel, we also print out the stack and code at the
14749 * time of the fault..
14750 */
14751 - if (!user_mode_vm(regs)) {
14752 + if (!user_mode(regs)) {
14753 unsigned int code_prologue = code_bytes * 43 / 64;
14754 unsigned int code_len = code_bytes;
14755 unsigned char c;
14756 u8 *ip;
14757 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14758
14759 printk(KERN_EMERG "Stack:\n");
14760 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14761
14762 printk(KERN_EMERG "Code: ");
14763
14764 - ip = (u8 *)regs->ip - code_prologue;
14765 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14766 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14767 /* try starting at IP */
14768 - ip = (u8 *)regs->ip;
14769 + ip = (u8 *)regs->ip + cs_base;
14770 code_len = code_len - code_prologue + 1;
14771 }
14772 for (i = 0; i < code_len; i++, ip++) {
14773 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14774 printk(KERN_CONT " Bad EIP value.");
14775 break;
14776 }
14777 - if (ip == (u8 *)regs->ip)
14778 + if (ip == (u8 *)regs->ip + cs_base)
14779 printk(KERN_CONT "<%02x> ", c);
14780 else
14781 printk(KERN_CONT "%02x ", c);
14782 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14783 {
14784 unsigned short ud2;
14785
14786 + ip = ktla_ktva(ip);
14787 if (ip < PAGE_OFFSET)
14788 return 0;
14789 if (probe_kernel_address((unsigned short *)ip, ud2))
14790 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14791
14792 return ud2 == 0x0b0f;
14793 }
14794 +
14795 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14796 +void pax_check_alloca(unsigned long size)
14797 +{
14798 + unsigned long sp = (unsigned long)&sp, stack_left;
14799 +
14800 + /* all kernel stacks are of the same size */
14801 + stack_left = sp & (THREAD_SIZE - 1);
14802 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14803 +}
14804 +EXPORT_SYMBOL(pax_check_alloca);
14805 +#endif
14806 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14807 index 17107bd..9623722 100644
14808 --- a/arch/x86/kernel/dumpstack_64.c
14809 +++ b/arch/x86/kernel/dumpstack_64.c
14810 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14811 unsigned long *irq_stack_end =
14812 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14813 unsigned used = 0;
14814 - struct thread_info *tinfo;
14815 int graph = 0;
14816 unsigned long dummy;
14817 + void *stack_start;
14818
14819 if (!task)
14820 task = current;
14821 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14822 * current stack address. If the stacks consist of nested
14823 * exceptions
14824 */
14825 - tinfo = task_thread_info(task);
14826 for (;;) {
14827 char *id;
14828 unsigned long *estack_end;
14829 +
14830 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14831 &used, &id);
14832
14833 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14834 if (ops->stack(data, id) < 0)
14835 break;
14836
14837 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14838 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14839 data, estack_end, &graph);
14840 ops->stack(data, "<EOE>");
14841 /*
14842 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14843 * second-to-last pointer (index -2 to end) in the
14844 * exception stack:
14845 */
14846 + if ((u16)estack_end[-1] != __KERNEL_DS)
14847 + goto out;
14848 stack = (unsigned long *) estack_end[-2];
14849 continue;
14850 }
14851 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14852 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14853 if (ops->stack(data, "IRQ") < 0)
14854 break;
14855 - bp = ops->walk_stack(tinfo, stack, bp,
14856 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14857 ops, data, irq_stack_end, &graph);
14858 /*
14859 * We link to the next stack (which would be
14860 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14861 /*
14862 * This handles the process stack:
14863 */
14864 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14865 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14866 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14867 +out:
14868 put_cpu();
14869 }
14870 EXPORT_SYMBOL(dump_trace);
14871 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14872
14873 return ud2 == 0x0b0f;
14874 }
14875 +
14876 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14877 +void pax_check_alloca(unsigned long size)
14878 +{
14879 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14880 + unsigned cpu, used;
14881 + char *id;
14882 +
14883 + /* check the process stack first */
14884 + stack_start = (unsigned long)task_stack_page(current);
14885 + stack_end = stack_start + THREAD_SIZE;
14886 + if (likely(stack_start <= sp && sp < stack_end)) {
14887 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14888 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14889 + return;
14890 + }
14891 +
14892 + cpu = get_cpu();
14893 +
14894 + /* check the irq stacks */
14895 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14896 + stack_start = stack_end - IRQ_STACK_SIZE;
14897 + if (stack_start <= sp && sp < stack_end) {
14898 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14899 + put_cpu();
14900 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14901 + return;
14902 + }
14903 +
14904 + /* check the exception stacks */
14905 + used = 0;
14906 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14907 + stack_start = stack_end - EXCEPTION_STKSZ;
14908 + if (stack_end && stack_start <= sp && sp < stack_end) {
14909 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14910 + put_cpu();
14911 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14912 + return;
14913 + }
14914 +
14915 + put_cpu();
14916 +
14917 + /* unknown stack */
14918 + BUG();
14919 +}
14920 +EXPORT_SYMBOL(pax_check_alloca);
14921 +#endif
14922 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14923 index 9b9f18b..9fcaa04 100644
14924 --- a/arch/x86/kernel/early_printk.c
14925 +++ b/arch/x86/kernel/early_printk.c
14926 @@ -7,6 +7,7 @@
14927 #include <linux/pci_regs.h>
14928 #include <linux/pci_ids.h>
14929 #include <linux/errno.h>
14930 +#include <linux/sched.h>
14931 #include <asm/io.h>
14932 #include <asm/processor.h>
14933 #include <asm/fcntl.h>
14934 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14935 index 7b784f4..db6b628 100644
14936 --- a/arch/x86/kernel/entry_32.S
14937 +++ b/arch/x86/kernel/entry_32.S
14938 @@ -179,13 +179,146 @@
14939 /*CFI_REL_OFFSET gs, PT_GS*/
14940 .endm
14941 .macro SET_KERNEL_GS reg
14942 +
14943 +#ifdef CONFIG_CC_STACKPROTECTOR
14944 movl $(__KERNEL_STACK_CANARY), \reg
14945 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14946 + movl $(__USER_DS), \reg
14947 +#else
14948 + xorl \reg, \reg
14949 +#endif
14950 +
14951 movl \reg, %gs
14952 .endm
14953
14954 #endif /* CONFIG_X86_32_LAZY_GS */
14955
14956 -.macro SAVE_ALL
14957 +.macro pax_enter_kernel
14958 +#ifdef CONFIG_PAX_KERNEXEC
14959 + call pax_enter_kernel
14960 +#endif
14961 +.endm
14962 +
14963 +.macro pax_exit_kernel
14964 +#ifdef CONFIG_PAX_KERNEXEC
14965 + call pax_exit_kernel
14966 +#endif
14967 +.endm
14968 +
14969 +#ifdef CONFIG_PAX_KERNEXEC
14970 +ENTRY(pax_enter_kernel)
14971 +#ifdef CONFIG_PARAVIRT
14972 + pushl %eax
14973 + pushl %ecx
14974 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14975 + mov %eax, %esi
14976 +#else
14977 + mov %cr0, %esi
14978 +#endif
14979 + bts $16, %esi
14980 + jnc 1f
14981 + mov %cs, %esi
14982 + cmp $__KERNEL_CS, %esi
14983 + jz 3f
14984 + ljmp $__KERNEL_CS, $3f
14985 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14986 +2:
14987 +#ifdef CONFIG_PARAVIRT
14988 + mov %esi, %eax
14989 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14990 +#else
14991 + mov %esi, %cr0
14992 +#endif
14993 +3:
14994 +#ifdef CONFIG_PARAVIRT
14995 + popl %ecx
14996 + popl %eax
14997 +#endif
14998 + ret
14999 +ENDPROC(pax_enter_kernel)
15000 +
15001 +ENTRY(pax_exit_kernel)
15002 +#ifdef CONFIG_PARAVIRT
15003 + pushl %eax
15004 + pushl %ecx
15005 +#endif
15006 + mov %cs, %esi
15007 + cmp $__KERNEXEC_KERNEL_CS, %esi
15008 + jnz 2f
15009 +#ifdef CONFIG_PARAVIRT
15010 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15011 + mov %eax, %esi
15012 +#else
15013 + mov %cr0, %esi
15014 +#endif
15015 + btr $16, %esi
15016 + ljmp $__KERNEL_CS, $1f
15017 +1:
15018 +#ifdef CONFIG_PARAVIRT
15019 + mov %esi, %eax
15020 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15021 +#else
15022 + mov %esi, %cr0
15023 +#endif
15024 +2:
15025 +#ifdef CONFIG_PARAVIRT
15026 + popl %ecx
15027 + popl %eax
15028 +#endif
15029 + ret
15030 +ENDPROC(pax_exit_kernel)
15031 +#endif
15032 +
15033 +.macro pax_erase_kstack
15034 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15035 + call pax_erase_kstack
15036 +#endif
15037 +.endm
15038 +
15039 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15040 +/*
15041 + * ebp: thread_info
15042 + * ecx, edx: can be clobbered
15043 + */
15044 +ENTRY(pax_erase_kstack)
15045 + pushl %edi
15046 + pushl %eax
15047 +
15048 + mov TI_lowest_stack(%ebp), %edi
15049 + mov $-0xBEEF, %eax
15050 + std
15051 +
15052 +1: mov %edi, %ecx
15053 + and $THREAD_SIZE_asm - 1, %ecx
15054 + shr $2, %ecx
15055 + repne scasl
15056 + jecxz 2f
15057 +
15058 + cmp $2*16, %ecx
15059 + jc 2f
15060 +
15061 + mov $2*16, %ecx
15062 + repe scasl
15063 + jecxz 2f
15064 + jne 1b
15065 +
15066 +2: cld
15067 + mov %esp, %ecx
15068 + sub %edi, %ecx
15069 + shr $2, %ecx
15070 + rep stosl
15071 +
15072 + mov TI_task_thread_sp0(%ebp), %edi
15073 + sub $128, %edi
15074 + mov %edi, TI_lowest_stack(%ebp)
15075 +
15076 + popl %eax
15077 + popl %edi
15078 + ret
15079 +ENDPROC(pax_erase_kstack)
15080 +#endif
15081 +
15082 +.macro __SAVE_ALL _DS
15083 cld
15084 PUSH_GS
15085 pushl_cfi %fs
15086 @@ -208,7 +341,7 @@
15087 CFI_REL_OFFSET ecx, 0
15088 pushl_cfi %ebx
15089 CFI_REL_OFFSET ebx, 0
15090 - movl $(__USER_DS), %edx
15091 + movl $\_DS, %edx
15092 movl %edx, %ds
15093 movl %edx, %es
15094 movl $(__KERNEL_PERCPU), %edx
15095 @@ -216,6 +349,15 @@
15096 SET_KERNEL_GS %edx
15097 .endm
15098
15099 +.macro SAVE_ALL
15100 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15101 + __SAVE_ALL __KERNEL_DS
15102 + pax_enter_kernel
15103 +#else
15104 + __SAVE_ALL __USER_DS
15105 +#endif
15106 +.endm
15107 +
15108 .macro RESTORE_INT_REGS
15109 popl_cfi %ebx
15110 CFI_RESTORE ebx
15111 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15112 popfl_cfi
15113 jmp syscall_exit
15114 CFI_ENDPROC
15115 -END(ret_from_fork)
15116 +ENDPROC(ret_from_fork)
15117
15118 /*
15119 * Interrupt exit functions should be protected against kprobes
15120 @@ -335,7 +477,15 @@ resume_userspace_sig:
15121 andl $SEGMENT_RPL_MASK, %eax
15122 #endif
15123 cmpl $USER_RPL, %eax
15124 +
15125 +#ifdef CONFIG_PAX_KERNEXEC
15126 + jae resume_userspace
15127 +
15128 + pax_exit_kernel
15129 + jmp resume_kernel
15130 +#else
15131 jb resume_kernel # not returning to v8086 or userspace
15132 +#endif
15133
15134 ENTRY(resume_userspace)
15135 LOCKDEP_SYS_EXIT
15136 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15137 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15138 # int/exception return?
15139 jne work_pending
15140 - jmp restore_all
15141 -END(ret_from_exception)
15142 + jmp restore_all_pax
15143 +ENDPROC(ret_from_exception)
15144
15145 #ifdef CONFIG_PREEMPT
15146 ENTRY(resume_kernel)
15147 @@ -363,7 +513,7 @@ need_resched:
15148 jz restore_all
15149 call preempt_schedule_irq
15150 jmp need_resched
15151 -END(resume_kernel)
15152 +ENDPROC(resume_kernel)
15153 #endif
15154 CFI_ENDPROC
15155 /*
15156 @@ -397,23 +547,34 @@ sysenter_past_esp:
15157 /*CFI_REL_OFFSET cs, 0*/
15158 /*
15159 * Push current_thread_info()->sysenter_return to the stack.
15160 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15161 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15162 */
15163 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15164 + pushl_cfi $0
15165 CFI_REL_OFFSET eip, 0
15166
15167 pushl_cfi %eax
15168 SAVE_ALL
15169 + GET_THREAD_INFO(%ebp)
15170 + movl TI_sysenter_return(%ebp),%ebp
15171 + movl %ebp,PT_EIP(%esp)
15172 ENABLE_INTERRUPTS(CLBR_NONE)
15173
15174 /*
15175 * Load the potential sixth argument from user stack.
15176 * Careful about security.
15177 */
15178 + movl PT_OLDESP(%esp),%ebp
15179 +
15180 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15181 + mov PT_OLDSS(%esp),%ds
15182 +1: movl %ds:(%ebp),%ebp
15183 + push %ss
15184 + pop %ds
15185 +#else
15186 cmpl $__PAGE_OFFSET-3,%ebp
15187 jae syscall_fault
15188 1: movl (%ebp),%ebp
15189 +#endif
15190 +
15191 movl %ebp,PT_EBP(%esp)
15192 .section __ex_table,"a"
15193 .align 4
15194 @@ -436,12 +597,24 @@ sysenter_do_call:
15195 testl $_TIF_ALLWORK_MASK, %ecx
15196 jne sysexit_audit
15197 sysenter_exit:
15198 +
15199 +#ifdef CONFIG_PAX_RANDKSTACK
15200 + pushl_cfi %eax
15201 + movl %esp, %eax
15202 + call pax_randomize_kstack
15203 + popl_cfi %eax
15204 +#endif
15205 +
15206 + pax_erase_kstack
15207 +
15208 /* if something modifies registers it must also disable sysexit */
15209 movl PT_EIP(%esp), %edx
15210 movl PT_OLDESP(%esp), %ecx
15211 xorl %ebp,%ebp
15212 TRACE_IRQS_ON
15213 1: mov PT_FS(%esp), %fs
15214 +2: mov PT_DS(%esp), %ds
15215 +3: mov PT_ES(%esp), %es
15216 PTGS_TO_GS
15217 ENABLE_INTERRUPTS_SYSEXIT
15218
15219 @@ -458,6 +631,9 @@ sysenter_audit:
15220 movl %eax,%edx /* 2nd arg: syscall number */
15221 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15222 call __audit_syscall_entry
15223 +
15224 + pax_erase_kstack
15225 +
15226 pushl_cfi %ebx
15227 movl PT_EAX(%esp),%eax /* reload syscall number */
15228 jmp sysenter_do_call
15229 @@ -483,11 +659,17 @@ sysexit_audit:
15230
15231 CFI_ENDPROC
15232 .pushsection .fixup,"ax"
15233 -2: movl $0,PT_FS(%esp)
15234 +4: movl $0,PT_FS(%esp)
15235 + jmp 1b
15236 +5: movl $0,PT_DS(%esp)
15237 + jmp 1b
15238 +6: movl $0,PT_ES(%esp)
15239 jmp 1b
15240 .section __ex_table,"a"
15241 .align 4
15242 - .long 1b,2b
15243 + .long 1b,4b
15244 + .long 2b,5b
15245 + .long 3b,6b
15246 .popsection
15247 PTGS_TO_GS_EX
15248 ENDPROC(ia32_sysenter_target)
15249 @@ -520,6 +702,15 @@ syscall_exit:
15250 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15251 jne syscall_exit_work
15252
15253 +restore_all_pax:
15254 +
15255 +#ifdef CONFIG_PAX_RANDKSTACK
15256 + movl %esp, %eax
15257 + call pax_randomize_kstack
15258 +#endif
15259 +
15260 + pax_erase_kstack
15261 +
15262 restore_all:
15263 TRACE_IRQS_IRET
15264 restore_all_notrace:
15265 @@ -579,14 +770,34 @@ ldt_ss:
15266 * compensating for the offset by changing to the ESPFIX segment with
15267 * a base address that matches for the difference.
15268 */
15269 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15270 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15271 mov %esp, %edx /* load kernel esp */
15272 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15273 mov %dx, %ax /* eax: new kernel esp */
15274 sub %eax, %edx /* offset (low word is 0) */
15275 +#ifdef CONFIG_SMP
15276 + movl PER_CPU_VAR(cpu_number), %ebx
15277 + shll $PAGE_SHIFT_asm, %ebx
15278 + addl $cpu_gdt_table, %ebx
15279 +#else
15280 + movl $cpu_gdt_table, %ebx
15281 +#endif
15282 shr $16, %edx
15283 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15284 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15285 +
15286 +#ifdef CONFIG_PAX_KERNEXEC
15287 + mov %cr0, %esi
15288 + btr $16, %esi
15289 + mov %esi, %cr0
15290 +#endif
15291 +
15292 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15293 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15294 +
15295 +#ifdef CONFIG_PAX_KERNEXEC
15296 + bts $16, %esi
15297 + mov %esi, %cr0
15298 +#endif
15299 +
15300 pushl_cfi $__ESPFIX_SS
15301 pushl_cfi %eax /* new kernel esp */
15302 /* Disable interrupts, but do not irqtrace this section: we
15303 @@ -615,38 +826,30 @@ work_resched:
15304 movl TI_flags(%ebp), %ecx
15305 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15306 # than syscall tracing?
15307 - jz restore_all
15308 + jz restore_all_pax
15309 testb $_TIF_NEED_RESCHED, %cl
15310 jnz work_resched
15311
15312 work_notifysig: # deal with pending signals and
15313 # notify-resume requests
15314 + movl %esp, %eax
15315 #ifdef CONFIG_VM86
15316 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15317 - movl %esp, %eax
15318 - jne work_notifysig_v86 # returning to kernel-space or
15319 + jz 1f # returning to kernel-space or
15320 # vm86-space
15321 - TRACE_IRQS_ON
15322 - ENABLE_INTERRUPTS(CLBR_NONE)
15323 - xorl %edx, %edx
15324 - call do_notify_resume
15325 - jmp resume_userspace_sig
15326
15327 - ALIGN
15328 -work_notifysig_v86:
15329 pushl_cfi %ecx # save ti_flags for do_notify_resume
15330 call save_v86_state # %eax contains pt_regs pointer
15331 popl_cfi %ecx
15332 movl %eax, %esp
15333 -#else
15334 - movl %esp, %eax
15335 +1:
15336 #endif
15337 TRACE_IRQS_ON
15338 ENABLE_INTERRUPTS(CLBR_NONE)
15339 xorl %edx, %edx
15340 call do_notify_resume
15341 jmp resume_userspace_sig
15342 -END(work_pending)
15343 +ENDPROC(work_pending)
15344
15345 # perform syscall exit tracing
15346 ALIGN
15347 @@ -654,11 +857,14 @@ syscall_trace_entry:
15348 movl $-ENOSYS,PT_EAX(%esp)
15349 movl %esp, %eax
15350 call syscall_trace_enter
15351 +
15352 + pax_erase_kstack
15353 +
15354 /* What it returned is what we'll actually use. */
15355 cmpl $(NR_syscalls), %eax
15356 jnae syscall_call
15357 jmp syscall_exit
15358 -END(syscall_trace_entry)
15359 +ENDPROC(syscall_trace_entry)
15360
15361 # perform syscall exit tracing
15362 ALIGN
15363 @@ -671,20 +877,24 @@ syscall_exit_work:
15364 movl %esp, %eax
15365 call syscall_trace_leave
15366 jmp resume_userspace
15367 -END(syscall_exit_work)
15368 +ENDPROC(syscall_exit_work)
15369 CFI_ENDPROC
15370
15371 RING0_INT_FRAME # can't unwind into user space anyway
15372 syscall_fault:
15373 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15374 + push %ss
15375 + pop %ds
15376 +#endif
15377 GET_THREAD_INFO(%ebp)
15378 movl $-EFAULT,PT_EAX(%esp)
15379 jmp resume_userspace
15380 -END(syscall_fault)
15381 +ENDPROC(syscall_fault)
15382
15383 syscall_badsys:
15384 movl $-ENOSYS,PT_EAX(%esp)
15385 jmp resume_userspace
15386 -END(syscall_badsys)
15387 +ENDPROC(syscall_badsys)
15388 CFI_ENDPROC
15389 /*
15390 * End of kprobes section
15391 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15392 CFI_ENDPROC
15393 ENDPROC(ptregs_clone)
15394
15395 + ALIGN;
15396 +ENTRY(kernel_execve)
15397 + CFI_STARTPROC
15398 + pushl_cfi %ebp
15399 + sub $PT_OLDSS+4,%esp
15400 + pushl_cfi %edi
15401 + pushl_cfi %ecx
15402 + pushl_cfi %eax
15403 + lea 3*4(%esp),%edi
15404 + mov $PT_OLDSS/4+1,%ecx
15405 + xorl %eax,%eax
15406 + rep stosl
15407 + popl_cfi %eax
15408 + popl_cfi %ecx
15409 + popl_cfi %edi
15410 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15411 + pushl_cfi %esp
15412 + call sys_execve
15413 + add $4,%esp
15414 + CFI_ADJUST_CFA_OFFSET -4
15415 + GET_THREAD_INFO(%ebp)
15416 + test %eax,%eax
15417 + jz syscall_exit
15418 + add $PT_OLDSS+4,%esp
15419 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15420 + popl_cfi %ebp
15421 + ret
15422 + CFI_ENDPROC
15423 +ENDPROC(kernel_execve)
15424 +
15425 .macro FIXUP_ESPFIX_STACK
15426 /*
15427 * Switch back for ESPFIX stack to the normal zerobased stack
15428 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15429 * normal stack and adjusts ESP with the matching offset.
15430 */
15431 /* fixup the stack */
15432 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15433 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15434 +#ifdef CONFIG_SMP
15435 + movl PER_CPU_VAR(cpu_number), %ebx
15436 + shll $PAGE_SHIFT_asm, %ebx
15437 + addl $cpu_gdt_table, %ebx
15438 +#else
15439 + movl $cpu_gdt_table, %ebx
15440 +#endif
15441 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15442 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15443 shl $16, %eax
15444 addl %esp, %eax /* the adjusted stack pointer */
15445 pushl_cfi $__KERNEL_DS
15446 @@ -819,7 +1066,7 @@ vector=vector+1
15447 .endr
15448 2: jmp common_interrupt
15449 .endr
15450 -END(irq_entries_start)
15451 +ENDPROC(irq_entries_start)
15452
15453 .previous
15454 END(interrupt)
15455 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15456 pushl_cfi $do_coprocessor_error
15457 jmp error_code
15458 CFI_ENDPROC
15459 -END(coprocessor_error)
15460 +ENDPROC(coprocessor_error)
15461
15462 ENTRY(simd_coprocessor_error)
15463 RING0_INT_FRAME
15464 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15465 #endif
15466 jmp error_code
15467 CFI_ENDPROC
15468 -END(simd_coprocessor_error)
15469 +ENDPROC(simd_coprocessor_error)
15470
15471 ENTRY(device_not_available)
15472 RING0_INT_FRAME
15473 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15474 pushl_cfi $do_device_not_available
15475 jmp error_code
15476 CFI_ENDPROC
15477 -END(device_not_available)
15478 +ENDPROC(device_not_available)
15479
15480 #ifdef CONFIG_PARAVIRT
15481 ENTRY(native_iret)
15482 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15483 .align 4
15484 .long native_iret, iret_exc
15485 .previous
15486 -END(native_iret)
15487 +ENDPROC(native_iret)
15488
15489 ENTRY(native_irq_enable_sysexit)
15490 sti
15491 sysexit
15492 -END(native_irq_enable_sysexit)
15493 +ENDPROC(native_irq_enable_sysexit)
15494 #endif
15495
15496 ENTRY(overflow)
15497 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15498 pushl_cfi $do_overflow
15499 jmp error_code
15500 CFI_ENDPROC
15501 -END(overflow)
15502 +ENDPROC(overflow)
15503
15504 ENTRY(bounds)
15505 RING0_INT_FRAME
15506 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15507 pushl_cfi $do_bounds
15508 jmp error_code
15509 CFI_ENDPROC
15510 -END(bounds)
15511 +ENDPROC(bounds)
15512
15513 ENTRY(invalid_op)
15514 RING0_INT_FRAME
15515 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15516 pushl_cfi $do_invalid_op
15517 jmp error_code
15518 CFI_ENDPROC
15519 -END(invalid_op)
15520 +ENDPROC(invalid_op)
15521
15522 ENTRY(coprocessor_segment_overrun)
15523 RING0_INT_FRAME
15524 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15525 pushl_cfi $do_coprocessor_segment_overrun
15526 jmp error_code
15527 CFI_ENDPROC
15528 -END(coprocessor_segment_overrun)
15529 +ENDPROC(coprocessor_segment_overrun)
15530
15531 ENTRY(invalid_TSS)
15532 RING0_EC_FRAME
15533 pushl_cfi $do_invalid_TSS
15534 jmp error_code
15535 CFI_ENDPROC
15536 -END(invalid_TSS)
15537 +ENDPROC(invalid_TSS)
15538
15539 ENTRY(segment_not_present)
15540 RING0_EC_FRAME
15541 pushl_cfi $do_segment_not_present
15542 jmp error_code
15543 CFI_ENDPROC
15544 -END(segment_not_present)
15545 +ENDPROC(segment_not_present)
15546
15547 ENTRY(stack_segment)
15548 RING0_EC_FRAME
15549 pushl_cfi $do_stack_segment
15550 jmp error_code
15551 CFI_ENDPROC
15552 -END(stack_segment)
15553 +ENDPROC(stack_segment)
15554
15555 ENTRY(alignment_check)
15556 RING0_EC_FRAME
15557 pushl_cfi $do_alignment_check
15558 jmp error_code
15559 CFI_ENDPROC
15560 -END(alignment_check)
15561 +ENDPROC(alignment_check)
15562
15563 ENTRY(divide_error)
15564 RING0_INT_FRAME
15565 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15566 pushl_cfi $do_divide_error
15567 jmp error_code
15568 CFI_ENDPROC
15569 -END(divide_error)
15570 +ENDPROC(divide_error)
15571
15572 #ifdef CONFIG_X86_MCE
15573 ENTRY(machine_check)
15574 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15575 pushl_cfi machine_check_vector
15576 jmp error_code
15577 CFI_ENDPROC
15578 -END(machine_check)
15579 +ENDPROC(machine_check)
15580 #endif
15581
15582 ENTRY(spurious_interrupt_bug)
15583 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15584 pushl_cfi $do_spurious_interrupt_bug
15585 jmp error_code
15586 CFI_ENDPROC
15587 -END(spurious_interrupt_bug)
15588 +ENDPROC(spurious_interrupt_bug)
15589 /*
15590 * End of kprobes section
15591 */
15592 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15593
15594 ENTRY(mcount)
15595 ret
15596 -END(mcount)
15597 +ENDPROC(mcount)
15598
15599 ENTRY(ftrace_caller)
15600 cmpl $0, function_trace_stop
15601 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15602 .globl ftrace_stub
15603 ftrace_stub:
15604 ret
15605 -END(ftrace_caller)
15606 +ENDPROC(ftrace_caller)
15607
15608 #else /* ! CONFIG_DYNAMIC_FTRACE */
15609
15610 @@ -1177,7 +1424,7 @@ trace:
15611 popl %ecx
15612 popl %eax
15613 jmp ftrace_stub
15614 -END(mcount)
15615 +ENDPROC(mcount)
15616 #endif /* CONFIG_DYNAMIC_FTRACE */
15617 #endif /* CONFIG_FUNCTION_TRACER */
15618
15619 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15620 popl %ecx
15621 popl %eax
15622 ret
15623 -END(ftrace_graph_caller)
15624 +ENDPROC(ftrace_graph_caller)
15625
15626 .globl return_to_handler
15627 return_to_handler:
15628 @@ -1253,15 +1500,18 @@ error_code:
15629 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15630 REG_TO_PTGS %ecx
15631 SET_KERNEL_GS %ecx
15632 - movl $(__USER_DS), %ecx
15633 + movl $(__KERNEL_DS), %ecx
15634 movl %ecx, %ds
15635 movl %ecx, %es
15636 +
15637 + pax_enter_kernel
15638 +
15639 TRACE_IRQS_OFF
15640 movl %esp,%eax # pt_regs pointer
15641 call *%edi
15642 jmp ret_from_exception
15643 CFI_ENDPROC
15644 -END(page_fault)
15645 +ENDPROC(page_fault)
15646
15647 /*
15648 * Debug traps and NMI can happen at the one SYSENTER instruction
15649 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15650 call do_debug
15651 jmp ret_from_exception
15652 CFI_ENDPROC
15653 -END(debug)
15654 +ENDPROC(debug)
15655
15656 /*
15657 * NMI is doubly nasty. It can happen _while_ we're handling
15658 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15659 xorl %edx,%edx # zero error code
15660 movl %esp,%eax # pt_regs pointer
15661 call do_nmi
15662 +
15663 + pax_exit_kernel
15664 +
15665 jmp restore_all_notrace
15666 CFI_ENDPROC
15667
15668 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15669 FIXUP_ESPFIX_STACK # %eax == %esp
15670 xorl %edx,%edx # zero error code
15671 call do_nmi
15672 +
15673 + pax_exit_kernel
15674 +
15675 RESTORE_REGS
15676 lss 12+4(%esp), %esp # back to espfix stack
15677 CFI_ADJUST_CFA_OFFSET -24
15678 jmp irq_return
15679 CFI_ENDPROC
15680 -END(nmi)
15681 +ENDPROC(nmi)
15682
15683 ENTRY(int3)
15684 RING0_INT_FRAME
15685 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15686 call do_int3
15687 jmp ret_from_exception
15688 CFI_ENDPROC
15689 -END(int3)
15690 +ENDPROC(int3)
15691
15692 ENTRY(general_protection)
15693 RING0_EC_FRAME
15694 pushl_cfi $do_general_protection
15695 jmp error_code
15696 CFI_ENDPROC
15697 -END(general_protection)
15698 +ENDPROC(general_protection)
15699
15700 #ifdef CONFIG_KVM_GUEST
15701 ENTRY(async_page_fault)
15702 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15703 pushl_cfi $do_async_page_fault
15704 jmp error_code
15705 CFI_ENDPROC
15706 -END(async_page_fault)
15707 +ENDPROC(async_page_fault)
15708 #endif
15709
15710 /*
15711 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15712 index cdc79b5..4710a75 100644
15713 --- a/arch/x86/kernel/entry_64.S
15714 +++ b/arch/x86/kernel/entry_64.S
15715 @@ -56,6 +56,8 @@
15716 #include <asm/ftrace.h>
15717 #include <asm/percpu.h>
15718 #include <linux/err.h>
15719 +#include <asm/pgtable.h>
15720 +#include <asm/alternative-asm.h>
15721
15722 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15723 #include <linux/elf-em.h>
15724 @@ -69,8 +71,9 @@
15725 #ifdef CONFIG_FUNCTION_TRACER
15726 #ifdef CONFIG_DYNAMIC_FTRACE
15727 ENTRY(mcount)
15728 + pax_force_retaddr
15729 retq
15730 -END(mcount)
15731 +ENDPROC(mcount)
15732
15733 ENTRY(ftrace_caller)
15734 cmpl $0, function_trace_stop
15735 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15736 #endif
15737
15738 GLOBAL(ftrace_stub)
15739 + pax_force_retaddr
15740 retq
15741 -END(ftrace_caller)
15742 +ENDPROC(ftrace_caller)
15743
15744 #else /* ! CONFIG_DYNAMIC_FTRACE */
15745 ENTRY(mcount)
15746 @@ -113,6 +117,7 @@ ENTRY(mcount)
15747 #endif
15748
15749 GLOBAL(ftrace_stub)
15750 + pax_force_retaddr
15751 retq
15752
15753 trace:
15754 @@ -122,12 +127,13 @@ trace:
15755 movq 8(%rbp), %rsi
15756 subq $MCOUNT_INSN_SIZE, %rdi
15757
15758 + pax_force_fptr ftrace_trace_function
15759 call *ftrace_trace_function
15760
15761 MCOUNT_RESTORE_FRAME
15762
15763 jmp ftrace_stub
15764 -END(mcount)
15765 +ENDPROC(mcount)
15766 #endif /* CONFIG_DYNAMIC_FTRACE */
15767 #endif /* CONFIG_FUNCTION_TRACER */
15768
15769 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15770
15771 MCOUNT_RESTORE_FRAME
15772
15773 + pax_force_retaddr
15774 retq
15775 -END(ftrace_graph_caller)
15776 +ENDPROC(ftrace_graph_caller)
15777
15778 GLOBAL(return_to_handler)
15779 subq $24, %rsp
15780 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15781 movq 8(%rsp), %rdx
15782 movq (%rsp), %rax
15783 addq $24, %rsp
15784 + pax_force_fptr %rdi
15785 jmp *%rdi
15786 #endif
15787
15788 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15789 ENDPROC(native_usergs_sysret64)
15790 #endif /* CONFIG_PARAVIRT */
15791
15792 + .macro ljmpq sel, off
15793 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15794 + .byte 0x48; ljmp *1234f(%rip)
15795 + .pushsection .rodata
15796 + .align 16
15797 + 1234: .quad \off; .word \sel
15798 + .popsection
15799 +#else
15800 + pushq $\sel
15801 + pushq $\off
15802 + lretq
15803 +#endif
15804 + .endm
15805 +
15806 + .macro pax_enter_kernel
15807 + pax_set_fptr_mask
15808 +#ifdef CONFIG_PAX_KERNEXEC
15809 + call pax_enter_kernel
15810 +#endif
15811 + .endm
15812 +
15813 + .macro pax_exit_kernel
15814 +#ifdef CONFIG_PAX_KERNEXEC
15815 + call pax_exit_kernel
15816 +#endif
15817 + .endm
15818 +
15819 +#ifdef CONFIG_PAX_KERNEXEC
15820 +ENTRY(pax_enter_kernel)
15821 + pushq %rdi
15822 +
15823 +#ifdef CONFIG_PARAVIRT
15824 + PV_SAVE_REGS(CLBR_RDI)
15825 +#endif
15826 +
15827 + GET_CR0_INTO_RDI
15828 + bts $16,%rdi
15829 + jnc 3f
15830 + mov %cs,%edi
15831 + cmp $__KERNEL_CS,%edi
15832 + jnz 2f
15833 +1:
15834 +
15835 +#ifdef CONFIG_PARAVIRT
15836 + PV_RESTORE_REGS(CLBR_RDI)
15837 +#endif
15838 +
15839 + popq %rdi
15840 + pax_force_retaddr
15841 + retq
15842 +
15843 +2: ljmpq __KERNEL_CS,1f
15844 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15845 +4: SET_RDI_INTO_CR0
15846 + jmp 1b
15847 +ENDPROC(pax_enter_kernel)
15848 +
15849 +ENTRY(pax_exit_kernel)
15850 + pushq %rdi
15851 +
15852 +#ifdef CONFIG_PARAVIRT
15853 + PV_SAVE_REGS(CLBR_RDI)
15854 +#endif
15855 +
15856 + mov %cs,%rdi
15857 + cmp $__KERNEXEC_KERNEL_CS,%edi
15858 + jz 2f
15859 +1:
15860 +
15861 +#ifdef CONFIG_PARAVIRT
15862 + PV_RESTORE_REGS(CLBR_RDI);
15863 +#endif
15864 +
15865 + popq %rdi
15866 + pax_force_retaddr
15867 + retq
15868 +
15869 +2: GET_CR0_INTO_RDI
15870 + btr $16,%rdi
15871 + ljmpq __KERNEL_CS,3f
15872 +3: SET_RDI_INTO_CR0
15873 + jmp 1b
15874 +#ifdef CONFIG_PARAVIRT
15875 + PV_RESTORE_REGS(CLBR_RDI);
15876 +#endif
15877 +
15878 + popq %rdi
15879 + pax_force_retaddr
15880 + retq
15881 +ENDPROC(pax_exit_kernel)
15882 +#endif
15883 +
15884 + .macro pax_enter_kernel_user
15885 + pax_set_fptr_mask
15886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15887 + call pax_enter_kernel_user
15888 +#endif
15889 + .endm
15890 +
15891 + .macro pax_exit_kernel_user
15892 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15893 + call pax_exit_kernel_user
15894 +#endif
15895 +#ifdef CONFIG_PAX_RANDKSTACK
15896 + pushq %rax
15897 + call pax_randomize_kstack
15898 + popq %rax
15899 +#endif
15900 + .endm
15901 +
15902 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15903 +ENTRY(pax_enter_kernel_user)
15904 + pushq %rdi
15905 + pushq %rbx
15906 +
15907 +#ifdef CONFIG_PARAVIRT
15908 + PV_SAVE_REGS(CLBR_RDI)
15909 +#endif
15910 +
15911 + GET_CR3_INTO_RDI
15912 + mov %rdi,%rbx
15913 + add $__START_KERNEL_map,%rbx
15914 + sub phys_base(%rip),%rbx
15915 +
15916 +#ifdef CONFIG_PARAVIRT
15917 + pushq %rdi
15918 + cmpl $0, pv_info+PARAVIRT_enabled
15919 + jz 1f
15920 + i = 0
15921 + .rept USER_PGD_PTRS
15922 + mov i*8(%rbx),%rsi
15923 + mov $0,%sil
15924 + lea i*8(%rbx),%rdi
15925 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15926 + i = i + 1
15927 + .endr
15928 + jmp 2f
15929 +1:
15930 +#endif
15931 +
15932 + i = 0
15933 + .rept USER_PGD_PTRS
15934 + movb $0,i*8(%rbx)
15935 + i = i + 1
15936 + .endr
15937 +
15938 +#ifdef CONFIG_PARAVIRT
15939 +2: popq %rdi
15940 +#endif
15941 + SET_RDI_INTO_CR3
15942 +
15943 +#ifdef CONFIG_PAX_KERNEXEC
15944 + GET_CR0_INTO_RDI
15945 + bts $16,%rdi
15946 + SET_RDI_INTO_CR0
15947 +#endif
15948 +
15949 +#ifdef CONFIG_PARAVIRT
15950 + PV_RESTORE_REGS(CLBR_RDI)
15951 +#endif
15952 +
15953 + popq %rbx
15954 + popq %rdi
15955 + pax_force_retaddr
15956 + retq
15957 +ENDPROC(pax_enter_kernel_user)
15958 +
15959 +ENTRY(pax_exit_kernel_user)
15960 + push %rdi
15961 +
15962 +#ifdef CONFIG_PARAVIRT
15963 + pushq %rbx
15964 + PV_SAVE_REGS(CLBR_RDI)
15965 +#endif
15966 +
15967 +#ifdef CONFIG_PAX_KERNEXEC
15968 + GET_CR0_INTO_RDI
15969 + btr $16,%rdi
15970 + SET_RDI_INTO_CR0
15971 +#endif
15972 +
15973 + GET_CR3_INTO_RDI
15974 + add $__START_KERNEL_map,%rdi
15975 + sub phys_base(%rip),%rdi
15976 +
15977 +#ifdef CONFIG_PARAVIRT
15978 + cmpl $0, pv_info+PARAVIRT_enabled
15979 + jz 1f
15980 + mov %rdi,%rbx
15981 + i = 0
15982 + .rept USER_PGD_PTRS
15983 + mov i*8(%rbx),%rsi
15984 + mov $0x67,%sil
15985 + lea i*8(%rbx),%rdi
15986 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15987 + i = i + 1
15988 + .endr
15989 + jmp 2f
15990 +1:
15991 +#endif
15992 +
15993 + i = 0
15994 + .rept USER_PGD_PTRS
15995 + movb $0x67,i*8(%rdi)
15996 + i = i + 1
15997 + .endr
15998 +
15999 +#ifdef CONFIG_PARAVIRT
16000 +2: PV_RESTORE_REGS(CLBR_RDI)
16001 + popq %rbx
16002 +#endif
16003 +
16004 + popq %rdi
16005 + pax_force_retaddr
16006 + retq
16007 +ENDPROC(pax_exit_kernel_user)
16008 +#endif
16009 +
16010 +.macro pax_erase_kstack
16011 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16012 + call pax_erase_kstack
16013 +#endif
16014 +.endm
16015 +
16016 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16017 +/*
16018 + * r11: thread_info
16019 + * rcx, rdx: can be clobbered
16020 + */
16021 +ENTRY(pax_erase_kstack)
16022 + pushq %rdi
16023 + pushq %rax
16024 + pushq %r11
16025 +
16026 + GET_THREAD_INFO(%r11)
16027 + mov TI_lowest_stack(%r11), %rdi
16028 + mov $-0xBEEF, %rax
16029 + std
16030 +
16031 +1: mov %edi, %ecx
16032 + and $THREAD_SIZE_asm - 1, %ecx
16033 + shr $3, %ecx
16034 + repne scasq
16035 + jecxz 2f
16036 +
16037 + cmp $2*8, %ecx
16038 + jc 2f
16039 +
16040 + mov $2*8, %ecx
16041 + repe scasq
16042 + jecxz 2f
16043 + jne 1b
16044 +
16045 +2: cld
16046 + mov %esp, %ecx
16047 + sub %edi, %ecx
16048 +
16049 + cmp $THREAD_SIZE_asm, %rcx
16050 + jb 3f
16051 + ud2
16052 +3:
16053 +
16054 + shr $3, %ecx
16055 + rep stosq
16056 +
16057 + mov TI_task_thread_sp0(%r11), %rdi
16058 + sub $256, %rdi
16059 + mov %rdi, TI_lowest_stack(%r11)
16060 +
16061 + popq %r11
16062 + popq %rax
16063 + popq %rdi
16064 + pax_force_retaddr
16065 + ret
16066 +ENDPROC(pax_erase_kstack)
16067 +#endif
16068
16069 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16070 #ifdef CONFIG_TRACE_IRQFLAGS
16071 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16072 .endm
16073
16074 .macro UNFAKE_STACK_FRAME
16075 - addq $8*6, %rsp
16076 - CFI_ADJUST_CFA_OFFSET -(6*8)
16077 + addq $8*6 + ARG_SKIP, %rsp
16078 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16079 .endm
16080
16081 /*
16082 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16083 movq %rsp, %rsi
16084
16085 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16086 - testl $3, CS-RBP(%rsi)
16087 + testb $3, CS-RBP(%rsi)
16088 je 1f
16089 SWAPGS
16090 /*
16091 @@ -355,9 +639,10 @@ ENTRY(save_rest)
16092 movq_cfi r15, R15+16
16093 movq %r11, 8(%rsp) /* return address */
16094 FIXUP_TOP_OF_STACK %r11, 16
16095 + pax_force_retaddr
16096 ret
16097 CFI_ENDPROC
16098 -END(save_rest)
16099 +ENDPROC(save_rest)
16100
16101 /* save complete stack frame */
16102 .pushsection .kprobes.text, "ax"
16103 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16104 js 1f /* negative -> in kernel */
16105 SWAPGS
16106 xorl %ebx,%ebx
16107 -1: ret
16108 +1: pax_force_retaddr_bts
16109 + ret
16110 CFI_ENDPROC
16111 -END(save_paranoid)
16112 +ENDPROC(save_paranoid)
16113 .popsection
16114
16115 /*
16116 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16117
16118 RESTORE_REST
16119
16120 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16121 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16122 jz retint_restore_args
16123
16124 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16125 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16126 jmp ret_from_sys_call # go to the SYSRET fastpath
16127
16128 CFI_ENDPROC
16129 -END(ret_from_fork)
16130 +ENDPROC(ret_from_fork)
16131
16132 /*
16133 * System call entry. Up to 6 arguments in registers are supported.
16134 @@ -456,7 +742,7 @@ END(ret_from_fork)
16135 ENTRY(system_call)
16136 CFI_STARTPROC simple
16137 CFI_SIGNAL_FRAME
16138 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16139 + CFI_DEF_CFA rsp,0
16140 CFI_REGISTER rip,rcx
16141 /*CFI_REGISTER rflags,r11*/
16142 SWAPGS_UNSAFE_STACK
16143 @@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16144
16145 movq %rsp,PER_CPU_VAR(old_rsp)
16146 movq PER_CPU_VAR(kernel_stack),%rsp
16147 + SAVE_ARGS 8*6,0
16148 + pax_enter_kernel_user
16149 /*
16150 * No need to follow this irqs off/on section - it's straight
16151 * and short:
16152 */
16153 ENABLE_INTERRUPTS(CLBR_NONE)
16154 - SAVE_ARGS 8,0
16155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16156 movq %rcx,RIP-ARGOFFSET(%rsp)
16157 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16158 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16159 + GET_THREAD_INFO(%rcx)
16160 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16161 jnz tracesys
16162 system_call_fastpath:
16163 #if __SYSCALL_MASK == ~0
16164 @@ -488,7 +776,7 @@ system_call_fastpath:
16165 cmpl $__NR_syscall_max,%eax
16166 #endif
16167 ja badsys
16168 - movq %r10,%rcx
16169 + movq R10-ARGOFFSET(%rsp),%rcx
16170 call *sys_call_table(,%rax,8) # XXX: rip relative
16171 movq %rax,RAX-ARGOFFSET(%rsp)
16172 /*
16173 @@ -502,10 +790,13 @@ sysret_check:
16174 LOCKDEP_SYS_EXIT
16175 DISABLE_INTERRUPTS(CLBR_NONE)
16176 TRACE_IRQS_OFF
16177 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16178 + GET_THREAD_INFO(%rcx)
16179 + movl TI_flags(%rcx),%edx
16180 andl %edi,%edx
16181 jnz sysret_careful
16182 CFI_REMEMBER_STATE
16183 + pax_exit_kernel_user
16184 + pax_erase_kstack
16185 /*
16186 * sysretq will re-enable interrupts:
16187 */
16188 @@ -557,14 +848,18 @@ badsys:
16189 * jump back to the normal fast path.
16190 */
16191 auditsys:
16192 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16193 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16194 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16195 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16196 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16197 movq %rax,%rsi /* 2nd arg: syscall number */
16198 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16199 call __audit_syscall_entry
16200 +
16201 + pax_erase_kstack
16202 +
16203 LOAD_ARGS 0 /* reload call-clobbered registers */
16204 + pax_set_fptr_mask
16205 jmp system_call_fastpath
16206
16207 /*
16208 @@ -585,7 +880,7 @@ sysret_audit:
16209 /* Do syscall tracing */
16210 tracesys:
16211 #ifdef CONFIG_AUDITSYSCALL
16212 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16213 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16214 jz auditsys
16215 #endif
16216 SAVE_REST
16217 @@ -593,12 +888,16 @@ tracesys:
16218 FIXUP_TOP_OF_STACK %rdi
16219 movq %rsp,%rdi
16220 call syscall_trace_enter
16221 +
16222 + pax_erase_kstack
16223 +
16224 /*
16225 * Reload arg registers from stack in case ptrace changed them.
16226 * We don't reload %rax because syscall_trace_enter() returned
16227 * the value it wants us to use in the table lookup.
16228 */
16229 LOAD_ARGS ARGOFFSET, 1
16230 + pax_set_fptr_mask
16231 RESTORE_REST
16232 #if __SYSCALL_MASK == ~0
16233 cmpq $__NR_syscall_max,%rax
16234 @@ -607,7 +906,7 @@ tracesys:
16235 cmpl $__NR_syscall_max,%eax
16236 #endif
16237 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16238 - movq %r10,%rcx /* fixup for C */
16239 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16240 call *sys_call_table(,%rax,8)
16241 movq %rax,RAX-ARGOFFSET(%rsp)
16242 /* Use IRET because user could have changed frame */
16243 @@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16244 andl %edi,%edx
16245 jnz int_careful
16246 andl $~TS_COMPAT,TI_status(%rcx)
16247 + pax_erase_kstack
16248 jmp retint_swapgs
16249
16250 /* Either reschedule or signal or syscall exit tracking needed. */
16251 @@ -674,7 +974,7 @@ int_restore_rest:
16252 TRACE_IRQS_OFF
16253 jmp int_with_check
16254 CFI_ENDPROC
16255 -END(system_call)
16256 +ENDPROC(system_call)
16257
16258 /*
16259 * Certain special system calls that need to save a complete full stack frame.
16260 @@ -690,7 +990,7 @@ ENTRY(\label)
16261 call \func
16262 jmp ptregscall_common
16263 CFI_ENDPROC
16264 -END(\label)
16265 +ENDPROC(\label)
16266 .endm
16267
16268 PTREGSCALL stub_clone, sys_clone, %r8
16269 @@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16270 movq_cfi_restore R12+8, r12
16271 movq_cfi_restore RBP+8, rbp
16272 movq_cfi_restore RBX+8, rbx
16273 + pax_force_retaddr
16274 ret $REST_SKIP /* pop extended registers */
16275 CFI_ENDPROC
16276 -END(ptregscall_common)
16277 +ENDPROC(ptregscall_common)
16278
16279 ENTRY(stub_execve)
16280 CFI_STARTPROC
16281 @@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16282 RESTORE_REST
16283 jmp int_ret_from_sys_call
16284 CFI_ENDPROC
16285 -END(stub_execve)
16286 +ENDPROC(stub_execve)
16287
16288 /*
16289 * sigreturn is special because it needs to restore all registers on return.
16290 @@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16291 RESTORE_REST
16292 jmp int_ret_from_sys_call
16293 CFI_ENDPROC
16294 -END(stub_rt_sigreturn)
16295 +ENDPROC(stub_rt_sigreturn)
16296
16297 #ifdef CONFIG_X86_X32_ABI
16298 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16299 @@ -812,7 +1113,7 @@ vector=vector+1
16300 2: jmp common_interrupt
16301 .endr
16302 CFI_ENDPROC
16303 -END(irq_entries_start)
16304 +ENDPROC(irq_entries_start)
16305
16306 .previous
16307 END(interrupt)
16308 @@ -832,6 +1133,16 @@ END(interrupt)
16309 subq $ORIG_RAX-RBP, %rsp
16310 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16311 SAVE_ARGS_IRQ
16312 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16313 + testb $3, CS(%rdi)
16314 + jnz 1f
16315 + pax_enter_kernel
16316 + jmp 2f
16317 +1: pax_enter_kernel_user
16318 +2:
16319 +#else
16320 + pax_enter_kernel
16321 +#endif
16322 call \func
16323 .endm
16324
16325 @@ -863,7 +1174,7 @@ ret_from_intr:
16326
16327 exit_intr:
16328 GET_THREAD_INFO(%rcx)
16329 - testl $3,CS-ARGOFFSET(%rsp)
16330 + testb $3,CS-ARGOFFSET(%rsp)
16331 je retint_kernel
16332
16333 /* Interrupt came from user space */
16334 @@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16335 * The iretq could re-enable interrupts:
16336 */
16337 DISABLE_INTERRUPTS(CLBR_ANY)
16338 + pax_exit_kernel_user
16339 TRACE_IRQS_IRETQ
16340 SWAPGS
16341 jmp restore_args
16342
16343 retint_restore_args: /* return to kernel space */
16344 DISABLE_INTERRUPTS(CLBR_ANY)
16345 + pax_exit_kernel
16346 + pax_force_retaddr RIP-ARGOFFSET
16347 /*
16348 * The iretq could re-enable interrupts:
16349 */
16350 @@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16351 #endif
16352
16353 CFI_ENDPROC
16354 -END(common_interrupt)
16355 +ENDPROC(common_interrupt)
16356 /*
16357 * End of kprobes section
16358 */
16359 @@ -996,7 +1310,7 @@ ENTRY(\sym)
16360 interrupt \do_sym
16361 jmp ret_from_intr
16362 CFI_ENDPROC
16363 -END(\sym)
16364 +ENDPROC(\sym)
16365 .endm
16366
16367 #ifdef CONFIG_SMP
16368 @@ -1069,12 +1383,22 @@ ENTRY(\sym)
16369 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16370 call error_entry
16371 DEFAULT_FRAME 0
16372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16373 + testb $3, CS(%rsp)
16374 + jnz 1f
16375 + pax_enter_kernel
16376 + jmp 2f
16377 +1: pax_enter_kernel_user
16378 +2:
16379 +#else
16380 + pax_enter_kernel
16381 +#endif
16382 movq %rsp,%rdi /* pt_regs pointer */
16383 xorl %esi,%esi /* no error code */
16384 call \do_sym
16385 jmp error_exit /* %ebx: no swapgs flag */
16386 CFI_ENDPROC
16387 -END(\sym)
16388 +ENDPROC(\sym)
16389 .endm
16390
16391 .macro paranoidzeroentry sym do_sym
16392 @@ -1086,15 +1410,25 @@ ENTRY(\sym)
16393 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16394 call save_paranoid
16395 TRACE_IRQS_OFF
16396 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16397 + testb $3, CS(%rsp)
16398 + jnz 1f
16399 + pax_enter_kernel
16400 + jmp 2f
16401 +1: pax_enter_kernel_user
16402 +2:
16403 +#else
16404 + pax_enter_kernel
16405 +#endif
16406 movq %rsp,%rdi /* pt_regs pointer */
16407 xorl %esi,%esi /* no error code */
16408 call \do_sym
16409 jmp paranoid_exit /* %ebx: no swapgs flag */
16410 CFI_ENDPROC
16411 -END(\sym)
16412 +ENDPROC(\sym)
16413 .endm
16414
16415 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16416 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16417 .macro paranoidzeroentry_ist sym do_sym ist
16418 ENTRY(\sym)
16419 INTR_FRAME
16420 @@ -1104,14 +1438,30 @@ ENTRY(\sym)
16421 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16422 call save_paranoid
16423 TRACE_IRQS_OFF
16424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16425 + testb $3, CS(%rsp)
16426 + jnz 1f
16427 + pax_enter_kernel
16428 + jmp 2f
16429 +1: pax_enter_kernel_user
16430 +2:
16431 +#else
16432 + pax_enter_kernel
16433 +#endif
16434 movq %rsp,%rdi /* pt_regs pointer */
16435 xorl %esi,%esi /* no error code */
16436 +#ifdef CONFIG_SMP
16437 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16438 + lea init_tss(%r12), %r12
16439 +#else
16440 + lea init_tss(%rip), %r12
16441 +#endif
16442 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16443 call \do_sym
16444 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16445 jmp paranoid_exit /* %ebx: no swapgs flag */
16446 CFI_ENDPROC
16447 -END(\sym)
16448 +ENDPROC(\sym)
16449 .endm
16450
16451 .macro errorentry sym do_sym
16452 @@ -1122,13 +1472,23 @@ ENTRY(\sym)
16453 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16454 call error_entry
16455 DEFAULT_FRAME 0
16456 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16457 + testb $3, CS(%rsp)
16458 + jnz 1f
16459 + pax_enter_kernel
16460 + jmp 2f
16461 +1: pax_enter_kernel_user
16462 +2:
16463 +#else
16464 + pax_enter_kernel
16465 +#endif
16466 movq %rsp,%rdi /* pt_regs pointer */
16467 movq ORIG_RAX(%rsp),%rsi /* get error code */
16468 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16469 call \do_sym
16470 jmp error_exit /* %ebx: no swapgs flag */
16471 CFI_ENDPROC
16472 -END(\sym)
16473 +ENDPROC(\sym)
16474 .endm
16475
16476 /* error code is on the stack already */
16477 @@ -1141,13 +1501,23 @@ ENTRY(\sym)
16478 call save_paranoid
16479 DEFAULT_FRAME 0
16480 TRACE_IRQS_OFF
16481 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16482 + testb $3, CS(%rsp)
16483 + jnz 1f
16484 + pax_enter_kernel
16485 + jmp 2f
16486 +1: pax_enter_kernel_user
16487 +2:
16488 +#else
16489 + pax_enter_kernel
16490 +#endif
16491 movq %rsp,%rdi /* pt_regs pointer */
16492 movq ORIG_RAX(%rsp),%rsi /* get error code */
16493 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16494 call \do_sym
16495 jmp paranoid_exit /* %ebx: no swapgs flag */
16496 CFI_ENDPROC
16497 -END(\sym)
16498 +ENDPROC(\sym)
16499 .endm
16500
16501 zeroentry divide_error do_divide_error
16502 @@ -1177,9 +1547,10 @@ gs_change:
16503 2: mfence /* workaround */
16504 SWAPGS
16505 popfq_cfi
16506 + pax_force_retaddr
16507 ret
16508 CFI_ENDPROC
16509 -END(native_load_gs_index)
16510 +ENDPROC(native_load_gs_index)
16511
16512 .section __ex_table,"a"
16513 .align 8
16514 @@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16515 * Here we are in the child and the registers are set as they were
16516 * at kernel_thread() invocation in the parent.
16517 */
16518 + pax_force_fptr %rsi
16519 call *%rsi
16520 # exit
16521 mov %eax, %edi
16522 call do_exit
16523 ud2 # padding for call trace
16524 CFI_ENDPROC
16525 -END(kernel_thread_helper)
16526 +ENDPROC(kernel_thread_helper)
16527
16528 /*
16529 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16530 @@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16531 RESTORE_REST
16532 testq %rax,%rax
16533 je int_ret_from_sys_call
16534 - RESTORE_ARGS
16535 UNFAKE_STACK_FRAME
16536 + pax_force_retaddr
16537 ret
16538 CFI_ENDPROC
16539 -END(kernel_execve)
16540 +ENDPROC(kernel_execve)
16541
16542 /* Call softirq on interrupt stack. Interrupts are off. */
16543 ENTRY(call_softirq)
16544 @@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16545 CFI_DEF_CFA_REGISTER rsp
16546 CFI_ADJUST_CFA_OFFSET -8
16547 decl PER_CPU_VAR(irq_count)
16548 + pax_force_retaddr
16549 ret
16550 CFI_ENDPROC
16551 -END(call_softirq)
16552 +ENDPROC(call_softirq)
16553
16554 #ifdef CONFIG_XEN
16555 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16556 @@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16557 decl PER_CPU_VAR(irq_count)
16558 jmp error_exit
16559 CFI_ENDPROC
16560 -END(xen_do_hypervisor_callback)
16561 +ENDPROC(xen_do_hypervisor_callback)
16562
16563 /*
16564 * Hypervisor uses this for application faults while it executes.
16565 @@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16566 SAVE_ALL
16567 jmp error_exit
16568 CFI_ENDPROC
16569 -END(xen_failsafe_callback)
16570 +ENDPROC(xen_failsafe_callback)
16571
16572 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16573 xen_hvm_callback_vector xen_evtchn_do_upcall
16574 @@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16575 TRACE_IRQS_OFF
16576 testl %ebx,%ebx /* swapgs needed? */
16577 jnz paranoid_restore
16578 - testl $3,CS(%rsp)
16579 + testb $3,CS(%rsp)
16580 jnz paranoid_userspace
16581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16582 + pax_exit_kernel
16583 + TRACE_IRQS_IRETQ 0
16584 + SWAPGS_UNSAFE_STACK
16585 + RESTORE_ALL 8
16586 + pax_force_retaddr_bts
16587 + jmp irq_return
16588 +#endif
16589 paranoid_swapgs:
16590 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16591 + pax_exit_kernel_user
16592 +#else
16593 + pax_exit_kernel
16594 +#endif
16595 TRACE_IRQS_IRETQ 0
16596 SWAPGS_UNSAFE_STACK
16597 RESTORE_ALL 8
16598 jmp irq_return
16599 paranoid_restore:
16600 + pax_exit_kernel
16601 TRACE_IRQS_IRETQ 0
16602 RESTORE_ALL 8
16603 + pax_force_retaddr_bts
16604 jmp irq_return
16605 paranoid_userspace:
16606 GET_THREAD_INFO(%rcx)
16607 @@ -1442,7 +1830,7 @@ paranoid_schedule:
16608 TRACE_IRQS_OFF
16609 jmp paranoid_userspace
16610 CFI_ENDPROC
16611 -END(paranoid_exit)
16612 +ENDPROC(paranoid_exit)
16613
16614 /*
16615 * Exception entry point. This expects an error code/orig_rax on the stack.
16616 @@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16617 movq_cfi r14, R14+8
16618 movq_cfi r15, R15+8
16619 xorl %ebx,%ebx
16620 - testl $3,CS+8(%rsp)
16621 + testb $3,CS+8(%rsp)
16622 je error_kernelspace
16623 error_swapgs:
16624 SWAPGS
16625 error_sti:
16626 TRACE_IRQS_OFF
16627 + pax_force_retaddr_bts
16628 ret
16629
16630 /*
16631 @@ -1501,7 +1890,7 @@ bstep_iret:
16632 movq %rcx,RIP+8(%rsp)
16633 jmp error_swapgs
16634 CFI_ENDPROC
16635 -END(error_entry)
16636 +ENDPROC(error_entry)
16637
16638
16639 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16640 @@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16641 jnz retint_careful
16642 jmp retint_swapgs
16643 CFI_ENDPROC
16644 -END(error_exit)
16645 +ENDPROC(error_exit)
16646
16647 /*
16648 * Test if a given stack is an NMI stack or not.
16649 @@ -1579,9 +1968,11 @@ ENTRY(nmi)
16650 * If %cs was not the kernel segment, then the NMI triggered in user
16651 * space, which means it is definitely not nested.
16652 */
16653 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16654 + je 1f
16655 cmpl $__KERNEL_CS, 16(%rsp)
16656 jne first_nmi
16657 -
16658 +1:
16659 /*
16660 * Check the special variable on the stack to see if NMIs are
16661 * executing.
16662 @@ -1728,6 +2119,16 @@ end_repeat_nmi:
16663 */
16664 call save_paranoid
16665 DEFAULT_FRAME 0
16666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16667 + testb $3, CS(%rsp)
16668 + jnz 1f
16669 + pax_enter_kernel
16670 + jmp 2f
16671 +1: pax_enter_kernel_user
16672 +2:
16673 +#else
16674 + pax_enter_kernel
16675 +#endif
16676 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16677 movq %rsp,%rdi
16678 movq $-1,%rsi
16679 @@ -1735,21 +2136,32 @@ end_repeat_nmi:
16680 testl %ebx,%ebx /* swapgs needed? */
16681 jnz nmi_restore
16682 nmi_swapgs:
16683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16684 + pax_exit_kernel_user
16685 +#else
16686 + pax_exit_kernel
16687 +#endif
16688 SWAPGS_UNSAFE_STACK
16689 + RESTORE_ALL 8
16690 + /* Clear the NMI executing stack variable */
16691 + movq $0, 10*8(%rsp)
16692 + jmp irq_return
16693 nmi_restore:
16694 + pax_exit_kernel
16695 RESTORE_ALL 8
16696 + pax_force_retaddr_bts
16697 /* Clear the NMI executing stack variable */
16698 movq $0, 10*8(%rsp)
16699 jmp irq_return
16700 CFI_ENDPROC
16701 -END(nmi)
16702 +ENDPROC(nmi)
16703
16704 ENTRY(ignore_sysret)
16705 CFI_STARTPROC
16706 mov $-ENOSYS,%eax
16707 sysret
16708 CFI_ENDPROC
16709 -END(ignore_sysret)
16710 +ENDPROC(ignore_sysret)
16711
16712 /*
16713 * End of kprobes section
16714 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16715 index c9a281f..ce2f317 100644
16716 --- a/arch/x86/kernel/ftrace.c
16717 +++ b/arch/x86/kernel/ftrace.c
16718 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16719 static const void *mod_code_newcode; /* holds the text to write to the IP */
16720
16721 static unsigned nmi_wait_count;
16722 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16723 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16724
16725 int ftrace_arch_read_dyn_info(char *buf, int size)
16726 {
16727 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16728
16729 r = snprintf(buf, size, "%u %u",
16730 nmi_wait_count,
16731 - atomic_read(&nmi_update_count));
16732 + atomic_read_unchecked(&nmi_update_count));
16733 return r;
16734 }
16735
16736 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16737
16738 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16739 smp_rmb();
16740 + pax_open_kernel();
16741 ftrace_mod_code();
16742 - atomic_inc(&nmi_update_count);
16743 + pax_close_kernel();
16744 + atomic_inc_unchecked(&nmi_update_count);
16745 }
16746 /* Must have previous changes seen before executions */
16747 smp_mb();
16748 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16749 {
16750 unsigned char replaced[MCOUNT_INSN_SIZE];
16751
16752 + ip = ktla_ktva(ip);
16753 +
16754 /*
16755 * Note: Due to modules and __init, code can
16756 * disappear and change, we need to protect against faulting
16757 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16758 unsigned char old[MCOUNT_INSN_SIZE], *new;
16759 int ret;
16760
16761 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16762 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16763 new = ftrace_call_replace(ip, (unsigned long)func);
16764 ret = ftrace_modify_code(ip, old, new);
16765
16766 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16767 {
16768 unsigned char code[MCOUNT_INSN_SIZE];
16769
16770 + ip = ktla_ktva(ip);
16771 +
16772 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16773 return -EFAULT;
16774
16775 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16776 index 51ff186..9e77418 100644
16777 --- a/arch/x86/kernel/head32.c
16778 +++ b/arch/x86/kernel/head32.c
16779 @@ -19,6 +19,7 @@
16780 #include <asm/io_apic.h>
16781 #include <asm/bios_ebda.h>
16782 #include <asm/tlbflush.h>
16783 +#include <asm/boot.h>
16784
16785 static void __init i386_default_early_setup(void)
16786 {
16787 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16788
16789 void __init i386_start_kernel(void)
16790 {
16791 - memblock_reserve(__pa_symbol(&_text),
16792 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16793 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16794
16795 #ifdef CONFIG_BLK_DEV_INITRD
16796 /* Reserve INITRD */
16797 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16798 index ce0be7c..c41476e 100644
16799 --- a/arch/x86/kernel/head_32.S
16800 +++ b/arch/x86/kernel/head_32.S
16801 @@ -25,6 +25,12 @@
16802 /* Physical address */
16803 #define pa(X) ((X) - __PAGE_OFFSET)
16804
16805 +#ifdef CONFIG_PAX_KERNEXEC
16806 +#define ta(X) (X)
16807 +#else
16808 +#define ta(X) ((X) - __PAGE_OFFSET)
16809 +#endif
16810 +
16811 /*
16812 * References to members of the new_cpu_data structure.
16813 */
16814 @@ -54,11 +60,7 @@
16815 * and small than max_low_pfn, otherwise will waste some page table entries
16816 */
16817
16818 -#if PTRS_PER_PMD > 1
16819 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16820 -#else
16821 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16822 -#endif
16823 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16824
16825 /* Number of possible pages in the lowmem region */
16826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16827 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16829
16830 /*
16831 + * Real beginning of normal "text" segment
16832 + */
16833 +ENTRY(stext)
16834 +ENTRY(_stext)
16835 +
16836 +/*
16837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16838 * %esi points to the real-mode code as a 32-bit pointer.
16839 * CS and DS must be 4 GB flat segments, but we don't depend on
16840 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16841 * can.
16842 */
16843 __HEAD
16844 +
16845 +#ifdef CONFIG_PAX_KERNEXEC
16846 + jmp startup_32
16847 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16848 +.fill PAGE_SIZE-5,1,0xcc
16849 +#endif
16850 +
16851 ENTRY(startup_32)
16852 movl pa(stack_start),%ecx
16853
16854 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16855 2:
16856 leal -__PAGE_OFFSET(%ecx),%esp
16857
16858 +#ifdef CONFIG_SMP
16859 + movl $pa(cpu_gdt_table),%edi
16860 + movl $__per_cpu_load,%eax
16861 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16862 + rorl $16,%eax
16863 + movb %al,__KERNEL_PERCPU + 4(%edi)
16864 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16865 + movl $__per_cpu_end - 1,%eax
16866 + subl $__per_cpu_start,%eax
16867 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16868 +#endif
16869 +
16870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16871 + movl $NR_CPUS,%ecx
16872 + movl $pa(cpu_gdt_table),%edi
16873 +1:
16874 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16875 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16876 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16877 + addl $PAGE_SIZE_asm,%edi
16878 + loop 1b
16879 +#endif
16880 +
16881 +#ifdef CONFIG_PAX_KERNEXEC
16882 + movl $pa(boot_gdt),%edi
16883 + movl $__LOAD_PHYSICAL_ADDR,%eax
16884 + movw %ax,__BOOT_CS + 2(%edi)
16885 + rorl $16,%eax
16886 + movb %al,__BOOT_CS + 4(%edi)
16887 + movb %ah,__BOOT_CS + 7(%edi)
16888 + rorl $16,%eax
16889 +
16890 + ljmp $(__BOOT_CS),$1f
16891 +1:
16892 +
16893 + movl $NR_CPUS,%ecx
16894 + movl $pa(cpu_gdt_table),%edi
16895 + addl $__PAGE_OFFSET,%eax
16896 +1:
16897 + movw %ax,__KERNEL_CS + 2(%edi)
16898 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16899 + rorl $16,%eax
16900 + movb %al,__KERNEL_CS + 4(%edi)
16901 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16902 + movb %ah,__KERNEL_CS + 7(%edi)
16903 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16904 + rorl $16,%eax
16905 + addl $PAGE_SIZE_asm,%edi
16906 + loop 1b
16907 +#endif
16908 +
16909 /*
16910 * Clear BSS first so that there are no surprises...
16911 */
16912 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16913 movl %eax, pa(max_pfn_mapped)
16914
16915 /* Do early initialization of the fixmap area */
16916 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16917 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16918 +#ifdef CONFIG_COMPAT_VDSO
16919 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16920 +#else
16921 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16922 +#endif
16923 #else /* Not PAE */
16924
16925 page_pde_offset = (__PAGE_OFFSET >> 20);
16926 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16927 movl %eax, pa(max_pfn_mapped)
16928
16929 /* Do early initialization of the fixmap area */
16930 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16931 - movl %eax,pa(initial_page_table+0xffc)
16932 +#ifdef CONFIG_COMPAT_VDSO
16933 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16934 +#else
16935 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16936 +#endif
16937 #endif
16938
16939 #ifdef CONFIG_PARAVIRT
16940 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16941 cmpl $num_subarch_entries, %eax
16942 jae bad_subarch
16943
16944 - movl pa(subarch_entries)(,%eax,4), %eax
16945 - subl $__PAGE_OFFSET, %eax
16946 - jmp *%eax
16947 + jmp *pa(subarch_entries)(,%eax,4)
16948
16949 bad_subarch:
16950 WEAK(lguest_entry)
16951 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16952 __INITDATA
16953
16954 subarch_entries:
16955 - .long default_entry /* normal x86/PC */
16956 - .long lguest_entry /* lguest hypervisor */
16957 - .long xen_entry /* Xen hypervisor */
16958 - .long default_entry /* Moorestown MID */
16959 + .long ta(default_entry) /* normal x86/PC */
16960 + .long ta(lguest_entry) /* lguest hypervisor */
16961 + .long ta(xen_entry) /* Xen hypervisor */
16962 + .long ta(default_entry) /* Moorestown MID */
16963 num_subarch_entries = (. - subarch_entries) / 4
16964 .previous
16965 #else
16966 @@ -312,6 +382,7 @@ default_entry:
16967 orl %edx,%eax
16968 movl %eax,%cr4
16969
16970 +#ifdef CONFIG_X86_PAE
16971 testb $X86_CR4_PAE, %al # check if PAE is enabled
16972 jz 6f
16973
16974 @@ -340,6 +411,9 @@ default_entry:
16975 /* Make changes effective */
16976 wrmsr
16977
16978 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16979 +#endif
16980 +
16981 6:
16982
16983 /*
16984 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16986 movl %eax,%ss # after changing gdt.
16987
16988 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16989 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16990 movl %eax,%ds
16991 movl %eax,%es
16992
16993 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16994 */
16995 cmpb $0,ready
16996 jne 1f
16997 - movl $gdt_page,%eax
16998 + movl $cpu_gdt_table,%eax
16999 movl $stack_canary,%ecx
17000 +#ifdef CONFIG_SMP
17001 + addl $__per_cpu_load,%ecx
17002 +#endif
17003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17004 shrl $16, %ecx
17005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17007 1:
17008 -#endif
17009 movl $(__KERNEL_STACK_CANARY),%eax
17010 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17011 + movl $(__USER_DS),%eax
17012 +#else
17013 + xorl %eax,%eax
17014 +#endif
17015 movl %eax,%gs
17016
17017 xorl %eax,%eax # Clear LDT
17018 @@ -558,22 +639,22 @@ early_page_fault:
17019 jmp early_fault
17020
17021 early_fault:
17022 - cld
17023 #ifdef CONFIG_PRINTK
17024 + cmpl $1,%ss:early_recursion_flag
17025 + je hlt_loop
17026 + incl %ss:early_recursion_flag
17027 + cld
17028 pusha
17029 movl $(__KERNEL_DS),%eax
17030 movl %eax,%ds
17031 movl %eax,%es
17032 - cmpl $2,early_recursion_flag
17033 - je hlt_loop
17034 - incl early_recursion_flag
17035 movl %cr2,%eax
17036 pushl %eax
17037 pushl %edx /* trapno */
17038 pushl $fault_msg
17039 call printk
17040 +; call dump_stack
17041 #endif
17042 - call dump_stack
17043 hlt_loop:
17044 hlt
17045 jmp hlt_loop
17046 @@ -581,8 +662,11 @@ hlt_loop:
17047 /* This is the default interrupt "handler" :-) */
17048 ALIGN
17049 ignore_int:
17050 - cld
17051 #ifdef CONFIG_PRINTK
17052 + cmpl $2,%ss:early_recursion_flag
17053 + je hlt_loop
17054 + incl %ss:early_recursion_flag
17055 + cld
17056 pushl %eax
17057 pushl %ecx
17058 pushl %edx
17059 @@ -591,9 +675,6 @@ ignore_int:
17060 movl $(__KERNEL_DS),%eax
17061 movl %eax,%ds
17062 movl %eax,%es
17063 - cmpl $2,early_recursion_flag
17064 - je hlt_loop
17065 - incl early_recursion_flag
17066 pushl 16(%esp)
17067 pushl 24(%esp)
17068 pushl 32(%esp)
17069 @@ -622,29 +703,43 @@ ENTRY(initial_code)
17070 /*
17071 * BSS section
17072 */
17073 -__PAGE_ALIGNED_BSS
17074 - .align PAGE_SIZE
17075 #ifdef CONFIG_X86_PAE
17076 +.section .initial_pg_pmd,"a",@progbits
17077 initial_pg_pmd:
17078 .fill 1024*KPMDS,4,0
17079 #else
17080 +.section .initial_page_table,"a",@progbits
17081 ENTRY(initial_page_table)
17082 .fill 1024,4,0
17083 #endif
17084 +.section .initial_pg_fixmap,"a",@progbits
17085 initial_pg_fixmap:
17086 .fill 1024,4,0
17087 +.section .empty_zero_page,"a",@progbits
17088 ENTRY(empty_zero_page)
17089 .fill 4096,1,0
17090 +.section .swapper_pg_dir,"a",@progbits
17091 ENTRY(swapper_pg_dir)
17092 +#ifdef CONFIG_X86_PAE
17093 + .fill 4,8,0
17094 +#else
17095 .fill 1024,4,0
17096 +#endif
17097 +
17098 +/*
17099 + * The IDT has to be page-aligned to simplify the Pentium
17100 + * F0 0F bug workaround.. We have a special link segment
17101 + * for this.
17102 + */
17103 +.section .idt,"a",@progbits
17104 +ENTRY(idt_table)
17105 + .fill 256,8,0
17106
17107 /*
17108 * This starts the data section.
17109 */
17110 #ifdef CONFIG_X86_PAE
17111 -__PAGE_ALIGNED_DATA
17112 - /* Page-aligned for the benefit of paravirt? */
17113 - .align PAGE_SIZE
17114 +.section .initial_page_table,"a",@progbits
17115 ENTRY(initial_page_table)
17116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17117 # if KPMDS == 3
17118 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17119 # error "Kernel PMDs should be 1, 2 or 3"
17120 # endif
17121 .align PAGE_SIZE /* needs to be page-sized too */
17122 +
17123 +#ifdef CONFIG_PAX_PER_CPU_PGD
17124 +ENTRY(cpu_pgd)
17125 + .rept NR_CPUS
17126 + .fill 4,8,0
17127 + .endr
17128 +#endif
17129 +
17130 #endif
17131
17132 .data
17133 .balign 4
17134 ENTRY(stack_start)
17135 - .long init_thread_union+THREAD_SIZE
17136 + .long init_thread_union+THREAD_SIZE-8
17137
17138 +ready: .byte 0
17139 +
17140 +.section .rodata,"a",@progbits
17141 early_recursion_flag:
17142 .long 0
17143
17144 -ready: .byte 0
17145 -
17146 int_msg:
17147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17148
17149 @@ -707,7 +811,7 @@ fault_msg:
17150 .word 0 # 32 bit align gdt_desc.address
17151 boot_gdt_descr:
17152 .word __BOOT_DS+7
17153 - .long boot_gdt - __PAGE_OFFSET
17154 + .long pa(boot_gdt)
17155
17156 .word 0 # 32-bit align idt_desc.address
17157 idt_descr:
17158 @@ -718,7 +822,7 @@ idt_descr:
17159 .word 0 # 32 bit align gdt_desc.address
17160 ENTRY(early_gdt_descr)
17161 .word GDT_ENTRIES*8-1
17162 - .long gdt_page /* Overwritten for secondary CPUs */
17163 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17164
17165 /*
17166 * The boot_gdt must mirror the equivalent in setup.S and is
17167 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17168 .align L1_CACHE_BYTES
17169 ENTRY(boot_gdt)
17170 .fill GDT_ENTRY_BOOT_CS,8,0
17171 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17172 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17173 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17174 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17175 +
17176 + .align PAGE_SIZE_asm
17177 +ENTRY(cpu_gdt_table)
17178 + .rept NR_CPUS
17179 + .quad 0x0000000000000000 /* NULL descriptor */
17180 + .quad 0x0000000000000000 /* 0x0b reserved */
17181 + .quad 0x0000000000000000 /* 0x13 reserved */
17182 + .quad 0x0000000000000000 /* 0x1b reserved */
17183 +
17184 +#ifdef CONFIG_PAX_KERNEXEC
17185 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17186 +#else
17187 + .quad 0x0000000000000000 /* 0x20 unused */
17188 +#endif
17189 +
17190 + .quad 0x0000000000000000 /* 0x28 unused */
17191 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17192 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17193 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17194 + .quad 0x0000000000000000 /* 0x4b reserved */
17195 + .quad 0x0000000000000000 /* 0x53 reserved */
17196 + .quad 0x0000000000000000 /* 0x5b reserved */
17197 +
17198 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17199 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17200 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17201 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17202 +
17203 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17204 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17205 +
17206 + /*
17207 + * Segments used for calling PnP BIOS have byte granularity.
17208 + * The code segments and data segments have fixed 64k limits,
17209 + * the transfer segment sizes are set at run time.
17210 + */
17211 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17212 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17213 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17214 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17215 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17216 +
17217 + /*
17218 + * The APM segments have byte granularity and their bases
17219 + * are set at run time. All have 64k limits.
17220 + */
17221 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17222 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17223 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17224 +
17225 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17226 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17227 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17228 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17229 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17230 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17231 +
17232 + /* Be sure this is zeroed to avoid false validations in Xen */
17233 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17234 + .endr
17235 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17236 index 40f4eb3..6d24d9d 100644
17237 --- a/arch/x86/kernel/head_64.S
17238 +++ b/arch/x86/kernel/head_64.S
17239 @@ -19,6 +19,8 @@
17240 #include <asm/cache.h>
17241 #include <asm/processor-flags.h>
17242 #include <asm/percpu.h>
17243 +#include <asm/cpufeature.h>
17244 +#include <asm/alternative-asm.h>
17245
17246 #ifdef CONFIG_PARAVIRT
17247 #include <asm/asm-offsets.h>
17248 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17252 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17253 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17254 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17255 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17256 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17257 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17258
17259 .text
17260 __HEAD
17261 @@ -85,35 +93,23 @@ startup_64:
17262 */
17263 addq %rbp, init_level4_pgt + 0(%rip)
17264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17265 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17266 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17267 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17269
17270 addq %rbp, level3_ident_pgt + 0(%rip)
17271 +#ifndef CONFIG_XEN
17272 + addq %rbp, level3_ident_pgt + 8(%rip)
17273 +#endif
17274
17275 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17276 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17277 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17278 +
17279 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17280 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17281
17282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17283 -
17284 - /* Add an Identity mapping if I am above 1G */
17285 - leaq _text(%rip), %rdi
17286 - andq $PMD_PAGE_MASK, %rdi
17287 -
17288 - movq %rdi, %rax
17289 - shrq $PUD_SHIFT, %rax
17290 - andq $(PTRS_PER_PUD - 1), %rax
17291 - jz ident_complete
17292 -
17293 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17294 - leaq level3_ident_pgt(%rip), %rbx
17295 - movq %rdx, 0(%rbx, %rax, 8)
17296 -
17297 - movq %rdi, %rax
17298 - shrq $PMD_SHIFT, %rax
17299 - andq $(PTRS_PER_PMD - 1), %rax
17300 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17301 - leaq level2_spare_pgt(%rip), %rbx
17302 - movq %rdx, 0(%rbx, %rax, 8)
17303 -ident_complete:
17304 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17305
17306 /*
17307 * Fixup the kernel text+data virtual addresses. Note that
17308 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17309 * after the boot processor executes this code.
17310 */
17311
17312 - /* Enable PAE mode and PGE */
17313 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17314 + /* Enable PAE mode and PSE/PGE */
17315 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17316 movq %rax, %cr4
17317
17318 /* Setup early boot stage 4 level pagetables. */
17319 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17320 movl $MSR_EFER, %ecx
17321 rdmsr
17322 btsl $_EFER_SCE, %eax /* Enable System Call */
17323 - btl $20,%edi /* No Execute supported? */
17324 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17325 jnc 1f
17326 btsl $_EFER_NX, %eax
17327 + leaq init_level4_pgt(%rip), %rdi
17328 +#ifndef CONFIG_EFI
17329 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17330 +#endif
17331 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17332 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17333 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17334 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17335 1: wrmsr /* Make changes effective */
17336
17337 /* Setup cr0 */
17338 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17339 * jump. In addition we need to ensure %cs is set so we make this
17340 * a far return.
17341 */
17342 + pax_set_fptr_mask
17343 movq initial_code(%rip),%rax
17344 pushq $0 # fake return address to stop unwinder
17345 pushq $__KERNEL_CS # set correct cs
17346 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17347 bad_address:
17348 jmp bad_address
17349
17350 - .section ".init.text","ax"
17351 + __INIT
17352 #ifdef CONFIG_EARLY_PRINTK
17353 .globl early_idt_handlers
17354 early_idt_handlers:
17355 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17356 #endif /* EARLY_PRINTK */
17357 1: hlt
17358 jmp 1b
17359 + .previous
17360
17361 #ifdef CONFIG_EARLY_PRINTK
17362 + __INITDATA
17363 early_recursion_flag:
17364 .long 0
17365 + .previous
17366
17367 + .section .rodata,"a",@progbits
17368 early_idt_msg:
17369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17370 early_idt_ripmsg:
17371 .asciz "RIP %s\n"
17372 + .previous
17373 #endif /* CONFIG_EARLY_PRINTK */
17374 - .previous
17375
17376 + .section .rodata,"a",@progbits
17377 #define NEXT_PAGE(name) \
17378 .balign PAGE_SIZE; \
17379 ENTRY(name)
17380 @@ -338,7 +348,6 @@ ENTRY(name)
17381 i = i + 1 ; \
17382 .endr
17383
17384 - .data
17385 /*
17386 * This default setting generates an ident mapping at address 0x100000
17387 * and a mapping for the kernel that precisely maps virtual address
17388 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17392 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17393 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17394 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17395 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17396 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17397 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17398 .org init_level4_pgt + L4_START_KERNEL*8, 0
17399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17401
17402 +#ifdef CONFIG_PAX_PER_CPU_PGD
17403 +NEXT_PAGE(cpu_pgd)
17404 + .rept NR_CPUS
17405 + .fill 512,8,0
17406 + .endr
17407 +#endif
17408 +
17409 NEXT_PAGE(level3_ident_pgt)
17410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17411 +#ifdef CONFIG_XEN
17412 .fill 511,8,0
17413 +#else
17414 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17415 + .fill 510,8,0
17416 +#endif
17417 +
17418 +NEXT_PAGE(level3_vmalloc_start_pgt)
17419 + .fill 512,8,0
17420 +
17421 +NEXT_PAGE(level3_vmalloc_end_pgt)
17422 + .fill 512,8,0
17423 +
17424 +NEXT_PAGE(level3_vmemmap_pgt)
17425 + .fill L3_VMEMMAP_START,8,0
17426 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17427
17428 NEXT_PAGE(level3_kernel_pgt)
17429 .fill L3_START_KERNEL,8,0
17430 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17433
17434 +NEXT_PAGE(level2_vmemmap_pgt)
17435 + .fill 512,8,0
17436 +
17437 NEXT_PAGE(level2_fixmap_pgt)
17438 - .fill 506,8,0
17439 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17440 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17441 - .fill 5,8,0
17442 + .fill 507,8,0
17443 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17444 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17445 + .fill 4,8,0
17446
17447 -NEXT_PAGE(level1_fixmap_pgt)
17448 +NEXT_PAGE(level1_vsyscall_pgt)
17449 .fill 512,8,0
17450
17451 -NEXT_PAGE(level2_ident_pgt)
17452 - /* Since I easily can, map the first 1G.
17453 + /* Since I easily can, map the first 2G.
17454 * Don't set NX because code runs from these pages.
17455 */
17456 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17457 +NEXT_PAGE(level2_ident_pgt)
17458 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17459
17460 NEXT_PAGE(level2_kernel_pgt)
17461 /*
17462 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17463 * If you want to increase this then increase MODULES_VADDR
17464 * too.)
17465 */
17466 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17467 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17468 -
17469 -NEXT_PAGE(level2_spare_pgt)
17470 - .fill 512, 8, 0
17471 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17472
17473 #undef PMDS
17474 #undef NEXT_PAGE
17475
17476 - .data
17477 + .align PAGE_SIZE
17478 +ENTRY(cpu_gdt_table)
17479 + .rept NR_CPUS
17480 + .quad 0x0000000000000000 /* NULL descriptor */
17481 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17482 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17483 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17484 + .quad 0x00cffb000000ffff /* __USER32_CS */
17485 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17486 + .quad 0x00affb000000ffff /* __USER_CS */
17487 +
17488 +#ifdef CONFIG_PAX_KERNEXEC
17489 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17490 +#else
17491 + .quad 0x0 /* unused */
17492 +#endif
17493 +
17494 + .quad 0,0 /* TSS */
17495 + .quad 0,0 /* LDT */
17496 + .quad 0,0,0 /* three TLS descriptors */
17497 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17498 + /* asm/segment.h:GDT_ENTRIES must match this */
17499 +
17500 + /* zero the remaining page */
17501 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17502 + .endr
17503 +
17504 .align 16
17505 .globl early_gdt_descr
17506 early_gdt_descr:
17507 .word GDT_ENTRIES*8-1
17508 early_gdt_descr_base:
17509 - .quad INIT_PER_CPU_VAR(gdt_page)
17510 + .quad cpu_gdt_table
17511
17512 ENTRY(phys_base)
17513 /* This must match the first entry in level2_kernel_pgt */
17514 .quad 0x0000000000000000
17515
17516 #include "../../x86/xen/xen-head.S"
17517 -
17518 - .section .bss, "aw", @nobits
17519 +
17520 + .section .rodata,"a",@progbits
17521 .align L1_CACHE_BYTES
17522 ENTRY(idt_table)
17523 - .skip IDT_ENTRIES * 16
17524 + .fill 512,8,0
17525
17526 .align L1_CACHE_BYTES
17527 ENTRY(nmi_idt_table)
17528 - .skip IDT_ENTRIES * 16
17529 + .fill 512,8,0
17530
17531 __PAGE_ALIGNED_BSS
17532 .align PAGE_SIZE
17533 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17534 index 9c3bd4a..e1d9b35 100644
17535 --- a/arch/x86/kernel/i386_ksyms_32.c
17536 +++ b/arch/x86/kernel/i386_ksyms_32.c
17537 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17538 EXPORT_SYMBOL(cmpxchg8b_emu);
17539 #endif
17540
17541 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17542 +
17543 /* Networking helper routines. */
17544 EXPORT_SYMBOL(csum_partial_copy_generic);
17545 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17546 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17547
17548 EXPORT_SYMBOL(__get_user_1);
17549 EXPORT_SYMBOL(__get_user_2);
17550 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17551
17552 EXPORT_SYMBOL(csum_partial);
17553 EXPORT_SYMBOL(empty_zero_page);
17554 +
17555 +#ifdef CONFIG_PAX_KERNEXEC
17556 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17557 +#endif
17558 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17559 index 2d6e649..df6e1af 100644
17560 --- a/arch/x86/kernel/i387.c
17561 +++ b/arch/x86/kernel/i387.c
17562 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17563 static inline bool interrupted_user_mode(void)
17564 {
17565 struct pt_regs *regs = get_irq_regs();
17566 - return regs && user_mode_vm(regs);
17567 + return regs && user_mode(regs);
17568 }
17569
17570 /*
17571 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17572 index 36d1853..bf25736 100644
17573 --- a/arch/x86/kernel/i8259.c
17574 +++ b/arch/x86/kernel/i8259.c
17575 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17576 "spurious 8259A interrupt: IRQ%d.\n", irq);
17577 spurious_irq_mask |= irqmask;
17578 }
17579 - atomic_inc(&irq_err_count);
17580 + atomic_inc_unchecked(&irq_err_count);
17581 /*
17582 * Theoretically we do not have to handle this IRQ,
17583 * but in Linux this does not cause problems and is
17584 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17585 index 43e9ccf..44ccf6f 100644
17586 --- a/arch/x86/kernel/init_task.c
17587 +++ b/arch/x86/kernel/init_task.c
17588 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17589 * way process stacks are handled. This is done by having a special
17590 * "init_task" linker map entry..
17591 */
17592 -union thread_union init_thread_union __init_task_data =
17593 - { INIT_THREAD_INFO(init_task) };
17594 +union thread_union init_thread_union __init_task_data;
17595
17596 /*
17597 * Initial task structure.
17598 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17599 * section. Since TSS's are completely CPU-local, we want them
17600 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17601 */
17602 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17603 -
17604 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17605 +EXPORT_SYMBOL(init_tss);
17606 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17607 index 8c96897..be66bfa 100644
17608 --- a/arch/x86/kernel/ioport.c
17609 +++ b/arch/x86/kernel/ioport.c
17610 @@ -6,6 +6,7 @@
17611 #include <linux/sched.h>
17612 #include <linux/kernel.h>
17613 #include <linux/capability.h>
17614 +#include <linux/security.h>
17615 #include <linux/errno.h>
17616 #include <linux/types.h>
17617 #include <linux/ioport.h>
17618 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17619
17620 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17621 return -EINVAL;
17622 +#ifdef CONFIG_GRKERNSEC_IO
17623 + if (turn_on && grsec_disable_privio) {
17624 + gr_handle_ioperm();
17625 + return -EPERM;
17626 + }
17627 +#endif
17628 if (turn_on && !capable(CAP_SYS_RAWIO))
17629 return -EPERM;
17630
17631 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17632 * because the ->io_bitmap_max value must match the bitmap
17633 * contents:
17634 */
17635 - tss = &per_cpu(init_tss, get_cpu());
17636 + tss = init_tss + get_cpu();
17637
17638 if (turn_on)
17639 bitmap_clear(t->io_bitmap_ptr, from, num);
17640 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17641 return -EINVAL;
17642 /* Trying to gain more privileges? */
17643 if (level > old) {
17644 +#ifdef CONFIG_GRKERNSEC_IO
17645 + if (grsec_disable_privio) {
17646 + gr_handle_iopl();
17647 + return -EPERM;
17648 + }
17649 +#endif
17650 if (!capable(CAP_SYS_RAWIO))
17651 return -EPERM;
17652 }
17653 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17654 index 3dafc60..aa8e9c4 100644
17655 --- a/arch/x86/kernel/irq.c
17656 +++ b/arch/x86/kernel/irq.c
17657 @@ -18,7 +18,7 @@
17658 #include <asm/mce.h>
17659 #include <asm/hw_irq.h>
17660
17661 -atomic_t irq_err_count;
17662 +atomic_unchecked_t irq_err_count;
17663
17664 /* Function pointer for generic interrupt vector handling */
17665 void (*x86_platform_ipi_callback)(void) = NULL;
17666 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17667 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17668 seq_printf(p, " Machine check polls\n");
17669 #endif
17670 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17671 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17672 #if defined(CONFIG_X86_IO_APIC)
17673 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17674 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17675 #endif
17676 return 0;
17677 }
17678 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17679
17680 u64 arch_irq_stat(void)
17681 {
17682 - u64 sum = atomic_read(&irq_err_count);
17683 + u64 sum = atomic_read_unchecked(&irq_err_count);
17684
17685 #ifdef CONFIG_X86_IO_APIC
17686 - sum += atomic_read(&irq_mis_count);
17687 + sum += atomic_read_unchecked(&irq_mis_count);
17688 #endif
17689 return sum;
17690 }
17691 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17692 index 58b7f27..e112d08 100644
17693 --- a/arch/x86/kernel/irq_32.c
17694 +++ b/arch/x86/kernel/irq_32.c
17695 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17696 __asm__ __volatile__("andl %%esp,%0" :
17697 "=r" (sp) : "0" (THREAD_SIZE - 1));
17698
17699 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17700 + return sp < STACK_WARN;
17701 }
17702
17703 static void print_stack_overflow(void)
17704 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17705 * per-CPU IRQ handling contexts (thread information and stack)
17706 */
17707 union irq_ctx {
17708 - struct thread_info tinfo;
17709 - u32 stack[THREAD_SIZE/sizeof(u32)];
17710 + unsigned long previous_esp;
17711 + u32 stack[THREAD_SIZE/sizeof(u32)];
17712 } __attribute__((aligned(THREAD_SIZE)));
17713
17714 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17715 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17716 static inline int
17717 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17718 {
17719 - union irq_ctx *curctx, *irqctx;
17720 + union irq_ctx *irqctx;
17721 u32 *isp, arg1, arg2;
17722
17723 - curctx = (union irq_ctx *) current_thread_info();
17724 irqctx = __this_cpu_read(hardirq_ctx);
17725
17726 /*
17727 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17728 * handler) we can't do that and just have to keep using the
17729 * current stack (which is the irq stack already after all)
17730 */
17731 - if (unlikely(curctx == irqctx))
17732 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17733 return 0;
17734
17735 /* build the stack frame on the IRQ stack */
17736 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17737 - irqctx->tinfo.task = curctx->tinfo.task;
17738 - irqctx->tinfo.previous_esp = current_stack_pointer;
17739 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17740 + irqctx->previous_esp = current_stack_pointer;
17741
17742 - /* Copy the preempt_count so that the [soft]irq checks work. */
17743 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17744 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17745 + __set_fs(MAKE_MM_SEG(0));
17746 +#endif
17747
17748 if (unlikely(overflow))
17749 call_on_stack(print_stack_overflow, isp);
17750 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17751 : "0" (irq), "1" (desc), "2" (isp),
17752 "D" (desc->handle_irq)
17753 : "memory", "cc", "ecx");
17754 +
17755 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17756 + __set_fs(current_thread_info()->addr_limit);
17757 +#endif
17758 +
17759 return 1;
17760 }
17761
17762 @@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17763 */
17764 void __cpuinit irq_ctx_init(int cpu)
17765 {
17766 - union irq_ctx *irqctx;
17767 -
17768 if (per_cpu(hardirq_ctx, cpu))
17769 return;
17770
17771 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17772 - THREAD_FLAGS,
17773 - THREAD_ORDER));
17774 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17775 - irqctx->tinfo.cpu = cpu;
17776 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17777 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17778 -
17779 - per_cpu(hardirq_ctx, cpu) = irqctx;
17780 -
17781 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17782 - THREAD_FLAGS,
17783 - THREAD_ORDER));
17784 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17785 - irqctx->tinfo.cpu = cpu;
17786 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17787 -
17788 - per_cpu(softirq_ctx, cpu) = irqctx;
17789 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17790 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17791
17792 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17793 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17794 @@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17795 asmlinkage void do_softirq(void)
17796 {
17797 unsigned long flags;
17798 - struct thread_info *curctx;
17799 union irq_ctx *irqctx;
17800 u32 *isp;
17801
17802 @@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17803 local_irq_save(flags);
17804
17805 if (local_softirq_pending()) {
17806 - curctx = current_thread_info();
17807 irqctx = __this_cpu_read(softirq_ctx);
17808 - irqctx->tinfo.task = curctx->task;
17809 - irqctx->tinfo.previous_esp = current_stack_pointer;
17810 + irqctx->previous_esp = current_stack_pointer;
17811
17812 /* build the stack frame on the softirq stack */
17813 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17814 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17815 +
17816 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17817 + __set_fs(MAKE_MM_SEG(0));
17818 +#endif
17819
17820 call_on_stack(__do_softirq, isp);
17821 +
17822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17823 + __set_fs(current_thread_info()->addr_limit);
17824 +#endif
17825 +
17826 /*
17827 * Shouldn't happen, we returned above if in_interrupt():
17828 */
17829 @@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17830 if (unlikely(!desc))
17831 return false;
17832
17833 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17834 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17835 if (unlikely(overflow))
17836 print_stack_overflow();
17837 desc->handle_irq(irq, desc);
17838 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17839 index d04d3ec..ea4b374 100644
17840 --- a/arch/x86/kernel/irq_64.c
17841 +++ b/arch/x86/kernel/irq_64.c
17842 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17843 u64 estack_top, estack_bottom;
17844 u64 curbase = (u64)task_stack_page(current);
17845
17846 - if (user_mode_vm(regs))
17847 + if (user_mode(regs))
17848 return;
17849
17850 if (regs->sp >= curbase + sizeof(struct thread_info) +
17851 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17852 index 1d5d31e..ab846ed 100644
17853 --- a/arch/x86/kernel/kdebugfs.c
17854 +++ b/arch/x86/kernel/kdebugfs.c
17855 @@ -28,6 +28,8 @@ struct setup_data_node {
17856 };
17857
17858 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17859 + size_t count, loff_t *ppos) __size_overflow(3);
17860 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17861 size_t count, loff_t *ppos)
17862 {
17863 struct setup_data_node *node = file->private_data;
17864 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17865 index 8bfb614..2b3b35f 100644
17866 --- a/arch/x86/kernel/kgdb.c
17867 +++ b/arch/x86/kernel/kgdb.c
17868 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17869 #ifdef CONFIG_X86_32
17870 switch (regno) {
17871 case GDB_SS:
17872 - if (!user_mode_vm(regs))
17873 + if (!user_mode(regs))
17874 *(unsigned long *)mem = __KERNEL_DS;
17875 break;
17876 case GDB_SP:
17877 - if (!user_mode_vm(regs))
17878 + if (!user_mode(regs))
17879 *(unsigned long *)mem = kernel_stack_pointer(regs);
17880 break;
17881 case GDB_GS:
17882 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17883 case 'k':
17884 /* clear the trace bit */
17885 linux_regs->flags &= ~X86_EFLAGS_TF;
17886 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17887 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17888
17889 /* set the trace bit if we're stepping */
17890 if (remcomInBuffer[0] == 's') {
17891 linux_regs->flags |= X86_EFLAGS_TF;
17892 - atomic_set(&kgdb_cpu_doing_single_step,
17893 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17894 raw_smp_processor_id());
17895 }
17896
17897 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17898
17899 switch (cmd) {
17900 case DIE_DEBUG:
17901 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17902 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17903 if (user_mode(regs))
17904 return single_step_cont(regs, args);
17905 break;
17906 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17907 index c5e410e..da6aaf9 100644
17908 --- a/arch/x86/kernel/kprobes-opt.c
17909 +++ b/arch/x86/kernel/kprobes-opt.c
17910 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17911 * Verify if the address gap is in 2GB range, because this uses
17912 * a relative jump.
17913 */
17914 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17915 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17916 if (abs(rel) > 0x7fffffff)
17917 return -ERANGE;
17918
17919 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17920 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17921
17922 /* Set probe function call */
17923 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17924 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17925
17926 /* Set returning jmp instruction at the tail of out-of-line buffer */
17927 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17928 - (u8 *)op->kp.addr + op->optinsn.size);
17929 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17930
17931 flush_icache_range((unsigned long) buf,
17932 (unsigned long) buf + TMPL_END_IDX +
17933 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17934 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17935
17936 /* Backup instructions which will be replaced by jump address */
17937 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17938 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17939 RELATIVE_ADDR_SIZE);
17940
17941 insn_buf[0] = RELATIVEJUMP_OPCODE;
17942 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17943 index e213fc8..d783ba4 100644
17944 --- a/arch/x86/kernel/kprobes.c
17945 +++ b/arch/x86/kernel/kprobes.c
17946 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17947 } __attribute__((packed)) *insn;
17948
17949 insn = (struct __arch_relative_insn *)from;
17950 +
17951 + pax_open_kernel();
17952 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17953 insn->op = op;
17954 + pax_close_kernel();
17955 }
17956
17957 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17958 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17959 kprobe_opcode_t opcode;
17960 kprobe_opcode_t *orig_opcodes = opcodes;
17961
17962 - if (search_exception_tables((unsigned long)opcodes))
17963 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17964 return 0; /* Page fault may occur on this address. */
17965
17966 retry:
17967 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17968 /* Another subsystem puts a breakpoint, failed to recover */
17969 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17970 return 0;
17971 + pax_open_kernel();
17972 memcpy(dest, insn.kaddr, insn.length);
17973 + pax_close_kernel();
17974
17975 #ifdef CONFIG_X86_64
17976 if (insn_rip_relative(&insn)) {
17977 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17978 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17979 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17980 disp = (u8 *) dest + insn_offset_displacement(&insn);
17981 + pax_open_kernel();
17982 *(s32 *) disp = (s32) newdisp;
17983 + pax_close_kernel();
17984 }
17985 #endif
17986 return insn.length;
17987 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17988 * nor set current_kprobe, because it doesn't use single
17989 * stepping.
17990 */
17991 - regs->ip = (unsigned long)p->ainsn.insn;
17992 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17993 preempt_enable_no_resched();
17994 return;
17995 }
17996 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17997 if (p->opcode == BREAKPOINT_INSTRUCTION)
17998 regs->ip = (unsigned long)p->addr;
17999 else
18000 - regs->ip = (unsigned long)p->ainsn.insn;
18001 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18002 }
18003
18004 /*
18005 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18006 setup_singlestep(p, regs, kcb, 0);
18007 return 1;
18008 }
18009 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
18010 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18011 /*
18012 * The breakpoint instruction was removed right
18013 * after we hit it. Another cpu has removed
18014 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18015 " movq %rax, 152(%rsp)\n"
18016 RESTORE_REGS_STRING
18017 " popfq\n"
18018 +#ifdef KERNEXEC_PLUGIN
18019 + " btsq $63,(%rsp)\n"
18020 +#endif
18021 #else
18022 " pushf\n"
18023 SAVE_REGS_STRING
18024 @@ -765,7 +775,7 @@ static void __kprobes
18025 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18026 {
18027 unsigned long *tos = stack_addr(regs);
18028 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18029 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18030 unsigned long orig_ip = (unsigned long)p->addr;
18031 kprobe_opcode_t *insn = p->ainsn.insn;
18032
18033 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18034 struct die_args *args = data;
18035 int ret = NOTIFY_DONE;
18036
18037 - if (args->regs && user_mode_vm(args->regs))
18038 + if (args->regs && user_mode(args->regs))
18039 return ret;
18040
18041 switch (val) {
18042 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18043 index ebc9873..1b9724b 100644
18044 --- a/arch/x86/kernel/ldt.c
18045 +++ b/arch/x86/kernel/ldt.c
18046 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18047 if (reload) {
18048 #ifdef CONFIG_SMP
18049 preempt_disable();
18050 - load_LDT(pc);
18051 + load_LDT_nolock(pc);
18052 if (!cpumask_equal(mm_cpumask(current->mm),
18053 cpumask_of(smp_processor_id())))
18054 smp_call_function(flush_ldt, current->mm, 1);
18055 preempt_enable();
18056 #else
18057 - load_LDT(pc);
18058 + load_LDT_nolock(pc);
18059 #endif
18060 }
18061 if (oldsize) {
18062 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18063 return err;
18064
18065 for (i = 0; i < old->size; i++)
18066 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18067 + write_ldt_entry(new->ldt, i, old->ldt + i);
18068 return 0;
18069 }
18070
18071 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18072 retval = copy_ldt(&mm->context, &old_mm->context);
18073 mutex_unlock(&old_mm->context.lock);
18074 }
18075 +
18076 + if (tsk == current) {
18077 + mm->context.vdso = 0;
18078 +
18079 +#ifdef CONFIG_X86_32
18080 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18081 + mm->context.user_cs_base = 0UL;
18082 + mm->context.user_cs_limit = ~0UL;
18083 +
18084 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18085 + cpus_clear(mm->context.cpu_user_cs_mask);
18086 +#endif
18087 +
18088 +#endif
18089 +#endif
18090 +
18091 + }
18092 +
18093 return retval;
18094 }
18095
18096 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18097 }
18098 }
18099
18100 +#ifdef CONFIG_PAX_SEGMEXEC
18101 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18102 + error = -EINVAL;
18103 + goto out_unlock;
18104 + }
18105 +#endif
18106 +
18107 fill_ldt(&ldt, &ldt_info);
18108 if (oldmode)
18109 ldt.avl = 0;
18110 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18111 index 5b19e4d..6476a76 100644
18112 --- a/arch/x86/kernel/machine_kexec_32.c
18113 +++ b/arch/x86/kernel/machine_kexec_32.c
18114 @@ -26,7 +26,7 @@
18115 #include <asm/cacheflush.h>
18116 #include <asm/debugreg.h>
18117
18118 -static void set_idt(void *newidt, __u16 limit)
18119 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18120 {
18121 struct desc_ptr curidt;
18122
18123 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18124 }
18125
18126
18127 -static void set_gdt(void *newgdt, __u16 limit)
18128 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18129 {
18130 struct desc_ptr curgdt;
18131
18132 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18133 }
18134
18135 control_page = page_address(image->control_code_page);
18136 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18137 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18138
18139 relocate_kernel_ptr = control_page;
18140 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18141 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18142 index 0327e2b..e43737b 100644
18143 --- a/arch/x86/kernel/microcode_intel.c
18144 +++ b/arch/x86/kernel/microcode_intel.c
18145 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18146
18147 static int get_ucode_user(void *to, const void *from, size_t n)
18148 {
18149 - return copy_from_user(to, from, n);
18150 + return copy_from_user(to, (const void __force_user *)from, n);
18151 }
18152
18153 static enum ucode_state
18154 request_microcode_user(int cpu, const void __user *buf, size_t size)
18155 {
18156 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18157 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18158 }
18159
18160 static void microcode_fini_cpu(int cpu)
18161 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18162 index f21fd94..61565cd 100644
18163 --- a/arch/x86/kernel/module.c
18164 +++ b/arch/x86/kernel/module.c
18165 @@ -35,15 +35,60 @@
18166 #define DEBUGP(fmt...)
18167 #endif
18168
18169 -void *module_alloc(unsigned long size)
18170 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18171 {
18172 - if (PAGE_ALIGN(size) > MODULES_LEN)
18173 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18174 return NULL;
18175 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18176 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18177 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18178 -1, __builtin_return_address(0));
18179 }
18180
18181 +void *module_alloc(unsigned long size)
18182 +{
18183 +
18184 +#ifdef CONFIG_PAX_KERNEXEC
18185 + return __module_alloc(size, PAGE_KERNEL);
18186 +#else
18187 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18188 +#endif
18189 +
18190 +}
18191 +
18192 +#ifdef CONFIG_PAX_KERNEXEC
18193 +#ifdef CONFIG_X86_32
18194 +void *module_alloc_exec(unsigned long size)
18195 +{
18196 + struct vm_struct *area;
18197 +
18198 + if (size == 0)
18199 + return NULL;
18200 +
18201 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18202 + return area ? area->addr : NULL;
18203 +}
18204 +EXPORT_SYMBOL(module_alloc_exec);
18205 +
18206 +void module_free_exec(struct module *mod, void *module_region)
18207 +{
18208 + vunmap(module_region);
18209 +}
18210 +EXPORT_SYMBOL(module_free_exec);
18211 +#else
18212 +void module_free_exec(struct module *mod, void *module_region)
18213 +{
18214 + module_free(mod, module_region);
18215 +}
18216 +EXPORT_SYMBOL(module_free_exec);
18217 +
18218 +void *module_alloc_exec(unsigned long size)
18219 +{
18220 + return __module_alloc(size, PAGE_KERNEL_RX);
18221 +}
18222 +EXPORT_SYMBOL(module_alloc_exec);
18223 +#endif
18224 +#endif
18225 +
18226 #ifdef CONFIG_X86_32
18227 int apply_relocate(Elf32_Shdr *sechdrs,
18228 const char *strtab,
18229 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18230 unsigned int i;
18231 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18232 Elf32_Sym *sym;
18233 - uint32_t *location;
18234 + uint32_t *plocation, location;
18235
18236 DEBUGP("Applying relocate section %u to %u\n", relsec,
18237 sechdrs[relsec].sh_info);
18238 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18239 /* This is where to make the change */
18240 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18241 - + rel[i].r_offset;
18242 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18243 + location = (uint32_t)plocation;
18244 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18245 + plocation = ktla_ktva((void *)plocation);
18246 /* This is the symbol it is referring to. Note that all
18247 undefined symbols have been resolved. */
18248 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18249 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18250 switch (ELF32_R_TYPE(rel[i].r_info)) {
18251 case R_386_32:
18252 /* We add the value into the location given */
18253 - *location += sym->st_value;
18254 + pax_open_kernel();
18255 + *plocation += sym->st_value;
18256 + pax_close_kernel();
18257 break;
18258 case R_386_PC32:
18259 /* Add the value, subtract its postition */
18260 - *location += sym->st_value - (uint32_t)location;
18261 + pax_open_kernel();
18262 + *plocation += sym->st_value - location;
18263 + pax_close_kernel();
18264 break;
18265 default:
18266 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18267 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18268 case R_X86_64_NONE:
18269 break;
18270 case R_X86_64_64:
18271 + pax_open_kernel();
18272 *(u64 *)loc = val;
18273 + pax_close_kernel();
18274 break;
18275 case R_X86_64_32:
18276 + pax_open_kernel();
18277 *(u32 *)loc = val;
18278 + pax_close_kernel();
18279 if (val != *(u32 *)loc)
18280 goto overflow;
18281 break;
18282 case R_X86_64_32S:
18283 + pax_open_kernel();
18284 *(s32 *)loc = val;
18285 + pax_close_kernel();
18286 if ((s64)val != *(s32 *)loc)
18287 goto overflow;
18288 break;
18289 case R_X86_64_PC32:
18290 val -= (u64)loc;
18291 + pax_open_kernel();
18292 *(u32 *)loc = val;
18293 + pax_close_kernel();
18294 +
18295 #if 0
18296 if ((s64)val != *(s32 *)loc)
18297 goto overflow;
18298 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18299 index 32856fa..ce95eaa 100644
18300 --- a/arch/x86/kernel/nmi.c
18301 +++ b/arch/x86/kernel/nmi.c
18302 @@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18303 dotraplinkage notrace __kprobes void
18304 do_nmi(struct pt_regs *regs, long error_code)
18305 {
18306 +
18307 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18308 + if (!user_mode(regs)) {
18309 + unsigned long cs = regs->cs & 0xFFFF;
18310 + unsigned long ip = ktva_ktla(regs->ip);
18311 +
18312 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18313 + regs->ip = ip;
18314 + }
18315 +#endif
18316 +
18317 nmi_nesting_preprocess(regs);
18318
18319 nmi_enter();
18320 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18321 index 676b8c7..870ba04 100644
18322 --- a/arch/x86/kernel/paravirt-spinlocks.c
18323 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18324 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18325 arch_spin_lock(lock);
18326 }
18327
18328 -struct pv_lock_ops pv_lock_ops = {
18329 +struct pv_lock_ops pv_lock_ops __read_only = {
18330 #ifdef CONFIG_SMP
18331 .spin_is_locked = __ticket_spin_is_locked,
18332 .spin_is_contended = __ticket_spin_is_contended,
18333 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18334 index ab13760..01218e0 100644
18335 --- a/arch/x86/kernel/paravirt.c
18336 +++ b/arch/x86/kernel/paravirt.c
18337 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18338 {
18339 return x;
18340 }
18341 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18342 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18343 +#endif
18344
18345 void __init default_banner(void)
18346 {
18347 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18348 if (opfunc == NULL)
18349 /* If there's no function, patch it with a ud2a (BUG) */
18350 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18351 - else if (opfunc == _paravirt_nop)
18352 + else if (opfunc == (void *)_paravirt_nop)
18353 /* If the operation is a nop, then nop the callsite */
18354 ret = paravirt_patch_nop();
18355
18356 /* identity functions just return their single argument */
18357 - else if (opfunc == _paravirt_ident_32)
18358 + else if (opfunc == (void *)_paravirt_ident_32)
18359 ret = paravirt_patch_ident_32(insnbuf, len);
18360 - else if (opfunc == _paravirt_ident_64)
18361 + else if (opfunc == (void *)_paravirt_ident_64)
18362 ret = paravirt_patch_ident_64(insnbuf, len);
18363 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18364 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18365 + ret = paravirt_patch_ident_64(insnbuf, len);
18366 +#endif
18367
18368 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18369 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18370 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18371 if (insn_len > len || start == NULL)
18372 insn_len = len;
18373 else
18374 - memcpy(insnbuf, start, insn_len);
18375 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18376
18377 return insn_len;
18378 }
18379 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18380 preempt_enable();
18381 }
18382
18383 -struct pv_info pv_info = {
18384 +struct pv_info pv_info __read_only = {
18385 .name = "bare hardware",
18386 .paravirt_enabled = 0,
18387 .kernel_rpl = 0,
18388 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18389 #endif
18390 };
18391
18392 -struct pv_init_ops pv_init_ops = {
18393 +struct pv_init_ops pv_init_ops __read_only = {
18394 .patch = native_patch,
18395 };
18396
18397 -struct pv_time_ops pv_time_ops = {
18398 +struct pv_time_ops pv_time_ops __read_only = {
18399 .sched_clock = native_sched_clock,
18400 .steal_clock = native_steal_clock,
18401 };
18402
18403 -struct pv_irq_ops pv_irq_ops = {
18404 +struct pv_irq_ops pv_irq_ops __read_only = {
18405 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18406 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18407 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18408 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18409 #endif
18410 };
18411
18412 -struct pv_cpu_ops pv_cpu_ops = {
18413 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18414 .cpuid = native_cpuid,
18415 .get_debugreg = native_get_debugreg,
18416 .set_debugreg = native_set_debugreg,
18417 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18418 .end_context_switch = paravirt_nop,
18419 };
18420
18421 -struct pv_apic_ops pv_apic_ops = {
18422 +struct pv_apic_ops pv_apic_ops __read_only = {
18423 #ifdef CONFIG_X86_LOCAL_APIC
18424 .startup_ipi_hook = paravirt_nop,
18425 #endif
18426 };
18427
18428 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18429 +#ifdef CONFIG_X86_32
18430 +#ifdef CONFIG_X86_PAE
18431 +/* 64-bit pagetable entries */
18432 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18433 +#else
18434 /* 32-bit pagetable entries */
18435 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18436 +#endif
18437 #else
18438 /* 64-bit pagetable entries */
18439 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18440 #endif
18441
18442 -struct pv_mmu_ops pv_mmu_ops = {
18443 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18444
18445 .read_cr2 = native_read_cr2,
18446 .write_cr2 = native_write_cr2,
18447 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18448 .make_pud = PTE_IDENT,
18449
18450 .set_pgd = native_set_pgd,
18451 + .set_pgd_batched = native_set_pgd_batched,
18452 #endif
18453 #endif /* PAGETABLE_LEVELS >= 3 */
18454
18455 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18456 },
18457
18458 .set_fixmap = native_set_fixmap,
18459 +
18460 +#ifdef CONFIG_PAX_KERNEXEC
18461 + .pax_open_kernel = native_pax_open_kernel,
18462 + .pax_close_kernel = native_pax_close_kernel,
18463 +#endif
18464 +
18465 };
18466
18467 EXPORT_SYMBOL_GPL(pv_time_ops);
18468 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18469 index 35ccf75..7a15747 100644
18470 --- a/arch/x86/kernel/pci-iommu_table.c
18471 +++ b/arch/x86/kernel/pci-iommu_table.c
18472 @@ -2,7 +2,7 @@
18473 #include <asm/iommu_table.h>
18474 #include <linux/string.h>
18475 #include <linux/kallsyms.h>
18476 -
18477 +#include <linux/sched.h>
18478
18479 #define DEBUG 1
18480
18481 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18482 index 1d92a5a..7bc8c29 100644
18483 --- a/arch/x86/kernel/process.c
18484 +++ b/arch/x86/kernel/process.c
18485 @@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18486
18487 void free_thread_info(struct thread_info *ti)
18488 {
18489 - free_thread_xstate(ti->task);
18490 free_pages((unsigned long)ti, THREAD_ORDER);
18491 }
18492
18493 +static struct kmem_cache *task_struct_cachep;
18494 +
18495 void arch_task_cache_init(void)
18496 {
18497 - task_xstate_cachep =
18498 - kmem_cache_create("task_xstate", xstate_size,
18499 + /* create a slab on which task_structs can be allocated */
18500 + task_struct_cachep =
18501 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18502 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18503 +
18504 + task_xstate_cachep =
18505 + kmem_cache_create("task_xstate", xstate_size,
18506 __alignof__(union thread_xstate),
18507 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18508 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18509 +}
18510 +
18511 +struct task_struct *alloc_task_struct_node(int node)
18512 +{
18513 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18514 +}
18515 +
18516 +void free_task_struct(struct task_struct *task)
18517 +{
18518 + free_thread_xstate(task);
18519 + kmem_cache_free(task_struct_cachep, task);
18520 }
18521
18522 /*
18523 @@ -91,7 +108,7 @@ void exit_thread(void)
18524 unsigned long *bp = t->io_bitmap_ptr;
18525
18526 if (bp) {
18527 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18528 + struct tss_struct *tss = init_tss + get_cpu();
18529
18530 t->io_bitmap_ptr = NULL;
18531 clear_thread_flag(TIF_IO_BITMAP);
18532 @@ -127,7 +144,7 @@ void show_regs_common(void)
18533
18534 printk(KERN_CONT "\n");
18535 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18536 - current->pid, current->comm, print_tainted(),
18537 + task_pid_nr(current), current->comm, print_tainted(),
18538 init_utsname()->release,
18539 (int)strcspn(init_utsname()->version, " "),
18540 init_utsname()->version);
18541 @@ -141,6 +158,9 @@ void flush_thread(void)
18542 {
18543 struct task_struct *tsk = current;
18544
18545 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18546 + loadsegment(gs, 0);
18547 +#endif
18548 flush_ptrace_hw_breakpoint(tsk);
18549 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18550 /*
18551 @@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18552 regs.di = (unsigned long) arg;
18553
18554 #ifdef CONFIG_X86_32
18555 - regs.ds = __USER_DS;
18556 - regs.es = __USER_DS;
18557 + regs.ds = __KERNEL_DS;
18558 + regs.es = __KERNEL_DS;
18559 regs.fs = __KERNEL_PERCPU;
18560 - regs.gs = __KERNEL_STACK_CANARY;
18561 + savesegment(gs, regs.gs);
18562 #else
18563 regs.ss = __KERNEL_DS;
18564 #endif
18565 @@ -392,7 +412,7 @@ static void __exit_idle(void)
18566 void exit_idle(void)
18567 {
18568 /* idle loop has pid 0 */
18569 - if (current->pid)
18570 + if (task_pid_nr(current))
18571 return;
18572 __exit_idle();
18573 }
18574 @@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18575
18576 return ret;
18577 }
18578 -void stop_this_cpu(void *dummy)
18579 +__noreturn void stop_this_cpu(void *dummy)
18580 {
18581 local_irq_disable();
18582 /*
18583 @@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18584 }
18585 early_param("idle", idle_setup);
18586
18587 -unsigned long arch_align_stack(unsigned long sp)
18588 +#ifdef CONFIG_PAX_RANDKSTACK
18589 +void pax_randomize_kstack(struct pt_regs *regs)
18590 {
18591 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18592 - sp -= get_random_int() % 8192;
18593 - return sp & ~0xf;
18594 -}
18595 + struct thread_struct *thread = &current->thread;
18596 + unsigned long time;
18597
18598 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18599 -{
18600 - unsigned long range_end = mm->brk + 0x02000000;
18601 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18602 -}
18603 + if (!randomize_va_space)
18604 + return;
18605 +
18606 + if (v8086_mode(regs))
18607 + return;
18608
18609 + rdtscl(time);
18610 +
18611 + /* P4 seems to return a 0 LSB, ignore it */
18612 +#ifdef CONFIG_MPENTIUM4
18613 + time &= 0x3EUL;
18614 + time <<= 2;
18615 +#elif defined(CONFIG_X86_64)
18616 + time &= 0xFUL;
18617 + time <<= 4;
18618 +#else
18619 + time &= 0x1FUL;
18620 + time <<= 3;
18621 +#endif
18622 +
18623 + thread->sp0 ^= time;
18624 + load_sp0(init_tss + smp_processor_id(), thread);
18625 +
18626 +#ifdef CONFIG_X86_64
18627 + percpu_write(kernel_stack, thread->sp0);
18628 +#endif
18629 +}
18630 +#endif
18631 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18632 index ae68473..7b0bb71 100644
18633 --- a/arch/x86/kernel/process_32.c
18634 +++ b/arch/x86/kernel/process_32.c
18635 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18636 unsigned long thread_saved_pc(struct task_struct *tsk)
18637 {
18638 return ((unsigned long *)tsk->thread.sp)[3];
18639 +//XXX return tsk->thread.eip;
18640 }
18641
18642 void __show_regs(struct pt_regs *regs, int all)
18643 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18644 unsigned long sp;
18645 unsigned short ss, gs;
18646
18647 - if (user_mode_vm(regs)) {
18648 + if (user_mode(regs)) {
18649 sp = regs->sp;
18650 ss = regs->ss & 0xffff;
18651 - gs = get_user_gs(regs);
18652 } else {
18653 sp = kernel_stack_pointer(regs);
18654 savesegment(ss, ss);
18655 - savesegment(gs, gs);
18656 }
18657 + gs = get_user_gs(regs);
18658
18659 show_regs_common();
18660
18661 @@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18662 struct task_struct *tsk;
18663 int err;
18664
18665 - childregs = task_pt_regs(p);
18666 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18667 *childregs = *regs;
18668 childregs->ax = 0;
18669 childregs->sp = sp;
18670
18671 p->thread.sp = (unsigned long) childregs;
18672 p->thread.sp0 = (unsigned long) (childregs+1);
18673 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18674
18675 p->thread.ip = (unsigned long) ret_from_fork;
18676
18677 @@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18678 struct thread_struct *prev = &prev_p->thread,
18679 *next = &next_p->thread;
18680 int cpu = smp_processor_id();
18681 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18682 + struct tss_struct *tss = init_tss + cpu;
18683 fpu_switch_t fpu;
18684
18685 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18686 @@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18687 */
18688 lazy_save_gs(prev->gs);
18689
18690 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18691 + __set_fs(task_thread_info(next_p)->addr_limit);
18692 +#endif
18693 +
18694 /*
18695 * Load the per-thread Thread-Local Storage descriptor.
18696 */
18697 @@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18698 */
18699 arch_end_context_switch(next_p);
18700
18701 + percpu_write(current_task, next_p);
18702 + percpu_write(current_tinfo, &next_p->tinfo);
18703 +
18704 /*
18705 * Restore %gs if needed (which is common)
18706 */
18707 @@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18708
18709 switch_fpu_finish(next_p, fpu);
18710
18711 - percpu_write(current_task, next_p);
18712 -
18713 return prev_p;
18714 }
18715
18716 @@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18717 } while (count++ < 16);
18718 return 0;
18719 }
18720 -
18721 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18722 index 43d8b48..c45d566 100644
18723 --- a/arch/x86/kernel/process_64.c
18724 +++ b/arch/x86/kernel/process_64.c
18725 @@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18726 struct pt_regs *childregs;
18727 struct task_struct *me = current;
18728
18729 - childregs = ((struct pt_regs *)
18730 - (THREAD_SIZE + task_stack_page(p))) - 1;
18731 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18732 *childregs = *regs;
18733
18734 childregs->ax = 0;
18735 @@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18736 p->thread.sp = (unsigned long) childregs;
18737 p->thread.sp0 = (unsigned long) (childregs+1);
18738 p->thread.usersp = me->thread.usersp;
18739 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18740
18741 set_tsk_thread_flag(p, TIF_FORK);
18742
18743 @@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18744 struct thread_struct *prev = &prev_p->thread;
18745 struct thread_struct *next = &next_p->thread;
18746 int cpu = smp_processor_id();
18747 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18748 + struct tss_struct *tss = init_tss + cpu;
18749 unsigned fsindex, gsindex;
18750 fpu_switch_t fpu;
18751
18752 @@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18753 prev->usersp = percpu_read(old_rsp);
18754 percpu_write(old_rsp, next->usersp);
18755 percpu_write(current_task, next_p);
18756 + percpu_write(current_tinfo, &next_p->tinfo);
18757
18758 - percpu_write(kernel_stack,
18759 - (unsigned long)task_stack_page(next_p) +
18760 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18761 + percpu_write(kernel_stack, next->sp0);
18762
18763 /*
18764 * Now maybe reload the debug registers and handle I/O bitmaps
18765 @@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18766 if (!p || p == current || p->state == TASK_RUNNING)
18767 return 0;
18768 stack = (unsigned long)task_stack_page(p);
18769 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18770 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18771 return 0;
18772 fp = *(u64 *)(p->thread.sp);
18773 do {
18774 - if (fp < (unsigned long)stack ||
18775 - fp >= (unsigned long)stack+THREAD_SIZE)
18776 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18777 return 0;
18778 ip = *(u64 *)(fp+8);
18779 if (!in_sched_functions(ip))
18780 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18781 index cf11783..e7ce551 100644
18782 --- a/arch/x86/kernel/ptrace.c
18783 +++ b/arch/x86/kernel/ptrace.c
18784 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18785 unsigned long addr, unsigned long data)
18786 {
18787 int ret;
18788 - unsigned long __user *datap = (unsigned long __user *)data;
18789 + unsigned long __user *datap = (__force unsigned long __user *)data;
18790
18791 switch (request) {
18792 /* read the word at location addr in the USER area. */
18793 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18794 if ((int) addr < 0)
18795 return -EIO;
18796 ret = do_get_thread_area(child, addr,
18797 - (struct user_desc __user *)data);
18798 + (__force struct user_desc __user *) data);
18799 break;
18800
18801 case PTRACE_SET_THREAD_AREA:
18802 if ((int) addr < 0)
18803 return -EIO;
18804 ret = do_set_thread_area(child, addr,
18805 - (struct user_desc __user *)data, 0);
18806 + (__force struct user_desc __user *) data, 0);
18807 break;
18808 #endif
18809
18810 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18811 memset(info, 0, sizeof(*info));
18812 info->si_signo = SIGTRAP;
18813 info->si_code = si_code;
18814 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18815 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18816 }
18817
18818 void user_single_step_siginfo(struct task_struct *tsk,
18819 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18820 # define IS_IA32 0
18821 #endif
18822
18823 +#ifdef CONFIG_GRKERNSEC_SETXID
18824 +extern void gr_delayed_cred_worker(void);
18825 +#endif
18826 +
18827 /*
18828 * We must return the syscall number to actually look up in the table.
18829 * This can be -1L to skip running any syscall at all.
18830 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18831 {
18832 long ret = 0;
18833
18834 +#ifdef CONFIG_GRKERNSEC_SETXID
18835 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18836 + gr_delayed_cred_worker();
18837 +#endif
18838 +
18839 /*
18840 * If we stepped into a sysenter/syscall insn, it trapped in
18841 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18842 @@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18843 {
18844 bool step;
18845
18846 +#ifdef CONFIG_GRKERNSEC_SETXID
18847 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18848 + gr_delayed_cred_worker();
18849 +#endif
18850 +
18851 audit_syscall_exit(regs);
18852
18853 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18854 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18855 index 42eb330..139955c 100644
18856 --- a/arch/x86/kernel/pvclock.c
18857 +++ b/arch/x86/kernel/pvclock.c
18858 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18859 return pv_tsc_khz;
18860 }
18861
18862 -static atomic64_t last_value = ATOMIC64_INIT(0);
18863 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18864
18865 void pvclock_resume(void)
18866 {
18867 - atomic64_set(&last_value, 0);
18868 + atomic64_set_unchecked(&last_value, 0);
18869 }
18870
18871 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18872 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18873 * updating at the same time, and one of them could be slightly behind,
18874 * making the assumption that last_value always go forward fail to hold.
18875 */
18876 - last = atomic64_read(&last_value);
18877 + last = atomic64_read_unchecked(&last_value);
18878 do {
18879 if (ret < last)
18880 return last;
18881 - last = atomic64_cmpxchg(&last_value, last, ret);
18882 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18883 } while (unlikely(last != ret));
18884
18885 return ret;
18886 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18887 index 3034ee5..7cfbfa6 100644
18888 --- a/arch/x86/kernel/reboot.c
18889 +++ b/arch/x86/kernel/reboot.c
18890 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18891 EXPORT_SYMBOL(pm_power_off);
18892
18893 static const struct desc_ptr no_idt = {};
18894 -static int reboot_mode;
18895 +static unsigned short reboot_mode;
18896 enum reboot_type reboot_type = BOOT_ACPI;
18897 int reboot_force;
18898
18899 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18900 extern const unsigned char machine_real_restart_asm[];
18901 extern const u64 machine_real_restart_gdt[3];
18902
18903 -void machine_real_restart(unsigned int type)
18904 +__noreturn void machine_real_restart(unsigned int type)
18905 {
18906 void *restart_va;
18907 unsigned long restart_pa;
18908 - void (*restart_lowmem)(unsigned int);
18909 + void (* __noreturn restart_lowmem)(unsigned int);
18910 u64 *lowmem_gdt;
18911
18912 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18913 + struct desc_struct *gdt;
18914 +#endif
18915 +
18916 local_irq_disable();
18917
18918 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18919 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18920 boot)". This seems like a fairly standard thing that gets set by
18921 REBOOT.COM programs, and the previous reset routine did this
18922 too. */
18923 - *((unsigned short *)0x472) = reboot_mode;
18924 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18925
18926 /* Patch the GDT in the low memory trampoline */
18927 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18928
18929 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18930 restart_pa = virt_to_phys(restart_va);
18931 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18932 + restart_lowmem = (void *)restart_pa;
18933
18934 /* GDT[0]: GDT self-pointer */
18935 lowmem_gdt[0] =
18936 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18937 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18938
18939 /* Jump to the identity-mapped low memory code */
18940 +
18941 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18942 + gdt = get_cpu_gdt_table(smp_processor_id());
18943 + pax_open_kernel();
18944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18945 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18946 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18947 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18948 +#endif
18949 +#ifdef CONFIG_PAX_KERNEXEC
18950 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18951 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18952 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18953 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18954 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18955 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18956 +#endif
18957 + pax_close_kernel();
18958 +#endif
18959 +
18960 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18961 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18962 + unreachable();
18963 +#else
18964 restart_lowmem(type);
18965 +#endif
18966 +
18967 }
18968 #ifdef CONFIG_APM_MODULE
18969 EXPORT_SYMBOL(machine_real_restart);
18970 @@ -564,7 +594,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18971 * try to force a triple fault and then cycle between hitting the keyboard
18972 * controller and doing that
18973 */
18974 -static void native_machine_emergency_restart(void)
18975 +__noreturn static void native_machine_emergency_restart(void)
18976 {
18977 int i;
18978 int attempt = 0;
18979 @@ -688,13 +718,13 @@ void native_machine_shutdown(void)
18980 #endif
18981 }
18982
18983 -static void __machine_emergency_restart(int emergency)
18984 +static __noreturn void __machine_emergency_restart(int emergency)
18985 {
18986 reboot_emergency = emergency;
18987 machine_ops.emergency_restart();
18988 }
18989
18990 -static void native_machine_restart(char *__unused)
18991 +static __noreturn void native_machine_restart(char *__unused)
18992 {
18993 printk("machine restart\n");
18994
18995 @@ -703,7 +733,7 @@ static void native_machine_restart(char *__unused)
18996 __machine_emergency_restart(0);
18997 }
18998
18999 -static void native_machine_halt(void)
19000 +static __noreturn void native_machine_halt(void)
19001 {
19002 /* stop other cpus and apics */
19003 machine_shutdown();
19004 @@ -714,7 +744,7 @@ static void native_machine_halt(void)
19005 stop_this_cpu(NULL);
19006 }
19007
19008 -static void native_machine_power_off(void)
19009 +__noreturn static void native_machine_power_off(void)
19010 {
19011 if (pm_power_off) {
19012 if (!reboot_force)
19013 @@ -723,6 +753,7 @@ static void native_machine_power_off(void)
19014 }
19015 /* a fallback in case there is no PM info available */
19016 tboot_shutdown(TB_SHUTDOWN_HALT);
19017 + unreachable();
19018 }
19019
19020 struct machine_ops machine_ops = {
19021 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19022 index 7a6f3b3..bed145d7 100644
19023 --- a/arch/x86/kernel/relocate_kernel_64.S
19024 +++ b/arch/x86/kernel/relocate_kernel_64.S
19025 @@ -11,6 +11,7 @@
19026 #include <asm/kexec.h>
19027 #include <asm/processor-flags.h>
19028 #include <asm/pgtable_types.h>
19029 +#include <asm/alternative-asm.h>
19030
19031 /*
19032 * Must be relocatable PIC code callable as a C function
19033 @@ -160,13 +161,14 @@ identity_mapped:
19034 xorq %rbp, %rbp
19035 xorq %r8, %r8
19036 xorq %r9, %r9
19037 - xorq %r10, %r9
19038 + xorq %r10, %r10
19039 xorq %r11, %r11
19040 xorq %r12, %r12
19041 xorq %r13, %r13
19042 xorq %r14, %r14
19043 xorq %r15, %r15
19044
19045 + pax_force_retaddr 0, 1
19046 ret
19047
19048 1:
19049 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19050 index 1a29015..712f324 100644
19051 --- a/arch/x86/kernel/setup.c
19052 +++ b/arch/x86/kernel/setup.c
19053 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
19054
19055 switch (data->type) {
19056 case SETUP_E820_EXT:
19057 - parse_e820_ext(data);
19058 + parse_e820_ext((struct setup_data __force_kernel *)data);
19059 break;
19060 case SETUP_DTB:
19061 add_dtb(pa_data);
19062 @@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
19063 * area (640->1Mb) as ram even though it is not.
19064 * take them out.
19065 */
19066 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19067 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19068 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19069 }
19070
19071 @@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19072
19073 if (!boot_params.hdr.root_flags)
19074 root_mountflags &= ~MS_RDONLY;
19075 - init_mm.start_code = (unsigned long) _text;
19076 - init_mm.end_code = (unsigned long) _etext;
19077 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19078 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19079 init_mm.end_data = (unsigned long) _edata;
19080 init_mm.brk = _brk_end;
19081
19082 - code_resource.start = virt_to_phys(_text);
19083 - code_resource.end = virt_to_phys(_etext)-1;
19084 - data_resource.start = virt_to_phys(_etext);
19085 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19086 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19087 + data_resource.start = virt_to_phys(_sdata);
19088 data_resource.end = virt_to_phys(_edata)-1;
19089 bss_resource.start = virt_to_phys(&__bss_start);
19090 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19091 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19092 index 5a98aa2..2f9288d 100644
19093 --- a/arch/x86/kernel/setup_percpu.c
19094 +++ b/arch/x86/kernel/setup_percpu.c
19095 @@ -21,19 +21,17 @@
19096 #include <asm/cpu.h>
19097 #include <asm/stackprotector.h>
19098
19099 -DEFINE_PER_CPU(int, cpu_number);
19100 +#ifdef CONFIG_SMP
19101 +DEFINE_PER_CPU(unsigned int, cpu_number);
19102 EXPORT_PER_CPU_SYMBOL(cpu_number);
19103 +#endif
19104
19105 -#ifdef CONFIG_X86_64
19106 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19107 -#else
19108 -#define BOOT_PERCPU_OFFSET 0
19109 -#endif
19110
19111 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19112 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19113
19114 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19115 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19116 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19117 };
19118 EXPORT_SYMBOL(__per_cpu_offset);
19119 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19120 {
19121 #ifdef CONFIG_X86_32
19122 struct desc_struct gdt;
19123 + unsigned long base = per_cpu_offset(cpu);
19124
19125 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19126 - 0x2 | DESCTYPE_S, 0x8);
19127 - gdt.s = 1;
19128 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19129 + 0x83 | DESCTYPE_S, 0xC);
19130 write_gdt_entry(get_cpu_gdt_table(cpu),
19131 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19132 #endif
19133 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19134 /* alrighty, percpu areas up and running */
19135 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19136 for_each_possible_cpu(cpu) {
19137 +#ifdef CONFIG_CC_STACKPROTECTOR
19138 +#ifdef CONFIG_X86_32
19139 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19140 +#endif
19141 +#endif
19142 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19143 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19144 per_cpu(cpu_number, cpu) = cpu;
19145 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19146 */
19147 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19148 #endif
19149 +#ifdef CONFIG_CC_STACKPROTECTOR
19150 +#ifdef CONFIG_X86_32
19151 + if (!cpu)
19152 + per_cpu(stack_canary.canary, cpu) = canary;
19153 +#endif
19154 +#endif
19155 /*
19156 * Up to this point, the boot CPU has been using .init.data
19157 * area. Reload any changed state for the boot CPU.
19158 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19159 index 115eac4..c0591d5 100644
19160 --- a/arch/x86/kernel/signal.c
19161 +++ b/arch/x86/kernel/signal.c
19162 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19163 * Align the stack pointer according to the i386 ABI,
19164 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19165 */
19166 - sp = ((sp + 4) & -16ul) - 4;
19167 + sp = ((sp - 12) & -16ul) - 4;
19168 #else /* !CONFIG_X86_32 */
19169 sp = round_down(sp, 16) - 8;
19170 #endif
19171 @@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19172 * Return an always-bogus address instead so we will die with SIGSEGV.
19173 */
19174 if (onsigstack && !likely(on_sig_stack(sp)))
19175 - return (void __user *)-1L;
19176 + return (__force void __user *)-1L;
19177
19178 /* save i387 state */
19179 if (used_math() && save_i387_xstate(*fpstate) < 0)
19180 - return (void __user *)-1L;
19181 + return (__force void __user *)-1L;
19182
19183 return (void __user *)sp;
19184 }
19185 @@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19186 }
19187
19188 if (current->mm->context.vdso)
19189 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19190 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19191 else
19192 - restorer = &frame->retcode;
19193 + restorer = (void __user *)&frame->retcode;
19194 if (ka->sa.sa_flags & SA_RESTORER)
19195 restorer = ka->sa.sa_restorer;
19196
19197 @@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19198 * reasons and because gdb uses it as a signature to notice
19199 * signal handler stack frames.
19200 */
19201 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19202 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19203
19204 if (err)
19205 return -EFAULT;
19206 @@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19207 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19208
19209 /* Set up to return from userspace. */
19210 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19211 + if (current->mm->context.vdso)
19212 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19213 + else
19214 + restorer = (void __user *)&frame->retcode;
19215 if (ka->sa.sa_flags & SA_RESTORER)
19216 restorer = ka->sa.sa_restorer;
19217 put_user_ex(restorer, &frame->pretcode);
19218 @@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19219 * reasons and because gdb uses it as a signature to notice
19220 * signal handler stack frames.
19221 */
19222 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19223 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19224 } put_user_catch(err);
19225
19226 if (err)
19227 @@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19228 * X86_32: vm86 regs switched out by assembly code before reaching
19229 * here, so testing against kernel CS suffices.
19230 */
19231 - if (!user_mode(regs))
19232 + if (!user_mode_novm(regs))
19233 return;
19234
19235 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19236 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19237 index 6e1e406..edfb7cb 100644
19238 --- a/arch/x86/kernel/smpboot.c
19239 +++ b/arch/x86/kernel/smpboot.c
19240 @@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19241 set_idle_for_cpu(cpu, c_idle.idle);
19242 do_rest:
19243 per_cpu(current_task, cpu) = c_idle.idle;
19244 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19245 #ifdef CONFIG_X86_32
19246 /* Stack for startup_32 can be just as for start_secondary onwards */
19247 irq_ctx_init(cpu);
19248 #else
19249 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19250 initial_gs = per_cpu_offset(cpu);
19251 - per_cpu(kernel_stack, cpu) =
19252 - (unsigned long)task_stack_page(c_idle.idle) -
19253 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19254 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19255 #endif
19256 +
19257 + pax_open_kernel();
19258 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19259 + pax_close_kernel();
19260 +
19261 initial_code = (unsigned long)start_secondary;
19262 stack_start = c_idle.idle->thread.sp;
19263
19264 @@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19265
19266 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19267
19268 +#ifdef CONFIG_PAX_PER_CPU_PGD
19269 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19270 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19271 + KERNEL_PGD_PTRS);
19272 +#endif
19273 +
19274 err = do_boot_cpu(apicid, cpu);
19275 if (err) {
19276 pr_debug("do_boot_cpu failed %d\n", err);
19277 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19278 index c346d11..d43b163 100644
19279 --- a/arch/x86/kernel/step.c
19280 +++ b/arch/x86/kernel/step.c
19281 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19282 struct desc_struct *desc;
19283 unsigned long base;
19284
19285 - seg &= ~7UL;
19286 + seg >>= 3;
19287
19288 mutex_lock(&child->mm->context.lock);
19289 - if (unlikely((seg >> 3) >= child->mm->context.size))
19290 + if (unlikely(seg >= child->mm->context.size))
19291 addr = -1L; /* bogus selector, access would fault */
19292 else {
19293 desc = child->mm->context.ldt + seg;
19294 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19295 addr += base;
19296 }
19297 mutex_unlock(&child->mm->context.lock);
19298 - }
19299 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19300 + addr = ktla_ktva(addr);
19301
19302 return addr;
19303 }
19304 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19305 unsigned char opcode[15];
19306 unsigned long addr = convert_ip_to_linear(child, regs);
19307
19308 + if (addr == -EINVAL)
19309 + return 0;
19310 +
19311 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19312 for (i = 0; i < copied; i++) {
19313 switch (opcode[i]) {
19314 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19315 index 0b0cb5f..db6b9ed 100644
19316 --- a/arch/x86/kernel/sys_i386_32.c
19317 +++ b/arch/x86/kernel/sys_i386_32.c
19318 @@ -24,17 +24,224 @@
19319
19320 #include <asm/syscalls.h>
19321
19322 -/*
19323 - * Do a system call from kernel instead of calling sys_execve so we
19324 - * end up with proper pt_regs.
19325 - */
19326 -int kernel_execve(const char *filename,
19327 - const char *const argv[],
19328 - const char *const envp[])
19329 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19330 {
19331 - long __res;
19332 - asm volatile ("int $0x80"
19333 - : "=a" (__res)
19334 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19335 - return __res;
19336 + unsigned long pax_task_size = TASK_SIZE;
19337 +
19338 +#ifdef CONFIG_PAX_SEGMEXEC
19339 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19340 + pax_task_size = SEGMEXEC_TASK_SIZE;
19341 +#endif
19342 +
19343 + if (len > pax_task_size || addr > pax_task_size - len)
19344 + return -EINVAL;
19345 +
19346 + return 0;
19347 +}
19348 +
19349 +unsigned long
19350 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19351 + unsigned long len, unsigned long pgoff, unsigned long flags)
19352 +{
19353 + struct mm_struct *mm = current->mm;
19354 + struct vm_area_struct *vma;
19355 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19356 +
19357 +#ifdef CONFIG_PAX_SEGMEXEC
19358 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19359 + pax_task_size = SEGMEXEC_TASK_SIZE;
19360 +#endif
19361 +
19362 + pax_task_size -= PAGE_SIZE;
19363 +
19364 + if (len > pax_task_size)
19365 + return -ENOMEM;
19366 +
19367 + if (flags & MAP_FIXED)
19368 + return addr;
19369 +
19370 +#ifdef CONFIG_PAX_RANDMMAP
19371 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19372 +#endif
19373 +
19374 + if (addr) {
19375 + addr = PAGE_ALIGN(addr);
19376 + if (pax_task_size - len >= addr) {
19377 + vma = find_vma(mm, addr);
19378 + if (check_heap_stack_gap(vma, addr, len))
19379 + return addr;
19380 + }
19381 + }
19382 + if (len > mm->cached_hole_size) {
19383 + start_addr = addr = mm->free_area_cache;
19384 + } else {
19385 + start_addr = addr = mm->mmap_base;
19386 + mm->cached_hole_size = 0;
19387 + }
19388 +
19389 +#ifdef CONFIG_PAX_PAGEEXEC
19390 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19391 + start_addr = 0x00110000UL;
19392 +
19393 +#ifdef CONFIG_PAX_RANDMMAP
19394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19395 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19396 +#endif
19397 +
19398 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19399 + start_addr = addr = mm->mmap_base;
19400 + else
19401 + addr = start_addr;
19402 + }
19403 +#endif
19404 +
19405 +full_search:
19406 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19407 + /* At this point: (!vma || addr < vma->vm_end). */
19408 + if (pax_task_size - len < addr) {
19409 + /*
19410 + * Start a new search - just in case we missed
19411 + * some holes.
19412 + */
19413 + if (start_addr != mm->mmap_base) {
19414 + start_addr = addr = mm->mmap_base;
19415 + mm->cached_hole_size = 0;
19416 + goto full_search;
19417 + }
19418 + return -ENOMEM;
19419 + }
19420 + if (check_heap_stack_gap(vma, addr, len))
19421 + break;
19422 + if (addr + mm->cached_hole_size < vma->vm_start)
19423 + mm->cached_hole_size = vma->vm_start - addr;
19424 + addr = vma->vm_end;
19425 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19426 + start_addr = addr = mm->mmap_base;
19427 + mm->cached_hole_size = 0;
19428 + goto full_search;
19429 + }
19430 + }
19431 +
19432 + /*
19433 + * Remember the place where we stopped the search:
19434 + */
19435 + mm->free_area_cache = addr + len;
19436 + return addr;
19437 +}
19438 +
19439 +unsigned long
19440 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19441 + const unsigned long len, const unsigned long pgoff,
19442 + const unsigned long flags)
19443 +{
19444 + struct vm_area_struct *vma;
19445 + struct mm_struct *mm = current->mm;
19446 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19447 +
19448 +#ifdef CONFIG_PAX_SEGMEXEC
19449 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19450 + pax_task_size = SEGMEXEC_TASK_SIZE;
19451 +#endif
19452 +
19453 + pax_task_size -= PAGE_SIZE;
19454 +
19455 + /* requested length too big for entire address space */
19456 + if (len > pax_task_size)
19457 + return -ENOMEM;
19458 +
19459 + if (flags & MAP_FIXED)
19460 + return addr;
19461 +
19462 +#ifdef CONFIG_PAX_PAGEEXEC
19463 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19464 + goto bottomup;
19465 +#endif
19466 +
19467 +#ifdef CONFIG_PAX_RANDMMAP
19468 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19469 +#endif
19470 +
19471 + /* requesting a specific address */
19472 + if (addr) {
19473 + addr = PAGE_ALIGN(addr);
19474 + if (pax_task_size - len >= addr) {
19475 + vma = find_vma(mm, addr);
19476 + if (check_heap_stack_gap(vma, addr, len))
19477 + return addr;
19478 + }
19479 + }
19480 +
19481 + /* check if free_area_cache is useful for us */
19482 + if (len <= mm->cached_hole_size) {
19483 + mm->cached_hole_size = 0;
19484 + mm->free_area_cache = mm->mmap_base;
19485 + }
19486 +
19487 + /* either no address requested or can't fit in requested address hole */
19488 + addr = mm->free_area_cache;
19489 +
19490 + /* make sure it can fit in the remaining address space */
19491 + if (addr > len) {
19492 + vma = find_vma(mm, addr-len);
19493 + if (check_heap_stack_gap(vma, addr - len, len))
19494 + /* remember the address as a hint for next time */
19495 + return (mm->free_area_cache = addr-len);
19496 + }
19497 +
19498 + if (mm->mmap_base < len)
19499 + goto bottomup;
19500 +
19501 + addr = mm->mmap_base-len;
19502 +
19503 + do {
19504 + /*
19505 + * Lookup failure means no vma is above this address,
19506 + * else if new region fits below vma->vm_start,
19507 + * return with success:
19508 + */
19509 + vma = find_vma(mm, addr);
19510 + if (check_heap_stack_gap(vma, addr, len))
19511 + /* remember the address as a hint for next time */
19512 + return (mm->free_area_cache = addr);
19513 +
19514 + /* remember the largest hole we saw so far */
19515 + if (addr + mm->cached_hole_size < vma->vm_start)
19516 + mm->cached_hole_size = vma->vm_start - addr;
19517 +
19518 + /* try just below the current vma->vm_start */
19519 + addr = skip_heap_stack_gap(vma, len);
19520 + } while (!IS_ERR_VALUE(addr));
19521 +
19522 +bottomup:
19523 + /*
19524 + * A failed mmap() very likely causes application failure,
19525 + * so fall back to the bottom-up function here. This scenario
19526 + * can happen with large stack limits and large mmap()
19527 + * allocations.
19528 + */
19529 +
19530 +#ifdef CONFIG_PAX_SEGMEXEC
19531 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19532 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19533 + else
19534 +#endif
19535 +
19536 + mm->mmap_base = TASK_UNMAPPED_BASE;
19537 +
19538 +#ifdef CONFIG_PAX_RANDMMAP
19539 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19540 + mm->mmap_base += mm->delta_mmap;
19541 +#endif
19542 +
19543 + mm->free_area_cache = mm->mmap_base;
19544 + mm->cached_hole_size = ~0UL;
19545 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19546 + /*
19547 + * Restore the topdown base:
19548 + */
19549 + mm->mmap_base = base;
19550 + mm->free_area_cache = base;
19551 + mm->cached_hole_size = ~0UL;
19552 +
19553 + return addr;
19554 }
19555 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19556 index b4d3c39..82bb73b 100644
19557 --- a/arch/x86/kernel/sys_x86_64.c
19558 +++ b/arch/x86/kernel/sys_x86_64.c
19559 @@ -95,8 +95,8 @@ out:
19560 return error;
19561 }
19562
19563 -static void find_start_end(unsigned long flags, unsigned long *begin,
19564 - unsigned long *end)
19565 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19566 + unsigned long *begin, unsigned long *end)
19567 {
19568 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19569 unsigned long new_begin;
19570 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19571 *begin = new_begin;
19572 }
19573 } else {
19574 - *begin = TASK_UNMAPPED_BASE;
19575 + *begin = mm->mmap_base;
19576 *end = TASK_SIZE;
19577 }
19578 }
19579 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19580 if (flags & MAP_FIXED)
19581 return addr;
19582
19583 - find_start_end(flags, &begin, &end);
19584 + find_start_end(mm, flags, &begin, &end);
19585
19586 if (len > end)
19587 return -ENOMEM;
19588
19589 +#ifdef CONFIG_PAX_RANDMMAP
19590 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19591 +#endif
19592 +
19593 if (addr) {
19594 addr = PAGE_ALIGN(addr);
19595 vma = find_vma(mm, addr);
19596 - if (end - len >= addr &&
19597 - (!vma || addr + len <= vma->vm_start))
19598 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19599 return addr;
19600 }
19601 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19602 @@ -172,7 +175,7 @@ full_search:
19603 }
19604 return -ENOMEM;
19605 }
19606 - if (!vma || addr + len <= vma->vm_start) {
19607 + if (check_heap_stack_gap(vma, addr, len)) {
19608 /*
19609 * Remember the place where we stopped the search:
19610 */
19611 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19612 {
19613 struct vm_area_struct *vma;
19614 struct mm_struct *mm = current->mm;
19615 - unsigned long addr = addr0, start_addr;
19616 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19617
19618 /* requested length too big for entire address space */
19619 if (len > TASK_SIZE)
19620 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19621 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19622 goto bottomup;
19623
19624 +#ifdef CONFIG_PAX_RANDMMAP
19625 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19626 +#endif
19627 +
19628 /* requesting a specific address */
19629 if (addr) {
19630 addr = PAGE_ALIGN(addr);
19631 - vma = find_vma(mm, addr);
19632 - if (TASK_SIZE - len >= addr &&
19633 - (!vma || addr + len <= vma->vm_start))
19634 - return addr;
19635 + if (TASK_SIZE - len >= addr) {
19636 + vma = find_vma(mm, addr);
19637 + if (check_heap_stack_gap(vma, addr, len))
19638 + return addr;
19639 + }
19640 }
19641
19642 /* check if free_area_cache is useful for us */
19643 @@ -240,7 +248,7 @@ try_again:
19644 * return with success:
19645 */
19646 vma = find_vma(mm, addr);
19647 - if (!vma || addr+len <= vma->vm_start)
19648 + if (check_heap_stack_gap(vma, addr, len))
19649 /* remember the address as a hint for next time */
19650 return mm->free_area_cache = addr;
19651
19652 @@ -249,8 +257,8 @@ try_again:
19653 mm->cached_hole_size = vma->vm_start - addr;
19654
19655 /* try just below the current vma->vm_start */
19656 - addr = vma->vm_start-len;
19657 - } while (len < vma->vm_start);
19658 + addr = skip_heap_stack_gap(vma, len);
19659 + } while (!IS_ERR_VALUE(addr));
19660
19661 fail:
19662 /*
19663 @@ -270,13 +278,21 @@ bottomup:
19664 * can happen with large stack limits and large mmap()
19665 * allocations.
19666 */
19667 + mm->mmap_base = TASK_UNMAPPED_BASE;
19668 +
19669 +#ifdef CONFIG_PAX_RANDMMAP
19670 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19671 + mm->mmap_base += mm->delta_mmap;
19672 +#endif
19673 +
19674 + mm->free_area_cache = mm->mmap_base;
19675 mm->cached_hole_size = ~0UL;
19676 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19677 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19678 /*
19679 * Restore the topdown base:
19680 */
19681 - mm->free_area_cache = mm->mmap_base;
19682 + mm->mmap_base = base;
19683 + mm->free_area_cache = base;
19684 mm->cached_hole_size = ~0UL;
19685
19686 return addr;
19687 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19688 index 6410744..79758f0 100644
19689 --- a/arch/x86/kernel/tboot.c
19690 +++ b/arch/x86/kernel/tboot.c
19691 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19692
19693 void tboot_shutdown(u32 shutdown_type)
19694 {
19695 - void (*shutdown)(void);
19696 + void (* __noreturn shutdown)(void);
19697
19698 if (!tboot_enabled())
19699 return;
19700 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19701
19702 switch_to_tboot_pt();
19703
19704 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19705 + shutdown = (void *)tboot->shutdown_entry;
19706 shutdown();
19707
19708 /* should not reach here */
19709 @@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19710 return 0;
19711 }
19712
19713 -static atomic_t ap_wfs_count;
19714 +static atomic_unchecked_t ap_wfs_count;
19715
19716 static int tboot_wait_for_aps(int num_aps)
19717 {
19718 @@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19719 {
19720 switch (action) {
19721 case CPU_DYING:
19722 - atomic_inc(&ap_wfs_count);
19723 + atomic_inc_unchecked(&ap_wfs_count);
19724 if (num_online_cpus() == 1)
19725 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19726 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19727 return NOTIFY_BAD;
19728 break;
19729 }
19730 @@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19731
19732 tboot_create_trampoline();
19733
19734 - atomic_set(&ap_wfs_count, 0);
19735 + atomic_set_unchecked(&ap_wfs_count, 0);
19736 register_hotcpu_notifier(&tboot_cpu_notifier);
19737
19738 acpi_os_set_prepare_sleep(&tboot_sleep);
19739 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19740 index c6eba2b..3303326 100644
19741 --- a/arch/x86/kernel/time.c
19742 +++ b/arch/x86/kernel/time.c
19743 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19744 {
19745 unsigned long pc = instruction_pointer(regs);
19746
19747 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19748 + if (!user_mode(regs) && in_lock_functions(pc)) {
19749 #ifdef CONFIG_FRAME_POINTER
19750 - return *(unsigned long *)(regs->bp + sizeof(long));
19751 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19752 #else
19753 unsigned long *sp =
19754 (unsigned long *)kernel_stack_pointer(regs);
19755 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19756 * or above a saved flags. Eflags has bits 22-31 zero,
19757 * kernel addresses don't.
19758 */
19759 +
19760 +#ifdef CONFIG_PAX_KERNEXEC
19761 + return ktla_ktva(sp[0]);
19762 +#else
19763 if (sp[0] >> 22)
19764 return sp[0];
19765 if (sp[1] >> 22)
19766 return sp[1];
19767 #endif
19768 +
19769 +#endif
19770 }
19771 return pc;
19772 }
19773 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19774 index 9d9d2f9..ed344e4 100644
19775 --- a/arch/x86/kernel/tls.c
19776 +++ b/arch/x86/kernel/tls.c
19777 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19778 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19779 return -EINVAL;
19780
19781 +#ifdef CONFIG_PAX_SEGMEXEC
19782 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19783 + return -EINVAL;
19784 +#endif
19785 +
19786 set_tls_desc(p, idx, &info, 1);
19787
19788 return 0;
19789 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19790 index 451c0a7..e57f551 100644
19791 --- a/arch/x86/kernel/trampoline_32.S
19792 +++ b/arch/x86/kernel/trampoline_32.S
19793 @@ -32,6 +32,12 @@
19794 #include <asm/segment.h>
19795 #include <asm/page_types.h>
19796
19797 +#ifdef CONFIG_PAX_KERNEXEC
19798 +#define ta(X) (X)
19799 +#else
19800 +#define ta(X) ((X) - __PAGE_OFFSET)
19801 +#endif
19802 +
19803 #ifdef CONFIG_SMP
19804
19805 .section ".x86_trampoline","a"
19806 @@ -62,7 +68,7 @@ r_base = .
19807 inc %ax # protected mode (PE) bit
19808 lmsw %ax # into protected mode
19809 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19810 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19811 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19812
19813 # These need to be in the same 64K segment as the above;
19814 # hence we don't use the boot_gdt_descr defined in head.S
19815 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19816 index 09ff517..df19fbff 100644
19817 --- a/arch/x86/kernel/trampoline_64.S
19818 +++ b/arch/x86/kernel/trampoline_64.S
19819 @@ -90,7 +90,7 @@ startup_32:
19820 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19821 movl %eax, %ds
19822
19823 - movl $X86_CR4_PAE, %eax
19824 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19825 movl %eax, %cr4 # Enable PAE mode
19826
19827 # Setup trampoline 4 level pagetables
19828 @@ -138,7 +138,7 @@ tidt:
19829 # so the kernel can live anywhere
19830 .balign 4
19831 tgdt:
19832 - .short tgdt_end - tgdt # gdt limit
19833 + .short tgdt_end - tgdt - 1 # gdt limit
19834 .long tgdt - r_base
19835 .short 0
19836 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19837 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19838 index ff9281f1..30cb4ac 100644
19839 --- a/arch/x86/kernel/traps.c
19840 +++ b/arch/x86/kernel/traps.c
19841 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19842
19843 /* Do we ignore FPU interrupts ? */
19844 char ignore_fpu_irq;
19845 -
19846 -/*
19847 - * The IDT has to be page-aligned to simplify the Pentium
19848 - * F0 0F bug workaround.
19849 - */
19850 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19851 #endif
19852
19853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19854 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19855 }
19856
19857 static void __kprobes
19858 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19859 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19860 long error_code, siginfo_t *info)
19861 {
19862 struct task_struct *tsk = current;
19863
19864 #ifdef CONFIG_X86_32
19865 - if (regs->flags & X86_VM_MASK) {
19866 + if (v8086_mode(regs)) {
19867 /*
19868 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19869 * On nmi (interrupt 2), do_trap should not be called.
19870 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19871 }
19872 #endif
19873
19874 - if (!user_mode(regs))
19875 + if (!user_mode_novm(regs))
19876 goto kernel_trap;
19877
19878 #ifdef CONFIG_X86_32
19879 @@ -148,7 +142,7 @@ trap_signal:
19880 printk_ratelimit()) {
19881 printk(KERN_INFO
19882 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19883 - tsk->comm, tsk->pid, str,
19884 + tsk->comm, task_pid_nr(tsk), str,
19885 regs->ip, regs->sp, error_code);
19886 print_vma_addr(" in ", regs->ip);
19887 printk("\n");
19888 @@ -165,8 +159,20 @@ kernel_trap:
19889 if (!fixup_exception(regs)) {
19890 tsk->thread.error_code = error_code;
19891 tsk->thread.trap_nr = trapnr;
19892 +
19893 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19894 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19895 + str = "PAX: suspicious stack segment fault";
19896 +#endif
19897 +
19898 die(str, regs, error_code);
19899 }
19900 +
19901 +#ifdef CONFIG_PAX_REFCOUNT
19902 + if (trapnr == 4)
19903 + pax_report_refcount_overflow(regs);
19904 +#endif
19905 +
19906 return;
19907
19908 #ifdef CONFIG_X86_32
19909 @@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19910 conditional_sti(regs);
19911
19912 #ifdef CONFIG_X86_32
19913 - if (regs->flags & X86_VM_MASK)
19914 + if (v8086_mode(regs))
19915 goto gp_in_vm86;
19916 #endif
19917
19918 tsk = current;
19919 - if (!user_mode(regs))
19920 + if (!user_mode_novm(regs))
19921 goto gp_in_kernel;
19922
19923 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19924 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19925 + struct mm_struct *mm = tsk->mm;
19926 + unsigned long limit;
19927 +
19928 + down_write(&mm->mmap_sem);
19929 + limit = mm->context.user_cs_limit;
19930 + if (limit < TASK_SIZE) {
19931 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19932 + up_write(&mm->mmap_sem);
19933 + return;
19934 + }
19935 + up_write(&mm->mmap_sem);
19936 + }
19937 +#endif
19938 +
19939 tsk->thread.error_code = error_code;
19940 tsk->thread.trap_nr = X86_TRAP_GP;
19941
19942 @@ -299,6 +321,13 @@ gp_in_kernel:
19943 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19944 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19945 return;
19946 +
19947 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19948 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19949 + die("PAX: suspicious general protection fault", regs, error_code);
19950 + else
19951 +#endif
19952 +
19953 die("general protection fault", regs, error_code);
19954 }
19955
19956 @@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19957 /* It's safe to allow irq's after DR6 has been saved */
19958 preempt_conditional_sti(regs);
19959
19960 - if (regs->flags & X86_VM_MASK) {
19961 + if (v8086_mode(regs)) {
19962 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19963 X86_TRAP_DB);
19964 preempt_conditional_cli(regs);
19965 @@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19966 * We already checked v86 mode above, so we can check for kernel mode
19967 * by just checking the CPL of CS.
19968 */
19969 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19970 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19971 tsk->thread.debugreg6 &= ~DR_STEP;
19972 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19973 regs->flags &= ~X86_EFLAGS_TF;
19974 @@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19975 return;
19976 conditional_sti(regs);
19977
19978 - if (!user_mode_vm(regs))
19979 + if (!user_mode(regs))
19980 {
19981 if (!fixup_exception(regs)) {
19982 task->thread.error_code = error_code;
19983 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19984 index b9242ba..50c5edd 100644
19985 --- a/arch/x86/kernel/verify_cpu.S
19986 +++ b/arch/x86/kernel/verify_cpu.S
19987 @@ -20,6 +20,7 @@
19988 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19989 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19990 * arch/x86/kernel/head_32.S: processor startup
19991 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19992 *
19993 * verify_cpu, returns the status of longmode and SSE in register %eax.
19994 * 0: Success 1: Failure
19995 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19996 index 255f58a..5e91150 100644
19997 --- a/arch/x86/kernel/vm86_32.c
19998 +++ b/arch/x86/kernel/vm86_32.c
19999 @@ -41,6 +41,7 @@
20000 #include <linux/ptrace.h>
20001 #include <linux/audit.h>
20002 #include <linux/stddef.h>
20003 +#include <linux/grsecurity.h>
20004
20005 #include <asm/uaccess.h>
20006 #include <asm/io.h>
20007 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20008 do_exit(SIGSEGV);
20009 }
20010
20011 - tss = &per_cpu(init_tss, get_cpu());
20012 + tss = init_tss + get_cpu();
20013 current->thread.sp0 = current->thread.saved_sp0;
20014 current->thread.sysenter_cs = __KERNEL_CS;
20015 load_sp0(tss, &current->thread);
20016 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
20017 struct task_struct *tsk;
20018 int tmp, ret = -EPERM;
20019
20020 +#ifdef CONFIG_GRKERNSEC_VM86
20021 + if (!capable(CAP_SYS_RAWIO)) {
20022 + gr_handle_vm86();
20023 + goto out;
20024 + }
20025 +#endif
20026 +
20027 tsk = current;
20028 if (tsk->thread.saved_sp0)
20029 goto out;
20030 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20031 int tmp, ret;
20032 struct vm86plus_struct __user *v86;
20033
20034 +#ifdef CONFIG_GRKERNSEC_VM86
20035 + if (!capable(CAP_SYS_RAWIO)) {
20036 + gr_handle_vm86();
20037 + ret = -EPERM;
20038 + goto out;
20039 + }
20040 +#endif
20041 +
20042 tsk = current;
20043 switch (cmd) {
20044 case VM86_REQUEST_IRQ:
20045 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20046 tsk->thread.saved_fs = info->regs32->fs;
20047 tsk->thread.saved_gs = get_user_gs(info->regs32);
20048
20049 - tss = &per_cpu(init_tss, get_cpu());
20050 + tss = init_tss + get_cpu();
20051 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20052 if (cpu_has_sep)
20053 tsk->thread.sysenter_cs = 0;
20054 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20055 goto cannot_handle;
20056 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20057 goto cannot_handle;
20058 - intr_ptr = (unsigned long __user *) (i << 2);
20059 + intr_ptr = (__force unsigned long __user *) (i << 2);
20060 if (get_user(segoffs, intr_ptr))
20061 goto cannot_handle;
20062 if ((segoffs >> 16) == BIOSSEG)
20063 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20064 index 0f703f1..9e15f64 100644
20065 --- a/arch/x86/kernel/vmlinux.lds.S
20066 +++ b/arch/x86/kernel/vmlinux.lds.S
20067 @@ -26,6 +26,13 @@
20068 #include <asm/page_types.h>
20069 #include <asm/cache.h>
20070 #include <asm/boot.h>
20071 +#include <asm/segment.h>
20072 +
20073 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20074 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20075 +#else
20076 +#define __KERNEL_TEXT_OFFSET 0
20077 +#endif
20078
20079 #undef i386 /* in case the preprocessor is a 32bit one */
20080
20081 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20082
20083 PHDRS {
20084 text PT_LOAD FLAGS(5); /* R_E */
20085 +#ifdef CONFIG_X86_32
20086 + module PT_LOAD FLAGS(5); /* R_E */
20087 +#endif
20088 +#ifdef CONFIG_XEN
20089 + rodata PT_LOAD FLAGS(5); /* R_E */
20090 +#else
20091 + rodata PT_LOAD FLAGS(4); /* R__ */
20092 +#endif
20093 data PT_LOAD FLAGS(6); /* RW_ */
20094 -#ifdef CONFIG_X86_64
20095 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20096 #ifdef CONFIG_SMP
20097 percpu PT_LOAD FLAGS(6); /* RW_ */
20098 #endif
20099 + text.init PT_LOAD FLAGS(5); /* R_E */
20100 + text.exit PT_LOAD FLAGS(5); /* R_E */
20101 init PT_LOAD FLAGS(7); /* RWE */
20102 -#endif
20103 note PT_NOTE FLAGS(0); /* ___ */
20104 }
20105
20106 SECTIONS
20107 {
20108 #ifdef CONFIG_X86_32
20109 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20110 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20111 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20112 #else
20113 - . = __START_KERNEL;
20114 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20115 + . = __START_KERNEL;
20116 #endif
20117
20118 /* Text and read-only data */
20119 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20120 - _text = .;
20121 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20122 /* bootstrapping code */
20123 +#ifdef CONFIG_X86_32
20124 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20125 +#else
20126 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20127 +#endif
20128 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20129 + _text = .;
20130 HEAD_TEXT
20131 #ifdef CONFIG_X86_32
20132 . = ALIGN(PAGE_SIZE);
20133 @@ -108,13 +128,47 @@ SECTIONS
20134 IRQENTRY_TEXT
20135 *(.fixup)
20136 *(.gnu.warning)
20137 - /* End of text section */
20138 - _etext = .;
20139 } :text = 0x9090
20140
20141 - NOTES :text :note
20142 + . += __KERNEL_TEXT_OFFSET;
20143
20144 - EXCEPTION_TABLE(16) :text = 0x9090
20145 +#ifdef CONFIG_X86_32
20146 + . = ALIGN(PAGE_SIZE);
20147 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20148 +
20149 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20150 + MODULES_EXEC_VADDR = .;
20151 + BYTE(0)
20152 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20153 + . = ALIGN(HPAGE_SIZE);
20154 + MODULES_EXEC_END = . - 1;
20155 +#endif
20156 +
20157 + } :module
20158 +#endif
20159 +
20160 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20161 + /* End of text section */
20162 + _etext = . - __KERNEL_TEXT_OFFSET;
20163 + }
20164 +
20165 +#ifdef CONFIG_X86_32
20166 + . = ALIGN(PAGE_SIZE);
20167 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20168 + *(.idt)
20169 + . = ALIGN(PAGE_SIZE);
20170 + *(.empty_zero_page)
20171 + *(.initial_pg_fixmap)
20172 + *(.initial_pg_pmd)
20173 + *(.initial_page_table)
20174 + *(.swapper_pg_dir)
20175 + } :rodata
20176 +#endif
20177 +
20178 + . = ALIGN(PAGE_SIZE);
20179 + NOTES :rodata :note
20180 +
20181 + EXCEPTION_TABLE(16) :rodata
20182
20183 #if defined(CONFIG_DEBUG_RODATA)
20184 /* .text should occupy whole number of pages */
20185 @@ -126,16 +180,20 @@ SECTIONS
20186
20187 /* Data */
20188 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20189 +
20190 +#ifdef CONFIG_PAX_KERNEXEC
20191 + . = ALIGN(HPAGE_SIZE);
20192 +#else
20193 + . = ALIGN(PAGE_SIZE);
20194 +#endif
20195 +
20196 /* Start of data section */
20197 _sdata = .;
20198
20199 /* init_task */
20200 INIT_TASK_DATA(THREAD_SIZE)
20201
20202 -#ifdef CONFIG_X86_32
20203 - /* 32 bit has nosave before _edata */
20204 NOSAVE_DATA
20205 -#endif
20206
20207 PAGE_ALIGNED_DATA(PAGE_SIZE)
20208
20209 @@ -176,12 +234,19 @@ SECTIONS
20210 #endif /* CONFIG_X86_64 */
20211
20212 /* Init code and data - will be freed after init */
20213 - . = ALIGN(PAGE_SIZE);
20214 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20215 + BYTE(0)
20216 +
20217 +#ifdef CONFIG_PAX_KERNEXEC
20218 + . = ALIGN(HPAGE_SIZE);
20219 +#else
20220 + . = ALIGN(PAGE_SIZE);
20221 +#endif
20222 +
20223 __init_begin = .; /* paired with __init_end */
20224 - }
20225 + } :init.begin
20226
20227 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20228 +#ifdef CONFIG_SMP
20229 /*
20230 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20231 * output PHDR, so the next output section - .init.text - should
20232 @@ -190,12 +255,27 @@ SECTIONS
20233 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20234 #endif
20235
20236 - INIT_TEXT_SECTION(PAGE_SIZE)
20237 -#ifdef CONFIG_X86_64
20238 - :init
20239 -#endif
20240 + . = ALIGN(PAGE_SIZE);
20241 + init_begin = .;
20242 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20243 + VMLINUX_SYMBOL(_sinittext) = .;
20244 + INIT_TEXT
20245 + VMLINUX_SYMBOL(_einittext) = .;
20246 + . = ALIGN(PAGE_SIZE);
20247 + } :text.init
20248
20249 - INIT_DATA_SECTION(16)
20250 + /*
20251 + * .exit.text is discard at runtime, not link time, to deal with
20252 + * references from .altinstructions and .eh_frame
20253 + */
20254 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20255 + EXIT_TEXT
20256 + . = ALIGN(16);
20257 + } :text.exit
20258 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20259 +
20260 + . = ALIGN(PAGE_SIZE);
20261 + INIT_DATA_SECTION(16) :init
20262
20263 /*
20264 * Code and data for a variety of lowlevel trampolines, to be
20265 @@ -269,19 +349,12 @@ SECTIONS
20266 }
20267
20268 . = ALIGN(8);
20269 - /*
20270 - * .exit.text is discard at runtime, not link time, to deal with
20271 - * references from .altinstructions and .eh_frame
20272 - */
20273 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20274 - EXIT_TEXT
20275 - }
20276
20277 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20278 EXIT_DATA
20279 }
20280
20281 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20282 +#ifndef CONFIG_SMP
20283 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20284 #endif
20285
20286 @@ -300,16 +373,10 @@ SECTIONS
20287 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20288 __smp_locks = .;
20289 *(.smp_locks)
20290 - . = ALIGN(PAGE_SIZE);
20291 __smp_locks_end = .;
20292 + . = ALIGN(PAGE_SIZE);
20293 }
20294
20295 -#ifdef CONFIG_X86_64
20296 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20297 - NOSAVE_DATA
20298 - }
20299 -#endif
20300 -
20301 /* BSS */
20302 . = ALIGN(PAGE_SIZE);
20303 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20304 @@ -325,6 +392,7 @@ SECTIONS
20305 __brk_base = .;
20306 . += 64 * 1024; /* 64k alignment slop space */
20307 *(.brk_reservation) /* areas brk users have reserved */
20308 + . = ALIGN(HPAGE_SIZE);
20309 __brk_limit = .;
20310 }
20311
20312 @@ -351,13 +419,12 @@ SECTIONS
20313 * for the boot processor.
20314 */
20315 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20316 -INIT_PER_CPU(gdt_page);
20317 INIT_PER_CPU(irq_stack_union);
20318
20319 /*
20320 * Build-time check on the image size:
20321 */
20322 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20323 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20324 "kernel image bigger than KERNEL_IMAGE_SIZE");
20325
20326 #ifdef CONFIG_SMP
20327 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20328 index 7515cf0..331a1a0 100644
20329 --- a/arch/x86/kernel/vsyscall_64.c
20330 +++ b/arch/x86/kernel/vsyscall_64.c
20331 @@ -54,15 +54,13 @@
20332 DEFINE_VVAR(int, vgetcpu_mode);
20333 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20334
20335 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20336 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20337
20338 static int __init vsyscall_setup(char *str)
20339 {
20340 if (str) {
20341 if (!strcmp("emulate", str))
20342 vsyscall_mode = EMULATE;
20343 - else if (!strcmp("native", str))
20344 - vsyscall_mode = NATIVE;
20345 else if (!strcmp("none", str))
20346 vsyscall_mode = NONE;
20347 else
20348 @@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20349
20350 tsk = current;
20351 if (seccomp_mode(&tsk->seccomp))
20352 - do_exit(SIGKILL);
20353 + do_group_exit(SIGKILL);
20354
20355 /*
20356 * With a real vsyscall, page faults cause SIGSEGV. We want to
20357 @@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20358 return true;
20359
20360 sigsegv:
20361 - force_sig(SIGSEGV, current);
20362 - return true;
20363 + do_group_exit(SIGKILL);
20364 }
20365
20366 /*
20367 @@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20368 extern char __vvar_page;
20369 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20370
20371 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20372 - vsyscall_mode == NATIVE
20373 - ? PAGE_KERNEL_VSYSCALL
20374 - : PAGE_KERNEL_VVAR);
20375 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20376 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20377 (unsigned long)VSYSCALL_START);
20378
20379 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20380 index 9796c2f..f686fbf 100644
20381 --- a/arch/x86/kernel/x8664_ksyms_64.c
20382 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20383 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20384 EXPORT_SYMBOL(copy_user_generic_string);
20385 EXPORT_SYMBOL(copy_user_generic_unrolled);
20386 EXPORT_SYMBOL(__copy_user_nocache);
20387 -EXPORT_SYMBOL(_copy_from_user);
20388 -EXPORT_SYMBOL(_copy_to_user);
20389
20390 EXPORT_SYMBOL(copy_page);
20391 EXPORT_SYMBOL(clear_page);
20392 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20393 index e62728e..5fc3a07 100644
20394 --- a/arch/x86/kernel/xsave.c
20395 +++ b/arch/x86/kernel/xsave.c
20396 @@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20397 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20398 return -EINVAL;
20399
20400 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20401 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20402 fx_sw_user->extended_size -
20403 FP_XSTATE_MAGIC2_SIZE));
20404 if (err)
20405 @@ -267,7 +267,7 @@ fx_only:
20406 * the other extended state.
20407 */
20408 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20409 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20410 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20411 }
20412
20413 /*
20414 @@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20415 if (use_xsave())
20416 err = restore_user_xstate(buf);
20417 else
20418 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20419 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20420 buf);
20421 if (unlikely(err)) {
20422 /*
20423 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20424 index 9fed5be..18fd595 100644
20425 --- a/arch/x86/kvm/cpuid.c
20426 +++ b/arch/x86/kvm/cpuid.c
20427 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20428 struct kvm_cpuid2 *cpuid,
20429 struct kvm_cpuid_entry2 __user *entries)
20430 {
20431 - int r;
20432 + int r, i;
20433
20434 r = -E2BIG;
20435 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20436 goto out;
20437 r = -EFAULT;
20438 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20439 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20440 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20441 goto out;
20442 + for (i = 0; i < cpuid->nent; ++i) {
20443 + struct kvm_cpuid_entry2 cpuid_entry;
20444 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20445 + goto out;
20446 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20447 + }
20448 vcpu->arch.cpuid_nent = cpuid->nent;
20449 kvm_apic_set_version(vcpu);
20450 kvm_x86_ops->cpuid_update(vcpu);
20451 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20452 struct kvm_cpuid2 *cpuid,
20453 struct kvm_cpuid_entry2 __user *entries)
20454 {
20455 - int r;
20456 + int r, i;
20457
20458 r = -E2BIG;
20459 if (cpuid->nent < vcpu->arch.cpuid_nent)
20460 goto out;
20461 r = -EFAULT;
20462 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20463 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20464 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20465 goto out;
20466 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20467 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20468 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20469 + goto out;
20470 + }
20471 return 0;
20472
20473 out:
20474 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20475 index 8375622..b7bca1a 100644
20476 --- a/arch/x86/kvm/emulate.c
20477 +++ b/arch/x86/kvm/emulate.c
20478 @@ -252,6 +252,7 @@ struct gprefix {
20479
20480 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20481 do { \
20482 + unsigned long _tmp; \
20483 __asm__ __volatile__ ( \
20484 _PRE_EFLAGS("0", "4", "2") \
20485 _op _suffix " %"_x"3,%1; " \
20486 @@ -266,8 +267,6 @@ struct gprefix {
20487 /* Raw emulation: instruction has two explicit operands. */
20488 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20489 do { \
20490 - unsigned long _tmp; \
20491 - \
20492 switch ((ctxt)->dst.bytes) { \
20493 case 2: \
20494 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20495 @@ -283,7 +282,6 @@ struct gprefix {
20496
20497 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20498 do { \
20499 - unsigned long _tmp; \
20500 switch ((ctxt)->dst.bytes) { \
20501 case 1: \
20502 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20503 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20504 index 8584322..17d5955 100644
20505 --- a/arch/x86/kvm/lapic.c
20506 +++ b/arch/x86/kvm/lapic.c
20507 @@ -54,7 +54,7 @@
20508 #define APIC_BUS_CYCLE_NS 1
20509
20510 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20511 -#define apic_debug(fmt, arg...)
20512 +#define apic_debug(fmt, arg...) do {} while (0)
20513
20514 #define APIC_LVT_NUM 6
20515 /* 14 is the version for Xeon and Pentium 8.4.8*/
20516 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20517 index df5a703..63748a7 100644
20518 --- a/arch/x86/kvm/paging_tmpl.h
20519 +++ b/arch/x86/kvm/paging_tmpl.h
20520 @@ -197,7 +197,7 @@ retry_walk:
20521 if (unlikely(kvm_is_error_hva(host_addr)))
20522 goto error;
20523
20524 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20525 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20526 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20527 goto error;
20528
20529 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20530 index e334389..6839087 100644
20531 --- a/arch/x86/kvm/svm.c
20532 +++ b/arch/x86/kvm/svm.c
20533 @@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20534 int cpu = raw_smp_processor_id();
20535
20536 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20537 +
20538 + pax_open_kernel();
20539 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20540 + pax_close_kernel();
20541 +
20542 load_TR_desc();
20543 }
20544
20545 @@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20546 #endif
20547 #endif
20548
20549 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20550 + __set_fs(current_thread_info()->addr_limit);
20551 +#endif
20552 +
20553 reload_tss(vcpu);
20554
20555 local_irq_disable();
20556 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20557 index 4ff0ab9..2ff68d3 100644
20558 --- a/arch/x86/kvm/vmx.c
20559 +++ b/arch/x86/kvm/vmx.c
20560 @@ -1303,7 +1303,11 @@ static void reload_tss(void)
20561 struct desc_struct *descs;
20562
20563 descs = (void *)gdt->address;
20564 +
20565 + pax_open_kernel();
20566 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20567 + pax_close_kernel();
20568 +
20569 load_TR_desc();
20570 }
20571
20572 @@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20573 if (!cpu_has_vmx_flexpriority())
20574 flexpriority_enabled = 0;
20575
20576 - if (!cpu_has_vmx_tpr_shadow())
20577 - kvm_x86_ops->update_cr8_intercept = NULL;
20578 + if (!cpu_has_vmx_tpr_shadow()) {
20579 + pax_open_kernel();
20580 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20581 + pax_close_kernel();
20582 + }
20583
20584 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20585 kvm_disable_largepages();
20586 @@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20587 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20588
20589 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20590 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20591 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20592
20593 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20594 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20595 @@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20596 "jmp .Lkvm_vmx_return \n\t"
20597 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20598 ".Lkvm_vmx_return: "
20599 +
20600 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20601 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20602 + ".Lkvm_vmx_return2: "
20603 +#endif
20604 +
20605 /* Save guest registers, load host registers, keep flags */
20606 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20607 "pop %0 \n\t"
20608 @@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20609 #endif
20610 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20611 [wordsize]"i"(sizeof(ulong))
20612 +
20613 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20614 + ,[cs]"i"(__KERNEL_CS)
20615 +#endif
20616 +
20617 : "cc", "memory"
20618 , R"ax", R"bx", R"di", R"si"
20619 #ifdef CONFIG_X86_64
20620 @@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20621 }
20622 }
20623
20624 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20625 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20626 +
20627 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20628 + loadsegment(fs, __KERNEL_PERCPU);
20629 +#endif
20630 +
20631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20632 + __set_fs(current_thread_info()->addr_limit);
20633 +#endif
20634 +
20635 vmx->loaded_vmcs->launched = 1;
20636
20637 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20638 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20639 index 185a2b8..866d2a6 100644
20640 --- a/arch/x86/kvm/x86.c
20641 +++ b/arch/x86/kvm/x86.c
20642 @@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20643 {
20644 struct kvm *kvm = vcpu->kvm;
20645 int lm = is_long_mode(vcpu);
20646 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20647 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20648 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20649 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20650 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20651 : kvm->arch.xen_hvm_config.blob_size_32;
20652 u32 page_num = data & ~PAGE_MASK;
20653 @@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20654 if (n < msr_list.nmsrs)
20655 goto out;
20656 r = -EFAULT;
20657 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20658 + goto out;
20659 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20660 num_msrs_to_save * sizeof(u32)))
20661 goto out;
20662 @@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20663 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20664 struct kvm_interrupt *irq)
20665 {
20666 - if (irq->irq < 0 || irq->irq >= 256)
20667 + if (irq->irq >= 256)
20668 return -EINVAL;
20669 if (irqchip_in_kernel(vcpu->kvm))
20670 return -ENXIO;
20671 @@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20672 kvm_mmu_set_mmio_spte_mask(mask);
20673 }
20674
20675 -int kvm_arch_init(void *opaque)
20676 +int kvm_arch_init(const void *opaque)
20677 {
20678 int r;
20679 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20680 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20681 index 642d880..44e0f3f 100644
20682 --- a/arch/x86/lguest/boot.c
20683 +++ b/arch/x86/lguest/boot.c
20684 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20685 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20686 * Launcher to reboot us.
20687 */
20688 -static void lguest_restart(char *reason)
20689 +static __noreturn void lguest_restart(char *reason)
20690 {
20691 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20692 + BUG();
20693 }
20694
20695 /*G:050
20696 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20697 index 00933d5..3a64af9 100644
20698 --- a/arch/x86/lib/atomic64_386_32.S
20699 +++ b/arch/x86/lib/atomic64_386_32.S
20700 @@ -48,6 +48,10 @@ BEGIN(read)
20701 movl (v), %eax
20702 movl 4(v), %edx
20703 RET_ENDP
20704 +BEGIN(read_unchecked)
20705 + movl (v), %eax
20706 + movl 4(v), %edx
20707 +RET_ENDP
20708 #undef v
20709
20710 #define v %esi
20711 @@ -55,6 +59,10 @@ BEGIN(set)
20712 movl %ebx, (v)
20713 movl %ecx, 4(v)
20714 RET_ENDP
20715 +BEGIN(set_unchecked)
20716 + movl %ebx, (v)
20717 + movl %ecx, 4(v)
20718 +RET_ENDP
20719 #undef v
20720
20721 #define v %esi
20722 @@ -70,6 +78,20 @@ RET_ENDP
20723 BEGIN(add)
20724 addl %eax, (v)
20725 adcl %edx, 4(v)
20726 +
20727 +#ifdef CONFIG_PAX_REFCOUNT
20728 + jno 0f
20729 + subl %eax, (v)
20730 + sbbl %edx, 4(v)
20731 + int $4
20732 +0:
20733 + _ASM_EXTABLE(0b, 0b)
20734 +#endif
20735 +
20736 +RET_ENDP
20737 +BEGIN(add_unchecked)
20738 + addl %eax, (v)
20739 + adcl %edx, 4(v)
20740 RET_ENDP
20741 #undef v
20742
20743 @@ -77,6 +99,24 @@ RET_ENDP
20744 BEGIN(add_return)
20745 addl (v), %eax
20746 adcl 4(v), %edx
20747 +
20748 +#ifdef CONFIG_PAX_REFCOUNT
20749 + into
20750 +1234:
20751 + _ASM_EXTABLE(1234b, 2f)
20752 +#endif
20753 +
20754 + movl %eax, (v)
20755 + movl %edx, 4(v)
20756 +
20757 +#ifdef CONFIG_PAX_REFCOUNT
20758 +2:
20759 +#endif
20760 +
20761 +RET_ENDP
20762 +BEGIN(add_return_unchecked)
20763 + addl (v), %eax
20764 + adcl 4(v), %edx
20765 movl %eax, (v)
20766 movl %edx, 4(v)
20767 RET_ENDP
20768 @@ -86,6 +126,20 @@ RET_ENDP
20769 BEGIN(sub)
20770 subl %eax, (v)
20771 sbbl %edx, 4(v)
20772 +
20773 +#ifdef CONFIG_PAX_REFCOUNT
20774 + jno 0f
20775 + addl %eax, (v)
20776 + adcl %edx, 4(v)
20777 + int $4
20778 +0:
20779 + _ASM_EXTABLE(0b, 0b)
20780 +#endif
20781 +
20782 +RET_ENDP
20783 +BEGIN(sub_unchecked)
20784 + subl %eax, (v)
20785 + sbbl %edx, 4(v)
20786 RET_ENDP
20787 #undef v
20788
20789 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20790 sbbl $0, %edx
20791 addl (v), %eax
20792 adcl 4(v), %edx
20793 +
20794 +#ifdef CONFIG_PAX_REFCOUNT
20795 + into
20796 +1234:
20797 + _ASM_EXTABLE(1234b, 2f)
20798 +#endif
20799 +
20800 + movl %eax, (v)
20801 + movl %edx, 4(v)
20802 +
20803 +#ifdef CONFIG_PAX_REFCOUNT
20804 +2:
20805 +#endif
20806 +
20807 +RET_ENDP
20808 +BEGIN(sub_return_unchecked)
20809 + negl %edx
20810 + negl %eax
20811 + sbbl $0, %edx
20812 + addl (v), %eax
20813 + adcl 4(v), %edx
20814 movl %eax, (v)
20815 movl %edx, 4(v)
20816 RET_ENDP
20817 @@ -105,6 +180,20 @@ RET_ENDP
20818 BEGIN(inc)
20819 addl $1, (v)
20820 adcl $0, 4(v)
20821 +
20822 +#ifdef CONFIG_PAX_REFCOUNT
20823 + jno 0f
20824 + subl $1, (v)
20825 + sbbl $0, 4(v)
20826 + int $4
20827 +0:
20828 + _ASM_EXTABLE(0b, 0b)
20829 +#endif
20830 +
20831 +RET_ENDP
20832 +BEGIN(inc_unchecked)
20833 + addl $1, (v)
20834 + adcl $0, 4(v)
20835 RET_ENDP
20836 #undef v
20837
20838 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20839 movl 4(v), %edx
20840 addl $1, %eax
20841 adcl $0, %edx
20842 +
20843 +#ifdef CONFIG_PAX_REFCOUNT
20844 + into
20845 +1234:
20846 + _ASM_EXTABLE(1234b, 2f)
20847 +#endif
20848 +
20849 + movl %eax, (v)
20850 + movl %edx, 4(v)
20851 +
20852 +#ifdef CONFIG_PAX_REFCOUNT
20853 +2:
20854 +#endif
20855 +
20856 +RET_ENDP
20857 +BEGIN(inc_return_unchecked)
20858 + movl (v), %eax
20859 + movl 4(v), %edx
20860 + addl $1, %eax
20861 + adcl $0, %edx
20862 movl %eax, (v)
20863 movl %edx, 4(v)
20864 RET_ENDP
20865 @@ -123,6 +232,20 @@ RET_ENDP
20866 BEGIN(dec)
20867 subl $1, (v)
20868 sbbl $0, 4(v)
20869 +
20870 +#ifdef CONFIG_PAX_REFCOUNT
20871 + jno 0f
20872 + addl $1, (v)
20873 + adcl $0, 4(v)
20874 + int $4
20875 +0:
20876 + _ASM_EXTABLE(0b, 0b)
20877 +#endif
20878 +
20879 +RET_ENDP
20880 +BEGIN(dec_unchecked)
20881 + subl $1, (v)
20882 + sbbl $0, 4(v)
20883 RET_ENDP
20884 #undef v
20885
20886 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20887 movl 4(v), %edx
20888 subl $1, %eax
20889 sbbl $0, %edx
20890 +
20891 +#ifdef CONFIG_PAX_REFCOUNT
20892 + into
20893 +1234:
20894 + _ASM_EXTABLE(1234b, 2f)
20895 +#endif
20896 +
20897 + movl %eax, (v)
20898 + movl %edx, 4(v)
20899 +
20900 +#ifdef CONFIG_PAX_REFCOUNT
20901 +2:
20902 +#endif
20903 +
20904 +RET_ENDP
20905 +BEGIN(dec_return_unchecked)
20906 + movl (v), %eax
20907 + movl 4(v), %edx
20908 + subl $1, %eax
20909 + sbbl $0, %edx
20910 movl %eax, (v)
20911 movl %edx, 4(v)
20912 RET_ENDP
20913 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20914 adcl %edx, %edi
20915 addl (v), %eax
20916 adcl 4(v), %edx
20917 +
20918 +#ifdef CONFIG_PAX_REFCOUNT
20919 + into
20920 +1234:
20921 + _ASM_EXTABLE(1234b, 2f)
20922 +#endif
20923 +
20924 cmpl %eax, %ecx
20925 je 3f
20926 1:
20927 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20928 1:
20929 addl $1, %eax
20930 adcl $0, %edx
20931 +
20932 +#ifdef CONFIG_PAX_REFCOUNT
20933 + into
20934 +1234:
20935 + _ASM_EXTABLE(1234b, 2f)
20936 +#endif
20937 +
20938 movl %eax, (v)
20939 movl %edx, 4(v)
20940 movl $1, %eax
20941 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20942 movl 4(v), %edx
20943 subl $1, %eax
20944 sbbl $0, %edx
20945 +
20946 +#ifdef CONFIG_PAX_REFCOUNT
20947 + into
20948 +1234:
20949 + _ASM_EXTABLE(1234b, 1f)
20950 +#endif
20951 +
20952 js 1f
20953 movl %eax, (v)
20954 movl %edx, 4(v)
20955 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20956 index f5cc9eb..51fa319 100644
20957 --- a/arch/x86/lib/atomic64_cx8_32.S
20958 +++ b/arch/x86/lib/atomic64_cx8_32.S
20959 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20960 CFI_STARTPROC
20961
20962 read64 %ecx
20963 + pax_force_retaddr
20964 ret
20965 CFI_ENDPROC
20966 ENDPROC(atomic64_read_cx8)
20967
20968 +ENTRY(atomic64_read_unchecked_cx8)
20969 + CFI_STARTPROC
20970 +
20971 + read64 %ecx
20972 + pax_force_retaddr
20973 + ret
20974 + CFI_ENDPROC
20975 +ENDPROC(atomic64_read_unchecked_cx8)
20976 +
20977 ENTRY(atomic64_set_cx8)
20978 CFI_STARTPROC
20979
20980 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20981 cmpxchg8b (%esi)
20982 jne 1b
20983
20984 + pax_force_retaddr
20985 ret
20986 CFI_ENDPROC
20987 ENDPROC(atomic64_set_cx8)
20988
20989 +ENTRY(atomic64_set_unchecked_cx8)
20990 + CFI_STARTPROC
20991 +
20992 +1:
20993 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20994 + * are atomic on 586 and newer */
20995 + cmpxchg8b (%esi)
20996 + jne 1b
20997 +
20998 + pax_force_retaddr
20999 + ret
21000 + CFI_ENDPROC
21001 +ENDPROC(atomic64_set_unchecked_cx8)
21002 +
21003 ENTRY(atomic64_xchg_cx8)
21004 CFI_STARTPROC
21005
21006 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
21007 cmpxchg8b (%esi)
21008 jne 1b
21009
21010 + pax_force_retaddr
21011 ret
21012 CFI_ENDPROC
21013 ENDPROC(atomic64_xchg_cx8)
21014
21015 -.macro addsub_return func ins insc
21016 -ENTRY(atomic64_\func\()_return_cx8)
21017 +.macro addsub_return func ins insc unchecked=""
21018 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21019 CFI_STARTPROC
21020 SAVE ebp
21021 SAVE ebx
21022 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21023 movl %edx, %ecx
21024 \ins\()l %esi, %ebx
21025 \insc\()l %edi, %ecx
21026 +
21027 +.ifb \unchecked
21028 +#ifdef CONFIG_PAX_REFCOUNT
21029 + into
21030 +2:
21031 + _ASM_EXTABLE(2b, 3f)
21032 +#endif
21033 +.endif
21034 +
21035 LOCK_PREFIX
21036 cmpxchg8b (%ebp)
21037 jne 1b
21038 -
21039 -10:
21040 movl %ebx, %eax
21041 movl %ecx, %edx
21042 +
21043 +.ifb \unchecked
21044 +#ifdef CONFIG_PAX_REFCOUNT
21045 +3:
21046 +#endif
21047 +.endif
21048 +
21049 RESTORE edi
21050 RESTORE esi
21051 RESTORE ebx
21052 RESTORE ebp
21053 + pax_force_retaddr
21054 ret
21055 CFI_ENDPROC
21056 -ENDPROC(atomic64_\func\()_return_cx8)
21057 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21058 .endm
21059
21060 addsub_return add add adc
21061 addsub_return sub sub sbb
21062 +addsub_return add add adc _unchecked
21063 +addsub_return sub sub sbb _unchecked
21064
21065 -.macro incdec_return func ins insc
21066 -ENTRY(atomic64_\func\()_return_cx8)
21067 +.macro incdec_return func ins insc unchecked=""
21068 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21069 CFI_STARTPROC
21070 SAVE ebx
21071
21072 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21073 movl %edx, %ecx
21074 \ins\()l $1, %ebx
21075 \insc\()l $0, %ecx
21076 +
21077 +.ifb \unchecked
21078 +#ifdef CONFIG_PAX_REFCOUNT
21079 + into
21080 +2:
21081 + _ASM_EXTABLE(2b, 3f)
21082 +#endif
21083 +.endif
21084 +
21085 LOCK_PREFIX
21086 cmpxchg8b (%esi)
21087 jne 1b
21088
21089 -10:
21090 movl %ebx, %eax
21091 movl %ecx, %edx
21092 +
21093 +.ifb \unchecked
21094 +#ifdef CONFIG_PAX_REFCOUNT
21095 +3:
21096 +#endif
21097 +.endif
21098 +
21099 RESTORE ebx
21100 + pax_force_retaddr
21101 ret
21102 CFI_ENDPROC
21103 -ENDPROC(atomic64_\func\()_return_cx8)
21104 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21105 .endm
21106
21107 incdec_return inc add adc
21108 incdec_return dec sub sbb
21109 +incdec_return inc add adc _unchecked
21110 +incdec_return dec sub sbb _unchecked
21111
21112 ENTRY(atomic64_dec_if_positive_cx8)
21113 CFI_STARTPROC
21114 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21115 movl %edx, %ecx
21116 subl $1, %ebx
21117 sbb $0, %ecx
21118 +
21119 +#ifdef CONFIG_PAX_REFCOUNT
21120 + into
21121 +1234:
21122 + _ASM_EXTABLE(1234b, 2f)
21123 +#endif
21124 +
21125 js 2f
21126 LOCK_PREFIX
21127 cmpxchg8b (%esi)
21128 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21129 movl %ebx, %eax
21130 movl %ecx, %edx
21131 RESTORE ebx
21132 + pax_force_retaddr
21133 ret
21134 CFI_ENDPROC
21135 ENDPROC(atomic64_dec_if_positive_cx8)
21136 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21137 movl %edx, %ecx
21138 addl %ebp, %ebx
21139 adcl %edi, %ecx
21140 +
21141 +#ifdef CONFIG_PAX_REFCOUNT
21142 + into
21143 +1234:
21144 + _ASM_EXTABLE(1234b, 3f)
21145 +#endif
21146 +
21147 LOCK_PREFIX
21148 cmpxchg8b (%esi)
21149 jne 1b
21150 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21151 CFI_ADJUST_CFA_OFFSET -8
21152 RESTORE ebx
21153 RESTORE ebp
21154 + pax_force_retaddr
21155 ret
21156 4:
21157 cmpl %edx, 4(%esp)
21158 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21159 xorl %ecx, %ecx
21160 addl $1, %ebx
21161 adcl %edx, %ecx
21162 +
21163 +#ifdef CONFIG_PAX_REFCOUNT
21164 + into
21165 +1234:
21166 + _ASM_EXTABLE(1234b, 3f)
21167 +#endif
21168 +
21169 LOCK_PREFIX
21170 cmpxchg8b (%esi)
21171 jne 1b
21172 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21173 movl $1, %eax
21174 3:
21175 RESTORE ebx
21176 + pax_force_retaddr
21177 ret
21178 CFI_ENDPROC
21179 ENDPROC(atomic64_inc_not_zero_cx8)
21180 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21181 index 78d16a5..fbcf666 100644
21182 --- a/arch/x86/lib/checksum_32.S
21183 +++ b/arch/x86/lib/checksum_32.S
21184 @@ -28,7 +28,8 @@
21185 #include <linux/linkage.h>
21186 #include <asm/dwarf2.h>
21187 #include <asm/errno.h>
21188 -
21189 +#include <asm/segment.h>
21190 +
21191 /*
21192 * computes a partial checksum, e.g. for TCP/UDP fragments
21193 */
21194 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21195
21196 #define ARGBASE 16
21197 #define FP 12
21198 -
21199 -ENTRY(csum_partial_copy_generic)
21200 +
21201 +ENTRY(csum_partial_copy_generic_to_user)
21202 CFI_STARTPROC
21203 +
21204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21205 + pushl_cfi %gs
21206 + popl_cfi %es
21207 + jmp csum_partial_copy_generic
21208 +#endif
21209 +
21210 +ENTRY(csum_partial_copy_generic_from_user)
21211 +
21212 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21213 + pushl_cfi %gs
21214 + popl_cfi %ds
21215 +#endif
21216 +
21217 +ENTRY(csum_partial_copy_generic)
21218 subl $4,%esp
21219 CFI_ADJUST_CFA_OFFSET 4
21220 pushl_cfi %edi
21221 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21222 jmp 4f
21223 SRC(1: movw (%esi), %bx )
21224 addl $2, %esi
21225 -DST( movw %bx, (%edi) )
21226 +DST( movw %bx, %es:(%edi) )
21227 addl $2, %edi
21228 addw %bx, %ax
21229 adcl $0, %eax
21230 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21231 SRC(1: movl (%esi), %ebx )
21232 SRC( movl 4(%esi), %edx )
21233 adcl %ebx, %eax
21234 -DST( movl %ebx, (%edi) )
21235 +DST( movl %ebx, %es:(%edi) )
21236 adcl %edx, %eax
21237 -DST( movl %edx, 4(%edi) )
21238 +DST( movl %edx, %es:4(%edi) )
21239
21240 SRC( movl 8(%esi), %ebx )
21241 SRC( movl 12(%esi), %edx )
21242 adcl %ebx, %eax
21243 -DST( movl %ebx, 8(%edi) )
21244 +DST( movl %ebx, %es:8(%edi) )
21245 adcl %edx, %eax
21246 -DST( movl %edx, 12(%edi) )
21247 +DST( movl %edx, %es:12(%edi) )
21248
21249 SRC( movl 16(%esi), %ebx )
21250 SRC( movl 20(%esi), %edx )
21251 adcl %ebx, %eax
21252 -DST( movl %ebx, 16(%edi) )
21253 +DST( movl %ebx, %es:16(%edi) )
21254 adcl %edx, %eax
21255 -DST( movl %edx, 20(%edi) )
21256 +DST( movl %edx, %es:20(%edi) )
21257
21258 SRC( movl 24(%esi), %ebx )
21259 SRC( movl 28(%esi), %edx )
21260 adcl %ebx, %eax
21261 -DST( movl %ebx, 24(%edi) )
21262 +DST( movl %ebx, %es:24(%edi) )
21263 adcl %edx, %eax
21264 -DST( movl %edx, 28(%edi) )
21265 +DST( movl %edx, %es:28(%edi) )
21266
21267 lea 32(%esi), %esi
21268 lea 32(%edi), %edi
21269 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21270 shrl $2, %edx # This clears CF
21271 SRC(3: movl (%esi), %ebx )
21272 adcl %ebx, %eax
21273 -DST( movl %ebx, (%edi) )
21274 +DST( movl %ebx, %es:(%edi) )
21275 lea 4(%esi), %esi
21276 lea 4(%edi), %edi
21277 dec %edx
21278 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21279 jb 5f
21280 SRC( movw (%esi), %cx )
21281 leal 2(%esi), %esi
21282 -DST( movw %cx, (%edi) )
21283 +DST( movw %cx, %es:(%edi) )
21284 leal 2(%edi), %edi
21285 je 6f
21286 shll $16,%ecx
21287 SRC(5: movb (%esi), %cl )
21288 -DST( movb %cl, (%edi) )
21289 +DST( movb %cl, %es:(%edi) )
21290 6: addl %ecx, %eax
21291 adcl $0, %eax
21292 7:
21293 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21294
21295 6001:
21296 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21297 - movl $-EFAULT, (%ebx)
21298 + movl $-EFAULT, %ss:(%ebx)
21299
21300 # zero the complete destination - computing the rest
21301 # is too much work
21302 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21303
21304 6002:
21305 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21306 - movl $-EFAULT,(%ebx)
21307 + movl $-EFAULT,%ss:(%ebx)
21308 jmp 5000b
21309
21310 .previous
21311
21312 + pushl_cfi %ss
21313 + popl_cfi %ds
21314 + pushl_cfi %ss
21315 + popl_cfi %es
21316 popl_cfi %ebx
21317 CFI_RESTORE ebx
21318 popl_cfi %esi
21319 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21320 popl_cfi %ecx # equivalent to addl $4,%esp
21321 ret
21322 CFI_ENDPROC
21323 -ENDPROC(csum_partial_copy_generic)
21324 +ENDPROC(csum_partial_copy_generic_to_user)
21325
21326 #else
21327
21328 /* Version for PentiumII/PPro */
21329
21330 #define ROUND1(x) \
21331 + nop; nop; nop; \
21332 SRC(movl x(%esi), %ebx ) ; \
21333 addl %ebx, %eax ; \
21334 - DST(movl %ebx, x(%edi) ) ;
21335 + DST(movl %ebx, %es:x(%edi)) ;
21336
21337 #define ROUND(x) \
21338 + nop; nop; nop; \
21339 SRC(movl x(%esi), %ebx ) ; \
21340 adcl %ebx, %eax ; \
21341 - DST(movl %ebx, x(%edi) ) ;
21342 + DST(movl %ebx, %es:x(%edi)) ;
21343
21344 #define ARGBASE 12
21345 -
21346 -ENTRY(csum_partial_copy_generic)
21347 +
21348 +ENTRY(csum_partial_copy_generic_to_user)
21349 CFI_STARTPROC
21350 +
21351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21352 + pushl_cfi %gs
21353 + popl_cfi %es
21354 + jmp csum_partial_copy_generic
21355 +#endif
21356 +
21357 +ENTRY(csum_partial_copy_generic_from_user)
21358 +
21359 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21360 + pushl_cfi %gs
21361 + popl_cfi %ds
21362 +#endif
21363 +
21364 +ENTRY(csum_partial_copy_generic)
21365 pushl_cfi %ebx
21366 CFI_REL_OFFSET ebx, 0
21367 pushl_cfi %edi
21368 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21369 subl %ebx, %edi
21370 lea -1(%esi),%edx
21371 andl $-32,%edx
21372 - lea 3f(%ebx,%ebx), %ebx
21373 + lea 3f(%ebx,%ebx,2), %ebx
21374 testl %esi, %esi
21375 jmp *%ebx
21376 1: addl $64,%esi
21377 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21378 jb 5f
21379 SRC( movw (%esi), %dx )
21380 leal 2(%esi), %esi
21381 -DST( movw %dx, (%edi) )
21382 +DST( movw %dx, %es:(%edi) )
21383 leal 2(%edi), %edi
21384 je 6f
21385 shll $16,%edx
21386 5:
21387 SRC( movb (%esi), %dl )
21388 -DST( movb %dl, (%edi) )
21389 +DST( movb %dl, %es:(%edi) )
21390 6: addl %edx, %eax
21391 adcl $0, %eax
21392 7:
21393 .section .fixup, "ax"
21394 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21395 - movl $-EFAULT, (%ebx)
21396 + movl $-EFAULT, %ss:(%ebx)
21397 # zero the complete destination (computing the rest is too much work)
21398 movl ARGBASE+8(%esp),%edi # dst
21399 movl ARGBASE+12(%esp),%ecx # len
21400 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21401 rep; stosb
21402 jmp 7b
21403 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21404 - movl $-EFAULT, (%ebx)
21405 + movl $-EFAULT, %ss:(%ebx)
21406 jmp 7b
21407 .previous
21408
21409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21410 + pushl_cfi %ss
21411 + popl_cfi %ds
21412 + pushl_cfi %ss
21413 + popl_cfi %es
21414 +#endif
21415 +
21416 popl_cfi %esi
21417 CFI_RESTORE esi
21418 popl_cfi %edi
21419 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21420 CFI_RESTORE ebx
21421 ret
21422 CFI_ENDPROC
21423 -ENDPROC(csum_partial_copy_generic)
21424 +ENDPROC(csum_partial_copy_generic_to_user)
21425
21426 #undef ROUND
21427 #undef ROUND1
21428 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21429 index f2145cf..cea889d 100644
21430 --- a/arch/x86/lib/clear_page_64.S
21431 +++ b/arch/x86/lib/clear_page_64.S
21432 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21433 movl $4096/8,%ecx
21434 xorl %eax,%eax
21435 rep stosq
21436 + pax_force_retaddr
21437 ret
21438 CFI_ENDPROC
21439 ENDPROC(clear_page_c)
21440 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21441 movl $4096,%ecx
21442 xorl %eax,%eax
21443 rep stosb
21444 + pax_force_retaddr
21445 ret
21446 CFI_ENDPROC
21447 ENDPROC(clear_page_c_e)
21448 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21449 leaq 64(%rdi),%rdi
21450 jnz .Lloop
21451 nop
21452 + pax_force_retaddr
21453 ret
21454 CFI_ENDPROC
21455 .Lclear_page_end:
21456 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21457
21458 #include <asm/cpufeature.h>
21459
21460 - .section .altinstr_replacement,"ax"
21461 + .section .altinstr_replacement,"a"
21462 1: .byte 0xeb /* jmp <disp8> */
21463 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21464 2: .byte 0xeb /* jmp <disp8> */
21465 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21466 index 1e572c5..2a162cd 100644
21467 --- a/arch/x86/lib/cmpxchg16b_emu.S
21468 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21469 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21470
21471 popf
21472 mov $1, %al
21473 + pax_force_retaddr
21474 ret
21475
21476 not_same:
21477 popf
21478 xor %al,%al
21479 + pax_force_retaddr
21480 ret
21481
21482 CFI_ENDPROC
21483 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21484 index 6b34d04..dccb07f 100644
21485 --- a/arch/x86/lib/copy_page_64.S
21486 +++ b/arch/x86/lib/copy_page_64.S
21487 @@ -9,6 +9,7 @@ copy_page_c:
21488 CFI_STARTPROC
21489 movl $4096/8,%ecx
21490 rep movsq
21491 + pax_force_retaddr
21492 ret
21493 CFI_ENDPROC
21494 ENDPROC(copy_page_c)
21495 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21496
21497 ENTRY(copy_page)
21498 CFI_STARTPROC
21499 - subq $2*8,%rsp
21500 - CFI_ADJUST_CFA_OFFSET 2*8
21501 + subq $3*8,%rsp
21502 + CFI_ADJUST_CFA_OFFSET 3*8
21503 movq %rbx,(%rsp)
21504 CFI_REL_OFFSET rbx, 0
21505 movq %r12,1*8(%rsp)
21506 CFI_REL_OFFSET r12, 1*8
21507 + movq %r13,2*8(%rsp)
21508 + CFI_REL_OFFSET r13, 2*8
21509
21510 movl $(4096/64)-5,%ecx
21511 .p2align 4
21512 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21513 movq 16 (%rsi), %rdx
21514 movq 24 (%rsi), %r8
21515 movq 32 (%rsi), %r9
21516 - movq 40 (%rsi), %r10
21517 + movq 40 (%rsi), %r13
21518 movq 48 (%rsi), %r11
21519 movq 56 (%rsi), %r12
21520
21521 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21522 movq %rdx, 16 (%rdi)
21523 movq %r8, 24 (%rdi)
21524 movq %r9, 32 (%rdi)
21525 - movq %r10, 40 (%rdi)
21526 + movq %r13, 40 (%rdi)
21527 movq %r11, 48 (%rdi)
21528 movq %r12, 56 (%rdi)
21529
21530 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21531 movq 16 (%rsi), %rdx
21532 movq 24 (%rsi), %r8
21533 movq 32 (%rsi), %r9
21534 - movq 40 (%rsi), %r10
21535 + movq 40 (%rsi), %r13
21536 movq 48 (%rsi), %r11
21537 movq 56 (%rsi), %r12
21538
21539 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21540 movq %rdx, 16 (%rdi)
21541 movq %r8, 24 (%rdi)
21542 movq %r9, 32 (%rdi)
21543 - movq %r10, 40 (%rdi)
21544 + movq %r13, 40 (%rdi)
21545 movq %r11, 48 (%rdi)
21546 movq %r12, 56 (%rdi)
21547
21548 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21549 CFI_RESTORE rbx
21550 movq 1*8(%rsp),%r12
21551 CFI_RESTORE r12
21552 - addq $2*8,%rsp
21553 - CFI_ADJUST_CFA_OFFSET -2*8
21554 + movq 2*8(%rsp),%r13
21555 + CFI_RESTORE r13
21556 + addq $3*8,%rsp
21557 + CFI_ADJUST_CFA_OFFSET -3*8
21558 + pax_force_retaddr
21559 ret
21560 .Lcopy_page_end:
21561 CFI_ENDPROC
21562 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21563
21564 #include <asm/cpufeature.h>
21565
21566 - .section .altinstr_replacement,"ax"
21567 + .section .altinstr_replacement,"a"
21568 1: .byte 0xeb /* jmp <disp8> */
21569 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21570 2:
21571 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21572 index 0248402..821c786 100644
21573 --- a/arch/x86/lib/copy_user_64.S
21574 +++ b/arch/x86/lib/copy_user_64.S
21575 @@ -16,6 +16,7 @@
21576 #include <asm/thread_info.h>
21577 #include <asm/cpufeature.h>
21578 #include <asm/alternative-asm.h>
21579 +#include <asm/pgtable.h>
21580
21581 /*
21582 * By placing feature2 after feature1 in altinstructions section, we logically
21583 @@ -29,7 +30,7 @@
21584 .byte 0xe9 /* 32bit jump */
21585 .long \orig-1f /* by default jump to orig */
21586 1:
21587 - .section .altinstr_replacement,"ax"
21588 + .section .altinstr_replacement,"a"
21589 2: .byte 0xe9 /* near jump with 32bit immediate */
21590 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21591 3: .byte 0xe9 /* near jump with 32bit immediate */
21592 @@ -71,47 +72,20 @@
21593 #endif
21594 .endm
21595
21596 -/* Standard copy_to_user with segment limit checking */
21597 -ENTRY(_copy_to_user)
21598 - CFI_STARTPROC
21599 - GET_THREAD_INFO(%rax)
21600 - movq %rdi,%rcx
21601 - addq %rdx,%rcx
21602 - jc bad_to_user
21603 - cmpq TI_addr_limit(%rax),%rcx
21604 - ja bad_to_user
21605 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21606 - copy_user_generic_unrolled,copy_user_generic_string, \
21607 - copy_user_enhanced_fast_string
21608 - CFI_ENDPROC
21609 -ENDPROC(_copy_to_user)
21610 -
21611 -/* Standard copy_from_user with segment limit checking */
21612 -ENTRY(_copy_from_user)
21613 - CFI_STARTPROC
21614 - GET_THREAD_INFO(%rax)
21615 - movq %rsi,%rcx
21616 - addq %rdx,%rcx
21617 - jc bad_from_user
21618 - cmpq TI_addr_limit(%rax),%rcx
21619 - ja bad_from_user
21620 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21621 - copy_user_generic_unrolled,copy_user_generic_string, \
21622 - copy_user_enhanced_fast_string
21623 - CFI_ENDPROC
21624 -ENDPROC(_copy_from_user)
21625 -
21626 .section .fixup,"ax"
21627 /* must zero dest */
21628 ENTRY(bad_from_user)
21629 bad_from_user:
21630 CFI_STARTPROC
21631 + testl %edx,%edx
21632 + js bad_to_user
21633 movl %edx,%ecx
21634 xorl %eax,%eax
21635 rep
21636 stosb
21637 bad_to_user:
21638 movl %edx,%eax
21639 + pax_force_retaddr
21640 ret
21641 CFI_ENDPROC
21642 ENDPROC(bad_from_user)
21643 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21644 jz 17f
21645 1: movq (%rsi),%r8
21646 2: movq 1*8(%rsi),%r9
21647 -3: movq 2*8(%rsi),%r10
21648 +3: movq 2*8(%rsi),%rax
21649 4: movq 3*8(%rsi),%r11
21650 5: movq %r8,(%rdi)
21651 6: movq %r9,1*8(%rdi)
21652 -7: movq %r10,2*8(%rdi)
21653 +7: movq %rax,2*8(%rdi)
21654 8: movq %r11,3*8(%rdi)
21655 9: movq 4*8(%rsi),%r8
21656 10: movq 5*8(%rsi),%r9
21657 -11: movq 6*8(%rsi),%r10
21658 +11: movq 6*8(%rsi),%rax
21659 12: movq 7*8(%rsi),%r11
21660 13: movq %r8,4*8(%rdi)
21661 14: movq %r9,5*8(%rdi)
21662 -15: movq %r10,6*8(%rdi)
21663 +15: movq %rax,6*8(%rdi)
21664 16: movq %r11,7*8(%rdi)
21665 leaq 64(%rsi),%rsi
21666 leaq 64(%rdi),%rdi
21667 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21668 decl %ecx
21669 jnz 21b
21670 23: xor %eax,%eax
21671 + pax_force_retaddr
21672 ret
21673
21674 .section .fixup,"ax"
21675 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21676 3: rep
21677 movsb
21678 4: xorl %eax,%eax
21679 + pax_force_retaddr
21680 ret
21681
21682 .section .fixup,"ax"
21683 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21684 1: rep
21685 movsb
21686 2: xorl %eax,%eax
21687 + pax_force_retaddr
21688 ret
21689
21690 .section .fixup,"ax"
21691 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21692 index cb0c112..e3a6895 100644
21693 --- a/arch/x86/lib/copy_user_nocache_64.S
21694 +++ b/arch/x86/lib/copy_user_nocache_64.S
21695 @@ -8,12 +8,14 @@
21696
21697 #include <linux/linkage.h>
21698 #include <asm/dwarf2.h>
21699 +#include <asm/alternative-asm.h>
21700
21701 #define FIX_ALIGNMENT 1
21702
21703 #include <asm/current.h>
21704 #include <asm/asm-offsets.h>
21705 #include <asm/thread_info.h>
21706 +#include <asm/pgtable.h>
21707
21708 .macro ALIGN_DESTINATION
21709 #ifdef FIX_ALIGNMENT
21710 @@ -50,6 +52,15 @@
21711 */
21712 ENTRY(__copy_user_nocache)
21713 CFI_STARTPROC
21714 +
21715 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21716 + mov $PAX_USER_SHADOW_BASE,%rcx
21717 + cmp %rcx,%rsi
21718 + jae 1f
21719 + add %rcx,%rsi
21720 +1:
21721 +#endif
21722 +
21723 cmpl $8,%edx
21724 jb 20f /* less then 8 bytes, go to byte copy loop */
21725 ALIGN_DESTINATION
21726 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21727 jz 17f
21728 1: movq (%rsi),%r8
21729 2: movq 1*8(%rsi),%r9
21730 -3: movq 2*8(%rsi),%r10
21731 +3: movq 2*8(%rsi),%rax
21732 4: movq 3*8(%rsi),%r11
21733 5: movnti %r8,(%rdi)
21734 6: movnti %r9,1*8(%rdi)
21735 -7: movnti %r10,2*8(%rdi)
21736 +7: movnti %rax,2*8(%rdi)
21737 8: movnti %r11,3*8(%rdi)
21738 9: movq 4*8(%rsi),%r8
21739 10: movq 5*8(%rsi),%r9
21740 -11: movq 6*8(%rsi),%r10
21741 +11: movq 6*8(%rsi),%rax
21742 12: movq 7*8(%rsi),%r11
21743 13: movnti %r8,4*8(%rdi)
21744 14: movnti %r9,5*8(%rdi)
21745 -15: movnti %r10,6*8(%rdi)
21746 +15: movnti %rax,6*8(%rdi)
21747 16: movnti %r11,7*8(%rdi)
21748 leaq 64(%rsi),%rsi
21749 leaq 64(%rdi),%rdi
21750 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21751 jnz 21b
21752 23: xorl %eax,%eax
21753 sfence
21754 + pax_force_retaddr
21755 ret
21756
21757 .section .fixup,"ax"
21758 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21759 index fb903b7..c92b7f7 100644
21760 --- a/arch/x86/lib/csum-copy_64.S
21761 +++ b/arch/x86/lib/csum-copy_64.S
21762 @@ -8,6 +8,7 @@
21763 #include <linux/linkage.h>
21764 #include <asm/dwarf2.h>
21765 #include <asm/errno.h>
21766 +#include <asm/alternative-asm.h>
21767
21768 /*
21769 * Checksum copy with exception handling.
21770 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21771 CFI_RESTORE rbp
21772 addq $7*8, %rsp
21773 CFI_ADJUST_CFA_OFFSET -7*8
21774 + pax_force_retaddr 0, 1
21775 ret
21776 CFI_RESTORE_STATE
21777
21778 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21779 index 459b58a..9570bc7 100644
21780 --- a/arch/x86/lib/csum-wrappers_64.c
21781 +++ b/arch/x86/lib/csum-wrappers_64.c
21782 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21783 len -= 2;
21784 }
21785 }
21786 - isum = csum_partial_copy_generic((__force const void *)src,
21787 +
21788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21789 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21790 + src += PAX_USER_SHADOW_BASE;
21791 +#endif
21792 +
21793 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21794 dst, len, isum, errp, NULL);
21795 if (unlikely(*errp))
21796 goto out_err;
21797 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21798 }
21799
21800 *errp = 0;
21801 - return csum_partial_copy_generic(src, (void __force *)dst,
21802 +
21803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21804 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21805 + dst += PAX_USER_SHADOW_BASE;
21806 +#endif
21807 +
21808 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21809 len, isum, NULL, errp);
21810 }
21811 EXPORT_SYMBOL(csum_partial_copy_to_user);
21812 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21813 index 51f1504..ddac4c1 100644
21814 --- a/arch/x86/lib/getuser.S
21815 +++ b/arch/x86/lib/getuser.S
21816 @@ -33,15 +33,38 @@
21817 #include <asm/asm-offsets.h>
21818 #include <asm/thread_info.h>
21819 #include <asm/asm.h>
21820 +#include <asm/segment.h>
21821 +#include <asm/pgtable.h>
21822 +#include <asm/alternative-asm.h>
21823 +
21824 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21825 +#define __copyuser_seg gs;
21826 +#else
21827 +#define __copyuser_seg
21828 +#endif
21829
21830 .text
21831 ENTRY(__get_user_1)
21832 CFI_STARTPROC
21833 +
21834 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21835 GET_THREAD_INFO(%_ASM_DX)
21836 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21837 jae bad_get_user
21838 -1: movzb (%_ASM_AX),%edx
21839 +
21840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21841 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21842 + cmp %_ASM_DX,%_ASM_AX
21843 + jae 1234f
21844 + add %_ASM_DX,%_ASM_AX
21845 +1234:
21846 +#endif
21847 +
21848 +#endif
21849 +
21850 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21851 xor %eax,%eax
21852 + pax_force_retaddr
21853 ret
21854 CFI_ENDPROC
21855 ENDPROC(__get_user_1)
21856 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21857 ENTRY(__get_user_2)
21858 CFI_STARTPROC
21859 add $1,%_ASM_AX
21860 +
21861 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21862 jc bad_get_user
21863 GET_THREAD_INFO(%_ASM_DX)
21864 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21865 jae bad_get_user
21866 -2: movzwl -1(%_ASM_AX),%edx
21867 +
21868 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21869 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21870 + cmp %_ASM_DX,%_ASM_AX
21871 + jae 1234f
21872 + add %_ASM_DX,%_ASM_AX
21873 +1234:
21874 +#endif
21875 +
21876 +#endif
21877 +
21878 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21879 xor %eax,%eax
21880 + pax_force_retaddr
21881 ret
21882 CFI_ENDPROC
21883 ENDPROC(__get_user_2)
21884 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21885 ENTRY(__get_user_4)
21886 CFI_STARTPROC
21887 add $3,%_ASM_AX
21888 +
21889 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21890 jc bad_get_user
21891 GET_THREAD_INFO(%_ASM_DX)
21892 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21893 jae bad_get_user
21894 -3: mov -3(%_ASM_AX),%edx
21895 +
21896 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21897 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21898 + cmp %_ASM_DX,%_ASM_AX
21899 + jae 1234f
21900 + add %_ASM_DX,%_ASM_AX
21901 +1234:
21902 +#endif
21903 +
21904 +#endif
21905 +
21906 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21907 xor %eax,%eax
21908 + pax_force_retaddr
21909 ret
21910 CFI_ENDPROC
21911 ENDPROC(__get_user_4)
21912 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21913 GET_THREAD_INFO(%_ASM_DX)
21914 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21915 jae bad_get_user
21916 +
21917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21918 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21919 + cmp %_ASM_DX,%_ASM_AX
21920 + jae 1234f
21921 + add %_ASM_DX,%_ASM_AX
21922 +1234:
21923 +#endif
21924 +
21925 4: movq -7(%_ASM_AX),%_ASM_DX
21926 xor %eax,%eax
21927 + pax_force_retaddr
21928 ret
21929 CFI_ENDPROC
21930 ENDPROC(__get_user_8)
21931 @@ -91,6 +152,7 @@ bad_get_user:
21932 CFI_STARTPROC
21933 xor %edx,%edx
21934 mov $(-EFAULT),%_ASM_AX
21935 + pax_force_retaddr
21936 ret
21937 CFI_ENDPROC
21938 END(bad_get_user)
21939 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21940 index b1e6c4b..21ae8fc 100644
21941 --- a/arch/x86/lib/insn.c
21942 +++ b/arch/x86/lib/insn.c
21943 @@ -21,6 +21,11 @@
21944 #include <linux/string.h>
21945 #include <asm/inat.h>
21946 #include <asm/insn.h>
21947 +#ifdef __KERNEL__
21948 +#include <asm/pgtable_types.h>
21949 +#else
21950 +#define ktla_ktva(addr) addr
21951 +#endif
21952
21953 /* Verify next sizeof(t) bytes can be on the same instruction */
21954 #define validate_next(t, insn, n) \
21955 @@ -49,8 +54,8 @@
21956 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21957 {
21958 memset(insn, 0, sizeof(*insn));
21959 - insn->kaddr = kaddr;
21960 - insn->next_byte = kaddr;
21961 + insn->kaddr = ktla_ktva(kaddr);
21962 + insn->next_byte = ktla_ktva(kaddr);
21963 insn->x86_64 = x86_64 ? 1 : 0;
21964 insn->opnd_bytes = 4;
21965 if (x86_64)
21966 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21967 index 05a95e7..326f2fa 100644
21968 --- a/arch/x86/lib/iomap_copy_64.S
21969 +++ b/arch/x86/lib/iomap_copy_64.S
21970 @@ -17,6 +17,7 @@
21971
21972 #include <linux/linkage.h>
21973 #include <asm/dwarf2.h>
21974 +#include <asm/alternative-asm.h>
21975
21976 /*
21977 * override generic version in lib/iomap_copy.c
21978 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21979 CFI_STARTPROC
21980 movl %edx,%ecx
21981 rep movsd
21982 + pax_force_retaddr
21983 ret
21984 CFI_ENDPROC
21985 ENDPROC(__iowrite32_copy)
21986 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21987 index 1c273be..da9cc0e 100644
21988 --- a/arch/x86/lib/memcpy_64.S
21989 +++ b/arch/x86/lib/memcpy_64.S
21990 @@ -33,6 +33,7 @@
21991 rep movsq
21992 movl %edx, %ecx
21993 rep movsb
21994 + pax_force_retaddr
21995 ret
21996 .Lmemcpy_e:
21997 .previous
21998 @@ -49,6 +50,7 @@
21999 movq %rdi, %rax
22000 movq %rdx, %rcx
22001 rep movsb
22002 + pax_force_retaddr
22003 ret
22004 .Lmemcpy_e_e:
22005 .previous
22006 @@ -76,13 +78,13 @@ ENTRY(memcpy)
22007 */
22008 movq 0*8(%rsi), %r8
22009 movq 1*8(%rsi), %r9
22010 - movq 2*8(%rsi), %r10
22011 + movq 2*8(%rsi), %rcx
22012 movq 3*8(%rsi), %r11
22013 leaq 4*8(%rsi), %rsi
22014
22015 movq %r8, 0*8(%rdi)
22016 movq %r9, 1*8(%rdi)
22017 - movq %r10, 2*8(%rdi)
22018 + movq %rcx, 2*8(%rdi)
22019 movq %r11, 3*8(%rdi)
22020 leaq 4*8(%rdi), %rdi
22021 jae .Lcopy_forward_loop
22022 @@ -105,12 +107,12 @@ ENTRY(memcpy)
22023 subq $0x20, %rdx
22024 movq -1*8(%rsi), %r8
22025 movq -2*8(%rsi), %r9
22026 - movq -3*8(%rsi), %r10
22027 + movq -3*8(%rsi), %rcx
22028 movq -4*8(%rsi), %r11
22029 leaq -4*8(%rsi), %rsi
22030 movq %r8, -1*8(%rdi)
22031 movq %r9, -2*8(%rdi)
22032 - movq %r10, -3*8(%rdi)
22033 + movq %rcx, -3*8(%rdi)
22034 movq %r11, -4*8(%rdi)
22035 leaq -4*8(%rdi), %rdi
22036 jae .Lcopy_backward_loop
22037 @@ -130,12 +132,13 @@ ENTRY(memcpy)
22038 */
22039 movq 0*8(%rsi), %r8
22040 movq 1*8(%rsi), %r9
22041 - movq -2*8(%rsi, %rdx), %r10
22042 + movq -2*8(%rsi, %rdx), %rcx
22043 movq -1*8(%rsi, %rdx), %r11
22044 movq %r8, 0*8(%rdi)
22045 movq %r9, 1*8(%rdi)
22046 - movq %r10, -2*8(%rdi, %rdx)
22047 + movq %rcx, -2*8(%rdi, %rdx)
22048 movq %r11, -1*8(%rdi, %rdx)
22049 + pax_force_retaddr
22050 retq
22051 .p2align 4
22052 .Lless_16bytes:
22053 @@ -148,6 +151,7 @@ ENTRY(memcpy)
22054 movq -1*8(%rsi, %rdx), %r9
22055 movq %r8, 0*8(%rdi)
22056 movq %r9, -1*8(%rdi, %rdx)
22057 + pax_force_retaddr
22058 retq
22059 .p2align 4
22060 .Lless_8bytes:
22061 @@ -161,6 +165,7 @@ ENTRY(memcpy)
22062 movl -4(%rsi, %rdx), %r8d
22063 movl %ecx, (%rdi)
22064 movl %r8d, -4(%rdi, %rdx)
22065 + pax_force_retaddr
22066 retq
22067 .p2align 4
22068 .Lless_3bytes:
22069 @@ -179,6 +184,7 @@ ENTRY(memcpy)
22070 movb %cl, (%rdi)
22071
22072 .Lend:
22073 + pax_force_retaddr
22074 retq
22075 CFI_ENDPROC
22076 ENDPROC(memcpy)
22077 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22078 index ee16461..c39c199 100644
22079 --- a/arch/x86/lib/memmove_64.S
22080 +++ b/arch/x86/lib/memmove_64.S
22081 @@ -61,13 +61,13 @@ ENTRY(memmove)
22082 5:
22083 sub $0x20, %rdx
22084 movq 0*8(%rsi), %r11
22085 - movq 1*8(%rsi), %r10
22086 + movq 1*8(%rsi), %rcx
22087 movq 2*8(%rsi), %r9
22088 movq 3*8(%rsi), %r8
22089 leaq 4*8(%rsi), %rsi
22090
22091 movq %r11, 0*8(%rdi)
22092 - movq %r10, 1*8(%rdi)
22093 + movq %rcx, 1*8(%rdi)
22094 movq %r9, 2*8(%rdi)
22095 movq %r8, 3*8(%rdi)
22096 leaq 4*8(%rdi), %rdi
22097 @@ -81,10 +81,10 @@ ENTRY(memmove)
22098 4:
22099 movq %rdx, %rcx
22100 movq -8(%rsi, %rdx), %r11
22101 - lea -8(%rdi, %rdx), %r10
22102 + lea -8(%rdi, %rdx), %r9
22103 shrq $3, %rcx
22104 rep movsq
22105 - movq %r11, (%r10)
22106 + movq %r11, (%r9)
22107 jmp 13f
22108 .Lmemmove_end_forward:
22109
22110 @@ -95,14 +95,14 @@ ENTRY(memmove)
22111 7:
22112 movq %rdx, %rcx
22113 movq (%rsi), %r11
22114 - movq %rdi, %r10
22115 + movq %rdi, %r9
22116 leaq -8(%rsi, %rdx), %rsi
22117 leaq -8(%rdi, %rdx), %rdi
22118 shrq $3, %rcx
22119 std
22120 rep movsq
22121 cld
22122 - movq %r11, (%r10)
22123 + movq %r11, (%r9)
22124 jmp 13f
22125
22126 /*
22127 @@ -127,13 +127,13 @@ ENTRY(memmove)
22128 8:
22129 subq $0x20, %rdx
22130 movq -1*8(%rsi), %r11
22131 - movq -2*8(%rsi), %r10
22132 + movq -2*8(%rsi), %rcx
22133 movq -3*8(%rsi), %r9
22134 movq -4*8(%rsi), %r8
22135 leaq -4*8(%rsi), %rsi
22136
22137 movq %r11, -1*8(%rdi)
22138 - movq %r10, -2*8(%rdi)
22139 + movq %rcx, -2*8(%rdi)
22140 movq %r9, -3*8(%rdi)
22141 movq %r8, -4*8(%rdi)
22142 leaq -4*8(%rdi), %rdi
22143 @@ -151,11 +151,11 @@ ENTRY(memmove)
22144 * Move data from 16 bytes to 31 bytes.
22145 */
22146 movq 0*8(%rsi), %r11
22147 - movq 1*8(%rsi), %r10
22148 + movq 1*8(%rsi), %rcx
22149 movq -2*8(%rsi, %rdx), %r9
22150 movq -1*8(%rsi, %rdx), %r8
22151 movq %r11, 0*8(%rdi)
22152 - movq %r10, 1*8(%rdi)
22153 + movq %rcx, 1*8(%rdi)
22154 movq %r9, -2*8(%rdi, %rdx)
22155 movq %r8, -1*8(%rdi, %rdx)
22156 jmp 13f
22157 @@ -167,9 +167,9 @@ ENTRY(memmove)
22158 * Move data from 8 bytes to 15 bytes.
22159 */
22160 movq 0*8(%rsi), %r11
22161 - movq -1*8(%rsi, %rdx), %r10
22162 + movq -1*8(%rsi, %rdx), %r9
22163 movq %r11, 0*8(%rdi)
22164 - movq %r10, -1*8(%rdi, %rdx)
22165 + movq %r9, -1*8(%rdi, %rdx)
22166 jmp 13f
22167 10:
22168 cmpq $4, %rdx
22169 @@ -178,9 +178,9 @@ ENTRY(memmove)
22170 * Move data from 4 bytes to 7 bytes.
22171 */
22172 movl (%rsi), %r11d
22173 - movl -4(%rsi, %rdx), %r10d
22174 + movl -4(%rsi, %rdx), %r9d
22175 movl %r11d, (%rdi)
22176 - movl %r10d, -4(%rdi, %rdx)
22177 + movl %r9d, -4(%rdi, %rdx)
22178 jmp 13f
22179 11:
22180 cmp $2, %rdx
22181 @@ -189,9 +189,9 @@ ENTRY(memmove)
22182 * Move data from 2 bytes to 3 bytes.
22183 */
22184 movw (%rsi), %r11w
22185 - movw -2(%rsi, %rdx), %r10w
22186 + movw -2(%rsi, %rdx), %r9w
22187 movw %r11w, (%rdi)
22188 - movw %r10w, -2(%rdi, %rdx)
22189 + movw %r9w, -2(%rdi, %rdx)
22190 jmp 13f
22191 12:
22192 cmp $1, %rdx
22193 @@ -202,6 +202,7 @@ ENTRY(memmove)
22194 movb (%rsi), %r11b
22195 movb %r11b, (%rdi)
22196 13:
22197 + pax_force_retaddr
22198 retq
22199 CFI_ENDPROC
22200
22201 @@ -210,6 +211,7 @@ ENTRY(memmove)
22202 /* Forward moving data. */
22203 movq %rdx, %rcx
22204 rep movsb
22205 + pax_force_retaddr
22206 retq
22207 .Lmemmove_end_forward_efs:
22208 .previous
22209 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22210 index 2dcb380..963660a 100644
22211 --- a/arch/x86/lib/memset_64.S
22212 +++ b/arch/x86/lib/memset_64.S
22213 @@ -30,6 +30,7 @@
22214 movl %edx,%ecx
22215 rep stosb
22216 movq %r9,%rax
22217 + pax_force_retaddr
22218 ret
22219 .Lmemset_e:
22220 .previous
22221 @@ -52,6 +53,7 @@
22222 movq %rdx,%rcx
22223 rep stosb
22224 movq %r9,%rax
22225 + pax_force_retaddr
22226 ret
22227 .Lmemset_e_e:
22228 .previous
22229 @@ -59,7 +61,7 @@
22230 ENTRY(memset)
22231 ENTRY(__memset)
22232 CFI_STARTPROC
22233 - movq %rdi,%r10
22234 + movq %rdi,%r11
22235
22236 /* expand byte value */
22237 movzbl %sil,%ecx
22238 @@ -117,7 +119,8 @@ ENTRY(__memset)
22239 jnz .Lloop_1
22240
22241 .Lende:
22242 - movq %r10,%rax
22243 + movq %r11,%rax
22244 + pax_force_retaddr
22245 ret
22246
22247 CFI_RESTORE_STATE
22248 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22249 index c9f2d9b..e7fd2c0 100644
22250 --- a/arch/x86/lib/mmx_32.c
22251 +++ b/arch/x86/lib/mmx_32.c
22252 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22253 {
22254 void *p;
22255 int i;
22256 + unsigned long cr0;
22257
22258 if (unlikely(in_interrupt()))
22259 return __memcpy(to, from, len);
22260 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22261 kernel_fpu_begin();
22262
22263 __asm__ __volatile__ (
22264 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22265 - " prefetch 64(%0)\n"
22266 - " prefetch 128(%0)\n"
22267 - " prefetch 192(%0)\n"
22268 - " prefetch 256(%0)\n"
22269 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22270 + " prefetch 64(%1)\n"
22271 + " prefetch 128(%1)\n"
22272 + " prefetch 192(%1)\n"
22273 + " prefetch 256(%1)\n"
22274 "2: \n"
22275 ".section .fixup, \"ax\"\n"
22276 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22277 + "3: \n"
22278 +
22279 +#ifdef CONFIG_PAX_KERNEXEC
22280 + " movl %%cr0, %0\n"
22281 + " movl %0, %%eax\n"
22282 + " andl $0xFFFEFFFF, %%eax\n"
22283 + " movl %%eax, %%cr0\n"
22284 +#endif
22285 +
22286 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22287 +
22288 +#ifdef CONFIG_PAX_KERNEXEC
22289 + " movl %0, %%cr0\n"
22290 +#endif
22291 +
22292 " jmp 2b\n"
22293 ".previous\n"
22294 _ASM_EXTABLE(1b, 3b)
22295 - : : "r" (from));
22296 + : "=&r" (cr0) : "r" (from) : "ax");
22297
22298 for ( ; i > 5; i--) {
22299 __asm__ __volatile__ (
22300 - "1: prefetch 320(%0)\n"
22301 - "2: movq (%0), %%mm0\n"
22302 - " movq 8(%0), %%mm1\n"
22303 - " movq 16(%0), %%mm2\n"
22304 - " movq 24(%0), %%mm3\n"
22305 - " movq %%mm0, (%1)\n"
22306 - " movq %%mm1, 8(%1)\n"
22307 - " movq %%mm2, 16(%1)\n"
22308 - " movq %%mm3, 24(%1)\n"
22309 - " movq 32(%0), %%mm0\n"
22310 - " movq 40(%0), %%mm1\n"
22311 - " movq 48(%0), %%mm2\n"
22312 - " movq 56(%0), %%mm3\n"
22313 - " movq %%mm0, 32(%1)\n"
22314 - " movq %%mm1, 40(%1)\n"
22315 - " movq %%mm2, 48(%1)\n"
22316 - " movq %%mm3, 56(%1)\n"
22317 + "1: prefetch 320(%1)\n"
22318 + "2: movq (%1), %%mm0\n"
22319 + " movq 8(%1), %%mm1\n"
22320 + " movq 16(%1), %%mm2\n"
22321 + " movq 24(%1), %%mm3\n"
22322 + " movq %%mm0, (%2)\n"
22323 + " movq %%mm1, 8(%2)\n"
22324 + " movq %%mm2, 16(%2)\n"
22325 + " movq %%mm3, 24(%2)\n"
22326 + " movq 32(%1), %%mm0\n"
22327 + " movq 40(%1), %%mm1\n"
22328 + " movq 48(%1), %%mm2\n"
22329 + " movq 56(%1), %%mm3\n"
22330 + " movq %%mm0, 32(%2)\n"
22331 + " movq %%mm1, 40(%2)\n"
22332 + " movq %%mm2, 48(%2)\n"
22333 + " movq %%mm3, 56(%2)\n"
22334 ".section .fixup, \"ax\"\n"
22335 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22336 + "3:\n"
22337 +
22338 +#ifdef CONFIG_PAX_KERNEXEC
22339 + " movl %%cr0, %0\n"
22340 + " movl %0, %%eax\n"
22341 + " andl $0xFFFEFFFF, %%eax\n"
22342 + " movl %%eax, %%cr0\n"
22343 +#endif
22344 +
22345 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22346 +
22347 +#ifdef CONFIG_PAX_KERNEXEC
22348 + " movl %0, %%cr0\n"
22349 +#endif
22350 +
22351 " jmp 2b\n"
22352 ".previous\n"
22353 _ASM_EXTABLE(1b, 3b)
22354 - : : "r" (from), "r" (to) : "memory");
22355 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22356
22357 from += 64;
22358 to += 64;
22359 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22360 static void fast_copy_page(void *to, void *from)
22361 {
22362 int i;
22363 + unsigned long cr0;
22364
22365 kernel_fpu_begin();
22366
22367 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22368 * but that is for later. -AV
22369 */
22370 __asm__ __volatile__(
22371 - "1: prefetch (%0)\n"
22372 - " prefetch 64(%0)\n"
22373 - " prefetch 128(%0)\n"
22374 - " prefetch 192(%0)\n"
22375 - " prefetch 256(%0)\n"
22376 + "1: prefetch (%1)\n"
22377 + " prefetch 64(%1)\n"
22378 + " prefetch 128(%1)\n"
22379 + " prefetch 192(%1)\n"
22380 + " prefetch 256(%1)\n"
22381 "2: \n"
22382 ".section .fixup, \"ax\"\n"
22383 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22384 + "3: \n"
22385 +
22386 +#ifdef CONFIG_PAX_KERNEXEC
22387 + " movl %%cr0, %0\n"
22388 + " movl %0, %%eax\n"
22389 + " andl $0xFFFEFFFF, %%eax\n"
22390 + " movl %%eax, %%cr0\n"
22391 +#endif
22392 +
22393 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22394 +
22395 +#ifdef CONFIG_PAX_KERNEXEC
22396 + " movl %0, %%cr0\n"
22397 +#endif
22398 +
22399 " jmp 2b\n"
22400 ".previous\n"
22401 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22402 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22403
22404 for (i = 0; i < (4096-320)/64; i++) {
22405 __asm__ __volatile__ (
22406 - "1: prefetch 320(%0)\n"
22407 - "2: movq (%0), %%mm0\n"
22408 - " movntq %%mm0, (%1)\n"
22409 - " movq 8(%0), %%mm1\n"
22410 - " movntq %%mm1, 8(%1)\n"
22411 - " movq 16(%0), %%mm2\n"
22412 - " movntq %%mm2, 16(%1)\n"
22413 - " movq 24(%0), %%mm3\n"
22414 - " movntq %%mm3, 24(%1)\n"
22415 - " movq 32(%0), %%mm4\n"
22416 - " movntq %%mm4, 32(%1)\n"
22417 - " movq 40(%0), %%mm5\n"
22418 - " movntq %%mm5, 40(%1)\n"
22419 - " movq 48(%0), %%mm6\n"
22420 - " movntq %%mm6, 48(%1)\n"
22421 - " movq 56(%0), %%mm7\n"
22422 - " movntq %%mm7, 56(%1)\n"
22423 + "1: prefetch 320(%1)\n"
22424 + "2: movq (%1), %%mm0\n"
22425 + " movntq %%mm0, (%2)\n"
22426 + " movq 8(%1), %%mm1\n"
22427 + " movntq %%mm1, 8(%2)\n"
22428 + " movq 16(%1), %%mm2\n"
22429 + " movntq %%mm2, 16(%2)\n"
22430 + " movq 24(%1), %%mm3\n"
22431 + " movntq %%mm3, 24(%2)\n"
22432 + " movq 32(%1), %%mm4\n"
22433 + " movntq %%mm4, 32(%2)\n"
22434 + " movq 40(%1), %%mm5\n"
22435 + " movntq %%mm5, 40(%2)\n"
22436 + " movq 48(%1), %%mm6\n"
22437 + " movntq %%mm6, 48(%2)\n"
22438 + " movq 56(%1), %%mm7\n"
22439 + " movntq %%mm7, 56(%2)\n"
22440 ".section .fixup, \"ax\"\n"
22441 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22442 + "3:\n"
22443 +
22444 +#ifdef CONFIG_PAX_KERNEXEC
22445 + " movl %%cr0, %0\n"
22446 + " movl %0, %%eax\n"
22447 + " andl $0xFFFEFFFF, %%eax\n"
22448 + " movl %%eax, %%cr0\n"
22449 +#endif
22450 +
22451 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22452 +
22453 +#ifdef CONFIG_PAX_KERNEXEC
22454 + " movl %0, %%cr0\n"
22455 +#endif
22456 +
22457 " jmp 2b\n"
22458 ".previous\n"
22459 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22460 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22461
22462 from += 64;
22463 to += 64;
22464 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22465 static void fast_copy_page(void *to, void *from)
22466 {
22467 int i;
22468 + unsigned long cr0;
22469
22470 kernel_fpu_begin();
22471
22472 __asm__ __volatile__ (
22473 - "1: prefetch (%0)\n"
22474 - " prefetch 64(%0)\n"
22475 - " prefetch 128(%0)\n"
22476 - " prefetch 192(%0)\n"
22477 - " prefetch 256(%0)\n"
22478 + "1: prefetch (%1)\n"
22479 + " prefetch 64(%1)\n"
22480 + " prefetch 128(%1)\n"
22481 + " prefetch 192(%1)\n"
22482 + " prefetch 256(%1)\n"
22483 "2: \n"
22484 ".section .fixup, \"ax\"\n"
22485 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22486 + "3: \n"
22487 +
22488 +#ifdef CONFIG_PAX_KERNEXEC
22489 + " movl %%cr0, %0\n"
22490 + " movl %0, %%eax\n"
22491 + " andl $0xFFFEFFFF, %%eax\n"
22492 + " movl %%eax, %%cr0\n"
22493 +#endif
22494 +
22495 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22496 +
22497 +#ifdef CONFIG_PAX_KERNEXEC
22498 + " movl %0, %%cr0\n"
22499 +#endif
22500 +
22501 " jmp 2b\n"
22502 ".previous\n"
22503 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22504 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22505
22506 for (i = 0; i < 4096/64; i++) {
22507 __asm__ __volatile__ (
22508 - "1: prefetch 320(%0)\n"
22509 - "2: movq (%0), %%mm0\n"
22510 - " movq 8(%0), %%mm1\n"
22511 - " movq 16(%0), %%mm2\n"
22512 - " movq 24(%0), %%mm3\n"
22513 - " movq %%mm0, (%1)\n"
22514 - " movq %%mm1, 8(%1)\n"
22515 - " movq %%mm2, 16(%1)\n"
22516 - " movq %%mm3, 24(%1)\n"
22517 - " movq 32(%0), %%mm0\n"
22518 - " movq 40(%0), %%mm1\n"
22519 - " movq 48(%0), %%mm2\n"
22520 - " movq 56(%0), %%mm3\n"
22521 - " movq %%mm0, 32(%1)\n"
22522 - " movq %%mm1, 40(%1)\n"
22523 - " movq %%mm2, 48(%1)\n"
22524 - " movq %%mm3, 56(%1)\n"
22525 + "1: prefetch 320(%1)\n"
22526 + "2: movq (%1), %%mm0\n"
22527 + " movq 8(%1), %%mm1\n"
22528 + " movq 16(%1), %%mm2\n"
22529 + " movq 24(%1), %%mm3\n"
22530 + " movq %%mm0, (%2)\n"
22531 + " movq %%mm1, 8(%2)\n"
22532 + " movq %%mm2, 16(%2)\n"
22533 + " movq %%mm3, 24(%2)\n"
22534 + " movq 32(%1), %%mm0\n"
22535 + " movq 40(%1), %%mm1\n"
22536 + " movq 48(%1), %%mm2\n"
22537 + " movq 56(%1), %%mm3\n"
22538 + " movq %%mm0, 32(%2)\n"
22539 + " movq %%mm1, 40(%2)\n"
22540 + " movq %%mm2, 48(%2)\n"
22541 + " movq %%mm3, 56(%2)\n"
22542 ".section .fixup, \"ax\"\n"
22543 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22544 + "3:\n"
22545 +
22546 +#ifdef CONFIG_PAX_KERNEXEC
22547 + " movl %%cr0, %0\n"
22548 + " movl %0, %%eax\n"
22549 + " andl $0xFFFEFFFF, %%eax\n"
22550 + " movl %%eax, %%cr0\n"
22551 +#endif
22552 +
22553 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22554 +
22555 +#ifdef CONFIG_PAX_KERNEXEC
22556 + " movl %0, %%cr0\n"
22557 +#endif
22558 +
22559 " jmp 2b\n"
22560 ".previous\n"
22561 _ASM_EXTABLE(1b, 3b)
22562 - : : "r" (from), "r" (to) : "memory");
22563 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22564
22565 from += 64;
22566 to += 64;
22567 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22568 index 69fa106..adda88b 100644
22569 --- a/arch/x86/lib/msr-reg.S
22570 +++ b/arch/x86/lib/msr-reg.S
22571 @@ -3,6 +3,7 @@
22572 #include <asm/dwarf2.h>
22573 #include <asm/asm.h>
22574 #include <asm/msr.h>
22575 +#include <asm/alternative-asm.h>
22576
22577 #ifdef CONFIG_X86_64
22578 /*
22579 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22580 CFI_STARTPROC
22581 pushq_cfi %rbx
22582 pushq_cfi %rbp
22583 - movq %rdi, %r10 /* Save pointer */
22584 + movq %rdi, %r9 /* Save pointer */
22585 xorl %r11d, %r11d /* Return value */
22586 movl (%rdi), %eax
22587 movl 4(%rdi), %ecx
22588 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22589 movl 28(%rdi), %edi
22590 CFI_REMEMBER_STATE
22591 1: \op
22592 -2: movl %eax, (%r10)
22593 +2: movl %eax, (%r9)
22594 movl %r11d, %eax /* Return value */
22595 - movl %ecx, 4(%r10)
22596 - movl %edx, 8(%r10)
22597 - movl %ebx, 12(%r10)
22598 - movl %ebp, 20(%r10)
22599 - movl %esi, 24(%r10)
22600 - movl %edi, 28(%r10)
22601 + movl %ecx, 4(%r9)
22602 + movl %edx, 8(%r9)
22603 + movl %ebx, 12(%r9)
22604 + movl %ebp, 20(%r9)
22605 + movl %esi, 24(%r9)
22606 + movl %edi, 28(%r9)
22607 popq_cfi %rbp
22608 popq_cfi %rbx
22609 + pax_force_retaddr
22610 ret
22611 3:
22612 CFI_RESTORE_STATE
22613 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22614 index 36b0d15..d381858 100644
22615 --- a/arch/x86/lib/putuser.S
22616 +++ b/arch/x86/lib/putuser.S
22617 @@ -15,7 +15,9 @@
22618 #include <asm/thread_info.h>
22619 #include <asm/errno.h>
22620 #include <asm/asm.h>
22621 -
22622 +#include <asm/segment.h>
22623 +#include <asm/pgtable.h>
22624 +#include <asm/alternative-asm.h>
22625
22626 /*
22627 * __put_user_X
22628 @@ -29,52 +31,119 @@
22629 * as they get called from within inline assembly.
22630 */
22631
22632 -#define ENTER CFI_STARTPROC ; \
22633 - GET_THREAD_INFO(%_ASM_BX)
22634 -#define EXIT ret ; \
22635 +#define ENTER CFI_STARTPROC
22636 +#define EXIT pax_force_retaddr; ret ; \
22637 CFI_ENDPROC
22638
22639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22640 +#define _DEST %_ASM_CX,%_ASM_BX
22641 +#else
22642 +#define _DEST %_ASM_CX
22643 +#endif
22644 +
22645 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22646 +#define __copyuser_seg gs;
22647 +#else
22648 +#define __copyuser_seg
22649 +#endif
22650 +
22651 .text
22652 ENTRY(__put_user_1)
22653 ENTER
22654 +
22655 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22656 + GET_THREAD_INFO(%_ASM_BX)
22657 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22658 jae bad_put_user
22659 -1: movb %al,(%_ASM_CX)
22660 +
22661 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22662 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22663 + cmp %_ASM_BX,%_ASM_CX
22664 + jb 1234f
22665 + xor %ebx,%ebx
22666 +1234:
22667 +#endif
22668 +
22669 +#endif
22670 +
22671 +1: __copyuser_seg movb %al,(_DEST)
22672 xor %eax,%eax
22673 EXIT
22674 ENDPROC(__put_user_1)
22675
22676 ENTRY(__put_user_2)
22677 ENTER
22678 +
22679 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22680 + GET_THREAD_INFO(%_ASM_BX)
22681 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22682 sub $1,%_ASM_BX
22683 cmp %_ASM_BX,%_ASM_CX
22684 jae bad_put_user
22685 -2: movw %ax,(%_ASM_CX)
22686 +
22687 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22688 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22689 + cmp %_ASM_BX,%_ASM_CX
22690 + jb 1234f
22691 + xor %ebx,%ebx
22692 +1234:
22693 +#endif
22694 +
22695 +#endif
22696 +
22697 +2: __copyuser_seg movw %ax,(_DEST)
22698 xor %eax,%eax
22699 EXIT
22700 ENDPROC(__put_user_2)
22701
22702 ENTRY(__put_user_4)
22703 ENTER
22704 +
22705 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22706 + GET_THREAD_INFO(%_ASM_BX)
22707 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22708 sub $3,%_ASM_BX
22709 cmp %_ASM_BX,%_ASM_CX
22710 jae bad_put_user
22711 -3: movl %eax,(%_ASM_CX)
22712 +
22713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22714 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22715 + cmp %_ASM_BX,%_ASM_CX
22716 + jb 1234f
22717 + xor %ebx,%ebx
22718 +1234:
22719 +#endif
22720 +
22721 +#endif
22722 +
22723 +3: __copyuser_seg movl %eax,(_DEST)
22724 xor %eax,%eax
22725 EXIT
22726 ENDPROC(__put_user_4)
22727
22728 ENTRY(__put_user_8)
22729 ENTER
22730 +
22731 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22732 + GET_THREAD_INFO(%_ASM_BX)
22733 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22734 sub $7,%_ASM_BX
22735 cmp %_ASM_BX,%_ASM_CX
22736 jae bad_put_user
22737 -4: mov %_ASM_AX,(%_ASM_CX)
22738 +
22739 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22740 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22741 + cmp %_ASM_BX,%_ASM_CX
22742 + jb 1234f
22743 + xor %ebx,%ebx
22744 +1234:
22745 +#endif
22746 +
22747 +#endif
22748 +
22749 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22750 #ifdef CONFIG_X86_32
22751 -5: movl %edx,4(%_ASM_CX)
22752 +5: __copyuser_seg movl %edx,4(_DEST)
22753 #endif
22754 xor %eax,%eax
22755 EXIT
22756 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22757 index 1cad221..de671ee 100644
22758 --- a/arch/x86/lib/rwlock.S
22759 +++ b/arch/x86/lib/rwlock.S
22760 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22761 FRAME
22762 0: LOCK_PREFIX
22763 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22764 +
22765 +#ifdef CONFIG_PAX_REFCOUNT
22766 + jno 1234f
22767 + LOCK_PREFIX
22768 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22769 + int $4
22770 +1234:
22771 + _ASM_EXTABLE(1234b, 1234b)
22772 +#endif
22773 +
22774 1: rep; nop
22775 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22776 jne 1b
22777 LOCK_PREFIX
22778 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22779 +
22780 +#ifdef CONFIG_PAX_REFCOUNT
22781 + jno 1234f
22782 + LOCK_PREFIX
22783 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22784 + int $4
22785 +1234:
22786 + _ASM_EXTABLE(1234b, 1234b)
22787 +#endif
22788 +
22789 jnz 0b
22790 ENDFRAME
22791 + pax_force_retaddr
22792 ret
22793 CFI_ENDPROC
22794 END(__write_lock_failed)
22795 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22796 FRAME
22797 0: LOCK_PREFIX
22798 READ_LOCK_SIZE(inc) (%__lock_ptr)
22799 +
22800 +#ifdef CONFIG_PAX_REFCOUNT
22801 + jno 1234f
22802 + LOCK_PREFIX
22803 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22804 + int $4
22805 +1234:
22806 + _ASM_EXTABLE(1234b, 1234b)
22807 +#endif
22808 +
22809 1: rep; nop
22810 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22811 js 1b
22812 LOCK_PREFIX
22813 READ_LOCK_SIZE(dec) (%__lock_ptr)
22814 +
22815 +#ifdef CONFIG_PAX_REFCOUNT
22816 + jno 1234f
22817 + LOCK_PREFIX
22818 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22819 + int $4
22820 +1234:
22821 + _ASM_EXTABLE(1234b, 1234b)
22822 +#endif
22823 +
22824 js 0b
22825 ENDFRAME
22826 + pax_force_retaddr
22827 ret
22828 CFI_ENDPROC
22829 END(__read_lock_failed)
22830 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22831 index 5dff5f0..cadebf4 100644
22832 --- a/arch/x86/lib/rwsem.S
22833 +++ b/arch/x86/lib/rwsem.S
22834 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22835 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22836 CFI_RESTORE __ASM_REG(dx)
22837 restore_common_regs
22838 + pax_force_retaddr
22839 ret
22840 CFI_ENDPROC
22841 ENDPROC(call_rwsem_down_read_failed)
22842 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22843 movq %rax,%rdi
22844 call rwsem_down_write_failed
22845 restore_common_regs
22846 + pax_force_retaddr
22847 ret
22848 CFI_ENDPROC
22849 ENDPROC(call_rwsem_down_write_failed)
22850 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22851 movq %rax,%rdi
22852 call rwsem_wake
22853 restore_common_regs
22854 -1: ret
22855 +1: pax_force_retaddr
22856 + ret
22857 CFI_ENDPROC
22858 ENDPROC(call_rwsem_wake)
22859
22860 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22861 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22862 CFI_RESTORE __ASM_REG(dx)
22863 restore_common_regs
22864 + pax_force_retaddr
22865 ret
22866 CFI_ENDPROC
22867 ENDPROC(call_rwsem_downgrade_wake)
22868 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22869 index a63efd6..ccecad8 100644
22870 --- a/arch/x86/lib/thunk_64.S
22871 +++ b/arch/x86/lib/thunk_64.S
22872 @@ -8,6 +8,7 @@
22873 #include <linux/linkage.h>
22874 #include <asm/dwarf2.h>
22875 #include <asm/calling.h>
22876 +#include <asm/alternative-asm.h>
22877
22878 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22879 .macro THUNK name, func, put_ret_addr_in_rdi=0
22880 @@ -41,5 +42,6 @@
22881 SAVE_ARGS
22882 restore:
22883 RESTORE_ARGS
22884 + pax_force_retaddr
22885 ret
22886 CFI_ENDPROC
22887 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22888 index ef2a6a5..3b28862 100644
22889 --- a/arch/x86/lib/usercopy_32.c
22890 +++ b/arch/x86/lib/usercopy_32.c
22891 @@ -41,10 +41,12 @@ do { \
22892 int __d0; \
22893 might_fault(); \
22894 __asm__ __volatile__( \
22895 + __COPYUSER_SET_ES \
22896 "0: rep; stosl\n" \
22897 " movl %2,%0\n" \
22898 "1: rep; stosb\n" \
22899 "2:\n" \
22900 + __COPYUSER_RESTORE_ES \
22901 ".section .fixup,\"ax\"\n" \
22902 "3: lea 0(%2,%0,4),%0\n" \
22903 " jmp 2b\n" \
22904 @@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22905 might_fault();
22906
22907 __asm__ __volatile__(
22908 + __COPYUSER_SET_ES
22909 " testl %0, %0\n"
22910 " jz 3f\n"
22911 " andl %0,%%ecx\n"
22912 @@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22913 " subl %%ecx,%0\n"
22914 " addl %0,%%eax\n"
22915 "1:\n"
22916 + __COPYUSER_RESTORE_ES
22917 ".section .fixup,\"ax\"\n"
22918 "2: xorl %%eax,%%eax\n"
22919 " jmp 1b\n"
22920 @@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22921
22922 #ifdef CONFIG_X86_INTEL_USERCOPY
22923 static unsigned long
22924 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22925 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22926 {
22927 int d0, d1;
22928 __asm__ __volatile__(
22929 @@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22930 " .align 2,0x90\n"
22931 "3: movl 0(%4), %%eax\n"
22932 "4: movl 4(%4), %%edx\n"
22933 - "5: movl %%eax, 0(%3)\n"
22934 - "6: movl %%edx, 4(%3)\n"
22935 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22936 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22937 "7: movl 8(%4), %%eax\n"
22938 "8: movl 12(%4),%%edx\n"
22939 - "9: movl %%eax, 8(%3)\n"
22940 - "10: movl %%edx, 12(%3)\n"
22941 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22942 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22943 "11: movl 16(%4), %%eax\n"
22944 "12: movl 20(%4), %%edx\n"
22945 - "13: movl %%eax, 16(%3)\n"
22946 - "14: movl %%edx, 20(%3)\n"
22947 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22948 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22949 "15: movl 24(%4), %%eax\n"
22950 "16: movl 28(%4), %%edx\n"
22951 - "17: movl %%eax, 24(%3)\n"
22952 - "18: movl %%edx, 28(%3)\n"
22953 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22954 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22955 "19: movl 32(%4), %%eax\n"
22956 "20: movl 36(%4), %%edx\n"
22957 - "21: movl %%eax, 32(%3)\n"
22958 - "22: movl %%edx, 36(%3)\n"
22959 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22960 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22961 "23: movl 40(%4), %%eax\n"
22962 "24: movl 44(%4), %%edx\n"
22963 - "25: movl %%eax, 40(%3)\n"
22964 - "26: movl %%edx, 44(%3)\n"
22965 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22966 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22967 "27: movl 48(%4), %%eax\n"
22968 "28: movl 52(%4), %%edx\n"
22969 - "29: movl %%eax, 48(%3)\n"
22970 - "30: movl %%edx, 52(%3)\n"
22971 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22972 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22973 "31: movl 56(%4), %%eax\n"
22974 "32: movl 60(%4), %%edx\n"
22975 - "33: movl %%eax, 56(%3)\n"
22976 - "34: movl %%edx, 60(%3)\n"
22977 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22978 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22979 " addl $-64, %0\n"
22980 " addl $64, %4\n"
22981 " addl $64, %3\n"
22982 @@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22983 " shrl $2, %0\n"
22984 " andl $3, %%eax\n"
22985 " cld\n"
22986 + __COPYUSER_SET_ES
22987 "99: rep; movsl\n"
22988 "36: movl %%eax, %0\n"
22989 "37: rep; movsb\n"
22990 "100:\n"
22991 + __COPYUSER_RESTORE_ES
22992 ".section .fixup,\"ax\"\n"
22993 "101: lea 0(%%eax,%0,4),%0\n"
22994 " jmp 100b\n"
22995 @@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22996 }
22997
22998 static unsigned long
22999 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23000 +{
23001 + int d0, d1;
23002 + __asm__ __volatile__(
23003 + " .align 2,0x90\n"
23004 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23005 + " cmpl $67, %0\n"
23006 + " jbe 3f\n"
23007 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23008 + " .align 2,0x90\n"
23009 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23010 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23011 + "5: movl %%eax, 0(%3)\n"
23012 + "6: movl %%edx, 4(%3)\n"
23013 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23014 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23015 + "9: movl %%eax, 8(%3)\n"
23016 + "10: movl %%edx, 12(%3)\n"
23017 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23018 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23019 + "13: movl %%eax, 16(%3)\n"
23020 + "14: movl %%edx, 20(%3)\n"
23021 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23022 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23023 + "17: movl %%eax, 24(%3)\n"
23024 + "18: movl %%edx, 28(%3)\n"
23025 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23026 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23027 + "21: movl %%eax, 32(%3)\n"
23028 + "22: movl %%edx, 36(%3)\n"
23029 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23030 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23031 + "25: movl %%eax, 40(%3)\n"
23032 + "26: movl %%edx, 44(%3)\n"
23033 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23034 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23035 + "29: movl %%eax, 48(%3)\n"
23036 + "30: movl %%edx, 52(%3)\n"
23037 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23038 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23039 + "33: movl %%eax, 56(%3)\n"
23040 + "34: movl %%edx, 60(%3)\n"
23041 + " addl $-64, %0\n"
23042 + " addl $64, %4\n"
23043 + " addl $64, %3\n"
23044 + " cmpl $63, %0\n"
23045 + " ja 1b\n"
23046 + "35: movl %0, %%eax\n"
23047 + " shrl $2, %0\n"
23048 + " andl $3, %%eax\n"
23049 + " cld\n"
23050 + "99: rep; "__copyuser_seg" movsl\n"
23051 + "36: movl %%eax, %0\n"
23052 + "37: rep; "__copyuser_seg" movsb\n"
23053 + "100:\n"
23054 + ".section .fixup,\"ax\"\n"
23055 + "101: lea 0(%%eax,%0,4),%0\n"
23056 + " jmp 100b\n"
23057 + ".previous\n"
23058 + ".section __ex_table,\"a\"\n"
23059 + " .align 4\n"
23060 + " .long 1b,100b\n"
23061 + " .long 2b,100b\n"
23062 + " .long 3b,100b\n"
23063 + " .long 4b,100b\n"
23064 + " .long 5b,100b\n"
23065 + " .long 6b,100b\n"
23066 + " .long 7b,100b\n"
23067 + " .long 8b,100b\n"
23068 + " .long 9b,100b\n"
23069 + " .long 10b,100b\n"
23070 + " .long 11b,100b\n"
23071 + " .long 12b,100b\n"
23072 + " .long 13b,100b\n"
23073 + " .long 14b,100b\n"
23074 + " .long 15b,100b\n"
23075 + " .long 16b,100b\n"
23076 + " .long 17b,100b\n"
23077 + " .long 18b,100b\n"
23078 + " .long 19b,100b\n"
23079 + " .long 20b,100b\n"
23080 + " .long 21b,100b\n"
23081 + " .long 22b,100b\n"
23082 + " .long 23b,100b\n"
23083 + " .long 24b,100b\n"
23084 + " .long 25b,100b\n"
23085 + " .long 26b,100b\n"
23086 + " .long 27b,100b\n"
23087 + " .long 28b,100b\n"
23088 + " .long 29b,100b\n"
23089 + " .long 30b,100b\n"
23090 + " .long 31b,100b\n"
23091 + " .long 32b,100b\n"
23092 + " .long 33b,100b\n"
23093 + " .long 34b,100b\n"
23094 + " .long 35b,100b\n"
23095 + " .long 36b,100b\n"
23096 + " .long 37b,100b\n"
23097 + " .long 99b,101b\n"
23098 + ".previous"
23099 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23100 + : "1"(to), "2"(from), "0"(size)
23101 + : "eax", "edx", "memory");
23102 + return size;
23103 +}
23104 +
23105 +static unsigned long
23106 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23107 +static unsigned long
23108 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23109 {
23110 int d0, d1;
23111 __asm__ __volatile__(
23112 " .align 2,0x90\n"
23113 - "0: movl 32(%4), %%eax\n"
23114 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23115 " cmpl $67, %0\n"
23116 " jbe 2f\n"
23117 - "1: movl 64(%4), %%eax\n"
23118 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23119 " .align 2,0x90\n"
23120 - "2: movl 0(%4), %%eax\n"
23121 - "21: movl 4(%4), %%edx\n"
23122 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23123 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23124 " movl %%eax, 0(%3)\n"
23125 " movl %%edx, 4(%3)\n"
23126 - "3: movl 8(%4), %%eax\n"
23127 - "31: movl 12(%4),%%edx\n"
23128 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23129 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23130 " movl %%eax, 8(%3)\n"
23131 " movl %%edx, 12(%3)\n"
23132 - "4: movl 16(%4), %%eax\n"
23133 - "41: movl 20(%4), %%edx\n"
23134 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23135 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23136 " movl %%eax, 16(%3)\n"
23137 " movl %%edx, 20(%3)\n"
23138 - "10: movl 24(%4), %%eax\n"
23139 - "51: movl 28(%4), %%edx\n"
23140 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23141 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23142 " movl %%eax, 24(%3)\n"
23143 " movl %%edx, 28(%3)\n"
23144 - "11: movl 32(%4), %%eax\n"
23145 - "61: movl 36(%4), %%edx\n"
23146 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23147 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23148 " movl %%eax, 32(%3)\n"
23149 " movl %%edx, 36(%3)\n"
23150 - "12: movl 40(%4), %%eax\n"
23151 - "71: movl 44(%4), %%edx\n"
23152 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23153 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23154 " movl %%eax, 40(%3)\n"
23155 " movl %%edx, 44(%3)\n"
23156 - "13: movl 48(%4), %%eax\n"
23157 - "81: movl 52(%4), %%edx\n"
23158 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23159 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23160 " movl %%eax, 48(%3)\n"
23161 " movl %%edx, 52(%3)\n"
23162 - "14: movl 56(%4), %%eax\n"
23163 - "91: movl 60(%4), %%edx\n"
23164 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23165 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23166 " movl %%eax, 56(%3)\n"
23167 " movl %%edx, 60(%3)\n"
23168 " addl $-64, %0\n"
23169 @@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23170 " shrl $2, %0\n"
23171 " andl $3, %%eax\n"
23172 " cld\n"
23173 - "6: rep; movsl\n"
23174 + "6: rep; "__copyuser_seg" movsl\n"
23175 " movl %%eax,%0\n"
23176 - "7: rep; movsb\n"
23177 + "7: rep; "__copyuser_seg" movsb\n"
23178 "8:\n"
23179 ".section .fixup,\"ax\"\n"
23180 "9: lea 0(%%eax,%0,4),%0\n"
23181 @@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23182 */
23183
23184 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23185 + const void __user *from, unsigned long size) __size_overflow(3);
23186 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23187 const void __user *from, unsigned long size)
23188 {
23189 int d0, d1;
23190
23191 __asm__ __volatile__(
23192 " .align 2,0x90\n"
23193 - "0: movl 32(%4), %%eax\n"
23194 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23195 " cmpl $67, %0\n"
23196 " jbe 2f\n"
23197 - "1: movl 64(%4), %%eax\n"
23198 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23199 " .align 2,0x90\n"
23200 - "2: movl 0(%4), %%eax\n"
23201 - "21: movl 4(%4), %%edx\n"
23202 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23203 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23204 " movnti %%eax, 0(%3)\n"
23205 " movnti %%edx, 4(%3)\n"
23206 - "3: movl 8(%4), %%eax\n"
23207 - "31: movl 12(%4),%%edx\n"
23208 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23209 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23210 " movnti %%eax, 8(%3)\n"
23211 " movnti %%edx, 12(%3)\n"
23212 - "4: movl 16(%4), %%eax\n"
23213 - "41: movl 20(%4), %%edx\n"
23214 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23215 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23216 " movnti %%eax, 16(%3)\n"
23217 " movnti %%edx, 20(%3)\n"
23218 - "10: movl 24(%4), %%eax\n"
23219 - "51: movl 28(%4), %%edx\n"
23220 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23221 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23222 " movnti %%eax, 24(%3)\n"
23223 " movnti %%edx, 28(%3)\n"
23224 - "11: movl 32(%4), %%eax\n"
23225 - "61: movl 36(%4), %%edx\n"
23226 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23227 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23228 " movnti %%eax, 32(%3)\n"
23229 " movnti %%edx, 36(%3)\n"
23230 - "12: movl 40(%4), %%eax\n"
23231 - "71: movl 44(%4), %%edx\n"
23232 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23233 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23234 " movnti %%eax, 40(%3)\n"
23235 " movnti %%edx, 44(%3)\n"
23236 - "13: movl 48(%4), %%eax\n"
23237 - "81: movl 52(%4), %%edx\n"
23238 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23239 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23240 " movnti %%eax, 48(%3)\n"
23241 " movnti %%edx, 52(%3)\n"
23242 - "14: movl 56(%4), %%eax\n"
23243 - "91: movl 60(%4), %%edx\n"
23244 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23245 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23246 " movnti %%eax, 56(%3)\n"
23247 " movnti %%edx, 60(%3)\n"
23248 " addl $-64, %0\n"
23249 @@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23250 " shrl $2, %0\n"
23251 " andl $3, %%eax\n"
23252 " cld\n"
23253 - "6: rep; movsl\n"
23254 + "6: rep; "__copyuser_seg" movsl\n"
23255 " movl %%eax,%0\n"
23256 - "7: rep; movsb\n"
23257 + "7: rep; "__copyuser_seg" movsb\n"
23258 "8:\n"
23259 ".section .fixup,\"ax\"\n"
23260 "9: lea 0(%%eax,%0,4),%0\n"
23261 @@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23262 }
23263
23264 static unsigned long __copy_user_intel_nocache(void *to,
23265 + const void __user *from, unsigned long size) __size_overflow(3);
23266 +static unsigned long __copy_user_intel_nocache(void *to,
23267 const void __user *from, unsigned long size)
23268 {
23269 int d0, d1;
23270
23271 __asm__ __volatile__(
23272 " .align 2,0x90\n"
23273 - "0: movl 32(%4), %%eax\n"
23274 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23275 " cmpl $67, %0\n"
23276 " jbe 2f\n"
23277 - "1: movl 64(%4), %%eax\n"
23278 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23279 " .align 2,0x90\n"
23280 - "2: movl 0(%4), %%eax\n"
23281 - "21: movl 4(%4), %%edx\n"
23282 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23283 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23284 " movnti %%eax, 0(%3)\n"
23285 " movnti %%edx, 4(%3)\n"
23286 - "3: movl 8(%4), %%eax\n"
23287 - "31: movl 12(%4),%%edx\n"
23288 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23289 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23290 " movnti %%eax, 8(%3)\n"
23291 " movnti %%edx, 12(%3)\n"
23292 - "4: movl 16(%4), %%eax\n"
23293 - "41: movl 20(%4), %%edx\n"
23294 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23295 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23296 " movnti %%eax, 16(%3)\n"
23297 " movnti %%edx, 20(%3)\n"
23298 - "10: movl 24(%4), %%eax\n"
23299 - "51: movl 28(%4), %%edx\n"
23300 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23301 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23302 " movnti %%eax, 24(%3)\n"
23303 " movnti %%edx, 28(%3)\n"
23304 - "11: movl 32(%4), %%eax\n"
23305 - "61: movl 36(%4), %%edx\n"
23306 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23307 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23308 " movnti %%eax, 32(%3)\n"
23309 " movnti %%edx, 36(%3)\n"
23310 - "12: movl 40(%4), %%eax\n"
23311 - "71: movl 44(%4), %%edx\n"
23312 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23313 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23314 " movnti %%eax, 40(%3)\n"
23315 " movnti %%edx, 44(%3)\n"
23316 - "13: movl 48(%4), %%eax\n"
23317 - "81: movl 52(%4), %%edx\n"
23318 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23319 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23320 " movnti %%eax, 48(%3)\n"
23321 " movnti %%edx, 52(%3)\n"
23322 - "14: movl 56(%4), %%eax\n"
23323 - "91: movl 60(%4), %%edx\n"
23324 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23325 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23326 " movnti %%eax, 56(%3)\n"
23327 " movnti %%edx, 60(%3)\n"
23328 " addl $-64, %0\n"
23329 @@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23330 " shrl $2, %0\n"
23331 " andl $3, %%eax\n"
23332 " cld\n"
23333 - "6: rep; movsl\n"
23334 + "6: rep; "__copyuser_seg" movsl\n"
23335 " movl %%eax,%0\n"
23336 - "7: rep; movsb\n"
23337 + "7: rep; "__copyuser_seg" movsb\n"
23338 "8:\n"
23339 ".section .fixup,\"ax\"\n"
23340 "9: lea 0(%%eax,%0,4),%0\n"
23341 @@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23342 */
23343 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23344 unsigned long size);
23345 -unsigned long __copy_user_intel(void __user *to, const void *from,
23346 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23347 + unsigned long size);
23348 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23349 unsigned long size);
23350 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23351 const void __user *from, unsigned long size);
23352 #endif /* CONFIG_X86_INTEL_USERCOPY */
23353
23354 /* Generic arbitrary sized copy. */
23355 -#define __copy_user(to, from, size) \
23356 +#define __copy_user(to, from, size, prefix, set, restore) \
23357 do { \
23358 int __d0, __d1, __d2; \
23359 __asm__ __volatile__( \
23360 + set \
23361 " cmp $7,%0\n" \
23362 " jbe 1f\n" \
23363 " movl %1,%0\n" \
23364 " negl %0\n" \
23365 " andl $7,%0\n" \
23366 " subl %0,%3\n" \
23367 - "4: rep; movsb\n" \
23368 + "4: rep; "prefix"movsb\n" \
23369 " movl %3,%0\n" \
23370 " shrl $2,%0\n" \
23371 " andl $3,%3\n" \
23372 " .align 2,0x90\n" \
23373 - "0: rep; movsl\n" \
23374 + "0: rep; "prefix"movsl\n" \
23375 " movl %3,%0\n" \
23376 - "1: rep; movsb\n" \
23377 + "1: rep; "prefix"movsb\n" \
23378 "2:\n" \
23379 + restore \
23380 ".section .fixup,\"ax\"\n" \
23381 "5: addl %3,%0\n" \
23382 " jmp 2b\n" \
23383 @@ -595,14 +718,14 @@ do { \
23384 " negl %0\n" \
23385 " andl $7,%0\n" \
23386 " subl %0,%3\n" \
23387 - "4: rep; movsb\n" \
23388 + "4: rep; "__copyuser_seg"movsb\n" \
23389 " movl %3,%0\n" \
23390 " shrl $2,%0\n" \
23391 " andl $3,%3\n" \
23392 " .align 2,0x90\n" \
23393 - "0: rep; movsl\n" \
23394 + "0: rep; "__copyuser_seg"movsl\n" \
23395 " movl %3,%0\n" \
23396 - "1: rep; movsb\n" \
23397 + "1: rep; "__copyuser_seg"movsb\n" \
23398 "2:\n" \
23399 ".section .fixup,\"ax\"\n" \
23400 "5: addl %3,%0\n" \
23401 @@ -688,9 +811,9 @@ survive:
23402 }
23403 #endif
23404 if (movsl_is_ok(to, from, n))
23405 - __copy_user(to, from, n);
23406 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23407 else
23408 - n = __copy_user_intel(to, from, n);
23409 + n = __generic_copy_to_user_intel(to, from, n);
23410 return n;
23411 }
23412 EXPORT_SYMBOL(__copy_to_user_ll);
23413 @@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23414 unsigned long n)
23415 {
23416 if (movsl_is_ok(to, from, n))
23417 - __copy_user(to, from, n);
23418 + __copy_user(to, from, n, __copyuser_seg, "", "");
23419 else
23420 - n = __copy_user_intel((void __user *)to,
23421 - (const void *)from, n);
23422 + n = __generic_copy_from_user_intel(to, from, n);
23423 return n;
23424 }
23425 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23426 @@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23427 if (n > 64 && cpu_has_xmm2)
23428 n = __copy_user_intel_nocache(to, from, n);
23429 else
23430 - __copy_user(to, from, n);
23431 + __copy_user(to, from, n, __copyuser_seg, "", "");
23432 #else
23433 - __copy_user(to, from, n);
23434 + __copy_user(to, from, n, __copyuser_seg, "", "");
23435 #endif
23436 return n;
23437 }
23438 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23439
23440 -/**
23441 - * copy_to_user: - Copy a block of data into user space.
23442 - * @to: Destination address, in user space.
23443 - * @from: Source address, in kernel space.
23444 - * @n: Number of bytes to copy.
23445 - *
23446 - * Context: User context only. This function may sleep.
23447 - *
23448 - * Copy data from kernel space to user space.
23449 - *
23450 - * Returns number of bytes that could not be copied.
23451 - * On success, this will be zero.
23452 - */
23453 -unsigned long
23454 -copy_to_user(void __user *to, const void *from, unsigned long n)
23455 -{
23456 - if (access_ok(VERIFY_WRITE, to, n))
23457 - n = __copy_to_user(to, from, n);
23458 - return n;
23459 -}
23460 -EXPORT_SYMBOL(copy_to_user);
23461 -
23462 -/**
23463 - * copy_from_user: - Copy a block of data from user space.
23464 - * @to: Destination address, in kernel space.
23465 - * @from: Source address, in user space.
23466 - * @n: Number of bytes to copy.
23467 - *
23468 - * Context: User context only. This function may sleep.
23469 - *
23470 - * Copy data from user space to kernel space.
23471 - *
23472 - * Returns number of bytes that could not be copied.
23473 - * On success, this will be zero.
23474 - *
23475 - * If some data could not be copied, this function will pad the copied
23476 - * data to the requested size using zero bytes.
23477 - */
23478 -unsigned long
23479 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23480 -{
23481 - if (access_ok(VERIFY_READ, from, n))
23482 - n = __copy_from_user(to, from, n);
23483 - else
23484 - memset(to, 0, n);
23485 - return n;
23486 -}
23487 -EXPORT_SYMBOL(_copy_from_user);
23488 -
23489 void copy_from_user_overflow(void)
23490 {
23491 WARN(1, "Buffer overflow detected!\n");
23492 }
23493 EXPORT_SYMBOL(copy_from_user_overflow);
23494 +
23495 +void copy_to_user_overflow(void)
23496 +{
23497 + WARN(1, "Buffer overflow detected!\n");
23498 +}
23499 +EXPORT_SYMBOL(copy_to_user_overflow);
23500 +
23501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23502 +void __set_fs(mm_segment_t x)
23503 +{
23504 + switch (x.seg) {
23505 + case 0:
23506 + loadsegment(gs, 0);
23507 + break;
23508 + case TASK_SIZE_MAX:
23509 + loadsegment(gs, __USER_DS);
23510 + break;
23511 + case -1UL:
23512 + loadsegment(gs, __KERNEL_DS);
23513 + break;
23514 + default:
23515 + BUG();
23516 + }
23517 + return;
23518 +}
23519 +EXPORT_SYMBOL(__set_fs);
23520 +
23521 +void set_fs(mm_segment_t x)
23522 +{
23523 + current_thread_info()->addr_limit = x;
23524 + __set_fs(x);
23525 +}
23526 +EXPORT_SYMBOL(set_fs);
23527 +#endif
23528 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23529 index 0d0326f..6a6155b 100644
23530 --- a/arch/x86/lib/usercopy_64.c
23531 +++ b/arch/x86/lib/usercopy_64.c
23532 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23533 {
23534 long __d0;
23535 might_fault();
23536 +
23537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23538 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23539 + addr += PAX_USER_SHADOW_BASE;
23540 +#endif
23541 +
23542 /* no memory constraint because it doesn't change any memory gcc knows
23543 about */
23544 asm volatile(
23545 @@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23546 }
23547 EXPORT_SYMBOL(strlen_user);
23548
23549 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23550 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23551 {
23552 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23553 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23554 - }
23555 - return len;
23556 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23557 +
23558 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23559 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23560 + to += PAX_USER_SHADOW_BASE;
23561 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23562 + from += PAX_USER_SHADOW_BASE;
23563 +#endif
23564 +
23565 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23566 + }
23567 + return len;
23568 }
23569 EXPORT_SYMBOL(copy_in_user);
23570
23571 @@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23572 * it is not necessary to optimize tail handling.
23573 */
23574 unsigned long
23575 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23576 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23577 {
23578 char c;
23579 unsigned zero_len;
23580 @@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23581 break;
23582 return len;
23583 }
23584 +
23585 +void copy_from_user_overflow(void)
23586 +{
23587 + WARN(1, "Buffer overflow detected!\n");
23588 +}
23589 +EXPORT_SYMBOL(copy_from_user_overflow);
23590 +
23591 +void copy_to_user_overflow(void)
23592 +{
23593 + WARN(1, "Buffer overflow detected!\n");
23594 +}
23595 +EXPORT_SYMBOL(copy_to_user_overflow);
23596 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23597 index 1fb85db..8b3540b 100644
23598 --- a/arch/x86/mm/extable.c
23599 +++ b/arch/x86/mm/extable.c
23600 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23601 const struct exception_table_entry *fixup;
23602
23603 #ifdef CONFIG_PNPBIOS
23604 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23605 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23606 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23607 extern u32 pnp_bios_is_utter_crap;
23608 pnp_bios_is_utter_crap = 1;
23609 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23610 index 3ecfd1a..304d554 100644
23611 --- a/arch/x86/mm/fault.c
23612 +++ b/arch/x86/mm/fault.c
23613 @@ -13,11 +13,18 @@
23614 #include <linux/perf_event.h> /* perf_sw_event */
23615 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23616 #include <linux/prefetch.h> /* prefetchw */
23617 +#include <linux/unistd.h>
23618 +#include <linux/compiler.h>
23619
23620 #include <asm/traps.h> /* dotraplinkage, ... */
23621 #include <asm/pgalloc.h> /* pgd_*(), ... */
23622 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23623 #include <asm/fixmap.h> /* VSYSCALL_START */
23624 +#include <asm/tlbflush.h>
23625 +
23626 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23627 +#include <asm/stacktrace.h>
23628 +#endif
23629
23630 /*
23631 * Page fault error code bits:
23632 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23633 int ret = 0;
23634
23635 /* kprobe_running() needs smp_processor_id() */
23636 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23637 + if (kprobes_built_in() && !user_mode(regs)) {
23638 preempt_disable();
23639 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23640 ret = 1;
23641 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23642 return !instr_lo || (instr_lo>>1) == 1;
23643 case 0x00:
23644 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23645 - if (probe_kernel_address(instr, opcode))
23646 + if (user_mode(regs)) {
23647 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23648 + return 0;
23649 + } else if (probe_kernel_address(instr, opcode))
23650 return 0;
23651
23652 *prefetch = (instr_lo == 0xF) &&
23653 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23654 while (instr < max_instr) {
23655 unsigned char opcode;
23656
23657 - if (probe_kernel_address(instr, opcode))
23658 + if (user_mode(regs)) {
23659 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23660 + break;
23661 + } else if (probe_kernel_address(instr, opcode))
23662 break;
23663
23664 instr++;
23665 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23666 force_sig_info(si_signo, &info, tsk);
23667 }
23668
23669 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23670 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23671 +#endif
23672 +
23673 +#ifdef CONFIG_PAX_EMUTRAMP
23674 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23675 +#endif
23676 +
23677 +#ifdef CONFIG_PAX_PAGEEXEC
23678 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23679 +{
23680 + pgd_t *pgd;
23681 + pud_t *pud;
23682 + pmd_t *pmd;
23683 +
23684 + pgd = pgd_offset(mm, address);
23685 + if (!pgd_present(*pgd))
23686 + return NULL;
23687 + pud = pud_offset(pgd, address);
23688 + if (!pud_present(*pud))
23689 + return NULL;
23690 + pmd = pmd_offset(pud, address);
23691 + if (!pmd_present(*pmd))
23692 + return NULL;
23693 + return pmd;
23694 +}
23695 +#endif
23696 +
23697 DEFINE_SPINLOCK(pgd_lock);
23698 LIST_HEAD(pgd_list);
23699
23700 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23701 for (address = VMALLOC_START & PMD_MASK;
23702 address >= TASK_SIZE && address < FIXADDR_TOP;
23703 address += PMD_SIZE) {
23704 +
23705 +#ifdef CONFIG_PAX_PER_CPU_PGD
23706 + unsigned long cpu;
23707 +#else
23708 struct page *page;
23709 +#endif
23710
23711 spin_lock(&pgd_lock);
23712 +
23713 +#ifdef CONFIG_PAX_PER_CPU_PGD
23714 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23715 + pgd_t *pgd = get_cpu_pgd(cpu);
23716 + pmd_t *ret;
23717 +#else
23718 list_for_each_entry(page, &pgd_list, lru) {
23719 + pgd_t *pgd = page_address(page);
23720 spinlock_t *pgt_lock;
23721 pmd_t *ret;
23722
23723 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23724 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23725
23726 spin_lock(pgt_lock);
23727 - ret = vmalloc_sync_one(page_address(page), address);
23728 +#endif
23729 +
23730 + ret = vmalloc_sync_one(pgd, address);
23731 +
23732 +#ifndef CONFIG_PAX_PER_CPU_PGD
23733 spin_unlock(pgt_lock);
23734 +#endif
23735
23736 if (!ret)
23737 break;
23738 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23739 * an interrupt in the middle of a task switch..
23740 */
23741 pgd_paddr = read_cr3();
23742 +
23743 +#ifdef CONFIG_PAX_PER_CPU_PGD
23744 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23745 +#endif
23746 +
23747 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23748 if (!pmd_k)
23749 return -1;
23750 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23751 * happen within a race in page table update. In the later
23752 * case just flush:
23753 */
23754 +
23755 +#ifdef CONFIG_PAX_PER_CPU_PGD
23756 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23757 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23758 +#else
23759 pgd = pgd_offset(current->active_mm, address);
23760 +#endif
23761 +
23762 pgd_ref = pgd_offset_k(address);
23763 if (pgd_none(*pgd_ref))
23764 return -1;
23765 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23766 static int is_errata100(struct pt_regs *regs, unsigned long address)
23767 {
23768 #ifdef CONFIG_X86_64
23769 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23770 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23771 return 1;
23772 #endif
23773 return 0;
23774 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23775 }
23776
23777 static const char nx_warning[] = KERN_CRIT
23778 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23779 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23780
23781 static void
23782 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23783 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23784 if (!oops_may_print())
23785 return;
23786
23787 - if (error_code & PF_INSTR) {
23788 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23789 unsigned int level;
23790
23791 pte_t *pte = lookup_address(address, &level);
23792
23793 if (pte && pte_present(*pte) && !pte_exec(*pte))
23794 - printk(nx_warning, current_uid());
23795 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23796 }
23797
23798 +#ifdef CONFIG_PAX_KERNEXEC
23799 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23800 + if (current->signal->curr_ip)
23801 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23802 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23803 + else
23804 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23805 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23806 + }
23807 +#endif
23808 +
23809 printk(KERN_ALERT "BUG: unable to handle kernel ");
23810 if (address < PAGE_SIZE)
23811 printk(KERN_CONT "NULL pointer dereference");
23812 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23813 }
23814 #endif
23815
23816 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23817 + if (pax_is_fetch_fault(regs, error_code, address)) {
23818 +
23819 +#ifdef CONFIG_PAX_EMUTRAMP
23820 + switch (pax_handle_fetch_fault(regs)) {
23821 + case 2:
23822 + return;
23823 + }
23824 +#endif
23825 +
23826 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23827 + do_group_exit(SIGKILL);
23828 + }
23829 +#endif
23830 +
23831 if (unlikely(show_unhandled_signals))
23832 show_signal_msg(regs, error_code, address, tsk);
23833
23834 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23835 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23836 printk(KERN_ERR
23837 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23838 - tsk->comm, tsk->pid, address);
23839 + tsk->comm, task_pid_nr(tsk), address);
23840 code = BUS_MCEERR_AR;
23841 }
23842 #endif
23843 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23844 return 1;
23845 }
23846
23847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23848 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23849 +{
23850 + pte_t *pte;
23851 + pmd_t *pmd;
23852 + spinlock_t *ptl;
23853 + unsigned char pte_mask;
23854 +
23855 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23856 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23857 + return 0;
23858 +
23859 + /* PaX: it's our fault, let's handle it if we can */
23860 +
23861 + /* PaX: take a look at read faults before acquiring any locks */
23862 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23863 + /* instruction fetch attempt from a protected page in user mode */
23864 + up_read(&mm->mmap_sem);
23865 +
23866 +#ifdef CONFIG_PAX_EMUTRAMP
23867 + switch (pax_handle_fetch_fault(regs)) {
23868 + case 2:
23869 + return 1;
23870 + }
23871 +#endif
23872 +
23873 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23874 + do_group_exit(SIGKILL);
23875 + }
23876 +
23877 + pmd = pax_get_pmd(mm, address);
23878 + if (unlikely(!pmd))
23879 + return 0;
23880 +
23881 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23882 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23883 + pte_unmap_unlock(pte, ptl);
23884 + return 0;
23885 + }
23886 +
23887 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23888 + /* write attempt to a protected page in user mode */
23889 + pte_unmap_unlock(pte, ptl);
23890 + return 0;
23891 + }
23892 +
23893 +#ifdef CONFIG_SMP
23894 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23895 +#else
23896 + if (likely(address > get_limit(regs->cs)))
23897 +#endif
23898 + {
23899 + set_pte(pte, pte_mkread(*pte));
23900 + __flush_tlb_one(address);
23901 + pte_unmap_unlock(pte, ptl);
23902 + up_read(&mm->mmap_sem);
23903 + return 1;
23904 + }
23905 +
23906 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23907 +
23908 + /*
23909 + * PaX: fill DTLB with user rights and retry
23910 + */
23911 + __asm__ __volatile__ (
23912 + "orb %2,(%1)\n"
23913 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23914 +/*
23915 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23916 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23917 + * page fault when examined during a TLB load attempt. this is true not only
23918 + * for PTEs holding a non-present entry but also present entries that will
23919 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23920 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23921 + * for our target pages since their PTEs are simply not in the TLBs at all.
23922 +
23923 + * the best thing in omitting it is that we gain around 15-20% speed in the
23924 + * fast path of the page fault handler and can get rid of tracing since we
23925 + * can no longer flush unintended entries.
23926 + */
23927 + "invlpg (%0)\n"
23928 +#endif
23929 + __copyuser_seg"testb $0,(%0)\n"
23930 + "xorb %3,(%1)\n"
23931 + :
23932 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23933 + : "memory", "cc");
23934 + pte_unmap_unlock(pte, ptl);
23935 + up_read(&mm->mmap_sem);
23936 + return 1;
23937 +}
23938 +#endif
23939 +
23940 /*
23941 * Handle a spurious fault caused by a stale TLB entry.
23942 *
23943 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23944 static inline int
23945 access_error(unsigned long error_code, struct vm_area_struct *vma)
23946 {
23947 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23948 + return 1;
23949 +
23950 if (error_code & PF_WRITE) {
23951 /* write, present and write, not present: */
23952 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23953 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23954 {
23955 struct vm_area_struct *vma;
23956 struct task_struct *tsk;
23957 - unsigned long address;
23958 struct mm_struct *mm;
23959 int fault;
23960 int write = error_code & PF_WRITE;
23961 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23962 (write ? FAULT_FLAG_WRITE : 0);
23963
23964 - tsk = current;
23965 - mm = tsk->mm;
23966 -
23967 /* Get the faulting address: */
23968 - address = read_cr2();
23969 + unsigned long address = read_cr2();
23970 +
23971 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23972 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23973 + if (!search_exception_tables(regs->ip)) {
23974 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23975 + bad_area_nosemaphore(regs, error_code, address);
23976 + return;
23977 + }
23978 + if (address < PAX_USER_SHADOW_BASE) {
23979 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23980 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23981 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23982 + } else
23983 + address -= PAX_USER_SHADOW_BASE;
23984 + }
23985 +#endif
23986 +
23987 + tsk = current;
23988 + mm = tsk->mm;
23989
23990 /*
23991 * Detect and handle instructions that would cause a page fault for
23992 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23993 * User-mode registers count as a user access even for any
23994 * potential system fault or CPU buglet:
23995 */
23996 - if (user_mode_vm(regs)) {
23997 + if (user_mode(regs)) {
23998 local_irq_enable();
23999 error_code |= PF_USER;
24000 } else {
24001 @@ -1132,6 +1339,11 @@ retry:
24002 might_sleep();
24003 }
24004
24005 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24006 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24007 + return;
24008 +#endif
24009 +
24010 vma = find_vma(mm, address);
24011 if (unlikely(!vma)) {
24012 bad_area(regs, error_code, address);
24013 @@ -1143,18 +1355,24 @@ retry:
24014 bad_area(regs, error_code, address);
24015 return;
24016 }
24017 - if (error_code & PF_USER) {
24018 - /*
24019 - * Accessing the stack below %sp is always a bug.
24020 - * The large cushion allows instructions like enter
24021 - * and pusha to work. ("enter $65535, $31" pushes
24022 - * 32 pointers and then decrements %sp by 65535.)
24023 - */
24024 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24025 - bad_area(regs, error_code, address);
24026 - return;
24027 - }
24028 + /*
24029 + * Accessing the stack below %sp is always a bug.
24030 + * The large cushion allows instructions like enter
24031 + * and pusha to work. ("enter $65535, $31" pushes
24032 + * 32 pointers and then decrements %sp by 65535.)
24033 + */
24034 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24035 + bad_area(regs, error_code, address);
24036 + return;
24037 }
24038 +
24039 +#ifdef CONFIG_PAX_SEGMEXEC
24040 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24041 + bad_area(regs, error_code, address);
24042 + return;
24043 + }
24044 +#endif
24045 +
24046 if (unlikely(expand_stack(vma, address))) {
24047 bad_area(regs, error_code, address);
24048 return;
24049 @@ -1209,3 +1427,292 @@ good_area:
24050
24051 up_read(&mm->mmap_sem);
24052 }
24053 +
24054 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24055 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24056 +{
24057 + struct mm_struct *mm = current->mm;
24058 + unsigned long ip = regs->ip;
24059 +
24060 + if (v8086_mode(regs))
24061 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24062 +
24063 +#ifdef CONFIG_PAX_PAGEEXEC
24064 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24065 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24066 + return true;
24067 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24068 + return true;
24069 + return false;
24070 + }
24071 +#endif
24072 +
24073 +#ifdef CONFIG_PAX_SEGMEXEC
24074 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24075 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24076 + return true;
24077 + return false;
24078 + }
24079 +#endif
24080 +
24081 + return false;
24082 +}
24083 +#endif
24084 +
24085 +#ifdef CONFIG_PAX_EMUTRAMP
24086 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24087 +{
24088 + int err;
24089 +
24090 + do { /* PaX: libffi trampoline emulation */
24091 + unsigned char mov, jmp;
24092 + unsigned int addr1, addr2;
24093 +
24094 +#ifdef CONFIG_X86_64
24095 + if ((regs->ip + 9) >> 32)
24096 + break;
24097 +#endif
24098 +
24099 + err = get_user(mov, (unsigned char __user *)regs->ip);
24100 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24101 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24102 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24103 +
24104 + if (err)
24105 + break;
24106 +
24107 + if (mov == 0xB8 && jmp == 0xE9) {
24108 + regs->ax = addr1;
24109 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24110 + return 2;
24111 + }
24112 + } while (0);
24113 +
24114 + do { /* PaX: gcc trampoline emulation #1 */
24115 + unsigned char mov1, mov2;
24116 + unsigned short jmp;
24117 + unsigned int addr1, addr2;
24118 +
24119 +#ifdef CONFIG_X86_64
24120 + if ((regs->ip + 11) >> 32)
24121 + break;
24122 +#endif
24123 +
24124 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24125 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24126 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24127 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24128 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24129 +
24130 + if (err)
24131 + break;
24132 +
24133 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24134 + regs->cx = addr1;
24135 + regs->ax = addr2;
24136 + regs->ip = addr2;
24137 + return 2;
24138 + }
24139 + } while (0);
24140 +
24141 + do { /* PaX: gcc trampoline emulation #2 */
24142 + unsigned char mov, jmp;
24143 + unsigned int addr1, addr2;
24144 +
24145 +#ifdef CONFIG_X86_64
24146 + if ((regs->ip + 9) >> 32)
24147 + break;
24148 +#endif
24149 +
24150 + err = get_user(mov, (unsigned char __user *)regs->ip);
24151 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24152 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24153 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24154 +
24155 + if (err)
24156 + break;
24157 +
24158 + if (mov == 0xB9 && jmp == 0xE9) {
24159 + regs->cx = addr1;
24160 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24161 + return 2;
24162 + }
24163 + } while (0);
24164 +
24165 + return 1; /* PaX in action */
24166 +}
24167 +
24168 +#ifdef CONFIG_X86_64
24169 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24170 +{
24171 + int err;
24172 +
24173 + do { /* PaX: libffi trampoline emulation */
24174 + unsigned short mov1, mov2, jmp1;
24175 + unsigned char stcclc, jmp2;
24176 + unsigned long addr1, addr2;
24177 +
24178 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24179 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24180 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24181 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24182 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24183 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24184 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24185 +
24186 + if (err)
24187 + break;
24188 +
24189 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24190 + regs->r11 = addr1;
24191 + regs->r10 = addr2;
24192 + if (stcclc == 0xF8)
24193 + regs->flags &= ~X86_EFLAGS_CF;
24194 + else
24195 + regs->flags |= X86_EFLAGS_CF;
24196 + regs->ip = addr1;
24197 + return 2;
24198 + }
24199 + } while (0);
24200 +
24201 + do { /* PaX: gcc trampoline emulation #1 */
24202 + unsigned short mov1, mov2, jmp1;
24203 + unsigned char jmp2;
24204 + unsigned int addr1;
24205 + unsigned long addr2;
24206 +
24207 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24208 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24209 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24210 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24211 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24212 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24213 +
24214 + if (err)
24215 + break;
24216 +
24217 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24218 + regs->r11 = addr1;
24219 + regs->r10 = addr2;
24220 + regs->ip = addr1;
24221 + return 2;
24222 + }
24223 + } while (0);
24224 +
24225 + do { /* PaX: gcc trampoline emulation #2 */
24226 + unsigned short mov1, mov2, jmp1;
24227 + unsigned char jmp2;
24228 + unsigned long addr1, addr2;
24229 +
24230 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24231 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24232 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24233 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24234 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24235 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24236 +
24237 + if (err)
24238 + break;
24239 +
24240 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24241 + regs->r11 = addr1;
24242 + regs->r10 = addr2;
24243 + regs->ip = addr1;
24244 + return 2;
24245 + }
24246 + } while (0);
24247 +
24248 + return 1; /* PaX in action */
24249 +}
24250 +#endif
24251 +
24252 +/*
24253 + * PaX: decide what to do with offenders (regs->ip = fault address)
24254 + *
24255 + * returns 1 when task should be killed
24256 + * 2 when gcc trampoline was detected
24257 + */
24258 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24259 +{
24260 + if (v8086_mode(regs))
24261 + return 1;
24262 +
24263 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24264 + return 1;
24265 +
24266 +#ifdef CONFIG_X86_32
24267 + return pax_handle_fetch_fault_32(regs);
24268 +#else
24269 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24270 + return pax_handle_fetch_fault_32(regs);
24271 + else
24272 + return pax_handle_fetch_fault_64(regs);
24273 +#endif
24274 +}
24275 +#endif
24276 +
24277 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24278 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24279 +{
24280 + long i;
24281 +
24282 + printk(KERN_ERR "PAX: bytes at PC: ");
24283 + for (i = 0; i < 20; i++) {
24284 + unsigned char c;
24285 + if (get_user(c, (unsigned char __force_user *)pc+i))
24286 + printk(KERN_CONT "?? ");
24287 + else
24288 + printk(KERN_CONT "%02x ", c);
24289 + }
24290 + printk("\n");
24291 +
24292 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24293 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24294 + unsigned long c;
24295 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24296 +#ifdef CONFIG_X86_32
24297 + printk(KERN_CONT "???????? ");
24298 +#else
24299 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24300 + printk(KERN_CONT "???????? ???????? ");
24301 + else
24302 + printk(KERN_CONT "???????????????? ");
24303 +#endif
24304 + } else {
24305 +#ifdef CONFIG_X86_64
24306 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24307 + printk(KERN_CONT "%08x ", (unsigned int)c);
24308 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24309 + } else
24310 +#endif
24311 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24312 + }
24313 + }
24314 + printk("\n");
24315 +}
24316 +#endif
24317 +
24318 +/**
24319 + * probe_kernel_write(): safely attempt to write to a location
24320 + * @dst: address to write to
24321 + * @src: pointer to the data that shall be written
24322 + * @size: size of the data chunk
24323 + *
24324 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24325 + * happens, handle that and return -EFAULT.
24326 + */
24327 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24328 +{
24329 + long ret;
24330 + mm_segment_t old_fs = get_fs();
24331 +
24332 + set_fs(KERNEL_DS);
24333 + pagefault_disable();
24334 + pax_open_kernel();
24335 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24336 + pax_close_kernel();
24337 + pagefault_enable();
24338 + set_fs(old_fs);
24339 +
24340 + return ret ? -EFAULT : 0;
24341 +}
24342 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24343 index dd74e46..7d26398 100644
24344 --- a/arch/x86/mm/gup.c
24345 +++ b/arch/x86/mm/gup.c
24346 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24347 addr = start;
24348 len = (unsigned long) nr_pages << PAGE_SHIFT;
24349 end = start + len;
24350 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24351 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24352 (void __user *)start, len)))
24353 return 0;
24354
24355 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24356 index 6f31ee5..8ee4164 100644
24357 --- a/arch/x86/mm/highmem_32.c
24358 +++ b/arch/x86/mm/highmem_32.c
24359 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24360 idx = type + KM_TYPE_NR*smp_processor_id();
24361 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24362 BUG_ON(!pte_none(*(kmap_pte-idx)));
24363 +
24364 + pax_open_kernel();
24365 set_pte(kmap_pte-idx, mk_pte(page, prot));
24366 + pax_close_kernel();
24367 +
24368 arch_flush_lazy_mmu_mode();
24369
24370 return (void *)vaddr;
24371 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24372 index f6679a7..8f795a3 100644
24373 --- a/arch/x86/mm/hugetlbpage.c
24374 +++ b/arch/x86/mm/hugetlbpage.c
24375 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24376 struct hstate *h = hstate_file(file);
24377 struct mm_struct *mm = current->mm;
24378 struct vm_area_struct *vma;
24379 - unsigned long start_addr;
24380 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24381 +
24382 +#ifdef CONFIG_PAX_SEGMEXEC
24383 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24384 + pax_task_size = SEGMEXEC_TASK_SIZE;
24385 +#endif
24386 +
24387 + pax_task_size -= PAGE_SIZE;
24388
24389 if (len > mm->cached_hole_size) {
24390 - start_addr = mm->free_area_cache;
24391 + start_addr = mm->free_area_cache;
24392 } else {
24393 - start_addr = TASK_UNMAPPED_BASE;
24394 - mm->cached_hole_size = 0;
24395 + start_addr = mm->mmap_base;
24396 + mm->cached_hole_size = 0;
24397 }
24398
24399 full_search:
24400 @@ -280,26 +287,27 @@ full_search:
24401
24402 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24403 /* At this point: (!vma || addr < vma->vm_end). */
24404 - if (TASK_SIZE - len < addr) {
24405 + if (pax_task_size - len < addr) {
24406 /*
24407 * Start a new search - just in case we missed
24408 * some holes.
24409 */
24410 - if (start_addr != TASK_UNMAPPED_BASE) {
24411 - start_addr = TASK_UNMAPPED_BASE;
24412 + if (start_addr != mm->mmap_base) {
24413 + start_addr = mm->mmap_base;
24414 mm->cached_hole_size = 0;
24415 goto full_search;
24416 }
24417 return -ENOMEM;
24418 }
24419 - if (!vma || addr + len <= vma->vm_start) {
24420 - mm->free_area_cache = addr + len;
24421 - return addr;
24422 - }
24423 + if (check_heap_stack_gap(vma, addr, len))
24424 + break;
24425 if (addr + mm->cached_hole_size < vma->vm_start)
24426 mm->cached_hole_size = vma->vm_start - addr;
24427 addr = ALIGN(vma->vm_end, huge_page_size(h));
24428 }
24429 +
24430 + mm->free_area_cache = addr + len;
24431 + return addr;
24432 }
24433
24434 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24435 @@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24436 struct mm_struct *mm = current->mm;
24437 struct vm_area_struct *vma;
24438 unsigned long base = mm->mmap_base;
24439 - unsigned long addr = addr0;
24440 + unsigned long addr;
24441 unsigned long largest_hole = mm->cached_hole_size;
24442 - unsigned long start_addr;
24443
24444 /* don't allow allocations above current base */
24445 if (mm->free_area_cache > base)
24446 @@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24447 largest_hole = 0;
24448 mm->free_area_cache = base;
24449 }
24450 -try_again:
24451 - start_addr = mm->free_area_cache;
24452
24453 /* make sure it can fit in the remaining address space */
24454 if (mm->free_area_cache < len)
24455 goto fail;
24456
24457 /* either no address requested or can't fit in requested address hole */
24458 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24459 + addr = mm->free_area_cache - len;
24460 do {
24461 + addr &= huge_page_mask(h);
24462 /*
24463 * Lookup failure means no vma is above this address,
24464 * i.e. return with success:
24465 @@ -340,10 +346,10 @@ try_again:
24466 if (!vma)
24467 return addr;
24468
24469 - if (addr + len <= vma->vm_start) {
24470 + if (check_heap_stack_gap(vma, addr, len)) {
24471 /* remember the address as a hint for next time */
24472 - mm->cached_hole_size = largest_hole;
24473 - return (mm->free_area_cache = addr);
24474 + mm->cached_hole_size = largest_hole;
24475 + return (mm->free_area_cache = addr);
24476 } else if (mm->free_area_cache == vma->vm_end) {
24477 /* pull free_area_cache down to the first hole */
24478 mm->free_area_cache = vma->vm_start;
24479 @@ -352,29 +358,34 @@ try_again:
24480
24481 /* remember the largest hole we saw so far */
24482 if (addr + largest_hole < vma->vm_start)
24483 - largest_hole = vma->vm_start - addr;
24484 + largest_hole = vma->vm_start - addr;
24485
24486 /* try just below the current vma->vm_start */
24487 - addr = (vma->vm_start - len) & huge_page_mask(h);
24488 - } while (len <= vma->vm_start);
24489 + addr = skip_heap_stack_gap(vma, len);
24490 + } while (!IS_ERR_VALUE(addr));
24491
24492 fail:
24493 /*
24494 - * if hint left us with no space for the requested
24495 - * mapping then try again:
24496 - */
24497 - if (start_addr != base) {
24498 - mm->free_area_cache = base;
24499 - largest_hole = 0;
24500 - goto try_again;
24501 - }
24502 - /*
24503 * A failed mmap() very likely causes application failure,
24504 * so fall back to the bottom-up function here. This scenario
24505 * can happen with large stack limits and large mmap()
24506 * allocations.
24507 */
24508 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24509 +
24510 +#ifdef CONFIG_PAX_SEGMEXEC
24511 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24512 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24513 + else
24514 +#endif
24515 +
24516 + mm->mmap_base = TASK_UNMAPPED_BASE;
24517 +
24518 +#ifdef CONFIG_PAX_RANDMMAP
24519 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24520 + mm->mmap_base += mm->delta_mmap;
24521 +#endif
24522 +
24523 + mm->free_area_cache = mm->mmap_base;
24524 mm->cached_hole_size = ~0UL;
24525 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24526 len, pgoff, flags);
24527 @@ -382,6 +393,7 @@ fail:
24528 /*
24529 * Restore the topdown base:
24530 */
24531 + mm->mmap_base = base;
24532 mm->free_area_cache = base;
24533 mm->cached_hole_size = ~0UL;
24534
24535 @@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24536 struct hstate *h = hstate_file(file);
24537 struct mm_struct *mm = current->mm;
24538 struct vm_area_struct *vma;
24539 + unsigned long pax_task_size = TASK_SIZE;
24540
24541 if (len & ~huge_page_mask(h))
24542 return -EINVAL;
24543 - if (len > TASK_SIZE)
24544 +
24545 +#ifdef CONFIG_PAX_SEGMEXEC
24546 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24547 + pax_task_size = SEGMEXEC_TASK_SIZE;
24548 +#endif
24549 +
24550 + pax_task_size -= PAGE_SIZE;
24551 +
24552 + if (len > pax_task_size)
24553 return -ENOMEM;
24554
24555 if (flags & MAP_FIXED) {
24556 @@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24557 if (addr) {
24558 addr = ALIGN(addr, huge_page_size(h));
24559 vma = find_vma(mm, addr);
24560 - if (TASK_SIZE - len >= addr &&
24561 - (!vma || addr + len <= vma->vm_start))
24562 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24563 return addr;
24564 }
24565 if (mm->get_unmapped_area == arch_get_unmapped_area)
24566 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24567 index 4f0cec7..00976ce 100644
24568 --- a/arch/x86/mm/init.c
24569 +++ b/arch/x86/mm/init.c
24570 @@ -16,6 +16,8 @@
24571 #include <asm/tlb.h>
24572 #include <asm/proto.h>
24573 #include <asm/dma.h> /* for MAX_DMA_PFN */
24574 +#include <asm/desc.h>
24575 +#include <asm/bios_ebda.h>
24576
24577 unsigned long __initdata pgt_buf_start;
24578 unsigned long __meminitdata pgt_buf_end;
24579 @@ -32,7 +34,7 @@ int direct_gbpages
24580 static void __init find_early_table_space(unsigned long end, int use_pse,
24581 int use_gbpages)
24582 {
24583 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24584 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24585 phys_addr_t base;
24586
24587 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24588 @@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24589 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24590 * mmio resources as well as potential bios/acpi data regions.
24591 */
24592 +
24593 +#ifdef CONFIG_GRKERNSEC_KMEM
24594 +static unsigned int ebda_start __read_only;
24595 +static unsigned int ebda_end __read_only;
24596 +#endif
24597 +
24598 int devmem_is_allowed(unsigned long pagenr)
24599 {
24600 +#ifdef CONFIG_GRKERNSEC_KMEM
24601 + /* allow BDA */
24602 + if (!pagenr)
24603 + return 1;
24604 + /* allow EBDA */
24605 + if (pagenr >= ebda_start && pagenr < ebda_end)
24606 + return 1;
24607 +#else
24608 + if (!pagenr)
24609 + return 1;
24610 +#ifdef CONFIG_VM86
24611 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24612 + return 1;
24613 +#endif
24614 +#endif
24615 +
24616 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24617 + return 1;
24618 +#ifdef CONFIG_GRKERNSEC_KMEM
24619 + /* throw out everything else below 1MB */
24620 if (pagenr <= 256)
24621 - return 1;
24622 + return 0;
24623 +#endif
24624 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24625 return 0;
24626 if (!page_is_ram(pagenr))
24627 @@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24628 #endif
24629 }
24630
24631 +#ifdef CONFIG_GRKERNSEC_KMEM
24632 +static inline void gr_init_ebda(void)
24633 +{
24634 + unsigned int ebda_addr;
24635 + unsigned int ebda_size = 0;
24636 +
24637 + ebda_addr = get_bios_ebda();
24638 + if (ebda_addr) {
24639 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24640 + ebda_size <<= 10;
24641 + }
24642 + if (ebda_addr && ebda_size) {
24643 + ebda_start = ebda_addr >> PAGE_SHIFT;
24644 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24645 + } else {
24646 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24647 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24648 + }
24649 +}
24650 +#else
24651 +static inline void gr_init_ebda(void) { }
24652 +#endif
24653 +
24654 void free_initmem(void)
24655 {
24656 +#ifdef CONFIG_PAX_KERNEXEC
24657 +#ifdef CONFIG_X86_32
24658 + /* PaX: limit KERNEL_CS to actual size */
24659 + unsigned long addr, limit;
24660 + struct desc_struct d;
24661 + int cpu;
24662 +#else
24663 + pgd_t *pgd;
24664 + pud_t *pud;
24665 + pmd_t *pmd;
24666 + unsigned long addr, end;
24667 +#endif
24668 +#endif
24669 +
24670 + gr_init_ebda();
24671 +
24672 +#ifdef CONFIG_PAX_KERNEXEC
24673 +#ifdef CONFIG_X86_32
24674 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24675 + limit = (limit - 1UL) >> PAGE_SHIFT;
24676 +
24677 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24678 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24679 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24680 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24681 + }
24682 +
24683 + /* PaX: make KERNEL_CS read-only */
24684 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24685 + if (!paravirt_enabled())
24686 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24687 +/*
24688 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24689 + pgd = pgd_offset_k(addr);
24690 + pud = pud_offset(pgd, addr);
24691 + pmd = pmd_offset(pud, addr);
24692 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24693 + }
24694 +*/
24695 +#ifdef CONFIG_X86_PAE
24696 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24697 +/*
24698 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24699 + pgd = pgd_offset_k(addr);
24700 + pud = pud_offset(pgd, addr);
24701 + pmd = pmd_offset(pud, addr);
24702 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24703 + }
24704 +*/
24705 +#endif
24706 +
24707 +#ifdef CONFIG_MODULES
24708 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24709 +#endif
24710 +
24711 +#else
24712 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24713 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24714 + pgd = pgd_offset_k(addr);
24715 + pud = pud_offset(pgd, addr);
24716 + pmd = pmd_offset(pud, addr);
24717 + if (!pmd_present(*pmd))
24718 + continue;
24719 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24720 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24721 + else
24722 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24723 + }
24724 +
24725 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24726 + end = addr + KERNEL_IMAGE_SIZE;
24727 + for (; addr < end; addr += PMD_SIZE) {
24728 + pgd = pgd_offset_k(addr);
24729 + pud = pud_offset(pgd, addr);
24730 + pmd = pmd_offset(pud, addr);
24731 + if (!pmd_present(*pmd))
24732 + continue;
24733 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24734 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24735 + }
24736 +#endif
24737 +
24738 + flush_tlb_all();
24739 +#endif
24740 +
24741 free_init_pages("unused kernel memory",
24742 (unsigned long)(&__init_begin),
24743 (unsigned long)(&__init_end));
24744 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24745 index 575d86f..4987469 100644
24746 --- a/arch/x86/mm/init_32.c
24747 +++ b/arch/x86/mm/init_32.c
24748 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24749 }
24750
24751 /*
24752 - * Creates a middle page table and puts a pointer to it in the
24753 - * given global directory entry. This only returns the gd entry
24754 - * in non-PAE compilation mode, since the middle layer is folded.
24755 - */
24756 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24757 -{
24758 - pud_t *pud;
24759 - pmd_t *pmd_table;
24760 -
24761 -#ifdef CONFIG_X86_PAE
24762 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24763 - if (after_bootmem)
24764 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24765 - else
24766 - pmd_table = (pmd_t *)alloc_low_page();
24767 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24768 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24769 - pud = pud_offset(pgd, 0);
24770 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24771 -
24772 - return pmd_table;
24773 - }
24774 -#endif
24775 - pud = pud_offset(pgd, 0);
24776 - pmd_table = pmd_offset(pud, 0);
24777 -
24778 - return pmd_table;
24779 -}
24780 -
24781 -/*
24782 * Create a page table and place a pointer to it in a middle page
24783 * directory entry:
24784 */
24785 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24786 page_table = (pte_t *)alloc_low_page();
24787
24788 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24789 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24790 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24791 +#else
24792 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24793 +#endif
24794 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24795 }
24796
24797 return pte_offset_kernel(pmd, 0);
24798 }
24799
24800 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24801 +{
24802 + pud_t *pud;
24803 + pmd_t *pmd_table;
24804 +
24805 + pud = pud_offset(pgd, 0);
24806 + pmd_table = pmd_offset(pud, 0);
24807 +
24808 + return pmd_table;
24809 +}
24810 +
24811 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24812 {
24813 int pgd_idx = pgd_index(vaddr);
24814 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24815 int pgd_idx, pmd_idx;
24816 unsigned long vaddr;
24817 pgd_t *pgd;
24818 + pud_t *pud;
24819 pmd_t *pmd;
24820 pte_t *pte = NULL;
24821
24822 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24823 pgd = pgd_base + pgd_idx;
24824
24825 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24826 - pmd = one_md_table_init(pgd);
24827 - pmd = pmd + pmd_index(vaddr);
24828 + pud = pud_offset(pgd, vaddr);
24829 + pmd = pmd_offset(pud, vaddr);
24830 +
24831 +#ifdef CONFIG_X86_PAE
24832 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24833 +#endif
24834 +
24835 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24836 pmd++, pmd_idx++) {
24837 pte = page_table_kmap_check(one_page_table_init(pmd),
24838 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24839 }
24840 }
24841
24842 -static inline int is_kernel_text(unsigned long addr)
24843 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24844 {
24845 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24846 - return 1;
24847 - return 0;
24848 + if ((start > ktla_ktva((unsigned long)_etext) ||
24849 + end <= ktla_ktva((unsigned long)_stext)) &&
24850 + (start > ktla_ktva((unsigned long)_einittext) ||
24851 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24852 +
24853 +#ifdef CONFIG_ACPI_SLEEP
24854 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24855 +#endif
24856 +
24857 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24858 + return 0;
24859 + return 1;
24860 }
24861
24862 /*
24863 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24864 unsigned long last_map_addr = end;
24865 unsigned long start_pfn, end_pfn;
24866 pgd_t *pgd_base = swapper_pg_dir;
24867 - int pgd_idx, pmd_idx, pte_ofs;
24868 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24869 unsigned long pfn;
24870 pgd_t *pgd;
24871 + pud_t *pud;
24872 pmd_t *pmd;
24873 pte_t *pte;
24874 unsigned pages_2m, pages_4k;
24875 @@ -280,8 +281,13 @@ repeat:
24876 pfn = start_pfn;
24877 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24878 pgd = pgd_base + pgd_idx;
24879 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24880 - pmd = one_md_table_init(pgd);
24881 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24882 + pud = pud_offset(pgd, 0);
24883 + pmd = pmd_offset(pud, 0);
24884 +
24885 +#ifdef CONFIG_X86_PAE
24886 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24887 +#endif
24888
24889 if (pfn >= end_pfn)
24890 continue;
24891 @@ -293,14 +299,13 @@ repeat:
24892 #endif
24893 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24894 pmd++, pmd_idx++) {
24895 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24896 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24897
24898 /*
24899 * Map with big pages if possible, otherwise
24900 * create normal page tables:
24901 */
24902 if (use_pse) {
24903 - unsigned int addr2;
24904 pgprot_t prot = PAGE_KERNEL_LARGE;
24905 /*
24906 * first pass will use the same initial
24907 @@ -310,11 +315,7 @@ repeat:
24908 __pgprot(PTE_IDENT_ATTR |
24909 _PAGE_PSE);
24910
24911 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24912 - PAGE_OFFSET + PAGE_SIZE-1;
24913 -
24914 - if (is_kernel_text(addr) ||
24915 - is_kernel_text(addr2))
24916 + if (is_kernel_text(address, address + PMD_SIZE))
24917 prot = PAGE_KERNEL_LARGE_EXEC;
24918
24919 pages_2m++;
24920 @@ -331,7 +332,7 @@ repeat:
24921 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24922 pte += pte_ofs;
24923 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24924 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24925 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24926 pgprot_t prot = PAGE_KERNEL;
24927 /*
24928 * first pass will use the same initial
24929 @@ -339,7 +340,7 @@ repeat:
24930 */
24931 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24932
24933 - if (is_kernel_text(addr))
24934 + if (is_kernel_text(address, address + PAGE_SIZE))
24935 prot = PAGE_KERNEL_EXEC;
24936
24937 pages_4k++;
24938 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24939
24940 pud = pud_offset(pgd, va);
24941 pmd = pmd_offset(pud, va);
24942 - if (!pmd_present(*pmd))
24943 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24944 break;
24945
24946 pte = pte_offset_kernel(pmd, va);
24947 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24948
24949 static void __init pagetable_init(void)
24950 {
24951 - pgd_t *pgd_base = swapper_pg_dir;
24952 -
24953 - permanent_kmaps_init(pgd_base);
24954 + permanent_kmaps_init(swapper_pg_dir);
24955 }
24956
24957 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24958 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24959 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24960
24961 /* user-defined highmem size */
24962 @@ -734,6 +733,12 @@ void __init mem_init(void)
24963
24964 pci_iommu_alloc();
24965
24966 +#ifdef CONFIG_PAX_PER_CPU_PGD
24967 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24968 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24969 + KERNEL_PGD_PTRS);
24970 +#endif
24971 +
24972 #ifdef CONFIG_FLATMEM
24973 BUG_ON(!mem_map);
24974 #endif
24975 @@ -760,7 +765,7 @@ void __init mem_init(void)
24976 reservedpages++;
24977
24978 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24979 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24980 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24981 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24982
24983 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24984 @@ -801,10 +806,10 @@ void __init mem_init(void)
24985 ((unsigned long)&__init_end -
24986 (unsigned long)&__init_begin) >> 10,
24987
24988 - (unsigned long)&_etext, (unsigned long)&_edata,
24989 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24990 + (unsigned long)&_sdata, (unsigned long)&_edata,
24991 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24992
24993 - (unsigned long)&_text, (unsigned long)&_etext,
24994 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24995 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24996
24997 /*
24998 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24999 if (!kernel_set_to_readonly)
25000 return;
25001
25002 + start = ktla_ktva(start);
25003 pr_debug("Set kernel text: %lx - %lx for read write\n",
25004 start, start+size);
25005
25006 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
25007 if (!kernel_set_to_readonly)
25008 return;
25009
25010 + start = ktla_ktva(start);
25011 pr_debug("Set kernel text: %lx - %lx for read only\n",
25012 start, start+size);
25013
25014 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
25015 unsigned long start = PFN_ALIGN(_text);
25016 unsigned long size = PFN_ALIGN(_etext) - start;
25017
25018 + start = ktla_ktva(start);
25019 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25020 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25021 size >> 10);
25022 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25023 index fc18be0..e539653 100644
25024 --- a/arch/x86/mm/init_64.c
25025 +++ b/arch/x86/mm/init_64.c
25026 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25027 * around without checking the pgd every time.
25028 */
25029
25030 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25031 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25032 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25033
25034 int force_personality32;
25035 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25036
25037 for (address = start; address <= end; address += PGDIR_SIZE) {
25038 const pgd_t *pgd_ref = pgd_offset_k(address);
25039 +
25040 +#ifdef CONFIG_PAX_PER_CPU_PGD
25041 + unsigned long cpu;
25042 +#else
25043 struct page *page;
25044 +#endif
25045
25046 if (pgd_none(*pgd_ref))
25047 continue;
25048
25049 spin_lock(&pgd_lock);
25050 +
25051 +#ifdef CONFIG_PAX_PER_CPU_PGD
25052 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25053 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25054 +#else
25055 list_for_each_entry(page, &pgd_list, lru) {
25056 pgd_t *pgd;
25057 spinlock_t *pgt_lock;
25058 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25059 /* the pgt_lock only for Xen */
25060 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25061 spin_lock(pgt_lock);
25062 +#endif
25063
25064 if (pgd_none(*pgd))
25065 set_pgd(pgd, *pgd_ref);
25066 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25067 BUG_ON(pgd_page_vaddr(*pgd)
25068 != pgd_page_vaddr(*pgd_ref));
25069
25070 +#ifndef CONFIG_PAX_PER_CPU_PGD
25071 spin_unlock(pgt_lock);
25072 +#endif
25073 +
25074 }
25075 spin_unlock(&pgd_lock);
25076 }
25077 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25078 {
25079 if (pgd_none(*pgd)) {
25080 pud_t *pud = (pud_t *)spp_getpage();
25081 - pgd_populate(&init_mm, pgd, pud);
25082 + pgd_populate_kernel(&init_mm, pgd, pud);
25083 if (pud != pud_offset(pgd, 0))
25084 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25085 pud, pud_offset(pgd, 0));
25086 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25087 {
25088 if (pud_none(*pud)) {
25089 pmd_t *pmd = (pmd_t *) spp_getpage();
25090 - pud_populate(&init_mm, pud, pmd);
25091 + pud_populate_kernel(&init_mm, pud, pmd);
25092 if (pmd != pmd_offset(pud, 0))
25093 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25094 pmd, pmd_offset(pud, 0));
25095 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25096 pmd = fill_pmd(pud, vaddr);
25097 pte = fill_pte(pmd, vaddr);
25098
25099 + pax_open_kernel();
25100 set_pte(pte, new_pte);
25101 + pax_close_kernel();
25102
25103 /*
25104 * It's enough to flush this one mapping.
25105 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25106 pgd = pgd_offset_k((unsigned long)__va(phys));
25107 if (pgd_none(*pgd)) {
25108 pud = (pud_t *) spp_getpage();
25109 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25110 - _PAGE_USER));
25111 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25112 }
25113 pud = pud_offset(pgd, (unsigned long)__va(phys));
25114 if (pud_none(*pud)) {
25115 pmd = (pmd_t *) spp_getpage();
25116 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25117 - _PAGE_USER));
25118 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25119 }
25120 pmd = pmd_offset(pud, phys);
25121 BUG_ON(!pmd_none(*pmd));
25122 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25123 if (pfn >= pgt_buf_top)
25124 panic("alloc_low_page: ran out of memory");
25125
25126 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25127 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25128 clear_page(adr);
25129 *phys = pfn * PAGE_SIZE;
25130 return adr;
25131 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25132
25133 phys = __pa(virt);
25134 left = phys & (PAGE_SIZE - 1);
25135 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25136 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25137 adr = (void *)(((unsigned long)adr) | left);
25138
25139 return adr;
25140 @@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25141 unmap_low_page(pmd);
25142
25143 spin_lock(&init_mm.page_table_lock);
25144 - pud_populate(&init_mm, pud, __va(pmd_phys));
25145 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25146 spin_unlock(&init_mm.page_table_lock);
25147 }
25148 __flush_tlb_all();
25149 @@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25150 unmap_low_page(pud);
25151
25152 spin_lock(&init_mm.page_table_lock);
25153 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25154 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25155 spin_unlock(&init_mm.page_table_lock);
25156 pgd_changed = true;
25157 }
25158 @@ -683,6 +697,12 @@ void __init mem_init(void)
25159
25160 pci_iommu_alloc();
25161
25162 +#ifdef CONFIG_PAX_PER_CPU_PGD
25163 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25164 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25165 + KERNEL_PGD_PTRS);
25166 +#endif
25167 +
25168 /* clear_bss() already clear the empty_zero_page */
25169
25170 reservedpages = 0;
25171 @@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25172 static struct vm_area_struct gate_vma = {
25173 .vm_start = VSYSCALL_START,
25174 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25175 - .vm_page_prot = PAGE_READONLY_EXEC,
25176 - .vm_flags = VM_READ | VM_EXEC
25177 + .vm_page_prot = PAGE_READONLY,
25178 + .vm_flags = VM_READ
25179 };
25180
25181 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25182 @@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25183
25184 const char *arch_vma_name(struct vm_area_struct *vma)
25185 {
25186 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25187 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25188 return "[vdso]";
25189 if (vma == &gate_vma)
25190 return "[vsyscall]";
25191 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25192 index 7b179b4..6bd1777 100644
25193 --- a/arch/x86/mm/iomap_32.c
25194 +++ b/arch/x86/mm/iomap_32.c
25195 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25196 type = kmap_atomic_idx_push();
25197 idx = type + KM_TYPE_NR * smp_processor_id();
25198 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25199 +
25200 + pax_open_kernel();
25201 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25202 + pax_close_kernel();
25203 +
25204 arch_flush_lazy_mmu_mode();
25205
25206 return (void *)vaddr;
25207 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25208 index be1ef57..55f0160 100644
25209 --- a/arch/x86/mm/ioremap.c
25210 +++ b/arch/x86/mm/ioremap.c
25211 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25212 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25213 int is_ram = page_is_ram(pfn);
25214
25215 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25216 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25217 return NULL;
25218 WARN_ON_ONCE(is_ram);
25219 }
25220 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25221
25222 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25223 if (page_is_ram(start >> PAGE_SHIFT))
25224 +#ifdef CONFIG_HIGHMEM
25225 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25226 +#endif
25227 return __va(phys);
25228
25229 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25230 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25231 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25232
25233 static __initdata int after_paging_init;
25234 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25235 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25236
25237 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25238 {
25239 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25240 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25241
25242 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25243 - memset(bm_pte, 0, sizeof(bm_pte));
25244 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25245 + pmd_populate_user(&init_mm, pmd, bm_pte);
25246
25247 /*
25248 * The boot-ioremap range spans multiple pmds, for which
25249 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25250 index d87dd6d..bf3fa66 100644
25251 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25252 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25253 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25254 * memory (e.g. tracked pages)? For now, we need this to avoid
25255 * invoking kmemcheck for PnP BIOS calls.
25256 */
25257 - if (regs->flags & X86_VM_MASK)
25258 + if (v8086_mode(regs))
25259 return false;
25260 - if (regs->cs != __KERNEL_CS)
25261 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25262 return false;
25263
25264 pte = kmemcheck_pte_lookup(address);
25265 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25266 index 845df68..1d8d29f 100644
25267 --- a/arch/x86/mm/mmap.c
25268 +++ b/arch/x86/mm/mmap.c
25269 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25270 * Leave an at least ~128 MB hole with possible stack randomization.
25271 */
25272 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25273 -#define MAX_GAP (TASK_SIZE/6*5)
25274 +#define MAX_GAP (pax_task_size/6*5)
25275
25276 static int mmap_is_legacy(void)
25277 {
25278 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25279 return rnd << PAGE_SHIFT;
25280 }
25281
25282 -static unsigned long mmap_base(void)
25283 +static unsigned long mmap_base(struct mm_struct *mm)
25284 {
25285 unsigned long gap = rlimit(RLIMIT_STACK);
25286 + unsigned long pax_task_size = TASK_SIZE;
25287 +
25288 +#ifdef CONFIG_PAX_SEGMEXEC
25289 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25290 + pax_task_size = SEGMEXEC_TASK_SIZE;
25291 +#endif
25292
25293 if (gap < MIN_GAP)
25294 gap = MIN_GAP;
25295 else if (gap > MAX_GAP)
25296 gap = MAX_GAP;
25297
25298 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25299 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25300 }
25301
25302 /*
25303 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25304 * does, but not when emulating X86_32
25305 */
25306 -static unsigned long mmap_legacy_base(void)
25307 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25308 {
25309 - if (mmap_is_ia32())
25310 + if (mmap_is_ia32()) {
25311 +
25312 +#ifdef CONFIG_PAX_SEGMEXEC
25313 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25314 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25315 + else
25316 +#endif
25317 +
25318 return TASK_UNMAPPED_BASE;
25319 - else
25320 + } else
25321 return TASK_UNMAPPED_BASE + mmap_rnd();
25322 }
25323
25324 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25325 void arch_pick_mmap_layout(struct mm_struct *mm)
25326 {
25327 if (mmap_is_legacy()) {
25328 - mm->mmap_base = mmap_legacy_base();
25329 + mm->mmap_base = mmap_legacy_base(mm);
25330 +
25331 +#ifdef CONFIG_PAX_RANDMMAP
25332 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25333 + mm->mmap_base += mm->delta_mmap;
25334 +#endif
25335 +
25336 mm->get_unmapped_area = arch_get_unmapped_area;
25337 mm->unmap_area = arch_unmap_area;
25338 } else {
25339 - mm->mmap_base = mmap_base();
25340 + mm->mmap_base = mmap_base(mm);
25341 +
25342 +#ifdef CONFIG_PAX_RANDMMAP
25343 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25344 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25345 +#endif
25346 +
25347 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25348 mm->unmap_area = arch_unmap_area_topdown;
25349 }
25350 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25351 index dc0b727..dc9d71a 100644
25352 --- a/arch/x86/mm/mmio-mod.c
25353 +++ b/arch/x86/mm/mmio-mod.c
25354 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25355 break;
25356 default:
25357 {
25358 - unsigned char *ip = (unsigned char *)instptr;
25359 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25360 my_trace->opcode = MMIO_UNKNOWN_OP;
25361 my_trace->width = 0;
25362 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25363 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25364 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25365 void __iomem *addr)
25366 {
25367 - static atomic_t next_id;
25368 + static atomic_unchecked_t next_id;
25369 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25370 /* These are page-unaligned. */
25371 struct mmiotrace_map map = {
25372 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25373 .private = trace
25374 },
25375 .phys = offset,
25376 - .id = atomic_inc_return(&next_id)
25377 + .id = atomic_inc_return_unchecked(&next_id)
25378 };
25379 map.map_id = trace->id;
25380
25381 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25382 index b008656..773eac2 100644
25383 --- a/arch/x86/mm/pageattr-test.c
25384 +++ b/arch/x86/mm/pageattr-test.c
25385 @@ -36,7 +36,7 @@ enum {
25386
25387 static int pte_testbit(pte_t pte)
25388 {
25389 - return pte_flags(pte) & _PAGE_UNUSED1;
25390 + return pte_flags(pte) & _PAGE_CPA_TEST;
25391 }
25392
25393 struct split_state {
25394 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25395 index e1ebde3..b1e1db38 100644
25396 --- a/arch/x86/mm/pageattr.c
25397 +++ b/arch/x86/mm/pageattr.c
25398 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25399 */
25400 #ifdef CONFIG_PCI_BIOS
25401 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25402 - pgprot_val(forbidden) |= _PAGE_NX;
25403 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25404 #endif
25405
25406 /*
25407 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25408 * Does not cover __inittext since that is gone later on. On
25409 * 64bit we do not enforce !NX on the low mapping
25410 */
25411 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25412 - pgprot_val(forbidden) |= _PAGE_NX;
25413 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25414 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25415
25416 +#ifdef CONFIG_DEBUG_RODATA
25417 /*
25418 * The .rodata section needs to be read-only. Using the pfn
25419 * catches all aliases.
25420 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25421 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25422 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25423 pgprot_val(forbidden) |= _PAGE_RW;
25424 +#endif
25425
25426 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25427 /*
25428 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25429 }
25430 #endif
25431
25432 +#ifdef CONFIG_PAX_KERNEXEC
25433 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25434 + pgprot_val(forbidden) |= _PAGE_RW;
25435 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25436 + }
25437 +#endif
25438 +
25439 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25440
25441 return prot;
25442 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25443 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25444 {
25445 /* change init_mm */
25446 + pax_open_kernel();
25447 set_pte_atomic(kpte, pte);
25448 +
25449 #ifdef CONFIG_X86_32
25450 if (!SHARED_KERNEL_PMD) {
25451 +
25452 +#ifdef CONFIG_PAX_PER_CPU_PGD
25453 + unsigned long cpu;
25454 +#else
25455 struct page *page;
25456 +#endif
25457
25458 +#ifdef CONFIG_PAX_PER_CPU_PGD
25459 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25460 + pgd_t *pgd = get_cpu_pgd(cpu);
25461 +#else
25462 list_for_each_entry(page, &pgd_list, lru) {
25463 - pgd_t *pgd;
25464 + pgd_t *pgd = (pgd_t *)page_address(page);
25465 +#endif
25466 +
25467 pud_t *pud;
25468 pmd_t *pmd;
25469
25470 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25471 + pgd += pgd_index(address);
25472 pud = pud_offset(pgd, address);
25473 pmd = pmd_offset(pud, address);
25474 set_pte_atomic((pte_t *)pmd, pte);
25475 }
25476 }
25477 #endif
25478 + pax_close_kernel();
25479 }
25480
25481 static int
25482 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25483 index f6ff57b..481690f 100644
25484 --- a/arch/x86/mm/pat.c
25485 +++ b/arch/x86/mm/pat.c
25486 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25487
25488 if (!entry) {
25489 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25490 - current->comm, current->pid, start, end);
25491 + current->comm, task_pid_nr(current), start, end);
25492 return -EINVAL;
25493 }
25494
25495 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25496 while (cursor < to) {
25497 if (!devmem_is_allowed(pfn)) {
25498 printk(KERN_INFO
25499 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25500 - current->comm, from, to);
25501 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25502 + current->comm, from, to, cursor);
25503 return 0;
25504 }
25505 cursor += PAGE_SIZE;
25506 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25507 printk(KERN_INFO
25508 "%s:%d ioremap_change_attr failed %s "
25509 "for %Lx-%Lx\n",
25510 - current->comm, current->pid,
25511 + current->comm, task_pid_nr(current),
25512 cattr_name(flags),
25513 base, (unsigned long long)(base + size));
25514 return -EINVAL;
25515 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25516 if (want_flags != flags) {
25517 printk(KERN_WARNING
25518 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25519 - current->comm, current->pid,
25520 + current->comm, task_pid_nr(current),
25521 cattr_name(want_flags),
25522 (unsigned long long)paddr,
25523 (unsigned long long)(paddr + size),
25524 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25525 free_memtype(paddr, paddr + size);
25526 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25527 " for %Lx-%Lx, got %s\n",
25528 - current->comm, current->pid,
25529 + current->comm, task_pid_nr(current),
25530 cattr_name(want_flags),
25531 (unsigned long long)paddr,
25532 (unsigned long long)(paddr + size),
25533 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25534 index 9f0614d..92ae64a 100644
25535 --- a/arch/x86/mm/pf_in.c
25536 +++ b/arch/x86/mm/pf_in.c
25537 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25538 int i;
25539 enum reason_type rv = OTHERS;
25540
25541 - p = (unsigned char *)ins_addr;
25542 + p = (unsigned char *)ktla_ktva(ins_addr);
25543 p += skip_prefix(p, &prf);
25544 p += get_opcode(p, &opcode);
25545
25546 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25547 struct prefix_bits prf;
25548 int i;
25549
25550 - p = (unsigned char *)ins_addr;
25551 + p = (unsigned char *)ktla_ktva(ins_addr);
25552 p += skip_prefix(p, &prf);
25553 p += get_opcode(p, &opcode);
25554
25555 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25556 struct prefix_bits prf;
25557 int i;
25558
25559 - p = (unsigned char *)ins_addr;
25560 + p = (unsigned char *)ktla_ktva(ins_addr);
25561 p += skip_prefix(p, &prf);
25562 p += get_opcode(p, &opcode);
25563
25564 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25565 struct prefix_bits prf;
25566 int i;
25567
25568 - p = (unsigned char *)ins_addr;
25569 + p = (unsigned char *)ktla_ktva(ins_addr);
25570 p += skip_prefix(p, &prf);
25571 p += get_opcode(p, &opcode);
25572 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25573 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25574 struct prefix_bits prf;
25575 int i;
25576
25577 - p = (unsigned char *)ins_addr;
25578 + p = (unsigned char *)ktla_ktva(ins_addr);
25579 p += skip_prefix(p, &prf);
25580 p += get_opcode(p, &opcode);
25581 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25582 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25583 index 8573b83..4f3ed7e 100644
25584 --- a/arch/x86/mm/pgtable.c
25585 +++ b/arch/x86/mm/pgtable.c
25586 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25587 list_del(&page->lru);
25588 }
25589
25590 -#define UNSHARED_PTRS_PER_PGD \
25591 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25592 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25593 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25594
25595 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25596 +{
25597 + unsigned int count = USER_PGD_PTRS;
25598
25599 + while (count--)
25600 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25601 +}
25602 +#endif
25603 +
25604 +#ifdef CONFIG_PAX_PER_CPU_PGD
25605 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25606 +{
25607 + unsigned int count = USER_PGD_PTRS;
25608 +
25609 + while (count--) {
25610 + pgd_t pgd;
25611 +
25612 +#ifdef CONFIG_X86_64
25613 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25614 +#else
25615 + pgd = *src++;
25616 +#endif
25617 +
25618 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25619 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25620 +#endif
25621 +
25622 + *dst++ = pgd;
25623 + }
25624 +
25625 +}
25626 +#endif
25627 +
25628 +#ifdef CONFIG_X86_64
25629 +#define pxd_t pud_t
25630 +#define pyd_t pgd_t
25631 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25632 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25633 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25634 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25635 +#define PYD_SIZE PGDIR_SIZE
25636 +#else
25637 +#define pxd_t pmd_t
25638 +#define pyd_t pud_t
25639 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25640 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25641 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25642 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25643 +#define PYD_SIZE PUD_SIZE
25644 +#endif
25645 +
25646 +#ifdef CONFIG_PAX_PER_CPU_PGD
25647 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25648 +static inline void pgd_dtor(pgd_t *pgd) {}
25649 +#else
25650 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25651 {
25652 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25653 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25654 pgd_list_del(pgd);
25655 spin_unlock(&pgd_lock);
25656 }
25657 +#endif
25658
25659 /*
25660 * List of all pgd's needed for non-PAE so it can invalidate entries
25661 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25662 * -- wli
25663 */
25664
25665 -#ifdef CONFIG_X86_PAE
25666 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25667 /*
25668 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25669 * updating the top-level pagetable entries to guarantee the
25670 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25671 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25672 * and initialize the kernel pmds here.
25673 */
25674 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25675 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25676
25677 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25678 {
25679 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25680 */
25681 flush_tlb_mm(mm);
25682 }
25683 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25684 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25685 #else /* !CONFIG_X86_PAE */
25686
25687 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25688 -#define PREALLOCATED_PMDS 0
25689 +#define PREALLOCATED_PXDS 0
25690
25691 #endif /* CONFIG_X86_PAE */
25692
25693 -static void free_pmds(pmd_t *pmds[])
25694 +static void free_pxds(pxd_t *pxds[])
25695 {
25696 int i;
25697
25698 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25699 - if (pmds[i])
25700 - free_page((unsigned long)pmds[i]);
25701 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25702 + if (pxds[i])
25703 + free_page((unsigned long)pxds[i]);
25704 }
25705
25706 -static int preallocate_pmds(pmd_t *pmds[])
25707 +static int preallocate_pxds(pxd_t *pxds[])
25708 {
25709 int i;
25710 bool failed = false;
25711
25712 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25713 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25714 - if (pmd == NULL)
25715 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25716 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25717 + if (pxd == NULL)
25718 failed = true;
25719 - pmds[i] = pmd;
25720 + pxds[i] = pxd;
25721 }
25722
25723 if (failed) {
25724 - free_pmds(pmds);
25725 + free_pxds(pxds);
25726 return -ENOMEM;
25727 }
25728
25729 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25730 * preallocate which never got a corresponding vma will need to be
25731 * freed manually.
25732 */
25733 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25734 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25735 {
25736 int i;
25737
25738 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25739 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25740 pgd_t pgd = pgdp[i];
25741
25742 if (pgd_val(pgd) != 0) {
25743 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25744 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25745
25746 - pgdp[i] = native_make_pgd(0);
25747 + set_pgd(pgdp + i, native_make_pgd(0));
25748
25749 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25750 - pmd_free(mm, pmd);
25751 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25752 + pxd_free(mm, pxd);
25753 }
25754 }
25755 }
25756
25757 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25758 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25759 {
25760 - pud_t *pud;
25761 + pyd_t *pyd;
25762 unsigned long addr;
25763 int i;
25764
25765 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25766 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25767 return;
25768
25769 - pud = pud_offset(pgd, 0);
25770 +#ifdef CONFIG_X86_64
25771 + pyd = pyd_offset(mm, 0L);
25772 +#else
25773 + pyd = pyd_offset(pgd, 0L);
25774 +#endif
25775
25776 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25777 - i++, pud++, addr += PUD_SIZE) {
25778 - pmd_t *pmd = pmds[i];
25779 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25780 + i++, pyd++, addr += PYD_SIZE) {
25781 + pxd_t *pxd = pxds[i];
25782
25783 if (i >= KERNEL_PGD_BOUNDARY)
25784 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25785 - sizeof(pmd_t) * PTRS_PER_PMD);
25786 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25787 + sizeof(pxd_t) * PTRS_PER_PMD);
25788
25789 - pud_populate(mm, pud, pmd);
25790 + pyd_populate(mm, pyd, pxd);
25791 }
25792 }
25793
25794 pgd_t *pgd_alloc(struct mm_struct *mm)
25795 {
25796 pgd_t *pgd;
25797 - pmd_t *pmds[PREALLOCATED_PMDS];
25798 + pxd_t *pxds[PREALLOCATED_PXDS];
25799
25800 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25801
25802 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25803
25804 mm->pgd = pgd;
25805
25806 - if (preallocate_pmds(pmds) != 0)
25807 + if (preallocate_pxds(pxds) != 0)
25808 goto out_free_pgd;
25809
25810 if (paravirt_pgd_alloc(mm) != 0)
25811 - goto out_free_pmds;
25812 + goto out_free_pxds;
25813
25814 /*
25815 * Make sure that pre-populating the pmds is atomic with
25816 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25817 spin_lock(&pgd_lock);
25818
25819 pgd_ctor(mm, pgd);
25820 - pgd_prepopulate_pmd(mm, pgd, pmds);
25821 + pgd_prepopulate_pxd(mm, pgd, pxds);
25822
25823 spin_unlock(&pgd_lock);
25824
25825 return pgd;
25826
25827 -out_free_pmds:
25828 - free_pmds(pmds);
25829 +out_free_pxds:
25830 + free_pxds(pxds);
25831 out_free_pgd:
25832 free_page((unsigned long)pgd);
25833 out:
25834 @@ -295,7 +356,7 @@ out:
25835
25836 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25837 {
25838 - pgd_mop_up_pmds(mm, pgd);
25839 + pgd_mop_up_pxds(mm, pgd);
25840 pgd_dtor(pgd);
25841 paravirt_pgd_free(mm, pgd);
25842 free_page((unsigned long)pgd);
25843 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25844 index a69bcb8..19068ab 100644
25845 --- a/arch/x86/mm/pgtable_32.c
25846 +++ b/arch/x86/mm/pgtable_32.c
25847 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25848 return;
25849 }
25850 pte = pte_offset_kernel(pmd, vaddr);
25851 +
25852 + pax_open_kernel();
25853 if (pte_val(pteval))
25854 set_pte_at(&init_mm, vaddr, pte, pteval);
25855 else
25856 pte_clear(&init_mm, vaddr, pte);
25857 + pax_close_kernel();
25858
25859 /*
25860 * It's enough to flush this one mapping.
25861 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25862 index 410531d..0f16030 100644
25863 --- a/arch/x86/mm/setup_nx.c
25864 +++ b/arch/x86/mm/setup_nx.c
25865 @@ -5,8 +5,10 @@
25866 #include <asm/pgtable.h>
25867 #include <asm/proto.h>
25868
25869 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25870 static int disable_nx __cpuinitdata;
25871
25872 +#ifndef CONFIG_PAX_PAGEEXEC
25873 /*
25874 * noexec = on|off
25875 *
25876 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25877 return 0;
25878 }
25879 early_param("noexec", noexec_setup);
25880 +#endif
25881 +
25882 +#endif
25883
25884 void __cpuinit x86_configure_nx(void)
25885 {
25886 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25887 if (cpu_has_nx && !disable_nx)
25888 __supported_pte_mask |= _PAGE_NX;
25889 else
25890 +#endif
25891 __supported_pte_mask &= ~_PAGE_NX;
25892 }
25893
25894 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25895 index d6c0418..06a0ad5 100644
25896 --- a/arch/x86/mm/tlb.c
25897 +++ b/arch/x86/mm/tlb.c
25898 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25899 BUG();
25900 cpumask_clear_cpu(cpu,
25901 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25902 +
25903 +#ifndef CONFIG_PAX_PER_CPU_PGD
25904 load_cr3(swapper_pg_dir);
25905 +#endif
25906 +
25907 }
25908 EXPORT_SYMBOL_GPL(leave_mm);
25909
25910 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25911 index 877b9a1..a8ecf42 100644
25912 --- a/arch/x86/net/bpf_jit.S
25913 +++ b/arch/x86/net/bpf_jit.S
25914 @@ -9,6 +9,7 @@
25915 */
25916 #include <linux/linkage.h>
25917 #include <asm/dwarf2.h>
25918 +#include <asm/alternative-asm.h>
25919
25920 /*
25921 * Calling convention :
25922 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25923 jle bpf_slow_path_word
25924 mov (SKBDATA,%rsi),%eax
25925 bswap %eax /* ntohl() */
25926 + pax_force_retaddr
25927 ret
25928
25929 sk_load_half:
25930 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25931 jle bpf_slow_path_half
25932 movzwl (SKBDATA,%rsi),%eax
25933 rol $8,%ax # ntohs()
25934 + pax_force_retaddr
25935 ret
25936
25937 sk_load_byte:
25938 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25939 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25940 jle bpf_slow_path_byte
25941 movzbl (SKBDATA,%rsi),%eax
25942 + pax_force_retaddr
25943 ret
25944
25945 /**
25946 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25947 movzbl (SKBDATA,%rsi),%ebx
25948 and $15,%bl
25949 shl $2,%bl
25950 + pax_force_retaddr
25951 ret
25952
25953 /* rsi contains offset and can be scratched */
25954 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25955 js bpf_error
25956 mov -12(%rbp),%eax
25957 bswap %eax
25958 + pax_force_retaddr
25959 ret
25960
25961 bpf_slow_path_half:
25962 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25963 mov -12(%rbp),%ax
25964 rol $8,%ax
25965 movzwl %ax,%eax
25966 + pax_force_retaddr
25967 ret
25968
25969 bpf_slow_path_byte:
25970 bpf_slow_path_common(1)
25971 js bpf_error
25972 movzbl -12(%rbp),%eax
25973 + pax_force_retaddr
25974 ret
25975
25976 bpf_slow_path_byte_msh:
25977 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25978 and $15,%al
25979 shl $2,%al
25980 xchg %eax,%ebx
25981 + pax_force_retaddr
25982 ret
25983
25984 #define sk_negative_common(SIZE) \
25985 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25986 sk_negative_common(4)
25987 mov (%rax), %eax
25988 bswap %eax
25989 + pax_force_retaddr
25990 ret
25991
25992 bpf_slow_path_half_neg:
25993 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25994 mov (%rax),%ax
25995 rol $8,%ax
25996 movzwl %ax,%eax
25997 + pax_force_retaddr
25998 ret
25999
26000 bpf_slow_path_byte_neg:
26001 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
26002 .globl sk_load_byte_negative_offset
26003 sk_negative_common(1)
26004 movzbl (%rax), %eax
26005 + pax_force_retaddr
26006 ret
26007
26008 bpf_slow_path_byte_msh_neg:
26009 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
26010 and $15,%al
26011 shl $2,%al
26012 xchg %eax,%ebx
26013 + pax_force_retaddr
26014 ret
26015
26016 bpf_error:
26017 @@ -197,4 +210,5 @@ bpf_error:
26018 xor %eax,%eax
26019 mov -8(%rbp),%rbx
26020 leaveq
26021 + pax_force_retaddr
26022 ret
26023 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26024 index 0597f95..a12c36e 100644
26025 --- a/arch/x86/net/bpf_jit_comp.c
26026 +++ b/arch/x86/net/bpf_jit_comp.c
26027 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
26028 set_fs(old_fs);
26029 }
26030
26031 +struct bpf_jit_work {
26032 + struct work_struct work;
26033 + void *image;
26034 +};
26035 +
26036 #define CHOOSE_LOAD_FUNC(K, func) \
26037 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26038
26039 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26040 if (addrs == NULL)
26041 return;
26042
26043 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26044 + if (!fp->work)
26045 + goto out;
26046 +
26047 /* Before first pass, make a rough estimation of addrs[]
26048 * each bpf instruction is translated to less than 64 bytes
26049 */
26050 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26051 break;
26052 default:
26053 /* hmm, too complex filter, give up with jit compiler */
26054 - goto out;
26055 + goto error;
26056 }
26057 ilen = prog - temp;
26058 if (image) {
26059 if (unlikely(proglen + ilen > oldproglen)) {
26060 pr_err("bpb_jit_compile fatal error\n");
26061 - kfree(addrs);
26062 - module_free(NULL, image);
26063 - return;
26064 + module_free_exec(NULL, image);
26065 + goto error;
26066 }
26067 + pax_open_kernel();
26068 memcpy(image + proglen, temp, ilen);
26069 + pax_close_kernel();
26070 }
26071 proglen += ilen;
26072 addrs[i] = proglen;
26073 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26074 break;
26075 }
26076 if (proglen == oldproglen) {
26077 - image = module_alloc(max_t(unsigned int,
26078 - proglen,
26079 - sizeof(struct work_struct)));
26080 + image = module_alloc_exec(proglen);
26081 if (!image)
26082 - goto out;
26083 + goto error;
26084 }
26085 oldproglen = proglen;
26086 }
26087 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26088 bpf_flush_icache(image, image + proglen);
26089
26090 fp->bpf_func = (void *)image;
26091 - }
26092 + } else
26093 +error:
26094 + kfree(fp->work);
26095 +
26096 out:
26097 kfree(addrs);
26098 return;
26099 @@ -648,18 +659,20 @@ out:
26100
26101 static void jit_free_defer(struct work_struct *arg)
26102 {
26103 - module_free(NULL, arg);
26104 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26105 + kfree(arg);
26106 }
26107
26108 /* run from softirq, we must use a work_struct to call
26109 - * module_free() from process context
26110 + * module_free_exec() from process context
26111 */
26112 void bpf_jit_free(struct sk_filter *fp)
26113 {
26114 if (fp->bpf_func != sk_run_filter) {
26115 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26116 + struct work_struct *work = &fp->work->work;
26117
26118 INIT_WORK(work, jit_free_defer);
26119 + fp->work->image = fp->bpf_func;
26120 schedule_work(work);
26121 }
26122 }
26123 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26124 index d6aa6e8..266395a 100644
26125 --- a/arch/x86/oprofile/backtrace.c
26126 +++ b/arch/x86/oprofile/backtrace.c
26127 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26128 struct stack_frame_ia32 *fp;
26129 unsigned long bytes;
26130
26131 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26132 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26133 if (bytes != sizeof(bufhead))
26134 return NULL;
26135
26136 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26137 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26138
26139 oprofile_add_trace(bufhead[0].return_address);
26140
26141 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26142 struct stack_frame bufhead[2];
26143 unsigned long bytes;
26144
26145 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26146 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26147 if (bytes != sizeof(bufhead))
26148 return NULL;
26149
26150 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26151 {
26152 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26153
26154 - if (!user_mode_vm(regs)) {
26155 + if (!user_mode(regs)) {
26156 unsigned long stack = kernel_stack_pointer(regs);
26157 if (depth)
26158 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26159 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26160 index 140942f..8a5cc55 100644
26161 --- a/arch/x86/pci/mrst.c
26162 +++ b/arch/x86/pci/mrst.c
26163 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26164 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26165 pci_mmcfg_late_init();
26166 pcibios_enable_irq = mrst_pci_irq_enable;
26167 - pci_root_ops = pci_mrst_ops;
26168 + pax_open_kernel();
26169 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26170 + pax_close_kernel();
26171 pci_soc_mode = 1;
26172 /* Continue with standard init */
26173 return 1;
26174 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26175 index da8fe05..7ee6704 100644
26176 --- a/arch/x86/pci/pcbios.c
26177 +++ b/arch/x86/pci/pcbios.c
26178 @@ -79,50 +79,93 @@ union bios32 {
26179 static struct {
26180 unsigned long address;
26181 unsigned short segment;
26182 -} bios32_indirect = { 0, __KERNEL_CS };
26183 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26184
26185 /*
26186 * Returns the entry point for the given service, NULL on error
26187 */
26188
26189 -static unsigned long bios32_service(unsigned long service)
26190 +static unsigned long __devinit bios32_service(unsigned long service)
26191 {
26192 unsigned char return_code; /* %al */
26193 unsigned long address; /* %ebx */
26194 unsigned long length; /* %ecx */
26195 unsigned long entry; /* %edx */
26196 unsigned long flags;
26197 + struct desc_struct d, *gdt;
26198
26199 local_irq_save(flags);
26200 - __asm__("lcall *(%%edi); cld"
26201 +
26202 + gdt = get_cpu_gdt_table(smp_processor_id());
26203 +
26204 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26205 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26206 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26207 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26208 +
26209 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26210 : "=a" (return_code),
26211 "=b" (address),
26212 "=c" (length),
26213 "=d" (entry)
26214 : "0" (service),
26215 "1" (0),
26216 - "D" (&bios32_indirect));
26217 + "D" (&bios32_indirect),
26218 + "r"(__PCIBIOS_DS)
26219 + : "memory");
26220 +
26221 + pax_open_kernel();
26222 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26223 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26224 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26225 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26226 + pax_close_kernel();
26227 +
26228 local_irq_restore(flags);
26229
26230 switch (return_code) {
26231 - case 0:
26232 - return address + entry;
26233 - case 0x80: /* Not present */
26234 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26235 - return 0;
26236 - default: /* Shouldn't happen */
26237 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26238 - service, return_code);
26239 + case 0: {
26240 + int cpu;
26241 + unsigned char flags;
26242 +
26243 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26244 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26245 + printk(KERN_WARNING "bios32_service: not valid\n");
26246 return 0;
26247 + }
26248 + address = address + PAGE_OFFSET;
26249 + length += 16UL; /* some BIOSs underreport this... */
26250 + flags = 4;
26251 + if (length >= 64*1024*1024) {
26252 + length >>= PAGE_SHIFT;
26253 + flags |= 8;
26254 + }
26255 +
26256 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26257 + gdt = get_cpu_gdt_table(cpu);
26258 + pack_descriptor(&d, address, length, 0x9b, flags);
26259 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26260 + pack_descriptor(&d, address, length, 0x93, flags);
26261 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26262 + }
26263 + return entry;
26264 + }
26265 + case 0x80: /* Not present */
26266 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26267 + return 0;
26268 + default: /* Shouldn't happen */
26269 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26270 + service, return_code);
26271 + return 0;
26272 }
26273 }
26274
26275 static struct {
26276 unsigned long address;
26277 unsigned short segment;
26278 -} pci_indirect = { 0, __KERNEL_CS };
26279 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26280
26281 -static int pci_bios_present;
26282 +static int pci_bios_present __read_only;
26283
26284 static int __devinit check_pcibios(void)
26285 {
26286 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26287 unsigned long flags, pcibios_entry;
26288
26289 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26290 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26291 + pci_indirect.address = pcibios_entry;
26292
26293 local_irq_save(flags);
26294 - __asm__(
26295 - "lcall *(%%edi); cld\n\t"
26296 + __asm__("movw %w6, %%ds\n\t"
26297 + "lcall *%%ss:(%%edi); cld\n\t"
26298 + "push %%ss\n\t"
26299 + "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26304 "=b" (ebx),
26305 "=c" (ecx)
26306 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26307 - "D" (&pci_indirect)
26308 + "D" (&pci_indirect),
26309 + "r" (__PCIBIOS_DS)
26310 : "memory");
26311 local_irq_restore(flags);
26312
26313 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26314
26315 switch (len) {
26316 case 1:
26317 - __asm__("lcall *(%%esi); cld\n\t"
26318 + __asm__("movw %w6, %%ds\n\t"
26319 + "lcall *%%ss:(%%esi); cld\n\t"
26320 + "push %%ss\n\t"
26321 + "pop %%ds\n\t"
26322 "jc 1f\n\t"
26323 "xor %%ah, %%ah\n"
26324 "1:"
26325 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26326 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26327 "b" (bx),
26328 "D" ((long)reg),
26329 - "S" (&pci_indirect));
26330 + "S" (&pci_indirect),
26331 + "r" (__PCIBIOS_DS));
26332 /*
26333 * Zero-extend the result beyond 8 bits, do not trust the
26334 * BIOS having done it:
26335 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26336 *value &= 0xff;
26337 break;
26338 case 2:
26339 - __asm__("lcall *(%%esi); cld\n\t"
26340 + __asm__("movw %w6, %%ds\n\t"
26341 + "lcall *%%ss:(%%esi); cld\n\t"
26342 + "push %%ss\n\t"
26343 + "pop %%ds\n\t"
26344 "jc 1f\n\t"
26345 "xor %%ah, %%ah\n"
26346 "1:"
26347 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26348 : "1" (PCIBIOS_READ_CONFIG_WORD),
26349 "b" (bx),
26350 "D" ((long)reg),
26351 - "S" (&pci_indirect));
26352 + "S" (&pci_indirect),
26353 + "r" (__PCIBIOS_DS));
26354 /*
26355 * Zero-extend the result beyond 16 bits, do not trust the
26356 * BIOS having done it:
26357 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26358 *value &= 0xffff;
26359 break;
26360 case 4:
26361 - __asm__("lcall *(%%esi); cld\n\t"
26362 + __asm__("movw %w6, %%ds\n\t"
26363 + "lcall *%%ss:(%%esi); cld\n\t"
26364 + "push %%ss\n\t"
26365 + "pop %%ds\n\t"
26366 "jc 1f\n\t"
26367 "xor %%ah, %%ah\n"
26368 "1:"
26369 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26370 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26371 "b" (bx),
26372 "D" ((long)reg),
26373 - "S" (&pci_indirect));
26374 + "S" (&pci_indirect),
26375 + "r" (__PCIBIOS_DS));
26376 break;
26377 }
26378
26379 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26380
26381 switch (len) {
26382 case 1:
26383 - __asm__("lcall *(%%esi); cld\n\t"
26384 + __asm__("movw %w6, %%ds\n\t"
26385 + "lcall *%%ss:(%%esi); cld\n\t"
26386 + "push %%ss\n\t"
26387 + "pop %%ds\n\t"
26388 "jc 1f\n\t"
26389 "xor %%ah, %%ah\n"
26390 "1:"
26391 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26392 "c" (value),
26393 "b" (bx),
26394 "D" ((long)reg),
26395 - "S" (&pci_indirect));
26396 + "S" (&pci_indirect),
26397 + "r" (__PCIBIOS_DS));
26398 break;
26399 case 2:
26400 - __asm__("lcall *(%%esi); cld\n\t"
26401 + __asm__("movw %w6, %%ds\n\t"
26402 + "lcall *%%ss:(%%esi); cld\n\t"
26403 + "push %%ss\n\t"
26404 + "pop %%ds\n\t"
26405 "jc 1f\n\t"
26406 "xor %%ah, %%ah\n"
26407 "1:"
26408 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26409 "c" (value),
26410 "b" (bx),
26411 "D" ((long)reg),
26412 - "S" (&pci_indirect));
26413 + "S" (&pci_indirect),
26414 + "r" (__PCIBIOS_DS));
26415 break;
26416 case 4:
26417 - __asm__("lcall *(%%esi); cld\n\t"
26418 + __asm__("movw %w6, %%ds\n\t"
26419 + "lcall *%%ss:(%%esi); cld\n\t"
26420 + "push %%ss\n\t"
26421 + "pop %%ds\n\t"
26422 "jc 1f\n\t"
26423 "xor %%ah, %%ah\n"
26424 "1:"
26425 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26426 "c" (value),
26427 "b" (bx),
26428 "D" ((long)reg),
26429 - "S" (&pci_indirect));
26430 + "S" (&pci_indirect),
26431 + "r" (__PCIBIOS_DS));
26432 break;
26433 }
26434
26435 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26436
26437 DBG("PCI: Fetching IRQ routing table... ");
26438 __asm__("push %%es\n\t"
26439 + "movw %w8, %%ds\n\t"
26440 "push %%ds\n\t"
26441 "pop %%es\n\t"
26442 - "lcall *(%%esi); cld\n\t"
26443 + "lcall *%%ss:(%%esi); cld\n\t"
26444 "pop %%es\n\t"
26445 + "push %%ss\n\t"
26446 + "pop %%ds\n"
26447 "jc 1f\n\t"
26448 "xor %%ah, %%ah\n"
26449 "1:"
26450 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26451 "1" (0),
26452 "D" ((long) &opt),
26453 "S" (&pci_indirect),
26454 - "m" (opt)
26455 + "m" (opt),
26456 + "r" (__PCIBIOS_DS)
26457 : "memory");
26458 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26459 if (ret & 0xff00)
26460 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26461 {
26462 int ret;
26463
26464 - __asm__("lcall *(%%esi); cld\n\t"
26465 + __asm__("movw %w5, %%ds\n\t"
26466 + "lcall *%%ss:(%%esi); cld\n\t"
26467 + "push %%ss\n\t"
26468 + "pop %%ds\n"
26469 "jc 1f\n\t"
26470 "xor %%ah, %%ah\n"
26471 "1:"
26472 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26473 : "0" (PCIBIOS_SET_PCI_HW_INT),
26474 "b" ((dev->bus->number << 8) | dev->devfn),
26475 "c" ((irq << 8) | (pin + 10)),
26476 - "S" (&pci_indirect));
26477 + "S" (&pci_indirect),
26478 + "r" (__PCIBIOS_DS));
26479 return !(ret & 0xff00);
26480 }
26481 EXPORT_SYMBOL(pcibios_set_irq_routing);
26482 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26483 index 40e4469..1ab536e 100644
26484 --- a/arch/x86/platform/efi/efi_32.c
26485 +++ b/arch/x86/platform/efi/efi_32.c
26486 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26487 {
26488 struct desc_ptr gdt_descr;
26489
26490 +#ifdef CONFIG_PAX_KERNEXEC
26491 + struct desc_struct d;
26492 +#endif
26493 +
26494 local_irq_save(efi_rt_eflags);
26495
26496 load_cr3(initial_page_table);
26497 __flush_tlb_all();
26498
26499 +#ifdef CONFIG_PAX_KERNEXEC
26500 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26501 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26502 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26503 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26504 +#endif
26505 +
26506 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26507 gdt_descr.size = GDT_SIZE - 1;
26508 load_gdt(&gdt_descr);
26509 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26510 {
26511 struct desc_ptr gdt_descr;
26512
26513 +#ifdef CONFIG_PAX_KERNEXEC
26514 + struct desc_struct d;
26515 +
26516 + memset(&d, 0, sizeof d);
26517 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26518 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26519 +#endif
26520 +
26521 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26522 gdt_descr.size = GDT_SIZE - 1;
26523 load_gdt(&gdt_descr);
26524 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26525 index fbe66e6..c5c0dd2 100644
26526 --- a/arch/x86/platform/efi/efi_stub_32.S
26527 +++ b/arch/x86/platform/efi/efi_stub_32.S
26528 @@ -6,7 +6,9 @@
26529 */
26530
26531 #include <linux/linkage.h>
26532 +#include <linux/init.h>
26533 #include <asm/page_types.h>
26534 +#include <asm/segment.h>
26535
26536 /*
26537 * efi_call_phys(void *, ...) is a function with variable parameters.
26538 @@ -20,7 +22,7 @@
26539 * service functions will comply with gcc calling convention, too.
26540 */
26541
26542 -.text
26543 +__INIT
26544 ENTRY(efi_call_phys)
26545 /*
26546 * 0. The function can only be called in Linux kernel. So CS has been
26547 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26548 * The mapping of lower virtual memory has been created in prelog and
26549 * epilog.
26550 */
26551 - movl $1f, %edx
26552 - subl $__PAGE_OFFSET, %edx
26553 - jmp *%edx
26554 + movl $(__KERNEXEC_EFI_DS), %edx
26555 + mov %edx, %ds
26556 + mov %edx, %es
26557 + mov %edx, %ss
26558 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26559 1:
26560
26561 /*
26562 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26563 * parameter 2, ..., param n. To make things easy, we save the return
26564 * address of efi_call_phys in a global variable.
26565 */
26566 - popl %edx
26567 - movl %edx, saved_return_addr
26568 - /* get the function pointer into ECX*/
26569 - popl %ecx
26570 - movl %ecx, efi_rt_function_ptr
26571 - movl $2f, %edx
26572 - subl $__PAGE_OFFSET, %edx
26573 - pushl %edx
26574 + popl (saved_return_addr)
26575 + popl (efi_rt_function_ptr)
26576
26577 /*
26578 * 3. Clear PG bit in %CR0.
26579 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26580 /*
26581 * 5. Call the physical function.
26582 */
26583 - jmp *%ecx
26584 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26585
26586 -2:
26587 /*
26588 * 6. After EFI runtime service returns, control will return to
26589 * following instruction. We'd better readjust stack pointer first.
26590 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26591 movl %cr0, %edx
26592 orl $0x80000000, %edx
26593 movl %edx, %cr0
26594 - jmp 1f
26595 -1:
26596 +
26597 /*
26598 * 8. Now restore the virtual mode from flat mode by
26599 * adding EIP with PAGE_OFFSET.
26600 */
26601 - movl $1f, %edx
26602 - jmp *%edx
26603 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26604 1:
26605 + movl $(__KERNEL_DS), %edx
26606 + mov %edx, %ds
26607 + mov %edx, %es
26608 + mov %edx, %ss
26609
26610 /*
26611 * 9. Balance the stack. And because EAX contain the return value,
26612 * we'd better not clobber it.
26613 */
26614 - leal efi_rt_function_ptr, %edx
26615 - movl (%edx), %ecx
26616 - pushl %ecx
26617 + pushl (efi_rt_function_ptr)
26618
26619 /*
26620 - * 10. Push the saved return address onto the stack and return.
26621 + * 10. Return to the saved return address.
26622 */
26623 - leal saved_return_addr, %edx
26624 - movl (%edx), %ecx
26625 - pushl %ecx
26626 - ret
26627 + jmpl *(saved_return_addr)
26628 ENDPROC(efi_call_phys)
26629 .previous
26630
26631 -.data
26632 +__INITDATA
26633 saved_return_addr:
26634 .long 0
26635 efi_rt_function_ptr:
26636 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26637 index 4c07cca..2c8427d 100644
26638 --- a/arch/x86/platform/efi/efi_stub_64.S
26639 +++ b/arch/x86/platform/efi/efi_stub_64.S
26640 @@ -7,6 +7,7 @@
26641 */
26642
26643 #include <linux/linkage.h>
26644 +#include <asm/alternative-asm.h>
26645
26646 #define SAVE_XMM \
26647 mov %rsp, %rax; \
26648 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26649 call *%rdi
26650 addq $32, %rsp
26651 RESTORE_XMM
26652 + pax_force_retaddr 0, 1
26653 ret
26654 ENDPROC(efi_call0)
26655
26656 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26657 call *%rdi
26658 addq $32, %rsp
26659 RESTORE_XMM
26660 + pax_force_retaddr 0, 1
26661 ret
26662 ENDPROC(efi_call1)
26663
26664 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26665 call *%rdi
26666 addq $32, %rsp
26667 RESTORE_XMM
26668 + pax_force_retaddr 0, 1
26669 ret
26670 ENDPROC(efi_call2)
26671
26672 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26673 call *%rdi
26674 addq $32, %rsp
26675 RESTORE_XMM
26676 + pax_force_retaddr 0, 1
26677 ret
26678 ENDPROC(efi_call3)
26679
26680 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26681 call *%rdi
26682 addq $32, %rsp
26683 RESTORE_XMM
26684 + pax_force_retaddr 0, 1
26685 ret
26686 ENDPROC(efi_call4)
26687
26688 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26689 call *%rdi
26690 addq $48, %rsp
26691 RESTORE_XMM
26692 + pax_force_retaddr 0, 1
26693 ret
26694 ENDPROC(efi_call5)
26695
26696 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26697 call *%rdi
26698 addq $48, %rsp
26699 RESTORE_XMM
26700 + pax_force_retaddr 0, 1
26701 ret
26702 ENDPROC(efi_call6)
26703 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26704 index e31bcd8..f12dc46 100644
26705 --- a/arch/x86/platform/mrst/mrst.c
26706 +++ b/arch/x86/platform/mrst/mrst.c
26707 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26708 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26709 int sfi_mrtc_num;
26710
26711 -static void mrst_power_off(void)
26712 +static __noreturn void mrst_power_off(void)
26713 {
26714 + BUG();
26715 }
26716
26717 -static void mrst_reboot(void)
26718 +static __noreturn void mrst_reboot(void)
26719 {
26720 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26721 + BUG();
26722 }
26723
26724 /* parse all the mtimer info to a static mtimer array */
26725 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26726 index 218cdb1..fd55c08 100644
26727 --- a/arch/x86/power/cpu.c
26728 +++ b/arch/x86/power/cpu.c
26729 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26730 static void fix_processor_context(void)
26731 {
26732 int cpu = smp_processor_id();
26733 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26734 + struct tss_struct *t = init_tss + cpu;
26735
26736 set_tss_desc(cpu, t); /*
26737 * This just modifies memory; should not be
26738 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26739 */
26740
26741 #ifdef CONFIG_X86_64
26742 + pax_open_kernel();
26743 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26744 + pax_close_kernel();
26745
26746 syscall_init(); /* This sets MSR_*STAR and related */
26747 #endif
26748 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26749 index b685296..e00eb65 100644
26750 --- a/arch/x86/tools/relocs.c
26751 +++ b/arch/x86/tools/relocs.c
26752 @@ -12,10 +12,13 @@
26753 #include <regex.h>
26754 #include <tools/le_byteshift.h>
26755
26756 +#include "../../../include/generated/autoconf.h"
26757 +
26758 static void die(char *fmt, ...);
26759
26760 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26761 static Elf32_Ehdr ehdr;
26762 +static Elf32_Phdr *phdr;
26763 static unsigned long reloc_count, reloc_idx;
26764 static unsigned long *relocs;
26765 static unsigned long reloc16_count, reloc16_idx;
26766 @@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26767 }
26768 }
26769
26770 +static void read_phdrs(FILE *fp)
26771 +{
26772 + unsigned int i;
26773 +
26774 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26775 + if (!phdr) {
26776 + die("Unable to allocate %d program headers\n",
26777 + ehdr.e_phnum);
26778 + }
26779 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26780 + die("Seek to %d failed: %s\n",
26781 + ehdr.e_phoff, strerror(errno));
26782 + }
26783 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26784 + die("Cannot read ELF program headers: %s\n",
26785 + strerror(errno));
26786 + }
26787 + for(i = 0; i < ehdr.e_phnum; i++) {
26788 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26789 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26790 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26791 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26792 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26793 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26794 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26795 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26796 + }
26797 +
26798 +}
26799 +
26800 static void read_shdrs(FILE *fp)
26801 {
26802 - int i;
26803 + unsigned int i;
26804 Elf32_Shdr shdr;
26805
26806 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26807 @@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26808
26809 static void read_strtabs(FILE *fp)
26810 {
26811 - int i;
26812 + unsigned int i;
26813 for (i = 0; i < ehdr.e_shnum; i++) {
26814 struct section *sec = &secs[i];
26815 if (sec->shdr.sh_type != SHT_STRTAB) {
26816 @@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26817
26818 static void read_symtabs(FILE *fp)
26819 {
26820 - int i,j;
26821 + unsigned int i,j;
26822 for (i = 0; i < ehdr.e_shnum; i++) {
26823 struct section *sec = &secs[i];
26824 if (sec->shdr.sh_type != SHT_SYMTAB) {
26825 @@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26826
26827 static void read_relocs(FILE *fp)
26828 {
26829 - int i,j;
26830 + unsigned int i,j;
26831 + uint32_t base;
26832 +
26833 for (i = 0; i < ehdr.e_shnum; i++) {
26834 struct section *sec = &secs[i];
26835 if (sec->shdr.sh_type != SHT_REL) {
26836 @@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26837 die("Cannot read symbol table: %s\n",
26838 strerror(errno));
26839 }
26840 + base = 0;
26841 +
26842 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26843 + for (j = 0; j < ehdr.e_phnum; j++) {
26844 + if (phdr[j].p_type != PT_LOAD )
26845 + continue;
26846 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26847 + continue;
26848 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26849 + break;
26850 + }
26851 +#endif
26852 +
26853 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26854 Elf32_Rel *rel = &sec->reltab[j];
26855 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26856 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26857 rel->r_info = elf32_to_cpu(rel->r_info);
26858 }
26859 }
26860 @@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26861
26862 static void print_absolute_symbols(void)
26863 {
26864 - int i;
26865 + unsigned int i;
26866 printf("Absolute symbols\n");
26867 printf(" Num: Value Size Type Bind Visibility Name\n");
26868 for (i = 0; i < ehdr.e_shnum; i++) {
26869 struct section *sec = &secs[i];
26870 char *sym_strtab;
26871 - int j;
26872 + unsigned int j;
26873
26874 if (sec->shdr.sh_type != SHT_SYMTAB) {
26875 continue;
26876 @@ -482,14 +530,14 @@ static void print_absolute_symbols(void)
26877
26878 static void print_absolute_relocs(void)
26879 {
26880 - int i, printed = 0;
26881 + unsigned int i, printed = 0;
26882
26883 for (i = 0; i < ehdr.e_shnum; i++) {
26884 struct section *sec = &secs[i];
26885 struct section *sec_applies, *sec_symtab;
26886 char *sym_strtab;
26887 Elf32_Sym *sh_symtab;
26888 - int j;
26889 + unsigned int j;
26890 if (sec->shdr.sh_type != SHT_REL) {
26891 continue;
26892 }
26893 @@ -551,13 +599,13 @@ static void print_absolute_relocs(void)
26894 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26895 int use_real_mode)
26896 {
26897 - int i;
26898 + unsigned int i;
26899 /* Walk through the relocations */
26900 for (i = 0; i < ehdr.e_shnum; i++) {
26901 char *sym_strtab;
26902 Elf32_Sym *sh_symtab;
26903 struct section *sec_applies, *sec_symtab;
26904 - int j;
26905 + unsigned int j;
26906 struct section *sec = &secs[i];
26907
26908 if (sec->shdr.sh_type != SHT_REL) {
26909 @@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26910 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26911 r_type = ELF32_R_TYPE(rel->r_info);
26912
26913 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26914 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26915 + continue;
26916 +
26917 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26918 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26919 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26920 + continue;
26921 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26922 + continue;
26923 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26924 + continue;
26925 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26926 + continue;
26927 +#endif
26928 +
26929 shn_abs = sym->st_shndx == SHN_ABS;
26930
26931 switch (r_type) {
26932 @@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26933
26934 static void emit_relocs(int as_text, int use_real_mode)
26935 {
26936 - int i;
26937 + unsigned int i;
26938 /* Count how many relocations I have and allocate space for them. */
26939 reloc_count = 0;
26940 walk_relocs(count_reloc, use_real_mode);
26941 @@ -801,6 +865,7 @@ int main(int argc, char **argv)
26942 fname, strerror(errno));
26943 }
26944 read_ehdr(fp);
26945 + read_phdrs(fp);
26946 read_shdrs(fp);
26947 read_strtabs(fp);
26948 read_symtabs(fp);
26949 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26950 index fd14be1..e3c79c0 100644
26951 --- a/arch/x86/vdso/Makefile
26952 +++ b/arch/x86/vdso/Makefile
26953 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26954 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26955 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26956
26957 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26958 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26959 GCOV_PROFILE := n
26960
26961 #
26962 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26963 index 66e6d93..587f435 100644
26964 --- a/arch/x86/vdso/vdso32-setup.c
26965 +++ b/arch/x86/vdso/vdso32-setup.c
26966 @@ -25,6 +25,7 @@
26967 #include <asm/tlbflush.h>
26968 #include <asm/vdso.h>
26969 #include <asm/proto.h>
26970 +#include <asm/mman.h>
26971
26972 enum {
26973 VDSO_DISABLED = 0,
26974 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26975 void enable_sep_cpu(void)
26976 {
26977 int cpu = get_cpu();
26978 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26979 + struct tss_struct *tss = init_tss + cpu;
26980
26981 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26982 put_cpu();
26983 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26984 gate_vma.vm_start = FIXADDR_USER_START;
26985 gate_vma.vm_end = FIXADDR_USER_END;
26986 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26987 - gate_vma.vm_page_prot = __P101;
26988 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26989
26990 return 0;
26991 }
26992 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26993 if (compat)
26994 addr = VDSO_HIGH_BASE;
26995 else {
26996 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26997 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26998 if (IS_ERR_VALUE(addr)) {
26999 ret = addr;
27000 goto up_fail;
27001 }
27002 }
27003
27004 - current->mm->context.vdso = (void *)addr;
27005 + current->mm->context.vdso = addr;
27006
27007 if (compat_uses_vma || !compat) {
27008 /*
27009 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27010 }
27011
27012 current_thread_info()->sysenter_return =
27013 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27014 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27015
27016 up_fail:
27017 if (ret)
27018 - current->mm->context.vdso = NULL;
27019 + current->mm->context.vdso = 0;
27020
27021 up_write(&mm->mmap_sem);
27022
27023 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27024
27025 const char *arch_vma_name(struct vm_area_struct *vma)
27026 {
27027 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27028 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27029 return "[vdso]";
27030 +
27031 +#ifdef CONFIG_PAX_SEGMEXEC
27032 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27033 + return "[vdso]";
27034 +#endif
27035 +
27036 return NULL;
27037 }
27038
27039 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27040 * Check to see if the corresponding task was created in compat vdso
27041 * mode.
27042 */
27043 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27044 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27045 return &gate_vma;
27046 return NULL;
27047 }
27048 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27049 index 00aaf04..4a26505 100644
27050 --- a/arch/x86/vdso/vma.c
27051 +++ b/arch/x86/vdso/vma.c
27052 @@ -16,8 +16,6 @@
27053 #include <asm/vdso.h>
27054 #include <asm/page.h>
27055
27056 -unsigned int __read_mostly vdso_enabled = 1;
27057 -
27058 extern char vdso_start[], vdso_end[];
27059 extern unsigned short vdso_sync_cpuid;
27060
27061 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27062 * unaligned here as a result of stack start randomization.
27063 */
27064 addr = PAGE_ALIGN(addr);
27065 - addr = align_addr(addr, NULL, ALIGN_VDSO);
27066
27067 return addr;
27068 }
27069 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27070 unsigned size)
27071 {
27072 struct mm_struct *mm = current->mm;
27073 - unsigned long addr;
27074 + unsigned long addr = 0;
27075 int ret;
27076
27077 - if (!vdso_enabled)
27078 - return 0;
27079 -
27080 down_write(&mm->mmap_sem);
27081 +
27082 +#ifdef CONFIG_PAX_RANDMMAP
27083 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27084 +#endif
27085 +
27086 addr = vdso_addr(mm->start_stack, size);
27087 + addr = align_addr(addr, NULL, ALIGN_VDSO);
27088 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27089 if (IS_ERR_VALUE(addr)) {
27090 ret = addr;
27091 goto up_fail;
27092 }
27093
27094 - current->mm->context.vdso = (void *)addr;
27095 + mm->context.vdso = addr;
27096
27097 ret = install_special_mapping(mm, addr, size,
27098 VM_READ|VM_EXEC|
27099 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27100 pages);
27101 - if (ret) {
27102 - current->mm->context.vdso = NULL;
27103 - goto up_fail;
27104 - }
27105 + if (ret)
27106 + mm->context.vdso = 0;
27107
27108 up_fail:
27109 up_write(&mm->mmap_sem);
27110 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 vdsox32_size);
27112 }
27113 #endif
27114 -
27115 -static __init int vdso_setup(char *s)
27116 -{
27117 - vdso_enabled = simple_strtoul(s, NULL, 0);
27118 - return 0;
27119 -}
27120 -__setup("vdso=", vdso_setup);
27121 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27122 index 40edfc3..b4d80ac 100644
27123 --- a/arch/x86/xen/enlighten.c
27124 +++ b/arch/x86/xen/enlighten.c
27125 @@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27126
27127 struct shared_info xen_dummy_shared_info;
27128
27129 -void *xen_initial_gdt;
27130 -
27131 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27132 __read_mostly int xen_have_vector_callback;
27133 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27134 @@ -1165,30 +1163,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27135 #endif
27136 };
27137
27138 -static void xen_reboot(int reason)
27139 +static __noreturn void xen_reboot(int reason)
27140 {
27141 struct sched_shutdown r = { .reason = reason };
27142
27143 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27144 - BUG();
27145 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27146 + BUG();
27147 }
27148
27149 -static void xen_restart(char *msg)
27150 +static __noreturn void xen_restart(char *msg)
27151 {
27152 xen_reboot(SHUTDOWN_reboot);
27153 }
27154
27155 -static void xen_emergency_restart(void)
27156 +static __noreturn void xen_emergency_restart(void)
27157 {
27158 xen_reboot(SHUTDOWN_reboot);
27159 }
27160
27161 -static void xen_machine_halt(void)
27162 +static __noreturn void xen_machine_halt(void)
27163 {
27164 xen_reboot(SHUTDOWN_poweroff);
27165 }
27166
27167 -static void xen_machine_power_off(void)
27168 +static __noreturn void xen_machine_power_off(void)
27169 {
27170 if (pm_power_off)
27171 pm_power_off();
27172 @@ -1291,7 +1289,17 @@ asmlinkage void __init xen_start_kernel(void)
27173 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27174
27175 /* Work out if we support NX */
27176 - x86_configure_nx();
27177 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27178 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27179 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27180 + unsigned l, h;
27181 +
27182 + __supported_pte_mask |= _PAGE_NX;
27183 + rdmsr(MSR_EFER, l, h);
27184 + l |= EFER_NX;
27185 + wrmsr(MSR_EFER, l, h);
27186 + }
27187 +#endif
27188
27189 xen_setup_features();
27190
27191 @@ -1322,13 +1330,6 @@ asmlinkage void __init xen_start_kernel(void)
27192
27193 machine_ops = xen_machine_ops;
27194
27195 - /*
27196 - * The only reliable way to retain the initial address of the
27197 - * percpu gdt_page is to remember it here, so we can go and
27198 - * mark it RW later, when the initial percpu area is freed.
27199 - */
27200 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27201 -
27202 xen_smp_init();
27203
27204 #ifdef CONFIG_ACPI_NUMA
27205 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27206 index 69f5857..0699dc5 100644
27207 --- a/arch/x86/xen/mmu.c
27208 +++ b/arch/x86/xen/mmu.c
27209 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27210 convert_pfn_mfn(init_level4_pgt);
27211 convert_pfn_mfn(level3_ident_pgt);
27212 convert_pfn_mfn(level3_kernel_pgt);
27213 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27214 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27215 + convert_pfn_mfn(level3_vmemmap_pgt);
27216
27217 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27218 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27219 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27220 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27221 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27222 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27223 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27224 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27225 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27226 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27227 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27228 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27229 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27230
27231 @@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27232 pv_mmu_ops.set_pud = xen_set_pud;
27233 #if PAGETABLE_LEVELS == 4
27234 pv_mmu_ops.set_pgd = xen_set_pgd;
27235 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27236 #endif
27237
27238 /* This will work as long as patching hasn't happened yet
27239 @@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27240 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27241 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27242 .set_pgd = xen_set_pgd_hyper,
27243 + .set_pgd_batched = xen_set_pgd_hyper,
27244
27245 .alloc_pud = xen_alloc_pmd_init,
27246 .release_pud = xen_release_pmd_init,
27247 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27248 index 0503c0c..ceb2d16 100644
27249 --- a/arch/x86/xen/smp.c
27250 +++ b/arch/x86/xen/smp.c
27251 @@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27252 {
27253 BUG_ON(smp_processor_id() != 0);
27254 native_smp_prepare_boot_cpu();
27255 -
27256 - /* We've switched to the "real" per-cpu gdt, so make sure the
27257 - old memory can be recycled */
27258 - make_lowmem_page_readwrite(xen_initial_gdt);
27259 -
27260 xen_filter_cpu_maps();
27261 xen_setup_vcpu_info_placement();
27262 }
27263 @@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27264 gdt = get_cpu_gdt_table(cpu);
27265
27266 ctxt->flags = VGCF_IN_KERNEL;
27267 - ctxt->user_regs.ds = __USER_DS;
27268 - ctxt->user_regs.es = __USER_DS;
27269 + ctxt->user_regs.ds = __KERNEL_DS;
27270 + ctxt->user_regs.es = __KERNEL_DS;
27271 ctxt->user_regs.ss = __KERNEL_DS;
27272 #ifdef CONFIG_X86_32
27273 ctxt->user_regs.fs = __KERNEL_PERCPU;
27274 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27275 + savesegment(gs, ctxt->user_regs.gs);
27276 #else
27277 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27278 #endif
27279 @@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27280 int rc;
27281
27282 per_cpu(current_task, cpu) = idle;
27283 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27284 #ifdef CONFIG_X86_32
27285 irq_ctx_init(cpu);
27286 #else
27287 clear_tsk_thread_flag(idle, TIF_FORK);
27288 - per_cpu(kernel_stack, cpu) =
27289 - (unsigned long)task_stack_page(idle) -
27290 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27291 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27292 #endif
27293 xen_setup_runstate_info(cpu);
27294 xen_setup_timer(cpu);
27295 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27296 index b040b0e..8cc4fe0 100644
27297 --- a/arch/x86/xen/xen-asm_32.S
27298 +++ b/arch/x86/xen/xen-asm_32.S
27299 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27300 ESP_OFFSET=4 # bytes pushed onto stack
27301
27302 /*
27303 - * Store vcpu_info pointer for easy access. Do it this way to
27304 - * avoid having to reload %fs
27305 + * Store vcpu_info pointer for easy access.
27306 */
27307 #ifdef CONFIG_SMP
27308 - GET_THREAD_INFO(%eax)
27309 - movl TI_cpu(%eax), %eax
27310 - movl __per_cpu_offset(,%eax,4), %eax
27311 - mov xen_vcpu(%eax), %eax
27312 + push %fs
27313 + mov $(__KERNEL_PERCPU), %eax
27314 + mov %eax, %fs
27315 + mov PER_CPU_VAR(xen_vcpu), %eax
27316 + pop %fs
27317 #else
27318 movl xen_vcpu, %eax
27319 #endif
27320 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27321 index aaa7291..3f77960 100644
27322 --- a/arch/x86/xen/xen-head.S
27323 +++ b/arch/x86/xen/xen-head.S
27324 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27325 #ifdef CONFIG_X86_32
27326 mov %esi,xen_start_info
27327 mov $init_thread_union+THREAD_SIZE,%esp
27328 +#ifdef CONFIG_SMP
27329 + movl $cpu_gdt_table,%edi
27330 + movl $__per_cpu_load,%eax
27331 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27332 + rorl $16,%eax
27333 + movb %al,__KERNEL_PERCPU + 4(%edi)
27334 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27335 + movl $__per_cpu_end - 1,%eax
27336 + subl $__per_cpu_start,%eax
27337 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27338 +#endif
27339 #else
27340 mov %rsi,xen_start_info
27341 mov $init_thread_union+THREAD_SIZE,%rsp
27342 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27343 index b095739..8c17bcd 100644
27344 --- a/arch/x86/xen/xen-ops.h
27345 +++ b/arch/x86/xen/xen-ops.h
27346 @@ -10,8 +10,6 @@
27347 extern const char xen_hypervisor_callback[];
27348 extern const char xen_failsafe_callback[];
27349
27350 -extern void *xen_initial_gdt;
27351 -
27352 struct trap_info;
27353 void xen_copy_trap_info(struct trap_info *traps);
27354
27355 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27356 index 525bd3d..ef888b1 100644
27357 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27358 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27359 @@ -119,9 +119,9 @@
27360 ----------------------------------------------------------------------*/
27361
27362 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27363 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27364 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27365 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27366 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27367
27368 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27369 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27370 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27371 index 2f33760..835e50a 100644
27372 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27373 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27374 @@ -11,6 +11,7 @@
27375 #ifndef _XTENSA_CORE_H
27376 #define _XTENSA_CORE_H
27377
27378 +#include <linux/const.h>
27379
27380 /****************************************************************************
27381 Parameters Useful for Any Code, USER or PRIVILEGED
27382 @@ -112,9 +113,9 @@
27383 ----------------------------------------------------------------------*/
27384
27385 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27386 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27387 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27388 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27389 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27390
27391 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27392 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27393 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27394 index af00795..2bb8105 100644
27395 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27396 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27397 @@ -11,6 +11,7 @@
27398 #ifndef _XTENSA_CORE_CONFIGURATION_H
27399 #define _XTENSA_CORE_CONFIGURATION_H
27400
27401 +#include <linux/const.h>
27402
27403 /****************************************************************************
27404 Parameters Useful for Any Code, USER or PRIVILEGED
27405 @@ -118,9 +119,9 @@
27406 ----------------------------------------------------------------------*/
27407
27408 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27409 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27410 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27411 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27412 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27413
27414 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27415 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27416 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27417 index 58916af..9cb880b 100644
27418 --- a/block/blk-iopoll.c
27419 +++ b/block/blk-iopoll.c
27420 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27421 }
27422 EXPORT_SYMBOL(blk_iopoll_complete);
27423
27424 -static void blk_iopoll_softirq(struct softirq_action *h)
27425 +static void blk_iopoll_softirq(void)
27426 {
27427 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27428 int rearm = 0, budget = blk_iopoll_budget;
27429 diff --git a/block/blk-map.c b/block/blk-map.c
27430 index 623e1cd..ca1e109 100644
27431 --- a/block/blk-map.c
27432 +++ b/block/blk-map.c
27433 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27438 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443 index 467c8de..4bddc6d 100644
27444 --- a/block/blk-softirq.c
27445 +++ b/block/blk-softirq.c
27446 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450 -static void blk_done_softirq(struct softirq_action *h)
27451 +static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455 diff --git a/block/bsg.c b/block/bsg.c
27456 index ff64ae3..593560c 100644
27457 --- a/block/bsg.c
27458 +++ b/block/bsg.c
27459 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27460 struct sg_io_v4 *hdr, struct bsg_device *bd,
27461 fmode_t has_write_perm)
27462 {
27463 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27464 + unsigned char *cmdptr;
27465 +
27466 if (hdr->request_len > BLK_MAX_CDB) {
27467 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27468 if (!rq->cmd)
27469 return -ENOMEM;
27470 - }
27471 + cmdptr = rq->cmd;
27472 + } else
27473 + cmdptr = tmpcmd;
27474
27475 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27476 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27477 hdr->request_len))
27478 return -EFAULT;
27479
27480 + if (cmdptr != rq->cmd)
27481 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27482 +
27483 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27484 if (blk_verify_command(rq->cmd, has_write_perm))
27485 return -EPERM;
27486 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27487 index 7c668c8..db3521c 100644
27488 --- a/block/compat_ioctl.c
27489 +++ b/block/compat_ioctl.c
27490 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27491 err |= __get_user(f->spec1, &uf->spec1);
27492 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27493 err |= __get_user(name, &uf->name);
27494 - f->name = compat_ptr(name);
27495 + f->name = (void __force_kernel *)compat_ptr(name);
27496 if (err) {
27497 err = -EFAULT;
27498 goto out;
27499 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27500 index 6296b40..417c00f 100644
27501 --- a/block/partitions/efi.c
27502 +++ b/block/partitions/efi.c
27503 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27504 if (!gpt)
27505 return NULL;
27506
27507 + if (!le32_to_cpu(gpt->num_partition_entries))
27508 + return NULL;
27509 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27510 + if (!pte)
27511 + return NULL;
27512 +
27513 count = le32_to_cpu(gpt->num_partition_entries) *
27514 le32_to_cpu(gpt->sizeof_partition_entry);
27515 - if (!count)
27516 - return NULL;
27517 - pte = kzalloc(count, GFP_KERNEL);
27518 - if (!pte)
27519 - return NULL;
27520 -
27521 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27522 (u8 *) pte,
27523 count) < count) {
27524 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27525 index 260fa80..e8f3caf 100644
27526 --- a/block/scsi_ioctl.c
27527 +++ b/block/scsi_ioctl.c
27528 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27529 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27530 struct sg_io_hdr *hdr, fmode_t mode)
27531 {
27532 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27533 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27534 + unsigned char *cmdptr;
27535 +
27536 + if (rq->cmd != rq->__cmd)
27537 + cmdptr = rq->cmd;
27538 + else
27539 + cmdptr = tmpcmd;
27540 +
27541 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27542 return -EFAULT;
27543 +
27544 + if (cmdptr != rq->cmd)
27545 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27546 +
27547 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27548 return -EPERM;
27549
27550 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27551 int err;
27552 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27553 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27554 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27555 + unsigned char *cmdptr;
27556
27557 if (!sic)
27558 return -EINVAL;
27559 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27560 */
27561 err = -EFAULT;
27562 rq->cmd_len = cmdlen;
27563 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27564 +
27565 + if (rq->cmd != rq->__cmd)
27566 + cmdptr = rq->cmd;
27567 + else
27568 + cmdptr = tmpcmd;
27569 +
27570 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27571 goto error;
27572
27573 + if (rq->cmd != cmdptr)
27574 + memcpy(rq->cmd, cmdptr, cmdlen);
27575 +
27576 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27577 goto error;
27578
27579 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27580 index 671d4d6..5f24030 100644
27581 --- a/crypto/cryptd.c
27582 +++ b/crypto/cryptd.c
27583 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27584
27585 struct cryptd_blkcipher_request_ctx {
27586 crypto_completion_t complete;
27587 -};
27588 +} __no_const;
27589
27590 struct cryptd_hash_ctx {
27591 struct crypto_shash *child;
27592 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27593
27594 struct cryptd_aead_request_ctx {
27595 crypto_completion_t complete;
27596 -};
27597 +} __no_const;
27598
27599 static void cryptd_queue_worker(struct work_struct *work);
27600
27601 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27602 index e6defd8..c26a225 100644
27603 --- a/drivers/acpi/apei/cper.c
27604 +++ b/drivers/acpi/apei/cper.c
27605 @@ -38,12 +38,12 @@
27606 */
27607 u64 cper_next_record_id(void)
27608 {
27609 - static atomic64_t seq;
27610 + static atomic64_unchecked_t seq;
27611
27612 - if (!atomic64_read(&seq))
27613 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27614 + if (!atomic64_read_unchecked(&seq))
27615 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27616
27617 - return atomic64_inc_return(&seq);
27618 + return atomic64_inc_return_unchecked(&seq);
27619 }
27620 EXPORT_SYMBOL_GPL(cper_next_record_id);
27621
27622 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27623 index 7586544..636a2f0 100644
27624 --- a/drivers/acpi/ec_sys.c
27625 +++ b/drivers/acpi/ec_sys.c
27626 @@ -12,6 +12,7 @@
27627 #include <linux/acpi.h>
27628 #include <linux/debugfs.h>
27629 #include <linux/module.h>
27630 +#include <linux/uaccess.h>
27631 #include "internal.h"
27632
27633 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27634 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27635 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27636 */
27637 unsigned int size = EC_SPACE_SIZE;
27638 - u8 *data = (u8 *) buf;
27639 + u8 data;
27640 loff_t init_off = *off;
27641 int err = 0;
27642
27643 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27644 size = count;
27645
27646 while (size) {
27647 - err = ec_read(*off, &data[*off - init_off]);
27648 + err = ec_read(*off, &data);
27649 if (err)
27650 return err;
27651 + if (put_user(data, &buf[*off - init_off]))
27652 + return -EFAULT;
27653 *off += 1;
27654 size--;
27655 }
27656 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27657
27658 unsigned int size = count;
27659 loff_t init_off = *off;
27660 - u8 *data = (u8 *) buf;
27661 int err = 0;
27662
27663 if (*off >= EC_SPACE_SIZE)
27664 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27665 }
27666
27667 while (size) {
27668 - u8 byte_write = data[*off - init_off];
27669 + u8 byte_write;
27670 + if (get_user(byte_write, &buf[*off - init_off]))
27671 + return -EFAULT;
27672 err = ec_write(*off, byte_write);
27673 if (err)
27674 return err;
27675 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27676 index 251c7b62..000462d 100644
27677 --- a/drivers/acpi/proc.c
27678 +++ b/drivers/acpi/proc.c
27679 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27680 size_t count, loff_t * ppos)
27681 {
27682 struct list_head *node, *next;
27683 - char strbuf[5];
27684 - char str[5] = "";
27685 - unsigned int len = count;
27686 + char strbuf[5] = {0};
27687
27688 - if (len > 4)
27689 - len = 4;
27690 - if (len < 0)
27691 + if (count > 4)
27692 + count = 4;
27693 + if (copy_from_user(strbuf, buffer, count))
27694 return -EFAULT;
27695 -
27696 - if (copy_from_user(strbuf, buffer, len))
27697 - return -EFAULT;
27698 - strbuf[len] = '\0';
27699 - sscanf(strbuf, "%s", str);
27700 + strbuf[count] = '\0';
27701
27702 mutex_lock(&acpi_device_lock);
27703 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27704 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27705 if (!dev->wakeup.flags.valid)
27706 continue;
27707
27708 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27709 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27710 if (device_can_wakeup(&dev->dev)) {
27711 bool enable = !device_may_wakeup(&dev->dev);
27712 device_set_wakeup_enable(&dev->dev, enable);
27713 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27714 index 0734086..3ad3e4c 100644
27715 --- a/drivers/acpi/processor_driver.c
27716 +++ b/drivers/acpi/processor_driver.c
27717 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27718 return 0;
27719 #endif
27720
27721 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27722 + BUG_ON(pr->id >= nr_cpu_ids);
27723
27724 /*
27725 * Buggy BIOS check
27726 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27727 index d31ee55..8363a8b 100644
27728 --- a/drivers/ata/libata-core.c
27729 +++ b/drivers/ata/libata-core.c
27730 @@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27731 struct ata_port *ap;
27732 unsigned int tag;
27733
27734 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27735 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27736 ap = qc->ap;
27737
27738 qc->flags = 0;
27739 @@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27740 struct ata_port *ap;
27741 struct ata_link *link;
27742
27743 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27744 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27745 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27746 ap = qc->ap;
27747 link = qc->dev->link;
27748 @@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27749 return;
27750
27751 spin_lock(&lock);
27752 + pax_open_kernel();
27753
27754 for (cur = ops->inherits; cur; cur = cur->inherits) {
27755 void **inherit = (void **)cur;
27756 @@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27757 if (IS_ERR(*pp))
27758 *pp = NULL;
27759
27760 - ops->inherits = NULL;
27761 + *(struct ata_port_operations **)&ops->inherits = NULL;
27762
27763 + pax_close_kernel();
27764 spin_unlock(&lock);
27765 }
27766
27767 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27768 index 3239517..343b5f6 100644
27769 --- a/drivers/ata/pata_arasan_cf.c
27770 +++ b/drivers/ata/pata_arasan_cf.c
27771 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27772 /* Handle platform specific quirks */
27773 if (pdata->quirk) {
27774 if (pdata->quirk & CF_BROKEN_PIO) {
27775 - ap->ops->set_piomode = NULL;
27776 + pax_open_kernel();
27777 + *(void **)&ap->ops->set_piomode = NULL;
27778 + pax_close_kernel();
27779 ap->pio_mask = 0;
27780 }
27781 if (pdata->quirk & CF_BROKEN_MWDMA)
27782 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27783 index f9b983a..887b9d8 100644
27784 --- a/drivers/atm/adummy.c
27785 +++ b/drivers/atm/adummy.c
27786 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27787 vcc->pop(vcc, skb);
27788 else
27789 dev_kfree_skb_any(skb);
27790 - atomic_inc(&vcc->stats->tx);
27791 + atomic_inc_unchecked(&vcc->stats->tx);
27792
27793 return 0;
27794 }
27795 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27796 index f8f41e0..1f987dd 100644
27797 --- a/drivers/atm/ambassador.c
27798 +++ b/drivers/atm/ambassador.c
27799 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27800 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27801
27802 // VC layer stats
27803 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27804 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27805
27806 // free the descriptor
27807 kfree (tx_descr);
27808 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27809 dump_skb ("<<<", vc, skb);
27810
27811 // VC layer stats
27812 - atomic_inc(&atm_vcc->stats->rx);
27813 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27814 __net_timestamp(skb);
27815 // end of our responsibility
27816 atm_vcc->push (atm_vcc, skb);
27817 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27818 } else {
27819 PRINTK (KERN_INFO, "dropped over-size frame");
27820 // should we count this?
27821 - atomic_inc(&atm_vcc->stats->rx_drop);
27822 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27823 }
27824
27825 } else {
27826 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27827 }
27828
27829 if (check_area (skb->data, skb->len)) {
27830 - atomic_inc(&atm_vcc->stats->tx_err);
27831 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27832 return -ENOMEM; // ?
27833 }
27834
27835 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27836 index b22d71c..d6e1049 100644
27837 --- a/drivers/atm/atmtcp.c
27838 +++ b/drivers/atm/atmtcp.c
27839 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27840 if (vcc->pop) vcc->pop(vcc,skb);
27841 else dev_kfree_skb(skb);
27842 if (dev_data) return 0;
27843 - atomic_inc(&vcc->stats->tx_err);
27844 + atomic_inc_unchecked(&vcc->stats->tx_err);
27845 return -ENOLINK;
27846 }
27847 size = skb->len+sizeof(struct atmtcp_hdr);
27848 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27849 if (!new_skb) {
27850 if (vcc->pop) vcc->pop(vcc,skb);
27851 else dev_kfree_skb(skb);
27852 - atomic_inc(&vcc->stats->tx_err);
27853 + atomic_inc_unchecked(&vcc->stats->tx_err);
27854 return -ENOBUFS;
27855 }
27856 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27857 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27858 if (vcc->pop) vcc->pop(vcc,skb);
27859 else dev_kfree_skb(skb);
27860 out_vcc->push(out_vcc,new_skb);
27861 - atomic_inc(&vcc->stats->tx);
27862 - atomic_inc(&out_vcc->stats->rx);
27863 + atomic_inc_unchecked(&vcc->stats->tx);
27864 + atomic_inc_unchecked(&out_vcc->stats->rx);
27865 return 0;
27866 }
27867
27868 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27869 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27870 read_unlock(&vcc_sklist_lock);
27871 if (!out_vcc) {
27872 - atomic_inc(&vcc->stats->tx_err);
27873 + atomic_inc_unchecked(&vcc->stats->tx_err);
27874 goto done;
27875 }
27876 skb_pull(skb,sizeof(struct atmtcp_hdr));
27877 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27878 __net_timestamp(new_skb);
27879 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27880 out_vcc->push(out_vcc,new_skb);
27881 - atomic_inc(&vcc->stats->tx);
27882 - atomic_inc(&out_vcc->stats->rx);
27883 + atomic_inc_unchecked(&vcc->stats->tx);
27884 + atomic_inc_unchecked(&out_vcc->stats->rx);
27885 done:
27886 if (vcc->pop) vcc->pop(vcc,skb);
27887 else dev_kfree_skb(skb);
27888 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27889 index 2059ee4..faf51c7 100644
27890 --- a/drivers/atm/eni.c
27891 +++ b/drivers/atm/eni.c
27892 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27893 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27894 vcc->dev->number);
27895 length = 0;
27896 - atomic_inc(&vcc->stats->rx_err);
27897 + atomic_inc_unchecked(&vcc->stats->rx_err);
27898 }
27899 else {
27900 length = ATM_CELL_SIZE-1; /* no HEC */
27901 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27902 size);
27903 }
27904 eff = length = 0;
27905 - atomic_inc(&vcc->stats->rx_err);
27906 + atomic_inc_unchecked(&vcc->stats->rx_err);
27907 }
27908 else {
27909 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27910 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27911 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27912 vcc->dev->number,vcc->vci,length,size << 2,descr);
27913 length = eff = 0;
27914 - atomic_inc(&vcc->stats->rx_err);
27915 + atomic_inc_unchecked(&vcc->stats->rx_err);
27916 }
27917 }
27918 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27919 @@ -767,7 +767,7 @@ rx_dequeued++;
27920 vcc->push(vcc,skb);
27921 pushed++;
27922 }
27923 - atomic_inc(&vcc->stats->rx);
27924 + atomic_inc_unchecked(&vcc->stats->rx);
27925 }
27926 wake_up(&eni_dev->rx_wait);
27927 }
27928 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27929 PCI_DMA_TODEVICE);
27930 if (vcc->pop) vcc->pop(vcc,skb);
27931 else dev_kfree_skb_irq(skb);
27932 - atomic_inc(&vcc->stats->tx);
27933 + atomic_inc_unchecked(&vcc->stats->tx);
27934 wake_up(&eni_dev->tx_wait);
27935 dma_complete++;
27936 }
27937 @@ -1567,7 +1567,7 @@ tx_complete++;
27938 /*--------------------------------- entries ---------------------------------*/
27939
27940
27941 -static const char *media_name[] __devinitdata = {
27942 +static const char *media_name[] __devinitconst = {
27943 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27944 "UTP", "05?", "06?", "07?", /* 4- 7 */
27945 "TAXI","09?", "10?", "11?", /* 8-11 */
27946 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27947 index 86fed1b..6dc4721 100644
27948 --- a/drivers/atm/firestream.c
27949 +++ b/drivers/atm/firestream.c
27950 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27951 }
27952 }
27953
27954 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27955 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27956
27957 fs_dprintk (FS_DEBUG_TXMEM, "i");
27958 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27959 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27960 #endif
27961 skb_put (skb, qe->p1 & 0xffff);
27962 ATM_SKB(skb)->vcc = atm_vcc;
27963 - atomic_inc(&atm_vcc->stats->rx);
27964 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27965 __net_timestamp(skb);
27966 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27967 atm_vcc->push (atm_vcc, skb);
27968 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27969 kfree (pe);
27970 }
27971 if (atm_vcc)
27972 - atomic_inc(&atm_vcc->stats->rx_drop);
27973 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27974 break;
27975 case 0x1f: /* Reassembly abort: no buffers. */
27976 /* Silently increment error counter. */
27977 if (atm_vcc)
27978 - atomic_inc(&atm_vcc->stats->rx_drop);
27979 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27980 break;
27981 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27982 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27983 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27984 index 361f5ae..7fc552d 100644
27985 --- a/drivers/atm/fore200e.c
27986 +++ b/drivers/atm/fore200e.c
27987 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27988 #endif
27989 /* check error condition */
27990 if (*entry->status & STATUS_ERROR)
27991 - atomic_inc(&vcc->stats->tx_err);
27992 + atomic_inc_unchecked(&vcc->stats->tx_err);
27993 else
27994 - atomic_inc(&vcc->stats->tx);
27995 + atomic_inc_unchecked(&vcc->stats->tx);
27996 }
27997 }
27998
27999 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28000 if (skb == NULL) {
28001 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28002
28003 - atomic_inc(&vcc->stats->rx_drop);
28004 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28005 return -ENOMEM;
28006 }
28007
28008 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28009
28010 dev_kfree_skb_any(skb);
28011
28012 - atomic_inc(&vcc->stats->rx_drop);
28013 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28014 return -ENOMEM;
28015 }
28016
28017 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28018
28019 vcc->push(vcc, skb);
28020 - atomic_inc(&vcc->stats->rx);
28021 + atomic_inc_unchecked(&vcc->stats->rx);
28022
28023 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28024
28025 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28026 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28027 fore200e->atm_dev->number,
28028 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28029 - atomic_inc(&vcc->stats->rx_err);
28030 + atomic_inc_unchecked(&vcc->stats->rx_err);
28031 }
28032 }
28033
28034 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28035 goto retry_here;
28036 }
28037
28038 - atomic_inc(&vcc->stats->tx_err);
28039 + atomic_inc_unchecked(&vcc->stats->tx_err);
28040
28041 fore200e->tx_sat++;
28042 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28043 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28044 index b182c2f..1c6fa8a 100644
28045 --- a/drivers/atm/he.c
28046 +++ b/drivers/atm/he.c
28047 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28048
28049 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28050 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28051 - atomic_inc(&vcc->stats->rx_drop);
28052 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28053 goto return_host_buffers;
28054 }
28055
28056 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28057 RBRQ_LEN_ERR(he_dev->rbrq_head)
28058 ? "LEN_ERR" : "",
28059 vcc->vpi, vcc->vci);
28060 - atomic_inc(&vcc->stats->rx_err);
28061 + atomic_inc_unchecked(&vcc->stats->rx_err);
28062 goto return_host_buffers;
28063 }
28064
28065 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28066 vcc->push(vcc, skb);
28067 spin_lock(&he_dev->global_lock);
28068
28069 - atomic_inc(&vcc->stats->rx);
28070 + atomic_inc_unchecked(&vcc->stats->rx);
28071
28072 return_host_buffers:
28073 ++pdus_assembled;
28074 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28075 tpd->vcc->pop(tpd->vcc, tpd->skb);
28076 else
28077 dev_kfree_skb_any(tpd->skb);
28078 - atomic_inc(&tpd->vcc->stats->tx_err);
28079 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28080 }
28081 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28082 return;
28083 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28084 vcc->pop(vcc, skb);
28085 else
28086 dev_kfree_skb_any(skb);
28087 - atomic_inc(&vcc->stats->tx_err);
28088 + atomic_inc_unchecked(&vcc->stats->tx_err);
28089 return -EINVAL;
28090 }
28091
28092 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28093 vcc->pop(vcc, skb);
28094 else
28095 dev_kfree_skb_any(skb);
28096 - atomic_inc(&vcc->stats->tx_err);
28097 + atomic_inc_unchecked(&vcc->stats->tx_err);
28098 return -EINVAL;
28099 }
28100 #endif
28101 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28102 vcc->pop(vcc, skb);
28103 else
28104 dev_kfree_skb_any(skb);
28105 - atomic_inc(&vcc->stats->tx_err);
28106 + atomic_inc_unchecked(&vcc->stats->tx_err);
28107 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28108 return -ENOMEM;
28109 }
28110 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28111 vcc->pop(vcc, skb);
28112 else
28113 dev_kfree_skb_any(skb);
28114 - atomic_inc(&vcc->stats->tx_err);
28115 + atomic_inc_unchecked(&vcc->stats->tx_err);
28116 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28117 return -ENOMEM;
28118 }
28119 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28120 __enqueue_tpd(he_dev, tpd, cid);
28121 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28122
28123 - atomic_inc(&vcc->stats->tx);
28124 + atomic_inc_unchecked(&vcc->stats->tx);
28125
28126 return 0;
28127 }
28128 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28129 index 75fd691..2d20b14 100644
28130 --- a/drivers/atm/horizon.c
28131 +++ b/drivers/atm/horizon.c
28132 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28133 {
28134 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28135 // VC layer stats
28136 - atomic_inc(&vcc->stats->rx);
28137 + atomic_inc_unchecked(&vcc->stats->rx);
28138 __net_timestamp(skb);
28139 // end of our responsibility
28140 vcc->push (vcc, skb);
28141 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28142 dev->tx_iovec = NULL;
28143
28144 // VC layer stats
28145 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28146 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28147
28148 // free the skb
28149 hrz_kfree_skb (skb);
28150 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28151 index 1c05212..c28e200 100644
28152 --- a/drivers/atm/idt77252.c
28153 +++ b/drivers/atm/idt77252.c
28154 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28155 else
28156 dev_kfree_skb(skb);
28157
28158 - atomic_inc(&vcc->stats->tx);
28159 + atomic_inc_unchecked(&vcc->stats->tx);
28160 }
28161
28162 atomic_dec(&scq->used);
28163 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28164 if ((sb = dev_alloc_skb(64)) == NULL) {
28165 printk("%s: Can't allocate buffers for aal0.\n",
28166 card->name);
28167 - atomic_add(i, &vcc->stats->rx_drop);
28168 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28169 break;
28170 }
28171 if (!atm_charge(vcc, sb->truesize)) {
28172 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28173 card->name);
28174 - atomic_add(i - 1, &vcc->stats->rx_drop);
28175 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28176 dev_kfree_skb(sb);
28177 break;
28178 }
28179 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28180 ATM_SKB(sb)->vcc = vcc;
28181 __net_timestamp(sb);
28182 vcc->push(vcc, sb);
28183 - atomic_inc(&vcc->stats->rx);
28184 + atomic_inc_unchecked(&vcc->stats->rx);
28185
28186 cell += ATM_CELL_PAYLOAD;
28187 }
28188 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28189 "(CDC: %08x)\n",
28190 card->name, len, rpp->len, readl(SAR_REG_CDC));
28191 recycle_rx_pool_skb(card, rpp);
28192 - atomic_inc(&vcc->stats->rx_err);
28193 + atomic_inc_unchecked(&vcc->stats->rx_err);
28194 return;
28195 }
28196 if (stat & SAR_RSQE_CRC) {
28197 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28198 recycle_rx_pool_skb(card, rpp);
28199 - atomic_inc(&vcc->stats->rx_err);
28200 + atomic_inc_unchecked(&vcc->stats->rx_err);
28201 return;
28202 }
28203 if (skb_queue_len(&rpp->queue) > 1) {
28204 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28205 RXPRINTK("%s: Can't alloc RX skb.\n",
28206 card->name);
28207 recycle_rx_pool_skb(card, rpp);
28208 - atomic_inc(&vcc->stats->rx_err);
28209 + atomic_inc_unchecked(&vcc->stats->rx_err);
28210 return;
28211 }
28212 if (!atm_charge(vcc, skb->truesize)) {
28213 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28214 __net_timestamp(skb);
28215
28216 vcc->push(vcc, skb);
28217 - atomic_inc(&vcc->stats->rx);
28218 + atomic_inc_unchecked(&vcc->stats->rx);
28219
28220 return;
28221 }
28222 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28223 __net_timestamp(skb);
28224
28225 vcc->push(vcc, skb);
28226 - atomic_inc(&vcc->stats->rx);
28227 + atomic_inc_unchecked(&vcc->stats->rx);
28228
28229 if (skb->truesize > SAR_FB_SIZE_3)
28230 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28231 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28232 if (vcc->qos.aal != ATM_AAL0) {
28233 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28234 card->name, vpi, vci);
28235 - atomic_inc(&vcc->stats->rx_drop);
28236 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28237 goto drop;
28238 }
28239
28240 if ((sb = dev_alloc_skb(64)) == NULL) {
28241 printk("%s: Can't allocate buffers for AAL0.\n",
28242 card->name);
28243 - atomic_inc(&vcc->stats->rx_err);
28244 + atomic_inc_unchecked(&vcc->stats->rx_err);
28245 goto drop;
28246 }
28247
28248 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28249 ATM_SKB(sb)->vcc = vcc;
28250 __net_timestamp(sb);
28251 vcc->push(vcc, sb);
28252 - atomic_inc(&vcc->stats->rx);
28253 + atomic_inc_unchecked(&vcc->stats->rx);
28254
28255 drop:
28256 skb_pull(queue, 64);
28257 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28258
28259 if (vc == NULL) {
28260 printk("%s: NULL connection in send().\n", card->name);
28261 - atomic_inc(&vcc->stats->tx_err);
28262 + atomic_inc_unchecked(&vcc->stats->tx_err);
28263 dev_kfree_skb(skb);
28264 return -EINVAL;
28265 }
28266 if (!test_bit(VCF_TX, &vc->flags)) {
28267 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28268 - atomic_inc(&vcc->stats->tx_err);
28269 + atomic_inc_unchecked(&vcc->stats->tx_err);
28270 dev_kfree_skb(skb);
28271 return -EINVAL;
28272 }
28273 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28274 break;
28275 default:
28276 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28277 - atomic_inc(&vcc->stats->tx_err);
28278 + atomic_inc_unchecked(&vcc->stats->tx_err);
28279 dev_kfree_skb(skb);
28280 return -EINVAL;
28281 }
28282
28283 if (skb_shinfo(skb)->nr_frags != 0) {
28284 printk("%s: No scatter-gather yet.\n", card->name);
28285 - atomic_inc(&vcc->stats->tx_err);
28286 + atomic_inc_unchecked(&vcc->stats->tx_err);
28287 dev_kfree_skb(skb);
28288 return -EINVAL;
28289 }
28290 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28291
28292 err = queue_skb(card, vc, skb, oam);
28293 if (err) {
28294 - atomic_inc(&vcc->stats->tx_err);
28295 + atomic_inc_unchecked(&vcc->stats->tx_err);
28296 dev_kfree_skb(skb);
28297 return err;
28298 }
28299 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28300 skb = dev_alloc_skb(64);
28301 if (!skb) {
28302 printk("%s: Out of memory in send_oam().\n", card->name);
28303 - atomic_inc(&vcc->stats->tx_err);
28304 + atomic_inc_unchecked(&vcc->stats->tx_err);
28305 return -ENOMEM;
28306 }
28307 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28308 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28309 index d438601..8b98495 100644
28310 --- a/drivers/atm/iphase.c
28311 +++ b/drivers/atm/iphase.c
28312 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28313 status = (u_short) (buf_desc_ptr->desc_mode);
28314 if (status & (RX_CER | RX_PTE | RX_OFL))
28315 {
28316 - atomic_inc(&vcc->stats->rx_err);
28317 + atomic_inc_unchecked(&vcc->stats->rx_err);
28318 IF_ERR(printk("IA: bad packet, dropping it");)
28319 if (status & RX_CER) {
28320 IF_ERR(printk(" cause: packet CRC error\n");)
28321 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28322 len = dma_addr - buf_addr;
28323 if (len > iadev->rx_buf_sz) {
28324 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28325 - atomic_inc(&vcc->stats->rx_err);
28326 + atomic_inc_unchecked(&vcc->stats->rx_err);
28327 goto out_free_desc;
28328 }
28329
28330 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28331 ia_vcc = INPH_IA_VCC(vcc);
28332 if (ia_vcc == NULL)
28333 {
28334 - atomic_inc(&vcc->stats->rx_err);
28335 + atomic_inc_unchecked(&vcc->stats->rx_err);
28336 atm_return(vcc, skb->truesize);
28337 dev_kfree_skb_any(skb);
28338 goto INCR_DLE;
28339 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28340 if ((length > iadev->rx_buf_sz) || (length >
28341 (skb->len - sizeof(struct cpcs_trailer))))
28342 {
28343 - atomic_inc(&vcc->stats->rx_err);
28344 + atomic_inc_unchecked(&vcc->stats->rx_err);
28345 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28346 length, skb->len);)
28347 atm_return(vcc, skb->truesize);
28348 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28349
28350 IF_RX(printk("rx_dle_intr: skb push");)
28351 vcc->push(vcc,skb);
28352 - atomic_inc(&vcc->stats->rx);
28353 + atomic_inc_unchecked(&vcc->stats->rx);
28354 iadev->rx_pkt_cnt++;
28355 }
28356 INCR_DLE:
28357 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28358 {
28359 struct k_sonet_stats *stats;
28360 stats = &PRIV(_ia_dev[board])->sonet_stats;
28361 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28362 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28363 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28364 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28365 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28366 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28367 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28368 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28369 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28370 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28371 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28372 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28373 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28374 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28375 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28376 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28377 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28378 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28379 }
28380 ia_cmds.status = 0;
28381 break;
28382 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28383 if ((desc == 0) || (desc > iadev->num_tx_desc))
28384 {
28385 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28386 - atomic_inc(&vcc->stats->tx);
28387 + atomic_inc_unchecked(&vcc->stats->tx);
28388 if (vcc->pop)
28389 vcc->pop(vcc, skb);
28390 else
28391 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28392 ATM_DESC(skb) = vcc->vci;
28393 skb_queue_tail(&iadev->tx_dma_q, skb);
28394
28395 - atomic_inc(&vcc->stats->tx);
28396 + atomic_inc_unchecked(&vcc->stats->tx);
28397 iadev->tx_pkt_cnt++;
28398 /* Increment transaction counter */
28399 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28400
28401 #if 0
28402 /* add flow control logic */
28403 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28404 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28405 if (iavcc->vc_desc_cnt > 10) {
28406 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28407 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28408 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28409 index 68c7588..7036683 100644
28410 --- a/drivers/atm/lanai.c
28411 +++ b/drivers/atm/lanai.c
28412 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28413 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28414 lanai_endtx(lanai, lvcc);
28415 lanai_free_skb(lvcc->tx.atmvcc, skb);
28416 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28417 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28418 }
28419
28420 /* Try to fill the buffer - don't call unless there is backlog */
28421 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28422 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28423 __net_timestamp(skb);
28424 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28425 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28426 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28427 out:
28428 lvcc->rx.buf.ptr = end;
28429 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28430 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28431 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28432 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28433 lanai->stats.service_rxnotaal5++;
28434 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28435 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28436 return 0;
28437 }
28438 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28439 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28440 int bytes;
28441 read_unlock(&vcc_sklist_lock);
28442 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28443 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28444 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28445 lvcc->stats.x.aal5.service_trash++;
28446 bytes = (SERVICE_GET_END(s) * 16) -
28447 (((unsigned long) lvcc->rx.buf.ptr) -
28448 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28449 }
28450 if (s & SERVICE_STREAM) {
28451 read_unlock(&vcc_sklist_lock);
28452 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28453 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28454 lvcc->stats.x.aal5.service_stream++;
28455 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28456 "PDU on VCI %d!\n", lanai->number, vci);
28457 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28458 return 0;
28459 }
28460 DPRINTK("got rx crc error on vci %d\n", vci);
28461 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28462 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28463 lvcc->stats.x.aal5.service_rxcrc++;
28464 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28465 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28466 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28467 index 1c70c45..300718d 100644
28468 --- a/drivers/atm/nicstar.c
28469 +++ b/drivers/atm/nicstar.c
28470 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28471 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28472 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28473 card->index);
28474 - atomic_inc(&vcc->stats->tx_err);
28475 + atomic_inc_unchecked(&vcc->stats->tx_err);
28476 dev_kfree_skb_any(skb);
28477 return -EINVAL;
28478 }
28479 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28480 if (!vc->tx) {
28481 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28482 card->index);
28483 - atomic_inc(&vcc->stats->tx_err);
28484 + atomic_inc_unchecked(&vcc->stats->tx_err);
28485 dev_kfree_skb_any(skb);
28486 return -EINVAL;
28487 }
28488 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28489 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28490 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28491 card->index);
28492 - atomic_inc(&vcc->stats->tx_err);
28493 + atomic_inc_unchecked(&vcc->stats->tx_err);
28494 dev_kfree_skb_any(skb);
28495 return -EINVAL;
28496 }
28497
28498 if (skb_shinfo(skb)->nr_frags != 0) {
28499 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28500 - atomic_inc(&vcc->stats->tx_err);
28501 + atomic_inc_unchecked(&vcc->stats->tx_err);
28502 dev_kfree_skb_any(skb);
28503 return -EINVAL;
28504 }
28505 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28506 }
28507
28508 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28509 - atomic_inc(&vcc->stats->tx_err);
28510 + atomic_inc_unchecked(&vcc->stats->tx_err);
28511 dev_kfree_skb_any(skb);
28512 return -EIO;
28513 }
28514 - atomic_inc(&vcc->stats->tx);
28515 + atomic_inc_unchecked(&vcc->stats->tx);
28516
28517 return 0;
28518 }
28519 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28520 printk
28521 ("nicstar%d: Can't allocate buffers for aal0.\n",
28522 card->index);
28523 - atomic_add(i, &vcc->stats->rx_drop);
28524 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28525 break;
28526 }
28527 if (!atm_charge(vcc, sb->truesize)) {
28528 RXPRINTK
28529 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28530 card->index);
28531 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28532 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28533 dev_kfree_skb_any(sb);
28534 break;
28535 }
28536 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28537 ATM_SKB(sb)->vcc = vcc;
28538 __net_timestamp(sb);
28539 vcc->push(vcc, sb);
28540 - atomic_inc(&vcc->stats->rx);
28541 + atomic_inc_unchecked(&vcc->stats->rx);
28542 cell += ATM_CELL_PAYLOAD;
28543 }
28544
28545 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28546 if (iovb == NULL) {
28547 printk("nicstar%d: Out of iovec buffers.\n",
28548 card->index);
28549 - atomic_inc(&vcc->stats->rx_drop);
28550 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28551 recycle_rx_buf(card, skb);
28552 return;
28553 }
28554 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28555 small or large buffer itself. */
28556 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28557 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28558 - atomic_inc(&vcc->stats->rx_err);
28559 + atomic_inc_unchecked(&vcc->stats->rx_err);
28560 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28561 NS_MAX_IOVECS);
28562 NS_PRV_IOVCNT(iovb) = 0;
28563 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28564 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28565 card->index);
28566 which_list(card, skb);
28567 - atomic_inc(&vcc->stats->rx_err);
28568 + atomic_inc_unchecked(&vcc->stats->rx_err);
28569 recycle_rx_buf(card, skb);
28570 vc->rx_iov = NULL;
28571 recycle_iov_buf(card, iovb);
28572 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28573 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28574 card->index);
28575 which_list(card, skb);
28576 - atomic_inc(&vcc->stats->rx_err);
28577 + atomic_inc_unchecked(&vcc->stats->rx_err);
28578 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28579 NS_PRV_IOVCNT(iovb));
28580 vc->rx_iov = NULL;
28581 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28582 printk(" - PDU size mismatch.\n");
28583 else
28584 printk(".\n");
28585 - atomic_inc(&vcc->stats->rx_err);
28586 + atomic_inc_unchecked(&vcc->stats->rx_err);
28587 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28588 NS_PRV_IOVCNT(iovb));
28589 vc->rx_iov = NULL;
28590 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28591 /* skb points to a small buffer */
28592 if (!atm_charge(vcc, skb->truesize)) {
28593 push_rxbufs(card, skb);
28594 - atomic_inc(&vcc->stats->rx_drop);
28595 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28596 } else {
28597 skb_put(skb, len);
28598 dequeue_sm_buf(card, skb);
28599 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28600 ATM_SKB(skb)->vcc = vcc;
28601 __net_timestamp(skb);
28602 vcc->push(vcc, skb);
28603 - atomic_inc(&vcc->stats->rx);
28604 + atomic_inc_unchecked(&vcc->stats->rx);
28605 }
28606 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28607 struct sk_buff *sb;
28608 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28609 if (len <= NS_SMBUFSIZE) {
28610 if (!atm_charge(vcc, sb->truesize)) {
28611 push_rxbufs(card, sb);
28612 - atomic_inc(&vcc->stats->rx_drop);
28613 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28614 } else {
28615 skb_put(sb, len);
28616 dequeue_sm_buf(card, sb);
28617 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28618 ATM_SKB(sb)->vcc = vcc;
28619 __net_timestamp(sb);
28620 vcc->push(vcc, sb);
28621 - atomic_inc(&vcc->stats->rx);
28622 + atomic_inc_unchecked(&vcc->stats->rx);
28623 }
28624
28625 push_rxbufs(card, skb);
28626 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28627
28628 if (!atm_charge(vcc, skb->truesize)) {
28629 push_rxbufs(card, skb);
28630 - atomic_inc(&vcc->stats->rx_drop);
28631 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28632 } else {
28633 dequeue_lg_buf(card, skb);
28634 #ifdef NS_USE_DESTRUCTORS
28635 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28636 ATM_SKB(skb)->vcc = vcc;
28637 __net_timestamp(skb);
28638 vcc->push(vcc, skb);
28639 - atomic_inc(&vcc->stats->rx);
28640 + atomic_inc_unchecked(&vcc->stats->rx);
28641 }
28642
28643 push_rxbufs(card, sb);
28644 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28645 printk
28646 ("nicstar%d: Out of huge buffers.\n",
28647 card->index);
28648 - atomic_inc(&vcc->stats->rx_drop);
28649 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28650 recycle_iovec_rx_bufs(card,
28651 (struct iovec *)
28652 iovb->data,
28653 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28654 card->hbpool.count++;
28655 } else
28656 dev_kfree_skb_any(hb);
28657 - atomic_inc(&vcc->stats->rx_drop);
28658 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28659 } else {
28660 /* Copy the small buffer to the huge buffer */
28661 sb = (struct sk_buff *)iov->iov_base;
28662 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28663 #endif /* NS_USE_DESTRUCTORS */
28664 __net_timestamp(hb);
28665 vcc->push(vcc, hb);
28666 - atomic_inc(&vcc->stats->rx);
28667 + atomic_inc_unchecked(&vcc->stats->rx);
28668 }
28669 }
28670
28671 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28672 index 9851093..adb2b1e 100644
28673 --- a/drivers/atm/solos-pci.c
28674 +++ b/drivers/atm/solos-pci.c
28675 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28676 }
28677 atm_charge(vcc, skb->truesize);
28678 vcc->push(vcc, skb);
28679 - atomic_inc(&vcc->stats->rx);
28680 + atomic_inc_unchecked(&vcc->stats->rx);
28681 break;
28682
28683 case PKT_STATUS:
28684 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28685 vcc = SKB_CB(oldskb)->vcc;
28686
28687 if (vcc) {
28688 - atomic_inc(&vcc->stats->tx);
28689 + atomic_inc_unchecked(&vcc->stats->tx);
28690 solos_pop(vcc, oldskb);
28691 } else
28692 dev_kfree_skb_irq(oldskb);
28693 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28694 index 0215934..ce9f5b1 100644
28695 --- a/drivers/atm/suni.c
28696 +++ b/drivers/atm/suni.c
28697 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28698
28699
28700 #define ADD_LIMITED(s,v) \
28701 - atomic_add((v),&stats->s); \
28702 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28703 + atomic_add_unchecked((v),&stats->s); \
28704 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28705
28706
28707 static void suni_hz(unsigned long from_timer)
28708 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28709 index 5120a96..e2572bd 100644
28710 --- a/drivers/atm/uPD98402.c
28711 +++ b/drivers/atm/uPD98402.c
28712 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28713 struct sonet_stats tmp;
28714 int error = 0;
28715
28716 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28717 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28718 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28719 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28720 if (zero && !error) {
28721 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28722
28723
28724 #define ADD_LIMITED(s,v) \
28725 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28726 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28727 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28728 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28729 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28730 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28731
28732
28733 static void stat_event(struct atm_dev *dev)
28734 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28735 if (reason & uPD98402_INT_PFM) stat_event(dev);
28736 if (reason & uPD98402_INT_PCO) {
28737 (void) GET(PCOCR); /* clear interrupt cause */
28738 - atomic_add(GET(HECCT),
28739 + atomic_add_unchecked(GET(HECCT),
28740 &PRIV(dev)->sonet_stats.uncorr_hcs);
28741 }
28742 if ((reason & uPD98402_INT_RFO) &&
28743 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28744 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28745 uPD98402_INT_LOS),PIMR); /* enable them */
28746 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28747 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28748 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28749 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28750 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28751 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28752 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28753 return 0;
28754 }
28755
28756 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28757 index abe4e20..83c4727 100644
28758 --- a/drivers/atm/zatm.c
28759 +++ b/drivers/atm/zatm.c
28760 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28761 }
28762 if (!size) {
28763 dev_kfree_skb_irq(skb);
28764 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28765 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28766 continue;
28767 }
28768 if (!atm_charge(vcc,skb->truesize)) {
28769 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28770 skb->len = size;
28771 ATM_SKB(skb)->vcc = vcc;
28772 vcc->push(vcc,skb);
28773 - atomic_inc(&vcc->stats->rx);
28774 + atomic_inc_unchecked(&vcc->stats->rx);
28775 }
28776 zout(pos & 0xffff,MTA(mbx));
28777 #if 0 /* probably a stupid idea */
28778 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28779 skb_queue_head(&zatm_vcc->backlog,skb);
28780 break;
28781 }
28782 - atomic_inc(&vcc->stats->tx);
28783 + atomic_inc_unchecked(&vcc->stats->tx);
28784 wake_up(&zatm_vcc->tx_wait);
28785 }
28786
28787 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28788 index 8493536..31adee0 100644
28789 --- a/drivers/base/devtmpfs.c
28790 +++ b/drivers/base/devtmpfs.c
28791 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28792 if (!thread)
28793 return 0;
28794
28795 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28796 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28797 if (err)
28798 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28799 else
28800 diff --git a/drivers/base/node.c b/drivers/base/node.c
28801 index 90aa2a1..af1a177 100644
28802 --- a/drivers/base/node.c
28803 +++ b/drivers/base/node.c
28804 @@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28805 {
28806 int n;
28807
28808 - n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28809 - if (n > 0 && PAGE_SIZE > n + 1) {
28810 - *(buf + n++) = '\n';
28811 - *(buf + n++) = '\0';
28812 - }
28813 + n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28814 + buf[n++] = '\n';
28815 + buf[n] = '\0';
28816 return n;
28817 }
28818
28819 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28820 index 2a3e581..3d6a73f 100644
28821 --- a/drivers/base/power/wakeup.c
28822 +++ b/drivers/base/power/wakeup.c
28823 @@ -30,14 +30,14 @@ bool events_check_enabled;
28824 * They need to be modified together atomically, so it's better to use one
28825 * atomic variable to hold them both.
28826 */
28827 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28828 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28829
28830 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28831 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28832
28833 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28834 {
28835 - unsigned int comb = atomic_read(&combined_event_count);
28836 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28837
28838 *cnt = (comb >> IN_PROGRESS_BITS);
28839 *inpr = comb & MAX_IN_PROGRESS;
28840 @@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28841 ws->last_time = ktime_get();
28842
28843 /* Increment the counter of events in progress. */
28844 - atomic_inc(&combined_event_count);
28845 + atomic_inc_unchecked(&combined_event_count);
28846 }
28847
28848 /**
28849 @@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28850 * Increment the counter of registered wakeup events and decrement the
28851 * couter of wakeup events in progress simultaneously.
28852 */
28853 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28854 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28855 }
28856
28857 /**
28858 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28859 index b0f553b..77b928b 100644
28860 --- a/drivers/block/cciss.c
28861 +++ b/drivers/block/cciss.c
28862 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28863 int err;
28864 u32 cp;
28865
28866 + memset(&arg64, 0, sizeof(arg64));
28867 +
28868 err = 0;
28869 err |=
28870 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28871 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28872 while (!list_empty(&h->reqQ)) {
28873 c = list_entry(h->reqQ.next, CommandList_struct, list);
28874 /* can't do anything if fifo is full */
28875 - if ((h->access.fifo_full(h))) {
28876 + if ((h->access->fifo_full(h))) {
28877 dev_warn(&h->pdev->dev, "fifo full\n");
28878 break;
28879 }
28880 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28881 h->Qdepth--;
28882
28883 /* Tell the controller execute command */
28884 - h->access.submit_command(h, c);
28885 + h->access->submit_command(h, c);
28886
28887 /* Put job onto the completed Q */
28888 addQ(&h->cmpQ, c);
28889 @@ -3443,17 +3445,17 @@ startio:
28890
28891 static inline unsigned long get_next_completion(ctlr_info_t *h)
28892 {
28893 - return h->access.command_completed(h);
28894 + return h->access->command_completed(h);
28895 }
28896
28897 static inline int interrupt_pending(ctlr_info_t *h)
28898 {
28899 - return h->access.intr_pending(h);
28900 + return h->access->intr_pending(h);
28901 }
28902
28903 static inline long interrupt_not_for_us(ctlr_info_t *h)
28904 {
28905 - return ((h->access.intr_pending(h) == 0) ||
28906 + return ((h->access->intr_pending(h) == 0) ||
28907 (h->interrupts_enabled == 0));
28908 }
28909
28910 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28911 u32 a;
28912
28913 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28914 - return h->access.command_completed(h);
28915 + return h->access->command_completed(h);
28916
28917 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28918 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28919 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28920 trans_support & CFGTBL_Trans_use_short_tags);
28921
28922 /* Change the access methods to the performant access methods */
28923 - h->access = SA5_performant_access;
28924 + h->access = &SA5_performant_access;
28925 h->transMethod = CFGTBL_Trans_Performant;
28926
28927 return;
28928 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28929 if (prod_index < 0)
28930 return -ENODEV;
28931 h->product_name = products[prod_index].product_name;
28932 - h->access = *(products[prod_index].access);
28933 + h->access = products[prod_index].access;
28934
28935 if (cciss_board_disabled(h)) {
28936 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28937 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28938 }
28939
28940 /* make sure the board interrupts are off */
28941 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28942 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28943 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28944 if (rc)
28945 goto clean2;
28946 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28947 * fake ones to scoop up any residual completions.
28948 */
28949 spin_lock_irqsave(&h->lock, flags);
28950 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28951 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28952 spin_unlock_irqrestore(&h->lock, flags);
28953 free_irq(h->intr[h->intr_mode], h);
28954 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28955 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28956 dev_info(&h->pdev->dev, "Board READY.\n");
28957 dev_info(&h->pdev->dev,
28958 "Waiting for stale completions to drain.\n");
28959 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28960 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28961 msleep(10000);
28962 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28963 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28964
28965 rc = controller_reset_failed(h->cfgtable);
28966 if (rc)
28967 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28968 cciss_scsi_setup(h);
28969
28970 /* Turn the interrupts on so we can service requests */
28971 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28972 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28973
28974 /* Get the firmware version */
28975 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28976 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28977 kfree(flush_buf);
28978 if (return_code != IO_OK)
28979 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28980 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28981 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28982 free_irq(h->intr[h->intr_mode], h);
28983 }
28984
28985 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28986 index 7fda30e..eb5dfe0 100644
28987 --- a/drivers/block/cciss.h
28988 +++ b/drivers/block/cciss.h
28989 @@ -101,7 +101,7 @@ struct ctlr_info
28990 /* information about each logical volume */
28991 drive_info_struct *drv[CISS_MAX_LUN];
28992
28993 - struct access_method access;
28994 + struct access_method *access;
28995
28996 /* queue and queue Info */
28997 struct list_head reqQ;
28998 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28999 index 9125bbe..eede5c8 100644
29000 --- a/drivers/block/cpqarray.c
29001 +++ b/drivers/block/cpqarray.c
29002 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29003 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29004 goto Enomem4;
29005 }
29006 - hba[i]->access.set_intr_mask(hba[i], 0);
29007 + hba[i]->access->set_intr_mask(hba[i], 0);
29008 if (request_irq(hba[i]->intr, do_ida_intr,
29009 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29010 {
29011 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29012 add_timer(&hba[i]->timer);
29013
29014 /* Enable IRQ now that spinlock and rate limit timer are set up */
29015 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29016 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29017
29018 for(j=0; j<NWD; j++) {
29019 struct gendisk *disk = ida_gendisk[i][j];
29020 @@ -694,7 +694,7 @@ DBGINFO(
29021 for(i=0; i<NR_PRODUCTS; i++) {
29022 if (board_id == products[i].board_id) {
29023 c->product_name = products[i].product_name;
29024 - c->access = *(products[i].access);
29025 + c->access = products[i].access;
29026 break;
29027 }
29028 }
29029 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29030 hba[ctlr]->intr = intr;
29031 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29032 hba[ctlr]->product_name = products[j].product_name;
29033 - hba[ctlr]->access = *(products[j].access);
29034 + hba[ctlr]->access = products[j].access;
29035 hba[ctlr]->ctlr = ctlr;
29036 hba[ctlr]->board_id = board_id;
29037 hba[ctlr]->pci_dev = NULL; /* not PCI */
29038 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29039
29040 while((c = h->reqQ) != NULL) {
29041 /* Can't do anything if we're busy */
29042 - if (h->access.fifo_full(h) == 0)
29043 + if (h->access->fifo_full(h) == 0)
29044 return;
29045
29046 /* Get the first entry from the request Q */
29047 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29048 h->Qdepth--;
29049
29050 /* Tell the controller to do our bidding */
29051 - h->access.submit_command(h, c);
29052 + h->access->submit_command(h, c);
29053
29054 /* Get onto the completion Q */
29055 addQ(&h->cmpQ, c);
29056 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29057 unsigned long flags;
29058 __u32 a,a1;
29059
29060 - istat = h->access.intr_pending(h);
29061 + istat = h->access->intr_pending(h);
29062 /* Is this interrupt for us? */
29063 if (istat == 0)
29064 return IRQ_NONE;
29065 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29066 */
29067 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29068 if (istat & FIFO_NOT_EMPTY) {
29069 - while((a = h->access.command_completed(h))) {
29070 + while((a = h->access->command_completed(h))) {
29071 a1 = a; a &= ~3;
29072 if ((c = h->cmpQ) == NULL)
29073 {
29074 @@ -1449,11 +1449,11 @@ static int sendcmd(
29075 /*
29076 * Disable interrupt
29077 */
29078 - info_p->access.set_intr_mask(info_p, 0);
29079 + info_p->access->set_intr_mask(info_p, 0);
29080 /* Make sure there is room in the command FIFO */
29081 /* Actually it should be completely empty at this time. */
29082 for (i = 200000; i > 0; i--) {
29083 - temp = info_p->access.fifo_full(info_p);
29084 + temp = info_p->access->fifo_full(info_p);
29085 if (temp != 0) {
29086 break;
29087 }
29088 @@ -1466,7 +1466,7 @@ DBG(
29089 /*
29090 * Send the cmd
29091 */
29092 - info_p->access.submit_command(info_p, c);
29093 + info_p->access->submit_command(info_p, c);
29094 complete = pollcomplete(ctlr);
29095
29096 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29097 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29098 * we check the new geometry. Then turn interrupts back on when
29099 * we're done.
29100 */
29101 - host->access.set_intr_mask(host, 0);
29102 + host->access->set_intr_mask(host, 0);
29103 getgeometry(ctlr);
29104 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29105 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29106
29107 for(i=0; i<NWD; i++) {
29108 struct gendisk *disk = ida_gendisk[ctlr][i];
29109 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29110 /* Wait (up to 2 seconds) for a command to complete */
29111
29112 for (i = 200000; i > 0; i--) {
29113 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29114 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29115 if (done == 0) {
29116 udelay(10); /* a short fixed delay */
29117 } else
29118 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29119 index be73e9d..7fbf140 100644
29120 --- a/drivers/block/cpqarray.h
29121 +++ b/drivers/block/cpqarray.h
29122 @@ -99,7 +99,7 @@ struct ctlr_info {
29123 drv_info_t drv[NWD];
29124 struct proc_dir_entry *proc;
29125
29126 - struct access_method access;
29127 + struct access_method *access;
29128
29129 cmdlist_t *reqQ;
29130 cmdlist_t *cmpQ;
29131 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29132 index 8d68056..e67050f 100644
29133 --- a/drivers/block/drbd/drbd_int.h
29134 +++ b/drivers/block/drbd/drbd_int.h
29135 @@ -736,7 +736,7 @@ struct drbd_request;
29136 struct drbd_epoch {
29137 struct list_head list;
29138 unsigned int barrier_nr;
29139 - atomic_t epoch_size; /* increased on every request added. */
29140 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29141 atomic_t active; /* increased on every req. added, and dec on every finished. */
29142 unsigned long flags;
29143 };
29144 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29145 void *int_dig_in;
29146 void *int_dig_vv;
29147 wait_queue_head_t seq_wait;
29148 - atomic_t packet_seq;
29149 + atomic_unchecked_t packet_seq;
29150 unsigned int peer_seq;
29151 spinlock_t peer_seq_lock;
29152 unsigned int minor;
29153 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29154
29155 static inline void drbd_tcp_cork(struct socket *sock)
29156 {
29157 - int __user val = 1;
29158 + int val = 1;
29159 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29160 - (char __user *)&val, sizeof(val));
29161 + (char __force_user *)&val, sizeof(val));
29162 }
29163
29164 static inline void drbd_tcp_uncork(struct socket *sock)
29165 {
29166 - int __user val = 0;
29167 + int val = 0;
29168 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29169 - (char __user *)&val, sizeof(val));
29170 + (char __force_user *)&val, sizeof(val));
29171 }
29172
29173 static inline void drbd_tcp_nodelay(struct socket *sock)
29174 {
29175 - int __user val = 1;
29176 + int val = 1;
29177 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29178 - (char __user *)&val, sizeof(val));
29179 + (char __force_user *)&val, sizeof(val));
29180 }
29181
29182 static inline void drbd_tcp_quickack(struct socket *sock)
29183 {
29184 - int __user val = 2;
29185 + int val = 2;
29186 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29187 - (char __user *)&val, sizeof(val));
29188 + (char __force_user *)&val, sizeof(val));
29189 }
29190
29191 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29192 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29193 index 211fc44..c5116f1 100644
29194 --- a/drivers/block/drbd/drbd_main.c
29195 +++ b/drivers/block/drbd/drbd_main.c
29196 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29197 p.sector = sector;
29198 p.block_id = block_id;
29199 p.blksize = blksize;
29200 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29201 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29202
29203 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29204 return false;
29205 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29206 p.sector = cpu_to_be64(req->sector);
29207 p.block_id = (unsigned long)req;
29208 p.seq_num = cpu_to_be32(req->seq_num =
29209 - atomic_add_return(1, &mdev->packet_seq));
29210 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29211
29212 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29213
29214 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29215 atomic_set(&mdev->unacked_cnt, 0);
29216 atomic_set(&mdev->local_cnt, 0);
29217 atomic_set(&mdev->net_cnt, 0);
29218 - atomic_set(&mdev->packet_seq, 0);
29219 + atomic_set_unchecked(&mdev->packet_seq, 0);
29220 atomic_set(&mdev->pp_in_use, 0);
29221 atomic_set(&mdev->pp_in_use_by_net, 0);
29222 atomic_set(&mdev->rs_sect_in, 0);
29223 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29224 mdev->receiver.t_state);
29225
29226 /* no need to lock it, I'm the only thread alive */
29227 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29228 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29229 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29230 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29231 mdev->al_writ_cnt =
29232 mdev->bm_writ_cnt =
29233 mdev->read_cnt =
29234 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29235 index 946166e..356b39a 100644
29236 --- a/drivers/block/drbd/drbd_nl.c
29237 +++ b/drivers/block/drbd/drbd_nl.c
29238 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29239 module_put(THIS_MODULE);
29240 }
29241
29242 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29243 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29244
29245 static unsigned short *
29246 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29247 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29248 cn_reply->id.idx = CN_IDX_DRBD;
29249 cn_reply->id.val = CN_VAL_DRBD;
29250
29251 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29252 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29253 cn_reply->ack = 0; /* not used here. */
29254 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29255 (int)((char *)tl - (char *)reply->tag_list);
29256 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29257 cn_reply->id.idx = CN_IDX_DRBD;
29258 cn_reply->id.val = CN_VAL_DRBD;
29259
29260 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29261 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29262 cn_reply->ack = 0; /* not used here. */
29263 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29264 (int)((char *)tl - (char *)reply->tag_list);
29265 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29266 cn_reply->id.idx = CN_IDX_DRBD;
29267 cn_reply->id.val = CN_VAL_DRBD;
29268
29269 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29270 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29271 cn_reply->ack = 0; // not used here.
29272 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29273 (int)((char*)tl - (char*)reply->tag_list);
29274 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29275 cn_reply->id.idx = CN_IDX_DRBD;
29276 cn_reply->id.val = CN_VAL_DRBD;
29277
29278 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29279 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29280 cn_reply->ack = 0; /* not used here. */
29281 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29282 (int)((char *)tl - (char *)reply->tag_list);
29283 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29284 index 43beaca..4a5b1dd 100644
29285 --- a/drivers/block/drbd/drbd_receiver.c
29286 +++ b/drivers/block/drbd/drbd_receiver.c
29287 @@ -894,7 +894,7 @@ retry:
29288 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29289 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29290
29291 - atomic_set(&mdev->packet_seq, 0);
29292 + atomic_set_unchecked(&mdev->packet_seq, 0);
29293 mdev->peer_seq = 0;
29294
29295 drbd_thread_start(&mdev->asender);
29296 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29297 do {
29298 next_epoch = NULL;
29299
29300 - epoch_size = atomic_read(&epoch->epoch_size);
29301 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29302
29303 switch (ev & ~EV_CLEANUP) {
29304 case EV_PUT:
29305 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29306 rv = FE_DESTROYED;
29307 } else {
29308 epoch->flags = 0;
29309 - atomic_set(&epoch->epoch_size, 0);
29310 + atomic_set_unchecked(&epoch->epoch_size, 0);
29311 /* atomic_set(&epoch->active, 0); is already zero */
29312 if (rv == FE_STILL_LIVE)
29313 rv = FE_RECYCLED;
29314 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29315 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29316 drbd_flush(mdev);
29317
29318 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29319 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29320 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29321 if (epoch)
29322 break;
29323 }
29324
29325 epoch = mdev->current_epoch;
29326 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29327 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29328
29329 D_ASSERT(atomic_read(&epoch->active) == 0);
29330 D_ASSERT(epoch->flags == 0);
29331 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29332 }
29333
29334 epoch->flags = 0;
29335 - atomic_set(&epoch->epoch_size, 0);
29336 + atomic_set_unchecked(&epoch->epoch_size, 0);
29337 atomic_set(&epoch->active, 0);
29338
29339 spin_lock(&mdev->epoch_lock);
29340 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29341 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29342 list_add(&epoch->list, &mdev->current_epoch->list);
29343 mdev->current_epoch = epoch;
29344 mdev->epochs++;
29345 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29346 spin_unlock(&mdev->peer_seq_lock);
29347
29348 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29349 - atomic_inc(&mdev->current_epoch->epoch_size);
29350 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29351 return drbd_drain_block(mdev, data_size);
29352 }
29353
29354 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29355
29356 spin_lock(&mdev->epoch_lock);
29357 e->epoch = mdev->current_epoch;
29358 - atomic_inc(&e->epoch->epoch_size);
29359 + atomic_inc_unchecked(&e->epoch->epoch_size);
29360 atomic_inc(&e->epoch->active);
29361 spin_unlock(&mdev->epoch_lock);
29362
29363 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29364 D_ASSERT(list_empty(&mdev->done_ee));
29365
29366 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29367 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29368 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29369 D_ASSERT(list_empty(&mdev->current_epoch->list));
29370 }
29371
29372 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29373 index bbca966..65e37dd 100644
29374 --- a/drivers/block/loop.c
29375 +++ b/drivers/block/loop.c
29376 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29377 mm_segment_t old_fs = get_fs();
29378
29379 set_fs(get_ds());
29380 - bw = file->f_op->write(file, buf, len, &pos);
29381 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29382 set_fs(old_fs);
29383 if (likely(bw == len))
29384 return 0;
29385 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29386 index ee94686..3e09ad3 100644
29387 --- a/drivers/char/Kconfig
29388 +++ b/drivers/char/Kconfig
29389 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29390
29391 config DEVKMEM
29392 bool "/dev/kmem virtual device support"
29393 - default y
29394 + default n
29395 + depends on !GRKERNSEC_KMEM
29396 help
29397 Say Y here if you want to support the /dev/kmem device. The
29398 /dev/kmem device is rarely used, but can be used for certain
29399 @@ -581,6 +582,7 @@ config DEVPORT
29400 bool
29401 depends on !M68K
29402 depends on ISA || PCI
29403 + depends on !GRKERNSEC_KMEM
29404 default y
29405
29406 source "drivers/s390/char/Kconfig"
29407 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29408 index 2e04433..22afc64 100644
29409 --- a/drivers/char/agp/frontend.c
29410 +++ b/drivers/char/agp/frontend.c
29411 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29412 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29413 return -EFAULT;
29414
29415 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29416 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29417 return -EFAULT;
29418
29419 client = agp_find_client_by_pid(reserve.pid);
29420 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29421 index 21cb980..f15107c 100644
29422 --- a/drivers/char/genrtc.c
29423 +++ b/drivers/char/genrtc.c
29424 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29425 switch (cmd) {
29426
29427 case RTC_PLL_GET:
29428 + memset(&pll, 0, sizeof(pll));
29429 if (get_rtc_pll(&pll))
29430 return -EINVAL;
29431 else
29432 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29433 index dfd7876..c0b0885 100644
29434 --- a/drivers/char/hpet.c
29435 +++ b/drivers/char/hpet.c
29436 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29437 }
29438
29439 static int
29440 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29441 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29442 struct hpet_info *info)
29443 {
29444 struct hpet_timer __iomem *timer;
29445 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29446 index 2c29942..604c5ba 100644
29447 --- a/drivers/char/ipmi/ipmi_msghandler.c
29448 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29449 @@ -420,7 +420,7 @@ struct ipmi_smi {
29450 struct proc_dir_entry *proc_dir;
29451 char proc_dir_name[10];
29452
29453 - atomic_t stats[IPMI_NUM_STATS];
29454 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29455
29456 /*
29457 * run_to_completion duplicate of smb_info, smi_info
29458 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29459
29460
29461 #define ipmi_inc_stat(intf, stat) \
29462 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29463 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29464 #define ipmi_get_stat(intf, stat) \
29465 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29466 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29467
29468 static int is_lan_addr(struct ipmi_addr *addr)
29469 {
29470 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29471 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29472 init_waitqueue_head(&intf->waitq);
29473 for (i = 0; i < IPMI_NUM_STATS; i++)
29474 - atomic_set(&intf->stats[i], 0);
29475 + atomic_set_unchecked(&intf->stats[i], 0);
29476
29477 intf->proc_dir = NULL;
29478
29479 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29480 index 1e638ff..a869ef5 100644
29481 --- a/drivers/char/ipmi/ipmi_si_intf.c
29482 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29483 @@ -275,7 +275,7 @@ struct smi_info {
29484 unsigned char slave_addr;
29485
29486 /* Counters and things for the proc filesystem. */
29487 - atomic_t stats[SI_NUM_STATS];
29488 + atomic_unchecked_t stats[SI_NUM_STATS];
29489
29490 struct task_struct *thread;
29491
29492 @@ -284,9 +284,9 @@ struct smi_info {
29493 };
29494
29495 #define smi_inc_stat(smi, stat) \
29496 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29497 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29498 #define smi_get_stat(smi, stat) \
29499 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29500 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29501
29502 #define SI_MAX_PARMS 4
29503
29504 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29505 atomic_set(&new_smi->req_events, 0);
29506 new_smi->run_to_completion = 0;
29507 for (i = 0; i < SI_NUM_STATS; i++)
29508 - atomic_set(&new_smi->stats[i], 0);
29509 + atomic_set_unchecked(&new_smi->stats[i], 0);
29510
29511 new_smi->interrupt_disabled = 1;
29512 atomic_set(&new_smi->stop_operation, 0);
29513 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29514 index 47ff7e4..0c7d340 100644
29515 --- a/drivers/char/mbcs.c
29516 +++ b/drivers/char/mbcs.c
29517 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29518 return 0;
29519 }
29520
29521 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29522 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29523 {
29524 .part_num = MBCS_PART_NUM,
29525 .mfg_num = MBCS_MFG_NUM,
29526 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29527 index d6e9d08..0c314bf 100644
29528 --- a/drivers/char/mem.c
29529 +++ b/drivers/char/mem.c
29530 @@ -18,6 +18,7 @@
29531 #include <linux/raw.h>
29532 #include <linux/tty.h>
29533 #include <linux/capability.h>
29534 +#include <linux/security.h>
29535 #include <linux/ptrace.h>
29536 #include <linux/device.h>
29537 #include <linux/highmem.h>
29538 @@ -35,6 +36,10 @@
29539 # include <linux/efi.h>
29540 #endif
29541
29542 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29543 +extern const struct file_operations grsec_fops;
29544 +#endif
29545 +
29546 static inline unsigned long size_inside_page(unsigned long start,
29547 unsigned long size)
29548 {
29549 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29550
29551 while (cursor < to) {
29552 if (!devmem_is_allowed(pfn)) {
29553 +#ifdef CONFIG_GRKERNSEC_KMEM
29554 + gr_handle_mem_readwrite(from, to);
29555 +#else
29556 printk(KERN_INFO
29557 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29558 current->comm, from, to);
29559 +#endif
29560 return 0;
29561 }
29562 cursor += PAGE_SIZE;
29563 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29564 }
29565 return 1;
29566 }
29567 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29568 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29569 +{
29570 + return 0;
29571 +}
29572 #else
29573 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29574 {
29575 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29576
29577 while (count > 0) {
29578 unsigned long remaining;
29579 + char *temp;
29580
29581 sz = size_inside_page(p, count);
29582
29583 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29584 if (!ptr)
29585 return -EFAULT;
29586
29587 - remaining = copy_to_user(buf, ptr, sz);
29588 +#ifdef CONFIG_PAX_USERCOPY
29589 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29590 + if (!temp) {
29591 + unxlate_dev_mem_ptr(p, ptr);
29592 + return -ENOMEM;
29593 + }
29594 + memcpy(temp, ptr, sz);
29595 +#else
29596 + temp = ptr;
29597 +#endif
29598 +
29599 + remaining = copy_to_user(buf, temp, sz);
29600 +
29601 +#ifdef CONFIG_PAX_USERCOPY
29602 + kfree(temp);
29603 +#endif
29604 +
29605 unxlate_dev_mem_ptr(p, ptr);
29606 if (remaining)
29607 return -EFAULT;
29608 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29609 size_t count, loff_t *ppos)
29610 {
29611 unsigned long p = *ppos;
29612 - ssize_t low_count, read, sz;
29613 + ssize_t low_count, read, sz, err = 0;
29614 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29615 - int err = 0;
29616
29617 read = 0;
29618 if (p < (unsigned long) high_memory) {
29619 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29620 }
29621 #endif
29622 while (low_count > 0) {
29623 + char *temp;
29624 +
29625 sz = size_inside_page(p, low_count);
29626
29627 /*
29628 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29629 */
29630 kbuf = xlate_dev_kmem_ptr((char *)p);
29631
29632 - if (copy_to_user(buf, kbuf, sz))
29633 +#ifdef CONFIG_PAX_USERCOPY
29634 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29635 + if (!temp)
29636 + return -ENOMEM;
29637 + memcpy(temp, kbuf, sz);
29638 +#else
29639 + temp = kbuf;
29640 +#endif
29641 +
29642 + err = copy_to_user(buf, temp, sz);
29643 +
29644 +#ifdef CONFIG_PAX_USERCOPY
29645 + kfree(temp);
29646 +#endif
29647 +
29648 + if (err)
29649 return -EFAULT;
29650 buf += sz;
29651 p += sz;
29652 @@ -867,6 +914,9 @@ static const struct memdev {
29653 #ifdef CONFIG_CRASH_DUMP
29654 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29655 #endif
29656 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29657 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29658 +#endif
29659 };
29660
29661 static int memory_open(struct inode *inode, struct file *filp)
29662 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29663 index 9df78e2..01ba9ae 100644
29664 --- a/drivers/char/nvram.c
29665 +++ b/drivers/char/nvram.c
29666 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29667
29668 spin_unlock_irq(&rtc_lock);
29669
29670 - if (copy_to_user(buf, contents, tmp - contents))
29671 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29672 return -EFAULT;
29673
29674 *ppos = i;
29675 diff --git a/drivers/char/random.c b/drivers/char/random.c
29676 index 4ec04a7..9918387 100644
29677 --- a/drivers/char/random.c
29678 +++ b/drivers/char/random.c
29679 @@ -261,8 +261,13 @@
29680 /*
29681 * Configuration information
29682 */
29683 +#ifdef CONFIG_GRKERNSEC_RANDNET
29684 +#define INPUT_POOL_WORDS 512
29685 +#define OUTPUT_POOL_WORDS 128
29686 +#else
29687 #define INPUT_POOL_WORDS 128
29688 #define OUTPUT_POOL_WORDS 32
29689 +#endif
29690 #define SEC_XFER_SIZE 512
29691 #define EXTRACT_SIZE 10
29692
29693 @@ -300,10 +305,17 @@ static struct poolinfo {
29694 int poolwords;
29695 int tap1, tap2, tap3, tap4, tap5;
29696 } poolinfo_table[] = {
29697 +#ifdef CONFIG_GRKERNSEC_RANDNET
29698 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29699 + { 512, 411, 308, 208, 104, 1 },
29700 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29701 + { 128, 103, 76, 51, 25, 1 },
29702 +#else
29703 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29704 { 128, 103, 76, 51, 25, 1 },
29705 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29706 { 32, 26, 20, 14, 7, 1 },
29707 +#endif
29708 #if 0
29709 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29710 { 2048, 1638, 1231, 819, 411, 1 },
29711 @@ -726,6 +738,17 @@ void add_disk_randomness(struct gendisk *disk)
29712 }
29713 #endif
29714
29715 +#ifdef CONFIG_PAX_LATENT_ENTROPY
29716 +u64 latent_entropy;
29717 +
29718 +__init void transfer_latent_entropy(void)
29719 +{
29720 + mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
29721 + mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
29722 +// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29723 +}
29724 +#endif
29725 +
29726 /*********************************************************************
29727 *
29728 * Entropy extraction routines
29729 @@ -913,7 +936,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29730
29731 extract_buf(r, tmp);
29732 i = min_t(int, nbytes, EXTRACT_SIZE);
29733 - if (copy_to_user(buf, tmp, i)) {
29734 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29735 ret = -EFAULT;
29736 break;
29737 }
29738 @@ -1238,7 +1261,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29739 #include <linux/sysctl.h>
29740
29741 static int min_read_thresh = 8, min_write_thresh;
29742 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29743 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29744 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29745 static char sysctl_bootid[16];
29746
29747 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29748 index 45713f0..8286d21 100644
29749 --- a/drivers/char/sonypi.c
29750 +++ b/drivers/char/sonypi.c
29751 @@ -54,6 +54,7 @@
29752
29753 #include <asm/uaccess.h>
29754 #include <asm/io.h>
29755 +#include <asm/local.h>
29756
29757 #include <linux/sonypi.h>
29758
29759 @@ -490,7 +491,7 @@ static struct sonypi_device {
29760 spinlock_t fifo_lock;
29761 wait_queue_head_t fifo_proc_list;
29762 struct fasync_struct *fifo_async;
29763 - int open_count;
29764 + local_t open_count;
29765 int model;
29766 struct input_dev *input_jog_dev;
29767 struct input_dev *input_key_dev;
29768 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29769 static int sonypi_misc_release(struct inode *inode, struct file *file)
29770 {
29771 mutex_lock(&sonypi_device.lock);
29772 - sonypi_device.open_count--;
29773 + local_dec(&sonypi_device.open_count);
29774 mutex_unlock(&sonypi_device.lock);
29775 return 0;
29776 }
29777 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29778 {
29779 mutex_lock(&sonypi_device.lock);
29780 /* Flush input queue on first open */
29781 - if (!sonypi_device.open_count)
29782 + if (!local_read(&sonypi_device.open_count))
29783 kfifo_reset(&sonypi_device.fifo);
29784 - sonypi_device.open_count++;
29785 + local_inc(&sonypi_device.open_count);
29786 mutex_unlock(&sonypi_device.lock);
29787
29788 return 0;
29789 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29790 index ad7c732..5aa8054 100644
29791 --- a/drivers/char/tpm/tpm.c
29792 +++ b/drivers/char/tpm/tpm.c
29793 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29794 chip->vendor.req_complete_val)
29795 goto out_recv;
29796
29797 - if ((status == chip->vendor.req_canceled)) {
29798 + if (status == chip->vendor.req_canceled) {
29799 dev_err(chip->dev, "Operation Canceled\n");
29800 rc = -ECANCELED;
29801 goto out;
29802 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29803 index 0636520..169c1d0 100644
29804 --- a/drivers/char/tpm/tpm_bios.c
29805 +++ b/drivers/char/tpm/tpm_bios.c
29806 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29807 event = addr;
29808
29809 if ((event->event_type == 0 && event->event_size == 0) ||
29810 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29811 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29812 return NULL;
29813
29814 return addr;
29815 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29816 return NULL;
29817
29818 if ((event->event_type == 0 && event->event_size == 0) ||
29819 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29820 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29821 return NULL;
29822
29823 (*pos)++;
29824 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29825 int i;
29826
29827 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29828 - seq_putc(m, data[i]);
29829 + if (!seq_putc(m, data[i]))
29830 + return -EFAULT;
29831
29832 return 0;
29833 }
29834 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29835 log->bios_event_log_end = log->bios_event_log + len;
29836
29837 virt = acpi_os_map_memory(start, len);
29838 + if (!virt) {
29839 + kfree(log->bios_event_log);
29840 + log->bios_event_log = NULL;
29841 + return -EFAULT;
29842 + }
29843
29844 - memcpy(log->bios_event_log, virt, len);
29845 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29846
29847 acpi_os_unmap_memory(virt, len);
29848 return 0;
29849 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29850 index cdf2f54..e55c197 100644
29851 --- a/drivers/char/virtio_console.c
29852 +++ b/drivers/char/virtio_console.c
29853 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29854 if (to_user) {
29855 ssize_t ret;
29856
29857 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29858 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29859 if (ret)
29860 return -EFAULT;
29861 } else {
29862 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29863 if (!port_has_data(port) && !port->host_connected)
29864 return 0;
29865
29866 - return fill_readbuf(port, ubuf, count, true);
29867 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29868 }
29869
29870 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29871 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29872 index 97f5064..202b6e6 100644
29873 --- a/drivers/edac/edac_pci_sysfs.c
29874 +++ b/drivers/edac/edac_pci_sysfs.c
29875 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29876 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29877 static int edac_pci_poll_msec = 1000; /* one second workq period */
29878
29879 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29880 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29881 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29882 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29883
29884 static struct kobject *edac_pci_top_main_kobj;
29885 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29886 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29887 edac_printk(KERN_CRIT, EDAC_PCI,
29888 "Signaled System Error on %s\n",
29889 pci_name(dev));
29890 - atomic_inc(&pci_nonparity_count);
29891 + atomic_inc_unchecked(&pci_nonparity_count);
29892 }
29893
29894 if (status & (PCI_STATUS_PARITY)) {
29895 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29896 "Master Data Parity Error on %s\n",
29897 pci_name(dev));
29898
29899 - atomic_inc(&pci_parity_count);
29900 + atomic_inc_unchecked(&pci_parity_count);
29901 }
29902
29903 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29904 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29905 "Detected Parity Error on %s\n",
29906 pci_name(dev));
29907
29908 - atomic_inc(&pci_parity_count);
29909 + atomic_inc_unchecked(&pci_parity_count);
29910 }
29911 }
29912
29913 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29914 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29915 "Signaled System Error on %s\n",
29916 pci_name(dev));
29917 - atomic_inc(&pci_nonparity_count);
29918 + atomic_inc_unchecked(&pci_nonparity_count);
29919 }
29920
29921 if (status & (PCI_STATUS_PARITY)) {
29922 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29923 "Master Data Parity Error on "
29924 "%s\n", pci_name(dev));
29925
29926 - atomic_inc(&pci_parity_count);
29927 + atomic_inc_unchecked(&pci_parity_count);
29928 }
29929
29930 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29931 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29932 "Detected Parity Error on %s\n",
29933 pci_name(dev));
29934
29935 - atomic_inc(&pci_parity_count);
29936 + atomic_inc_unchecked(&pci_parity_count);
29937 }
29938 }
29939 }
29940 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29941 if (!check_pci_errors)
29942 return;
29943
29944 - before_count = atomic_read(&pci_parity_count);
29945 + before_count = atomic_read_unchecked(&pci_parity_count);
29946
29947 /* scan all PCI devices looking for a Parity Error on devices and
29948 * bridges.
29949 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29950 /* Only if operator has selected panic on PCI Error */
29951 if (edac_pci_get_panic_on_pe()) {
29952 /* If the count is different 'after' from 'before' */
29953 - if (before_count != atomic_read(&pci_parity_count))
29954 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29955 panic("EDAC: PCI Parity Error");
29956 }
29957 }
29958 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29959 index c6074c5..88a9e2e 100644
29960 --- a/drivers/edac/mce_amd.h
29961 +++ b/drivers/edac/mce_amd.h
29962 @@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29963 struct amd_decoder_ops {
29964 bool (*dc_mce)(u16, u8);
29965 bool (*ic_mce)(u16, u8);
29966 -};
29967 +} __no_const;
29968
29969 void amd_report_gart_errors(bool);
29970 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29971 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29972 index cc595eb..4ec702a 100644
29973 --- a/drivers/firewire/core-card.c
29974 +++ b/drivers/firewire/core-card.c
29975 @@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29976
29977 void fw_core_remove_card(struct fw_card *card)
29978 {
29979 - struct fw_card_driver dummy_driver = dummy_driver_template;
29980 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29981
29982 card->driver->update_phy_reg(card, 4,
29983 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29984 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29985 index 2e6b245..c3857d9 100644
29986 --- a/drivers/firewire/core-cdev.c
29987 +++ b/drivers/firewire/core-cdev.c
29988 @@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29989 int ret;
29990
29991 if ((request->channels == 0 && request->bandwidth == 0) ||
29992 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29993 - request->bandwidth < 0)
29994 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29995 return -EINVAL;
29996
29997 r = kmalloc(sizeof(*r), GFP_KERNEL);
29998 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29999 index dea2dcc..a4fb978 100644
30000 --- a/drivers/firewire/core-transaction.c
30001 +++ b/drivers/firewire/core-transaction.c
30002 @@ -37,6 +37,7 @@
30003 #include <linux/timer.h>
30004 #include <linux/types.h>
30005 #include <linux/workqueue.h>
30006 +#include <linux/sched.h>
30007
30008 #include <asm/byteorder.h>
30009
30010 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30011 index 9047f55..e47c7ff 100644
30012 --- a/drivers/firewire/core.h
30013 +++ b/drivers/firewire/core.h
30014 @@ -110,6 +110,7 @@ struct fw_card_driver {
30015
30016 int (*stop_iso)(struct fw_iso_context *ctx);
30017 };
30018 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30019
30020 void fw_card_initialize(struct fw_card *card,
30021 const struct fw_card_driver *driver, struct device *device);
30022 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30023 index 153980b..4b4d046 100644
30024 --- a/drivers/firmware/dmi_scan.c
30025 +++ b/drivers/firmware/dmi_scan.c
30026 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30027 }
30028 }
30029 else {
30030 - /*
30031 - * no iounmap() for that ioremap(); it would be a no-op, but
30032 - * it's so early in setup that sucker gets confused into doing
30033 - * what it shouldn't if we actually call it.
30034 - */
30035 p = dmi_ioremap(0xF0000, 0x10000);
30036 if (p == NULL)
30037 goto error;
30038 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30039 if (buf == NULL)
30040 return -1;
30041
30042 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30043 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30044
30045 iounmap(buf);
30046 return 0;
30047 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30048 index 82d5c20..44a7177 100644
30049 --- a/drivers/gpio/gpio-vr41xx.c
30050 +++ b/drivers/gpio/gpio-vr41xx.c
30051 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30052 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30053 maskl, pendl, maskh, pendh);
30054
30055 - atomic_inc(&irq_err_count);
30056 + atomic_inc_unchecked(&irq_err_count);
30057
30058 return -EINVAL;
30059 }
30060 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30061 index 8111889..367b253 100644
30062 --- a/drivers/gpu/drm/drm_crtc_helper.c
30063 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30064 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30065 struct drm_crtc *tmp;
30066 int crtc_mask = 1;
30067
30068 - WARN(!crtc, "checking null crtc?\n");
30069 + BUG_ON(!crtc);
30070
30071 dev = crtc->dev;
30072
30073 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30074 index 6116e3b..c29dd16 100644
30075 --- a/drivers/gpu/drm/drm_drv.c
30076 +++ b/drivers/gpu/drm/drm_drv.c
30077 @@ -316,7 +316,7 @@ module_exit(drm_core_exit);
30078 /**
30079 * Copy and IOCTL return string to user space
30080 */
30081 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30082 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30083 {
30084 int len;
30085
30086 @@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
30087 return -ENODEV;
30088
30089 atomic_inc(&dev->ioctl_count);
30090 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30091 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30092 ++file_priv->ioctl_count;
30093
30094 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30095 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30096 index 123de28..43a0897 100644
30097 --- a/drivers/gpu/drm/drm_fops.c
30098 +++ b/drivers/gpu/drm/drm_fops.c
30099 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30100 }
30101
30102 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30103 - atomic_set(&dev->counts[i], 0);
30104 + atomic_set_unchecked(&dev->counts[i], 0);
30105
30106 dev->sigdata.lock = NULL;
30107
30108 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30109
30110 retcode = drm_open_helper(inode, filp, dev);
30111 if (!retcode) {
30112 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30113 - if (!dev->open_count++)
30114 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30115 + if (local_inc_return(&dev->open_count) == 1)
30116 retcode = drm_setup(dev);
30117 }
30118 if (!retcode) {
30119 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30120
30121 mutex_lock(&drm_global_mutex);
30122
30123 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30124 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30125
30126 if (dev->driver->preclose)
30127 dev->driver->preclose(dev, file_priv);
30128 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30129 * Begin inline drm_release
30130 */
30131
30132 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30133 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30134 task_pid_nr(current),
30135 (long)old_encode_dev(file_priv->minor->device),
30136 - dev->open_count);
30137 + local_read(&dev->open_count));
30138
30139 /* Release any auth tokens that might point to this file_priv,
30140 (do that under the drm_global_mutex) */
30141 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30142 * End inline drm_release
30143 */
30144
30145 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30146 - if (!--dev->open_count) {
30147 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30148 + if (local_dec_and_test(&dev->open_count)) {
30149 if (atomic_read(&dev->ioctl_count)) {
30150 DRM_ERROR("Device busy: %d\n",
30151 atomic_read(&dev->ioctl_count));
30152 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30153 index c87dc96..326055d 100644
30154 --- a/drivers/gpu/drm/drm_global.c
30155 +++ b/drivers/gpu/drm/drm_global.c
30156 @@ -36,7 +36,7 @@
30157 struct drm_global_item {
30158 struct mutex mutex;
30159 void *object;
30160 - int refcount;
30161 + atomic_t refcount;
30162 };
30163
30164 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30165 @@ -49,7 +49,7 @@ void drm_global_init(void)
30166 struct drm_global_item *item = &glob[i];
30167 mutex_init(&item->mutex);
30168 item->object = NULL;
30169 - item->refcount = 0;
30170 + atomic_set(&item->refcount, 0);
30171 }
30172 }
30173
30174 @@ -59,7 +59,7 @@ void drm_global_release(void)
30175 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30176 struct drm_global_item *item = &glob[i];
30177 BUG_ON(item->object != NULL);
30178 - BUG_ON(item->refcount != 0);
30179 + BUG_ON(atomic_read(&item->refcount) != 0);
30180 }
30181 }
30182
30183 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30184 void *object;
30185
30186 mutex_lock(&item->mutex);
30187 - if (item->refcount == 0) {
30188 + if (atomic_read(&item->refcount) == 0) {
30189 item->object = kzalloc(ref->size, GFP_KERNEL);
30190 if (unlikely(item->object == NULL)) {
30191 ret = -ENOMEM;
30192 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30193 goto out_err;
30194
30195 }
30196 - ++item->refcount;
30197 + atomic_inc(&item->refcount);
30198 ref->object = item->object;
30199 object = item->object;
30200 mutex_unlock(&item->mutex);
30201 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30202 struct drm_global_item *item = &glob[ref->global_type];
30203
30204 mutex_lock(&item->mutex);
30205 - BUG_ON(item->refcount == 0);
30206 + BUG_ON(atomic_read(&item->refcount) == 0);
30207 BUG_ON(ref->object != item->object);
30208 - if (--item->refcount == 0) {
30209 + if (atomic_dec_and_test(&item->refcount)) {
30210 ref->release(ref);
30211 item->object = NULL;
30212 }
30213 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30214 index ab1162d..42587b2 100644
30215 --- a/drivers/gpu/drm/drm_info.c
30216 +++ b/drivers/gpu/drm/drm_info.c
30217 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30218 struct drm_local_map *map;
30219 struct drm_map_list *r_list;
30220
30221 - /* Hardcoded from _DRM_FRAME_BUFFER,
30222 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30223 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30224 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30225 + static const char * const types[] = {
30226 + [_DRM_FRAME_BUFFER] = "FB",
30227 + [_DRM_REGISTERS] = "REG",
30228 + [_DRM_SHM] = "SHM",
30229 + [_DRM_AGP] = "AGP",
30230 + [_DRM_SCATTER_GATHER] = "SG",
30231 + [_DRM_CONSISTENT] = "PCI",
30232 + [_DRM_GEM] = "GEM" };
30233 const char *type;
30234 int i;
30235
30236 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30237 map = r_list->map;
30238 if (!map)
30239 continue;
30240 - if (map->type < 0 || map->type > 5)
30241 + if (map->type >= ARRAY_SIZE(types))
30242 type = "??";
30243 else
30244 type = types[map->type];
30245 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30246 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30247 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30248 vma->vm_flags & VM_IO ? 'i' : '-',
30249 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30250 + 0);
30251 +#else
30252 vma->vm_pgoff);
30253 +#endif
30254
30255 #if defined(__i386__)
30256 pgprot = pgprot_val(vma->vm_page_prot);
30257 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30258 index 637fcc3..e890b33 100644
30259 --- a/drivers/gpu/drm/drm_ioc32.c
30260 +++ b/drivers/gpu/drm/drm_ioc32.c
30261 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30262 request = compat_alloc_user_space(nbytes);
30263 if (!access_ok(VERIFY_WRITE, request, nbytes))
30264 return -EFAULT;
30265 - list = (struct drm_buf_desc *) (request + 1);
30266 + list = (struct drm_buf_desc __user *) (request + 1);
30267
30268 if (__put_user(count, &request->count)
30269 || __put_user(list, &request->list))
30270 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30271 request = compat_alloc_user_space(nbytes);
30272 if (!access_ok(VERIFY_WRITE, request, nbytes))
30273 return -EFAULT;
30274 - list = (struct drm_buf_pub *) (request + 1);
30275 + list = (struct drm_buf_pub __user *) (request + 1);
30276
30277 if (__put_user(count, &request->count)
30278 || __put_user(list, &request->list))
30279 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30280 index cf85155..f2665cb 100644
30281 --- a/drivers/gpu/drm/drm_ioctl.c
30282 +++ b/drivers/gpu/drm/drm_ioctl.c
30283 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30284 stats->data[i].value =
30285 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30286 else
30287 - stats->data[i].value = atomic_read(&dev->counts[i]);
30288 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30289 stats->data[i].type = dev->types[i];
30290 }
30291
30292 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30293 index c79c713..2048588 100644
30294 --- a/drivers/gpu/drm/drm_lock.c
30295 +++ b/drivers/gpu/drm/drm_lock.c
30296 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30297 if (drm_lock_take(&master->lock, lock->context)) {
30298 master->lock.file_priv = file_priv;
30299 master->lock.lock_time = jiffies;
30300 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30301 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30302 break; /* Got lock */
30303 }
30304
30305 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30306 return -EINVAL;
30307 }
30308
30309 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30310 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30311
30312 if (drm_lock_free(&master->lock, lock->context)) {
30313 /* FIXME: Should really bail out here. */
30314 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30315 index aa454f8..6d38580 100644
30316 --- a/drivers/gpu/drm/drm_stub.c
30317 +++ b/drivers/gpu/drm/drm_stub.c
30318 @@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30319
30320 drm_device_set_unplugged(dev);
30321
30322 - if (dev->open_count == 0) {
30323 + if (local_read(&dev->open_count) == 0) {
30324 drm_put_dev(dev);
30325 }
30326 mutex_unlock(&drm_global_mutex);
30327 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30328 index f920fb5..001c52d 100644
30329 --- a/drivers/gpu/drm/i810/i810_dma.c
30330 +++ b/drivers/gpu/drm/i810/i810_dma.c
30331 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30332 dma->buflist[vertex->idx],
30333 vertex->discard, vertex->used);
30334
30335 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30336 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30337 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30338 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30339 sarea_priv->last_enqueue = dev_priv->counter - 1;
30340 sarea_priv->last_dispatch = (int)hw_status[5];
30341
30342 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30343 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30344 mc->last_render);
30345
30346 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30347 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30348 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30349 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30350 sarea_priv->last_enqueue = dev_priv->counter - 1;
30351 sarea_priv->last_dispatch = (int)hw_status[5];
30352
30353 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30354 index c9339f4..f5e1b9d 100644
30355 --- a/drivers/gpu/drm/i810/i810_drv.h
30356 +++ b/drivers/gpu/drm/i810/i810_drv.h
30357 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30358 int page_flipping;
30359
30360 wait_queue_head_t irq_queue;
30361 - atomic_t irq_received;
30362 - atomic_t irq_emitted;
30363 + atomic_unchecked_t irq_received;
30364 + atomic_unchecked_t irq_emitted;
30365
30366 int front_offset;
30367 } drm_i810_private_t;
30368 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30369 index e6162a1..b2ff486 100644
30370 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30371 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30372 @@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30373 I915_READ(GTIMR));
30374 }
30375 seq_printf(m, "Interrupts received: %d\n",
30376 - atomic_read(&dev_priv->irq_received));
30377 + atomic_read_unchecked(&dev_priv->irq_received));
30378 for (i = 0; i < I915_NUM_RINGS; i++) {
30379 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30380 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30381 @@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30382 return ret;
30383
30384 if (opregion->header)
30385 - seq_write(m, opregion->header, OPREGION_SIZE);
30386 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30387
30388 mutex_unlock(&dev->struct_mutex);
30389
30390 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30391 index ba60f3c..e2dff7f 100644
30392 --- a/drivers/gpu/drm/i915/i915_dma.c
30393 +++ b/drivers/gpu/drm/i915/i915_dma.c
30394 @@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30395 bool can_switch;
30396
30397 spin_lock(&dev->count_lock);
30398 - can_switch = (dev->open_count == 0);
30399 + can_switch = (local_read(&dev->open_count) == 0);
30400 spin_unlock(&dev->count_lock);
30401 return can_switch;
30402 }
30403 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30404 index 5fabc6c..0b08aa1 100644
30405 --- a/drivers/gpu/drm/i915/i915_drv.h
30406 +++ b/drivers/gpu/drm/i915/i915_drv.h
30407 @@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30408 /* render clock increase/decrease */
30409 /* display clock increase/decrease */
30410 /* pll clock increase/decrease */
30411 -};
30412 +} __no_const;
30413
30414 struct intel_device_info {
30415 u8 gen;
30416 @@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30417 int current_page;
30418 int page_flipping;
30419
30420 - atomic_t irq_received;
30421 + atomic_unchecked_t irq_received;
30422
30423 /* protects the irq masks */
30424 spinlock_t irq_lock;
30425 @@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30426 * will be page flipped away on the next vblank. When it
30427 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30428 */
30429 - atomic_t pending_flip;
30430 + atomic_unchecked_t pending_flip;
30431 };
30432
30433 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30434 @@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30435 extern void intel_teardown_gmbus(struct drm_device *dev);
30436 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30437 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30438 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30439 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30440 {
30441 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30442 }
30443 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30444 index de43194..a14c4cc 100644
30445 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30446 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30447 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30448 i915_gem_clflush_object(obj);
30449
30450 if (obj->base.pending_write_domain)
30451 - cd->flips |= atomic_read(&obj->pending_flip);
30452 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30453
30454 /* The actual obj->write_domain will be updated with
30455 * pending_write_domain after we emit the accumulated flush for all
30456 @@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30457
30458 static int
30459 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30460 - int count)
30461 + unsigned int count)
30462 {
30463 - int i;
30464 + unsigned int i;
30465
30466 for (i = 0; i < count; i++) {
30467 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30468 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30469 index 26c67a7..8d4cbcb 100644
30470 --- a/drivers/gpu/drm/i915/i915_irq.c
30471 +++ b/drivers/gpu/drm/i915/i915_irq.c
30472 @@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30473 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30474 struct drm_i915_master_private *master_priv;
30475
30476 - atomic_inc(&dev_priv->irq_received);
30477 + atomic_inc_unchecked(&dev_priv->irq_received);
30478
30479 /* disable master interrupt before clearing iir */
30480 de_ier = I915_READ(DEIER);
30481 @@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30482 struct drm_i915_master_private *master_priv;
30483 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30484
30485 - atomic_inc(&dev_priv->irq_received);
30486 + atomic_inc_unchecked(&dev_priv->irq_received);
30487
30488 if (IS_GEN6(dev))
30489 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30490 @@ -1291,7 +1291,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30491 int ret = IRQ_NONE, pipe;
30492 bool blc_event = false;
30493
30494 - atomic_inc(&dev_priv->irq_received);
30495 + atomic_inc_unchecked(&dev_priv->irq_received);
30496
30497 iir = I915_READ(IIR);
30498
30499 @@ -1802,7 +1802,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30500 {
30501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30502
30503 - atomic_set(&dev_priv->irq_received, 0);
30504 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30505
30506 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30507 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30508 @@ -1979,7 +1979,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30509 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30510 int pipe;
30511
30512 - atomic_set(&dev_priv->irq_received, 0);
30513 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30514
30515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30517 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30518 index d4d162f..b49a04e 100644
30519 --- a/drivers/gpu/drm/i915/intel_display.c
30520 +++ b/drivers/gpu/drm/i915/intel_display.c
30521 @@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30522
30523 wait_event(dev_priv->pending_flip_queue,
30524 atomic_read(&dev_priv->mm.wedged) ||
30525 - atomic_read(&obj->pending_flip) == 0);
30526 + atomic_read_unchecked(&obj->pending_flip) == 0);
30527
30528 /* Big Hammer, we also need to ensure that any pending
30529 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30530 @@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30531 obj = to_intel_framebuffer(crtc->fb)->obj;
30532 dev_priv = crtc->dev->dev_private;
30533 wait_event(dev_priv->pending_flip_queue,
30534 - atomic_read(&obj->pending_flip) == 0);
30535 + atomic_read_unchecked(&obj->pending_flip) == 0);
30536 }
30537
30538 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30539 @@ -7284,9 +7284,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30540
30541 obj = work->old_fb_obj;
30542
30543 - atomic_clear_mask(1 << intel_crtc->plane,
30544 - &obj->pending_flip.counter);
30545 - if (atomic_read(&obj->pending_flip) == 0)
30546 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30547 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30548 wake_up(&dev_priv->pending_flip_queue);
30549
30550 schedule_work(&work->work);
30551 @@ -7582,7 +7581,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30552 /* Block clients from rendering to the new back buffer until
30553 * the flip occurs and the object is no longer visible.
30554 */
30555 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30556 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30557
30558 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30559 if (ret)
30560 @@ -7596,7 +7595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30561 return 0;
30562
30563 cleanup_pending:
30564 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30565 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30566 drm_gem_object_unreference(&work->old_fb_obj->base);
30567 drm_gem_object_unreference(&obj->base);
30568 mutex_unlock(&dev->struct_mutex);
30569 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30570 index 54558a0..2d97005 100644
30571 --- a/drivers/gpu/drm/mga/mga_drv.h
30572 +++ b/drivers/gpu/drm/mga/mga_drv.h
30573 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30574 u32 clear_cmd;
30575 u32 maccess;
30576
30577 - atomic_t vbl_received; /**< Number of vblanks received. */
30578 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30579 wait_queue_head_t fence_queue;
30580 - atomic_t last_fence_retired;
30581 + atomic_unchecked_t last_fence_retired;
30582 u32 next_fence_to_post;
30583
30584 unsigned int fb_cpp;
30585 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30586 index 2581202..f230a8d9 100644
30587 --- a/drivers/gpu/drm/mga/mga_irq.c
30588 +++ b/drivers/gpu/drm/mga/mga_irq.c
30589 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30590 if (crtc != 0)
30591 return 0;
30592
30593 - return atomic_read(&dev_priv->vbl_received);
30594 + return atomic_read_unchecked(&dev_priv->vbl_received);
30595 }
30596
30597
30598 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30599 /* VBLANK interrupt */
30600 if (status & MGA_VLINEPEN) {
30601 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30602 - atomic_inc(&dev_priv->vbl_received);
30603 + atomic_inc_unchecked(&dev_priv->vbl_received);
30604 drm_handle_vblank(dev, 0);
30605 handled = 1;
30606 }
30607 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30608 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30609 MGA_WRITE(MGA_PRIMEND, prim_end);
30610
30611 - atomic_inc(&dev_priv->last_fence_retired);
30612 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30613 DRM_WAKEUP(&dev_priv->fence_queue);
30614 handled = 1;
30615 }
30616 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30617 * using fences.
30618 */
30619 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30620 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30621 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30622 - *sequence) <= (1 << 23)));
30623
30624 *sequence = cur_fence;
30625 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30626 index 0be4a81..7464804 100644
30627 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30628 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30629 @@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30630 struct bit_table {
30631 const char id;
30632 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30633 -};
30634 +} __no_const;
30635
30636 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30637
30638 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30639 index 3aef353..0ad1322 100644
30640 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30641 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30642 @@ -240,7 +240,7 @@ struct nouveau_channel {
30643 struct list_head pending;
30644 uint32_t sequence;
30645 uint32_t sequence_ack;
30646 - atomic_t last_sequence_irq;
30647 + atomic_unchecked_t last_sequence_irq;
30648 struct nouveau_vma vma;
30649 } fence;
30650
30651 @@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30652 u32 handle, u16 class);
30653 void (*set_tile_region)(struct drm_device *dev, int i);
30654 void (*tlb_flush)(struct drm_device *, int engine);
30655 -};
30656 +} __no_const;
30657
30658 struct nouveau_instmem_engine {
30659 void *priv;
30660 @@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30661 struct nouveau_mc_engine {
30662 int (*init)(struct drm_device *dev);
30663 void (*takedown)(struct drm_device *dev);
30664 -};
30665 +} __no_const;
30666
30667 struct nouveau_timer_engine {
30668 int (*init)(struct drm_device *dev);
30669 void (*takedown)(struct drm_device *dev);
30670 uint64_t (*read)(struct drm_device *dev);
30671 -};
30672 +} __no_const;
30673
30674 struct nouveau_fb_engine {
30675 int num_tiles;
30676 @@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30677 void (*put)(struct drm_device *, struct nouveau_mem **);
30678
30679 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30680 -};
30681 +} __no_const;
30682
30683 struct nouveau_engine {
30684 struct nouveau_instmem_engine instmem;
30685 @@ -739,7 +739,7 @@ struct drm_nouveau_private {
30686 struct drm_global_reference mem_global_ref;
30687 struct ttm_bo_global_ref bo_global_ref;
30688 struct ttm_bo_device bdev;
30689 - atomic_t validate_sequence;
30690 + atomic_unchecked_t validate_sequence;
30691 } ttm;
30692
30693 struct {
30694 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30695 index c1dc20f..4df673c 100644
30696 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30697 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30698 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30699 if (USE_REFCNT(dev))
30700 sequence = nvchan_rd32(chan, 0x48);
30701 else
30702 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30703 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30704
30705 if (chan->fence.sequence_ack == sequence)
30706 goto out;
30707 @@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30708 return ret;
30709 }
30710
30711 - atomic_set(&chan->fence.last_sequence_irq, 0);
30712 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30713 return 0;
30714 }
30715
30716 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30717 index ed52a6f..484acdc 100644
30718 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30719 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30720 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30721 int trycnt = 0;
30722 int ret, i;
30723
30724 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30725 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30726 retry:
30727 if (++trycnt > 100000) {
30728 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30729 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30730 index c2a8511..4b996f9 100644
30731 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30732 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30733 @@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30734 bool can_switch;
30735
30736 spin_lock(&dev->count_lock);
30737 - can_switch = (dev->open_count == 0);
30738 + can_switch = (local_read(&dev->open_count) == 0);
30739 spin_unlock(&dev->count_lock);
30740 return can_switch;
30741 }
30742 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30743 index dbdea8e..cd6eeeb 100644
30744 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30745 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30746 @@ -554,7 +554,7 @@ static int
30747 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30748 u32 class, u32 mthd, u32 data)
30749 {
30750 - atomic_set(&chan->fence.last_sequence_irq, data);
30751 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30752 return 0;
30753 }
30754
30755 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30756 index 2746402..c8dc4a4 100644
30757 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30758 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30759 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30760 }
30761
30762 if (nv_encoder->dcb->type == OUTPUT_DP) {
30763 - struct dp_train_func func = {
30764 + static struct dp_train_func func = {
30765 .link_set = nv50_sor_dp_link_set,
30766 .train_set = nv50_sor_dp_train_set,
30767 .train_adj = nv50_sor_dp_train_adj
30768 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30769 index 0247250..d2f6aaf 100644
30770 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30771 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30772 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30773 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30774
30775 if (nv_encoder->dcb->type == OUTPUT_DP) {
30776 - struct dp_train_func func = {
30777 + static struct dp_train_func func = {
30778 .link_set = nvd0_sor_dp_link_set,
30779 .train_set = nvd0_sor_dp_train_set,
30780 .train_adj = nvd0_sor_dp_train_adj
30781 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30782 index bcac90b..53bfc76 100644
30783 --- a/drivers/gpu/drm/r128/r128_cce.c
30784 +++ b/drivers/gpu/drm/r128/r128_cce.c
30785 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30786
30787 /* GH: Simple idle check.
30788 */
30789 - atomic_set(&dev_priv->idle_count, 0);
30790 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30791
30792 /* We don't support anything other than bus-mastering ring mode,
30793 * but the ring can be in either AGP or PCI space for the ring
30794 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30795 index 930c71b..499aded 100644
30796 --- a/drivers/gpu/drm/r128/r128_drv.h
30797 +++ b/drivers/gpu/drm/r128/r128_drv.h
30798 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30799 int is_pci;
30800 unsigned long cce_buffers_offset;
30801
30802 - atomic_t idle_count;
30803 + atomic_unchecked_t idle_count;
30804
30805 int page_flipping;
30806 int current_page;
30807 u32 crtc_offset;
30808 u32 crtc_offset_cntl;
30809
30810 - atomic_t vbl_received;
30811 + atomic_unchecked_t vbl_received;
30812
30813 u32 color_fmt;
30814 unsigned int front_offset;
30815 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30816 index 429d5a0..7e899ed 100644
30817 --- a/drivers/gpu/drm/r128/r128_irq.c
30818 +++ b/drivers/gpu/drm/r128/r128_irq.c
30819 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30820 if (crtc != 0)
30821 return 0;
30822
30823 - return atomic_read(&dev_priv->vbl_received);
30824 + return atomic_read_unchecked(&dev_priv->vbl_received);
30825 }
30826
30827 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30828 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30829 /* VBLANK interrupt */
30830 if (status & R128_CRTC_VBLANK_INT) {
30831 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30832 - atomic_inc(&dev_priv->vbl_received);
30833 + atomic_inc_unchecked(&dev_priv->vbl_received);
30834 drm_handle_vblank(dev, 0);
30835 return IRQ_HANDLED;
30836 }
30837 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30838 index a9e33ce..09edd4b 100644
30839 --- a/drivers/gpu/drm/r128/r128_state.c
30840 +++ b/drivers/gpu/drm/r128/r128_state.c
30841 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30842
30843 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30844 {
30845 - if (atomic_read(&dev_priv->idle_count) == 0)
30846 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30847 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30848 else
30849 - atomic_set(&dev_priv->idle_count, 0);
30850 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30851 }
30852
30853 #endif
30854 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30855 index 5a82b6b..9e69c73 100644
30856 --- a/drivers/gpu/drm/radeon/mkregtable.c
30857 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30858 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30859 regex_t mask_rex;
30860 regmatch_t match[4];
30861 char buf[1024];
30862 - size_t end;
30863 + long end;
30864 int len;
30865 int done = 0;
30866 int r;
30867 unsigned o;
30868 struct offset *offset;
30869 char last_reg_s[10];
30870 - int last_reg;
30871 + unsigned long last_reg;
30872
30873 if (regcomp
30874 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30875 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30876 index 138b952..d74f9cb 100644
30877 --- a/drivers/gpu/drm/radeon/radeon.h
30878 +++ b/drivers/gpu/drm/radeon/radeon.h
30879 @@ -253,7 +253,7 @@ struct radeon_fence_driver {
30880 uint32_t scratch_reg;
30881 uint64_t gpu_addr;
30882 volatile uint32_t *cpu_addr;
30883 - atomic_t seq;
30884 + atomic_unchecked_t seq;
30885 uint32_t last_seq;
30886 unsigned long last_jiffies;
30887 unsigned long last_timeout;
30888 @@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30889 int x2, int y2);
30890 void (*draw_auto)(struct radeon_device *rdev);
30891 void (*set_default_state)(struct radeon_device *rdev);
30892 -};
30893 +} __no_const;
30894
30895 struct r600_blit {
30896 struct mutex mutex;
30897 @@ -1246,7 +1246,7 @@ struct radeon_asic {
30898 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30899 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30900 } pflip;
30901 -};
30902 +} __no_const;
30903
30904 /*
30905 * Asic structures
30906 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30907 index 5992502..c19c633 100644
30908 --- a/drivers/gpu/drm/radeon/radeon_device.c
30909 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30910 @@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30911 bool can_switch;
30912
30913 spin_lock(&dev->count_lock);
30914 - can_switch = (dev->open_count == 0);
30915 + can_switch = (local_read(&dev->open_count) == 0);
30916 spin_unlock(&dev->count_lock);
30917 return can_switch;
30918 }
30919 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30920 index a1b59ca..86f2d44 100644
30921 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30922 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30923 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30924
30925 /* SW interrupt */
30926 wait_queue_head_t swi_queue;
30927 - atomic_t swi_emitted;
30928 + atomic_unchecked_t swi_emitted;
30929 int vblank_crtc;
30930 uint32_t irq_enable_reg;
30931 uint32_t r500_disp_irq_reg;
30932 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30933 index 4bd36a3..e66fe9c 100644
30934 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30935 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30936 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30937 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30938 return 0;
30939 }
30940 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30941 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30942 if (!rdev->ring[fence->ring].ready)
30943 /* FIXME: cp is not running assume everythings is done right
30944 * away
30945 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30946 }
30947 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30948 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30949 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30950 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30951 rdev->fence_drv[ring].initialized = true;
30952 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30953 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30954 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30955 rdev->fence_drv[ring].scratch_reg = -1;
30956 rdev->fence_drv[ring].cpu_addr = NULL;
30957 rdev->fence_drv[ring].gpu_addr = 0;
30958 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30959 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30960 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30961 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30962 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30963 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30964 index 48b7cea..342236f 100644
30965 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30966 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30967 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30968 request = compat_alloc_user_space(sizeof(*request));
30969 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30970 || __put_user(req32.param, &request->param)
30971 - || __put_user((void __user *)(unsigned long)req32.value,
30972 + || __put_user((unsigned long)req32.value,
30973 &request->value))
30974 return -EFAULT;
30975
30976 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30977 index 00da384..32f972d 100644
30978 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30979 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30980 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30981 unsigned int ret;
30982 RING_LOCALS;
30983
30984 - atomic_inc(&dev_priv->swi_emitted);
30985 - ret = atomic_read(&dev_priv->swi_emitted);
30986 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30987 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30988
30989 BEGIN_RING(4);
30990 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30991 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30992 drm_radeon_private_t *dev_priv =
30993 (drm_radeon_private_t *) dev->dev_private;
30994
30995 - atomic_set(&dev_priv->swi_emitted, 0);
30996 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30997 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30998
30999 dev->max_vblank_count = 0x001fffff;
31000 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31001 index e8422ae..d22d4a8 100644
31002 --- a/drivers/gpu/drm/radeon/radeon_state.c
31003 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31004 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31005 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31006 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31007
31008 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31009 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31010 sarea_priv->nbox * sizeof(depth_boxes[0])))
31011 return -EFAULT;
31012
31013 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31014 {
31015 drm_radeon_private_t *dev_priv = dev->dev_private;
31016 drm_radeon_getparam_t *param = data;
31017 - int value;
31018 + int value = 0;
31019
31020 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31021
31022 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31023 index f493c64..524ab6b 100644
31024 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31025 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31026 @@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31027 }
31028 if (unlikely(ttm_vm_ops == NULL)) {
31029 ttm_vm_ops = vma->vm_ops;
31030 - radeon_ttm_vm_ops = *ttm_vm_ops;
31031 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31032 + pax_open_kernel();
31033 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31034 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31035 + pax_close_kernel();
31036 }
31037 vma->vm_ops = &radeon_ttm_vm_ops;
31038 return 0;
31039 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31040 index f2c3b9d..d5a376b 100644
31041 --- a/drivers/gpu/drm/radeon/rs690.c
31042 +++ b/drivers/gpu/drm/radeon/rs690.c
31043 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31044 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31045 rdev->pm.sideport_bandwidth.full)
31046 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31047 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31048 + read_delay_latency.full = dfixed_const(800 * 1000);
31049 read_delay_latency.full = dfixed_div(read_delay_latency,
31050 rdev->pm.igp_sideport_mclk);
31051 + a.full = dfixed_const(370);
31052 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31053 } else {
31054 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31055 rdev->pm.k8_bandwidth.full)
31056 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31057 index ebc6fac..a8313ed 100644
31058 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31059 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31060 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31061 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31062 struct shrink_control *sc)
31063 {
31064 - static atomic_t start_pool = ATOMIC_INIT(0);
31065 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31066 unsigned i;
31067 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31068 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31069 struct ttm_page_pool *pool;
31070 int shrink_pages = sc->nr_to_scan;
31071
31072 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31073 index 88edacc..1e5412b 100644
31074 --- a/drivers/gpu/drm/via/via_drv.h
31075 +++ b/drivers/gpu/drm/via/via_drv.h
31076 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31077 typedef uint32_t maskarray_t[5];
31078
31079 typedef struct drm_via_irq {
31080 - atomic_t irq_received;
31081 + atomic_unchecked_t irq_received;
31082 uint32_t pending_mask;
31083 uint32_t enable_mask;
31084 wait_queue_head_t irq_queue;
31085 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31086 struct timeval last_vblank;
31087 int last_vblank_valid;
31088 unsigned usec_per_vblank;
31089 - atomic_t vbl_received;
31090 + atomic_unchecked_t vbl_received;
31091 drm_via_state_t hc_state;
31092 char pci_buf[VIA_PCI_BUF_SIZE];
31093 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31094 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31095 index d391f48..10c8ca3 100644
31096 --- a/drivers/gpu/drm/via/via_irq.c
31097 +++ b/drivers/gpu/drm/via/via_irq.c
31098 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31099 if (crtc != 0)
31100 return 0;
31101
31102 - return atomic_read(&dev_priv->vbl_received);
31103 + return atomic_read_unchecked(&dev_priv->vbl_received);
31104 }
31105
31106 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31107 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31108
31109 status = VIA_READ(VIA_REG_INTERRUPT);
31110 if (status & VIA_IRQ_VBLANK_PENDING) {
31111 - atomic_inc(&dev_priv->vbl_received);
31112 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31113 + atomic_inc_unchecked(&dev_priv->vbl_received);
31114 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31115 do_gettimeofday(&cur_vblank);
31116 if (dev_priv->last_vblank_valid) {
31117 dev_priv->usec_per_vblank =
31118 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31119 dev_priv->last_vblank = cur_vblank;
31120 dev_priv->last_vblank_valid = 1;
31121 }
31122 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31123 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31124 DRM_DEBUG("US per vblank is: %u\n",
31125 dev_priv->usec_per_vblank);
31126 }
31127 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31128
31129 for (i = 0; i < dev_priv->num_irqs; ++i) {
31130 if (status & cur_irq->pending_mask) {
31131 - atomic_inc(&cur_irq->irq_received);
31132 + atomic_inc_unchecked(&cur_irq->irq_received);
31133 DRM_WAKEUP(&cur_irq->irq_queue);
31134 handled = 1;
31135 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31136 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31137 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31138 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31139 masks[irq][4]));
31140 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31141 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31142 } else {
31143 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31144 (((cur_irq_sequence =
31145 - atomic_read(&cur_irq->irq_received)) -
31146 + atomic_read_unchecked(&cur_irq->irq_received)) -
31147 *sequence) <= (1 << 23)));
31148 }
31149 *sequence = cur_irq_sequence;
31150 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31151 }
31152
31153 for (i = 0; i < dev_priv->num_irqs; ++i) {
31154 - atomic_set(&cur_irq->irq_received, 0);
31155 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31156 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31157 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31158 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31159 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31160 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31161 case VIA_IRQ_RELATIVE:
31162 irqwait->request.sequence +=
31163 - atomic_read(&cur_irq->irq_received);
31164 + atomic_read_unchecked(&cur_irq->irq_received);
31165 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31166 case VIA_IRQ_ABSOLUTE:
31167 break;
31168 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31169 index d0f2c07..9ebd9c3 100644
31170 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31171 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31172 @@ -263,7 +263,7 @@ struct vmw_private {
31173 * Fencing and IRQs.
31174 */
31175
31176 - atomic_t marker_seq;
31177 + atomic_unchecked_t marker_seq;
31178 wait_queue_head_t fence_queue;
31179 wait_queue_head_t fifo_queue;
31180 int fence_queue_waiters; /* Protected by hw_mutex */
31181 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31182 index a0c2f12..68ae6cb 100644
31183 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31184 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31185 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31186 (unsigned int) min,
31187 (unsigned int) fifo->capabilities);
31188
31189 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31190 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31191 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31192 vmw_marker_queue_init(&fifo->marker_queue);
31193 return vmw_fifo_send_fence(dev_priv, &dummy);
31194 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31195 if (reserveable)
31196 iowrite32(bytes, fifo_mem +
31197 SVGA_FIFO_RESERVED);
31198 - return fifo_mem + (next_cmd >> 2);
31199 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31200 } else {
31201 need_bounce = true;
31202 }
31203 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31204
31205 fm = vmw_fifo_reserve(dev_priv, bytes);
31206 if (unlikely(fm == NULL)) {
31207 - *seqno = atomic_read(&dev_priv->marker_seq);
31208 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31209 ret = -ENOMEM;
31210 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31211 false, 3*HZ);
31212 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31213 }
31214
31215 do {
31216 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31217 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31218 } while (*seqno == 0);
31219
31220 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31221 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31222 index cabc95f..14b3d77 100644
31223 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31224 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31225 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31226 * emitted. Then the fence is stale and signaled.
31227 */
31228
31229 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31230 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31231 > VMW_FENCE_WRAP);
31232
31233 return ret;
31234 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31235
31236 if (fifo_idle)
31237 down_read(&fifo_state->rwsem);
31238 - signal_seq = atomic_read(&dev_priv->marker_seq);
31239 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31240 ret = 0;
31241
31242 for (;;) {
31243 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31244 index 8a8725c..afed796 100644
31245 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31246 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31247 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31248 while (!vmw_lag_lt(queue, us)) {
31249 spin_lock(&queue->lock);
31250 if (list_empty(&queue->head))
31251 - seqno = atomic_read(&dev_priv->marker_seq);
31252 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31253 else {
31254 marker = list_first_entry(&queue->head,
31255 struct vmw_marker, head);
31256 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31257 index 054677b..741672a 100644
31258 --- a/drivers/hid/hid-core.c
31259 +++ b/drivers/hid/hid-core.c
31260 @@ -2070,7 +2070,7 @@ static bool hid_ignore(struct hid_device *hdev)
31261
31262 int hid_add_device(struct hid_device *hdev)
31263 {
31264 - static atomic_t id = ATOMIC_INIT(0);
31265 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31266 int ret;
31267
31268 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31269 @@ -2085,7 +2085,7 @@ int hid_add_device(struct hid_device *hdev)
31270 /* XXX hack, any other cleaner solution after the driver core
31271 * is converted to allow more than 20 bytes as the device name? */
31272 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31273 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31274 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31275
31276 hid_debug_register(hdev, dev_name(&hdev->dev));
31277 ret = device_add(&hdev->dev);
31278 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31279 index eec3291..8ed706b 100644
31280 --- a/drivers/hid/hid-wiimote-debug.c
31281 +++ b/drivers/hid/hid-wiimote-debug.c
31282 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31283 else if (size == 0)
31284 return -EIO;
31285
31286 - if (copy_to_user(u, buf, size))
31287 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31288 return -EFAULT;
31289
31290 *off += size;
31291 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31292 index b1ec0e2..c295a61 100644
31293 --- a/drivers/hid/usbhid/hiddev.c
31294 +++ b/drivers/hid/usbhid/hiddev.c
31295 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31296 break;
31297
31298 case HIDIOCAPPLICATION:
31299 - if (arg < 0 || arg >= hid->maxapplication)
31300 + if (arg >= hid->maxapplication)
31301 break;
31302
31303 for (i = 0; i < hid->maxcollection; i++)
31304 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31305 index 4065374..10ed7dc 100644
31306 --- a/drivers/hv/channel.c
31307 +++ b/drivers/hv/channel.c
31308 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31309 int ret = 0;
31310 int t;
31311
31312 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31313 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31314 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31315 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31316
31317 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31318 if (ret)
31319 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31320 index 15956bd..ea34398 100644
31321 --- a/drivers/hv/hv.c
31322 +++ b/drivers/hv/hv.c
31323 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31324 u64 output_address = (output) ? virt_to_phys(output) : 0;
31325 u32 output_address_hi = output_address >> 32;
31326 u32 output_address_lo = output_address & 0xFFFFFFFF;
31327 - void *hypercall_page = hv_context.hypercall_page;
31328 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31329
31330 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31331 "=a"(hv_status_lo) : "d" (control_hi),
31332 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31333 index 699f0d8..f4f19250 100644
31334 --- a/drivers/hv/hyperv_vmbus.h
31335 +++ b/drivers/hv/hyperv_vmbus.h
31336 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31337 struct vmbus_connection {
31338 enum vmbus_connect_state conn_state;
31339
31340 - atomic_t next_gpadl_handle;
31341 + atomic_unchecked_t next_gpadl_handle;
31342
31343 /*
31344 * Represents channel interrupts. Each bit position represents a
31345 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31346 index a220e57..428f54d 100644
31347 --- a/drivers/hv/vmbus_drv.c
31348 +++ b/drivers/hv/vmbus_drv.c
31349 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31350 {
31351 int ret = 0;
31352
31353 - static atomic_t device_num = ATOMIC_INIT(0);
31354 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31355
31356 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31357 - atomic_inc_return(&device_num));
31358 + atomic_inc_return_unchecked(&device_num));
31359
31360 child_device_obj->device.bus = &hv_bus;
31361 child_device_obj->device.parent = &hv_acpi_dev->dev;
31362 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31363 index 9140236..ceaef4e 100644
31364 --- a/drivers/hwmon/acpi_power_meter.c
31365 +++ b/drivers/hwmon/acpi_power_meter.c
31366 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31367 return res;
31368
31369 temp /= 1000;
31370 - if (temp < 0)
31371 - return -EINVAL;
31372
31373 mutex_lock(&resource->lock);
31374 resource->trip[attr->index - 7] = temp;
31375 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31376 index 8b011d0..3de24a1 100644
31377 --- a/drivers/hwmon/sht15.c
31378 +++ b/drivers/hwmon/sht15.c
31379 @@ -166,7 +166,7 @@ struct sht15_data {
31380 int supply_uV;
31381 bool supply_uV_valid;
31382 struct work_struct update_supply_work;
31383 - atomic_t interrupt_handled;
31384 + atomic_unchecked_t interrupt_handled;
31385 };
31386
31387 /**
31388 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31389 return ret;
31390
31391 gpio_direction_input(data->pdata->gpio_data);
31392 - atomic_set(&data->interrupt_handled, 0);
31393 + atomic_set_unchecked(&data->interrupt_handled, 0);
31394
31395 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31396 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31397 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31398 /* Only relevant if the interrupt hasn't occurred. */
31399 - if (!atomic_read(&data->interrupt_handled))
31400 + if (!atomic_read_unchecked(&data->interrupt_handled))
31401 schedule_work(&data->read_work);
31402 }
31403 ret = wait_event_timeout(data->wait_queue,
31404 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31405
31406 /* First disable the interrupt */
31407 disable_irq_nosync(irq);
31408 - atomic_inc(&data->interrupt_handled);
31409 + atomic_inc_unchecked(&data->interrupt_handled);
31410 /* Then schedule a reading work struct */
31411 if (data->state != SHT15_READING_NOTHING)
31412 schedule_work(&data->read_work);
31413 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31414 * If not, then start the interrupt again - care here as could
31415 * have gone low in meantime so verify it hasn't!
31416 */
31417 - atomic_set(&data->interrupt_handled, 0);
31418 + atomic_set_unchecked(&data->interrupt_handled, 0);
31419 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31420 /* If still not occurred or another handler was scheduled */
31421 if (gpio_get_value(data->pdata->gpio_data)
31422 - || atomic_read(&data->interrupt_handled))
31423 + || atomic_read_unchecked(&data->interrupt_handled))
31424 return;
31425 }
31426
31427 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31428 index 378fcb5..5e91fa8 100644
31429 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31430 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31431 @@ -43,7 +43,7 @@
31432 extern struct i2c_adapter amd756_smbus;
31433
31434 static struct i2c_adapter *s4882_adapter;
31435 -static struct i2c_algorithm *s4882_algo;
31436 +static i2c_algorithm_no_const *s4882_algo;
31437
31438 /* Wrapper access functions for multiplexed SMBus */
31439 static DEFINE_MUTEX(amd756_lock);
31440 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31441 index 29015eb..af2d8e9 100644
31442 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31443 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31444 @@ -41,7 +41,7 @@
31445 extern struct i2c_adapter *nforce2_smbus;
31446
31447 static struct i2c_adapter *s4985_adapter;
31448 -static struct i2c_algorithm *s4985_algo;
31449 +static i2c_algorithm_no_const *s4985_algo;
31450
31451 /* Wrapper access functions for multiplexed SMBus */
31452 static DEFINE_MUTEX(nforce2_lock);
31453 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31454 index d7a4833..7fae376 100644
31455 --- a/drivers/i2c/i2c-mux.c
31456 +++ b/drivers/i2c/i2c-mux.c
31457 @@ -28,7 +28,7 @@
31458 /* multiplexer per channel data */
31459 struct i2c_mux_priv {
31460 struct i2c_adapter adap;
31461 - struct i2c_algorithm algo;
31462 + i2c_algorithm_no_const algo;
31463
31464 struct i2c_adapter *parent;
31465 void *mux_dev; /* the mux chip/device */
31466 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31467 index 57d00ca..0145194 100644
31468 --- a/drivers/ide/aec62xx.c
31469 +++ b/drivers/ide/aec62xx.c
31470 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31471 .cable_detect = atp86x_cable_detect,
31472 };
31473
31474 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31475 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31476 { /* 0: AEC6210 */
31477 .name = DRV_NAME,
31478 .init_chipset = init_chipset_aec62xx,
31479 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31480 index 2c8016a..911a27c 100644
31481 --- a/drivers/ide/alim15x3.c
31482 +++ b/drivers/ide/alim15x3.c
31483 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31484 .dma_sff_read_status = ide_dma_sff_read_status,
31485 };
31486
31487 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31488 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31489 .name = DRV_NAME,
31490 .init_chipset = init_chipset_ali15x3,
31491 .init_hwif = init_hwif_ali15x3,
31492 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31493 index 3747b25..56fc995 100644
31494 --- a/drivers/ide/amd74xx.c
31495 +++ b/drivers/ide/amd74xx.c
31496 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31497 .udma_mask = udma, \
31498 }
31499
31500 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31501 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31502 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31503 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31504 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31505 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31506 index 15f0ead..cb43480 100644
31507 --- a/drivers/ide/atiixp.c
31508 +++ b/drivers/ide/atiixp.c
31509 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31510 .cable_detect = atiixp_cable_detect,
31511 };
31512
31513 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31514 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31515 { /* 0: IXP200/300/400/700 */
31516 .name = DRV_NAME,
31517 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31518 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31519 index 5f80312..d1fc438 100644
31520 --- a/drivers/ide/cmd64x.c
31521 +++ b/drivers/ide/cmd64x.c
31522 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31523 .dma_sff_read_status = ide_dma_sff_read_status,
31524 };
31525
31526 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31527 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31528 { /* 0: CMD643 */
31529 .name = DRV_NAME,
31530 .init_chipset = init_chipset_cmd64x,
31531 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31532 index 2c1e5f7..1444762 100644
31533 --- a/drivers/ide/cs5520.c
31534 +++ b/drivers/ide/cs5520.c
31535 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31536 .set_dma_mode = cs5520_set_dma_mode,
31537 };
31538
31539 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31540 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31541 .name = DRV_NAME,
31542 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31543 .port_ops = &cs5520_port_ops,
31544 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31545 index 4dc4eb9..49b40ad 100644
31546 --- a/drivers/ide/cs5530.c
31547 +++ b/drivers/ide/cs5530.c
31548 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31549 .udma_filter = cs5530_udma_filter,
31550 };
31551
31552 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31553 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31554 .name = DRV_NAME,
31555 .init_chipset = init_chipset_cs5530,
31556 .init_hwif = init_hwif_cs5530,
31557 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31558 index 5059faf..18d4c85 100644
31559 --- a/drivers/ide/cs5535.c
31560 +++ b/drivers/ide/cs5535.c
31561 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31562 .cable_detect = cs5535_cable_detect,
31563 };
31564
31565 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31566 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31567 .name = DRV_NAME,
31568 .port_ops = &cs5535_port_ops,
31569 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31570 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31571 index 847553f..3ffb49d 100644
31572 --- a/drivers/ide/cy82c693.c
31573 +++ b/drivers/ide/cy82c693.c
31574 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31575 .set_dma_mode = cy82c693_set_dma_mode,
31576 };
31577
31578 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31579 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31580 .name = DRV_NAME,
31581 .init_iops = init_iops_cy82c693,
31582 .port_ops = &cy82c693_port_ops,
31583 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31584 index 58c51cd..4aec3b8 100644
31585 --- a/drivers/ide/hpt366.c
31586 +++ b/drivers/ide/hpt366.c
31587 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31588 }
31589 };
31590
31591 -static const struct hpt_info hpt36x __devinitdata = {
31592 +static const struct hpt_info hpt36x __devinitconst = {
31593 .chip_name = "HPT36x",
31594 .chip_type = HPT36x,
31595 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31596 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31597 .timings = &hpt36x_timings
31598 };
31599
31600 -static const struct hpt_info hpt370 __devinitdata = {
31601 +static const struct hpt_info hpt370 __devinitconst = {
31602 .chip_name = "HPT370",
31603 .chip_type = HPT370,
31604 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31605 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31606 .timings = &hpt37x_timings
31607 };
31608
31609 -static const struct hpt_info hpt370a __devinitdata = {
31610 +static const struct hpt_info hpt370a __devinitconst = {
31611 .chip_name = "HPT370A",
31612 .chip_type = HPT370A,
31613 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31614 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31615 .timings = &hpt37x_timings
31616 };
31617
31618 -static const struct hpt_info hpt374 __devinitdata = {
31619 +static const struct hpt_info hpt374 __devinitconst = {
31620 .chip_name = "HPT374",
31621 .chip_type = HPT374,
31622 .udma_mask = ATA_UDMA5,
31623 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31624 .timings = &hpt37x_timings
31625 };
31626
31627 -static const struct hpt_info hpt372 __devinitdata = {
31628 +static const struct hpt_info hpt372 __devinitconst = {
31629 .chip_name = "HPT372",
31630 .chip_type = HPT372,
31631 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31632 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31633 .timings = &hpt37x_timings
31634 };
31635
31636 -static const struct hpt_info hpt372a __devinitdata = {
31637 +static const struct hpt_info hpt372a __devinitconst = {
31638 .chip_name = "HPT372A",
31639 .chip_type = HPT372A,
31640 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31641 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31642 .timings = &hpt37x_timings
31643 };
31644
31645 -static const struct hpt_info hpt302 __devinitdata = {
31646 +static const struct hpt_info hpt302 __devinitconst = {
31647 .chip_name = "HPT302",
31648 .chip_type = HPT302,
31649 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31650 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31651 .timings = &hpt37x_timings
31652 };
31653
31654 -static const struct hpt_info hpt371 __devinitdata = {
31655 +static const struct hpt_info hpt371 __devinitconst = {
31656 .chip_name = "HPT371",
31657 .chip_type = HPT371,
31658 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31659 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31660 .timings = &hpt37x_timings
31661 };
31662
31663 -static const struct hpt_info hpt372n __devinitdata = {
31664 +static const struct hpt_info hpt372n __devinitconst = {
31665 .chip_name = "HPT372N",
31666 .chip_type = HPT372N,
31667 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31668 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31669 .timings = &hpt37x_timings
31670 };
31671
31672 -static const struct hpt_info hpt302n __devinitdata = {
31673 +static const struct hpt_info hpt302n __devinitconst = {
31674 .chip_name = "HPT302N",
31675 .chip_type = HPT302N,
31676 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31677 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31678 .timings = &hpt37x_timings
31679 };
31680
31681 -static const struct hpt_info hpt371n __devinitdata = {
31682 +static const struct hpt_info hpt371n __devinitconst = {
31683 .chip_name = "HPT371N",
31684 .chip_type = HPT371N,
31685 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31686 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31687 .dma_sff_read_status = ide_dma_sff_read_status,
31688 };
31689
31690 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31691 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31692 { /* 0: HPT36x */
31693 .name = DRV_NAME,
31694 .init_chipset = init_chipset_hpt366,
31695 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31696 index 8126824..55a2798 100644
31697 --- a/drivers/ide/ide-cd.c
31698 +++ b/drivers/ide/ide-cd.c
31699 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31700 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31701 if ((unsigned long)buf & alignment
31702 || blk_rq_bytes(rq) & q->dma_pad_mask
31703 - || object_is_on_stack(buf))
31704 + || object_starts_on_stack(buf))
31705 drive->dma = 0;
31706 }
31707 }
31708 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31709 index 7f56b73..dab5b67 100644
31710 --- a/drivers/ide/ide-pci-generic.c
31711 +++ b/drivers/ide/ide-pci-generic.c
31712 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31713 .udma_mask = ATA_UDMA6, \
31714 }
31715
31716 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31717 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31718 /* 0: Unknown */
31719 DECLARE_GENERIC_PCI_DEV(0),
31720
31721 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31722 index 560e66d..d5dd180 100644
31723 --- a/drivers/ide/it8172.c
31724 +++ b/drivers/ide/it8172.c
31725 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31726 .set_dma_mode = it8172_set_dma_mode,
31727 };
31728
31729 -static const struct ide_port_info it8172_port_info __devinitdata = {
31730 +static const struct ide_port_info it8172_port_info __devinitconst = {
31731 .name = DRV_NAME,
31732 .port_ops = &it8172_port_ops,
31733 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31734 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31735 index 46816ba..1847aeb 100644
31736 --- a/drivers/ide/it8213.c
31737 +++ b/drivers/ide/it8213.c
31738 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31739 .cable_detect = it8213_cable_detect,
31740 };
31741
31742 -static const struct ide_port_info it8213_chipset __devinitdata = {
31743 +static const struct ide_port_info it8213_chipset __devinitconst = {
31744 .name = DRV_NAME,
31745 .enablebits = { {0x41, 0x80, 0x80} },
31746 .port_ops = &it8213_port_ops,
31747 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31748 index 2e3169f..c5611db 100644
31749 --- a/drivers/ide/it821x.c
31750 +++ b/drivers/ide/it821x.c
31751 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31752 .cable_detect = it821x_cable_detect,
31753 };
31754
31755 -static const struct ide_port_info it821x_chipset __devinitdata = {
31756 +static const struct ide_port_info it821x_chipset __devinitconst = {
31757 .name = DRV_NAME,
31758 .init_chipset = init_chipset_it821x,
31759 .init_hwif = init_hwif_it821x,
31760 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31761 index 74c2c4a..efddd7d 100644
31762 --- a/drivers/ide/jmicron.c
31763 +++ b/drivers/ide/jmicron.c
31764 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31765 .cable_detect = jmicron_cable_detect,
31766 };
31767
31768 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31769 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31770 .name = DRV_NAME,
31771 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31772 .port_ops = &jmicron_port_ops,
31773 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31774 index 95327a2..73f78d8 100644
31775 --- a/drivers/ide/ns87415.c
31776 +++ b/drivers/ide/ns87415.c
31777 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31778 .dma_sff_read_status = superio_dma_sff_read_status,
31779 };
31780
31781 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31782 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31783 .name = DRV_NAME,
31784 .init_hwif = init_hwif_ns87415,
31785 .tp_ops = &ns87415_tp_ops,
31786 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31787 index 1a53a4c..39edc66 100644
31788 --- a/drivers/ide/opti621.c
31789 +++ b/drivers/ide/opti621.c
31790 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31791 .set_pio_mode = opti621_set_pio_mode,
31792 };
31793
31794 -static const struct ide_port_info opti621_chipset __devinitdata = {
31795 +static const struct ide_port_info opti621_chipset __devinitconst = {
31796 .name = DRV_NAME,
31797 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31798 .port_ops = &opti621_port_ops,
31799 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31800 index 9546fe2..2e5ceb6 100644
31801 --- a/drivers/ide/pdc202xx_new.c
31802 +++ b/drivers/ide/pdc202xx_new.c
31803 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31804 .udma_mask = udma, \
31805 }
31806
31807 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31808 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31809 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31810 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31811 };
31812 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31813 index 3a35ec6..5634510 100644
31814 --- a/drivers/ide/pdc202xx_old.c
31815 +++ b/drivers/ide/pdc202xx_old.c
31816 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31817 .max_sectors = sectors, \
31818 }
31819
31820 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31821 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31822 { /* 0: PDC20246 */
31823 .name = DRV_NAME,
31824 .init_chipset = init_chipset_pdc202xx,
31825 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31826 index 1892e81..fe0fd60 100644
31827 --- a/drivers/ide/piix.c
31828 +++ b/drivers/ide/piix.c
31829 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31830 .udma_mask = udma, \
31831 }
31832
31833 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31834 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31835 /* 0: MPIIX */
31836 { /*
31837 * MPIIX actually has only a single IDE channel mapped to
31838 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31839 index a6414a8..c04173e 100644
31840 --- a/drivers/ide/rz1000.c
31841 +++ b/drivers/ide/rz1000.c
31842 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31843 }
31844 }
31845
31846 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31847 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31848 .name = DRV_NAME,
31849 .host_flags = IDE_HFLAG_NO_DMA,
31850 };
31851 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31852 index 356b9b5..d4758eb 100644
31853 --- a/drivers/ide/sc1200.c
31854 +++ b/drivers/ide/sc1200.c
31855 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31856 .dma_sff_read_status = ide_dma_sff_read_status,
31857 };
31858
31859 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31860 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31861 .name = DRV_NAME,
31862 .port_ops = &sc1200_port_ops,
31863 .dma_ops = &sc1200_dma_ops,
31864 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31865 index b7f5b0c..9701038 100644
31866 --- a/drivers/ide/scc_pata.c
31867 +++ b/drivers/ide/scc_pata.c
31868 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31869 .dma_sff_read_status = scc_dma_sff_read_status,
31870 };
31871
31872 -static const struct ide_port_info scc_chipset __devinitdata = {
31873 +static const struct ide_port_info scc_chipset __devinitconst = {
31874 .name = "sccIDE",
31875 .init_iops = init_iops_scc,
31876 .init_dma = scc_init_dma,
31877 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31878 index 35fb8da..24d72ef 100644
31879 --- a/drivers/ide/serverworks.c
31880 +++ b/drivers/ide/serverworks.c
31881 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31882 .cable_detect = svwks_cable_detect,
31883 };
31884
31885 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31886 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31887 { /* 0: OSB4 */
31888 .name = DRV_NAME,
31889 .init_chipset = init_chipset_svwks,
31890 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31891 index ddeda44..46f7e30 100644
31892 --- a/drivers/ide/siimage.c
31893 +++ b/drivers/ide/siimage.c
31894 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31895 .udma_mask = ATA_UDMA6, \
31896 }
31897
31898 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31899 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31900 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31901 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31902 };
31903 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31904 index 4a00225..09e61b4 100644
31905 --- a/drivers/ide/sis5513.c
31906 +++ b/drivers/ide/sis5513.c
31907 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31908 .cable_detect = sis_cable_detect,
31909 };
31910
31911 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31912 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31913 .name = DRV_NAME,
31914 .init_chipset = init_chipset_sis5513,
31915 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31916 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31917 index f21dc2a..d051cd2 100644
31918 --- a/drivers/ide/sl82c105.c
31919 +++ b/drivers/ide/sl82c105.c
31920 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31921 .dma_sff_read_status = ide_dma_sff_read_status,
31922 };
31923
31924 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31925 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31926 .name = DRV_NAME,
31927 .init_chipset = init_chipset_sl82c105,
31928 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31929 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31930 index 864ffe0..863a5e9 100644
31931 --- a/drivers/ide/slc90e66.c
31932 +++ b/drivers/ide/slc90e66.c
31933 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31934 .cable_detect = slc90e66_cable_detect,
31935 };
31936
31937 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31938 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31939 .name = DRV_NAME,
31940 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31941 .port_ops = &slc90e66_port_ops,
31942 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31943 index 4799d5c..1794678 100644
31944 --- a/drivers/ide/tc86c001.c
31945 +++ b/drivers/ide/tc86c001.c
31946 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31947 .dma_sff_read_status = ide_dma_sff_read_status,
31948 };
31949
31950 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31951 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31952 .name = DRV_NAME,
31953 .init_hwif = init_hwif_tc86c001,
31954 .port_ops = &tc86c001_port_ops,
31955 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31956 index 281c914..55ce1b8 100644
31957 --- a/drivers/ide/triflex.c
31958 +++ b/drivers/ide/triflex.c
31959 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31960 .set_dma_mode = triflex_set_mode,
31961 };
31962
31963 -static const struct ide_port_info triflex_device __devinitdata = {
31964 +static const struct ide_port_info triflex_device __devinitconst = {
31965 .name = DRV_NAME,
31966 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31967 .port_ops = &triflex_port_ops,
31968 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31969 index 4b42ca0..e494a98 100644
31970 --- a/drivers/ide/trm290.c
31971 +++ b/drivers/ide/trm290.c
31972 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31973 .dma_check = trm290_dma_check,
31974 };
31975
31976 -static const struct ide_port_info trm290_chipset __devinitdata = {
31977 +static const struct ide_port_info trm290_chipset __devinitconst = {
31978 .name = DRV_NAME,
31979 .init_hwif = init_hwif_trm290,
31980 .tp_ops = &trm290_tp_ops,
31981 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31982 index f46f49c..eb77678 100644
31983 --- a/drivers/ide/via82cxxx.c
31984 +++ b/drivers/ide/via82cxxx.c
31985 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31986 .cable_detect = via82cxxx_cable_detect,
31987 };
31988
31989 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31990 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31991 .name = DRV_NAME,
31992 .init_chipset = init_chipset_via82cxxx,
31993 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31994 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31995 index 73d4531..c90cd2d 100644
31996 --- a/drivers/ieee802154/fakehard.c
31997 +++ b/drivers/ieee802154/fakehard.c
31998 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31999 phy->transmit_power = 0xbf;
32000
32001 dev->netdev_ops = &fake_ops;
32002 - dev->ml_priv = &fake_mlme;
32003 + dev->ml_priv = (void *)&fake_mlme;
32004
32005 priv = netdev_priv(dev);
32006 priv->phy = phy;
32007 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32008 index c889aae..6cf5aa7 100644
32009 --- a/drivers/infiniband/core/cm.c
32010 +++ b/drivers/infiniband/core/cm.c
32011 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32012
32013 struct cm_counter_group {
32014 struct kobject obj;
32015 - atomic_long_t counter[CM_ATTR_COUNT];
32016 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32017 };
32018
32019 struct cm_counter_attribute {
32020 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32021 struct ib_mad_send_buf *msg = NULL;
32022 int ret;
32023
32024 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32025 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32026 counter[CM_REQ_COUNTER]);
32027
32028 /* Quick state check to discard duplicate REQs. */
32029 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32030 if (!cm_id_priv)
32031 return;
32032
32033 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32034 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32035 counter[CM_REP_COUNTER]);
32036 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32037 if (ret)
32038 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32039 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32040 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32041 spin_unlock_irq(&cm_id_priv->lock);
32042 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32043 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32044 counter[CM_RTU_COUNTER]);
32045 goto out;
32046 }
32047 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32048 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32049 dreq_msg->local_comm_id);
32050 if (!cm_id_priv) {
32051 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32052 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32053 counter[CM_DREQ_COUNTER]);
32054 cm_issue_drep(work->port, work->mad_recv_wc);
32055 return -EINVAL;
32056 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32057 case IB_CM_MRA_REP_RCVD:
32058 break;
32059 case IB_CM_TIMEWAIT:
32060 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32061 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32062 counter[CM_DREQ_COUNTER]);
32063 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32064 goto unlock;
32065 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32066 cm_free_msg(msg);
32067 goto deref;
32068 case IB_CM_DREQ_RCVD:
32069 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32070 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32071 counter[CM_DREQ_COUNTER]);
32072 goto unlock;
32073 default:
32074 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32075 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32076 cm_id_priv->msg, timeout)) {
32077 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32078 - atomic_long_inc(&work->port->
32079 + atomic_long_inc_unchecked(&work->port->
32080 counter_group[CM_RECV_DUPLICATES].
32081 counter[CM_MRA_COUNTER]);
32082 goto out;
32083 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32084 break;
32085 case IB_CM_MRA_REQ_RCVD:
32086 case IB_CM_MRA_REP_RCVD:
32087 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32088 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32089 counter[CM_MRA_COUNTER]);
32090 /* fall through */
32091 default:
32092 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32093 case IB_CM_LAP_IDLE:
32094 break;
32095 case IB_CM_MRA_LAP_SENT:
32096 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32097 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32098 counter[CM_LAP_COUNTER]);
32099 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32100 goto unlock;
32101 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32102 cm_free_msg(msg);
32103 goto deref;
32104 case IB_CM_LAP_RCVD:
32105 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32106 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32107 counter[CM_LAP_COUNTER]);
32108 goto unlock;
32109 default:
32110 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32111 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32112 if (cur_cm_id_priv) {
32113 spin_unlock_irq(&cm.lock);
32114 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32115 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32116 counter[CM_SIDR_REQ_COUNTER]);
32117 goto out; /* Duplicate message. */
32118 }
32119 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32120 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32121 msg->retries = 1;
32122
32123 - atomic_long_add(1 + msg->retries,
32124 + atomic_long_add_unchecked(1 + msg->retries,
32125 &port->counter_group[CM_XMIT].counter[attr_index]);
32126 if (msg->retries)
32127 - atomic_long_add(msg->retries,
32128 + atomic_long_add_unchecked(msg->retries,
32129 &port->counter_group[CM_XMIT_RETRIES].
32130 counter[attr_index]);
32131
32132 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32133 }
32134
32135 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32136 - atomic_long_inc(&port->counter_group[CM_RECV].
32137 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32138 counter[attr_id - CM_ATTR_ID_OFFSET]);
32139
32140 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32141 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32142 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32143
32144 return sprintf(buf, "%ld\n",
32145 - atomic_long_read(&group->counter[cm_attr->index]));
32146 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32147 }
32148
32149 static const struct sysfs_ops cm_counter_ops = {
32150 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32151 index 176c8f9..2627b62 100644
32152 --- a/drivers/infiniband/core/fmr_pool.c
32153 +++ b/drivers/infiniband/core/fmr_pool.c
32154 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32155
32156 struct task_struct *thread;
32157
32158 - atomic_t req_ser;
32159 - atomic_t flush_ser;
32160 + atomic_unchecked_t req_ser;
32161 + atomic_unchecked_t flush_ser;
32162
32163 wait_queue_head_t force_wait;
32164 };
32165 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32166 struct ib_fmr_pool *pool = pool_ptr;
32167
32168 do {
32169 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32170 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32171 ib_fmr_batch_release(pool);
32172
32173 - atomic_inc(&pool->flush_ser);
32174 + atomic_inc_unchecked(&pool->flush_ser);
32175 wake_up_interruptible(&pool->force_wait);
32176
32177 if (pool->flush_function)
32178 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32179 }
32180
32181 set_current_state(TASK_INTERRUPTIBLE);
32182 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32183 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32184 !kthread_should_stop())
32185 schedule();
32186 __set_current_state(TASK_RUNNING);
32187 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32188 pool->dirty_watermark = params->dirty_watermark;
32189 pool->dirty_len = 0;
32190 spin_lock_init(&pool->pool_lock);
32191 - atomic_set(&pool->req_ser, 0);
32192 - atomic_set(&pool->flush_ser, 0);
32193 + atomic_set_unchecked(&pool->req_ser, 0);
32194 + atomic_set_unchecked(&pool->flush_ser, 0);
32195 init_waitqueue_head(&pool->force_wait);
32196
32197 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32198 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32199 }
32200 spin_unlock_irq(&pool->pool_lock);
32201
32202 - serial = atomic_inc_return(&pool->req_ser);
32203 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32204 wake_up_process(pool->thread);
32205
32206 if (wait_event_interruptible(pool->force_wait,
32207 - atomic_read(&pool->flush_ser) - serial >= 0))
32208 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32209 return -EINTR;
32210
32211 return 0;
32212 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32213 } else {
32214 list_add_tail(&fmr->list, &pool->dirty_list);
32215 if (++pool->dirty_len >= pool->dirty_watermark) {
32216 - atomic_inc(&pool->req_ser);
32217 + atomic_inc_unchecked(&pool->req_ser);
32218 wake_up_process(pool->thread);
32219 }
32220 }
32221 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32222 index 40c8353..946b0e4 100644
32223 --- a/drivers/infiniband/hw/cxgb4/mem.c
32224 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32225 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32226 int err;
32227 struct fw_ri_tpte tpt;
32228 u32 stag_idx;
32229 - static atomic_t key;
32230 + static atomic_unchecked_t key;
32231
32232 if (c4iw_fatal_error(rdev))
32233 return -EIO;
32234 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32235 &rdev->resource.tpt_fifo_lock);
32236 if (!stag_idx)
32237 return -ENOMEM;
32238 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32239 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32240 }
32241 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32242 __func__, stag_state, type, pdid, stag_idx);
32243 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32244 index 79b3dbc..96e5fcc 100644
32245 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32246 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32247 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32248 struct ib_atomic_eth *ateth;
32249 struct ipath_ack_entry *e;
32250 u64 vaddr;
32251 - atomic64_t *maddr;
32252 + atomic64_unchecked_t *maddr;
32253 u64 sdata;
32254 u32 rkey;
32255 u8 next;
32256 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32257 IB_ACCESS_REMOTE_ATOMIC)))
32258 goto nack_acc_unlck;
32259 /* Perform atomic OP and save result. */
32260 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32261 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32262 sdata = be64_to_cpu(ateth->swap_data);
32263 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32264 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32265 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32266 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32267 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32268 be64_to_cpu(ateth->compare_data),
32269 sdata);
32270 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32271 index 1f95bba..9530f87 100644
32272 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32273 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32274 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32275 unsigned long flags;
32276 struct ib_wc wc;
32277 u64 sdata;
32278 - atomic64_t *maddr;
32279 + atomic64_unchecked_t *maddr;
32280 enum ib_wc_status send_status;
32281
32282 /*
32283 @@ -382,11 +382,11 @@ again:
32284 IB_ACCESS_REMOTE_ATOMIC)))
32285 goto acc_err;
32286 /* Perform atomic OP and save result. */
32287 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32288 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32289 sdata = wqe->wr.wr.atomic.compare_add;
32290 *(u64 *) sqp->s_sge.sge.vaddr =
32291 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32292 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32293 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32294 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32295 sdata, wqe->wr.wr.atomic.swap);
32296 goto send_comp;
32297 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32298 index 7140199..da60063 100644
32299 --- a/drivers/infiniband/hw/nes/nes.c
32300 +++ b/drivers/infiniband/hw/nes/nes.c
32301 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32302 LIST_HEAD(nes_adapter_list);
32303 static LIST_HEAD(nes_dev_list);
32304
32305 -atomic_t qps_destroyed;
32306 +atomic_unchecked_t qps_destroyed;
32307
32308 static unsigned int ee_flsh_adapter;
32309 static unsigned int sysfs_nonidx_addr;
32310 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32311 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32312 struct nes_adapter *nesadapter = nesdev->nesadapter;
32313
32314 - atomic_inc(&qps_destroyed);
32315 + atomic_inc_unchecked(&qps_destroyed);
32316
32317 /* Free the control structures */
32318
32319 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32320 index c438e46..ca30356 100644
32321 --- a/drivers/infiniband/hw/nes/nes.h
32322 +++ b/drivers/infiniband/hw/nes/nes.h
32323 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32324 extern unsigned int wqm_quanta;
32325 extern struct list_head nes_adapter_list;
32326
32327 -extern atomic_t cm_connects;
32328 -extern atomic_t cm_accepts;
32329 -extern atomic_t cm_disconnects;
32330 -extern atomic_t cm_closes;
32331 -extern atomic_t cm_connecteds;
32332 -extern atomic_t cm_connect_reqs;
32333 -extern atomic_t cm_rejects;
32334 -extern atomic_t mod_qp_timouts;
32335 -extern atomic_t qps_created;
32336 -extern atomic_t qps_destroyed;
32337 -extern atomic_t sw_qps_destroyed;
32338 +extern atomic_unchecked_t cm_connects;
32339 +extern atomic_unchecked_t cm_accepts;
32340 +extern atomic_unchecked_t cm_disconnects;
32341 +extern atomic_unchecked_t cm_closes;
32342 +extern atomic_unchecked_t cm_connecteds;
32343 +extern atomic_unchecked_t cm_connect_reqs;
32344 +extern atomic_unchecked_t cm_rejects;
32345 +extern atomic_unchecked_t mod_qp_timouts;
32346 +extern atomic_unchecked_t qps_created;
32347 +extern atomic_unchecked_t qps_destroyed;
32348 +extern atomic_unchecked_t sw_qps_destroyed;
32349 extern u32 mh_detected;
32350 extern u32 mh_pauses_sent;
32351 extern u32 cm_packets_sent;
32352 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32353 extern u32 cm_packets_received;
32354 extern u32 cm_packets_dropped;
32355 extern u32 cm_packets_retrans;
32356 -extern atomic_t cm_listens_created;
32357 -extern atomic_t cm_listens_destroyed;
32358 +extern atomic_unchecked_t cm_listens_created;
32359 +extern atomic_unchecked_t cm_listens_destroyed;
32360 extern u32 cm_backlog_drops;
32361 -extern atomic_t cm_loopbacks;
32362 -extern atomic_t cm_nodes_created;
32363 -extern atomic_t cm_nodes_destroyed;
32364 -extern atomic_t cm_accel_dropped_pkts;
32365 -extern atomic_t cm_resets_recvd;
32366 -extern atomic_t pau_qps_created;
32367 -extern atomic_t pau_qps_destroyed;
32368 +extern atomic_unchecked_t cm_loopbacks;
32369 +extern atomic_unchecked_t cm_nodes_created;
32370 +extern atomic_unchecked_t cm_nodes_destroyed;
32371 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32372 +extern atomic_unchecked_t cm_resets_recvd;
32373 +extern atomic_unchecked_t pau_qps_created;
32374 +extern atomic_unchecked_t pau_qps_destroyed;
32375
32376 extern u32 int_mod_timer_init;
32377 extern u32 int_mod_cq_depth_256;
32378 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32379 index 71edfbb..15b62ae 100644
32380 --- a/drivers/infiniband/hw/nes/nes_cm.c
32381 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32382 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32383 u32 cm_packets_retrans;
32384 u32 cm_packets_created;
32385 u32 cm_packets_received;
32386 -atomic_t cm_listens_created;
32387 -atomic_t cm_listens_destroyed;
32388 +atomic_unchecked_t cm_listens_created;
32389 +atomic_unchecked_t cm_listens_destroyed;
32390 u32 cm_backlog_drops;
32391 -atomic_t cm_loopbacks;
32392 -atomic_t cm_nodes_created;
32393 -atomic_t cm_nodes_destroyed;
32394 -atomic_t cm_accel_dropped_pkts;
32395 -atomic_t cm_resets_recvd;
32396 +atomic_unchecked_t cm_loopbacks;
32397 +atomic_unchecked_t cm_nodes_created;
32398 +atomic_unchecked_t cm_nodes_destroyed;
32399 +atomic_unchecked_t cm_accel_dropped_pkts;
32400 +atomic_unchecked_t cm_resets_recvd;
32401
32402 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32403 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32404 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32405
32406 static struct nes_cm_core *g_cm_core;
32407
32408 -atomic_t cm_connects;
32409 -atomic_t cm_accepts;
32410 -atomic_t cm_disconnects;
32411 -atomic_t cm_closes;
32412 -atomic_t cm_connecteds;
32413 -atomic_t cm_connect_reqs;
32414 -atomic_t cm_rejects;
32415 +atomic_unchecked_t cm_connects;
32416 +atomic_unchecked_t cm_accepts;
32417 +atomic_unchecked_t cm_disconnects;
32418 +atomic_unchecked_t cm_closes;
32419 +atomic_unchecked_t cm_connecteds;
32420 +atomic_unchecked_t cm_connect_reqs;
32421 +atomic_unchecked_t cm_rejects;
32422
32423 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32424 {
32425 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32426 kfree(listener);
32427 listener = NULL;
32428 ret = 0;
32429 - atomic_inc(&cm_listens_destroyed);
32430 + atomic_inc_unchecked(&cm_listens_destroyed);
32431 } else {
32432 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32433 }
32434 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32435 cm_node->rem_mac);
32436
32437 add_hte_node(cm_core, cm_node);
32438 - atomic_inc(&cm_nodes_created);
32439 + atomic_inc_unchecked(&cm_nodes_created);
32440
32441 return cm_node;
32442 }
32443 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32444 }
32445
32446 atomic_dec(&cm_core->node_cnt);
32447 - atomic_inc(&cm_nodes_destroyed);
32448 + atomic_inc_unchecked(&cm_nodes_destroyed);
32449 nesqp = cm_node->nesqp;
32450 if (nesqp) {
32451 nesqp->cm_node = NULL;
32452 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32453
32454 static void drop_packet(struct sk_buff *skb)
32455 {
32456 - atomic_inc(&cm_accel_dropped_pkts);
32457 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32458 dev_kfree_skb_any(skb);
32459 }
32460
32461 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32462 {
32463
32464 int reset = 0; /* whether to send reset in case of err.. */
32465 - atomic_inc(&cm_resets_recvd);
32466 + atomic_inc_unchecked(&cm_resets_recvd);
32467 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32468 " refcnt=%d\n", cm_node, cm_node->state,
32469 atomic_read(&cm_node->ref_count));
32470 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32471 rem_ref_cm_node(cm_node->cm_core, cm_node);
32472 return NULL;
32473 }
32474 - atomic_inc(&cm_loopbacks);
32475 + atomic_inc_unchecked(&cm_loopbacks);
32476 loopbackremotenode->loopbackpartner = cm_node;
32477 loopbackremotenode->tcp_cntxt.rcv_wscale =
32478 NES_CM_DEFAULT_RCV_WND_SCALE;
32479 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32480 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32481 else {
32482 rem_ref_cm_node(cm_core, cm_node);
32483 - atomic_inc(&cm_accel_dropped_pkts);
32484 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32485 dev_kfree_skb_any(skb);
32486 }
32487 break;
32488 @@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32489
32490 if ((cm_id) && (cm_id->event_handler)) {
32491 if (issue_disconn) {
32492 - atomic_inc(&cm_disconnects);
32493 + atomic_inc_unchecked(&cm_disconnects);
32494 cm_event.event = IW_CM_EVENT_DISCONNECT;
32495 cm_event.status = disconn_status;
32496 cm_event.local_addr = cm_id->local_addr;
32497 @@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32498 }
32499
32500 if (issue_close) {
32501 - atomic_inc(&cm_closes);
32502 + atomic_inc_unchecked(&cm_closes);
32503 nes_disconnect(nesqp, 1);
32504
32505 cm_id->provider_data = nesqp;
32506 @@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32507
32508 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32509 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32510 - atomic_inc(&cm_accepts);
32511 + atomic_inc_unchecked(&cm_accepts);
32512
32513 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32514 netdev_refcnt_read(nesvnic->netdev));
32515 @@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32516 struct nes_cm_core *cm_core;
32517 u8 *start_buff;
32518
32519 - atomic_inc(&cm_rejects);
32520 + atomic_inc_unchecked(&cm_rejects);
32521 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32522 loopback = cm_node->loopbackpartner;
32523 cm_core = cm_node->cm_core;
32524 @@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32525 ntohl(cm_id->local_addr.sin_addr.s_addr),
32526 ntohs(cm_id->local_addr.sin_port));
32527
32528 - atomic_inc(&cm_connects);
32529 + atomic_inc_unchecked(&cm_connects);
32530 nesqp->active_conn = 1;
32531
32532 /* cache the cm_id in the qp */
32533 @@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32534 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32535 return err;
32536 }
32537 - atomic_inc(&cm_listens_created);
32538 + atomic_inc_unchecked(&cm_listens_created);
32539 }
32540
32541 cm_id->add_ref(cm_id);
32542 @@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32543
32544 if (nesqp->destroyed)
32545 return;
32546 - atomic_inc(&cm_connecteds);
32547 + atomic_inc_unchecked(&cm_connecteds);
32548 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32549 " local port 0x%04X. jiffies = %lu.\n",
32550 nesqp->hwqp.qp_id,
32551 @@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32552
32553 cm_id->add_ref(cm_id);
32554 ret = cm_id->event_handler(cm_id, &cm_event);
32555 - atomic_inc(&cm_closes);
32556 + atomic_inc_unchecked(&cm_closes);
32557 cm_event.event = IW_CM_EVENT_CLOSE;
32558 cm_event.status = 0;
32559 cm_event.provider_data = cm_id->provider_data;
32560 @@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32561 return;
32562 cm_id = cm_node->cm_id;
32563
32564 - atomic_inc(&cm_connect_reqs);
32565 + atomic_inc_unchecked(&cm_connect_reqs);
32566 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32567 cm_node, cm_id, jiffies);
32568
32569 @@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32570 return;
32571 cm_id = cm_node->cm_id;
32572
32573 - atomic_inc(&cm_connect_reqs);
32574 + atomic_inc_unchecked(&cm_connect_reqs);
32575 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32576 cm_node, cm_id, jiffies);
32577
32578 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32579 index 3ba7be3..c81f6ff 100644
32580 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32581 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32582 @@ -40,8 +40,8 @@
32583 #include "nes.h"
32584 #include "nes_mgt.h"
32585
32586 -atomic_t pau_qps_created;
32587 -atomic_t pau_qps_destroyed;
32588 +atomic_unchecked_t pau_qps_created;
32589 +atomic_unchecked_t pau_qps_destroyed;
32590
32591 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32592 {
32593 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32594 {
32595 struct sk_buff *skb;
32596 unsigned long flags;
32597 - atomic_inc(&pau_qps_destroyed);
32598 + atomic_inc_unchecked(&pau_qps_destroyed);
32599
32600 /* Free packets that have not yet been forwarded */
32601 /* Lock is acquired by skb_dequeue when removing the skb */
32602 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32603 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32604 skb_queue_head_init(&nesqp->pau_list);
32605 spin_lock_init(&nesqp->pau_lock);
32606 - atomic_inc(&pau_qps_created);
32607 + atomic_inc_unchecked(&pau_qps_created);
32608 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32609 }
32610
32611 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32612 index f3a3ecf..57d311d 100644
32613 --- a/drivers/infiniband/hw/nes/nes_nic.c
32614 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32615 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32616 target_stat_values[++index] = mh_detected;
32617 target_stat_values[++index] = mh_pauses_sent;
32618 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32619 - target_stat_values[++index] = atomic_read(&cm_connects);
32620 - target_stat_values[++index] = atomic_read(&cm_accepts);
32621 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32622 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32623 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32624 - target_stat_values[++index] = atomic_read(&cm_rejects);
32625 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32626 - target_stat_values[++index] = atomic_read(&qps_created);
32627 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32628 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32629 - target_stat_values[++index] = atomic_read(&cm_closes);
32630 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32631 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32632 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32633 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32634 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32635 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32636 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32637 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32638 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32639 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32640 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32641 target_stat_values[++index] = cm_packets_sent;
32642 target_stat_values[++index] = cm_packets_bounced;
32643 target_stat_values[++index] = cm_packets_created;
32644 target_stat_values[++index] = cm_packets_received;
32645 target_stat_values[++index] = cm_packets_dropped;
32646 target_stat_values[++index] = cm_packets_retrans;
32647 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32648 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32649 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32650 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32651 target_stat_values[++index] = cm_backlog_drops;
32652 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32653 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32654 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32655 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32656 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32657 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32658 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32659 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32660 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32661 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32662 target_stat_values[++index] = nesadapter->free_4kpbl;
32663 target_stat_values[++index] = nesadapter->free_256pbl;
32664 target_stat_values[++index] = int_mod_timer_init;
32665 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32666 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32667 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32668 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32669 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32670 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32671 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32672 }
32673
32674 /**
32675 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32676 index 8b8812d..a5e1133 100644
32677 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32678 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32679 @@ -46,9 +46,9 @@
32680
32681 #include <rdma/ib_umem.h>
32682
32683 -atomic_t mod_qp_timouts;
32684 -atomic_t qps_created;
32685 -atomic_t sw_qps_destroyed;
32686 +atomic_unchecked_t mod_qp_timouts;
32687 +atomic_unchecked_t qps_created;
32688 +atomic_unchecked_t sw_qps_destroyed;
32689
32690 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32691
32692 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32693 if (init_attr->create_flags)
32694 return ERR_PTR(-EINVAL);
32695
32696 - atomic_inc(&qps_created);
32697 + atomic_inc_unchecked(&qps_created);
32698 switch (init_attr->qp_type) {
32699 case IB_QPT_RC:
32700 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32701 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32702 struct iw_cm_event cm_event;
32703 int ret = 0;
32704
32705 - atomic_inc(&sw_qps_destroyed);
32706 + atomic_inc_unchecked(&sw_qps_destroyed);
32707 nesqp->destroyed = 1;
32708
32709 /* Blow away the connection if it exists. */
32710 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32711 index 6b811e3..f8acf88 100644
32712 --- a/drivers/infiniband/hw/qib/qib.h
32713 +++ b/drivers/infiniband/hw/qib/qib.h
32714 @@ -51,6 +51,7 @@
32715 #include <linux/completion.h>
32716 #include <linux/kref.h>
32717 #include <linux/sched.h>
32718 +#include <linux/slab.h>
32719
32720 #include "qib_common.h"
32721 #include "qib_verbs.h"
32722 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32723 index da739d9..da1c7f4 100644
32724 --- a/drivers/input/gameport/gameport.c
32725 +++ b/drivers/input/gameport/gameport.c
32726 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32727 */
32728 static void gameport_init_port(struct gameport *gameport)
32729 {
32730 - static atomic_t gameport_no = ATOMIC_INIT(0);
32731 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32732
32733 __module_get(THIS_MODULE);
32734
32735 mutex_init(&gameport->drv_mutex);
32736 device_initialize(&gameport->dev);
32737 dev_set_name(&gameport->dev, "gameport%lu",
32738 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32739 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32740 gameport->dev.bus = &gameport_bus;
32741 gameport->dev.release = gameport_release_port;
32742 if (gameport->parent)
32743 diff --git a/drivers/input/input.c b/drivers/input/input.c
32744 index 8921c61..f5cd63d 100644
32745 --- a/drivers/input/input.c
32746 +++ b/drivers/input/input.c
32747 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32748 */
32749 int input_register_device(struct input_dev *dev)
32750 {
32751 - static atomic_t input_no = ATOMIC_INIT(0);
32752 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32753 struct input_handler *handler;
32754 const char *path;
32755 int error;
32756 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32757 dev->setkeycode = input_default_setkeycode;
32758
32759 dev_set_name(&dev->dev, "input%ld",
32760 - (unsigned long) atomic_inc_return(&input_no) - 1);
32761 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32762
32763 error = device_add(&dev->dev);
32764 if (error)
32765 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32766 index b8d8611..7a4a04b 100644
32767 --- a/drivers/input/joystick/sidewinder.c
32768 +++ b/drivers/input/joystick/sidewinder.c
32769 @@ -30,6 +30,7 @@
32770 #include <linux/kernel.h>
32771 #include <linux/module.h>
32772 #include <linux/slab.h>
32773 +#include <linux/sched.h>
32774 #include <linux/init.h>
32775 #include <linux/input.h>
32776 #include <linux/gameport.h>
32777 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32778 index 42f7b25..09fcf46 100644
32779 --- a/drivers/input/joystick/xpad.c
32780 +++ b/drivers/input/joystick/xpad.c
32781 @@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32782
32783 static int xpad_led_probe(struct usb_xpad *xpad)
32784 {
32785 - static atomic_t led_seq = ATOMIC_INIT(0);
32786 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32787 long led_no;
32788 struct xpad_led *led;
32789 struct led_classdev *led_cdev;
32790 @@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32791 if (!led)
32792 return -ENOMEM;
32793
32794 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32795 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32796
32797 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32798 led->xpad = xpad;
32799 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32800 index 0110b5a..d3ad144 100644
32801 --- a/drivers/input/mousedev.c
32802 +++ b/drivers/input/mousedev.c
32803 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32804
32805 spin_unlock_irq(&client->packet_lock);
32806
32807 - if (copy_to_user(buffer, data, count))
32808 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32809 return -EFAULT;
32810
32811 return count;
32812 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32813 index d0f7533..fb8215b 100644
32814 --- a/drivers/input/serio/serio.c
32815 +++ b/drivers/input/serio/serio.c
32816 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32817 */
32818 static void serio_init_port(struct serio *serio)
32819 {
32820 - static atomic_t serio_no = ATOMIC_INIT(0);
32821 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32822
32823 __module_get(THIS_MODULE);
32824
32825 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32826 mutex_init(&serio->drv_mutex);
32827 device_initialize(&serio->dev);
32828 dev_set_name(&serio->dev, "serio%ld",
32829 - (long)atomic_inc_return(&serio_no) - 1);
32830 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32831 serio->dev.bus = &serio_bus;
32832 serio->dev.release = serio_release_port;
32833 serio->dev.groups = serio_device_attr_groups;
32834 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32835 index b902794..fc7b85b 100644
32836 --- a/drivers/isdn/capi/capi.c
32837 +++ b/drivers/isdn/capi/capi.c
32838 @@ -83,8 +83,8 @@ struct capiminor {
32839
32840 struct capi20_appl *ap;
32841 u32 ncci;
32842 - atomic_t datahandle;
32843 - atomic_t msgid;
32844 + atomic_unchecked_t datahandle;
32845 + atomic_unchecked_t msgid;
32846
32847 struct tty_port port;
32848 int ttyinstop;
32849 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32850 capimsg_setu16(s, 2, mp->ap->applid);
32851 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32852 capimsg_setu8 (s, 5, CAPI_RESP);
32853 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32854 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32855 capimsg_setu32(s, 8, mp->ncci);
32856 capimsg_setu16(s, 12, datahandle);
32857 }
32858 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32859 mp->outbytes -= len;
32860 spin_unlock_bh(&mp->outlock);
32861
32862 - datahandle = atomic_inc_return(&mp->datahandle);
32863 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32864 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32865 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32866 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32867 capimsg_setu16(skb->data, 2, mp->ap->applid);
32868 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32869 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32870 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32871 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32872 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32873 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32874 capimsg_setu16(skb->data, 16, len); /* Data length */
32875 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32876 index 821f7ac..28d4030 100644
32877 --- a/drivers/isdn/hardware/avm/b1.c
32878 +++ b/drivers/isdn/hardware/avm/b1.c
32879 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32880 }
32881 if (left) {
32882 if (t4file->user) {
32883 - if (copy_from_user(buf, dp, left))
32884 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32885 return -EFAULT;
32886 } else {
32887 memcpy(buf, dp, left);
32888 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32889 }
32890 if (left) {
32891 if (config->user) {
32892 - if (copy_from_user(buf, dp, left))
32893 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32894 return -EFAULT;
32895 } else {
32896 memcpy(buf, dp, left);
32897 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32898 index dd6b53a..19d9ee6 100644
32899 --- a/drivers/isdn/hardware/eicon/divasync.h
32900 +++ b/drivers/isdn/hardware/eicon/divasync.h
32901 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32902 } diva_didd_add_adapter_t;
32903 typedef struct _diva_didd_remove_adapter {
32904 IDI_CALL p_request;
32905 -} diva_didd_remove_adapter_t;
32906 +} __no_const diva_didd_remove_adapter_t;
32907 typedef struct _diva_didd_read_adapter_array {
32908 void *buffer;
32909 dword length;
32910 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32911 index d303e65..28bcb7b 100644
32912 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32913 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32914 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32915 typedef struct _diva_os_idi_adapter_interface {
32916 diva_init_card_proc_t cleanup_adapter_proc;
32917 diva_cmd_card_proc_t cmd_proc;
32918 -} diva_os_idi_adapter_interface_t;
32919 +} __no_const diva_os_idi_adapter_interface_t;
32920
32921 typedef struct _diva_os_xdi_adapter {
32922 struct list_head link;
32923 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32924 index e74df7c..03a03ba 100644
32925 --- a/drivers/isdn/icn/icn.c
32926 +++ b/drivers/isdn/icn/icn.c
32927 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32928 if (count > len)
32929 count = len;
32930 if (user) {
32931 - if (copy_from_user(msg, buf, count))
32932 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32933 return -EFAULT;
32934 } else
32935 memcpy(msg, buf, count);
32936 diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32937 index 8bc4915..4cc6a2e 100644
32938 --- a/drivers/leds/leds-mc13783.c
32939 +++ b/drivers/leds/leds-mc13783.c
32940 @@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32941 return -EINVAL;
32942 }
32943
32944 - led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32945 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32946 if (led == NULL) {
32947 dev_err(&pdev->dev, "failed to alloc memory\n");
32948 return -ENOMEM;
32949 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32950 index b5fdcb7..5b6c59f 100644
32951 --- a/drivers/lguest/core.c
32952 +++ b/drivers/lguest/core.c
32953 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32954 * it's worked so far. The end address needs +1 because __get_vm_area
32955 * allocates an extra guard page, so we need space for that.
32956 */
32957 +
32958 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32959 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32960 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32961 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32962 +#else
32963 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32964 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32965 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32966 +#endif
32967 +
32968 if (!switcher_vma) {
32969 err = -ENOMEM;
32970 printk("lguest: could not map switcher pages high\n");
32971 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32972 * Now the Switcher is mapped at the right address, we can't fail!
32973 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32974 */
32975 - memcpy(switcher_vma->addr, start_switcher_text,
32976 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32977 end_switcher_text - start_switcher_text);
32978
32979 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32980 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32981 index 39809035..ce25c5e 100644
32982 --- a/drivers/lguest/x86/core.c
32983 +++ b/drivers/lguest/x86/core.c
32984 @@ -59,7 +59,7 @@ static struct {
32985 /* Offset from where switcher.S was compiled to where we've copied it */
32986 static unsigned long switcher_offset(void)
32987 {
32988 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32989 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32990 }
32991
32992 /* This cpu's struct lguest_pages. */
32993 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32994 * These copies are pretty cheap, so we do them unconditionally: */
32995 /* Save the current Host top-level page directory.
32996 */
32997 +
32998 +#ifdef CONFIG_PAX_PER_CPU_PGD
32999 + pages->state.host_cr3 = read_cr3();
33000 +#else
33001 pages->state.host_cr3 = __pa(current->mm->pgd);
33002 +#endif
33003 +
33004 /*
33005 * Set up the Guest's page tables to see this CPU's pages (and no
33006 * other CPU's pages).
33007 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33008 * compiled-in switcher code and the high-mapped copy we just made.
33009 */
33010 for (i = 0; i < IDT_ENTRIES; i++)
33011 - default_idt_entries[i] += switcher_offset();
33012 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33013
33014 /*
33015 * Set up the Switcher's per-cpu areas.
33016 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33017 * it will be undisturbed when we switch. To change %cs and jump we
33018 * need this structure to feed to Intel's "lcall" instruction.
33019 */
33020 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33021 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33022 lguest_entry.segment = LGUEST_CS;
33023
33024 /*
33025 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33026 index 40634b0..4f5855e 100644
33027 --- a/drivers/lguest/x86/switcher_32.S
33028 +++ b/drivers/lguest/x86/switcher_32.S
33029 @@ -87,6 +87,7 @@
33030 #include <asm/page.h>
33031 #include <asm/segment.h>
33032 #include <asm/lguest.h>
33033 +#include <asm/processor-flags.h>
33034
33035 // We mark the start of the code to copy
33036 // It's placed in .text tho it's never run here
33037 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33038 // Changes type when we load it: damn Intel!
33039 // For after we switch over our page tables
33040 // That entry will be read-only: we'd crash.
33041 +
33042 +#ifdef CONFIG_PAX_KERNEXEC
33043 + mov %cr0, %edx
33044 + xor $X86_CR0_WP, %edx
33045 + mov %edx, %cr0
33046 +#endif
33047 +
33048 movl $(GDT_ENTRY_TSS*8), %edx
33049 ltr %dx
33050
33051 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33052 // Let's clear it again for our return.
33053 // The GDT descriptor of the Host
33054 // Points to the table after two "size" bytes
33055 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33056 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33057 // Clear "used" from type field (byte 5, bit 2)
33058 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33059 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33060 +
33061 +#ifdef CONFIG_PAX_KERNEXEC
33062 + mov %cr0, %eax
33063 + xor $X86_CR0_WP, %eax
33064 + mov %eax, %cr0
33065 +#endif
33066
33067 // Once our page table's switched, the Guest is live!
33068 // The Host fades as we run this final step.
33069 @@ -295,13 +309,12 @@ deliver_to_host:
33070 // I consulted gcc, and it gave
33071 // These instructions, which I gladly credit:
33072 leal (%edx,%ebx,8), %eax
33073 - movzwl (%eax),%edx
33074 - movl 4(%eax), %eax
33075 - xorw %ax, %ax
33076 - orl %eax, %edx
33077 + movl 4(%eax), %edx
33078 + movw (%eax), %dx
33079 // Now the address of the handler's in %edx
33080 // We call it now: its "iret" drops us home.
33081 - jmp *%edx
33082 + ljmp $__KERNEL_CS, $1f
33083 +1: jmp *%edx
33084
33085 // Every interrupt can come to us here
33086 // But we must truly tell each apart.
33087 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33088 index 20e5c2c..9e849a9 100644
33089 --- a/drivers/macintosh/macio_asic.c
33090 +++ b/drivers/macintosh/macio_asic.c
33091 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33092 * MacIO is matched against any Apple ID, it's probe() function
33093 * will then decide wether it applies or not
33094 */
33095 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33096 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33097 .vendor = PCI_VENDOR_ID_APPLE,
33098 .device = PCI_ANY_ID,
33099 .subvendor = PCI_ANY_ID,
33100 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33101 index 17e2b47..bcbeec4 100644
33102 --- a/drivers/md/bitmap.c
33103 +++ b/drivers/md/bitmap.c
33104 @@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33105 chunk_kb ? "KB" : "B");
33106 if (bitmap->file) {
33107 seq_printf(seq, ", file: ");
33108 - seq_path(seq, &bitmap->file->f_path, " \t\n");
33109 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33110 }
33111
33112 seq_printf(seq, "\n");
33113 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33114 index a1a3e6d..1918bfc 100644
33115 --- a/drivers/md/dm-ioctl.c
33116 +++ b/drivers/md/dm-ioctl.c
33117 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33118 cmd == DM_LIST_VERSIONS_CMD)
33119 return 0;
33120
33121 - if ((cmd == DM_DEV_CREATE_CMD)) {
33122 + if (cmd == DM_DEV_CREATE_CMD) {
33123 if (!*param->name) {
33124 DMWARN("name not supplied when creating device");
33125 return -EINVAL;
33126 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33127 index d039de8..0cf5b87 100644
33128 --- a/drivers/md/dm-raid1.c
33129 +++ b/drivers/md/dm-raid1.c
33130 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33131
33132 struct mirror {
33133 struct mirror_set *ms;
33134 - atomic_t error_count;
33135 + atomic_unchecked_t error_count;
33136 unsigned long error_type;
33137 struct dm_dev *dev;
33138 sector_t offset;
33139 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33140 struct mirror *m;
33141
33142 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33143 - if (!atomic_read(&m->error_count))
33144 + if (!atomic_read_unchecked(&m->error_count))
33145 return m;
33146
33147 return NULL;
33148 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33149 * simple way to tell if a device has encountered
33150 * errors.
33151 */
33152 - atomic_inc(&m->error_count);
33153 + atomic_inc_unchecked(&m->error_count);
33154
33155 if (test_and_set_bit(error_type, &m->error_type))
33156 return;
33157 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33158 struct mirror *m = get_default_mirror(ms);
33159
33160 do {
33161 - if (likely(!atomic_read(&m->error_count)))
33162 + if (likely(!atomic_read_unchecked(&m->error_count)))
33163 return m;
33164
33165 if (m-- == ms->mirror)
33166 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33167 {
33168 struct mirror *default_mirror = get_default_mirror(m->ms);
33169
33170 - return !atomic_read(&default_mirror->error_count);
33171 + return !atomic_read_unchecked(&default_mirror->error_count);
33172 }
33173
33174 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33175 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33176 */
33177 if (likely(region_in_sync(ms, region, 1)))
33178 m = choose_mirror(ms, bio->bi_sector);
33179 - else if (m && atomic_read(&m->error_count))
33180 + else if (m && atomic_read_unchecked(&m->error_count))
33181 m = NULL;
33182
33183 if (likely(m))
33184 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33185 }
33186
33187 ms->mirror[mirror].ms = ms;
33188 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33189 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33190 ms->mirror[mirror].error_type = 0;
33191 ms->mirror[mirror].offset = offset;
33192
33193 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33194 */
33195 static char device_status_char(struct mirror *m)
33196 {
33197 - if (!atomic_read(&(m->error_count)))
33198 + if (!atomic_read_unchecked(&(m->error_count)))
33199 return 'A';
33200
33201 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33202 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33203 index 35c94ff..20d4c17 100644
33204 --- a/drivers/md/dm-stripe.c
33205 +++ b/drivers/md/dm-stripe.c
33206 @@ -20,7 +20,7 @@ struct stripe {
33207 struct dm_dev *dev;
33208 sector_t physical_start;
33209
33210 - atomic_t error_count;
33211 + atomic_unchecked_t error_count;
33212 };
33213
33214 struct stripe_c {
33215 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33216 kfree(sc);
33217 return r;
33218 }
33219 - atomic_set(&(sc->stripe[i].error_count), 0);
33220 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33221 }
33222
33223 ti->private = sc;
33224 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33225 DMEMIT("%d ", sc->stripes);
33226 for (i = 0; i < sc->stripes; i++) {
33227 DMEMIT("%s ", sc->stripe[i].dev->name);
33228 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33229 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33230 'D' : 'A';
33231 }
33232 buffer[i] = '\0';
33233 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33234 */
33235 for (i = 0; i < sc->stripes; i++)
33236 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33237 - atomic_inc(&(sc->stripe[i].error_count));
33238 - if (atomic_read(&(sc->stripe[i].error_count)) <
33239 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33240 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33241 DM_IO_ERROR_THRESHOLD)
33242 schedule_work(&sc->trigger_event);
33243 }
33244 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33245 index 2e227fb..44ead1f 100644
33246 --- a/drivers/md/dm-table.c
33247 +++ b/drivers/md/dm-table.c
33248 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33249 if (!dev_size)
33250 return 0;
33251
33252 - if ((start >= dev_size) || (start + len > dev_size)) {
33253 + if ((start >= dev_size) || (len > dev_size - start)) {
33254 DMWARN("%s: %s too small for target: "
33255 "start=%llu, len=%llu, dev_size=%llu",
33256 dm_device_name(ti->table->md), bdevname(bdev, b),
33257 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33258 index 737d388..811ad5a 100644
33259 --- a/drivers/md/dm-thin-metadata.c
33260 +++ b/drivers/md/dm-thin-metadata.c
33261 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33262
33263 pmd->info.tm = tm;
33264 pmd->info.levels = 2;
33265 - pmd->info.value_type.context = pmd->data_sm;
33266 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33267 pmd->info.value_type.size = sizeof(__le64);
33268 pmd->info.value_type.inc = data_block_inc;
33269 pmd->info.value_type.dec = data_block_dec;
33270 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33271
33272 pmd->bl_info.tm = tm;
33273 pmd->bl_info.levels = 1;
33274 - pmd->bl_info.value_type.context = pmd->data_sm;
33275 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33276 pmd->bl_info.value_type.size = sizeof(__le64);
33277 pmd->bl_info.value_type.inc = data_block_inc;
33278 pmd->bl_info.value_type.dec = data_block_dec;
33279 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33280 index e24143c..ce2f21a1 100644
33281 --- a/drivers/md/dm.c
33282 +++ b/drivers/md/dm.c
33283 @@ -176,9 +176,9 @@ struct mapped_device {
33284 /*
33285 * Event handling.
33286 */
33287 - atomic_t event_nr;
33288 + atomic_unchecked_t event_nr;
33289 wait_queue_head_t eventq;
33290 - atomic_t uevent_seq;
33291 + atomic_unchecked_t uevent_seq;
33292 struct list_head uevent_list;
33293 spinlock_t uevent_lock; /* Protect access to uevent_list */
33294
33295 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33296 rwlock_init(&md->map_lock);
33297 atomic_set(&md->holders, 1);
33298 atomic_set(&md->open_count, 0);
33299 - atomic_set(&md->event_nr, 0);
33300 - atomic_set(&md->uevent_seq, 0);
33301 + atomic_set_unchecked(&md->event_nr, 0);
33302 + atomic_set_unchecked(&md->uevent_seq, 0);
33303 INIT_LIST_HEAD(&md->uevent_list);
33304 spin_lock_init(&md->uevent_lock);
33305
33306 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33307
33308 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33309
33310 - atomic_inc(&md->event_nr);
33311 + atomic_inc_unchecked(&md->event_nr);
33312 wake_up(&md->eventq);
33313 }
33314
33315 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33316
33317 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33318 {
33319 - return atomic_add_return(1, &md->uevent_seq);
33320 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33321 }
33322
33323 uint32_t dm_get_event_nr(struct mapped_device *md)
33324 {
33325 - return atomic_read(&md->event_nr);
33326 + return atomic_read_unchecked(&md->event_nr);
33327 }
33328
33329 int dm_wait_event(struct mapped_device *md, int event_nr)
33330 {
33331 return wait_event_interruptible(md->eventq,
33332 - (event_nr != atomic_read(&md->event_nr)));
33333 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33334 }
33335
33336 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33337 diff --git a/drivers/md/md.c b/drivers/md/md.c
33338 index 2b30ffd..362b519 100644
33339 --- a/drivers/md/md.c
33340 +++ b/drivers/md/md.c
33341 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33342 * start build, activate spare
33343 */
33344 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33345 -static atomic_t md_event_count;
33346 +static atomic_unchecked_t md_event_count;
33347 void md_new_event(struct mddev *mddev)
33348 {
33349 - atomic_inc(&md_event_count);
33350 + atomic_inc_unchecked(&md_event_count);
33351 wake_up(&md_event_waiters);
33352 }
33353 EXPORT_SYMBOL_GPL(md_new_event);
33354 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33355 */
33356 static void md_new_event_inintr(struct mddev *mddev)
33357 {
33358 - atomic_inc(&md_event_count);
33359 + atomic_inc_unchecked(&md_event_count);
33360 wake_up(&md_event_waiters);
33361 }
33362
33363 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33364
33365 rdev->preferred_minor = 0xffff;
33366 rdev->data_offset = le64_to_cpu(sb->data_offset);
33367 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33368 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33369
33370 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33371 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33372 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33373 else
33374 sb->resync_offset = cpu_to_le64(0);
33375
33376 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33377 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33378
33379 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33380 sb->size = cpu_to_le64(mddev->dev_sectors);
33381 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33382 static ssize_t
33383 errors_show(struct md_rdev *rdev, char *page)
33384 {
33385 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33386 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33387 }
33388
33389 static ssize_t
33390 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33391 char *e;
33392 unsigned long n = simple_strtoul(buf, &e, 10);
33393 if (*buf && (*e == 0 || *e == '\n')) {
33394 - atomic_set(&rdev->corrected_errors, n);
33395 + atomic_set_unchecked(&rdev->corrected_errors, n);
33396 return len;
33397 }
33398 return -EINVAL;
33399 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33400 rdev->sb_loaded = 0;
33401 rdev->bb_page = NULL;
33402 atomic_set(&rdev->nr_pending, 0);
33403 - atomic_set(&rdev->read_errors, 0);
33404 - atomic_set(&rdev->corrected_errors, 0);
33405 + atomic_set_unchecked(&rdev->read_errors, 0);
33406 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33407
33408 INIT_LIST_HEAD(&rdev->same_set);
33409 init_waitqueue_head(&rdev->blocked_wait);
33410 @@ -3744,8 +3744,8 @@ array_state_show(struct mddev *mddev, char *page)
33411 return sprintf(page, "%s\n", array_states[st]);
33412 }
33413
33414 -static int do_md_stop(struct mddev * mddev, int ro, int is_open);
33415 -static int md_set_readonly(struct mddev * mddev, int is_open);
33416 +static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
33417 +static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
33418 static int do_md_run(struct mddev * mddev);
33419 static int restart_array(struct mddev *mddev);
33420
33421 @@ -3761,14 +3761,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33422 /* stopping an active array */
33423 if (atomic_read(&mddev->openers) > 0)
33424 return -EBUSY;
33425 - err = do_md_stop(mddev, 0, 0);
33426 + err = do_md_stop(mddev, 0, NULL);
33427 break;
33428 case inactive:
33429 /* stopping an active array */
33430 if (mddev->pers) {
33431 if (atomic_read(&mddev->openers) > 0)
33432 return -EBUSY;
33433 - err = do_md_stop(mddev, 2, 0);
33434 + err = do_md_stop(mddev, 2, NULL);
33435 } else
33436 err = 0; /* already inactive */
33437 break;
33438 @@ -3776,7 +3776,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33439 break; /* not supported yet */
33440 case readonly:
33441 if (mddev->pers)
33442 - err = md_set_readonly(mddev, 0);
33443 + err = md_set_readonly(mddev, NULL);
33444 else {
33445 mddev->ro = 1;
33446 set_disk_ro(mddev->gendisk, 1);
33447 @@ -3786,7 +3786,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33448 case read_auto:
33449 if (mddev->pers) {
33450 if (mddev->ro == 0)
33451 - err = md_set_readonly(mddev, 0);
33452 + err = md_set_readonly(mddev, NULL);
33453 else if (mddev->ro == 1)
33454 err = restart_array(mddev);
33455 if (err == 0) {
33456 @@ -5124,15 +5124,17 @@ void md_stop(struct mddev *mddev)
33457 }
33458 EXPORT_SYMBOL_GPL(md_stop);
33459
33460 -static int md_set_readonly(struct mddev *mddev, int is_open)
33461 +static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
33462 {
33463 int err = 0;
33464 mutex_lock(&mddev->open_mutex);
33465 - if (atomic_read(&mddev->openers) > is_open) {
33466 + if (atomic_read(&mddev->openers) > !!bdev) {
33467 printk("md: %s still in use.\n",mdname(mddev));
33468 err = -EBUSY;
33469 goto out;
33470 }
33471 + if (bdev)
33472 + sync_blockdev(bdev);
33473 if (mddev->pers) {
33474 __md_stop_writes(mddev);
33475
33476 @@ -5154,18 +5156,26 @@ out:
33477 * 0 - completely stop and dis-assemble array
33478 * 2 - stop but do not disassemble array
33479 */
33480 -static int do_md_stop(struct mddev * mddev, int mode, int is_open)
33481 +static int do_md_stop(struct mddev * mddev, int mode,
33482 + struct block_device *bdev)
33483 {
33484 struct gendisk *disk = mddev->gendisk;
33485 struct md_rdev *rdev;
33486
33487 mutex_lock(&mddev->open_mutex);
33488 - if (atomic_read(&mddev->openers) > is_open ||
33489 + if (atomic_read(&mddev->openers) > !!bdev ||
33490 mddev->sysfs_active) {
33491 printk("md: %s still in use.\n",mdname(mddev));
33492 mutex_unlock(&mddev->open_mutex);
33493 return -EBUSY;
33494 }
33495 + if (bdev)
33496 + /* It is possible IO was issued on some other
33497 + * open file which was closed before we took ->open_mutex.
33498 + * As that was not the last close __blkdev_put will not
33499 + * have called sync_blockdev, so we must.
33500 + */
33501 + sync_blockdev(bdev);
33502
33503 if (mddev->pers) {
33504 if (mddev->ro)
33505 @@ -5239,7 +5249,7 @@ static void autorun_array(struct mddev *mddev)
33506 err = do_md_run(mddev);
33507 if (err) {
33508 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
33509 - do_md_stop(mddev, 0, 0);
33510 + do_md_stop(mddev, 0, NULL);
33511 }
33512 }
33513
33514 @@ -6237,11 +6247,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
33515 goto done_unlock;
33516
33517 case STOP_ARRAY:
33518 - err = do_md_stop(mddev, 0, 1);
33519 + err = do_md_stop(mddev, 0, bdev);
33520 goto done_unlock;
33521
33522 case STOP_ARRAY_RO:
33523 - err = md_set_readonly(mddev, 1);
33524 + err = md_set_readonly(mddev, bdev);
33525 goto done_unlock;
33526
33527 case BLKROSET:
33528 @@ -6738,7 +6748,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33529
33530 spin_unlock(&pers_lock);
33531 seq_printf(seq, "\n");
33532 - seq->poll_event = atomic_read(&md_event_count);
33533 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33534 return 0;
33535 }
33536 if (v == (void*)2) {
33537 @@ -6841,7 +6851,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33538 return error;
33539
33540 seq = file->private_data;
33541 - seq->poll_event = atomic_read(&md_event_count);
33542 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33543 return error;
33544 }
33545
33546 @@ -6855,7 +6865,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33547 /* always allow read */
33548 mask = POLLIN | POLLRDNORM;
33549
33550 - if (seq->poll_event != atomic_read(&md_event_count))
33551 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33552 mask |= POLLERR | POLLPRI;
33553 return mask;
33554 }
33555 @@ -6899,7 +6909,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33556 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33557 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33558 (int)part_stat_read(&disk->part0, sectors[1]) -
33559 - atomic_read(&disk->sync_io);
33560 + atomic_read_unchecked(&disk->sync_io);
33561 /* sync IO will cause sync_io to increase before the disk_stats
33562 * as sync_io is counted when a request starts, and
33563 * disk_stats is counted when it completes.
33564 diff --git a/drivers/md/md.h b/drivers/md/md.h
33565 index 1c2063c..9639970 100644
33566 --- a/drivers/md/md.h
33567 +++ b/drivers/md/md.h
33568 @@ -93,13 +93,13 @@ struct md_rdev {
33569 * only maintained for arrays that
33570 * support hot removal
33571 */
33572 - atomic_t read_errors; /* number of consecutive read errors that
33573 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33574 * we have tried to ignore.
33575 */
33576 struct timespec last_read_error; /* monotonic time since our
33577 * last read error
33578 */
33579 - atomic_t corrected_errors; /* number of corrected read errors,
33580 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33581 * for reporting to userspace and storing
33582 * in superblock.
33583 */
33584 @@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33585
33586 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33587 {
33588 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33589 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33590 }
33591
33592 struct md_personality
33593 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33594 index fc90c11..c8cd9a9 100644
33595 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33596 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33597 @@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33598 /*----------------------------------------------------------------*/
33599
33600 struct sm_checker {
33601 - struct dm_space_map sm;
33602 + dm_space_map_no_const sm;
33603
33604 struct count_array old_counts;
33605 struct count_array counts;
33606 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33607 index 3d0ed53..35dc592 100644
33608 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33609 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33610 @@ -23,7 +23,7 @@
33611 * Space map interface.
33612 */
33613 struct sm_disk {
33614 - struct dm_space_map sm;
33615 + dm_space_map_no_const sm;
33616
33617 struct ll_disk ll;
33618 struct ll_disk old_ll;
33619 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33620 index e89ae5e..062e4c2 100644
33621 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33622 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33623 @@ -43,7 +43,7 @@ struct block_op {
33624 };
33625
33626 struct sm_metadata {
33627 - struct dm_space_map sm;
33628 + dm_space_map_no_const sm;
33629
33630 struct ll_disk ll;
33631 struct ll_disk old_ll;
33632 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33633 index 1cbfc6b..56e1dbb 100644
33634 --- a/drivers/md/persistent-data/dm-space-map.h
33635 +++ b/drivers/md/persistent-data/dm-space-map.h
33636 @@ -60,6 +60,7 @@ struct dm_space_map {
33637 int (*root_size)(struct dm_space_map *sm, size_t *result);
33638 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33639 };
33640 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33641
33642 /*----------------------------------------------------------------*/
33643
33644 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33645 index d1f74ab..d1b24fd 100644
33646 --- a/drivers/md/raid1.c
33647 +++ b/drivers/md/raid1.c
33648 @@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33649 if (r1_sync_page_io(rdev, sect, s,
33650 bio->bi_io_vec[idx].bv_page,
33651 READ) != 0)
33652 - atomic_add(s, &rdev->corrected_errors);
33653 + atomic_add_unchecked(s, &rdev->corrected_errors);
33654 }
33655 sectors -= s;
33656 sect += s;
33657 @@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33658 test_bit(In_sync, &rdev->flags)) {
33659 if (r1_sync_page_io(rdev, sect, s,
33660 conf->tmppage, READ)) {
33661 - atomic_add(s, &rdev->corrected_errors);
33662 + atomic_add_unchecked(s, &rdev->corrected_errors);
33663 printk(KERN_INFO
33664 "md/raid1:%s: read error corrected "
33665 "(%d sectors at %llu on %s)\n",
33666 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33667 index a954c95..6e7a21c 100644
33668 --- a/drivers/md/raid10.c
33669 +++ b/drivers/md/raid10.c
33670 @@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33671 /* The write handler will notice the lack of
33672 * R10BIO_Uptodate and record any errors etc
33673 */
33674 - atomic_add(r10_bio->sectors,
33675 + atomic_add_unchecked(r10_bio->sectors,
33676 &conf->mirrors[d].rdev->corrected_errors);
33677
33678 /* for reconstruct, we always reschedule after a read.
33679 @@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33680 {
33681 struct timespec cur_time_mon;
33682 unsigned long hours_since_last;
33683 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33684 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33685
33686 ktime_get_ts(&cur_time_mon);
33687
33688 @@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33689 * overflowing the shift of read_errors by hours_since_last.
33690 */
33691 if (hours_since_last >= 8 * sizeof(read_errors))
33692 - atomic_set(&rdev->read_errors, 0);
33693 + atomic_set_unchecked(&rdev->read_errors, 0);
33694 else
33695 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33696 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33697 }
33698
33699 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33700 @@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33701 return;
33702
33703 check_decay_read_errors(mddev, rdev);
33704 - atomic_inc(&rdev->read_errors);
33705 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33706 + atomic_inc_unchecked(&rdev->read_errors);
33707 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33708 char b[BDEVNAME_SIZE];
33709 bdevname(rdev->bdev, b);
33710
33711 @@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33712 "md/raid10:%s: %s: Raid device exceeded "
33713 "read_error threshold [cur %d:max %d]\n",
33714 mdname(mddev), b,
33715 - atomic_read(&rdev->read_errors), max_read_errors);
33716 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33717 printk(KERN_NOTICE
33718 "md/raid10:%s: %s: Failing raid device\n",
33719 mdname(mddev), b);
33720 @@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33721 (unsigned long long)(
33722 sect + rdev->data_offset),
33723 bdevname(rdev->bdev, b));
33724 - atomic_add(s, &rdev->corrected_errors);
33725 + atomic_add_unchecked(s, &rdev->corrected_errors);
33726 }
33727
33728 rdev_dec_pending(rdev, mddev);
33729 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33730 index 73a5800..2b0e3b1 100644
33731 --- a/drivers/md/raid5.c
33732 +++ b/drivers/md/raid5.c
33733 @@ -1694,18 +1694,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33734 (unsigned long long)(sh->sector
33735 + rdev->data_offset),
33736 bdevname(rdev->bdev, b));
33737 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33738 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33739 clear_bit(R5_ReadError, &sh->dev[i].flags);
33740 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33741 }
33742 - if (atomic_read(&rdev->read_errors))
33743 - atomic_set(&rdev->read_errors, 0);
33744 + if (atomic_read_unchecked(&rdev->read_errors))
33745 + atomic_set_unchecked(&rdev->read_errors, 0);
33746 } else {
33747 const char *bdn = bdevname(rdev->bdev, b);
33748 int retry = 0;
33749
33750 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33751 - atomic_inc(&rdev->read_errors);
33752 + atomic_inc_unchecked(&rdev->read_errors);
33753 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33754 printk_ratelimited(
33755 KERN_WARNING
33756 @@ -1734,7 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33757 (unsigned long long)(sh->sector
33758 + rdev->data_offset),
33759 bdn);
33760 - else if (atomic_read(&rdev->read_errors)
33761 + else if (atomic_read_unchecked(&rdev->read_errors)
33762 > conf->max_nr_stripes)
33763 printk(KERN_WARNING
33764 "md/raid:%s: Too many read errors, failing device %s.\n",
33765 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33766 index d88c4aa..17c80b1 100644
33767 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33768 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33769 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33770 .subvendor = _subvend, .subdevice = _subdev, \
33771 .driver_data = (unsigned long)&_driverdata }
33772
33773 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33774 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33775 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33776 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33777 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33778 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33779 index a7d876f..8c21b61 100644
33780 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33781 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33782 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33783 union {
33784 dmx_ts_cb ts;
33785 dmx_section_cb sec;
33786 - } cb;
33787 + } __no_const cb;
33788
33789 struct dvb_demux *demux;
33790 void *priv;
33791 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33792 index 39eab73..60033e7 100644
33793 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33794 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33795 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33796 const struct dvb_device *template, void *priv, int type)
33797 {
33798 struct dvb_device *dvbdev;
33799 - struct file_operations *dvbdevfops;
33800 + file_operations_no_const *dvbdevfops;
33801 struct device *clsdev;
33802 int minor;
33803 int id;
33804 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33805 index 3940bb0..fb3952a 100644
33806 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33807 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33808 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33809
33810 struct dib0700_adapter_state {
33811 int (*set_param_save) (struct dvb_frontend *);
33812 -};
33813 +} __no_const;
33814
33815 static int dib7070_set_param_override(struct dvb_frontend *fe)
33816 {
33817 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33818 index 451c5a7..649f711 100644
33819 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33820 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33821 @@ -95,7 +95,7 @@ struct su3000_state {
33822
33823 struct s6x0_state {
33824 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33825 -};
33826 +} __no_const;
33827
33828 /* debug */
33829 static int dvb_usb_dw2102_debug;
33830 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33831 index 404f63a..4796533 100644
33832 --- a/drivers/media/dvb/frontends/dib3000.h
33833 +++ b/drivers/media/dvb/frontends/dib3000.h
33834 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33835 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33836 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33837 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33838 -};
33839 +} __no_const;
33840
33841 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33842 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33843 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33844 index 7539a5d..06531a6 100644
33845 --- a/drivers/media/dvb/ngene/ngene-cards.c
33846 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33847 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33848
33849 /****************************************************************************/
33850
33851 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33852 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33853 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33854 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33855 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33856 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33857 index 16a089f..1661b11 100644
33858 --- a/drivers/media/radio/radio-cadet.c
33859 +++ b/drivers/media/radio/radio-cadet.c
33860 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33861 unsigned char readbuf[RDS_BUFFER];
33862 int i = 0;
33863
33864 + if (count > RDS_BUFFER)
33865 + return -EFAULT;
33866 mutex_lock(&dev->lock);
33867 if (dev->rdsstat == 0) {
33868 dev->rdsstat = 1;
33869 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33870 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33871 mutex_unlock(&dev->lock);
33872
33873 - if (copy_to_user(data, readbuf, i))
33874 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33875 return -EFAULT;
33876 return i;
33877 }
33878 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33879 index 9cde353..8c6a1c3 100644
33880 --- a/drivers/media/video/au0828/au0828.h
33881 +++ b/drivers/media/video/au0828/au0828.h
33882 @@ -191,7 +191,7 @@ struct au0828_dev {
33883
33884 /* I2C */
33885 struct i2c_adapter i2c_adap;
33886 - struct i2c_algorithm i2c_algo;
33887 + i2c_algorithm_no_const i2c_algo;
33888 struct i2c_client i2c_client;
33889 u32 i2c_rc;
33890
33891 diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
33892 index 7930ca5..235bf7d 100644
33893 --- a/drivers/media/video/cx25821/cx25821-core.c
33894 +++ b/drivers/media/video/cx25821/cx25821-core.c
33895 @@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
33896 list_add_tail(&dev->devlist, &cx25821_devlist);
33897 mutex_unlock(&cx25821_devlist_mutex);
33898
33899 - strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
33900 - strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
33901 -
33902 if (dev->pci->device != 0x8210) {
33903 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
33904 __func__, dev->pci->device);
33905 diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
33906 index b9aa801..029f293 100644
33907 --- a/drivers/media/video/cx25821/cx25821.h
33908 +++ b/drivers/media/video/cx25821/cx25821.h
33909 @@ -187,7 +187,7 @@ enum port {
33910 };
33911
33912 struct cx25821_board {
33913 - char *name;
33914 + const char *name;
33915 enum port porta;
33916 enum port portb;
33917 enum port portc;
33918 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33919 index 04bf662..e0ac026 100644
33920 --- a/drivers/media/video/cx88/cx88-alsa.c
33921 +++ b/drivers/media/video/cx88/cx88-alsa.c
33922 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33923 * Only boards with eeprom and byte 1 at eeprom=1 have it
33924 */
33925
33926 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33927 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33928 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33929 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33930 {0, }
33931 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33932 index 88cf9d9..bbc4b2c 100644
33933 --- a/drivers/media/video/omap/omap_vout.c
33934 +++ b/drivers/media/video/omap/omap_vout.c
33935 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33936 OMAP_VIDEO2,
33937 };
33938
33939 -static struct videobuf_queue_ops video_vbq_ops;
33940 /* Variables configurable through module params*/
33941 static u32 video1_numbuffers = 3;
33942 static u32 video2_numbuffers = 3;
33943 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33944 {
33945 struct videobuf_queue *q;
33946 struct omap_vout_device *vout = NULL;
33947 + static struct videobuf_queue_ops video_vbq_ops = {
33948 + .buf_setup = omap_vout_buffer_setup,
33949 + .buf_prepare = omap_vout_buffer_prepare,
33950 + .buf_release = omap_vout_buffer_release,
33951 + .buf_queue = omap_vout_buffer_queue,
33952 + };
33953
33954 vout = video_drvdata(file);
33955 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33956 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33957 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33958
33959 q = &vout->vbq;
33960 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33961 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33962 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33963 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33964 spin_lock_init(&vout->vbq_lock);
33965
33966 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33967 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33968 index 305e6aa..0143317 100644
33969 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33970 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33971 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33972
33973 /* I2C stuff */
33974 struct i2c_adapter i2c_adap;
33975 - struct i2c_algorithm i2c_algo;
33976 + i2c_algorithm_no_const i2c_algo;
33977 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33978 int i2c_cx25840_hack_state;
33979 int i2c_linked;
33980 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33981 index 02194c0..091733b 100644
33982 --- a/drivers/media/video/timblogiw.c
33983 +++ b/drivers/media/video/timblogiw.c
33984 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33985
33986 /* Platform device functions */
33987
33988 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33989 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33990 .vidioc_querycap = timblogiw_querycap,
33991 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33992 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33993 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33994 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33995 };
33996
33997 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33998 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33999 .owner = THIS_MODULE,
34000 .open = timblogiw_open,
34001 .release = timblogiw_close,
34002 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34003 index a5c591f..db692a3 100644
34004 --- a/drivers/message/fusion/mptbase.c
34005 +++ b/drivers/message/fusion/mptbase.c
34006 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34007 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34008 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34009
34010 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34011 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34012 +#else
34013 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34014 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34015 +#endif
34016 +
34017 /*
34018 * Rounding UP to nearest 4-kB boundary here...
34019 */
34020 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34021 index 551262e..7551198 100644
34022 --- a/drivers/message/fusion/mptsas.c
34023 +++ b/drivers/message/fusion/mptsas.c
34024 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34025 return 0;
34026 }
34027
34028 +static inline void
34029 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34030 +{
34031 + if (phy_info->port_details) {
34032 + phy_info->port_details->rphy = rphy;
34033 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34034 + ioc->name, rphy));
34035 + }
34036 +
34037 + if (rphy) {
34038 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34039 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34040 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34041 + ioc->name, rphy, rphy->dev.release));
34042 + }
34043 +}
34044 +
34045 /* no mutex */
34046 static void
34047 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34048 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34049 return NULL;
34050 }
34051
34052 -static inline void
34053 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34054 -{
34055 - if (phy_info->port_details) {
34056 - phy_info->port_details->rphy = rphy;
34057 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34058 - ioc->name, rphy));
34059 - }
34060 -
34061 - if (rphy) {
34062 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34063 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34064 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34065 - ioc->name, rphy, rphy->dev.release));
34066 - }
34067 -}
34068 -
34069 static inline struct sas_port *
34070 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34071 {
34072 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34073 index 0c3ced7..1fe34ec 100644
34074 --- a/drivers/message/fusion/mptscsih.c
34075 +++ b/drivers/message/fusion/mptscsih.c
34076 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34077
34078 h = shost_priv(SChost);
34079
34080 - if (h) {
34081 - if (h->info_kbuf == NULL)
34082 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34083 - return h->info_kbuf;
34084 - h->info_kbuf[0] = '\0';
34085 + if (!h)
34086 + return NULL;
34087
34088 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34089 - h->info_kbuf[size-1] = '\0';
34090 - }
34091 + if (h->info_kbuf == NULL)
34092 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34093 + return h->info_kbuf;
34094 + h->info_kbuf[0] = '\0';
34095 +
34096 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34097 + h->info_kbuf[size-1] = '\0';
34098
34099 return h->info_kbuf;
34100 }
34101 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34102 index 6d115c7..58ff7fd 100644
34103 --- a/drivers/message/i2o/i2o_proc.c
34104 +++ b/drivers/message/i2o/i2o_proc.c
34105 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34106 "Array Controller Device"
34107 };
34108
34109 -static char *chtostr(u8 * chars, int n)
34110 -{
34111 - char tmp[256];
34112 - tmp[0] = 0;
34113 - return strncat(tmp, (char *)chars, n);
34114 -}
34115 -
34116 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34117 char *group)
34118 {
34119 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34120
34121 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34122 seq_printf(seq, "%-#8x", ddm_table.module_id);
34123 - seq_printf(seq, "%-29s",
34124 - chtostr(ddm_table.module_name_version, 28));
34125 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34126 seq_printf(seq, "%9d ", ddm_table.data_size);
34127 seq_printf(seq, "%8d", ddm_table.code_size);
34128
34129 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34130
34131 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34132 seq_printf(seq, "%-#8x", dst->module_id);
34133 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34134 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34135 + seq_printf(seq, "%-.28s", dst->module_name_version);
34136 + seq_printf(seq, "%-.8s", dst->date);
34137 seq_printf(seq, "%8d ", dst->module_size);
34138 seq_printf(seq, "%8d ", dst->mpb_size);
34139 seq_printf(seq, "0x%04x", dst->module_flags);
34140 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34141 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34142 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34143 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34144 - seq_printf(seq, "Vendor info : %s\n",
34145 - chtostr((u8 *) (work32 + 2), 16));
34146 - seq_printf(seq, "Product info : %s\n",
34147 - chtostr((u8 *) (work32 + 6), 16));
34148 - seq_printf(seq, "Description : %s\n",
34149 - chtostr((u8 *) (work32 + 10), 16));
34150 - seq_printf(seq, "Product rev. : %s\n",
34151 - chtostr((u8 *) (work32 + 14), 8));
34152 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34153 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34154 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34155 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34156
34157 seq_printf(seq, "Serial number : ");
34158 print_serial_number(seq, (u8 *) (work32 + 16),
34159 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34160 }
34161
34162 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34163 - seq_printf(seq, "Module name : %s\n",
34164 - chtostr(result.module_name, 24));
34165 - seq_printf(seq, "Module revision : %s\n",
34166 - chtostr(result.module_rev, 8));
34167 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34168 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34169
34170 seq_printf(seq, "Serial number : ");
34171 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34172 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34173 return 0;
34174 }
34175
34176 - seq_printf(seq, "Device name : %s\n",
34177 - chtostr(result.device_name, 64));
34178 - seq_printf(seq, "Service name : %s\n",
34179 - chtostr(result.service_name, 64));
34180 - seq_printf(seq, "Physical name : %s\n",
34181 - chtostr(result.physical_location, 64));
34182 - seq_printf(seq, "Instance number : %s\n",
34183 - chtostr(result.instance_number, 4));
34184 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34185 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34186 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34187 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34188
34189 return 0;
34190 }
34191 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34192 index a8c08f3..155fe3d 100644
34193 --- a/drivers/message/i2o/iop.c
34194 +++ b/drivers/message/i2o/iop.c
34195 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34196
34197 spin_lock_irqsave(&c->context_list_lock, flags);
34198
34199 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34200 - atomic_inc(&c->context_list_counter);
34201 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34202 + atomic_inc_unchecked(&c->context_list_counter);
34203
34204 - entry->context = atomic_read(&c->context_list_counter);
34205 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34206
34207 list_add(&entry->list, &c->context_list);
34208
34209 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34210
34211 #if BITS_PER_LONG == 64
34212 spin_lock_init(&c->context_list_lock);
34213 - atomic_set(&c->context_list_counter, 0);
34214 + atomic_set_unchecked(&c->context_list_counter, 0);
34215 INIT_LIST_HEAD(&c->context_list);
34216 #endif
34217
34218 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34219 index 7ce65f4..e66e9bc 100644
34220 --- a/drivers/mfd/abx500-core.c
34221 +++ b/drivers/mfd/abx500-core.c
34222 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34223
34224 struct abx500_device_entry {
34225 struct list_head list;
34226 - struct abx500_ops ops;
34227 + abx500_ops_no_const ops;
34228 struct device *dev;
34229 };
34230
34231 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34232 index a9223ed..4127b13 100644
34233 --- a/drivers/mfd/janz-cmodio.c
34234 +++ b/drivers/mfd/janz-cmodio.c
34235 @@ -13,6 +13,7 @@
34236
34237 #include <linux/kernel.h>
34238 #include <linux/module.h>
34239 +#include <linux/slab.h>
34240 #include <linux/init.h>
34241 #include <linux/pci.h>
34242 #include <linux/interrupt.h>
34243 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34244 index a981e2a..5ca0c8b 100644
34245 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34246 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34247 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34248 * the lid is closed. This leads to interrupts as soon as a little move
34249 * is done.
34250 */
34251 - atomic_inc(&lis3->count);
34252 + atomic_inc_unchecked(&lis3->count);
34253
34254 wake_up_interruptible(&lis3->misc_wait);
34255 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34256 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34257 if (lis3->pm_dev)
34258 pm_runtime_get_sync(lis3->pm_dev);
34259
34260 - atomic_set(&lis3->count, 0);
34261 + atomic_set_unchecked(&lis3->count, 0);
34262 return 0;
34263 }
34264
34265 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34266 add_wait_queue(&lis3->misc_wait, &wait);
34267 while (true) {
34268 set_current_state(TASK_INTERRUPTIBLE);
34269 - data = atomic_xchg(&lis3->count, 0);
34270 + data = atomic_xchg_unchecked(&lis3->count, 0);
34271 if (data)
34272 break;
34273
34274 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34275 struct lis3lv02d, miscdev);
34276
34277 poll_wait(file, &lis3->misc_wait, wait);
34278 - if (atomic_read(&lis3->count))
34279 + if (atomic_read_unchecked(&lis3->count))
34280 return POLLIN | POLLRDNORM;
34281 return 0;
34282 }
34283 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34284 index 2b1482a..5d33616 100644
34285 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34286 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34287 @@ -266,7 +266,7 @@ struct lis3lv02d {
34288 struct input_polled_dev *idev; /* input device */
34289 struct platform_device *pdev; /* platform device */
34290 struct regulator_bulk_data regulators[2];
34291 - atomic_t count; /* interrupt count after last read */
34292 + atomic_unchecked_t count; /* interrupt count after last read */
34293 union axis_conversion ac; /* hw -> logical axis */
34294 int mapped_btns[3];
34295
34296 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34297 index 2f30bad..c4c13d0 100644
34298 --- a/drivers/misc/sgi-gru/gruhandles.c
34299 +++ b/drivers/misc/sgi-gru/gruhandles.c
34300 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34301 unsigned long nsec;
34302
34303 nsec = CLKS2NSEC(clks);
34304 - atomic_long_inc(&mcs_op_statistics[op].count);
34305 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34306 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34307 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34308 if (mcs_op_statistics[op].max < nsec)
34309 mcs_op_statistics[op].max = nsec;
34310 }
34311 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34312 index 950dbe9..eeef0f8 100644
34313 --- a/drivers/misc/sgi-gru/gruprocfs.c
34314 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34315 @@ -32,9 +32,9 @@
34316
34317 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34318
34319 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34320 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34321 {
34322 - unsigned long val = atomic_long_read(v);
34323 + unsigned long val = atomic_long_read_unchecked(v);
34324
34325 seq_printf(s, "%16lu %s\n", val, id);
34326 }
34327 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34328
34329 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34330 for (op = 0; op < mcsop_last; op++) {
34331 - count = atomic_long_read(&mcs_op_statistics[op].count);
34332 - total = atomic_long_read(&mcs_op_statistics[op].total);
34333 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34334 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34335 max = mcs_op_statistics[op].max;
34336 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34337 count ? total / count : 0, max);
34338 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34339 index 5c3ce24..4915ccb 100644
34340 --- a/drivers/misc/sgi-gru/grutables.h
34341 +++ b/drivers/misc/sgi-gru/grutables.h
34342 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34343 * GRU statistics.
34344 */
34345 struct gru_stats_s {
34346 - atomic_long_t vdata_alloc;
34347 - atomic_long_t vdata_free;
34348 - atomic_long_t gts_alloc;
34349 - atomic_long_t gts_free;
34350 - atomic_long_t gms_alloc;
34351 - atomic_long_t gms_free;
34352 - atomic_long_t gts_double_allocate;
34353 - atomic_long_t assign_context;
34354 - atomic_long_t assign_context_failed;
34355 - atomic_long_t free_context;
34356 - atomic_long_t load_user_context;
34357 - atomic_long_t load_kernel_context;
34358 - atomic_long_t lock_kernel_context;
34359 - atomic_long_t unlock_kernel_context;
34360 - atomic_long_t steal_user_context;
34361 - atomic_long_t steal_kernel_context;
34362 - atomic_long_t steal_context_failed;
34363 - atomic_long_t nopfn;
34364 - atomic_long_t asid_new;
34365 - atomic_long_t asid_next;
34366 - atomic_long_t asid_wrap;
34367 - atomic_long_t asid_reuse;
34368 - atomic_long_t intr;
34369 - atomic_long_t intr_cbr;
34370 - atomic_long_t intr_tfh;
34371 - atomic_long_t intr_spurious;
34372 - atomic_long_t intr_mm_lock_failed;
34373 - atomic_long_t call_os;
34374 - atomic_long_t call_os_wait_queue;
34375 - atomic_long_t user_flush_tlb;
34376 - atomic_long_t user_unload_context;
34377 - atomic_long_t user_exception;
34378 - atomic_long_t set_context_option;
34379 - atomic_long_t check_context_retarget_intr;
34380 - atomic_long_t check_context_unload;
34381 - atomic_long_t tlb_dropin;
34382 - atomic_long_t tlb_preload_page;
34383 - atomic_long_t tlb_dropin_fail_no_asid;
34384 - atomic_long_t tlb_dropin_fail_upm;
34385 - atomic_long_t tlb_dropin_fail_invalid;
34386 - atomic_long_t tlb_dropin_fail_range_active;
34387 - atomic_long_t tlb_dropin_fail_idle;
34388 - atomic_long_t tlb_dropin_fail_fmm;
34389 - atomic_long_t tlb_dropin_fail_no_exception;
34390 - atomic_long_t tfh_stale_on_fault;
34391 - atomic_long_t mmu_invalidate_range;
34392 - atomic_long_t mmu_invalidate_page;
34393 - atomic_long_t flush_tlb;
34394 - atomic_long_t flush_tlb_gru;
34395 - atomic_long_t flush_tlb_gru_tgh;
34396 - atomic_long_t flush_tlb_gru_zero_asid;
34397 + atomic_long_unchecked_t vdata_alloc;
34398 + atomic_long_unchecked_t vdata_free;
34399 + atomic_long_unchecked_t gts_alloc;
34400 + atomic_long_unchecked_t gts_free;
34401 + atomic_long_unchecked_t gms_alloc;
34402 + atomic_long_unchecked_t gms_free;
34403 + atomic_long_unchecked_t gts_double_allocate;
34404 + atomic_long_unchecked_t assign_context;
34405 + atomic_long_unchecked_t assign_context_failed;
34406 + atomic_long_unchecked_t free_context;
34407 + atomic_long_unchecked_t load_user_context;
34408 + atomic_long_unchecked_t load_kernel_context;
34409 + atomic_long_unchecked_t lock_kernel_context;
34410 + atomic_long_unchecked_t unlock_kernel_context;
34411 + atomic_long_unchecked_t steal_user_context;
34412 + atomic_long_unchecked_t steal_kernel_context;
34413 + atomic_long_unchecked_t steal_context_failed;
34414 + atomic_long_unchecked_t nopfn;
34415 + atomic_long_unchecked_t asid_new;
34416 + atomic_long_unchecked_t asid_next;
34417 + atomic_long_unchecked_t asid_wrap;
34418 + atomic_long_unchecked_t asid_reuse;
34419 + atomic_long_unchecked_t intr;
34420 + atomic_long_unchecked_t intr_cbr;
34421 + atomic_long_unchecked_t intr_tfh;
34422 + atomic_long_unchecked_t intr_spurious;
34423 + atomic_long_unchecked_t intr_mm_lock_failed;
34424 + atomic_long_unchecked_t call_os;
34425 + atomic_long_unchecked_t call_os_wait_queue;
34426 + atomic_long_unchecked_t user_flush_tlb;
34427 + atomic_long_unchecked_t user_unload_context;
34428 + atomic_long_unchecked_t user_exception;
34429 + atomic_long_unchecked_t set_context_option;
34430 + atomic_long_unchecked_t check_context_retarget_intr;
34431 + atomic_long_unchecked_t check_context_unload;
34432 + atomic_long_unchecked_t tlb_dropin;
34433 + atomic_long_unchecked_t tlb_preload_page;
34434 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34435 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34436 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34437 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34438 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34439 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34440 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34441 + atomic_long_unchecked_t tfh_stale_on_fault;
34442 + atomic_long_unchecked_t mmu_invalidate_range;
34443 + atomic_long_unchecked_t mmu_invalidate_page;
34444 + atomic_long_unchecked_t flush_tlb;
34445 + atomic_long_unchecked_t flush_tlb_gru;
34446 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34447 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34448
34449 - atomic_long_t copy_gpa;
34450 - atomic_long_t read_gpa;
34451 + atomic_long_unchecked_t copy_gpa;
34452 + atomic_long_unchecked_t read_gpa;
34453
34454 - atomic_long_t mesq_receive;
34455 - atomic_long_t mesq_receive_none;
34456 - atomic_long_t mesq_send;
34457 - atomic_long_t mesq_send_failed;
34458 - atomic_long_t mesq_noop;
34459 - atomic_long_t mesq_send_unexpected_error;
34460 - atomic_long_t mesq_send_lb_overflow;
34461 - atomic_long_t mesq_send_qlimit_reached;
34462 - atomic_long_t mesq_send_amo_nacked;
34463 - atomic_long_t mesq_send_put_nacked;
34464 - atomic_long_t mesq_page_overflow;
34465 - atomic_long_t mesq_qf_locked;
34466 - atomic_long_t mesq_qf_noop_not_full;
34467 - atomic_long_t mesq_qf_switch_head_failed;
34468 - atomic_long_t mesq_qf_unexpected_error;
34469 - atomic_long_t mesq_noop_unexpected_error;
34470 - atomic_long_t mesq_noop_lb_overflow;
34471 - atomic_long_t mesq_noop_qlimit_reached;
34472 - atomic_long_t mesq_noop_amo_nacked;
34473 - atomic_long_t mesq_noop_put_nacked;
34474 - atomic_long_t mesq_noop_page_overflow;
34475 + atomic_long_unchecked_t mesq_receive;
34476 + atomic_long_unchecked_t mesq_receive_none;
34477 + atomic_long_unchecked_t mesq_send;
34478 + atomic_long_unchecked_t mesq_send_failed;
34479 + atomic_long_unchecked_t mesq_noop;
34480 + atomic_long_unchecked_t mesq_send_unexpected_error;
34481 + atomic_long_unchecked_t mesq_send_lb_overflow;
34482 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34483 + atomic_long_unchecked_t mesq_send_amo_nacked;
34484 + atomic_long_unchecked_t mesq_send_put_nacked;
34485 + atomic_long_unchecked_t mesq_page_overflow;
34486 + atomic_long_unchecked_t mesq_qf_locked;
34487 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34488 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34489 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34490 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34491 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34492 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34493 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34494 + atomic_long_unchecked_t mesq_noop_put_nacked;
34495 + atomic_long_unchecked_t mesq_noop_page_overflow;
34496
34497 };
34498
34499 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34500 tghop_invalidate, mcsop_last};
34501
34502 struct mcs_op_statistic {
34503 - atomic_long_t count;
34504 - atomic_long_t total;
34505 + atomic_long_unchecked_t count;
34506 + atomic_long_unchecked_t total;
34507 unsigned long max;
34508 };
34509
34510 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34511
34512 #define STAT(id) do { \
34513 if (gru_options & OPT_STATS) \
34514 - atomic_long_inc(&gru_stats.id); \
34515 + atomic_long_inc_unchecked(&gru_stats.id); \
34516 } while (0)
34517
34518 #ifdef CONFIG_SGI_GRU_DEBUG
34519 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34520 index c862cd4..0d176fe 100644
34521 --- a/drivers/misc/sgi-xp/xp.h
34522 +++ b/drivers/misc/sgi-xp/xp.h
34523 @@ -288,7 +288,7 @@ struct xpc_interface {
34524 xpc_notify_func, void *);
34525 void (*received) (short, int, void *);
34526 enum xp_retval (*partid_to_nasids) (short, void *);
34527 -};
34528 +} __no_const;
34529
34530 extern struct xpc_interface xpc_interface;
34531
34532 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34533 index b94d5f7..7f494c5 100644
34534 --- a/drivers/misc/sgi-xp/xpc.h
34535 +++ b/drivers/misc/sgi-xp/xpc.h
34536 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34537 void (*received_payload) (struct xpc_channel *, void *);
34538 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34539 };
34540 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34541
34542 /* struct xpc_partition act_state values (for XPC HB) */
34543
34544 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34545 /* found in xpc_main.c */
34546 extern struct device *xpc_part;
34547 extern struct device *xpc_chan;
34548 -extern struct xpc_arch_operations xpc_arch_ops;
34549 +extern xpc_arch_operations_no_const xpc_arch_ops;
34550 extern int xpc_disengage_timelimit;
34551 extern int xpc_disengage_timedout;
34552 extern int xpc_activate_IRQ_rcvd;
34553 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34554 index 8d082b4..aa749ae 100644
34555 --- a/drivers/misc/sgi-xp/xpc_main.c
34556 +++ b/drivers/misc/sgi-xp/xpc_main.c
34557 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34558 .notifier_call = xpc_system_die,
34559 };
34560
34561 -struct xpc_arch_operations xpc_arch_ops;
34562 +xpc_arch_operations_no_const xpc_arch_ops;
34563
34564 /*
34565 * Timer function to enforce the timelimit on the partition disengage.
34566 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34567 index 69ef0be..f3ef91e 100644
34568 --- a/drivers/mmc/host/sdhci-pci.c
34569 +++ b/drivers/mmc/host/sdhci-pci.c
34570 @@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34571 .probe = via_probe,
34572 };
34573
34574 -static const struct pci_device_id pci_ids[] __devinitdata = {
34575 +static const struct pci_device_id pci_ids[] __devinitconst = {
34576 {
34577 .vendor = PCI_VENDOR_ID_RICOH,
34578 .device = PCI_DEVICE_ID_RICOH_R5C822,
34579 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34580 index a4eb8b5..8c0628f 100644
34581 --- a/drivers/mtd/devices/doc2000.c
34582 +++ b/drivers/mtd/devices/doc2000.c
34583 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34584
34585 /* The ECC will not be calculated correctly if less than 512 is written */
34586 /* DBB-
34587 - if (len != 0x200 && eccbuf)
34588 + if (len != 0x200)
34589 printk(KERN_WARNING
34590 "ECC needs a full sector write (adr: %lx size %lx)\n",
34591 (long) to, (long) len);
34592 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34593 index a9e57d6..c6d8731 100644
34594 --- a/drivers/mtd/nand/denali.c
34595 +++ b/drivers/mtd/nand/denali.c
34596 @@ -26,6 +26,7 @@
34597 #include <linux/pci.h>
34598 #include <linux/mtd/mtd.h>
34599 #include <linux/module.h>
34600 +#include <linux/slab.h>
34601
34602 #include "denali.h"
34603
34604 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34605 index 51b9d6a..52af9a7 100644
34606 --- a/drivers/mtd/nftlmount.c
34607 +++ b/drivers/mtd/nftlmount.c
34608 @@ -24,6 +24,7 @@
34609 #include <asm/errno.h>
34610 #include <linux/delay.h>
34611 #include <linux/slab.h>
34612 +#include <linux/sched.h>
34613 #include <linux/mtd/mtd.h>
34614 #include <linux/mtd/nand.h>
34615 #include <linux/mtd/nftl.h>
34616 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34617 index 6762dc4..9956862 100644
34618 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34619 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34620 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34621 */
34622
34623 #define ATL2_PARAM(X, desc) \
34624 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34625 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34626 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34627 MODULE_PARM_DESC(X, desc);
34628 #else
34629 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34630 index 61a7670..7da6e34 100644
34631 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34632 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34633 @@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34634
34635 int (*wait_comp)(struct bnx2x *bp,
34636 struct bnx2x_rx_mode_ramrod_params *p);
34637 -};
34638 +} __no_const;
34639
34640 /********************** Set multicast group ***********************************/
34641
34642 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34643 index 93865f8..5448741 100644
34644 --- a/drivers/net/ethernet/broadcom/tg3.h
34645 +++ b/drivers/net/ethernet/broadcom/tg3.h
34646 @@ -140,6 +140,7 @@
34647 #define CHIPREV_ID_5750_A0 0x4000
34648 #define CHIPREV_ID_5750_A1 0x4001
34649 #define CHIPREV_ID_5750_A3 0x4003
34650 +#define CHIPREV_ID_5750_C1 0x4201
34651 #define CHIPREV_ID_5750_C2 0x4202
34652 #define CHIPREV_ID_5752_A0_HW 0x5000
34653 #define CHIPREV_ID_5752_A0 0x6000
34654 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34655 index c4e8643..0979484 100644
34656 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34657 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34658 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34659 */
34660 struct l2t_skb_cb {
34661 arp_failure_handler_func arp_failure_handler;
34662 -};
34663 +} __no_const;
34664
34665 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34666
34667 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34668 index 18b106c..2b38d36 100644
34669 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34670 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34671 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34672 for (i=0; i<ETH_ALEN; i++) {
34673 tmp.addr[i] = dev->dev_addr[i];
34674 }
34675 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34676 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34677 break;
34678
34679 case DE4X5_SET_HWADDR: /* Set the hardware address */
34680 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34681 spin_lock_irqsave(&lp->lock, flags);
34682 memcpy(&statbuf, &lp->pktStats, ioc->len);
34683 spin_unlock_irqrestore(&lp->lock, flags);
34684 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34685 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34686 return -EFAULT;
34687 break;
34688 }
34689 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34690 index ed7d1dc..d426748 100644
34691 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34692 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34693 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34694 {NULL}};
34695
34696
34697 -static const char *block_name[] __devinitdata = {
34698 +static const char *block_name[] __devinitconst = {
34699 "21140 non-MII",
34700 "21140 MII PHY",
34701 "21142 Serial PHY",
34702 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34703 index 2ac6fff..2d127d0 100644
34704 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34705 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34706 @@ -236,7 +236,7 @@ struct pci_id_info {
34707 int drv_flags; /* Driver use, intended as capability flags. */
34708 };
34709
34710 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34711 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34712 { /* Sometime a Level-One switch card. */
34713 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34714 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34715 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34716 index d783f4f..97fa1b0 100644
34717 --- a/drivers/net/ethernet/dlink/sundance.c
34718 +++ b/drivers/net/ethernet/dlink/sundance.c
34719 @@ -218,7 +218,7 @@ enum {
34720 struct pci_id_info {
34721 const char *name;
34722 };
34723 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34724 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34725 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34726 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34727 {"D-Link DFE-580TX 4 port Server Adapter"},
34728 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34729 index 1bbf6b3..430dcd0 100644
34730 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34731 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34732 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34733
34734 if (wrapped)
34735 newacc += 65536;
34736 - ACCESS_ONCE(*acc) = newacc;
34737 + ACCESS_ONCE_RW(*acc) = newacc;
34738 }
34739
34740 void be_parse_stats(struct be_adapter *adapter)
34741 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34742 index 16b0704..d2c07d7 100644
34743 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34744 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34745 @@ -31,6 +31,8 @@
34746 #include <linux/netdevice.h>
34747 #include <linux/phy.h>
34748 #include <linux/platform_device.h>
34749 +#include <linux/interrupt.h>
34750 +#include <linux/irqreturn.h>
34751 #include <net/ip.h>
34752
34753 #include "ftgmac100.h"
34754 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34755 index 829b109..4ae5f6a 100644
34756 --- a/drivers/net/ethernet/faraday/ftmac100.c
34757 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34758 @@ -31,6 +31,8 @@
34759 #include <linux/module.h>
34760 #include <linux/netdevice.h>
34761 #include <linux/platform_device.h>
34762 +#include <linux/interrupt.h>
34763 +#include <linux/irqreturn.h>
34764
34765 #include "ftmac100.h"
34766
34767 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34768 index 1637b98..c42f87b 100644
34769 --- a/drivers/net/ethernet/fealnx.c
34770 +++ b/drivers/net/ethernet/fealnx.c
34771 @@ -150,7 +150,7 @@ struct chip_info {
34772 int flags;
34773 };
34774
34775 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34776 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34777 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34778 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34779 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34780 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34781 index b83897f..b2d970f 100644
34782 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
34783 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34784 @@ -181,7 +181,7 @@ struct e1000_info;
34785 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34786 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34787 E1000_TXDCTL_COUNT_DESC | \
34788 - (5 << 16) | /* wthresh must be +1 more than desired */\
34789 + (1 << 16) | /* wthresh must be +1 more than desired */\
34790 (1 << 8) | /* hthresh */ \
34791 0x1f) /* pthresh */
34792
34793 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34794 index f82ecf5..7d59ecb 100644
34795 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34796 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34797 @@ -784,6 +784,7 @@ struct e1000_mac_operations {
34798 void (*config_collision_dist)(struct e1000_hw *);
34799 s32 (*read_mac_addr)(struct e1000_hw *);
34800 };
34801 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34802
34803 /*
34804 * When to use various PHY register access functions:
34805 @@ -824,6 +825,7 @@ struct e1000_phy_operations {
34806 void (*power_up)(struct e1000_hw *);
34807 void (*power_down)(struct e1000_hw *);
34808 };
34809 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34810
34811 /* Function pointers for the NVM. */
34812 struct e1000_nvm_operations {
34813 @@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34814 s32 (*validate)(struct e1000_hw *);
34815 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34816 };
34817 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34818
34819 struct e1000_mac_info {
34820 - struct e1000_mac_operations ops;
34821 + e1000_mac_operations_no_const ops;
34822 u8 addr[ETH_ALEN];
34823 u8 perm_addr[ETH_ALEN];
34824
34825 @@ -879,7 +882,7 @@ struct e1000_mac_info {
34826 };
34827
34828 struct e1000_phy_info {
34829 - struct e1000_phy_operations ops;
34830 + e1000_phy_operations_no_const ops;
34831
34832 enum e1000_phy_type type;
34833
34834 @@ -913,7 +916,7 @@ struct e1000_phy_info {
34835 };
34836
34837 struct e1000_nvm_info {
34838 - struct e1000_nvm_operations ops;
34839 + e1000_nvm_operations_no_const ops;
34840
34841 enum e1000_nvm_type type;
34842 enum e1000_nvm_override override;
34843 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34844 index f67cbd3..cef9e3d 100644
34845 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34846 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34847 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34848 s32 (*read_mac_addr)(struct e1000_hw *);
34849 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34850 };
34851 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34852
34853 struct e1000_phy_operations {
34854 s32 (*acquire)(struct e1000_hw *);
34855 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34856 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34857 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34858 };
34859 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34860
34861 struct e1000_nvm_operations {
34862 s32 (*acquire)(struct e1000_hw *);
34863 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34864 s32 (*update)(struct e1000_hw *);
34865 s32 (*validate)(struct e1000_hw *);
34866 };
34867 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34868
34869 struct e1000_info {
34870 s32 (*get_invariants)(struct e1000_hw *);
34871 @@ -350,7 +353,7 @@ struct e1000_info {
34872 extern const struct e1000_info e1000_82575_info;
34873
34874 struct e1000_mac_info {
34875 - struct e1000_mac_operations ops;
34876 + e1000_mac_operations_no_const ops;
34877
34878 u8 addr[6];
34879 u8 perm_addr[6];
34880 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34881 };
34882
34883 struct e1000_phy_info {
34884 - struct e1000_phy_operations ops;
34885 + e1000_phy_operations_no_const ops;
34886
34887 enum e1000_phy_type type;
34888
34889 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34890 };
34891
34892 struct e1000_nvm_info {
34893 - struct e1000_nvm_operations ops;
34894 + e1000_nvm_operations_no_const ops;
34895 enum e1000_nvm_type type;
34896 enum e1000_nvm_override override;
34897
34898 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34899 s32 (*check_for_ack)(struct e1000_hw *, u16);
34900 s32 (*check_for_rst)(struct e1000_hw *, u16);
34901 };
34902 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34903
34904 struct e1000_mbx_stats {
34905 u32 msgs_tx;
34906 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34907 };
34908
34909 struct e1000_mbx_info {
34910 - struct e1000_mbx_operations ops;
34911 + e1000_mbx_operations_no_const ops;
34912 struct e1000_mbx_stats stats;
34913 u32 timeout;
34914 u32 usec_delay;
34915 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34916 index 57db3c6..aa825fc 100644
34917 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34918 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34919 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34920 s32 (*read_mac_addr)(struct e1000_hw *);
34921 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34922 };
34923 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34924
34925 struct e1000_mac_info {
34926 - struct e1000_mac_operations ops;
34927 + e1000_mac_operations_no_const ops;
34928 u8 addr[6];
34929 u8 perm_addr[6];
34930
34931 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34932 s32 (*check_for_ack)(struct e1000_hw *);
34933 s32 (*check_for_rst)(struct e1000_hw *);
34934 };
34935 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34936
34937 struct e1000_mbx_stats {
34938 u32 msgs_tx;
34939 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34940 };
34941
34942 struct e1000_mbx_info {
34943 - struct e1000_mbx_operations ops;
34944 + e1000_mbx_operations_no_const ops;
34945 struct e1000_mbx_stats stats;
34946 u32 timeout;
34947 u32 usec_delay;
34948 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34949 index 8636e83..ab9bbc3 100644
34950 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34951 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34952 @@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34953 s32 (*update_checksum)(struct ixgbe_hw *);
34954 u16 (*calc_checksum)(struct ixgbe_hw *);
34955 };
34956 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34957
34958 struct ixgbe_mac_operations {
34959 s32 (*init_hw)(struct ixgbe_hw *);
34960 @@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34961 /* Manageability interface */
34962 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34963 };
34964 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34965
34966 struct ixgbe_phy_operations {
34967 s32 (*identify)(struct ixgbe_hw *);
34968 @@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34969 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34970 s32 (*check_overtemp)(struct ixgbe_hw *);
34971 };
34972 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34973
34974 struct ixgbe_eeprom_info {
34975 - struct ixgbe_eeprom_operations ops;
34976 + ixgbe_eeprom_operations_no_const ops;
34977 enum ixgbe_eeprom_type type;
34978 u32 semaphore_delay;
34979 u16 word_size;
34980 @@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34981
34982 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34983 struct ixgbe_mac_info {
34984 - struct ixgbe_mac_operations ops;
34985 + ixgbe_mac_operations_no_const ops;
34986 enum ixgbe_mac_type type;
34987 u8 addr[ETH_ALEN];
34988 u8 perm_addr[ETH_ALEN];
34989 @@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34990 };
34991
34992 struct ixgbe_phy_info {
34993 - struct ixgbe_phy_operations ops;
34994 + ixgbe_phy_operations_no_const ops;
34995 struct mdio_if_info mdio;
34996 enum ixgbe_phy_type type;
34997 u32 id;
34998 @@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34999 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35000 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35001 };
35002 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35003
35004 struct ixgbe_mbx_stats {
35005 u32 msgs_tx;
35006 @@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
35007 };
35008
35009 struct ixgbe_mbx_info {
35010 - struct ixgbe_mbx_operations ops;
35011 + ixgbe_mbx_operations_no_const ops;
35012 struct ixgbe_mbx_stats stats;
35013 u32 timeout;
35014 u32 usec_delay;
35015 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35016 index 307611a..d8e4562 100644
35017 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35018 +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35019 @@ -969,8 +969,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
35020 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
35021 for (i = 0; i < q_vector->txr_count; i++) {
35022 tx_ring = &(adapter->tx_ring[r_idx]);
35023 - tx_ring->total_bytes = 0;
35024 - tx_ring->total_packets = 0;
35025 ixgbevf_clean_tx_irq(adapter, tx_ring);
35026 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
35027 r_idx + 1);
35028 @@ -994,16 +992,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
35029 struct ixgbe_hw *hw = &adapter->hw;
35030 struct ixgbevf_ring *rx_ring;
35031 int r_idx;
35032 - int i;
35033 -
35034 - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
35035 - for (i = 0; i < q_vector->rxr_count; i++) {
35036 - rx_ring = &(adapter->rx_ring[r_idx]);
35037 - rx_ring->total_bytes = 0;
35038 - rx_ring->total_packets = 0;
35039 - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
35040 - r_idx + 1);
35041 - }
35042
35043 if (!q_vector->rxr_count)
35044 return IRQ_HANDLED;
35045 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35046 index 25c951d..cc7cf33 100644
35047 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35048 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35049 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35050 s32 (*clear_vfta)(struct ixgbe_hw *);
35051 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35052 };
35053 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35054
35055 enum ixgbe_mac_type {
35056 ixgbe_mac_unknown = 0,
35057 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35058 };
35059
35060 struct ixgbe_mac_info {
35061 - struct ixgbe_mac_operations ops;
35062 + ixgbe_mac_operations_no_const ops;
35063 u8 addr[6];
35064 u8 perm_addr[6];
35065
35066 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35067 s32 (*check_for_ack)(struct ixgbe_hw *);
35068 s32 (*check_for_rst)(struct ixgbe_hw *);
35069 };
35070 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35071
35072 struct ixgbe_mbx_stats {
35073 u32 msgs_tx;
35074 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35075 };
35076
35077 struct ixgbe_mbx_info {
35078 - struct ixgbe_mbx_operations ops;
35079 + ixgbe_mbx_operations_no_const ops;
35080 struct ixgbe_mbx_stats stats;
35081 u32 timeout;
35082 u32 udelay;
35083 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35084 index 8bb05b4..074796f 100644
35085 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35086 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35087 @@ -41,6 +41,7 @@
35088 #include <linux/slab.h>
35089 #include <linux/io-mapping.h>
35090 #include <linux/delay.h>
35091 +#include <linux/sched.h>
35092
35093 #include <linux/mlx4/device.h>
35094 #include <linux/mlx4/doorbell.h>
35095 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35096 index 5046a64..71ca936 100644
35097 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35098 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35099 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35100 void (*link_down)(struct __vxge_hw_device *devh);
35101 void (*crit_err)(struct __vxge_hw_device *devh,
35102 enum vxge_hw_event type, u64 ext_data);
35103 -};
35104 +} __no_const;
35105
35106 /*
35107 * struct __vxge_hw_blockpool_entry - Block private data structure
35108 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35109 index 4a518a3..936b334 100644
35110 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35111 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35112 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35113 struct vxge_hw_mempool_dma *dma_object,
35114 u32 index,
35115 u32 is_last);
35116 -};
35117 +} __no_const;
35118
35119 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35120 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35121 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35122 index 161e045..0bb5b86 100644
35123 --- a/drivers/net/ethernet/realtek/r8169.c
35124 +++ b/drivers/net/ethernet/realtek/r8169.c
35125 @@ -708,17 +708,17 @@ struct rtl8169_private {
35126 struct mdio_ops {
35127 void (*write)(void __iomem *, int, int);
35128 int (*read)(void __iomem *, int);
35129 - } mdio_ops;
35130 + } __no_const mdio_ops;
35131
35132 struct pll_power_ops {
35133 void (*down)(struct rtl8169_private *);
35134 void (*up)(struct rtl8169_private *);
35135 - } pll_power_ops;
35136 + } __no_const pll_power_ops;
35137
35138 struct jumbo_ops {
35139 void (*enable)(struct rtl8169_private *);
35140 void (*disable)(struct rtl8169_private *);
35141 - } jumbo_ops;
35142 + } __no_const jumbo_ops;
35143
35144 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35145 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35146 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35147 index a9deda8..5507c31 100644
35148 --- a/drivers/net/ethernet/sis/sis190.c
35149 +++ b/drivers/net/ethernet/sis/sis190.c
35150 @@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35151 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35152 struct net_device *dev)
35153 {
35154 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35155 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35156 struct sis190_private *tp = netdev_priv(dev);
35157 struct pci_dev *isa_bridge;
35158 u8 reg, tmp8;
35159 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35160 index c07cfe9..81cbf7e 100644
35161 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35162 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35163 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35164
35165 writel(value, ioaddr + MMC_CNTRL);
35166
35167 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35168 - MMC_CNTRL, value);
35169 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35170 +// MMC_CNTRL, value);
35171 }
35172
35173 /* To mask all all interrupts.*/
35174 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35175 index 9bdfaba..3d8f8d4 100644
35176 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35177 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35178 @@ -1587,7 +1587,7 @@ static const struct file_operations stmmac_rings_status_fops = {
35179 .open = stmmac_sysfs_ring_open,
35180 .read = seq_read,
35181 .llseek = seq_lseek,
35182 - .release = seq_release,
35183 + .release = single_release,
35184 };
35185
35186 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
35187 @@ -1659,7 +1659,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
35188 .open = stmmac_sysfs_dma_cap_open,
35189 .read = seq_read,
35190 .llseek = seq_lseek,
35191 - .release = seq_release,
35192 + .release = single_release,
35193 };
35194
35195 static int stmmac_init_fs(struct net_device *dev)
35196 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35197 index c358245..8c1de63 100644
35198 --- a/drivers/net/hyperv/hyperv_net.h
35199 +++ b/drivers/net/hyperv/hyperv_net.h
35200 @@ -98,7 +98,7 @@ struct rndis_device {
35201
35202 enum rndis_device_state state;
35203 bool link_state;
35204 - atomic_t new_req_id;
35205 + atomic_unchecked_t new_req_id;
35206
35207 spinlock_t request_lock;
35208 struct list_head req_list;
35209 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35210 index d6be64b..5d97e3b 100644
35211 --- a/drivers/net/hyperv/rndis_filter.c
35212 +++ b/drivers/net/hyperv/rndis_filter.c
35213 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35214 * template
35215 */
35216 set = &rndis_msg->msg.set_req;
35217 - set->req_id = atomic_inc_return(&dev->new_req_id);
35218 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35219
35220 /* Add to the request list */
35221 spin_lock_irqsave(&dev->request_lock, flags);
35222 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35223
35224 /* Setup the rndis set */
35225 halt = &request->request_msg.msg.halt_req;
35226 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35227 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35228
35229 /* Ignore return since this msg is optional. */
35230 rndis_filter_send_request(dev, request);
35231 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35232 index 21d7151..8034208 100644
35233 --- a/drivers/net/ppp/ppp_generic.c
35234 +++ b/drivers/net/ppp/ppp_generic.c
35235 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35236 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35237 struct ppp_stats stats;
35238 struct ppp_comp_stats cstats;
35239 - char *vers;
35240
35241 switch (cmd) {
35242 case SIOCGPPPSTATS:
35243 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35244 break;
35245
35246 case SIOCGPPPVER:
35247 - vers = PPP_VERSION;
35248 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35249 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35250 break;
35251 err = 0;
35252 break;
35253 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35254 index b715e6b..6d2490f 100644
35255 --- a/drivers/net/tokenring/abyss.c
35256 +++ b/drivers/net/tokenring/abyss.c
35257 @@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
35258
35259 static int __init abyss_init (void)
35260 {
35261 - abyss_netdev_ops = tms380tr_netdev_ops;
35262 + pax_open_kernel();
35263 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35264
35265 - abyss_netdev_ops.ndo_open = abyss_open;
35266 - abyss_netdev_ops.ndo_stop = abyss_close;
35267 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35268 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35269 + pax_close_kernel();
35270
35271 return pci_register_driver(&abyss_driver);
35272 }
35273 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35274 index 28adcdf..ae82f35 100644
35275 --- a/drivers/net/tokenring/madgemc.c
35276 +++ b/drivers/net/tokenring/madgemc.c
35277 @@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
35278
35279 static int __init madgemc_init (void)
35280 {
35281 - madgemc_netdev_ops = tms380tr_netdev_ops;
35282 - madgemc_netdev_ops.ndo_open = madgemc_open;
35283 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35284 + pax_open_kernel();
35285 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35286 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35287 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35288 + pax_close_kernel();
35289
35290 return mca_register_driver (&madgemc_driver);
35291 }
35292 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35293 index 62d90e4..9d84237 100644
35294 --- a/drivers/net/tokenring/proteon.c
35295 +++ b/drivers/net/tokenring/proteon.c
35296 @@ -352,9 +352,11 @@ static int __init proteon_init(void)
35297 struct platform_device *pdev;
35298 int i, num = 0, err = 0;
35299
35300 - proteon_netdev_ops = tms380tr_netdev_ops;
35301 - proteon_netdev_ops.ndo_open = proteon_open;
35302 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35303 + pax_open_kernel();
35304 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35305 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35306 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35307 + pax_close_kernel();
35308
35309 err = platform_driver_register(&proteon_driver);
35310 if (err)
35311 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35312 index ee11e93..c8f19c7 100644
35313 --- a/drivers/net/tokenring/skisa.c
35314 +++ b/drivers/net/tokenring/skisa.c
35315 @@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35316 struct platform_device *pdev;
35317 int i, num = 0, err = 0;
35318
35319 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35320 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35321 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35322 + pax_open_kernel();
35323 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35324 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35325 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35326 + pax_close_kernel();
35327
35328 err = platform_driver_register(&sk_isa_driver);
35329 if (err)
35330 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35331 index 2d2a688..35f2372 100644
35332 --- a/drivers/net/usb/hso.c
35333 +++ b/drivers/net/usb/hso.c
35334 @@ -71,7 +71,7 @@
35335 #include <asm/byteorder.h>
35336 #include <linux/serial_core.h>
35337 #include <linux/serial.h>
35338 -
35339 +#include <asm/local.h>
35340
35341 #define MOD_AUTHOR "Option Wireless"
35342 #define MOD_DESCRIPTION "USB High Speed Option driver"
35343 @@ -257,7 +257,7 @@ struct hso_serial {
35344
35345 /* from usb_serial_port */
35346 struct tty_struct *tty;
35347 - int open_count;
35348 + local_t open_count;
35349 spinlock_t serial_lock;
35350
35351 int (*write_data) (struct hso_serial *serial);
35352 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35353 struct urb *urb;
35354
35355 urb = serial->rx_urb[0];
35356 - if (serial->open_count > 0) {
35357 + if (local_read(&serial->open_count) > 0) {
35358 count = put_rxbuf_data(urb, serial);
35359 if (count == -1)
35360 return;
35361 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35362 DUMP1(urb->transfer_buffer, urb->actual_length);
35363
35364 /* Anyone listening? */
35365 - if (serial->open_count == 0)
35366 + if (local_read(&serial->open_count) == 0)
35367 return;
35368
35369 if (status == 0) {
35370 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35371 spin_unlock_irq(&serial->serial_lock);
35372
35373 /* check for port already opened, if not set the termios */
35374 - serial->open_count++;
35375 - if (serial->open_count == 1) {
35376 + if (local_inc_return(&serial->open_count) == 1) {
35377 serial->rx_state = RX_IDLE;
35378 /* Force default termio settings */
35379 _hso_serial_set_termios(tty, NULL);
35380 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35381 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35382 if (result) {
35383 hso_stop_serial_device(serial->parent);
35384 - serial->open_count--;
35385 + local_dec(&serial->open_count);
35386 kref_put(&serial->parent->ref, hso_serial_ref_free);
35387 }
35388 } else {
35389 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35390
35391 /* reset the rts and dtr */
35392 /* do the actual close */
35393 - serial->open_count--;
35394 + local_dec(&serial->open_count);
35395
35396 - if (serial->open_count <= 0) {
35397 - serial->open_count = 0;
35398 + if (local_read(&serial->open_count) <= 0) {
35399 + local_set(&serial->open_count, 0);
35400 spin_lock_irq(&serial->serial_lock);
35401 if (serial->tty == tty) {
35402 serial->tty->driver_data = NULL;
35403 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35404
35405 /* the actual setup */
35406 spin_lock_irqsave(&serial->serial_lock, flags);
35407 - if (serial->open_count)
35408 + if (local_read(&serial->open_count))
35409 _hso_serial_set_termios(tty, old);
35410 else
35411 tty->termios = old;
35412 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35413 D1("Pending read interrupt on port %d\n", i);
35414 spin_lock(&serial->serial_lock);
35415 if (serial->rx_state == RX_IDLE &&
35416 - serial->open_count > 0) {
35417 + local_read(&serial->open_count) > 0) {
35418 /* Setup and send a ctrl req read on
35419 * port i */
35420 if (!serial->rx_urb_filled[0]) {
35421 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35422 /* Start all serial ports */
35423 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35424 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35425 - if (dev2ser(serial_table[i])->open_count) {
35426 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35427 result =
35428 hso_start_serial_device(serial_table[i], GFP_NOIO);
35429 hso_kick_transmit(dev2ser(serial_table[i]));
35430 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35431 index 420d69b..74f90a2 100644
35432 --- a/drivers/net/wireless/ath/ath.h
35433 +++ b/drivers/net/wireless/ath/ath.h
35434 @@ -119,6 +119,7 @@ struct ath_ops {
35435 void (*write_flush) (void *);
35436 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35437 };
35438 +typedef struct ath_ops __no_const ath_ops_no_const;
35439
35440 struct ath_common;
35441 struct ath_bus_ops;
35442 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35443 index aa2abaf..5f5152d 100644
35444 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35445 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35446 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35447 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35448 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35449
35450 - ACCESS_ONCE(ads->ds_link) = i->link;
35451 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35452 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35453 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35454
35455 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35456 ctl6 = SM(i->keytype, AR_EncrType);
35457 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35458
35459 if ((i->is_first || i->is_last) &&
35460 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35461 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35462 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35463 | set11nTries(i->rates, 1)
35464 | set11nTries(i->rates, 2)
35465 | set11nTries(i->rates, 3)
35466 | (i->dur_update ? AR_DurUpdateEna : 0)
35467 | SM(0, AR_BurstDur);
35468
35469 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35470 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35471 | set11nRate(i->rates, 1)
35472 | set11nRate(i->rates, 2)
35473 | set11nRate(i->rates, 3);
35474 } else {
35475 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35476 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35477 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35478 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35479 }
35480
35481 if (!i->is_first) {
35482 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35483 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35484 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35485 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35486 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35487 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35488 return;
35489 }
35490
35491 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35492 break;
35493 }
35494
35495 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35496 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35497 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35498 | SM(i->txpower, AR_XmitPower)
35499 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35500 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35501 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35502 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35503
35504 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35505 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35506 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35507 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35508
35509 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35510 return;
35511
35512 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35513 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35514 | set11nPktDurRTSCTS(i->rates, 1);
35515
35516 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35517 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35518 | set11nPktDurRTSCTS(i->rates, 3);
35519
35520 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35521 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35522 | set11nRateFlags(i->rates, 1)
35523 | set11nRateFlags(i->rates, 2)
35524 | set11nRateFlags(i->rates, 3)
35525 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35526 index a66a13b..0ef399e 100644
35527 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35528 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35529 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35530 (i->qcu << AR_TxQcuNum_S) | desc_len;
35531
35532 checksum += val;
35533 - ACCESS_ONCE(ads->info) = val;
35534 + ACCESS_ONCE_RW(ads->info) = val;
35535
35536 checksum += i->link;
35537 - ACCESS_ONCE(ads->link) = i->link;
35538 + ACCESS_ONCE_RW(ads->link) = i->link;
35539
35540 checksum += i->buf_addr[0];
35541 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35542 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35543 checksum += i->buf_addr[1];
35544 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35545 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35546 checksum += i->buf_addr[2];
35547 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35548 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35549 checksum += i->buf_addr[3];
35550 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35551 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35552
35553 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35554 - ACCESS_ONCE(ads->ctl3) = val;
35555 + ACCESS_ONCE_RW(ads->ctl3) = val;
35556 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35557 - ACCESS_ONCE(ads->ctl5) = val;
35558 + ACCESS_ONCE_RW(ads->ctl5) = val;
35559 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35560 - ACCESS_ONCE(ads->ctl7) = val;
35561 + ACCESS_ONCE_RW(ads->ctl7) = val;
35562 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35563 - ACCESS_ONCE(ads->ctl9) = val;
35564 + ACCESS_ONCE_RW(ads->ctl9) = val;
35565
35566 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35567 - ACCESS_ONCE(ads->ctl10) = checksum;
35568 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35569
35570 if (i->is_first || i->is_last) {
35571 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35572 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35573 | set11nTries(i->rates, 1)
35574 | set11nTries(i->rates, 2)
35575 | set11nTries(i->rates, 3)
35576 | (i->dur_update ? AR_DurUpdateEna : 0)
35577 | SM(0, AR_BurstDur);
35578
35579 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35580 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35581 | set11nRate(i->rates, 1)
35582 | set11nRate(i->rates, 2)
35583 | set11nRate(i->rates, 3);
35584 } else {
35585 - ACCESS_ONCE(ads->ctl13) = 0;
35586 - ACCESS_ONCE(ads->ctl14) = 0;
35587 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35588 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35589 }
35590
35591 ads->ctl20 = 0;
35592 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35593
35594 ctl17 = SM(i->keytype, AR_EncrType);
35595 if (!i->is_first) {
35596 - ACCESS_ONCE(ads->ctl11) = 0;
35597 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35598 - ACCESS_ONCE(ads->ctl15) = 0;
35599 - ACCESS_ONCE(ads->ctl16) = 0;
35600 - ACCESS_ONCE(ads->ctl17) = ctl17;
35601 - ACCESS_ONCE(ads->ctl18) = 0;
35602 - ACCESS_ONCE(ads->ctl19) = 0;
35603 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35604 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35605 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35606 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35607 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35608 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35609 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35610 return;
35611 }
35612
35613 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35614 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35615 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35616 | SM(i->txpower, AR_XmitPower)
35617 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35618 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35619 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35620 ctl12 |= SM(val, AR_PAPRDChainMask);
35621
35622 - ACCESS_ONCE(ads->ctl12) = ctl12;
35623 - ACCESS_ONCE(ads->ctl17) = ctl17;
35624 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35625 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35626
35627 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35628 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35629 | set11nPktDurRTSCTS(i->rates, 1);
35630
35631 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35632 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35633 | set11nPktDurRTSCTS(i->rates, 3);
35634
35635 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35636 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35637 | set11nRateFlags(i->rates, 1)
35638 | set11nRateFlags(i->rates, 2)
35639 | set11nRateFlags(i->rates, 3)
35640 | SM(i->rtscts_rate, AR_RTSCTSRate);
35641
35642 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35643 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35644 }
35645
35646 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35647 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35648 index e88f182..4e57f5d 100644
35649 --- a/drivers/net/wireless/ath/ath9k/hw.h
35650 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35651 @@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35652
35653 /* ANI */
35654 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35655 -};
35656 +} __no_const;
35657
35658 /**
35659 * struct ath_hw_ops - callbacks used by hardware code and driver code
35660 @@ -644,7 +644,7 @@ struct ath_hw_ops {
35661 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35662 struct ath_hw_antcomb_conf *antconf);
35663
35664 -};
35665 +} __no_const;
35666
35667 struct ath_nf_limits {
35668 s16 max;
35669 @@ -664,7 +664,7 @@ enum ath_cal_list {
35670 #define AH_FASTCC 0x4
35671
35672 struct ath_hw {
35673 - struct ath_ops reg_ops;
35674 + ath_ops_no_const reg_ops;
35675
35676 struct ieee80211_hw *hw;
35677 struct ath_common common;
35678 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35679 index af00e2c..ab04d34 100644
35680 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35681 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35682 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35683 void (*carrsuppr)(struct brcms_phy *);
35684 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35685 void (*detach)(struct brcms_phy *);
35686 -};
35687 +} __no_const;
35688
35689 struct brcms_phy {
35690 struct brcms_phy_pub pubpi_ro;
35691 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35692 index faec404..a5277f1 100644
35693 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35694 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35695 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35696 */
35697 if (il3945_mod_params.disable_hw_scan) {
35698 D_INFO("Disabling hw_scan\n");
35699 - il3945_mac_ops.hw_scan = NULL;
35700 + pax_open_kernel();
35701 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35702 + pax_close_kernel();
35703 }
35704
35705 D_INFO("*** LOAD DRIVER ***\n");
35706 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35707 index b7ce6a6..5649756 100644
35708 --- a/drivers/net/wireless/mac80211_hwsim.c
35709 +++ b/drivers/net/wireless/mac80211_hwsim.c
35710 @@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35711 return -EINVAL;
35712
35713 if (fake_hw_scan) {
35714 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35715 - mac80211_hwsim_ops.sw_scan_start = NULL;
35716 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35717 + pax_open_kernel();
35718 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35719 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35720 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35721 + pax_close_kernel();
35722 }
35723
35724 spin_lock_init(&hwsim_radio_lock);
35725 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35726 index 35225e9..95e6bf9 100644
35727 --- a/drivers/net/wireless/mwifiex/main.h
35728 +++ b/drivers/net/wireless/mwifiex/main.h
35729 @@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35730 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35731 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35732 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35733 -};
35734 +} __no_const;
35735
35736 struct mwifiex_adapter {
35737 u8 iface_type;
35738 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35739 index d66e298..55b0a89 100644
35740 --- a/drivers/net/wireless/rndis_wlan.c
35741 +++ b/drivers/net/wireless/rndis_wlan.c
35742 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35743
35744 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35745
35746 - if (rts_threshold < 0 || rts_threshold > 2347)
35747 + if (rts_threshold > 2347)
35748 rts_threshold = 2347;
35749
35750 tmp = cpu_to_le32(rts_threshold);
35751 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35752 index c264dfa..08ee30e 100644
35753 --- a/drivers/net/wireless/rt2x00/rt2x00.h
35754 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
35755 @@ -396,7 +396,7 @@ struct rt2x00_intf {
35756 * for hardware which doesn't support hardware
35757 * sequence counting.
35758 */
35759 - atomic_t seqno;
35760 + atomic_unchecked_t seqno;
35761 };
35762
35763 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35764 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35765 index 50f92d5..f3afc41 100644
35766 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35767 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35768 @@ -229,9 +229,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35769 * sequence counter given by mac80211.
35770 */
35771 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35772 - seqno = atomic_add_return(0x10, &intf->seqno);
35773 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35774 else
35775 - seqno = atomic_read(&intf->seqno);
35776 + seqno = atomic_read_unchecked(&intf->seqno);
35777
35778 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35779 hdr->seq_ctrl |= cpu_to_le16(seqno);
35780 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35781 index 9d8f581..0f6589e 100644
35782 --- a/drivers/net/wireless/wl1251/wl1251.h
35783 +++ b/drivers/net/wireless/wl1251/wl1251.h
35784 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35785 void (*reset)(struct wl1251 *wl);
35786 void (*enable_irq)(struct wl1251 *wl);
35787 void (*disable_irq)(struct wl1251 *wl);
35788 -};
35789 +} __no_const;
35790
35791 struct wl1251 {
35792 struct ieee80211_hw *hw;
35793 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35794 index f34b5b2..b5abb9f 100644
35795 --- a/drivers/oprofile/buffer_sync.c
35796 +++ b/drivers/oprofile/buffer_sync.c
35797 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35798 if (cookie == NO_COOKIE)
35799 offset = pc;
35800 if (cookie == INVALID_COOKIE) {
35801 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35802 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35803 offset = pc;
35804 }
35805 if (cookie != last_cookie) {
35806 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35807 /* add userspace sample */
35808
35809 if (!mm) {
35810 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35811 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35812 return 0;
35813 }
35814
35815 cookie = lookup_dcookie(mm, s->eip, &offset);
35816
35817 if (cookie == INVALID_COOKIE) {
35818 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35819 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35820 return 0;
35821 }
35822
35823 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35824 /* ignore backtraces if failed to add a sample */
35825 if (state == sb_bt_start) {
35826 state = sb_bt_ignore;
35827 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35828 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35829 }
35830 }
35831 release_mm(mm);
35832 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35833 index c0cc4e7..44d4e54 100644
35834 --- a/drivers/oprofile/event_buffer.c
35835 +++ b/drivers/oprofile/event_buffer.c
35836 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35837 }
35838
35839 if (buffer_pos == buffer_size) {
35840 - atomic_inc(&oprofile_stats.event_lost_overflow);
35841 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35842 return;
35843 }
35844
35845 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35846 index ed2c3ec..deda85a 100644
35847 --- a/drivers/oprofile/oprof.c
35848 +++ b/drivers/oprofile/oprof.c
35849 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35850 if (oprofile_ops.switch_events())
35851 return;
35852
35853 - atomic_inc(&oprofile_stats.multiplex_counter);
35854 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35855 start_switch_worker();
35856 }
35857
35858 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35859 index 917d28e..d62d981 100644
35860 --- a/drivers/oprofile/oprofile_stats.c
35861 +++ b/drivers/oprofile/oprofile_stats.c
35862 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35863 cpu_buf->sample_invalid_eip = 0;
35864 }
35865
35866 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35867 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35868 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35869 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35870 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35871 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35872 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35873 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35874 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35875 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35876 }
35877
35878
35879 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35880 index 38b6fc0..b5cbfce 100644
35881 --- a/drivers/oprofile/oprofile_stats.h
35882 +++ b/drivers/oprofile/oprofile_stats.h
35883 @@ -13,11 +13,11 @@
35884 #include <linux/atomic.h>
35885
35886 struct oprofile_stat_struct {
35887 - atomic_t sample_lost_no_mm;
35888 - atomic_t sample_lost_no_mapping;
35889 - atomic_t bt_lost_no_mapping;
35890 - atomic_t event_lost_overflow;
35891 - atomic_t multiplex_counter;
35892 + atomic_unchecked_t sample_lost_no_mm;
35893 + atomic_unchecked_t sample_lost_no_mapping;
35894 + atomic_unchecked_t bt_lost_no_mapping;
35895 + atomic_unchecked_t event_lost_overflow;
35896 + atomic_unchecked_t multiplex_counter;
35897 };
35898
35899 extern struct oprofile_stat_struct oprofile_stats;
35900 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35901 index 849357c..b83c1e0 100644
35902 --- a/drivers/oprofile/oprofilefs.c
35903 +++ b/drivers/oprofile/oprofilefs.c
35904 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35905
35906
35907 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35908 - char const *name, atomic_t *val)
35909 + char const *name, atomic_unchecked_t *val)
35910 {
35911 return __oprofilefs_create_file(sb, root, name,
35912 &atomic_ro_fops, 0444, val);
35913 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35914 index 3f56bc0..707d642 100644
35915 --- a/drivers/parport/procfs.c
35916 +++ b/drivers/parport/procfs.c
35917 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35918
35919 *ppos += len;
35920
35921 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35922 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35923 }
35924
35925 #ifdef CONFIG_PARPORT_1284
35926 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35927
35928 *ppos += len;
35929
35930 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35931 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35932 }
35933 #endif /* IEEE1284.3 support. */
35934
35935 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35936 index 9fff878..ad0ad53 100644
35937 --- a/drivers/pci/hotplug/cpci_hotplug.h
35938 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35939 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35940 int (*hardware_test) (struct slot* slot, u32 value);
35941 u8 (*get_power) (struct slot* slot);
35942 int (*set_power) (struct slot* slot, int value);
35943 -};
35944 +} __no_const;
35945
35946 struct cpci_hp_controller {
35947 unsigned int irq;
35948 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35949 index 76ba8a1..20ca857 100644
35950 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35951 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35952 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35953
35954 void compaq_nvram_init (void __iomem *rom_start)
35955 {
35956 +
35957 +#ifndef CONFIG_PAX_KERNEXEC
35958 if (rom_start) {
35959 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35960 }
35961 +#endif
35962 +
35963 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35964
35965 /* initialize our int15 lock */
35966 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35967 index b500840..d7159d3 100644
35968 --- a/drivers/pci/pcie/aspm.c
35969 +++ b/drivers/pci/pcie/aspm.c
35970 @@ -27,9 +27,9 @@
35971 #define MODULE_PARAM_PREFIX "pcie_aspm."
35972
35973 /* Note: those are not register definitions */
35974 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35975 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35976 -#define ASPM_STATE_L1 (4) /* L1 state */
35977 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35978 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35979 +#define ASPM_STATE_L1 (4U) /* L1 state */
35980 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35981 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35982
35983 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35984 index 5e1ca3c..08082fe 100644
35985 --- a/drivers/pci/probe.c
35986 +++ b/drivers/pci/probe.c
35987 @@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35988 u16 orig_cmd;
35989 struct pci_bus_region region;
35990
35991 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35992 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35993
35994 if (!dev->mmio_always_on) {
35995 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35996 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35997 index 27911b5..5b6db88 100644
35998 --- a/drivers/pci/proc.c
35999 +++ b/drivers/pci/proc.c
36000 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36001 static int __init pci_proc_init(void)
36002 {
36003 struct pci_dev *dev = NULL;
36004 +
36005 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36006 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36007 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36008 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36009 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36010 +#endif
36011 +#else
36012 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36013 +#endif
36014 proc_create("devices", 0, proc_bus_pci_dir,
36015 &proc_bus_pci_dev_operations);
36016 proc_initialized = 1;
36017 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36018 index d68c000..f6094ca 100644
36019 --- a/drivers/platform/x86/thinkpad_acpi.c
36020 +++ b/drivers/platform/x86/thinkpad_acpi.c
36021 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36022 return 0;
36023 }
36024
36025 -void static hotkey_mask_warn_incomplete_mask(void)
36026 +static void hotkey_mask_warn_incomplete_mask(void)
36027 {
36028 /* log only what the user can fix... */
36029 const u32 wantedmask = hotkey_driver_mask &
36030 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36031 }
36032 }
36033
36034 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36035 - struct tp_nvram_state *newn,
36036 - const u32 event_mask)
36037 -{
36038 -
36039 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36040 do { \
36041 if ((event_mask & (1 << __scancode)) && \
36042 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36043 tpacpi_hotkey_send_key(__scancode); \
36044 } while (0)
36045
36046 - void issue_volchange(const unsigned int oldvol,
36047 - const unsigned int newvol)
36048 - {
36049 - unsigned int i = oldvol;
36050 +static void issue_volchange(const unsigned int oldvol,
36051 + const unsigned int newvol,
36052 + const u32 event_mask)
36053 +{
36054 + unsigned int i = oldvol;
36055
36056 - while (i > newvol) {
36057 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36058 - i--;
36059 - }
36060 - while (i < newvol) {
36061 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36062 - i++;
36063 - }
36064 + while (i > newvol) {
36065 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36066 + i--;
36067 }
36068 + while (i < newvol) {
36069 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36070 + i++;
36071 + }
36072 +}
36073
36074 - void issue_brightnesschange(const unsigned int oldbrt,
36075 - const unsigned int newbrt)
36076 - {
36077 - unsigned int i = oldbrt;
36078 +static void issue_brightnesschange(const unsigned int oldbrt,
36079 + const unsigned int newbrt,
36080 + const u32 event_mask)
36081 +{
36082 + unsigned int i = oldbrt;
36083
36084 - while (i > newbrt) {
36085 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36086 - i--;
36087 - }
36088 - while (i < newbrt) {
36089 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36090 - i++;
36091 - }
36092 + while (i > newbrt) {
36093 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36094 + i--;
36095 + }
36096 + while (i < newbrt) {
36097 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36098 + i++;
36099 }
36100 +}
36101
36102 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36103 + struct tp_nvram_state *newn,
36104 + const u32 event_mask)
36105 +{
36106 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36107 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36108 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36109 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36110 oldn->volume_level != newn->volume_level) {
36111 /* recently muted, or repeated mute keypress, or
36112 * multiple presses ending in mute */
36113 - issue_volchange(oldn->volume_level, newn->volume_level);
36114 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36115 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36116 }
36117 } else {
36118 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36119 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36120 }
36121 if (oldn->volume_level != newn->volume_level) {
36122 - issue_volchange(oldn->volume_level, newn->volume_level);
36123 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36124 } else if (oldn->volume_toggle != newn->volume_toggle) {
36125 /* repeated vol up/down keypress at end of scale ? */
36126 if (newn->volume_level == 0)
36127 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36128 /* handle brightness */
36129 if (oldn->brightness_level != newn->brightness_level) {
36130 issue_brightnesschange(oldn->brightness_level,
36131 - newn->brightness_level);
36132 + newn->brightness_level,
36133 + event_mask);
36134 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36135 /* repeated key presses that didn't change state */
36136 if (newn->brightness_level == 0)
36137 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36138 && !tp_features.bright_unkfw)
36139 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36140 }
36141 +}
36142
36143 #undef TPACPI_COMPARE_KEY
36144 #undef TPACPI_MAY_SEND_KEY
36145 -}
36146
36147 /*
36148 * Polling driver
36149 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36150 index 769d265..a3a05ca 100644
36151 --- a/drivers/pnp/pnpbios/bioscalls.c
36152 +++ b/drivers/pnp/pnpbios/bioscalls.c
36153 @@ -58,7 +58,7 @@ do { \
36154 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36155 } while(0)
36156
36157 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36158 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36159 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36160
36161 /*
36162 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36163
36164 cpu = get_cpu();
36165 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36166 +
36167 + pax_open_kernel();
36168 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36169 + pax_close_kernel();
36170
36171 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36172 spin_lock_irqsave(&pnp_bios_lock, flags);
36173 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36174 :"memory");
36175 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36176
36177 + pax_open_kernel();
36178 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36179 + pax_close_kernel();
36180 +
36181 put_cpu();
36182
36183 /* If we get here and this is set then the PnP BIOS faulted on us. */
36184 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36185 return status;
36186 }
36187
36188 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36189 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36190 {
36191 int i;
36192
36193 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36194 pnp_bios_callpoint.offset = header->fields.pm16offset;
36195 pnp_bios_callpoint.segment = PNP_CS16;
36196
36197 + pax_open_kernel();
36198 +
36199 for_each_possible_cpu(i) {
36200 struct desc_struct *gdt = get_cpu_gdt_table(i);
36201 if (!gdt)
36202 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36203 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36204 (unsigned long)__va(header->fields.pm16dseg));
36205 }
36206 +
36207 + pax_close_kernel();
36208 }
36209 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36210 index b0ecacb..7c9da2e 100644
36211 --- a/drivers/pnp/resource.c
36212 +++ b/drivers/pnp/resource.c
36213 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36214 return 1;
36215
36216 /* check if the resource is valid */
36217 - if (*irq < 0 || *irq > 15)
36218 + if (*irq > 15)
36219 return 0;
36220
36221 /* check if the resource is reserved */
36222 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36223 return 1;
36224
36225 /* check if the resource is valid */
36226 - if (*dma < 0 || *dma == 4 || *dma > 7)
36227 + if (*dma == 4 || *dma > 7)
36228 return 0;
36229
36230 /* check if the resource is reserved */
36231 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36232 index 222ccd8..6275fa5 100644
36233 --- a/drivers/power/bq27x00_battery.c
36234 +++ b/drivers/power/bq27x00_battery.c
36235 @@ -72,7 +72,7 @@
36236 struct bq27x00_device_info;
36237 struct bq27x00_access_methods {
36238 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36239 -};
36240 +} __no_const;
36241
36242 enum bq27x00_chip { BQ27000, BQ27500 };
36243
36244 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36245 index 4c5b053..104263e 100644
36246 --- a/drivers/regulator/max8660.c
36247 +++ b/drivers/regulator/max8660.c
36248 @@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36249 max8660->shadow_regs[MAX8660_OVER1] = 5;
36250 } else {
36251 /* Otherwise devices can be toggled via software */
36252 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36253 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36254 + pax_open_kernel();
36255 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36256 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36257 + pax_close_kernel();
36258 }
36259
36260 /*
36261 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36262 index 845aa22..99ec402 100644
36263 --- a/drivers/regulator/mc13892-regulator.c
36264 +++ b/drivers/regulator/mc13892-regulator.c
36265 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36266 }
36267 mc13xxx_unlock(mc13892);
36268
36269 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36270 + pax_open_kernel();
36271 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36272 = mc13892_vcam_set_mode;
36273 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36274 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36275 = mc13892_vcam_get_mode;
36276 + pax_close_kernel();
36277
36278 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36279 ARRAY_SIZE(mc13892_regulators));
36280 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36281 index cace6d3..f623fda 100644
36282 --- a/drivers/rtc/rtc-dev.c
36283 +++ b/drivers/rtc/rtc-dev.c
36284 @@ -14,6 +14,7 @@
36285 #include <linux/module.h>
36286 #include <linux/rtc.h>
36287 #include <linux/sched.h>
36288 +#include <linux/grsecurity.h>
36289 #include "rtc-core.h"
36290
36291 static dev_t rtc_devt;
36292 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36293 if (copy_from_user(&tm, uarg, sizeof(tm)))
36294 return -EFAULT;
36295
36296 + gr_log_timechange();
36297 +
36298 return rtc_set_time(rtc, &tm);
36299
36300 case RTC_PIE_ON:
36301 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36302 index 3fcf627..f334910 100644
36303 --- a/drivers/scsi/aacraid/aacraid.h
36304 +++ b/drivers/scsi/aacraid/aacraid.h
36305 @@ -492,7 +492,7 @@ struct adapter_ops
36306 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36307 /* Administrative operations */
36308 int (*adapter_comm)(struct aac_dev * dev, int comm);
36309 -};
36310 +} __no_const;
36311
36312 /*
36313 * Define which interrupt handler needs to be installed
36314 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36315 index 0d279c44..3d25a97 100644
36316 --- a/drivers/scsi/aacraid/linit.c
36317 +++ b/drivers/scsi/aacraid/linit.c
36318 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36319 #elif defined(__devinitconst)
36320 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36321 #else
36322 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36323 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36324 #endif
36325 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36326 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36327 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36328 index ff80552..1c4120c 100644
36329 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36330 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36331 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36332 .lldd_ata_set_dmamode = asd_set_dmamode,
36333 };
36334
36335 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36336 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36337 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36338 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36339 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36340 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36341 index 4ad7e36..d004679 100644
36342 --- a/drivers/scsi/bfa/bfa.h
36343 +++ b/drivers/scsi/bfa/bfa.h
36344 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36345 u32 *end);
36346 int cpe_vec_q0;
36347 int rme_vec_q0;
36348 -};
36349 +} __no_const;
36350 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36351
36352 struct bfa_faa_cbfn_s {
36353 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36354 index f0f80e2..8ec946b 100644
36355 --- a/drivers/scsi/bfa/bfa_fcpim.c
36356 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36357 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36358
36359 bfa_iotag_attach(fcp);
36360
36361 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36362 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36363 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36364 (fcp->num_itns * sizeof(struct bfa_itn_s));
36365 memset(fcp->itn_arr, 0,
36366 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36367 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36368 {
36369 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36370 - struct bfa_itn_s *itn;
36371 + bfa_itn_s_no_const *itn;
36372
36373 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36374 itn->isr = isr;
36375 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36376 index 36f26da..38a34a8 100644
36377 --- a/drivers/scsi/bfa/bfa_fcpim.h
36378 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36379 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36380 struct bfa_itn_s {
36381 bfa_isr_func_t isr;
36382 };
36383 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36384
36385 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36386 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36387 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36388 struct list_head iotag_tio_free_q; /* free IO resources */
36389 struct list_head iotag_unused_q; /* unused IO resources*/
36390 struct bfa_iotag_s *iotag_arr;
36391 - struct bfa_itn_s *itn_arr;
36392 + bfa_itn_s_no_const *itn_arr;
36393 int num_ioim_reqs;
36394 int num_fwtio_reqs;
36395 int num_itns;
36396 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36397 index 1a99d4b..e85d64b 100644
36398 --- a/drivers/scsi/bfa/bfa_ioc.h
36399 +++ b/drivers/scsi/bfa/bfa_ioc.h
36400 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36401 bfa_ioc_disable_cbfn_t disable_cbfn;
36402 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36403 bfa_ioc_reset_cbfn_t reset_cbfn;
36404 -};
36405 +} __no_const;
36406
36407 /*
36408 * IOC event notification mechanism.
36409 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36410 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36411 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36412 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36413 -};
36414 +} __no_const;
36415
36416 /*
36417 * Queue element to wait for room in request queue. FIFO order is
36418 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36419 index a3a056a..b9bbc2f 100644
36420 --- a/drivers/scsi/hosts.c
36421 +++ b/drivers/scsi/hosts.c
36422 @@ -42,7 +42,7 @@
36423 #include "scsi_logging.h"
36424
36425
36426 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36427 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36428
36429
36430 static void scsi_host_cls_release(struct device *dev)
36431 @@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36432 * subtract one because we increment first then return, but we need to
36433 * know what the next host number was before increment
36434 */
36435 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36436 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36437 shost->dma_channel = 0xff;
36438
36439 /* These three are default values which can be overridden */
36440 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36441 index 500e20d..ebd3059 100644
36442 --- a/drivers/scsi/hpsa.c
36443 +++ b/drivers/scsi/hpsa.c
36444 @@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36445 u32 a;
36446
36447 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36448 - return h->access.command_completed(h);
36449 + return h->access->command_completed(h);
36450
36451 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36452 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36453 @@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36454 while (!list_empty(&h->reqQ)) {
36455 c = list_entry(h->reqQ.next, struct CommandList, list);
36456 /* can't do anything if fifo is full */
36457 - if ((h->access.fifo_full(h))) {
36458 + if ((h->access->fifo_full(h))) {
36459 dev_warn(&h->pdev->dev, "fifo full\n");
36460 break;
36461 }
36462 @@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36463 h->Qdepth--;
36464
36465 /* Tell the controller execute command */
36466 - h->access.submit_command(h, c);
36467 + h->access->submit_command(h, c);
36468
36469 /* Put job onto the completed Q */
36470 addQ(&h->cmpQ, c);
36471 @@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36472
36473 static inline unsigned long get_next_completion(struct ctlr_info *h)
36474 {
36475 - return h->access.command_completed(h);
36476 + return h->access->command_completed(h);
36477 }
36478
36479 static inline bool interrupt_pending(struct ctlr_info *h)
36480 {
36481 - return h->access.intr_pending(h);
36482 + return h->access->intr_pending(h);
36483 }
36484
36485 static inline long interrupt_not_for_us(struct ctlr_info *h)
36486 {
36487 - return (h->access.intr_pending(h) == 0) ||
36488 + return (h->access->intr_pending(h) == 0) ||
36489 (h->interrupts_enabled == 0);
36490 }
36491
36492 @@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36493 if (prod_index < 0)
36494 return -ENODEV;
36495 h->product_name = products[prod_index].product_name;
36496 - h->access = *(products[prod_index].access);
36497 + h->access = products[prod_index].access;
36498
36499 if (hpsa_board_disabled(h->pdev)) {
36500 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36501 @@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36502
36503 assert_spin_locked(&lockup_detector_lock);
36504 remove_ctlr_from_lockup_detector_list(h);
36505 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36506 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36507 spin_lock_irqsave(&h->lock, flags);
36508 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36509 spin_unlock_irqrestore(&h->lock, flags);
36510 @@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36511 }
36512
36513 /* make sure the board interrupts are off */
36514 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36515 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36516
36517 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36518 goto clean2;
36519 @@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36520 * fake ones to scoop up any residual completions.
36521 */
36522 spin_lock_irqsave(&h->lock, flags);
36523 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36524 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36525 spin_unlock_irqrestore(&h->lock, flags);
36526 free_irq(h->intr[h->intr_mode], h);
36527 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36528 @@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36529 dev_info(&h->pdev->dev, "Board READY.\n");
36530 dev_info(&h->pdev->dev,
36531 "Waiting for stale completions to drain.\n");
36532 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36533 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36534 msleep(10000);
36535 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36536 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36537
36538 rc = controller_reset_failed(h->cfgtable);
36539 if (rc)
36540 @@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36541 }
36542
36543 /* Turn the interrupts on so we can service requests */
36544 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36545 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36546
36547 hpsa_hba_inquiry(h);
36548 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36549 @@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36550 * To write all data in the battery backed cache to disks
36551 */
36552 hpsa_flush_cache(h);
36553 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36554 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36555 free_irq(h->intr[h->intr_mode], h);
36556 #ifdef CONFIG_PCI_MSI
36557 if (h->msix_vector)
36558 @@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36559 return;
36560 }
36561 /* Change the access methods to the performant access methods */
36562 - h->access = SA5_performant_access;
36563 + h->access = &SA5_performant_access;
36564 h->transMethod = CFGTBL_Trans_Performant;
36565 }
36566
36567 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36568 index 7b28d54..952f23a 100644
36569 --- a/drivers/scsi/hpsa.h
36570 +++ b/drivers/scsi/hpsa.h
36571 @@ -72,7 +72,7 @@ struct ctlr_info {
36572 unsigned int msix_vector;
36573 unsigned int msi_vector;
36574 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36575 - struct access_method access;
36576 + struct access_method *access;
36577
36578 /* queue and queue Info */
36579 struct list_head reqQ;
36580 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36581 index f2df059..a3a9930 100644
36582 --- a/drivers/scsi/ips.h
36583 +++ b/drivers/scsi/ips.h
36584 @@ -1027,7 +1027,7 @@ typedef struct {
36585 int (*intr)(struct ips_ha *);
36586 void (*enableint)(struct ips_ha *);
36587 uint32_t (*statupd)(struct ips_ha *);
36588 -} ips_hw_func_t;
36589 +} __no_const ips_hw_func_t;
36590
36591 typedef struct ips_ha {
36592 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36593 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36594 index aceffad..c35c08d 100644
36595 --- a/drivers/scsi/libfc/fc_exch.c
36596 +++ b/drivers/scsi/libfc/fc_exch.c
36597 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36598 * all together if not used XXX
36599 */
36600 struct {
36601 - atomic_t no_free_exch;
36602 - atomic_t no_free_exch_xid;
36603 - atomic_t xid_not_found;
36604 - atomic_t xid_busy;
36605 - atomic_t seq_not_found;
36606 - atomic_t non_bls_resp;
36607 + atomic_unchecked_t no_free_exch;
36608 + atomic_unchecked_t no_free_exch_xid;
36609 + atomic_unchecked_t xid_not_found;
36610 + atomic_unchecked_t xid_busy;
36611 + atomic_unchecked_t seq_not_found;
36612 + atomic_unchecked_t non_bls_resp;
36613 } stats;
36614 };
36615
36616 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36617 /* allocate memory for exchange */
36618 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36619 if (!ep) {
36620 - atomic_inc(&mp->stats.no_free_exch);
36621 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36622 goto out;
36623 }
36624 memset(ep, 0, sizeof(*ep));
36625 @@ -780,7 +780,7 @@ out:
36626 return ep;
36627 err:
36628 spin_unlock_bh(&pool->lock);
36629 - atomic_inc(&mp->stats.no_free_exch_xid);
36630 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36631 mempool_free(ep, mp->ep_pool);
36632 return NULL;
36633 }
36634 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36635 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36636 ep = fc_exch_find(mp, xid);
36637 if (!ep) {
36638 - atomic_inc(&mp->stats.xid_not_found);
36639 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36640 reject = FC_RJT_OX_ID;
36641 goto out;
36642 }
36643 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36644 ep = fc_exch_find(mp, xid);
36645 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36646 if (ep) {
36647 - atomic_inc(&mp->stats.xid_busy);
36648 + atomic_inc_unchecked(&mp->stats.xid_busy);
36649 reject = FC_RJT_RX_ID;
36650 goto rel;
36651 }
36652 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36653 }
36654 xid = ep->xid; /* get our XID */
36655 } else if (!ep) {
36656 - atomic_inc(&mp->stats.xid_not_found);
36657 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36658 reject = FC_RJT_RX_ID; /* XID not found */
36659 goto out;
36660 }
36661 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36662 } else {
36663 sp = &ep->seq;
36664 if (sp->id != fh->fh_seq_id) {
36665 - atomic_inc(&mp->stats.seq_not_found);
36666 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36667 if (f_ctl & FC_FC_END_SEQ) {
36668 /*
36669 * Update sequence_id based on incoming last
36670 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36671
36672 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36673 if (!ep) {
36674 - atomic_inc(&mp->stats.xid_not_found);
36675 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36676 goto out;
36677 }
36678 if (ep->esb_stat & ESB_ST_COMPLETE) {
36679 - atomic_inc(&mp->stats.xid_not_found);
36680 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36681 goto rel;
36682 }
36683 if (ep->rxid == FC_XID_UNKNOWN)
36684 ep->rxid = ntohs(fh->fh_rx_id);
36685 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36686 - atomic_inc(&mp->stats.xid_not_found);
36687 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36688 goto rel;
36689 }
36690 if (ep->did != ntoh24(fh->fh_s_id) &&
36691 ep->did != FC_FID_FLOGI) {
36692 - atomic_inc(&mp->stats.xid_not_found);
36693 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36694 goto rel;
36695 }
36696 sof = fr_sof(fp);
36697 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36698 sp->ssb_stat |= SSB_ST_RESP;
36699 sp->id = fh->fh_seq_id;
36700 } else if (sp->id != fh->fh_seq_id) {
36701 - atomic_inc(&mp->stats.seq_not_found);
36702 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36703 goto rel;
36704 }
36705
36706 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36707 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36708
36709 if (!sp)
36710 - atomic_inc(&mp->stats.xid_not_found);
36711 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36712 else
36713 - atomic_inc(&mp->stats.non_bls_resp);
36714 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36715
36716 fc_frame_free(fp);
36717 }
36718 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36719 index d109cc3..09f4e7d 100644
36720 --- a/drivers/scsi/libsas/sas_ata.c
36721 +++ b/drivers/scsi/libsas/sas_ata.c
36722 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36723 .postreset = ata_std_postreset,
36724 .error_handler = ata_std_error_handler,
36725 .post_internal_cmd = sas_ata_post_internal,
36726 - .qc_defer = ata_std_qc_defer,
36727 + .qc_defer = ata_std_qc_defer,
36728 .qc_prep = ata_noop_qc_prep,
36729 .qc_issue = sas_ata_qc_issue,
36730 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36731 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36732 index 3a1ffdd..8eb7c71 100644
36733 --- a/drivers/scsi/lpfc/lpfc.h
36734 +++ b/drivers/scsi/lpfc/lpfc.h
36735 @@ -413,7 +413,7 @@ struct lpfc_vport {
36736 struct dentry *debug_nodelist;
36737 struct dentry *vport_debugfs_root;
36738 struct lpfc_debugfs_trc *disc_trc;
36739 - atomic_t disc_trc_cnt;
36740 + atomic_unchecked_t disc_trc_cnt;
36741 #endif
36742 uint8_t stat_data_enabled;
36743 uint8_t stat_data_blocked;
36744 @@ -826,8 +826,8 @@ struct lpfc_hba {
36745 struct timer_list fabric_block_timer;
36746 unsigned long bit_flags;
36747 #define FABRIC_COMANDS_BLOCKED 0
36748 - atomic_t num_rsrc_err;
36749 - atomic_t num_cmd_success;
36750 + atomic_unchecked_t num_rsrc_err;
36751 + atomic_unchecked_t num_cmd_success;
36752 unsigned long last_rsrc_error_time;
36753 unsigned long last_ramp_down_time;
36754 unsigned long last_ramp_up_time;
36755 @@ -863,7 +863,7 @@ struct lpfc_hba {
36756
36757 struct dentry *debug_slow_ring_trc;
36758 struct lpfc_debugfs_trc *slow_ring_trc;
36759 - atomic_t slow_ring_trc_cnt;
36760 + atomic_unchecked_t slow_ring_trc_cnt;
36761 /* iDiag debugfs sub-directory */
36762 struct dentry *idiag_root;
36763 struct dentry *idiag_pci_cfg;
36764 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36765 index af04b0d..8f1a97e 100644
36766 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36767 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36768 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36769
36770 #include <linux/debugfs.h>
36771
36772 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36773 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36774 static unsigned long lpfc_debugfs_start_time = 0L;
36775
36776 /* iDiag */
36777 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36778 lpfc_debugfs_enable = 0;
36779
36780 len = 0;
36781 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36782 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36783 (lpfc_debugfs_max_disc_trc - 1);
36784 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36785 dtp = vport->disc_trc + i;
36786 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36787 lpfc_debugfs_enable = 0;
36788
36789 len = 0;
36790 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36791 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36792 (lpfc_debugfs_max_slow_ring_trc - 1);
36793 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36794 dtp = phba->slow_ring_trc + i;
36795 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36796 !vport || !vport->disc_trc)
36797 return;
36798
36799 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36800 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36801 (lpfc_debugfs_max_disc_trc - 1);
36802 dtp = vport->disc_trc + index;
36803 dtp->fmt = fmt;
36804 dtp->data1 = data1;
36805 dtp->data2 = data2;
36806 dtp->data3 = data3;
36807 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36808 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36809 dtp->jif = jiffies;
36810 #endif
36811 return;
36812 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36813 !phba || !phba->slow_ring_trc)
36814 return;
36815
36816 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36817 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36818 (lpfc_debugfs_max_slow_ring_trc - 1);
36819 dtp = phba->slow_ring_trc + index;
36820 dtp->fmt = fmt;
36821 dtp->data1 = data1;
36822 dtp->data2 = data2;
36823 dtp->data3 = data3;
36824 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36825 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36826 dtp->jif = jiffies;
36827 #endif
36828 return;
36829 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36830 "slow_ring buffer\n");
36831 goto debug_failed;
36832 }
36833 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36834 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36835 memset(phba->slow_ring_trc, 0,
36836 (sizeof(struct lpfc_debugfs_trc) *
36837 lpfc_debugfs_max_slow_ring_trc));
36838 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36839 "buffer\n");
36840 goto debug_failed;
36841 }
36842 - atomic_set(&vport->disc_trc_cnt, 0);
36843 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36844
36845 snprintf(name, sizeof(name), "discovery_trace");
36846 vport->debug_disc_trc =
36847 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36848 index 9598fdc..7e9f3d9 100644
36849 --- a/drivers/scsi/lpfc/lpfc_init.c
36850 +++ b/drivers/scsi/lpfc/lpfc_init.c
36851 @@ -10266,8 +10266,10 @@ lpfc_init(void)
36852 "misc_register returned with status %d", error);
36853
36854 if (lpfc_enable_npiv) {
36855 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36856 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36857 + pax_open_kernel();
36858 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36859 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36860 + pax_close_kernel();
36861 }
36862 lpfc_transport_template =
36863 fc_attach_transport(&lpfc_transport_functions);
36864 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36865 index 88f3a83..686d3fa 100644
36866 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36867 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36868 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36869 uint32_t evt_posted;
36870
36871 spin_lock_irqsave(&phba->hbalock, flags);
36872 - atomic_inc(&phba->num_rsrc_err);
36873 + atomic_inc_unchecked(&phba->num_rsrc_err);
36874 phba->last_rsrc_error_time = jiffies;
36875
36876 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36877 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36878 unsigned long flags;
36879 struct lpfc_hba *phba = vport->phba;
36880 uint32_t evt_posted;
36881 - atomic_inc(&phba->num_cmd_success);
36882 + atomic_inc_unchecked(&phba->num_cmd_success);
36883
36884 if (vport->cfg_lun_queue_depth <= queue_depth)
36885 return;
36886 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36887 unsigned long num_rsrc_err, num_cmd_success;
36888 int i;
36889
36890 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36891 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36892 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36893 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36894
36895 vports = lpfc_create_vport_work_array(phba);
36896 if (vports != NULL)
36897 @@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36898 }
36899 }
36900 lpfc_destroy_vport_work_array(phba, vports);
36901 - atomic_set(&phba->num_rsrc_err, 0);
36902 - atomic_set(&phba->num_cmd_success, 0);
36903 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36904 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36905 }
36906
36907 /**
36908 @@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36909 }
36910 }
36911 lpfc_destroy_vport_work_array(phba, vports);
36912 - atomic_set(&phba->num_rsrc_err, 0);
36913 - atomic_set(&phba->num_cmd_success, 0);
36914 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36915 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36916 }
36917
36918 /**
36919 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36920 index ea8a0b4..812a124 100644
36921 --- a/drivers/scsi/pmcraid.c
36922 +++ b/drivers/scsi/pmcraid.c
36923 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36924 res->scsi_dev = scsi_dev;
36925 scsi_dev->hostdata = res;
36926 res->change_detected = 0;
36927 - atomic_set(&res->read_failures, 0);
36928 - atomic_set(&res->write_failures, 0);
36929 + atomic_set_unchecked(&res->read_failures, 0);
36930 + atomic_set_unchecked(&res->write_failures, 0);
36931 rc = 0;
36932 }
36933 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36934 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36935
36936 /* If this was a SCSI read/write command keep count of errors */
36937 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36938 - atomic_inc(&res->read_failures);
36939 + atomic_inc_unchecked(&res->read_failures);
36940 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36941 - atomic_inc(&res->write_failures);
36942 + atomic_inc_unchecked(&res->write_failures);
36943
36944 if (!RES_IS_GSCSI(res->cfg_entry) &&
36945 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36946 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36947 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36948 * hrrq_id assigned here in queuecommand
36949 */
36950 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36951 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36952 pinstance->num_hrrq;
36953 cmd->cmd_done = pmcraid_io_done;
36954
36955 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36956 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36957 * hrrq_id assigned here in queuecommand
36958 */
36959 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36960 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36961 pinstance->num_hrrq;
36962
36963 if (request_size) {
36964 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36965
36966 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36967 /* add resources only after host is added into system */
36968 - if (!atomic_read(&pinstance->expose_resources))
36969 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36970 return;
36971
36972 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36973 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36974 init_waitqueue_head(&pinstance->reset_wait_q);
36975
36976 atomic_set(&pinstance->outstanding_cmds, 0);
36977 - atomic_set(&pinstance->last_message_id, 0);
36978 - atomic_set(&pinstance->expose_resources, 0);
36979 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36980 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36981
36982 INIT_LIST_HEAD(&pinstance->free_res_q);
36983 INIT_LIST_HEAD(&pinstance->used_res_q);
36984 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36985 /* Schedule worker thread to handle CCN and take care of adding and
36986 * removing devices to OS
36987 */
36988 - atomic_set(&pinstance->expose_resources, 1);
36989 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36990 schedule_work(&pinstance->worker_q);
36991 return rc;
36992
36993 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36994 index e1d150f..6c6df44 100644
36995 --- a/drivers/scsi/pmcraid.h
36996 +++ b/drivers/scsi/pmcraid.h
36997 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36998 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36999
37000 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37001 - atomic_t last_message_id;
37002 + atomic_unchecked_t last_message_id;
37003
37004 /* configuration table */
37005 struct pmcraid_config_table *cfg_table;
37006 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37007 atomic_t outstanding_cmds;
37008
37009 /* should add/delete resources to mid-layer now ?*/
37010 - atomic_t expose_resources;
37011 + atomic_unchecked_t expose_resources;
37012
37013
37014
37015 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37016 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37017 };
37018 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37019 - atomic_t read_failures; /* count of failed READ commands */
37020 - atomic_t write_failures; /* count of failed WRITE commands */
37021 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37022 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37023
37024 /* To indicate add/delete/modify during CCN */
37025 u8 change_detected;
37026 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37027 index a244303..6015eb7 100644
37028 --- a/drivers/scsi/qla2xxx/qla_def.h
37029 +++ b/drivers/scsi/qla2xxx/qla_def.h
37030 @@ -2264,7 +2264,7 @@ struct isp_operations {
37031 int (*start_scsi) (srb_t *);
37032 int (*abort_isp) (struct scsi_qla_host *);
37033 int (*iospace_config)(struct qla_hw_data*);
37034 -};
37035 +} __no_const;
37036
37037 /* MSI-X Support *************************************************************/
37038
37039 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37040 index 7f2492e..5113877 100644
37041 --- a/drivers/scsi/qla4xxx/ql4_def.h
37042 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37043 @@ -268,7 +268,7 @@ struct ddb_entry {
37044 * (4000 only) */
37045 atomic_t relogin_timer; /* Max Time to wait for
37046 * relogin to complete */
37047 - atomic_t relogin_retry_count; /* Num of times relogin has been
37048 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37049 * retried */
37050 uint32_t default_time2wait; /* Default Min time between
37051 * relogins (+aens) */
37052 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37053 index ee47820..a83b1f4 100644
37054 --- a/drivers/scsi/qla4xxx/ql4_os.c
37055 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37056 @@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37057 */
37058 if (!iscsi_is_session_online(cls_sess)) {
37059 /* Reset retry relogin timer */
37060 - atomic_inc(&ddb_entry->relogin_retry_count);
37061 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37062 DEBUG2(ql4_printk(KERN_INFO, ha,
37063 "%s: index[%d] relogin timed out-retrying"
37064 " relogin (%d), retry (%d)\n", __func__,
37065 ddb_entry->fw_ddb_index,
37066 - atomic_read(&ddb_entry->relogin_retry_count),
37067 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37068 ddb_entry->default_time2wait + 4));
37069 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37070 atomic_set(&ddb_entry->retry_relogin_timer,
37071 @@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37072
37073 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37074 atomic_set(&ddb_entry->relogin_timer, 0);
37075 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37076 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37077 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37078 ddb_entry->default_relogin_timeout =
37079 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37080 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37081 index 07322ec..91ccc23 100644
37082 --- a/drivers/scsi/scsi.c
37083 +++ b/drivers/scsi/scsi.c
37084 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37085 unsigned long timeout;
37086 int rtn = 0;
37087
37088 - atomic_inc(&cmd->device->iorequest_cnt);
37089 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37090
37091 /* check if the device is still usable */
37092 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37093 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37094 index 4037fd5..a19fcc7 100644
37095 --- a/drivers/scsi/scsi_lib.c
37096 +++ b/drivers/scsi/scsi_lib.c
37097 @@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37098 shost = sdev->host;
37099 scsi_init_cmd_errh(cmd);
37100 cmd->result = DID_NO_CONNECT << 16;
37101 - atomic_inc(&cmd->device->iorequest_cnt);
37102 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37103
37104 /*
37105 * SCSI request completion path will do scsi_device_unbusy(),
37106 @@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
37107
37108 INIT_LIST_HEAD(&cmd->eh_entry);
37109
37110 - atomic_inc(&cmd->device->iodone_cnt);
37111 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37112 if (cmd->result)
37113 - atomic_inc(&cmd->device->ioerr_cnt);
37114 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37115
37116 disposition = scsi_decide_disposition(cmd);
37117 if (disposition != SUCCESS &&
37118 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37119 index 04c2a27..9d8bd66 100644
37120 --- a/drivers/scsi/scsi_sysfs.c
37121 +++ b/drivers/scsi/scsi_sysfs.c
37122 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37123 char *buf) \
37124 { \
37125 struct scsi_device *sdev = to_scsi_device(dev); \
37126 - unsigned long long count = atomic_read(&sdev->field); \
37127 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37128 return snprintf(buf, 20, "0x%llx\n", count); \
37129 } \
37130 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37131 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37132 index 84a1fdf..693b0d6 100644
37133 --- a/drivers/scsi/scsi_tgt_lib.c
37134 +++ b/drivers/scsi/scsi_tgt_lib.c
37135 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37136 int err;
37137
37138 dprintk("%lx %u\n", uaddr, len);
37139 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37140 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37141 if (err) {
37142 /*
37143 * TODO: need to fixup sg_tablesize, max_segment_size,
37144 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37145 index 80fbe2a..efa223b 100644
37146 --- a/drivers/scsi/scsi_transport_fc.c
37147 +++ b/drivers/scsi/scsi_transport_fc.c
37148 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37149 * Netlink Infrastructure
37150 */
37151
37152 -static atomic_t fc_event_seq;
37153 +static atomic_unchecked_t fc_event_seq;
37154
37155 /**
37156 * fc_get_event_number - Obtain the next sequential FC event number
37157 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37158 u32
37159 fc_get_event_number(void)
37160 {
37161 - return atomic_add_return(1, &fc_event_seq);
37162 + return atomic_add_return_unchecked(1, &fc_event_seq);
37163 }
37164 EXPORT_SYMBOL(fc_get_event_number);
37165
37166 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37167 {
37168 int error;
37169
37170 - atomic_set(&fc_event_seq, 0);
37171 + atomic_set_unchecked(&fc_event_seq, 0);
37172
37173 error = transport_class_register(&fc_host_class);
37174 if (error)
37175 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37176 char *cp;
37177
37178 *val = simple_strtoul(buf, &cp, 0);
37179 - if ((*cp && (*cp != '\n')) || (*val < 0))
37180 + if (*cp && (*cp != '\n'))
37181 return -EINVAL;
37182 /*
37183 * Check for overflow; dev_loss_tmo is u32
37184 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37185 index 1cf640e..78e9014 100644
37186 --- a/drivers/scsi/scsi_transport_iscsi.c
37187 +++ b/drivers/scsi/scsi_transport_iscsi.c
37188 @@ -79,7 +79,7 @@ struct iscsi_internal {
37189 struct transport_container session_cont;
37190 };
37191
37192 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37193 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37194 static struct workqueue_struct *iscsi_eh_timer_workq;
37195
37196 static DEFINE_IDA(iscsi_sess_ida);
37197 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37198 int err;
37199
37200 ihost = shost->shost_data;
37201 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37202 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37203
37204 if (target_id == ISCSI_MAX_TARGET) {
37205 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37206 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37207 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37208 ISCSI_TRANSPORT_VERSION);
37209
37210 - atomic_set(&iscsi_session_nr, 0);
37211 + atomic_set_unchecked(&iscsi_session_nr, 0);
37212
37213 err = class_register(&iscsi_transport_class);
37214 if (err)
37215 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37216 index 21a045e..ec89e03 100644
37217 --- a/drivers/scsi/scsi_transport_srp.c
37218 +++ b/drivers/scsi/scsi_transport_srp.c
37219 @@ -33,7 +33,7 @@
37220 #include "scsi_transport_srp_internal.h"
37221
37222 struct srp_host_attrs {
37223 - atomic_t next_port_id;
37224 + atomic_unchecked_t next_port_id;
37225 };
37226 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37227
37228 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37229 struct Scsi_Host *shost = dev_to_shost(dev);
37230 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37231
37232 - atomic_set(&srp_host->next_port_id, 0);
37233 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37234 return 0;
37235 }
37236
37237 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37238 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37239 rport->roles = ids->roles;
37240
37241 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37242 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37243 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37244
37245 transport_setup_device(&rport->dev);
37246 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37247 index eacd46b..e3f4d62 100644
37248 --- a/drivers/scsi/sg.c
37249 +++ b/drivers/scsi/sg.c
37250 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37251 sdp->disk->disk_name,
37252 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37253 NULL,
37254 - (char *)arg);
37255 + (char __user *)arg);
37256 case BLKTRACESTART:
37257 return blk_trace_startstop(sdp->device->request_queue, 1);
37258 case BLKTRACESTOP:
37259 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37260 const struct file_operations * fops;
37261 };
37262
37263 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37264 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37265 {"allow_dio", &adio_fops},
37266 {"debug", &debug_fops},
37267 {"def_reserved_size", &dressz_fops},
37268 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
37269 if (!sg_proc_sgp)
37270 return 1;
37271 for (k = 0; k < num_leaves; ++k) {
37272 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37273 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37274 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37275 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37276 }
37277 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37278 index 3d8f662..070f1a5 100644
37279 --- a/drivers/spi/spi.c
37280 +++ b/drivers/spi/spi.c
37281 @@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
37282 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37283
37284 /* portable code must never pass more than 32 bytes */
37285 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37286 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37287
37288 static u8 *buf;
37289
37290 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37291 index d91751f..a3a9e36 100644
37292 --- a/drivers/staging/octeon/ethernet-rx.c
37293 +++ b/drivers/staging/octeon/ethernet-rx.c
37294 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37295 /* Increment RX stats for virtual ports */
37296 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37297 #ifdef CONFIG_64BIT
37298 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37299 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37300 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37301 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37302 #else
37303 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37304 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37305 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37306 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37307 #endif
37308 }
37309 netif_receive_skb(skb);
37310 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37311 dev->name);
37312 */
37313 #ifdef CONFIG_64BIT
37314 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37315 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37316 #else
37317 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37318 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37319 #endif
37320 dev_kfree_skb_irq(skb);
37321 }
37322 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37323 index 60cba81..71eb239 100644
37324 --- a/drivers/staging/octeon/ethernet.c
37325 +++ b/drivers/staging/octeon/ethernet.c
37326 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37327 * since the RX tasklet also increments it.
37328 */
37329 #ifdef CONFIG_64BIT
37330 - atomic64_add(rx_status.dropped_packets,
37331 - (atomic64_t *)&priv->stats.rx_dropped);
37332 + atomic64_add_unchecked(rx_status.dropped_packets,
37333 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37334 #else
37335 - atomic_add(rx_status.dropped_packets,
37336 - (atomic_t *)&priv->stats.rx_dropped);
37337 + atomic_add_unchecked(rx_status.dropped_packets,
37338 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37339 #endif
37340 }
37341
37342 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37343 index d3d8727..f9327bb8 100644
37344 --- a/drivers/staging/rtl8712/rtl871x_io.h
37345 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37346 @@ -108,7 +108,7 @@ struct _io_ops {
37347 u8 *pmem);
37348 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37349 u8 *pmem);
37350 -};
37351 +} __no_const;
37352
37353 struct io_req {
37354 struct list_head list;
37355 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37356 index c7b5e8b..783d6cb 100644
37357 --- a/drivers/staging/sbe-2t3e3/netdev.c
37358 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37359 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37360 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37361
37362 if (rlen)
37363 - if (copy_to_user(data, &resp, rlen))
37364 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37365 return -EFAULT;
37366
37367 return 0;
37368 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37369 index 42cdafe..2769103 100644
37370 --- a/drivers/staging/speakup/speakup_soft.c
37371 +++ b/drivers/staging/speakup/speakup_soft.c
37372 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37373 break;
37374 } else if (!initialized) {
37375 if (*init) {
37376 - ch = *init;
37377 init++;
37378 } else {
37379 initialized = 1;
37380 }
37381 + ch = *init;
37382 } else {
37383 ch = synth_buffer_getc();
37384 }
37385 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37386 index c7b888c..c94be93 100644
37387 --- a/drivers/staging/usbip/usbip_common.h
37388 +++ b/drivers/staging/usbip/usbip_common.h
37389 @@ -289,7 +289,7 @@ struct usbip_device {
37390 void (*shutdown)(struct usbip_device *);
37391 void (*reset)(struct usbip_device *);
37392 void (*unusable)(struct usbip_device *);
37393 - } eh_ops;
37394 + } __no_const eh_ops;
37395 };
37396
37397 /* usbip_common.c */
37398 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37399 index 88b3298..3783eee 100644
37400 --- a/drivers/staging/usbip/vhci.h
37401 +++ b/drivers/staging/usbip/vhci.h
37402 @@ -88,7 +88,7 @@ struct vhci_hcd {
37403 unsigned resuming:1;
37404 unsigned long re_timeout;
37405
37406 - atomic_t seqnum;
37407 + atomic_unchecked_t seqnum;
37408
37409 /*
37410 * NOTE:
37411 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37412 index dca9bf1..80735c9 100644
37413 --- a/drivers/staging/usbip/vhci_hcd.c
37414 +++ b/drivers/staging/usbip/vhci_hcd.c
37415 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37416 return;
37417 }
37418
37419 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37420 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37421 if (priv->seqnum == 0xffff)
37422 dev_info(&urb->dev->dev, "seqnum max\n");
37423
37424 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37425 return -ENOMEM;
37426 }
37427
37428 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37429 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37430 if (unlink->seqnum == 0xffff)
37431 pr_info("seqnum max\n");
37432
37433 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37434 vdev->rhport = rhport;
37435 }
37436
37437 - atomic_set(&vhci->seqnum, 0);
37438 + atomic_set_unchecked(&vhci->seqnum, 0);
37439 spin_lock_init(&vhci->lock);
37440
37441 hcd->power_budget = 0; /* no limit */
37442 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37443 index f5fba732..210a16c 100644
37444 --- a/drivers/staging/usbip/vhci_rx.c
37445 +++ b/drivers/staging/usbip/vhci_rx.c
37446 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37447 if (!urb) {
37448 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37449 pr_info("max seqnum %d\n",
37450 - atomic_read(&the_controller->seqnum));
37451 + atomic_read_unchecked(&the_controller->seqnum));
37452 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37453 return;
37454 }
37455 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37456 index 7735027..30eed13 100644
37457 --- a/drivers/staging/vt6655/hostap.c
37458 +++ b/drivers/staging/vt6655/hostap.c
37459 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37460 *
37461 */
37462
37463 +static net_device_ops_no_const apdev_netdev_ops;
37464 +
37465 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37466 {
37467 PSDevice apdev_priv;
37468 struct net_device *dev = pDevice->dev;
37469 int ret;
37470 - const struct net_device_ops apdev_netdev_ops = {
37471 - .ndo_start_xmit = pDevice->tx_80211,
37472 - };
37473
37474 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37475
37476 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37477 *apdev_priv = *pDevice;
37478 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37479
37480 + /* only half broken now */
37481 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37482 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37483
37484 pDevice->apdev->type = ARPHRD_IEEE80211;
37485 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37486 index 51b5adf..098e320 100644
37487 --- a/drivers/staging/vt6656/hostap.c
37488 +++ b/drivers/staging/vt6656/hostap.c
37489 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37490 *
37491 */
37492
37493 +static net_device_ops_no_const apdev_netdev_ops;
37494 +
37495 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37496 {
37497 PSDevice apdev_priv;
37498 struct net_device *dev = pDevice->dev;
37499 int ret;
37500 - const struct net_device_ops apdev_netdev_ops = {
37501 - .ndo_start_xmit = pDevice->tx_80211,
37502 - };
37503
37504 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37505
37506 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37507 *apdev_priv = *pDevice;
37508 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37509
37510 + /* only half broken now */
37511 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37512 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37513
37514 pDevice->apdev->type = ARPHRD_IEEE80211;
37515 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37516 index 7843dfd..3db105f 100644
37517 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37518 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37519 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37520
37521 struct usbctlx_completor {
37522 int (*complete) (struct usbctlx_completor *);
37523 -};
37524 +} __no_const;
37525
37526 static int
37527 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37528 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37529 index 1ca66ea..76f1343 100644
37530 --- a/drivers/staging/zcache/tmem.c
37531 +++ b/drivers/staging/zcache/tmem.c
37532 @@ -39,7 +39,7 @@
37533 * A tmem host implementation must use this function to register callbacks
37534 * for memory allocation.
37535 */
37536 -static struct tmem_hostops tmem_hostops;
37537 +static tmem_hostops_no_const tmem_hostops;
37538
37539 static void tmem_objnode_tree_init(void);
37540
37541 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37542 * A tmem host implementation must use this function to register
37543 * callbacks for a page-accessible memory (PAM) implementation
37544 */
37545 -static struct tmem_pamops tmem_pamops;
37546 +static tmem_pamops_no_const tmem_pamops;
37547
37548 void tmem_register_pamops(struct tmem_pamops *m)
37549 {
37550 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37551 index 0d4aa82..f7832d4 100644
37552 --- a/drivers/staging/zcache/tmem.h
37553 +++ b/drivers/staging/zcache/tmem.h
37554 @@ -180,6 +180,7 @@ struct tmem_pamops {
37555 void (*new_obj)(struct tmem_obj *);
37556 int (*replace_in_obj)(void *, struct tmem_obj *);
37557 };
37558 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37559 extern void tmem_register_pamops(struct tmem_pamops *m);
37560
37561 /* memory allocation methods provided by the host implementation */
37562 @@ -189,6 +190,7 @@ struct tmem_hostops {
37563 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37564 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37565 };
37566 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37567 extern void tmem_register_hostops(struct tmem_hostops *m);
37568
37569 /* core tmem accessor functions */
37570 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37571 index 30a6770..fa323f8 100644
37572 --- a/drivers/target/target_core_cdb.c
37573 +++ b/drivers/target/target_core_cdb.c
37574 @@ -1107,7 +1107,7 @@ int target_emulate_write_same(struct se_task *task)
37575 if (num_blocks != 0)
37576 range = num_blocks;
37577 else
37578 - range = (dev->transport->get_blocks(dev) - lba);
37579 + range = (dev->transport->get_blocks(dev) - lba) + 1;
37580
37581 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
37582 (unsigned long long)lba, (unsigned long long)range);
37583 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37584 index c3148b1..89d10e6 100644
37585 --- a/drivers/target/target_core_pr.c
37586 +++ b/drivers/target/target_core_pr.c
37587 @@ -2038,7 +2038,7 @@ static int __core_scsi3_write_aptpl_to_file(
37588 if (IS_ERR(file) || !file || !file->f_dentry) {
37589 pr_err("filp_open(%s) for APTPL metadata"
37590 " failed\n", path);
37591 - return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
37592 + return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
37593 }
37594
37595 iov[0].iov_base = &buf[0];
37596 @@ -3826,7 +3826,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37597 " SPC-2 reservation is held, returning"
37598 " RESERVATION_CONFLICT\n");
37599 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
37600 - ret = EINVAL;
37601 + ret = -EINVAL;
37602 goto out;
37603 }
37604
37605 @@ -3836,7 +3836,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37606 */
37607 if (!cmd->se_sess) {
37608 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
37609 - return -EINVAL;
37610 + ret = -EINVAL;
37611 + goto out;
37612 }
37613
37614 if (cmd->data_length < 24) {
37615 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37616 index f015839..b15dfc4 100644
37617 --- a/drivers/target/target_core_tmr.c
37618 +++ b/drivers/target/target_core_tmr.c
37619 @@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37620 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37621 cmd->t_task_list_num,
37622 atomic_read(&cmd->t_task_cdbs_left),
37623 - atomic_read(&cmd->t_task_cdbs_sent),
37624 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37625 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37626 (cmd->transport_state & CMD_T_STOP) != 0,
37627 (cmd->transport_state & CMD_T_SENT) != 0);
37628 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37629 index 443704f..92d3517 100644
37630 --- a/drivers/target/target_core_transport.c
37631 +++ b/drivers/target/target_core_transport.c
37632 @@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37633 spin_lock_init(&dev->se_port_lock);
37634 spin_lock_init(&dev->se_tmr_lock);
37635 spin_lock_init(&dev->qf_cmd_lock);
37636 - atomic_set(&dev->dev_ordered_id, 0);
37637 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37638
37639 se_dev_set_default_attribs(dev, dev_limits);
37640
37641 @@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37642 * Used to determine when ORDERED commands should go from
37643 * Dormant to Active status.
37644 */
37645 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37646 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37647 smp_mb__after_atomic_inc();
37648 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37649 cmd->se_ordered_id, cmd->sam_task_attr,
37650 @@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37651 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37652 cmd->t_task_list_num,
37653 atomic_read(&cmd->t_task_cdbs_left),
37654 - atomic_read(&cmd->t_task_cdbs_sent),
37655 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37656 atomic_read(&cmd->t_task_cdbs_ex_left),
37657 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37658 (cmd->transport_state & CMD_T_STOP) != 0,
37659 @@ -2216,9 +2216,9 @@ check_depth:
37660 cmd = task->task_se_cmd;
37661 spin_lock_irqsave(&cmd->t_state_lock, flags);
37662 task->task_flags |= (TF_ACTIVE | TF_SENT);
37663 - atomic_inc(&cmd->t_task_cdbs_sent);
37664 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37665
37666 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37667 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37668 cmd->t_task_list_num)
37669 cmd->transport_state |= CMD_T_SENT;
37670
37671 diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
37672 index a375f25..da90f64 100644
37673 --- a/drivers/target/tcm_fc/tfc_cmd.c
37674 +++ b/drivers/target/tcm_fc/tfc_cmd.c
37675 @@ -240,6 +240,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
37676 {
37677 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
37678
37679 + if (cmd->aborted)
37680 + return ~0;
37681 return fc_seq_exch(cmd->seq)->rxid;
37682 }
37683
37684 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37685 index 3436436..772237b 100644
37686 --- a/drivers/tty/hvc/hvcs.c
37687 +++ b/drivers/tty/hvc/hvcs.c
37688 @@ -83,6 +83,7 @@
37689 #include <asm/hvcserver.h>
37690 #include <asm/uaccess.h>
37691 #include <asm/vio.h>
37692 +#include <asm/local.h>
37693
37694 /*
37695 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37696 @@ -270,7 +271,7 @@ struct hvcs_struct {
37697 unsigned int index;
37698
37699 struct tty_struct *tty;
37700 - int open_count;
37701 + local_t open_count;
37702
37703 /*
37704 * Used to tell the driver kernel_thread what operations need to take
37705 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37706
37707 spin_lock_irqsave(&hvcsd->lock, flags);
37708
37709 - if (hvcsd->open_count > 0) {
37710 + if (local_read(&hvcsd->open_count) > 0) {
37711 spin_unlock_irqrestore(&hvcsd->lock, flags);
37712 printk(KERN_INFO "HVCS: vterm state unchanged. "
37713 "The hvcs device node is still in use.\n");
37714 @@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37715 if ((retval = hvcs_partner_connect(hvcsd)))
37716 goto error_release;
37717
37718 - hvcsd->open_count = 1;
37719 + local_set(&hvcsd->open_count, 1);
37720 hvcsd->tty = tty;
37721 tty->driver_data = hvcsd;
37722
37723 @@ -1172,7 +1173,7 @@ fast_open:
37724
37725 spin_lock_irqsave(&hvcsd->lock, flags);
37726 kref_get(&hvcsd->kref);
37727 - hvcsd->open_count++;
37728 + local_inc(&hvcsd->open_count);
37729 hvcsd->todo_mask |= HVCS_SCHED_READ;
37730 spin_unlock_irqrestore(&hvcsd->lock, flags);
37731
37732 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37733 hvcsd = tty->driver_data;
37734
37735 spin_lock_irqsave(&hvcsd->lock, flags);
37736 - if (--hvcsd->open_count == 0) {
37737 + if (local_dec_and_test(&hvcsd->open_count)) {
37738
37739 vio_disable_interrupts(hvcsd->vdev);
37740
37741 @@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37742 free_irq(irq, hvcsd);
37743 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37744 return;
37745 - } else if (hvcsd->open_count < 0) {
37746 + } else if (local_read(&hvcsd->open_count) < 0) {
37747 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37748 " is missmanaged.\n",
37749 - hvcsd->vdev->unit_address, hvcsd->open_count);
37750 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37751 }
37752
37753 spin_unlock_irqrestore(&hvcsd->lock, flags);
37754 @@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37755
37756 spin_lock_irqsave(&hvcsd->lock, flags);
37757 /* Preserve this so that we know how many kref refs to put */
37758 - temp_open_count = hvcsd->open_count;
37759 + temp_open_count = local_read(&hvcsd->open_count);
37760
37761 /*
37762 * Don't kref put inside the spinlock because the destruction
37763 @@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37764 hvcsd->tty->driver_data = NULL;
37765 hvcsd->tty = NULL;
37766
37767 - hvcsd->open_count = 0;
37768 + local_set(&hvcsd->open_count, 0);
37769
37770 /* This will drop any buffered data on the floor which is OK in a hangup
37771 * scenario. */
37772 @@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37773 * the middle of a write operation? This is a crummy place to do this
37774 * but we want to keep it all in the spinlock.
37775 */
37776 - if (hvcsd->open_count <= 0) {
37777 + if (local_read(&hvcsd->open_count) <= 0) {
37778 spin_unlock_irqrestore(&hvcsd->lock, flags);
37779 return -ENODEV;
37780 }
37781 @@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37782 {
37783 struct hvcs_struct *hvcsd = tty->driver_data;
37784
37785 - if (!hvcsd || hvcsd->open_count <= 0)
37786 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37787 return 0;
37788
37789 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37790 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37791 index 4daf962..b4a2281 100644
37792 --- a/drivers/tty/ipwireless/tty.c
37793 +++ b/drivers/tty/ipwireless/tty.c
37794 @@ -29,6 +29,7 @@
37795 #include <linux/tty_driver.h>
37796 #include <linux/tty_flip.h>
37797 #include <linux/uaccess.h>
37798 +#include <asm/local.h>
37799
37800 #include "tty.h"
37801 #include "network.h"
37802 @@ -51,7 +52,7 @@ struct ipw_tty {
37803 int tty_type;
37804 struct ipw_network *network;
37805 struct tty_struct *linux_tty;
37806 - int open_count;
37807 + local_t open_count;
37808 unsigned int control_lines;
37809 struct mutex ipw_tty_mutex;
37810 int tx_bytes_queued;
37811 @@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37812 mutex_unlock(&tty->ipw_tty_mutex);
37813 return -ENODEV;
37814 }
37815 - if (tty->open_count == 0)
37816 + if (local_read(&tty->open_count) == 0)
37817 tty->tx_bytes_queued = 0;
37818
37819 - tty->open_count++;
37820 + local_inc(&tty->open_count);
37821
37822 tty->linux_tty = linux_tty;
37823 linux_tty->driver_data = tty;
37824 @@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37825
37826 static void do_ipw_close(struct ipw_tty *tty)
37827 {
37828 - tty->open_count--;
37829 -
37830 - if (tty->open_count == 0) {
37831 + if (local_dec_return(&tty->open_count) == 0) {
37832 struct tty_struct *linux_tty = tty->linux_tty;
37833
37834 if (linux_tty != NULL) {
37835 @@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37836 return;
37837
37838 mutex_lock(&tty->ipw_tty_mutex);
37839 - if (tty->open_count == 0) {
37840 + if (local_read(&tty->open_count) == 0) {
37841 mutex_unlock(&tty->ipw_tty_mutex);
37842 return;
37843 }
37844 @@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37845 return;
37846 }
37847
37848 - if (!tty->open_count) {
37849 + if (!local_read(&tty->open_count)) {
37850 mutex_unlock(&tty->ipw_tty_mutex);
37851 return;
37852 }
37853 @@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37854 return -ENODEV;
37855
37856 mutex_lock(&tty->ipw_tty_mutex);
37857 - if (!tty->open_count) {
37858 + if (!local_read(&tty->open_count)) {
37859 mutex_unlock(&tty->ipw_tty_mutex);
37860 return -EINVAL;
37861 }
37862 @@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37863 if (!tty)
37864 return -ENODEV;
37865
37866 - if (!tty->open_count)
37867 + if (!local_read(&tty->open_count))
37868 return -EINVAL;
37869
37870 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37871 @@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37872 if (!tty)
37873 return 0;
37874
37875 - if (!tty->open_count)
37876 + if (!local_read(&tty->open_count))
37877 return 0;
37878
37879 return tty->tx_bytes_queued;
37880 @@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37881 if (!tty)
37882 return -ENODEV;
37883
37884 - if (!tty->open_count)
37885 + if (!local_read(&tty->open_count))
37886 return -EINVAL;
37887
37888 return get_control_lines(tty);
37889 @@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37890 if (!tty)
37891 return -ENODEV;
37892
37893 - if (!tty->open_count)
37894 + if (!local_read(&tty->open_count))
37895 return -EINVAL;
37896
37897 return set_control_lines(tty, set, clear);
37898 @@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37899 if (!tty)
37900 return -ENODEV;
37901
37902 - if (!tty->open_count)
37903 + if (!local_read(&tty->open_count))
37904 return -EINVAL;
37905
37906 /* FIXME: Exactly how is the tty object locked here .. */
37907 @@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37908 against a parallel ioctl etc */
37909 mutex_lock(&ttyj->ipw_tty_mutex);
37910 }
37911 - while (ttyj->open_count)
37912 + while (local_read(&ttyj->open_count))
37913 do_ipw_close(ttyj);
37914 ipwireless_disassociate_network_ttys(network,
37915 ttyj->channel_idx);
37916 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37917 index c43b683..0a88f1c 100644
37918 --- a/drivers/tty/n_gsm.c
37919 +++ b/drivers/tty/n_gsm.c
37920 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37921 kref_init(&dlci->ref);
37922 mutex_init(&dlci->mutex);
37923 dlci->fifo = &dlci->_fifo;
37924 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37925 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37926 kfree(dlci);
37927 return NULL;
37928 }
37929 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37930 index 94b6eda..15f7cec 100644
37931 --- a/drivers/tty/n_tty.c
37932 +++ b/drivers/tty/n_tty.c
37933 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37934 {
37935 *ops = tty_ldisc_N_TTY;
37936 ops->owner = NULL;
37937 - ops->refcount = ops->flags = 0;
37938 + atomic_set(&ops->refcount, 0);
37939 + ops->flags = 0;
37940 }
37941 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37942 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37943 index eeae7fa..177a743 100644
37944 --- a/drivers/tty/pty.c
37945 +++ b/drivers/tty/pty.c
37946 @@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37947 panic("Couldn't register Unix98 pts driver");
37948
37949 /* Now create the /dev/ptmx special device */
37950 + pax_open_kernel();
37951 tty_default_fops(&ptmx_fops);
37952 - ptmx_fops.open = ptmx_open;
37953 + *(void **)&ptmx_fops.open = ptmx_open;
37954 + pax_close_kernel();
37955
37956 cdev_init(&ptmx_cdev, &ptmx_fops);
37957 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37958 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37959 index 2b42a01..32a2ed3 100644
37960 --- a/drivers/tty/serial/kgdboc.c
37961 +++ b/drivers/tty/serial/kgdboc.c
37962 @@ -24,8 +24,9 @@
37963 #define MAX_CONFIG_LEN 40
37964
37965 static struct kgdb_io kgdboc_io_ops;
37966 +static struct kgdb_io kgdboc_io_ops_console;
37967
37968 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37969 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37970 static int configured = -1;
37971
37972 static char config[MAX_CONFIG_LEN];
37973 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37974 kgdboc_unregister_kbd();
37975 if (configured == 1)
37976 kgdb_unregister_io_module(&kgdboc_io_ops);
37977 + else if (configured == 2)
37978 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37979 }
37980
37981 static int configure_kgdboc(void)
37982 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37983 int err;
37984 char *cptr = config;
37985 struct console *cons;
37986 + int is_console = 0;
37987
37988 err = kgdboc_option_setup(config);
37989 if (err || !strlen(config) || isspace(config[0]))
37990 goto noconfig;
37991
37992 err = -ENODEV;
37993 - kgdboc_io_ops.is_console = 0;
37994 kgdb_tty_driver = NULL;
37995
37996 kgdboc_use_kms = 0;
37997 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37998 int idx;
37999 if (cons->device && cons->device(cons, &idx) == p &&
38000 idx == tty_line) {
38001 - kgdboc_io_ops.is_console = 1;
38002 + is_console = 1;
38003 break;
38004 }
38005 cons = cons->next;
38006 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38007 kgdb_tty_line = tty_line;
38008
38009 do_register:
38010 - err = kgdb_register_io_module(&kgdboc_io_ops);
38011 + if (is_console) {
38012 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38013 + configured = 2;
38014 + } else {
38015 + err = kgdb_register_io_module(&kgdboc_io_ops);
38016 + configured = 1;
38017 + }
38018 if (err)
38019 goto noconfig;
38020
38021 - configured = 1;
38022 -
38023 return 0;
38024
38025 noconfig:
38026 @@ -213,7 +220,7 @@ noconfig:
38027 static int __init init_kgdboc(void)
38028 {
38029 /* Already configured? */
38030 - if (configured == 1)
38031 + if (configured >= 1)
38032 return 0;
38033
38034 return configure_kgdboc();
38035 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38036 if (config[len - 1] == '\n')
38037 config[len - 1] = '\0';
38038
38039 - if (configured == 1)
38040 + if (configured >= 1)
38041 cleanup_kgdboc();
38042
38043 /* Go and configure with the new params. */
38044 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38045 .post_exception = kgdboc_post_exp_handler,
38046 };
38047
38048 +static struct kgdb_io kgdboc_io_ops_console = {
38049 + .name = "kgdboc",
38050 + .read_char = kgdboc_get_char,
38051 + .write_char = kgdboc_put_char,
38052 + .pre_exception = kgdboc_pre_exp_handler,
38053 + .post_exception = kgdboc_post_exp_handler,
38054 + .is_console = 1
38055 +};
38056 +
38057 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38058 /* This is only available if kgdboc is a built in for early debugging */
38059 static int __init kgdboc_early_init(char *opt)
38060 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38061 index 05728894..b9d44c6 100644
38062 --- a/drivers/tty/sysrq.c
38063 +++ b/drivers/tty/sysrq.c
38064 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38065 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38066 size_t count, loff_t *ppos)
38067 {
38068 - if (count) {
38069 + if (count && capable(CAP_SYS_ADMIN)) {
38070 char c;
38071
38072 if (get_user(c, buf))
38073 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38074 index d939bd7..33d92cd 100644
38075 --- a/drivers/tty/tty_io.c
38076 +++ b/drivers/tty/tty_io.c
38077 @@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38078
38079 void tty_default_fops(struct file_operations *fops)
38080 {
38081 - *fops = tty_fops;
38082 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38083 }
38084
38085 /*
38086 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38087 index 24b95db..9c078d0 100644
38088 --- a/drivers/tty/tty_ldisc.c
38089 +++ b/drivers/tty/tty_ldisc.c
38090 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38091 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38092 struct tty_ldisc_ops *ldo = ld->ops;
38093
38094 - ldo->refcount--;
38095 + atomic_dec(&ldo->refcount);
38096 module_put(ldo->owner);
38097 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38098
38099 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38100 spin_lock_irqsave(&tty_ldisc_lock, flags);
38101 tty_ldiscs[disc] = new_ldisc;
38102 new_ldisc->num = disc;
38103 - new_ldisc->refcount = 0;
38104 + atomic_set(&new_ldisc->refcount, 0);
38105 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38106
38107 return ret;
38108 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38109 return -EINVAL;
38110
38111 spin_lock_irqsave(&tty_ldisc_lock, flags);
38112 - if (tty_ldiscs[disc]->refcount)
38113 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38114 ret = -EBUSY;
38115 else
38116 tty_ldiscs[disc] = NULL;
38117 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38118 if (ldops) {
38119 ret = ERR_PTR(-EAGAIN);
38120 if (try_module_get(ldops->owner)) {
38121 - ldops->refcount++;
38122 + atomic_inc(&ldops->refcount);
38123 ret = ldops;
38124 }
38125 }
38126 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38127 unsigned long flags;
38128
38129 spin_lock_irqsave(&tty_ldisc_lock, flags);
38130 - ldops->refcount--;
38131 + atomic_dec(&ldops->refcount);
38132 module_put(ldops->owner);
38133 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38134 }
38135 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38136 index 3b0c4e3..f98a992 100644
38137 --- a/drivers/tty/vt/keyboard.c
38138 +++ b/drivers/tty/vt/keyboard.c
38139 @@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38140 kbd->kbdmode == VC_OFF) &&
38141 value != KVAL(K_SAK))
38142 return; /* SAK is allowed even in raw mode */
38143 +
38144 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38145 + {
38146 + void *func = fn_handler[value];
38147 + if (func == fn_show_state || func == fn_show_ptregs ||
38148 + func == fn_show_mem)
38149 + return;
38150 + }
38151 +#endif
38152 +
38153 fn_handler[value](vc);
38154 }
38155
38156 @@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38157 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38158 return -EFAULT;
38159
38160 - if (!capable(CAP_SYS_TTY_CONFIG))
38161 - perm = 0;
38162 -
38163 switch (cmd) {
38164 case KDGKBENT:
38165 /* Ensure another thread doesn't free it under us */
38166 @@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38167 spin_unlock_irqrestore(&kbd_event_lock, flags);
38168 return put_user(val, &user_kbe->kb_value);
38169 case KDSKBENT:
38170 + if (!capable(CAP_SYS_TTY_CONFIG))
38171 + perm = 0;
38172 +
38173 if (!perm)
38174 return -EPERM;
38175 if (!i && v == K_NOSUCHMAP) {
38176 @@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38177 int i, j, k;
38178 int ret;
38179
38180 - if (!capable(CAP_SYS_TTY_CONFIG))
38181 - perm = 0;
38182 -
38183 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38184 if (!kbs) {
38185 ret = -ENOMEM;
38186 @@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38187 kfree(kbs);
38188 return ((p && *p) ? -EOVERFLOW : 0);
38189 case KDSKBSENT:
38190 + if (!capable(CAP_SYS_TTY_CONFIG))
38191 + perm = 0;
38192 +
38193 if (!perm) {
38194 ret = -EPERM;
38195 goto reterr;
38196 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38197 index a783d53..cb30d94 100644
38198 --- a/drivers/uio/uio.c
38199 +++ b/drivers/uio/uio.c
38200 @@ -25,6 +25,7 @@
38201 #include <linux/kobject.h>
38202 #include <linux/cdev.h>
38203 #include <linux/uio_driver.h>
38204 +#include <asm/local.h>
38205
38206 #define UIO_MAX_DEVICES (1U << MINORBITS)
38207
38208 @@ -32,10 +33,10 @@ struct uio_device {
38209 struct module *owner;
38210 struct device *dev;
38211 int minor;
38212 - atomic_t event;
38213 + atomic_unchecked_t event;
38214 struct fasync_struct *async_queue;
38215 wait_queue_head_t wait;
38216 - int vma_count;
38217 + local_t vma_count;
38218 struct uio_info *info;
38219 struct kobject *map_dir;
38220 struct kobject *portio_dir;
38221 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38222 struct device_attribute *attr, char *buf)
38223 {
38224 struct uio_device *idev = dev_get_drvdata(dev);
38225 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38226 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38227 }
38228
38229 static struct device_attribute uio_class_attributes[] = {
38230 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38231 {
38232 struct uio_device *idev = info->uio_dev;
38233
38234 - atomic_inc(&idev->event);
38235 + atomic_inc_unchecked(&idev->event);
38236 wake_up_interruptible(&idev->wait);
38237 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38238 }
38239 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38240 }
38241
38242 listener->dev = idev;
38243 - listener->event_count = atomic_read(&idev->event);
38244 + listener->event_count = atomic_read_unchecked(&idev->event);
38245 filep->private_data = listener;
38246
38247 if (idev->info->open) {
38248 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38249 return -EIO;
38250
38251 poll_wait(filep, &idev->wait, wait);
38252 - if (listener->event_count != atomic_read(&idev->event))
38253 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38254 return POLLIN | POLLRDNORM;
38255 return 0;
38256 }
38257 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38258 do {
38259 set_current_state(TASK_INTERRUPTIBLE);
38260
38261 - event_count = atomic_read(&idev->event);
38262 + event_count = atomic_read_unchecked(&idev->event);
38263 if (event_count != listener->event_count) {
38264 if (copy_to_user(buf, &event_count, count))
38265 retval = -EFAULT;
38266 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38267 static void uio_vma_open(struct vm_area_struct *vma)
38268 {
38269 struct uio_device *idev = vma->vm_private_data;
38270 - idev->vma_count++;
38271 + local_inc(&idev->vma_count);
38272 }
38273
38274 static void uio_vma_close(struct vm_area_struct *vma)
38275 {
38276 struct uio_device *idev = vma->vm_private_data;
38277 - idev->vma_count--;
38278 + local_dec(&idev->vma_count);
38279 }
38280
38281 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38282 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38283 idev->owner = owner;
38284 idev->info = info;
38285 init_waitqueue_head(&idev->wait);
38286 - atomic_set(&idev->event, 0);
38287 + atomic_set_unchecked(&idev->event, 0);
38288
38289 ret = uio_get_minor(idev);
38290 if (ret)
38291 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38292 index 98b89fe..aff824e 100644
38293 --- a/drivers/usb/atm/cxacru.c
38294 +++ b/drivers/usb/atm/cxacru.c
38295 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38296 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38297 if (ret < 2)
38298 return -EINVAL;
38299 - if (index < 0 || index > 0x7f)
38300 + if (index > 0x7f)
38301 return -EINVAL;
38302 pos += tmp;
38303
38304 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38305 index d3448ca..d2864ca 100644
38306 --- a/drivers/usb/atm/usbatm.c
38307 +++ b/drivers/usb/atm/usbatm.c
38308 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38309 if (printk_ratelimit())
38310 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38311 __func__, vpi, vci);
38312 - atomic_inc(&vcc->stats->rx_err);
38313 + atomic_inc_unchecked(&vcc->stats->rx_err);
38314 return;
38315 }
38316
38317 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38318 if (length > ATM_MAX_AAL5_PDU) {
38319 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38320 __func__, length, vcc);
38321 - atomic_inc(&vcc->stats->rx_err);
38322 + atomic_inc_unchecked(&vcc->stats->rx_err);
38323 goto out;
38324 }
38325
38326 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38327 if (sarb->len < pdu_length) {
38328 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38329 __func__, pdu_length, sarb->len, vcc);
38330 - atomic_inc(&vcc->stats->rx_err);
38331 + atomic_inc_unchecked(&vcc->stats->rx_err);
38332 goto out;
38333 }
38334
38335 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38336 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38337 __func__, vcc);
38338 - atomic_inc(&vcc->stats->rx_err);
38339 + atomic_inc_unchecked(&vcc->stats->rx_err);
38340 goto out;
38341 }
38342
38343 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38344 if (printk_ratelimit())
38345 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38346 __func__, length);
38347 - atomic_inc(&vcc->stats->rx_drop);
38348 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38349 goto out;
38350 }
38351
38352 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38353
38354 vcc->push(vcc, skb);
38355
38356 - atomic_inc(&vcc->stats->rx);
38357 + atomic_inc_unchecked(&vcc->stats->rx);
38358 out:
38359 skb_trim(sarb, 0);
38360 }
38361 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38362 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38363
38364 usbatm_pop(vcc, skb);
38365 - atomic_inc(&vcc->stats->tx);
38366 + atomic_inc_unchecked(&vcc->stats->tx);
38367
38368 skb = skb_dequeue(&instance->sndqueue);
38369 }
38370 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38371 if (!left--)
38372 return sprintf(page,
38373 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38374 - atomic_read(&atm_dev->stats.aal5.tx),
38375 - atomic_read(&atm_dev->stats.aal5.tx_err),
38376 - atomic_read(&atm_dev->stats.aal5.rx),
38377 - atomic_read(&atm_dev->stats.aal5.rx_err),
38378 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38379 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38380 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38381 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38382 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38383 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38384
38385 if (!left--) {
38386 if (instance->disconnected)
38387 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38388 index d956965..4179a77 100644
38389 --- a/drivers/usb/core/devices.c
38390 +++ b/drivers/usb/core/devices.c
38391 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38392 * time it gets called.
38393 */
38394 static struct device_connect_event {
38395 - atomic_t count;
38396 + atomic_unchecked_t count;
38397 wait_queue_head_t wait;
38398 } device_event = {
38399 .count = ATOMIC_INIT(1),
38400 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38401
38402 void usbfs_conn_disc_event(void)
38403 {
38404 - atomic_add(2, &device_event.count);
38405 + atomic_add_unchecked(2, &device_event.count);
38406 wake_up(&device_event.wait);
38407 }
38408
38409 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38410
38411 poll_wait(file, &device_event.wait, wait);
38412
38413 - event_count = atomic_read(&device_event.count);
38414 + event_count = atomic_read_unchecked(&device_event.count);
38415 if (file->f_version != event_count) {
38416 file->f_version = event_count;
38417 return POLLIN | POLLRDNORM;
38418 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38419 index 1fc8f12..20647c1 100644
38420 --- a/drivers/usb/early/ehci-dbgp.c
38421 +++ b/drivers/usb/early/ehci-dbgp.c
38422 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38423
38424 #ifdef CONFIG_KGDB
38425 static struct kgdb_io kgdbdbgp_io_ops;
38426 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38427 +static struct kgdb_io kgdbdbgp_io_ops_console;
38428 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38429 #else
38430 #define dbgp_kgdb_mode (0)
38431 #endif
38432 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38433 .write_char = kgdbdbgp_write_char,
38434 };
38435
38436 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38437 + .name = "kgdbdbgp",
38438 + .read_char = kgdbdbgp_read_char,
38439 + .write_char = kgdbdbgp_write_char,
38440 + .is_console = 1
38441 +};
38442 +
38443 static int kgdbdbgp_wait_time;
38444
38445 static int __init kgdbdbgp_parse_config(char *str)
38446 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38447 ptr++;
38448 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38449 }
38450 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38451 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38452 + if (early_dbgp_console.index != -1)
38453 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38454 + else
38455 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38456
38457 return 0;
38458 }
38459 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38460 index d6bea3e..60b250e 100644
38461 --- a/drivers/usb/wusbcore/wa-hc.h
38462 +++ b/drivers/usb/wusbcore/wa-hc.h
38463 @@ -192,7 +192,7 @@ struct wahc {
38464 struct list_head xfer_delayed_list;
38465 spinlock_t xfer_list_lock;
38466 struct work_struct xfer_work;
38467 - atomic_t xfer_id_count;
38468 + atomic_unchecked_t xfer_id_count;
38469 };
38470
38471
38472 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38473 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38474 spin_lock_init(&wa->xfer_list_lock);
38475 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38476 - atomic_set(&wa->xfer_id_count, 1);
38477 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38478 }
38479
38480 /**
38481 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38482 index 57c01ab..8a05959 100644
38483 --- a/drivers/usb/wusbcore/wa-xfer.c
38484 +++ b/drivers/usb/wusbcore/wa-xfer.c
38485 @@ -296,7 +296,7 @@ out:
38486 */
38487 static void wa_xfer_id_init(struct wa_xfer *xfer)
38488 {
38489 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38490 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38491 }
38492
38493 /*
38494 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38495 index 51e4c1e..9d87e2a 100644
38496 --- a/drivers/vhost/vhost.c
38497 +++ b/drivers/vhost/vhost.c
38498 @@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38499 return 0;
38500 }
38501
38502 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38503 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38504 {
38505 struct file *eventfp, *filep = NULL,
38506 *pollstart = NULL, *pollstop = NULL;
38507 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38508 index b0b2ac3..89a4399 100644
38509 --- a/drivers/video/aty/aty128fb.c
38510 +++ b/drivers/video/aty/aty128fb.c
38511 @@ -148,7 +148,7 @@ enum {
38512 };
38513
38514 /* Must match above enum */
38515 -static const char *r128_family[] __devinitdata = {
38516 +static const char *r128_family[] __devinitconst = {
38517 "AGP",
38518 "PCI",
38519 "PRO AGP",
38520 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38521 index 5c3960d..15cf8fc 100644
38522 --- a/drivers/video/fbcmap.c
38523 +++ b/drivers/video/fbcmap.c
38524 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38525 rc = -ENODEV;
38526 goto out;
38527 }
38528 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38529 - !info->fbops->fb_setcmap)) {
38530 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38531 rc = -EINVAL;
38532 goto out1;
38533 }
38534 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38535 index c6ce416..3b9b642 100644
38536 --- a/drivers/video/fbmem.c
38537 +++ b/drivers/video/fbmem.c
38538 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38539 image->dx += image->width + 8;
38540 }
38541 } else if (rotate == FB_ROTATE_UD) {
38542 - for (x = 0; x < num && image->dx >= 0; x++) {
38543 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38544 info->fbops->fb_imageblit(info, image);
38545 image->dx -= image->width + 8;
38546 }
38547 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38548 image->dy += image->height + 8;
38549 }
38550 } else if (rotate == FB_ROTATE_CCW) {
38551 - for (x = 0; x < num && image->dy >= 0; x++) {
38552 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38553 info->fbops->fb_imageblit(info, image);
38554 image->dy -= image->height + 8;
38555 }
38556 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38557 return -EFAULT;
38558 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38559 return -EINVAL;
38560 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38561 + if (con2fb.framebuffer >= FB_MAX)
38562 return -EINVAL;
38563 if (!registered_fb[con2fb.framebuffer])
38564 request_module("fb%d", con2fb.framebuffer);
38565 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38566 index 5a5d092..265c5ed 100644
38567 --- a/drivers/video/geode/gx1fb_core.c
38568 +++ b/drivers/video/geode/gx1fb_core.c
38569 @@ -29,7 +29,7 @@ static int crt_option = 1;
38570 static char panel_option[32] = "";
38571
38572 /* Modes relevant to the GX1 (taken from modedb.c) */
38573 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38574 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38575 /* 640x480-60 VESA */
38576 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38577 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38578 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38579 index 0fad23f..0e9afa4 100644
38580 --- a/drivers/video/gxt4500.c
38581 +++ b/drivers/video/gxt4500.c
38582 @@ -156,7 +156,7 @@ struct gxt4500_par {
38583 static char *mode_option;
38584
38585 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38586 -static const struct fb_videomode defaultmode __devinitdata = {
38587 +static const struct fb_videomode defaultmode __devinitconst = {
38588 .refresh = 60,
38589 .xres = 1280,
38590 .yres = 1024,
38591 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38592 return 0;
38593 }
38594
38595 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38596 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38597 .id = "IBM GXT4500P",
38598 .type = FB_TYPE_PACKED_PIXELS,
38599 .visual = FB_VISUAL_PSEUDOCOLOR,
38600 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38601 index 7672d2e..b56437f 100644
38602 --- a/drivers/video/i810/i810_accel.c
38603 +++ b/drivers/video/i810/i810_accel.c
38604 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38605 }
38606 }
38607 printk("ringbuffer lockup!!!\n");
38608 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38609 i810_report_error(mmio);
38610 par->dev_flags |= LOCKUP;
38611 info->pixmap.scan_align = 1;
38612 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38613 index b83f361..2b05a91 100644
38614 --- a/drivers/video/i810/i810_main.c
38615 +++ b/drivers/video/i810/i810_main.c
38616 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38617 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38618
38619 /* PCI */
38620 -static const char *i810_pci_list[] __devinitdata = {
38621 +static const char *i810_pci_list[] __devinitconst = {
38622 "Intel(R) 810 Framebuffer Device" ,
38623 "Intel(R) 810-DC100 Framebuffer Device" ,
38624 "Intel(R) 810E Framebuffer Device" ,
38625 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38626 index de36693..3c63fc2 100644
38627 --- a/drivers/video/jz4740_fb.c
38628 +++ b/drivers/video/jz4740_fb.c
38629 @@ -136,7 +136,7 @@ struct jzfb {
38630 uint32_t pseudo_palette[16];
38631 };
38632
38633 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38634 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38635 .id = "JZ4740 FB",
38636 .type = FB_TYPE_PACKED_PIXELS,
38637 .visual = FB_VISUAL_TRUECOLOR,
38638 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38639 index 3c14e43..eafa544 100644
38640 --- a/drivers/video/logo/logo_linux_clut224.ppm
38641 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38642 @@ -1,1604 +1,1123 @@
38643 P3
38644 -# Standard 224-color Linux logo
38645 80 80
38646 255
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 6 6 6 6 6 6 10 10 10 10 10 10
38657 - 10 10 10 6 6 6 6 6 6 6 6 6
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 6 6 6 10 10 10 14 14 14
38676 - 22 22 22 26 26 26 30 30 30 34 34 34
38677 - 30 30 30 30 30 30 26 26 26 18 18 18
38678 - 14 14 14 10 10 10 6 6 6 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 1 0 0 1 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 6 6 6 14 14 14 26 26 26 42 42 42
38696 - 54 54 54 66 66 66 78 78 78 78 78 78
38697 - 78 78 78 74 74 74 66 66 66 54 54 54
38698 - 42 42 42 26 26 26 18 18 18 10 10 10
38699 - 6 6 6 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 1 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 10 10 10
38715 - 22 22 22 42 42 42 66 66 66 86 86 86
38716 - 66 66 66 38 38 38 38 38 38 22 22 22
38717 - 26 26 26 34 34 34 54 54 54 66 66 66
38718 - 86 86 86 70 70 70 46 46 46 26 26 26
38719 - 14 14 14 6 6 6 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 1 0 0 1 0 0 1 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 10 10 10 26 26 26
38735 - 50 50 50 82 82 82 58 58 58 6 6 6
38736 - 2 2 6 2 2 6 2 2 6 2 2 6
38737 - 2 2 6 2 2 6 2 2 6 2 2 6
38738 - 6 6 6 54 54 54 86 86 86 66 66 66
38739 - 38 38 38 18 18 18 6 6 6 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 6 6 6 22 22 22 50 50 50
38755 - 78 78 78 34 34 34 2 2 6 2 2 6
38756 - 2 2 6 2 2 6 2 2 6 2 2 6
38757 - 2 2 6 2 2 6 2 2 6 2 2 6
38758 - 2 2 6 2 2 6 6 6 6 70 70 70
38759 - 78 78 78 46 46 46 22 22 22 6 6 6
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 1 0 0 1 0 0 1 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 6 6 6 18 18 18 42 42 42 82 82 82
38775 - 26 26 26 2 2 6 2 2 6 2 2 6
38776 - 2 2 6 2 2 6 2 2 6 2 2 6
38777 - 2 2 6 2 2 6 2 2 6 14 14 14
38778 - 46 46 46 34 34 34 6 6 6 2 2 6
38779 - 42 42 42 78 78 78 42 42 42 18 18 18
38780 - 6 6 6 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 1 0 0 0 0 0 1 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 10 10 10 30 30 30 66 66 66 58 58 58
38795 - 2 2 6 2 2 6 2 2 6 2 2 6
38796 - 2 2 6 2 2 6 2 2 6 2 2 6
38797 - 2 2 6 2 2 6 2 2 6 26 26 26
38798 - 86 86 86 101 101 101 46 46 46 10 10 10
38799 - 2 2 6 58 58 58 70 70 70 34 34 34
38800 - 10 10 10 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 1 0 0 1 0 0 1 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 14 14 14 42 42 42 86 86 86 10 10 10
38815 - 2 2 6 2 2 6 2 2 6 2 2 6
38816 - 2 2 6 2 2 6 2 2 6 2 2 6
38817 - 2 2 6 2 2 6 2 2 6 30 30 30
38818 - 94 94 94 94 94 94 58 58 58 26 26 26
38819 - 2 2 6 6 6 6 78 78 78 54 54 54
38820 - 22 22 22 6 6 6 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 6 6 6
38834 - 22 22 22 62 62 62 62 62 62 2 2 6
38835 - 2 2 6 2 2 6 2 2 6 2 2 6
38836 - 2 2 6 2 2 6 2 2 6 2 2 6
38837 - 2 2 6 2 2 6 2 2 6 26 26 26
38838 - 54 54 54 38 38 38 18 18 18 10 10 10
38839 - 2 2 6 2 2 6 34 34 34 82 82 82
38840 - 38 38 38 14 14 14 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 1 0 0 1 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 0 0 0 0 0 0 0 0 0 6 6 6
38854 - 30 30 30 78 78 78 30 30 30 2 2 6
38855 - 2 2 6 2 2 6 2 2 6 2 2 6
38856 - 2 2 6 2 2 6 2 2 6 2 2 6
38857 - 2 2 6 2 2 6 2 2 6 10 10 10
38858 - 10 10 10 2 2 6 2 2 6 2 2 6
38859 - 2 2 6 2 2 6 2 2 6 78 78 78
38860 - 50 50 50 18 18 18 6 6 6 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 1 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 0 0 0
38873 - 0 0 0 0 0 0 0 0 0 10 10 10
38874 - 38 38 38 86 86 86 14 14 14 2 2 6
38875 - 2 2 6 2 2 6 2 2 6 2 2 6
38876 - 2 2 6 2 2 6 2 2 6 2 2 6
38877 - 2 2 6 2 2 6 2 2 6 2 2 6
38878 - 2 2 6 2 2 6 2 2 6 2 2 6
38879 - 2 2 6 2 2 6 2 2 6 54 54 54
38880 - 66 66 66 26 26 26 6 6 6 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 1 0 0 1 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 0 0 0
38893 - 0 0 0 0 0 0 0 0 0 14 14 14
38894 - 42 42 42 82 82 82 2 2 6 2 2 6
38895 - 2 2 6 6 6 6 10 10 10 2 2 6
38896 - 2 2 6 2 2 6 2 2 6 2 2 6
38897 - 2 2 6 2 2 6 2 2 6 6 6 6
38898 - 14 14 14 10 10 10 2 2 6 2 2 6
38899 - 2 2 6 2 2 6 2 2 6 18 18 18
38900 - 82 82 82 34 34 34 10 10 10 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 1 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 14 14 14
38914 - 46 46 46 86 86 86 2 2 6 2 2 6
38915 - 6 6 6 6 6 6 22 22 22 34 34 34
38916 - 6 6 6 2 2 6 2 2 6 2 2 6
38917 - 2 2 6 2 2 6 18 18 18 34 34 34
38918 - 10 10 10 50 50 50 22 22 22 2 2 6
38919 - 2 2 6 2 2 6 2 2 6 10 10 10
38920 - 86 86 86 42 42 42 14 14 14 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 1 0 0 1 0 0 1 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 0 0 0
38933 - 0 0 0 0 0 0 0 0 0 14 14 14
38934 - 46 46 46 86 86 86 2 2 6 2 2 6
38935 - 38 38 38 116 116 116 94 94 94 22 22 22
38936 - 22 22 22 2 2 6 2 2 6 2 2 6
38937 - 14 14 14 86 86 86 138 138 138 162 162 162
38938 -154 154 154 38 38 38 26 26 26 6 6 6
38939 - 2 2 6 2 2 6 2 2 6 2 2 6
38940 - 86 86 86 46 46 46 14 14 14 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 0 0 0 0 0 0 0 14 14 14
38954 - 46 46 46 86 86 86 2 2 6 14 14 14
38955 -134 134 134 198 198 198 195 195 195 116 116 116
38956 - 10 10 10 2 2 6 2 2 6 6 6 6
38957 -101 98 89 187 187 187 210 210 210 218 218 218
38958 -214 214 214 134 134 134 14 14 14 6 6 6
38959 - 2 2 6 2 2 6 2 2 6 2 2 6
38960 - 86 86 86 50 50 50 18 18 18 6 6 6
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 0 0 0 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 1 0 0 0
38968 - 0 0 1 0 0 1 0 0 1 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 0 0 0
38973 - 0 0 0 0 0 0 0 0 0 14 14 14
38974 - 46 46 46 86 86 86 2 2 6 54 54 54
38975 -218 218 218 195 195 195 226 226 226 246 246 246
38976 - 58 58 58 2 2 6 2 2 6 30 30 30
38977 -210 210 210 253 253 253 174 174 174 123 123 123
38978 -221 221 221 234 234 234 74 74 74 2 2 6
38979 - 2 2 6 2 2 6 2 2 6 2 2 6
38980 - 70 70 70 58 58 58 22 22 22 6 6 6
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 0 0 0 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 14 14 14
38994 - 46 46 46 82 82 82 2 2 6 106 106 106
38995 -170 170 170 26 26 26 86 86 86 226 226 226
38996 -123 123 123 10 10 10 14 14 14 46 46 46
38997 -231 231 231 190 190 190 6 6 6 70 70 70
38998 - 90 90 90 238 238 238 158 158 158 2 2 6
38999 - 2 2 6 2 2 6 2 2 6 2 2 6
39000 - 70 70 70 58 58 58 22 22 22 6 6 6
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 0 0 0 0 0 0 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 1 0 0 0
39008 - 0 0 1 0 0 1 0 0 1 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 0 0 0
39013 - 0 0 0 0 0 0 0 0 0 14 14 14
39014 - 42 42 42 86 86 86 6 6 6 116 116 116
39015 -106 106 106 6 6 6 70 70 70 149 149 149
39016 -128 128 128 18 18 18 38 38 38 54 54 54
39017 -221 221 221 106 106 106 2 2 6 14 14 14
39018 - 46 46 46 190 190 190 198 198 198 2 2 6
39019 - 2 2 6 2 2 6 2 2 6 2 2 6
39020 - 74 74 74 62 62 62 22 22 22 6 6 6
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 0 0 0
39023 - 0 0 0 0 0 0 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 1 0 0 0
39028 - 0 0 1 0 0 0 0 0 1 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 0 0 0 0 0 0 0 14 14 14
39034 - 42 42 42 94 94 94 14 14 14 101 101 101
39035 -128 128 128 2 2 6 18 18 18 116 116 116
39036 -118 98 46 121 92 8 121 92 8 98 78 10
39037 -162 162 162 106 106 106 2 2 6 2 2 6
39038 - 2 2 6 195 195 195 195 195 195 6 6 6
39039 - 2 2 6 2 2 6 2 2 6 2 2 6
39040 - 74 74 74 62 62 62 22 22 22 6 6 6
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 0 0 0
39043 - 0 0 0 0 0 0 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 1 0 0 1
39048 - 0 0 1 0 0 0 0 0 1 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 0 0 0 0 10 10 10
39054 - 38 38 38 90 90 90 14 14 14 58 58 58
39055 -210 210 210 26 26 26 54 38 6 154 114 10
39056 -226 170 11 236 186 11 225 175 15 184 144 12
39057 -215 174 15 175 146 61 37 26 9 2 2 6
39058 - 70 70 70 246 246 246 138 138 138 2 2 6
39059 - 2 2 6 2 2 6 2 2 6 2 2 6
39060 - 70 70 70 66 66 66 26 26 26 6 6 6
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 0 0 0
39063 - 0 0 0 0 0 0 0 0 0 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 0 0 0 0 0 0 0 10 10 10
39074 - 38 38 38 86 86 86 14 14 14 10 10 10
39075 -195 195 195 188 164 115 192 133 9 225 175 15
39076 -239 182 13 234 190 10 232 195 16 232 200 30
39077 -245 207 45 241 208 19 232 195 16 184 144 12
39078 -218 194 134 211 206 186 42 42 42 2 2 6
39079 - 2 2 6 2 2 6 2 2 6 2 2 6
39080 - 50 50 50 74 74 74 30 30 30 6 6 6
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 0 0 0 0 0 0
39083 - 0 0 0 0 0 0 0 0 0 0 0 0
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 0 0 0 0 0 0 0 0 0 10 10 10
39094 - 34 34 34 86 86 86 14 14 14 2 2 6
39095 -121 87 25 192 133 9 219 162 10 239 182 13
39096 -236 186 11 232 195 16 241 208 19 244 214 54
39097 -246 218 60 246 218 38 246 215 20 241 208 19
39098 -241 208 19 226 184 13 121 87 25 2 2 6
39099 - 2 2 6 2 2 6 2 2 6 2 2 6
39100 - 50 50 50 82 82 82 34 34 34 10 10 10
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 0 0 0 0 0 0
39103 - 0 0 0 0 0 0 0 0 0 0 0 0
39104 - 0 0 0 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 0 0 0
39113 - 0 0 0 0 0 0 0 0 0 10 10 10
39114 - 34 34 34 82 82 82 30 30 30 61 42 6
39115 -180 123 7 206 145 10 230 174 11 239 182 13
39116 -234 190 10 238 202 15 241 208 19 246 218 74
39117 -246 218 38 246 215 20 246 215 20 246 215 20
39118 -226 184 13 215 174 15 184 144 12 6 6 6
39119 - 2 2 6 2 2 6 2 2 6 2 2 6
39120 - 26 26 26 94 94 94 42 42 42 14 14 14
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 0 0 0 0 0 0 0 0 0
39123 - 0 0 0 0 0 0 0 0 0 0 0 0
39124 - 0 0 0 0 0 0 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 0 0 0 0
39133 - 0 0 0 0 0 0 0 0 0 10 10 10
39134 - 30 30 30 78 78 78 50 50 50 104 69 6
39135 -192 133 9 216 158 10 236 178 12 236 186 11
39136 -232 195 16 241 208 19 244 214 54 245 215 43
39137 -246 215 20 246 215 20 241 208 19 198 155 10
39138 -200 144 11 216 158 10 156 118 10 2 2 6
39139 - 2 2 6 2 2 6 2 2 6 2 2 6
39140 - 6 6 6 90 90 90 54 54 54 18 18 18
39141 - 6 6 6 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 0 0 0 0 0 0 0 0 0
39143 - 0 0 0 0 0 0 0 0 0 0 0 0
39144 - 0 0 0 0 0 0 0 0 0 0 0 0
39145 - 0 0 0 0 0 0 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 0 0 0
39153 - 0 0 0 0 0 0 0 0 0 10 10 10
39154 - 30 30 30 78 78 78 46 46 46 22 22 22
39155 -137 92 6 210 162 10 239 182 13 238 190 10
39156 -238 202 15 241 208 19 246 215 20 246 215 20
39157 -241 208 19 203 166 17 185 133 11 210 150 10
39158 -216 158 10 210 150 10 102 78 10 2 2 6
39159 - 6 6 6 54 54 54 14 14 14 2 2 6
39160 - 2 2 6 62 62 62 74 74 74 30 30 30
39161 - 10 10 10 0 0 0 0 0 0 0 0 0
39162 - 0 0 0 0 0 0 0 0 0 0 0 0
39163 - 0 0 0 0 0 0 0 0 0 0 0 0
39164 - 0 0 0 0 0 0 0 0 0 0 0 0
39165 - 0 0 0 0 0 0 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 0 0 0 0
39173 - 0 0 0 0 0 0 0 0 0 10 10 10
39174 - 34 34 34 78 78 78 50 50 50 6 6 6
39175 - 94 70 30 139 102 15 190 146 13 226 184 13
39176 -232 200 30 232 195 16 215 174 15 190 146 13
39177 -168 122 10 192 133 9 210 150 10 213 154 11
39178 -202 150 34 182 157 106 101 98 89 2 2 6
39179 - 2 2 6 78 78 78 116 116 116 58 58 58
39180 - 2 2 6 22 22 22 90 90 90 46 46 46
39181 - 18 18 18 6 6 6 0 0 0 0 0 0
39182 - 0 0 0 0 0 0 0 0 0 0 0 0
39183 - 0 0 0 0 0 0 0 0 0 0 0 0
39184 - 0 0 0 0 0 0 0 0 0 0 0 0
39185 - 0 0 0 0 0 0 0 0 0 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 0 0 0 0
39193 - 0 0 0 0 0 0 0 0 0 10 10 10
39194 - 38 38 38 86 86 86 50 50 50 6 6 6
39195 -128 128 128 174 154 114 156 107 11 168 122 10
39196 -198 155 10 184 144 12 197 138 11 200 144 11
39197 -206 145 10 206 145 10 197 138 11 188 164 115
39198 -195 195 195 198 198 198 174 174 174 14 14 14
39199 - 2 2 6 22 22 22 116 116 116 116 116 116
39200 - 22 22 22 2 2 6 74 74 74 70 70 70
39201 - 30 30 30 10 10 10 0 0 0 0 0 0
39202 - 0 0 0 0 0 0 0 0 0 0 0 0
39203 - 0 0 0 0 0 0 0 0 0 0 0 0
39204 - 0 0 0 0 0 0 0 0 0 0 0 0
39205 - 0 0 0 0 0 0 0 0 0 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 0 0 0 0
39213 - 0 0 0 0 0 0 6 6 6 18 18 18
39214 - 50 50 50 101 101 101 26 26 26 10 10 10
39215 -138 138 138 190 190 190 174 154 114 156 107 11
39216 -197 138 11 200 144 11 197 138 11 192 133 9
39217 -180 123 7 190 142 34 190 178 144 187 187 187
39218 -202 202 202 221 221 221 214 214 214 66 66 66
39219 - 2 2 6 2 2 6 50 50 50 62 62 62
39220 - 6 6 6 2 2 6 10 10 10 90 90 90
39221 - 50 50 50 18 18 18 6 6 6 0 0 0
39222 - 0 0 0 0 0 0 0 0 0 0 0 0
39223 - 0 0 0 0 0 0 0 0 0 0 0 0
39224 - 0 0 0 0 0 0 0 0 0 0 0 0
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 0 0 0
39233 - 0 0 0 0 0 0 10 10 10 34 34 34
39234 - 74 74 74 74 74 74 2 2 6 6 6 6
39235 -144 144 144 198 198 198 190 190 190 178 166 146
39236 -154 121 60 156 107 11 156 107 11 168 124 44
39237 -174 154 114 187 187 187 190 190 190 210 210 210
39238 -246 246 246 253 253 253 253 253 253 182 182 182
39239 - 6 6 6 2 2 6 2 2 6 2 2 6
39240 - 2 2 6 2 2 6 2 2 6 62 62 62
39241 - 74 74 74 34 34 34 14 14 14 0 0 0
39242 - 0 0 0 0 0 0 0 0 0 0 0 0
39243 - 0 0 0 0 0 0 0 0 0 0 0 0
39244 - 0 0 0 0 0 0 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 0 0 0
39253 - 0 0 0 10 10 10 22 22 22 54 54 54
39254 - 94 94 94 18 18 18 2 2 6 46 46 46
39255 -234 234 234 221 221 221 190 190 190 190 190 190
39256 -190 190 190 187 187 187 187 187 187 190 190 190
39257 -190 190 190 195 195 195 214 214 214 242 242 242
39258 -253 253 253 253 253 253 253 253 253 253 253 253
39259 - 82 82 82 2 2 6 2 2 6 2 2 6
39260 - 2 2 6 2 2 6 2 2 6 14 14 14
39261 - 86 86 86 54 54 54 22 22 22 6 6 6
39262 - 0 0 0 0 0 0 0 0 0 0 0 0
39263 - 0 0 0 0 0 0 0 0 0 0 0 0
39264 - 0 0 0 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 0 0 0
39273 - 6 6 6 18 18 18 46 46 46 90 90 90
39274 - 46 46 46 18 18 18 6 6 6 182 182 182
39275 -253 253 253 246 246 246 206 206 206 190 190 190
39276 -190 190 190 190 190 190 190 190 190 190 190 190
39277 -206 206 206 231 231 231 250 250 250 253 253 253
39278 -253 253 253 253 253 253 253 253 253 253 253 253
39279 -202 202 202 14 14 14 2 2 6 2 2 6
39280 - 2 2 6 2 2 6 2 2 6 2 2 6
39281 - 42 42 42 86 86 86 42 42 42 18 18 18
39282 - 6 6 6 0 0 0 0 0 0 0 0 0
39283 - 0 0 0 0 0 0 0 0 0 0 0 0
39284 - 0 0 0 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 6 6 6
39293 - 14 14 14 38 38 38 74 74 74 66 66 66
39294 - 2 2 6 6 6 6 90 90 90 250 250 250
39295 -253 253 253 253 253 253 238 238 238 198 198 198
39296 -190 190 190 190 190 190 195 195 195 221 221 221
39297 -246 246 246 253 253 253 253 253 253 253 253 253
39298 -253 253 253 253 253 253 253 253 253 253 253 253
39299 -253 253 253 82 82 82 2 2 6 2 2 6
39300 - 2 2 6 2 2 6 2 2 6 2 2 6
39301 - 2 2 6 78 78 78 70 70 70 34 34 34
39302 - 14 14 14 6 6 6 0 0 0 0 0 0
39303 - 0 0 0 0 0 0 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 0 0 0 14 14 14
39313 - 34 34 34 66 66 66 78 78 78 6 6 6
39314 - 2 2 6 18 18 18 218 218 218 253 253 253
39315 -253 253 253 253 253 253 253 253 253 246 246 246
39316 -226 226 226 231 231 231 246 246 246 253 253 253
39317 -253 253 253 253 253 253 253 253 253 253 253 253
39318 -253 253 253 253 253 253 253 253 253 253 253 253
39319 -253 253 253 178 178 178 2 2 6 2 2 6
39320 - 2 2 6 2 2 6 2 2 6 2 2 6
39321 - 2 2 6 18 18 18 90 90 90 62 62 62
39322 - 30 30 30 10 10 10 0 0 0 0 0 0
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 0 0 0 10 10 10 26 26 26
39333 - 58 58 58 90 90 90 18 18 18 2 2 6
39334 - 2 2 6 110 110 110 253 253 253 253 253 253
39335 -253 253 253 253 253 253 253 253 253 253 253 253
39336 -250 250 250 253 253 253 253 253 253 253 253 253
39337 -253 253 253 253 253 253 253 253 253 253 253 253
39338 -253 253 253 253 253 253 253 253 253 253 253 253
39339 -253 253 253 231 231 231 18 18 18 2 2 6
39340 - 2 2 6 2 2 6 2 2 6 2 2 6
39341 - 2 2 6 2 2 6 18 18 18 94 94 94
39342 - 54 54 54 26 26 26 10 10 10 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 0 0 0 6 6 6 22 22 22 50 50 50
39353 - 90 90 90 26 26 26 2 2 6 2 2 6
39354 - 14 14 14 195 195 195 250 250 250 253 253 253
39355 -253 253 253 253 253 253 253 253 253 253 253 253
39356 -253 253 253 253 253 253 253 253 253 253 253 253
39357 -253 253 253 253 253 253 253 253 253 253 253 253
39358 -253 253 253 253 253 253 253 253 253 253 253 253
39359 -250 250 250 242 242 242 54 54 54 2 2 6
39360 - 2 2 6 2 2 6 2 2 6 2 2 6
39361 - 2 2 6 2 2 6 2 2 6 38 38 38
39362 - 86 86 86 50 50 50 22 22 22 6 6 6
39363 - 0 0 0 0 0 0 0 0 0 0 0 0
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 6 6 6 14 14 14 38 38 38 82 82 82
39373 - 34 34 34 2 2 6 2 2 6 2 2 6
39374 - 42 42 42 195 195 195 246 246 246 253 253 253
39375 -253 253 253 253 253 253 253 253 253 250 250 250
39376 -242 242 242 242 242 242 250 250 250 253 253 253
39377 -253 253 253 253 253 253 253 253 253 253 253 253
39378 -253 253 253 250 250 250 246 246 246 238 238 238
39379 -226 226 226 231 231 231 101 101 101 6 6 6
39380 - 2 2 6 2 2 6 2 2 6 2 2 6
39381 - 2 2 6 2 2 6 2 2 6 2 2 6
39382 - 38 38 38 82 82 82 42 42 42 14 14 14
39383 - 6 6 6 0 0 0 0 0 0 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 10 10 10 26 26 26 62 62 62 66 66 66
39393 - 2 2 6 2 2 6 2 2 6 6 6 6
39394 - 70 70 70 170 170 170 206 206 206 234 234 234
39395 -246 246 246 250 250 250 250 250 250 238 238 238
39396 -226 226 226 231 231 231 238 238 238 250 250 250
39397 -250 250 250 250 250 250 246 246 246 231 231 231
39398 -214 214 214 206 206 206 202 202 202 202 202 202
39399 -198 198 198 202 202 202 182 182 182 18 18 18
39400 - 2 2 6 2 2 6 2 2 6 2 2 6
39401 - 2 2 6 2 2 6 2 2 6 2 2 6
39402 - 2 2 6 62 62 62 66 66 66 30 30 30
39403 - 10 10 10 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 14 14 14 42 42 42 82 82 82 18 18 18
39413 - 2 2 6 2 2 6 2 2 6 10 10 10
39414 - 94 94 94 182 182 182 218 218 218 242 242 242
39415 -250 250 250 253 253 253 253 253 253 250 250 250
39416 -234 234 234 253 253 253 253 253 253 253 253 253
39417 -253 253 253 253 253 253 253 253 253 246 246 246
39418 -238 238 238 226 226 226 210 210 210 202 202 202
39419 -195 195 195 195 195 195 210 210 210 158 158 158
39420 - 6 6 6 14 14 14 50 50 50 14 14 14
39421 - 2 2 6 2 2 6 2 2 6 2 2 6
39422 - 2 2 6 6 6 6 86 86 86 46 46 46
39423 - 18 18 18 6 6 6 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 6 6 6
39432 - 22 22 22 54 54 54 70 70 70 2 2 6
39433 - 2 2 6 10 10 10 2 2 6 22 22 22
39434 -166 166 166 231 231 231 250 250 250 253 253 253
39435 -253 253 253 253 253 253 253 253 253 250 250 250
39436 -242 242 242 253 253 253 253 253 253 253 253 253
39437 -253 253 253 253 253 253 253 253 253 253 253 253
39438 -253 253 253 253 253 253 253 253 253 246 246 246
39439 -231 231 231 206 206 206 198 198 198 226 226 226
39440 - 94 94 94 2 2 6 6 6 6 38 38 38
39441 - 30 30 30 2 2 6 2 2 6 2 2 6
39442 - 2 2 6 2 2 6 62 62 62 66 66 66
39443 - 26 26 26 10 10 10 0 0 0 0 0 0
39444 - 0 0 0 0 0 0 0 0 0 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 - 0 0 0 0 0 0 0 0 0 0 0 0
39447 - 0 0 0 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 10 10 10
39452 - 30 30 30 74 74 74 50 50 50 2 2 6
39453 - 26 26 26 26 26 26 2 2 6 106 106 106
39454 -238 238 238 253 253 253 253 253 253 253 253 253
39455 -253 253 253 253 253 253 253 253 253 253 253 253
39456 -253 253 253 253 253 253 253 253 253 253 253 253
39457 -253 253 253 253 253 253 253 253 253 253 253 253
39458 -253 253 253 253 253 253 253 253 253 253 253 253
39459 -253 253 253 246 246 246 218 218 218 202 202 202
39460 -210 210 210 14 14 14 2 2 6 2 2 6
39461 - 30 30 30 22 22 22 2 2 6 2 2 6
39462 - 2 2 6 2 2 6 18 18 18 86 86 86
39463 - 42 42 42 14 14 14 0 0 0 0 0 0
39464 - 0 0 0 0 0 0 0 0 0 0 0 0
39465 - 0 0 0 0 0 0 0 0 0 0 0 0
39466 - 0 0 0 0 0 0 0 0 0 0 0 0
39467 - 0 0 0 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 14 14 14
39472 - 42 42 42 90 90 90 22 22 22 2 2 6
39473 - 42 42 42 2 2 6 18 18 18 218 218 218
39474 -253 253 253 253 253 253 253 253 253 253 253 253
39475 -253 253 253 253 253 253 253 253 253 253 253 253
39476 -253 253 253 253 253 253 253 253 253 253 253 253
39477 -253 253 253 253 253 253 253 253 253 253 253 253
39478 -253 253 253 253 253 253 253 253 253 253 253 253
39479 -253 253 253 253 253 253 250 250 250 221 221 221
39480 -218 218 218 101 101 101 2 2 6 14 14 14
39481 - 18 18 18 38 38 38 10 10 10 2 2 6
39482 - 2 2 6 2 2 6 2 2 6 78 78 78
39483 - 58 58 58 22 22 22 6 6 6 0 0 0
39484 - 0 0 0 0 0 0 0 0 0 0 0 0
39485 - 0 0 0 0 0 0 0 0 0 0 0 0
39486 - 0 0 0 0 0 0 0 0 0 0 0 0
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 6 6 6 18 18 18
39492 - 54 54 54 82 82 82 2 2 6 26 26 26
39493 - 22 22 22 2 2 6 123 123 123 253 253 253
39494 -253 253 253 253 253 253 253 253 253 253 253 253
39495 -253 253 253 253 253 253 253 253 253 253 253 253
39496 -253 253 253 253 253 253 253 253 253 253 253 253
39497 -253 253 253 253 253 253 253 253 253 253 253 253
39498 -253 253 253 253 253 253 253 253 253 253 253 253
39499 -253 253 253 253 253 253 253 253 253 250 250 250
39500 -238 238 238 198 198 198 6 6 6 38 38 38
39501 - 58 58 58 26 26 26 38 38 38 2 2 6
39502 - 2 2 6 2 2 6 2 2 6 46 46 46
39503 - 78 78 78 30 30 30 10 10 10 0 0 0
39504 - 0 0 0 0 0 0 0 0 0 0 0 0
39505 - 0 0 0 0 0 0 0 0 0 0 0 0
39506 - 0 0 0 0 0 0 0 0 0 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 10 10 10 30 30 30
39512 - 74 74 74 58 58 58 2 2 6 42 42 42
39513 - 2 2 6 22 22 22 231 231 231 253 253 253
39514 -253 253 253 253 253 253 253 253 253 253 253 253
39515 -253 253 253 253 253 253 253 253 253 250 250 250
39516 -253 253 253 253 253 253 253 253 253 253 253 253
39517 -253 253 253 253 253 253 253 253 253 253 253 253
39518 -253 253 253 253 253 253 253 253 253 253 253 253
39519 -253 253 253 253 253 253 253 253 253 253 253 253
39520 -253 253 253 246 246 246 46 46 46 38 38 38
39521 - 42 42 42 14 14 14 38 38 38 14 14 14
39522 - 2 2 6 2 2 6 2 2 6 6 6 6
39523 - 86 86 86 46 46 46 14 14 14 0 0 0
39524 - 0 0 0 0 0 0 0 0 0 0 0 0
39525 - 0 0 0 0 0 0 0 0 0 0 0 0
39526 - 0 0 0 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 6 6 6 14 14 14 42 42 42
39532 - 90 90 90 18 18 18 18 18 18 26 26 26
39533 - 2 2 6 116 116 116 253 253 253 253 253 253
39534 -253 253 253 253 253 253 253 253 253 253 253 253
39535 -253 253 253 253 253 253 250 250 250 238 238 238
39536 -253 253 253 253 253 253 253 253 253 253 253 253
39537 -253 253 253 253 253 253 253 253 253 253 253 253
39538 -253 253 253 253 253 253 253 253 253 253 253 253
39539 -253 253 253 253 253 253 253 253 253 253 253 253
39540 -253 253 253 253 253 253 94 94 94 6 6 6
39541 - 2 2 6 2 2 6 10 10 10 34 34 34
39542 - 2 2 6 2 2 6 2 2 6 2 2 6
39543 - 74 74 74 58 58 58 22 22 22 6 6 6
39544 - 0 0 0 0 0 0 0 0 0 0 0 0
39545 - 0 0 0 0 0 0 0 0 0 0 0 0
39546 - 0 0 0 0 0 0 0 0 0 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 10 10 10 26 26 26 66 66 66
39552 - 82 82 82 2 2 6 38 38 38 6 6 6
39553 - 14 14 14 210 210 210 253 253 253 253 253 253
39554 -253 253 253 253 253 253 253 253 253 253 253 253
39555 -253 253 253 253 253 253 246 246 246 242 242 242
39556 -253 253 253 253 253 253 253 253 253 253 253 253
39557 -253 253 253 253 253 253 253 253 253 253 253 253
39558 -253 253 253 253 253 253 253 253 253 253 253 253
39559 -253 253 253 253 253 253 253 253 253 253 253 253
39560 -253 253 253 253 253 253 144 144 144 2 2 6
39561 - 2 2 6 2 2 6 2 2 6 46 46 46
39562 - 2 2 6 2 2 6 2 2 6 2 2 6
39563 - 42 42 42 74 74 74 30 30 30 10 10 10
39564 - 0 0 0 0 0 0 0 0 0 0 0 0
39565 - 0 0 0 0 0 0 0 0 0 0 0 0
39566 - 0 0 0 0 0 0 0 0 0 0 0 0
39567 - 0 0 0 0 0 0 0 0 0 0 0 0
39568 - 0 0 0 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 6 6 6 14 14 14 42 42 42 90 90 90
39572 - 26 26 26 6 6 6 42 42 42 2 2 6
39573 - 74 74 74 250 250 250 253 253 253 253 253 253
39574 -253 253 253 253 253 253 253 253 253 253 253 253
39575 -253 253 253 253 253 253 242 242 242 242 242 242
39576 -253 253 253 253 253 253 253 253 253 253 253 253
39577 -253 253 253 253 253 253 253 253 253 253 253 253
39578 -253 253 253 253 253 253 253 253 253 253 253 253
39579 -253 253 253 253 253 253 253 253 253 253 253 253
39580 -253 253 253 253 253 253 182 182 182 2 2 6
39581 - 2 2 6 2 2 6 2 2 6 46 46 46
39582 - 2 2 6 2 2 6 2 2 6 2 2 6
39583 - 10 10 10 86 86 86 38 38 38 10 10 10
39584 - 0 0 0 0 0 0 0 0 0 0 0 0
39585 - 0 0 0 0 0 0 0 0 0 0 0 0
39586 - 0 0 0 0 0 0 0 0 0 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 10 10 10 26 26 26 66 66 66 82 82 82
39592 - 2 2 6 22 22 22 18 18 18 2 2 6
39593 -149 149 149 253 253 253 253 253 253 253 253 253
39594 -253 253 253 253 253 253 253 253 253 253 253 253
39595 -253 253 253 253 253 253 234 234 234 242 242 242
39596 -253 253 253 253 253 253 253 253 253 253 253 253
39597 -253 253 253 253 253 253 253 253 253 253 253 253
39598 -253 253 253 253 253 253 253 253 253 253 253 253
39599 -253 253 253 253 253 253 253 253 253 253 253 253
39600 -253 253 253 253 253 253 206 206 206 2 2 6
39601 - 2 2 6 2 2 6 2 2 6 38 38 38
39602 - 2 2 6 2 2 6 2 2 6 2 2 6
39603 - 6 6 6 86 86 86 46 46 46 14 14 14
39604 - 0 0 0 0 0 0 0 0 0 0 0 0
39605 - 0 0 0 0 0 0 0 0 0 0 0 0
39606 - 0 0 0 0 0 0 0 0 0 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 6 6 6
39611 - 18 18 18 46 46 46 86 86 86 18 18 18
39612 - 2 2 6 34 34 34 10 10 10 6 6 6
39613 -210 210 210 253 253 253 253 253 253 253 253 253
39614 -253 253 253 253 253 253 253 253 253 253 253 253
39615 -253 253 253 253 253 253 234 234 234 242 242 242
39616 -253 253 253 253 253 253 253 253 253 253 253 253
39617 -253 253 253 253 253 253 253 253 253 253 253 253
39618 -253 253 253 253 253 253 253 253 253 253 253 253
39619 -253 253 253 253 253 253 253 253 253 253 253 253
39620 -253 253 253 253 253 253 221 221 221 6 6 6
39621 - 2 2 6 2 2 6 6 6 6 30 30 30
39622 - 2 2 6 2 2 6 2 2 6 2 2 6
39623 - 2 2 6 82 82 82 54 54 54 18 18 18
39624 - 6 6 6 0 0 0 0 0 0 0 0 0
39625 - 0 0 0 0 0 0 0 0 0 0 0 0
39626 - 0 0 0 0 0 0 0 0 0 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 10 10 10
39631 - 26 26 26 66 66 66 62 62 62 2 2 6
39632 - 2 2 6 38 38 38 10 10 10 26 26 26
39633 -238 238 238 253 253 253 253 253 253 253 253 253
39634 -253 253 253 253 253 253 253 253 253 253 253 253
39635 -253 253 253 253 253 253 231 231 231 238 238 238
39636 -253 253 253 253 253 253 253 253 253 253 253 253
39637 -253 253 253 253 253 253 253 253 253 253 253 253
39638 -253 253 253 253 253 253 253 253 253 253 253 253
39639 -253 253 253 253 253 253 253 253 253 253 253 253
39640 -253 253 253 253 253 253 231 231 231 6 6 6
39641 - 2 2 6 2 2 6 10 10 10 30 30 30
39642 - 2 2 6 2 2 6 2 2 6 2 2 6
39643 - 2 2 6 66 66 66 58 58 58 22 22 22
39644 - 6 6 6 0 0 0 0 0 0 0 0 0
39645 - 0 0 0 0 0 0 0 0 0 0 0 0
39646 - 0 0 0 0 0 0 0 0 0 0 0 0
39647 - 0 0 0 0 0 0 0 0 0 0 0 0
39648 - 0 0 0 0 0 0 0 0 0 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 10 10 10
39651 - 38 38 38 78 78 78 6 6 6 2 2 6
39652 - 2 2 6 46 46 46 14 14 14 42 42 42
39653 -246 246 246 253 253 253 253 253 253 253 253 253
39654 -253 253 253 253 253 253 253 253 253 253 253 253
39655 -253 253 253 253 253 253 231 231 231 242 242 242
39656 -253 253 253 253 253 253 253 253 253 253 253 253
39657 -253 253 253 253 253 253 253 253 253 253 253 253
39658 -253 253 253 253 253 253 253 253 253 253 253 253
39659 -253 253 253 253 253 253 253 253 253 253 253 253
39660 -253 253 253 253 253 253 234 234 234 10 10 10
39661 - 2 2 6 2 2 6 22 22 22 14 14 14
39662 - 2 2 6 2 2 6 2 2 6 2 2 6
39663 - 2 2 6 66 66 66 62 62 62 22 22 22
39664 - 6 6 6 0 0 0 0 0 0 0 0 0
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 6 6 6 18 18 18
39671 - 50 50 50 74 74 74 2 2 6 2 2 6
39672 - 14 14 14 70 70 70 34 34 34 62 62 62
39673 -250 250 250 253 253 253 253 253 253 253 253 253
39674 -253 253 253 253 253 253 253 253 253 253 253 253
39675 -253 253 253 253 253 253 231 231 231 246 246 246
39676 -253 253 253 253 253 253 253 253 253 253 253 253
39677 -253 253 253 253 253 253 253 253 253 253 253 253
39678 -253 253 253 253 253 253 253 253 253 253 253 253
39679 -253 253 253 253 253 253 253 253 253 253 253 253
39680 -253 253 253 253 253 253 234 234 234 14 14 14
39681 - 2 2 6 2 2 6 30 30 30 2 2 6
39682 - 2 2 6 2 2 6 2 2 6 2 2 6
39683 - 2 2 6 66 66 66 62 62 62 22 22 22
39684 - 6 6 6 0 0 0 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 6 6 6 18 18 18
39691 - 54 54 54 62 62 62 2 2 6 2 2 6
39692 - 2 2 6 30 30 30 46 46 46 70 70 70
39693 -250 250 250 253 253 253 253 253 253 253 253 253
39694 -253 253 253 253 253 253 253 253 253 253 253 253
39695 -253 253 253 253 253 253 231 231 231 246 246 246
39696 -253 253 253 253 253 253 253 253 253 253 253 253
39697 -253 253 253 253 253 253 253 253 253 253 253 253
39698 -253 253 253 253 253 253 253 253 253 253 253 253
39699 -253 253 253 253 253 253 253 253 253 253 253 253
39700 -253 253 253 253 253 253 226 226 226 10 10 10
39701 - 2 2 6 6 6 6 30 30 30 2 2 6
39702 - 2 2 6 2 2 6 2 2 6 2 2 6
39703 - 2 2 6 66 66 66 58 58 58 22 22 22
39704 - 6 6 6 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 0 0 0 0 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 6 6 6 22 22 22
39711 - 58 58 58 62 62 62 2 2 6 2 2 6
39712 - 2 2 6 2 2 6 30 30 30 78 78 78
39713 -250 250 250 253 253 253 253 253 253 253 253 253
39714 -253 253 253 253 253 253 253 253 253 253 253 253
39715 -253 253 253 253 253 253 231 231 231 246 246 246
39716 -253 253 253 253 253 253 253 253 253 253 253 253
39717 -253 253 253 253 253 253 253 253 253 253 253 253
39718 -253 253 253 253 253 253 253 253 253 253 253 253
39719 -253 253 253 253 253 253 253 253 253 253 253 253
39720 -253 253 253 253 253 253 206 206 206 2 2 6
39721 - 22 22 22 34 34 34 18 14 6 22 22 22
39722 - 26 26 26 18 18 18 6 6 6 2 2 6
39723 - 2 2 6 82 82 82 54 54 54 18 18 18
39724 - 6 6 6 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 0 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 6 6 6 26 26 26
39731 - 62 62 62 106 106 106 74 54 14 185 133 11
39732 -210 162 10 121 92 8 6 6 6 62 62 62
39733 -238 238 238 253 253 253 253 253 253 253 253 253
39734 -253 253 253 253 253 253 253 253 253 253 253 253
39735 -253 253 253 253 253 253 231 231 231 246 246 246
39736 -253 253 253 253 253 253 253 253 253 253 253 253
39737 -253 253 253 253 253 253 253 253 253 253 253 253
39738 -253 253 253 253 253 253 253 253 253 253 253 253
39739 -253 253 253 253 253 253 253 253 253 253 253 253
39740 -253 253 253 253 253 253 158 158 158 18 18 18
39741 - 14 14 14 2 2 6 2 2 6 2 2 6
39742 - 6 6 6 18 18 18 66 66 66 38 38 38
39743 - 6 6 6 94 94 94 50 50 50 18 18 18
39744 - 6 6 6 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 0 0 0 0 0 0 0 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 6 6 6
39750 - 10 10 10 10 10 10 18 18 18 38 38 38
39751 - 78 78 78 142 134 106 216 158 10 242 186 14
39752 -246 190 14 246 190 14 156 118 10 10 10 10
39753 - 90 90 90 238 238 238 253 253 253 253 253 253
39754 -253 253 253 253 253 253 253 253 253 253 253 253
39755 -253 253 253 253 253 253 231 231 231 250 250 250
39756 -253 253 253 253 253 253 253 253 253 253 253 253
39757 -253 253 253 253 253 253 253 253 253 253 253 253
39758 -253 253 253 253 253 253 253 253 253 253 253 253
39759 -253 253 253 253 253 253 253 253 253 246 230 190
39760 -238 204 91 238 204 91 181 142 44 37 26 9
39761 - 2 2 6 2 2 6 2 2 6 2 2 6
39762 - 2 2 6 2 2 6 38 38 38 46 46 46
39763 - 26 26 26 106 106 106 54 54 54 18 18 18
39764 - 6 6 6 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 6 6 6 14 14 14 22 22 22
39770 - 30 30 30 38 38 38 50 50 50 70 70 70
39771 -106 106 106 190 142 34 226 170 11 242 186 14
39772 -246 190 14 246 190 14 246 190 14 154 114 10
39773 - 6 6 6 74 74 74 226 226 226 253 253 253
39774 -253 253 253 253 253 253 253 253 253 253 253 253
39775 -253 253 253 253 253 253 231 231 231 250 250 250
39776 -253 253 253 253 253 253 253 253 253 253 253 253
39777 -253 253 253 253 253 253 253 253 253 253 253 253
39778 -253 253 253 253 253 253 253 253 253 253 253 253
39779 -253 253 253 253 253 253 253 253 253 228 184 62
39780 -241 196 14 241 208 19 232 195 16 38 30 10
39781 - 2 2 6 2 2 6 2 2 6 2 2 6
39782 - 2 2 6 6 6 6 30 30 30 26 26 26
39783 -203 166 17 154 142 90 66 66 66 26 26 26
39784 - 6 6 6 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 0 0 0 0 0 0 0 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 6 6 6 18 18 18 38 38 38 58 58 58
39790 - 78 78 78 86 86 86 101 101 101 123 123 123
39791 -175 146 61 210 150 10 234 174 13 246 186 14
39792 -246 190 14 246 190 14 246 190 14 238 190 10
39793 -102 78 10 2 2 6 46 46 46 198 198 198
39794 -253 253 253 253 253 253 253 253 253 253 253 253
39795 -253 253 253 253 253 253 234 234 234 242 242 242
39796 -253 253 253 253 253 253 253 253 253 253 253 253
39797 -253 253 253 253 253 253 253 253 253 253 253 253
39798 -253 253 253 253 253 253 253 253 253 253 253 253
39799 -253 253 253 253 253 253 253 253 253 224 178 62
39800 -242 186 14 241 196 14 210 166 10 22 18 6
39801 - 2 2 6 2 2 6 2 2 6 2 2 6
39802 - 2 2 6 2 2 6 6 6 6 121 92 8
39803 -238 202 15 232 195 16 82 82 82 34 34 34
39804 - 10 10 10 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 0 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 14 14 14 38 38 38 70 70 70 154 122 46
39810 -190 142 34 200 144 11 197 138 11 197 138 11
39811 -213 154 11 226 170 11 242 186 14 246 190 14
39812 -246 190 14 246 190 14 246 190 14 246 190 14
39813 -225 175 15 46 32 6 2 2 6 22 22 22
39814 -158 158 158 250 250 250 253 253 253 253 253 253
39815 -253 253 253 253 253 253 253 253 253 253 253 253
39816 -253 253 253 253 253 253 253 253 253 253 253 253
39817 -253 253 253 253 253 253 253 253 253 253 253 253
39818 -253 253 253 253 253 253 253 253 253 253 253 253
39819 -253 253 253 250 250 250 242 242 242 224 178 62
39820 -239 182 13 236 186 11 213 154 11 46 32 6
39821 - 2 2 6 2 2 6 2 2 6 2 2 6
39822 - 2 2 6 2 2 6 61 42 6 225 175 15
39823 -238 190 10 236 186 11 112 100 78 42 42 42
39824 - 14 14 14 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 0 0 0 0 0 0 0 0 0 6 6 6
39829 - 22 22 22 54 54 54 154 122 46 213 154 11
39830 -226 170 11 230 174 11 226 170 11 226 170 11
39831 -236 178 12 242 186 14 246 190 14 246 190 14
39832 -246 190 14 246 190 14 246 190 14 246 190 14
39833 -241 196 14 184 144 12 10 10 10 2 2 6
39834 - 6 6 6 116 116 116 242 242 242 253 253 253
39835 -253 253 253 253 253 253 253 253 253 253 253 253
39836 -253 253 253 253 253 253 253 253 253 253 253 253
39837 -253 253 253 253 253 253 253 253 253 253 253 253
39838 -253 253 253 253 253 253 253 253 253 253 253 253
39839 -253 253 253 231 231 231 198 198 198 214 170 54
39840 -236 178 12 236 178 12 210 150 10 137 92 6
39841 - 18 14 6 2 2 6 2 2 6 2 2 6
39842 - 6 6 6 70 47 6 200 144 11 236 178 12
39843 -239 182 13 239 182 13 124 112 88 58 58 58
39844 - 22 22 22 6 6 6 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 0 0 0
39847 - 0 0 0 0 0 0 0 0 0 0 0 0
39848 - 0 0 0 0 0 0 0 0 0 10 10 10
39849 - 30 30 30 70 70 70 180 133 36 226 170 11
39850 -239 182 13 242 186 14 242 186 14 246 186 14
39851 -246 190 14 246 190 14 246 190 14 246 190 14
39852 -246 190 14 246 190 14 246 190 14 246 190 14
39853 -246 190 14 232 195 16 98 70 6 2 2 6
39854 - 2 2 6 2 2 6 66 66 66 221 221 221
39855 -253 253 253 253 253 253 253 253 253 253 253 253
39856 -253 253 253 253 253 253 253 253 253 253 253 253
39857 -253 253 253 253 253 253 253 253 253 253 253 253
39858 -253 253 253 253 253 253 253 253 253 253 253 253
39859 -253 253 253 206 206 206 198 198 198 214 166 58
39860 -230 174 11 230 174 11 216 158 10 192 133 9
39861 -163 110 8 116 81 8 102 78 10 116 81 8
39862 -167 114 7 197 138 11 226 170 11 239 182 13
39863 -242 186 14 242 186 14 162 146 94 78 78 78
39864 - 34 34 34 14 14 14 6 6 6 0 0 0
39865 - 0 0 0 0 0 0 0 0 0 0 0 0
39866 - 0 0 0 0 0 0 0 0 0 0 0 0
39867 - 0 0 0 0 0 0 0 0 0 0 0 0
39868 - 0 0 0 0 0 0 0 0 0 6 6 6
39869 - 30 30 30 78 78 78 190 142 34 226 170 11
39870 -239 182 13 246 190 14 246 190 14 246 190 14
39871 -246 190 14 246 190 14 246 190 14 246 190 14
39872 -246 190 14 246 190 14 246 190 14 246 190 14
39873 -246 190 14 241 196 14 203 166 17 22 18 6
39874 - 2 2 6 2 2 6 2 2 6 38 38 38
39875 -218 218 218 253 253 253 253 253 253 253 253 253
39876 -253 253 253 253 253 253 253 253 253 253 253 253
39877 -253 253 253 253 253 253 253 253 253 253 253 253
39878 -253 253 253 253 253 253 253 253 253 253 253 253
39879 -250 250 250 206 206 206 198 198 198 202 162 69
39880 -226 170 11 236 178 12 224 166 10 210 150 10
39881 -200 144 11 197 138 11 192 133 9 197 138 11
39882 -210 150 10 226 170 11 242 186 14 246 190 14
39883 -246 190 14 246 186 14 225 175 15 124 112 88
39884 - 62 62 62 30 30 30 14 14 14 6 6 6
39885 - 0 0 0 0 0 0 0 0 0 0 0 0
39886 - 0 0 0 0 0 0 0 0 0 0 0 0
39887 - 0 0 0 0 0 0 0 0 0 0 0 0
39888 - 0 0 0 0 0 0 0 0 0 10 10 10
39889 - 30 30 30 78 78 78 174 135 50 224 166 10
39890 -239 182 13 246 190 14 246 190 14 246 190 14
39891 -246 190 14 246 190 14 246 190 14 246 190 14
39892 -246 190 14 246 190 14 246 190 14 246 190 14
39893 -246 190 14 246 190 14 241 196 14 139 102 15
39894 - 2 2 6 2 2 6 2 2 6 2 2 6
39895 - 78 78 78 250 250 250 253 253 253 253 253 253
39896 -253 253 253 253 253 253 253 253 253 253 253 253
39897 -253 253 253 253 253 253 253 253 253 253 253 253
39898 -253 253 253 253 253 253 253 253 253 253 253 253
39899 -250 250 250 214 214 214 198 198 198 190 150 46
39900 -219 162 10 236 178 12 234 174 13 224 166 10
39901 -216 158 10 213 154 11 213 154 11 216 158 10
39902 -226 170 11 239 182 13 246 190 14 246 190 14
39903 -246 190 14 246 190 14 242 186 14 206 162 42
39904 -101 101 101 58 58 58 30 30 30 14 14 14
39905 - 6 6 6 0 0 0 0 0 0 0 0 0
39906 - 0 0 0 0 0 0 0 0 0 0 0 0
39907 - 0 0 0 0 0 0 0 0 0 0 0 0
39908 - 0 0 0 0 0 0 0 0 0 10 10 10
39909 - 30 30 30 74 74 74 174 135 50 216 158 10
39910 -236 178 12 246 190 14 246 190 14 246 190 14
39911 -246 190 14 246 190 14 246 190 14 246 190 14
39912 -246 190 14 246 190 14 246 190 14 246 190 14
39913 -246 190 14 246 190 14 241 196 14 226 184 13
39914 - 61 42 6 2 2 6 2 2 6 2 2 6
39915 - 22 22 22 238 238 238 253 253 253 253 253 253
39916 -253 253 253 253 253 253 253 253 253 253 253 253
39917 -253 253 253 253 253 253 253 253 253 253 253 253
39918 -253 253 253 253 253 253 253 253 253 253 253 253
39919 -253 253 253 226 226 226 187 187 187 180 133 36
39920 -216 158 10 236 178 12 239 182 13 236 178 12
39921 -230 174 11 226 170 11 226 170 11 230 174 11
39922 -236 178 12 242 186 14 246 190 14 246 190 14
39923 -246 190 14 246 190 14 246 186 14 239 182 13
39924 -206 162 42 106 106 106 66 66 66 34 34 34
39925 - 14 14 14 6 6 6 0 0 0 0 0 0
39926 - 0 0 0 0 0 0 0 0 0 0 0 0
39927 - 0 0 0 0 0 0 0 0 0 0 0 0
39928 - 0 0 0 0 0 0 0 0 0 6 6 6
39929 - 26 26 26 70 70 70 163 133 67 213 154 11
39930 -236 178 12 246 190 14 246 190 14 246 190 14
39931 -246 190 14 246 190 14 246 190 14 246 190 14
39932 -246 190 14 246 190 14 246 190 14 246 190 14
39933 -246 190 14 246 190 14 246 190 14 241 196 14
39934 -190 146 13 18 14 6 2 2 6 2 2 6
39935 - 46 46 46 246 246 246 253 253 253 253 253 253
39936 -253 253 253 253 253 253 253 253 253 253 253 253
39937 -253 253 253 253 253 253 253 253 253 253 253 253
39938 -253 253 253 253 253 253 253 253 253 253 253 253
39939 -253 253 253 221 221 221 86 86 86 156 107 11
39940 -216 158 10 236 178 12 242 186 14 246 186 14
39941 -242 186 14 239 182 13 239 182 13 242 186 14
39942 -242 186 14 246 186 14 246 190 14 246 190 14
39943 -246 190 14 246 190 14 246 190 14 246 190 14
39944 -242 186 14 225 175 15 142 122 72 66 66 66
39945 - 30 30 30 10 10 10 0 0 0 0 0 0
39946 - 0 0 0 0 0 0 0 0 0 0 0 0
39947 - 0 0 0 0 0 0 0 0 0 0 0 0
39948 - 0 0 0 0 0 0 0 0 0 6 6 6
39949 - 26 26 26 70 70 70 163 133 67 210 150 10
39950 -236 178 12 246 190 14 246 190 14 246 190 14
39951 -246 190 14 246 190 14 246 190 14 246 190 14
39952 -246 190 14 246 190 14 246 190 14 246 190 14
39953 -246 190 14 246 190 14 246 190 14 246 190 14
39954 -232 195 16 121 92 8 34 34 34 106 106 106
39955 -221 221 221 253 253 253 253 253 253 253 253 253
39956 -253 253 253 253 253 253 253 253 253 253 253 253
39957 -253 253 253 253 253 253 253 253 253 253 253 253
39958 -253 253 253 253 253 253 253 253 253 253 253 253
39959 -242 242 242 82 82 82 18 14 6 163 110 8
39960 -216 158 10 236 178 12 242 186 14 246 190 14
39961 -246 190 14 246 190 14 246 190 14 246 190 14
39962 -246 190 14 246 190 14 246 190 14 246 190 14
39963 -246 190 14 246 190 14 246 190 14 246 190 14
39964 -246 190 14 246 190 14 242 186 14 163 133 67
39965 - 46 46 46 18 18 18 6 6 6 0 0 0
39966 - 0 0 0 0 0 0 0 0 0 0 0 0
39967 - 0 0 0 0 0 0 0 0 0 0 0 0
39968 - 0 0 0 0 0 0 0 0 0 10 10 10
39969 - 30 30 30 78 78 78 163 133 67 210 150 10
39970 -236 178 12 246 186 14 246 190 14 246 190 14
39971 -246 190 14 246 190 14 246 190 14 246 190 14
39972 -246 190 14 246 190 14 246 190 14 246 190 14
39973 -246 190 14 246 190 14 246 190 14 246 190 14
39974 -241 196 14 215 174 15 190 178 144 253 253 253
39975 -253 253 253 253 253 253 253 253 253 253 253 253
39976 -253 253 253 253 253 253 253 253 253 253 253 253
39977 -253 253 253 253 253 253 253 253 253 253 253 253
39978 -253 253 253 253 253 253 253 253 253 218 218 218
39979 - 58 58 58 2 2 6 22 18 6 167 114 7
39980 -216 158 10 236 178 12 246 186 14 246 190 14
39981 -246 190 14 246 190 14 246 190 14 246 190 14
39982 -246 190 14 246 190 14 246 190 14 246 190 14
39983 -246 190 14 246 190 14 246 190 14 246 190 14
39984 -246 190 14 246 186 14 242 186 14 190 150 46
39985 - 54 54 54 22 22 22 6 6 6 0 0 0
39986 - 0 0 0 0 0 0 0 0 0 0 0 0
39987 - 0 0 0 0 0 0 0 0 0 0 0 0
39988 - 0 0 0 0 0 0 0 0 0 14 14 14
39989 - 38 38 38 86 86 86 180 133 36 213 154 11
39990 -236 178 12 246 186 14 246 190 14 246 190 14
39991 -246 190 14 246 190 14 246 190 14 246 190 14
39992 -246 190 14 246 190 14 246 190 14 246 190 14
39993 -246 190 14 246 190 14 246 190 14 246 190 14
39994 -246 190 14 232 195 16 190 146 13 214 214 214
39995 -253 253 253 253 253 253 253 253 253 253 253 253
39996 -253 253 253 253 253 253 253 253 253 253 253 253
39997 -253 253 253 253 253 253 253 253 253 253 253 253
39998 -253 253 253 250 250 250 170 170 170 26 26 26
39999 - 2 2 6 2 2 6 37 26 9 163 110 8
40000 -219 162 10 239 182 13 246 186 14 246 190 14
40001 -246 190 14 246 190 14 246 190 14 246 190 14
40002 -246 190 14 246 190 14 246 190 14 246 190 14
40003 -246 190 14 246 190 14 246 190 14 246 190 14
40004 -246 186 14 236 178 12 224 166 10 142 122 72
40005 - 46 46 46 18 18 18 6 6 6 0 0 0
40006 - 0 0 0 0 0 0 0 0 0 0 0 0
40007 - 0 0 0 0 0 0 0 0 0 0 0 0
40008 - 0 0 0 0 0 0 6 6 6 18 18 18
40009 - 50 50 50 109 106 95 192 133 9 224 166 10
40010 -242 186 14 246 190 14 246 190 14 246 190 14
40011 -246 190 14 246 190 14 246 190 14 246 190 14
40012 -246 190 14 246 190 14 246 190 14 246 190 14
40013 -246 190 14 246 190 14 246 190 14 246 190 14
40014 -242 186 14 226 184 13 210 162 10 142 110 46
40015 -226 226 226 253 253 253 253 253 253 253 253 253
40016 -253 253 253 253 253 253 253 253 253 253 253 253
40017 -253 253 253 253 253 253 253 253 253 253 253 253
40018 -198 198 198 66 66 66 2 2 6 2 2 6
40019 - 2 2 6 2 2 6 50 34 6 156 107 11
40020 -219 162 10 239 182 13 246 186 14 246 190 14
40021 -246 190 14 246 190 14 246 190 14 246 190 14
40022 -246 190 14 246 190 14 246 190 14 246 190 14
40023 -246 190 14 246 190 14 246 190 14 242 186 14
40024 -234 174 13 213 154 11 154 122 46 66 66 66
40025 - 30 30 30 10 10 10 0 0 0 0 0 0
40026 - 0 0 0 0 0 0 0 0 0 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 0 0 0
40028 - 0 0 0 0 0 0 6 6 6 22 22 22
40029 - 58 58 58 154 121 60 206 145 10 234 174 13
40030 -242 186 14 246 186 14 246 190 14 246 190 14
40031 -246 190 14 246 190 14 246 190 14 246 190 14
40032 -246 190 14 246 190 14 246 190 14 246 190 14
40033 -246 190 14 246 190 14 246 190 14 246 190 14
40034 -246 186 14 236 178 12 210 162 10 163 110 8
40035 - 61 42 6 138 138 138 218 218 218 250 250 250
40036 -253 253 253 253 253 253 253 253 253 250 250 250
40037 -242 242 242 210 210 210 144 144 144 66 66 66
40038 - 6 6 6 2 2 6 2 2 6 2 2 6
40039 - 2 2 6 2 2 6 61 42 6 163 110 8
40040 -216 158 10 236 178 12 246 190 14 246 190 14
40041 -246 190 14 246 190 14 246 190 14 246 190 14
40042 -246 190 14 246 190 14 246 190 14 246 190 14
40043 -246 190 14 239 182 13 230 174 11 216 158 10
40044 -190 142 34 124 112 88 70 70 70 38 38 38
40045 - 18 18 18 6 6 6 0 0 0 0 0 0
40046 - 0 0 0 0 0 0 0 0 0 0 0 0
40047 - 0 0 0 0 0 0 0 0 0 0 0 0
40048 - 0 0 0 0 0 0 6 6 6 22 22 22
40049 - 62 62 62 168 124 44 206 145 10 224 166 10
40050 -236 178 12 239 182 13 242 186 14 242 186 14
40051 -246 186 14 246 190 14 246 190 14 246 190 14
40052 -246 190 14 246 190 14 246 190 14 246 190 14
40053 -246 190 14 246 190 14 246 190 14 246 190 14
40054 -246 190 14 236 178 12 216 158 10 175 118 6
40055 - 80 54 7 2 2 6 6 6 6 30 30 30
40056 - 54 54 54 62 62 62 50 50 50 38 38 38
40057 - 14 14 14 2 2 6 2 2 6 2 2 6
40058 - 2 2 6 2 2 6 2 2 6 2 2 6
40059 - 2 2 6 6 6 6 80 54 7 167 114 7
40060 -213 154 11 236 178 12 246 190 14 246 190 14
40061 -246 190 14 246 190 14 246 190 14 246 190 14
40062 -246 190 14 242 186 14 239 182 13 239 182 13
40063 -230 174 11 210 150 10 174 135 50 124 112 88
40064 - 82 82 82 54 54 54 34 34 34 18 18 18
40065 - 6 6 6 0 0 0 0 0 0 0 0 0
40066 - 0 0 0 0 0 0 0 0 0 0 0 0
40067 - 0 0 0 0 0 0 0 0 0 0 0 0
40068 - 0 0 0 0 0 0 6 6 6 18 18 18
40069 - 50 50 50 158 118 36 192 133 9 200 144 11
40070 -216 158 10 219 162 10 224 166 10 226 170 11
40071 -230 174 11 236 178 12 239 182 13 239 182 13
40072 -242 186 14 246 186 14 246 190 14 246 190 14
40073 -246 190 14 246 190 14 246 190 14 246 190 14
40074 -246 186 14 230 174 11 210 150 10 163 110 8
40075 -104 69 6 10 10 10 2 2 6 2 2 6
40076 - 2 2 6 2 2 6 2 2 6 2 2 6
40077 - 2 2 6 2 2 6 2 2 6 2 2 6
40078 - 2 2 6 2 2 6 2 2 6 2 2 6
40079 - 2 2 6 6 6 6 91 60 6 167 114 7
40080 -206 145 10 230 174 11 242 186 14 246 190 14
40081 -246 190 14 246 190 14 246 186 14 242 186 14
40082 -239 182 13 230 174 11 224 166 10 213 154 11
40083 -180 133 36 124 112 88 86 86 86 58 58 58
40084 - 38 38 38 22 22 22 10 10 10 6 6 6
40085 - 0 0 0 0 0 0 0 0 0 0 0 0
40086 - 0 0 0 0 0 0 0 0 0 0 0 0
40087 - 0 0 0 0 0 0 0 0 0 0 0 0
40088 - 0 0 0 0 0 0 0 0 0 14 14 14
40089 - 34 34 34 70 70 70 138 110 50 158 118 36
40090 -167 114 7 180 123 7 192 133 9 197 138 11
40091 -200 144 11 206 145 10 213 154 11 219 162 10
40092 -224 166 10 230 174 11 239 182 13 242 186 14
40093 -246 186 14 246 186 14 246 186 14 246 186 14
40094 -239 182 13 216 158 10 185 133 11 152 99 6
40095 -104 69 6 18 14 6 2 2 6 2 2 6
40096 - 2 2 6 2 2 6 2 2 6 2 2 6
40097 - 2 2 6 2 2 6 2 2 6 2 2 6
40098 - 2 2 6 2 2 6 2 2 6 2 2 6
40099 - 2 2 6 6 6 6 80 54 7 152 99 6
40100 -192 133 9 219 162 10 236 178 12 239 182 13
40101 -246 186 14 242 186 14 239 182 13 236 178 12
40102 -224 166 10 206 145 10 192 133 9 154 121 60
40103 - 94 94 94 62 62 62 42 42 42 22 22 22
40104 - 14 14 14 6 6 6 0 0 0 0 0 0
40105 - 0 0 0 0 0 0 0 0 0 0 0 0
40106 - 0 0 0 0 0 0 0 0 0 0 0 0
40107 - 0 0 0 0 0 0 0 0 0 0 0 0
40108 - 0 0 0 0 0 0 0 0 0 6 6 6
40109 - 18 18 18 34 34 34 58 58 58 78 78 78
40110 -101 98 89 124 112 88 142 110 46 156 107 11
40111 -163 110 8 167 114 7 175 118 6 180 123 7
40112 -185 133 11 197 138 11 210 150 10 219 162 10
40113 -226 170 11 236 178 12 236 178 12 234 174 13
40114 -219 162 10 197 138 11 163 110 8 130 83 6
40115 - 91 60 6 10 10 10 2 2 6 2 2 6
40116 - 18 18 18 38 38 38 38 38 38 38 38 38
40117 - 38 38 38 38 38 38 38 38 38 38 38 38
40118 - 38 38 38 38 38 38 26 26 26 2 2 6
40119 - 2 2 6 6 6 6 70 47 6 137 92 6
40120 -175 118 6 200 144 11 219 162 10 230 174 11
40121 -234 174 13 230 174 11 219 162 10 210 150 10
40122 -192 133 9 163 110 8 124 112 88 82 82 82
40123 - 50 50 50 30 30 30 14 14 14 6 6 6
40124 - 0 0 0 0 0 0 0 0 0 0 0 0
40125 - 0 0 0 0 0 0 0 0 0 0 0 0
40126 - 0 0 0 0 0 0 0 0 0 0 0 0
40127 - 0 0 0 0 0 0 0 0 0 0 0 0
40128 - 0 0 0 0 0 0 0 0 0 0 0 0
40129 - 6 6 6 14 14 14 22 22 22 34 34 34
40130 - 42 42 42 58 58 58 74 74 74 86 86 86
40131 -101 98 89 122 102 70 130 98 46 121 87 25
40132 -137 92 6 152 99 6 163 110 8 180 123 7
40133 -185 133 11 197 138 11 206 145 10 200 144 11
40134 -180 123 7 156 107 11 130 83 6 104 69 6
40135 - 50 34 6 54 54 54 110 110 110 101 98 89
40136 - 86 86 86 82 82 82 78 78 78 78 78 78
40137 - 78 78 78 78 78 78 78 78 78 78 78 78
40138 - 78 78 78 82 82 82 86 86 86 94 94 94
40139 -106 106 106 101 101 101 86 66 34 124 80 6
40140 -156 107 11 180 123 7 192 133 9 200 144 11
40141 -206 145 10 200 144 11 192 133 9 175 118 6
40142 -139 102 15 109 106 95 70 70 70 42 42 42
40143 - 22 22 22 10 10 10 0 0 0 0 0 0
40144 - 0 0 0 0 0 0 0 0 0 0 0 0
40145 - 0 0 0 0 0 0 0 0 0 0 0 0
40146 - 0 0 0 0 0 0 0 0 0 0 0 0
40147 - 0 0 0 0 0 0 0 0 0 0 0 0
40148 - 0 0 0 0 0 0 0 0 0 0 0 0
40149 - 0 0 0 0 0 0 6 6 6 10 10 10
40150 - 14 14 14 22 22 22 30 30 30 38 38 38
40151 - 50 50 50 62 62 62 74 74 74 90 90 90
40152 -101 98 89 112 100 78 121 87 25 124 80 6
40153 -137 92 6 152 99 6 152 99 6 152 99 6
40154 -138 86 6 124 80 6 98 70 6 86 66 30
40155 -101 98 89 82 82 82 58 58 58 46 46 46
40156 - 38 38 38 34 34 34 34 34 34 34 34 34
40157 - 34 34 34 34 34 34 34 34 34 34 34 34
40158 - 34 34 34 34 34 34 38 38 38 42 42 42
40159 - 54 54 54 82 82 82 94 86 76 91 60 6
40160 -134 86 6 156 107 11 167 114 7 175 118 6
40161 -175 118 6 167 114 7 152 99 6 121 87 25
40162 -101 98 89 62 62 62 34 34 34 18 18 18
40163 - 6 6 6 0 0 0 0 0 0 0 0 0
40164 - 0 0 0 0 0 0 0 0 0 0 0 0
40165 - 0 0 0 0 0 0 0 0 0 0 0 0
40166 - 0 0 0 0 0 0 0 0 0 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 0 0 0 0 0 0 0 0 0 0 0 0
40169 - 0 0 0 0 0 0 0 0 0 0 0 0
40170 - 0 0 0 6 6 6 6 6 6 10 10 10
40171 - 18 18 18 22 22 22 30 30 30 42 42 42
40172 - 50 50 50 66 66 66 86 86 86 101 98 89
40173 -106 86 58 98 70 6 104 69 6 104 69 6
40174 -104 69 6 91 60 6 82 62 34 90 90 90
40175 - 62 62 62 38 38 38 22 22 22 14 14 14
40176 - 10 10 10 10 10 10 10 10 10 10 10 10
40177 - 10 10 10 10 10 10 6 6 6 10 10 10
40178 - 10 10 10 10 10 10 10 10 10 14 14 14
40179 - 22 22 22 42 42 42 70 70 70 89 81 66
40180 - 80 54 7 104 69 6 124 80 6 137 92 6
40181 -134 86 6 116 81 8 100 82 52 86 86 86
40182 - 58 58 58 30 30 30 14 14 14 6 6 6
40183 - 0 0 0 0 0 0 0 0 0 0 0 0
40184 - 0 0 0 0 0 0 0 0 0 0 0 0
40185 - 0 0 0 0 0 0 0 0 0 0 0 0
40186 - 0 0 0 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 0 0 0 0 0 0
40189 - 0 0 0 0 0 0 0 0 0 0 0 0
40190 - 0 0 0 0 0 0 0 0 0 0 0 0
40191 - 0 0 0 6 6 6 10 10 10 14 14 14
40192 - 18 18 18 26 26 26 38 38 38 54 54 54
40193 - 70 70 70 86 86 86 94 86 76 89 81 66
40194 - 89 81 66 86 86 86 74 74 74 50 50 50
40195 - 30 30 30 14 14 14 6 6 6 0 0 0
40196 - 0 0 0 0 0 0 0 0 0 0 0 0
40197 - 0 0 0 0 0 0 0 0 0 0 0 0
40198 - 0 0 0 0 0 0 0 0 0 0 0 0
40199 - 6 6 6 18 18 18 34 34 34 58 58 58
40200 - 82 82 82 89 81 66 89 81 66 89 81 66
40201 - 94 86 66 94 86 76 74 74 74 50 50 50
40202 - 26 26 26 14 14 14 6 6 6 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 0 0 0
40204 - 0 0 0 0 0 0 0 0 0 0 0 0
40205 - 0 0 0 0 0 0 0 0 0 0 0 0
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 0 0 0
40209 - 0 0 0 0 0 0 0 0 0 0 0 0
40210 - 0 0 0 0 0 0 0 0 0 0 0 0
40211 - 0 0 0 0 0 0 0 0 0 0 0 0
40212 - 6 6 6 6 6 6 14 14 14 18 18 18
40213 - 30 30 30 38 38 38 46 46 46 54 54 54
40214 - 50 50 50 42 42 42 30 30 30 18 18 18
40215 - 10 10 10 0 0 0 0 0 0 0 0 0
40216 - 0 0 0 0 0 0 0 0 0 0 0 0
40217 - 0 0 0 0 0 0 0 0 0 0 0 0
40218 - 0 0 0 0 0 0 0 0 0 0 0 0
40219 - 0 0 0 6 6 6 14 14 14 26 26 26
40220 - 38 38 38 50 50 50 58 58 58 58 58 58
40221 - 54 54 54 42 42 42 30 30 30 18 18 18
40222 - 10 10 10 0 0 0 0 0 0 0 0 0
40223 - 0 0 0 0 0 0 0 0 0 0 0 0
40224 - 0 0 0 0 0 0 0 0 0 0 0 0
40225 - 0 0 0 0 0 0 0 0 0 0 0 0
40226 - 0 0 0 0 0 0 0 0 0 0 0 0
40227 - 0 0 0 0 0 0 0 0 0 0 0 0
40228 - 0 0 0 0 0 0 0 0 0 0 0 0
40229 - 0 0 0 0 0 0 0 0 0 0 0 0
40230 - 0 0 0 0 0 0 0 0 0 0 0 0
40231 - 0 0 0 0 0 0 0 0 0 0 0 0
40232 - 0 0 0 0 0 0 0 0 0 6 6 6
40233 - 6 6 6 10 10 10 14 14 14 18 18 18
40234 - 18 18 18 14 14 14 10 10 10 6 6 6
40235 - 0 0 0 0 0 0 0 0 0 0 0 0
40236 - 0 0 0 0 0 0 0 0 0 0 0 0
40237 - 0 0 0 0 0 0 0 0 0 0 0 0
40238 - 0 0 0 0 0 0 0 0 0 0 0 0
40239 - 0 0 0 0 0 0 0 0 0 6 6 6
40240 - 14 14 14 18 18 18 22 22 22 22 22 22
40241 - 18 18 18 14 14 14 10 10 10 6 6 6
40242 - 0 0 0 0 0 0 0 0 0 0 0 0
40243 - 0 0 0 0 0 0 0 0 0 0 0 0
40244 - 0 0 0 0 0 0 0 0 0 0 0 0
40245 - 0 0 0 0 0 0 0 0 0 0 0 0
40246 - 0 0 0 0 0 0 0 0 0 0 0 0
40247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260 +4 4 4 4 4 4
40261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274 +4 4 4 4 4 4
40275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4
40289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4
40303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 4 4 4
40317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330 +4 4 4 4 4 4
40331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40336 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40341 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40342 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344 +4 4 4 4 4 4
40345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40350 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40351 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40355 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40356 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40357 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358 +4 4 4 4 4 4
40359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40364 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40365 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40369 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40370 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40371 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40372 +4 4 4 4 4 4
40373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40377 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40378 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40379 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40382 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40383 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40384 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40385 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40386 +4 4 4 4 4 4
40387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40391 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40392 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40393 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40394 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40396 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40397 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40398 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40399 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40400 +4 4 4 4 4 4
40401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40404 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40405 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40406 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40407 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40408 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40409 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40410 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40411 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40412 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40413 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40414 +4 4 4 4 4 4
40415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40418 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40419 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40420 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40421 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40422 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40423 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40424 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40425 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40426 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40427 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40432 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40433 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40434 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40435 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40436 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40437 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40438 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40439 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40440 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40441 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40446 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40447 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40448 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40449 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40450 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40451 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40452 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40453 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40454 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40455 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40460 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40461 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40462 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40463 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40464 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40465 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40466 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40467 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40468 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40469 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40470 +4 4 4 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40473 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40474 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40475 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40476 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40477 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40478 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40479 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40480 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40481 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40482 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40483 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40484 +4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40487 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40488 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40489 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40490 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40491 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40492 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40493 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40494 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40495 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40496 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40497 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40498 +0 0 0 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40500 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40501 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40502 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40503 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40504 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40505 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40506 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40507 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40508 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40509 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40510 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40511 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40512 +2 0 0 0 0 0
40513 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40514 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40515 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40516 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40517 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40518 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40519 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40520 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40521 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40522 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40523 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40524 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40525 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40526 +37 38 37 0 0 0
40527 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40528 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40529 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40530 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40531 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40532 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40533 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40534 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40535 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40536 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40537 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40538 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40539 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40540 +85 115 134 4 0 0
40541 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40542 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40543 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40544 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40545 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40546 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40547 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40548 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40549 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40550 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40551 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40552 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40553 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40554 +60 73 81 4 0 0
40555 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40556 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40557 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40558 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40559 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40560 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40561 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40562 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40563 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40564 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40565 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40566 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40567 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40568 +16 19 21 4 0 0
40569 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40570 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40571 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40572 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40573 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40574 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40575 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40576 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40577 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40578 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40579 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40580 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40581 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40582 +4 0 0 4 3 3
40583 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40584 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40585 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40587 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40588 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40589 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40590 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40591 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40592 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40593 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40594 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40595 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40596 +3 2 2 4 4 4
40597 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40598 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40599 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40600 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40601 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40602 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40603 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40604 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40605 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40606 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40607 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40608 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40609 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40610 +4 4 4 4 4 4
40611 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40612 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40613 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40614 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40615 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40616 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40617 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40618 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40619 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40620 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40621 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40622 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40623 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40624 +4 4 4 4 4 4
40625 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40626 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40627 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40628 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40629 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40630 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40631 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40632 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40633 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40634 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40635 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40636 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40637 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40638 +5 5 5 5 5 5
40639 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40640 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40641 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40642 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40643 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40644 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40645 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40646 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40647 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40648 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40649 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40650 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40651 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40652 +5 5 5 4 4 4
40653 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40654 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40655 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40656 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40657 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40658 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40659 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40660 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40661 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40662 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40663 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40664 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666 +4 4 4 4 4 4
40667 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40668 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40669 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40670 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40671 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40672 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40673 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40674 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40675 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40676 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40677 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40678 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40680 +4 4 4 4 4 4
40681 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40682 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40683 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40684 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40685 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40686 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40687 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40688 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40689 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40690 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40691 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40694 +4 4 4 4 4 4
40695 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40696 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40697 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40698 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40699 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40700 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40701 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40702 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40703 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40704 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40705 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40708 +4 4 4 4 4 4
40709 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40710 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40711 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40712 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40713 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40714 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40715 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40716 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40717 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40718 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40719 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40722 +4 4 4 4 4 4
40723 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40724 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40725 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40726 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40727 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40728 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40729 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40730 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40731 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40732 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40733 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40736 +4 4 4 4 4 4
40737 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40738 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40739 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40740 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40741 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40742 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40743 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40744 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40745 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40746 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40747 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40750 +4 4 4 4 4 4
40751 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40752 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40753 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40754 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40755 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40756 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40757 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40758 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40759 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40760 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40761 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40764 +4 4 4 4 4 4
40765 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40766 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40767 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40768 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40769 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40770 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40771 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40772 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40773 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40774 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40775 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40778 +4 4 4 4 4 4
40779 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40780 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40781 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40782 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40783 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40784 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40785 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40786 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40787 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40788 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40789 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40792 +4 4 4 4 4 4
40793 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40794 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40795 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40796 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40797 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40798 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40799 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40800 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40801 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40802 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40803 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40806 +4 4 4 4 4 4
40807 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40808 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40809 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40810 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40811 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40812 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40813 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40814 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40815 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40816 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40817 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40820 +4 4 4 4 4 4
40821 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40822 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40823 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40824 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40825 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40826 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40827 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40828 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40829 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40830 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40831 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834 +4 4 4 4 4 4
40835 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40836 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40837 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40838 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40839 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40840 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40841 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40842 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40843 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40844 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40845 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848 +4 4 4 4 4 4
40849 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40850 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40851 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40852 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40853 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40854 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40855 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40856 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40857 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40858 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40859 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862 +4 4 4 4 4 4
40863 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40864 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40865 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40866 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40867 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40868 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40869 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40870 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40871 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40872 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40873 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876 +4 4 4 4 4 4
40877 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40878 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40879 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40880 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40881 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40882 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40883 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40884 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40885 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40886 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40887 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890 +4 4 4 4 4 4
40891 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40892 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40893 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40894 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40895 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40896 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40897 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40898 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40899 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40900 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40901 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904 +4 4 4 4 4 4
40905 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40906 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40907 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40908 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40909 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40910 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40911 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40912 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40913 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40914 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40915 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4
40919 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40920 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40921 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40922 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40923 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40924 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40925 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40926 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40927 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40928 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40929 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4
40933 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40934 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40935 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40936 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40937 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40938 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40939 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40940 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40941 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40942 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40943 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4
40947 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40948 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40949 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40950 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40951 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40952 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40953 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40954 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40955 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40956 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40957 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40960 +4 4 4 4 4 4
40961 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40962 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40963 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40964 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40965 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40966 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40967 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40968 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40969 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40970 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40971 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974 +4 4 4 4 4 4
40975 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40976 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40977 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40978 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40979 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40980 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40981 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40982 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40983 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40984 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40985 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988 +4 4 4 4 4 4
40989 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40990 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40991 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40992 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40993 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40994 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40995 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40996 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40997 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40998 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40999 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002 +4 4 4 4 4 4
41003 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41004 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41005 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41006 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41007 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41008 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41009 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41010 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41011 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41012 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41013 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41016 +4 4 4 4 4 4
41017 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41018 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41019 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41020 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41021 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41022 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41023 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41024 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41025 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41026 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41027 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41030 +4 4 4 4 4 4
41031 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41032 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41033 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41034 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41035 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41036 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41037 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41038 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41039 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41040 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41041 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044 +4 4 4 4 4 4
41045 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41046 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41047 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41048 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41049 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41050 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41051 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41052 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41053 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41054 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41055 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058 +4 4 4 4 4 4
41059 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41060 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41061 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41062 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41063 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41064 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41065 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41066 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41067 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41068 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072 +4 4 4 4 4 4
41073 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41074 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41075 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41076 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41077 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41078 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41079 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41080 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41081 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41082 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086 +4 4 4 4 4 4
41087 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41088 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41089 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41090 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41091 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41092 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41093 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41094 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41095 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41096 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41100 +4 4 4 4 4 4
41101 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41102 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41103 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41104 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41105 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41106 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41107 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41108 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41109 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41110 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41114 +4 4 4 4 4 4
41115 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41116 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41117 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41118 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41119 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41120 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41121 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41122 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41123 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41128 +4 4 4 4 4 4
41129 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41130 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41131 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41132 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41133 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41134 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41135 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41136 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41137 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41142 +4 4 4 4 4 4
41143 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41144 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41145 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41146 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41147 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41148 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41149 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41150 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41151 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41156 +4 4 4 4 4 4
41157 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41158 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41159 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41160 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41161 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41162 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41163 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41164 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41170 +4 4 4 4 4 4
41171 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41172 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41173 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41174 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41175 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41176 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41177 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41178 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41184 +4 4 4 4 4 4
41185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41186 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41187 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41188 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41189 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41190 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41191 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41192 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41198 +4 4 4 4 4 4
41199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41200 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41201 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41202 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41203 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41204 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41205 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41206 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41212 +4 4 4 4 4 4
41213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41214 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41215 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41216 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41217 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41218 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41219 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41220 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41226 +4 4 4 4 4 4
41227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41230 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41231 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41232 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41233 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41234 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41240 +4 4 4 4 4 4
41241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41244 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41245 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41246 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41247 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41254 +4 4 4 4 4 4
41255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41259 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41260 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41261 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268 +4 4 4 4 4 4
41269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41273 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41274 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41275 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282 +4 4 4 4 4 4
41283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41287 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41288 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41289 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296 +4 4 4 4 4 4
41297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41301 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41302 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41303 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310 +4 4 4 4 4 4
41311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41316 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41317 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 4 4 4
41325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41330 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41331 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338 +4 4 4 4 4 4
41339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41343 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41344 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352 +4 4 4 4 4 4
41353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41357 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41358 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366 +4 4 4 4 4 4
41367 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41368 index a159b63..4ab532d 100644
41369 --- a/drivers/video/udlfb.c
41370 +++ b/drivers/video/udlfb.c
41371 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41372 dlfb_urb_completion(urb);
41373
41374 error:
41375 - atomic_add(bytes_sent, &dev->bytes_sent);
41376 - atomic_add(bytes_identical, &dev->bytes_identical);
41377 - atomic_add(width*height*2, &dev->bytes_rendered);
41378 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41379 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41380 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41381 end_cycles = get_cycles();
41382 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41383 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41384 >> 10)), /* Kcycles */
41385 &dev->cpu_kcycles_used);
41386
41387 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41388 dlfb_urb_completion(urb);
41389
41390 error:
41391 - atomic_add(bytes_sent, &dev->bytes_sent);
41392 - atomic_add(bytes_identical, &dev->bytes_identical);
41393 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41394 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41395 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41396 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41397 end_cycles = get_cycles();
41398 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41399 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41400 >> 10)), /* Kcycles */
41401 &dev->cpu_kcycles_used);
41402 }
41403 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41404 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41405 struct dlfb_data *dev = fb_info->par;
41406 return snprintf(buf, PAGE_SIZE, "%u\n",
41407 - atomic_read(&dev->bytes_rendered));
41408 + atomic_read_unchecked(&dev->bytes_rendered));
41409 }
41410
41411 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41412 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41413 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41414 struct dlfb_data *dev = fb_info->par;
41415 return snprintf(buf, PAGE_SIZE, "%u\n",
41416 - atomic_read(&dev->bytes_identical));
41417 + atomic_read_unchecked(&dev->bytes_identical));
41418 }
41419
41420 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41421 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41422 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41423 struct dlfb_data *dev = fb_info->par;
41424 return snprintf(buf, PAGE_SIZE, "%u\n",
41425 - atomic_read(&dev->bytes_sent));
41426 + atomic_read_unchecked(&dev->bytes_sent));
41427 }
41428
41429 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41430 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41431 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41432 struct dlfb_data *dev = fb_info->par;
41433 return snprintf(buf, PAGE_SIZE, "%u\n",
41434 - atomic_read(&dev->cpu_kcycles_used));
41435 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41436 }
41437
41438 static ssize_t edid_show(
41439 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41440 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41441 struct dlfb_data *dev = fb_info->par;
41442
41443 - atomic_set(&dev->bytes_rendered, 0);
41444 - atomic_set(&dev->bytes_identical, 0);
41445 - atomic_set(&dev->bytes_sent, 0);
41446 - atomic_set(&dev->cpu_kcycles_used, 0);
41447 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41448 + atomic_set_unchecked(&dev->bytes_identical, 0);
41449 + atomic_set_unchecked(&dev->bytes_sent, 0);
41450 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41451
41452 return count;
41453 }
41454 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41455 index b0e2a42..e2df3ad 100644
41456 --- a/drivers/video/uvesafb.c
41457 +++ b/drivers/video/uvesafb.c
41458 @@ -19,6 +19,7 @@
41459 #include <linux/io.h>
41460 #include <linux/mutex.h>
41461 #include <linux/slab.h>
41462 +#include <linux/moduleloader.h>
41463 #include <video/edid.h>
41464 #include <video/uvesafb.h>
41465 #ifdef CONFIG_X86
41466 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41467 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41468 par->pmi_setpal = par->ypan = 0;
41469 } else {
41470 +
41471 +#ifdef CONFIG_PAX_KERNEXEC
41472 +#ifdef CONFIG_MODULES
41473 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41474 +#endif
41475 + if (!par->pmi_code) {
41476 + par->pmi_setpal = par->ypan = 0;
41477 + return 0;
41478 + }
41479 +#endif
41480 +
41481 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41482 + task->t.regs.edi);
41483 +
41484 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41485 + pax_open_kernel();
41486 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41487 + pax_close_kernel();
41488 +
41489 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41490 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41491 +#else
41492 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41493 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41494 +#endif
41495 +
41496 printk(KERN_INFO "uvesafb: protected mode interface info at "
41497 "%04x:%04x\n",
41498 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41499 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41500 par->ypan = ypan;
41501
41502 if (par->pmi_setpal || par->ypan) {
41503 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41504 if (__supported_pte_mask & _PAGE_NX) {
41505 par->pmi_setpal = par->ypan = 0;
41506 printk(KERN_WARNING "uvesafb: NX protection is actively."
41507 "We have better not to use the PMI.\n");
41508 - } else {
41509 + } else
41510 +#endif
41511 uvesafb_vbe_getpmi(task, par);
41512 - }
41513 }
41514 #else
41515 /* The protected mode interface is not available on non-x86. */
41516 @@ -1836,6 +1860,11 @@ out:
41517 if (par->vbe_modes)
41518 kfree(par->vbe_modes);
41519
41520 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41521 + if (par->pmi_code)
41522 + module_free_exec(NULL, par->pmi_code);
41523 +#endif
41524 +
41525 framebuffer_release(info);
41526 return err;
41527 }
41528 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41529 kfree(par->vbe_state_orig);
41530 if (par->vbe_state_saved)
41531 kfree(par->vbe_state_saved);
41532 +
41533 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41534 + if (par->pmi_code)
41535 + module_free_exec(NULL, par->pmi_code);
41536 +#endif
41537 +
41538 }
41539
41540 framebuffer_release(info);
41541 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41542 index 501b340..86bd4cf 100644
41543 --- a/drivers/video/vesafb.c
41544 +++ b/drivers/video/vesafb.c
41545 @@ -9,6 +9,7 @@
41546 */
41547
41548 #include <linux/module.h>
41549 +#include <linux/moduleloader.h>
41550 #include <linux/kernel.h>
41551 #include <linux/errno.h>
41552 #include <linux/string.h>
41553 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41554 static int vram_total __initdata; /* Set total amount of memory */
41555 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41556 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41557 -static void (*pmi_start)(void) __read_mostly;
41558 -static void (*pmi_pal) (void) __read_mostly;
41559 +static void (*pmi_start)(void) __read_only;
41560 +static void (*pmi_pal) (void) __read_only;
41561 static int depth __read_mostly;
41562 static int vga_compat __read_mostly;
41563 /* --------------------------------------------------------------------- */
41564 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41565 unsigned int size_vmode;
41566 unsigned int size_remap;
41567 unsigned int size_total;
41568 + void *pmi_code = NULL;
41569
41570 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41571 return -ENODEV;
41572 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41573 size_remap = size_total;
41574 vesafb_fix.smem_len = size_remap;
41575
41576 -#ifndef __i386__
41577 - screen_info.vesapm_seg = 0;
41578 -#endif
41579 -
41580 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41581 printk(KERN_WARNING
41582 "vesafb: cannot reserve video memory at 0x%lx\n",
41583 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41584 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41585 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41586
41587 +#ifdef __i386__
41588 +
41589 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41590 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41591 + if (!pmi_code)
41592 +#elif !defined(CONFIG_PAX_KERNEXEC)
41593 + if (0)
41594 +#endif
41595 +
41596 +#endif
41597 + screen_info.vesapm_seg = 0;
41598 +
41599 if (screen_info.vesapm_seg) {
41600 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41601 - screen_info.vesapm_seg,screen_info.vesapm_off);
41602 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41603 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41604 }
41605
41606 if (screen_info.vesapm_seg < 0xc000)
41607 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41608
41609 if (ypan || pmi_setpal) {
41610 unsigned short *pmi_base;
41611 +
41612 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41613 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41614 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41615 +
41616 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41617 + pax_open_kernel();
41618 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41619 +#else
41620 + pmi_code = pmi_base;
41621 +#endif
41622 +
41623 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41624 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41625 +
41626 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41627 + pmi_start = ktva_ktla(pmi_start);
41628 + pmi_pal = ktva_ktla(pmi_pal);
41629 + pax_close_kernel();
41630 +#endif
41631 +
41632 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41633 if (pmi_base[3]) {
41634 printk(KERN_INFO "vesafb: pmi: ports = ");
41635 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41636 info->node, info->fix.id);
41637 return 0;
41638 err:
41639 +
41640 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41641 + module_free_exec(NULL, pmi_code);
41642 +#endif
41643 +
41644 if (info->screen_base)
41645 iounmap(info->screen_base);
41646 framebuffer_release(info);
41647 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41648 index 88714ae..16c2e11 100644
41649 --- a/drivers/video/via/via_clock.h
41650 +++ b/drivers/video/via/via_clock.h
41651 @@ -56,7 +56,7 @@ struct via_clock {
41652
41653 void (*set_engine_pll_state)(u8 state);
41654 void (*set_engine_pll)(struct via_pll_config config);
41655 -};
41656 +} __no_const;
41657
41658
41659 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41660 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41661 index e56c934..fc22f4b 100644
41662 --- a/drivers/xen/xen-pciback/conf_space.h
41663 +++ b/drivers/xen/xen-pciback/conf_space.h
41664 @@ -44,15 +44,15 @@ struct config_field {
41665 struct {
41666 conf_dword_write write;
41667 conf_dword_read read;
41668 - } dw;
41669 + } __no_const dw;
41670 struct {
41671 conf_word_write write;
41672 conf_word_read read;
41673 - } w;
41674 + } __no_const w;
41675 struct {
41676 conf_byte_write write;
41677 conf_byte_read read;
41678 - } b;
41679 + } __no_const b;
41680 } u;
41681 struct list_head list;
41682 };
41683 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41684 index 014c8dd..6f3dfe6 100644
41685 --- a/fs/9p/vfs_inode.c
41686 +++ b/fs/9p/vfs_inode.c
41687 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41688 void
41689 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41690 {
41691 - char *s = nd_get_link(nd);
41692 + const char *s = nd_get_link(nd);
41693
41694 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41695 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41696 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41697 index e95d1b6..3454244 100644
41698 --- a/fs/Kconfig.binfmt
41699 +++ b/fs/Kconfig.binfmt
41700 @@ -89,7 +89,7 @@ config HAVE_AOUT
41701
41702 config BINFMT_AOUT
41703 tristate "Kernel support for a.out and ECOFF binaries"
41704 - depends on HAVE_AOUT
41705 + depends on HAVE_AOUT && BROKEN
41706 ---help---
41707 A.out (Assembler.OUTput) is a set of formats for libraries and
41708 executables used in the earliest versions of UNIX. Linux used
41709 diff --git a/fs/aio.c b/fs/aio.c
41710 index e7f2fad..15ad8a4 100644
41711 --- a/fs/aio.c
41712 +++ b/fs/aio.c
41713 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41714 size += sizeof(struct io_event) * nr_events;
41715 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41716
41717 - if (nr_pages < 0)
41718 + if (nr_pages <= 0)
41719 return -EINVAL;
41720
41721 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41722 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41723 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41724 {
41725 ssize_t ret;
41726 + struct iovec iovstack;
41727
41728 #ifdef CONFIG_COMPAT
41729 if (compat)
41730 ret = compat_rw_copy_check_uvector(type,
41731 (struct compat_iovec __user *)kiocb->ki_buf,
41732 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41733 + kiocb->ki_nbytes, 1, &iovstack,
41734 &kiocb->ki_iovec, 1);
41735 else
41736 #endif
41737 ret = rw_copy_check_uvector(type,
41738 (struct iovec __user *)kiocb->ki_buf,
41739 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41740 + kiocb->ki_nbytes, 1, &iovstack,
41741 &kiocb->ki_iovec, 1);
41742 if (ret < 0)
41743 goto out;
41744 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41745 if (ret < 0)
41746 goto out;
41747
41748 + if (kiocb->ki_iovec == &iovstack) {
41749 + kiocb->ki_inline_vec = iovstack;
41750 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41751 + }
41752 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41753 kiocb->ki_cur_seg = 0;
41754 /* ki_nbytes/left now reflect bytes instead of segs */
41755 diff --git a/fs/attr.c b/fs/attr.c
41756 index d94d1b6..f9bccd6 100644
41757 --- a/fs/attr.c
41758 +++ b/fs/attr.c
41759 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41760 unsigned long limit;
41761
41762 limit = rlimit(RLIMIT_FSIZE);
41763 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41764 if (limit != RLIM_INFINITY && offset > limit)
41765 goto out_sig;
41766 if (offset > inode->i_sb->s_maxbytes)
41767 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41768 index da8876d..9f3e6d8 100644
41769 --- a/fs/autofs4/waitq.c
41770 +++ b/fs/autofs4/waitq.c
41771 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41772 {
41773 unsigned long sigpipe, flags;
41774 mm_segment_t fs;
41775 - const char *data = (const char *)addr;
41776 + const char __user *data = (const char __force_user *)addr;
41777 ssize_t wr = 0;
41778
41779 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41780 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41781 index e18da23..affc30e 100644
41782 --- a/fs/befs/linuxvfs.c
41783 +++ b/fs/befs/linuxvfs.c
41784 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41785 {
41786 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41787 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41788 - char *link = nd_get_link(nd);
41789 + const char *link = nd_get_link(nd);
41790 if (!IS_ERR(link))
41791 kfree(link);
41792 }
41793 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41794 index d146e18..12d1bd1 100644
41795 --- a/fs/binfmt_aout.c
41796 +++ b/fs/binfmt_aout.c
41797 @@ -16,6 +16,7 @@
41798 #include <linux/string.h>
41799 #include <linux/fs.h>
41800 #include <linux/file.h>
41801 +#include <linux/security.h>
41802 #include <linux/stat.h>
41803 #include <linux/fcntl.h>
41804 #include <linux/ptrace.h>
41805 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41806 #endif
41807 # define START_STACK(u) ((void __user *)u.start_stack)
41808
41809 + memset(&dump, 0, sizeof(dump));
41810 +
41811 fs = get_fs();
41812 set_fs(KERNEL_DS);
41813 has_dumped = 1;
41814 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41815
41816 /* If the size of the dump file exceeds the rlimit, then see what would happen
41817 if we wrote the stack, but not the data area. */
41818 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41819 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41820 dump.u_dsize = 0;
41821
41822 /* Make sure we have enough room to write the stack and data areas. */
41823 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41824 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41825 dump.u_ssize = 0;
41826
41827 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41828 rlim = rlimit(RLIMIT_DATA);
41829 if (rlim >= RLIM_INFINITY)
41830 rlim = ~0;
41831 +
41832 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41833 if (ex.a_data + ex.a_bss > rlim)
41834 return -ENOMEM;
41835
41836 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41837
41838 install_exec_creds(bprm);
41839
41840 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41841 + current->mm->pax_flags = 0UL;
41842 +#endif
41843 +
41844 +#ifdef CONFIG_PAX_PAGEEXEC
41845 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41846 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41847 +
41848 +#ifdef CONFIG_PAX_EMUTRAMP
41849 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41850 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41851 +#endif
41852 +
41853 +#ifdef CONFIG_PAX_MPROTECT
41854 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41855 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41856 +#endif
41857 +
41858 + }
41859 +#endif
41860 +
41861 if (N_MAGIC(ex) == OMAGIC) {
41862 unsigned long text_addr, map_size;
41863 loff_t pos;
41864 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41865 }
41866
41867 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41868 - PROT_READ | PROT_WRITE | PROT_EXEC,
41869 + PROT_READ | PROT_WRITE,
41870 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41871 fd_offset + ex.a_text);
41872 if (error != N_DATADDR(ex)) {
41873 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41874 index 16f7354..7cc1e24 100644
41875 --- a/fs/binfmt_elf.c
41876 +++ b/fs/binfmt_elf.c
41877 @@ -32,6 +32,7 @@
41878 #include <linux/elf.h>
41879 #include <linux/utsname.h>
41880 #include <linux/coredump.h>
41881 +#include <linux/xattr.h>
41882 #include <asm/uaccess.h>
41883 #include <asm/param.h>
41884 #include <asm/page.h>
41885 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41886 #define elf_core_dump NULL
41887 #endif
41888
41889 +#ifdef CONFIG_PAX_MPROTECT
41890 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41891 +#endif
41892 +
41893 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41894 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41895 #else
41896 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41897 .load_binary = load_elf_binary,
41898 .load_shlib = load_elf_library,
41899 .core_dump = elf_core_dump,
41900 +
41901 +#ifdef CONFIG_PAX_MPROTECT
41902 + .handle_mprotect= elf_handle_mprotect,
41903 +#endif
41904 +
41905 .min_coredump = ELF_EXEC_PAGESIZE,
41906 };
41907
41908 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41909
41910 static int set_brk(unsigned long start, unsigned long end)
41911 {
41912 + unsigned long e = end;
41913 +
41914 start = ELF_PAGEALIGN(start);
41915 end = ELF_PAGEALIGN(end);
41916 if (end > start) {
41917 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41918 if (BAD_ADDR(addr))
41919 return addr;
41920 }
41921 - current->mm->start_brk = current->mm->brk = end;
41922 + current->mm->start_brk = current->mm->brk = e;
41923 return 0;
41924 }
41925
41926 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41927 elf_addr_t __user *u_rand_bytes;
41928 const char *k_platform = ELF_PLATFORM;
41929 const char *k_base_platform = ELF_BASE_PLATFORM;
41930 - unsigned char k_rand_bytes[16];
41931 + u32 k_rand_bytes[4];
41932 int items;
41933 elf_addr_t *elf_info;
41934 int ei_index = 0;
41935 const struct cred *cred = current_cred();
41936 struct vm_area_struct *vma;
41937 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41938
41939 /*
41940 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41941 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41942 * Generate 16 random bytes for userspace PRNG seeding.
41943 */
41944 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41945 - u_rand_bytes = (elf_addr_t __user *)
41946 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41947 + srandom32(k_rand_bytes[0] ^ random32());
41948 + srandom32(k_rand_bytes[1] ^ random32());
41949 + srandom32(k_rand_bytes[2] ^ random32());
41950 + srandom32(k_rand_bytes[3] ^ random32());
41951 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41952 + u_rand_bytes = (elf_addr_t __user *) p;
41953 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41954 return -EFAULT;
41955
41956 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41957 return -EFAULT;
41958 current->mm->env_end = p;
41959
41960 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41961 +
41962 /* Put the elf_info on the stack in the right place. */
41963 sp = (elf_addr_t __user *)envp + 1;
41964 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41965 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41966 return -EFAULT;
41967 return 0;
41968 }
41969 @@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41970 {
41971 struct elf_phdr *elf_phdata;
41972 struct elf_phdr *eppnt;
41973 - unsigned long load_addr = 0;
41974 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41975 int load_addr_set = 0;
41976 unsigned long last_bss = 0, elf_bss = 0;
41977 - unsigned long error = ~0UL;
41978 + unsigned long error = -EINVAL;
41979 unsigned long total_size;
41980 int retval, i, size;
41981
41982 @@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41983 goto out_close;
41984 }
41985
41986 +#ifdef CONFIG_PAX_SEGMEXEC
41987 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41988 + pax_task_size = SEGMEXEC_TASK_SIZE;
41989 +#endif
41990 +
41991 eppnt = elf_phdata;
41992 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41993 if (eppnt->p_type == PT_LOAD) {
41994 @@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41995 k = load_addr + eppnt->p_vaddr;
41996 if (BAD_ADDR(k) ||
41997 eppnt->p_filesz > eppnt->p_memsz ||
41998 - eppnt->p_memsz > TASK_SIZE ||
41999 - TASK_SIZE - eppnt->p_memsz < k) {
42000 + eppnt->p_memsz > pax_task_size ||
42001 + pax_task_size - eppnt->p_memsz < k) {
42002 error = -ENOMEM;
42003 goto out_close;
42004 }
42005 @@ -525,6 +549,311 @@ out:
42006 return error;
42007 }
42008
42009 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42010 +#ifdef CONFIG_PAX_SOFTMODE
42011 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42012 +{
42013 + unsigned long pax_flags = 0UL;
42014 +
42015 +#ifdef CONFIG_PAX_PAGEEXEC
42016 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42017 + pax_flags |= MF_PAX_PAGEEXEC;
42018 +#endif
42019 +
42020 +#ifdef CONFIG_PAX_SEGMEXEC
42021 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42022 + pax_flags |= MF_PAX_SEGMEXEC;
42023 +#endif
42024 +
42025 +#ifdef CONFIG_PAX_EMUTRAMP
42026 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42027 + pax_flags |= MF_PAX_EMUTRAMP;
42028 +#endif
42029 +
42030 +#ifdef CONFIG_PAX_MPROTECT
42031 + if (elf_phdata->p_flags & PF_MPROTECT)
42032 + pax_flags |= MF_PAX_MPROTECT;
42033 +#endif
42034 +
42035 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42036 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42037 + pax_flags |= MF_PAX_RANDMMAP;
42038 +#endif
42039 +
42040 + return pax_flags;
42041 +}
42042 +#endif
42043 +
42044 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42045 +{
42046 + unsigned long pax_flags = 0UL;
42047 +
42048 +#ifdef CONFIG_PAX_PAGEEXEC
42049 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42050 + pax_flags |= MF_PAX_PAGEEXEC;
42051 +#endif
42052 +
42053 +#ifdef CONFIG_PAX_SEGMEXEC
42054 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42055 + pax_flags |= MF_PAX_SEGMEXEC;
42056 +#endif
42057 +
42058 +#ifdef CONFIG_PAX_EMUTRAMP
42059 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42060 + pax_flags |= MF_PAX_EMUTRAMP;
42061 +#endif
42062 +
42063 +#ifdef CONFIG_PAX_MPROTECT
42064 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42065 + pax_flags |= MF_PAX_MPROTECT;
42066 +#endif
42067 +
42068 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42069 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42070 + pax_flags |= MF_PAX_RANDMMAP;
42071 +#endif
42072 +
42073 + return pax_flags;
42074 +}
42075 +#endif
42076 +
42077 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42078 +#ifdef CONFIG_PAX_SOFTMODE
42079 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42080 +{
42081 + unsigned long pax_flags = 0UL;
42082 +
42083 +#ifdef CONFIG_PAX_PAGEEXEC
42084 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42085 + pax_flags |= MF_PAX_PAGEEXEC;
42086 +#endif
42087 +
42088 +#ifdef CONFIG_PAX_SEGMEXEC
42089 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42090 + pax_flags |= MF_PAX_SEGMEXEC;
42091 +#endif
42092 +
42093 +#ifdef CONFIG_PAX_EMUTRAMP
42094 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42095 + pax_flags |= MF_PAX_EMUTRAMP;
42096 +#endif
42097 +
42098 +#ifdef CONFIG_PAX_MPROTECT
42099 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42100 + pax_flags |= MF_PAX_MPROTECT;
42101 +#endif
42102 +
42103 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42104 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42105 + pax_flags |= MF_PAX_RANDMMAP;
42106 +#endif
42107 +
42108 + return pax_flags;
42109 +}
42110 +#endif
42111 +
42112 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42113 +{
42114 + unsigned long pax_flags = 0UL;
42115 +
42116 +#ifdef CONFIG_PAX_PAGEEXEC
42117 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42118 + pax_flags |= MF_PAX_PAGEEXEC;
42119 +#endif
42120 +
42121 +#ifdef CONFIG_PAX_SEGMEXEC
42122 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42123 + pax_flags |= MF_PAX_SEGMEXEC;
42124 +#endif
42125 +
42126 +#ifdef CONFIG_PAX_EMUTRAMP
42127 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42128 + pax_flags |= MF_PAX_EMUTRAMP;
42129 +#endif
42130 +
42131 +#ifdef CONFIG_PAX_MPROTECT
42132 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42133 + pax_flags |= MF_PAX_MPROTECT;
42134 +#endif
42135 +
42136 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42137 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42138 + pax_flags |= MF_PAX_RANDMMAP;
42139 +#endif
42140 +
42141 + return pax_flags;
42142 +}
42143 +#endif
42144 +
42145 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42146 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42147 +{
42148 + unsigned long pax_flags = 0UL;
42149 +
42150 +#ifdef CONFIG_PAX_EI_PAX
42151 +
42152 +#ifdef CONFIG_PAX_PAGEEXEC
42153 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42154 + pax_flags |= MF_PAX_PAGEEXEC;
42155 +#endif
42156 +
42157 +#ifdef CONFIG_PAX_SEGMEXEC
42158 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42159 + pax_flags |= MF_PAX_SEGMEXEC;
42160 +#endif
42161 +
42162 +#ifdef CONFIG_PAX_EMUTRAMP
42163 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42164 + pax_flags |= MF_PAX_EMUTRAMP;
42165 +#endif
42166 +
42167 +#ifdef CONFIG_PAX_MPROTECT
42168 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42169 + pax_flags |= MF_PAX_MPROTECT;
42170 +#endif
42171 +
42172 +#ifdef CONFIG_PAX_ASLR
42173 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42174 + pax_flags |= MF_PAX_RANDMMAP;
42175 +#endif
42176 +
42177 +#else
42178 +
42179 +#ifdef CONFIG_PAX_PAGEEXEC
42180 + pax_flags |= MF_PAX_PAGEEXEC;
42181 +#endif
42182 +
42183 +#ifdef CONFIG_PAX_SEGMEXEC
42184 + pax_flags |= MF_PAX_SEGMEXEC;
42185 +#endif
42186 +
42187 +#ifdef CONFIG_PAX_MPROTECT
42188 + pax_flags |= MF_PAX_MPROTECT;
42189 +#endif
42190 +
42191 +#ifdef CONFIG_PAX_RANDMMAP
42192 + if (randomize_va_space)
42193 + pax_flags |= MF_PAX_RANDMMAP;
42194 +#endif
42195 +
42196 +#endif
42197 +
42198 + return pax_flags;
42199 +}
42200 +
42201 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42202 +{
42203 +
42204 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42205 + unsigned long i;
42206 +
42207 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42208 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42209 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42210 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42211 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42212 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42213 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42214 + return ~0UL;
42215 +
42216 +#ifdef CONFIG_PAX_SOFTMODE
42217 + if (pax_softmode)
42218 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42219 + else
42220 +#endif
42221 +
42222 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42223 + break;
42224 + }
42225 +#endif
42226 +
42227 + return ~0UL;
42228 +}
42229 +
42230 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42231 +{
42232 +
42233 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42234 + ssize_t xattr_size, i;
42235 + unsigned char xattr_value[5];
42236 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42237 +
42238 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42239 + if (xattr_size <= 0)
42240 + return ~0UL;
42241 +
42242 + for (i = 0; i < xattr_size; i++)
42243 + switch (xattr_value[i]) {
42244 + default:
42245 + return ~0UL;
42246 +
42247 +#define parse_flag(option1, option2, flag) \
42248 + case option1: \
42249 + pax_flags_hardmode |= MF_PAX_##flag; \
42250 + break; \
42251 + case option2: \
42252 + pax_flags_softmode |= MF_PAX_##flag; \
42253 + break;
42254 +
42255 + parse_flag('p', 'P', PAGEEXEC);
42256 + parse_flag('e', 'E', EMUTRAMP);
42257 + parse_flag('m', 'M', MPROTECT);
42258 + parse_flag('r', 'R', RANDMMAP);
42259 + parse_flag('s', 'S', SEGMEXEC);
42260 +
42261 +#undef parse_flag
42262 + }
42263 +
42264 + if (pax_flags_hardmode & pax_flags_softmode)
42265 + return ~0UL;
42266 +
42267 +#ifdef CONFIG_PAX_SOFTMODE
42268 + if (pax_softmode)
42269 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42270 + else
42271 +#endif
42272 +
42273 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42274 +#else
42275 + return ~0UL;
42276 +#endif
42277 +
42278 +}
42279 +
42280 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42281 +{
42282 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42283 +
42284 + pax_flags = pax_parse_ei_pax(elf_ex);
42285 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42286 + xattr_pax_flags = pax_parse_xattr_pax(file);
42287 +
42288 + if (pt_pax_flags == ~0UL)
42289 + pt_pax_flags = xattr_pax_flags;
42290 + else if (xattr_pax_flags == ~0UL)
42291 + xattr_pax_flags = pt_pax_flags;
42292 + if (pt_pax_flags != xattr_pax_flags)
42293 + return -EINVAL;
42294 + if (pt_pax_flags != ~0UL)
42295 + pax_flags = pt_pax_flags;
42296 +
42297 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42298 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42299 + if ((__supported_pte_mask & _PAGE_NX))
42300 + pax_flags &= ~MF_PAX_SEGMEXEC;
42301 + else
42302 + pax_flags &= ~MF_PAX_PAGEEXEC;
42303 + }
42304 +#endif
42305 +
42306 + if (0 > pax_check_flags(&pax_flags))
42307 + return -EINVAL;
42308 +
42309 + current->mm->pax_flags = pax_flags;
42310 + return 0;
42311 +}
42312 +#endif
42313 +
42314 /*
42315 * These are the functions used to load ELF style executables and shared
42316 * libraries. There is no binary dependent code anywhere else.
42317 @@ -541,6 +870,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42318 {
42319 unsigned int random_variable = 0;
42320
42321 +#ifdef CONFIG_PAX_RANDUSTACK
42322 + if (randomize_va_space)
42323 + return stack_top - current->mm->delta_stack;
42324 +#endif
42325 +
42326 if ((current->flags & PF_RANDOMIZE) &&
42327 !(current->personality & ADDR_NO_RANDOMIZE)) {
42328 random_variable = get_random_int() & STACK_RND_MASK;
42329 @@ -559,7 +893,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42330 unsigned long load_addr = 0, load_bias = 0;
42331 int load_addr_set = 0;
42332 char * elf_interpreter = NULL;
42333 - unsigned long error;
42334 + unsigned long error = 0;
42335 struct elf_phdr *elf_ppnt, *elf_phdata;
42336 unsigned long elf_bss, elf_brk;
42337 int retval, i;
42338 @@ -569,11 +903,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42339 unsigned long start_code, end_code, start_data, end_data;
42340 unsigned long reloc_func_desc __maybe_unused = 0;
42341 int executable_stack = EXSTACK_DEFAULT;
42342 - unsigned long def_flags = 0;
42343 struct {
42344 struct elfhdr elf_ex;
42345 struct elfhdr interp_elf_ex;
42346 } *loc;
42347 + unsigned long pax_task_size = TASK_SIZE;
42348
42349 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42350 if (!loc) {
42351 @@ -709,11 +1043,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42352 goto out_free_dentry;
42353
42354 /* OK, This is the point of no return */
42355 - current->mm->def_flags = def_flags;
42356 +
42357 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42358 + current->mm->pax_flags = 0UL;
42359 +#endif
42360 +
42361 +#ifdef CONFIG_PAX_DLRESOLVE
42362 + current->mm->call_dl_resolve = 0UL;
42363 +#endif
42364 +
42365 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42366 + current->mm->call_syscall = 0UL;
42367 +#endif
42368 +
42369 +#ifdef CONFIG_PAX_ASLR
42370 + current->mm->delta_mmap = 0UL;
42371 + current->mm->delta_stack = 0UL;
42372 +#endif
42373 +
42374 + current->mm->def_flags = 0;
42375 +
42376 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42377 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42378 + send_sig(SIGKILL, current, 0);
42379 + goto out_free_dentry;
42380 + }
42381 +#endif
42382 +
42383 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42384 + pax_set_initial_flags(bprm);
42385 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42386 + if (pax_set_initial_flags_func)
42387 + (pax_set_initial_flags_func)(bprm);
42388 +#endif
42389 +
42390 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42391 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42392 + current->mm->context.user_cs_limit = PAGE_SIZE;
42393 + current->mm->def_flags |= VM_PAGEEXEC;
42394 + }
42395 +#endif
42396 +
42397 +#ifdef CONFIG_PAX_SEGMEXEC
42398 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42399 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42400 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42401 + pax_task_size = SEGMEXEC_TASK_SIZE;
42402 + current->mm->def_flags |= VM_NOHUGEPAGE;
42403 + }
42404 +#endif
42405 +
42406 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42407 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42408 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42409 + put_cpu();
42410 + }
42411 +#endif
42412
42413 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42414 may depend on the personality. */
42415 SET_PERSONALITY(loc->elf_ex);
42416 +
42417 +#ifdef CONFIG_PAX_ASLR
42418 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42419 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42420 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42421 + }
42422 +#endif
42423 +
42424 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42425 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42426 + executable_stack = EXSTACK_DISABLE_X;
42427 + current->personality &= ~READ_IMPLIES_EXEC;
42428 + } else
42429 +#endif
42430 +
42431 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42432 current->personality |= READ_IMPLIES_EXEC;
42433
42434 @@ -804,6 +1208,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42435 #else
42436 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42437 #endif
42438 +
42439 +#ifdef CONFIG_PAX_RANDMMAP
42440 + /* PaX: randomize base address at the default exe base if requested */
42441 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42442 +#ifdef CONFIG_SPARC64
42443 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42444 +#else
42445 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42446 +#endif
42447 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42448 + elf_flags |= MAP_FIXED;
42449 + }
42450 +#endif
42451 +
42452 }
42453
42454 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42455 @@ -836,9 +1254,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42456 * allowed task size. Note that p_filesz must always be
42457 * <= p_memsz so it is only necessary to check p_memsz.
42458 */
42459 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42460 - elf_ppnt->p_memsz > TASK_SIZE ||
42461 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42462 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42463 + elf_ppnt->p_memsz > pax_task_size ||
42464 + pax_task_size - elf_ppnt->p_memsz < k) {
42465 /* set_brk can never work. Avoid overflows. */
42466 send_sig(SIGKILL, current, 0);
42467 retval = -EINVAL;
42468 @@ -877,11 +1295,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42469 goto out_free_dentry;
42470 }
42471 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42472 - send_sig(SIGSEGV, current, 0);
42473 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42474 - goto out_free_dentry;
42475 + /*
42476 + * This bss-zeroing can fail if the ELF
42477 + * file specifies odd protections. So
42478 + * we don't check the return value
42479 + */
42480 }
42481
42482 +#ifdef CONFIG_PAX_RANDMMAP
42483 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42484 + unsigned long start, size;
42485 +
42486 + start = ELF_PAGEALIGN(elf_brk);
42487 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42488 + down_write(&current->mm->mmap_sem);
42489 + retval = -ENOMEM;
42490 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42491 + unsigned long prot = PROT_NONE;
42492 +
42493 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42494 +// if (current->personality & ADDR_NO_RANDOMIZE)
42495 +// prot = PROT_READ;
42496 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42497 + retval = IS_ERR_VALUE(start) ? start : 0;
42498 + }
42499 + up_write(&current->mm->mmap_sem);
42500 + if (retval == 0)
42501 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42502 + if (retval < 0) {
42503 + send_sig(SIGKILL, current, 0);
42504 + goto out_free_dentry;
42505 + }
42506 + }
42507 +#endif
42508 +
42509 if (elf_interpreter) {
42510 unsigned long uninitialized_var(interp_map_addr);
42511
42512 @@ -1109,7 +1556,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42513 * Decide what to dump of a segment, part, all or none.
42514 */
42515 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42516 - unsigned long mm_flags)
42517 + unsigned long mm_flags, long signr)
42518 {
42519 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42520
42521 @@ -1146,7 +1593,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42522 if (vma->vm_file == NULL)
42523 return 0;
42524
42525 - if (FILTER(MAPPED_PRIVATE))
42526 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42527 goto whole;
42528
42529 /*
42530 @@ -1368,9 +1815,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42531 {
42532 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42533 int i = 0;
42534 - do
42535 + do {
42536 i += 2;
42537 - while (auxv[i - 2] != AT_NULL);
42538 + } while (auxv[i - 2] != AT_NULL);
42539 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42540 }
42541
42542 @@ -1892,14 +2339,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42543 }
42544
42545 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42546 - unsigned long mm_flags)
42547 + struct coredump_params *cprm)
42548 {
42549 struct vm_area_struct *vma;
42550 size_t size = 0;
42551
42552 for (vma = first_vma(current, gate_vma); vma != NULL;
42553 vma = next_vma(vma, gate_vma))
42554 - size += vma_dump_size(vma, mm_flags);
42555 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42556 return size;
42557 }
42558
42559 @@ -1993,7 +2440,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42560
42561 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42562
42563 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42564 + offset += elf_core_vma_data_size(gate_vma, cprm);
42565 offset += elf_core_extra_data_size();
42566 e_shoff = offset;
42567
42568 @@ -2007,10 +2454,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42569 offset = dataoff;
42570
42571 size += sizeof(*elf);
42572 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42573 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42574 goto end_coredump;
42575
42576 size += sizeof(*phdr4note);
42577 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42578 if (size > cprm->limit
42579 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42580 goto end_coredump;
42581 @@ -2024,7 +2473,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42582 phdr.p_offset = offset;
42583 phdr.p_vaddr = vma->vm_start;
42584 phdr.p_paddr = 0;
42585 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42586 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42587 phdr.p_memsz = vma->vm_end - vma->vm_start;
42588 offset += phdr.p_filesz;
42589 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42590 @@ -2035,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42591 phdr.p_align = ELF_EXEC_PAGESIZE;
42592
42593 size += sizeof(phdr);
42594 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42595 if (size > cprm->limit
42596 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42597 goto end_coredump;
42598 @@ -2059,7 +2509,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42599 unsigned long addr;
42600 unsigned long end;
42601
42602 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42603 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42604
42605 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42606 struct page *page;
42607 @@ -2068,6 +2518,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42608 page = get_dump_page(addr);
42609 if (page) {
42610 void *kaddr = kmap(page);
42611 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42612 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42613 !dump_write(cprm->file, kaddr,
42614 PAGE_SIZE);
42615 @@ -2085,6 +2536,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42616
42617 if (e_phnum == PN_XNUM) {
42618 size += sizeof(*shdr4extnum);
42619 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42620 if (size > cprm->limit
42621 || !dump_write(cprm->file, shdr4extnum,
42622 sizeof(*shdr4extnum)))
42623 @@ -2105,6 +2557,97 @@ out:
42624
42625 #endif /* CONFIG_ELF_CORE */
42626
42627 +#ifdef CONFIG_PAX_MPROTECT
42628 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42629 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42630 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42631 + *
42632 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42633 + * basis because we want to allow the common case and not the special ones.
42634 + */
42635 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42636 +{
42637 + struct elfhdr elf_h;
42638 + struct elf_phdr elf_p;
42639 + unsigned long i;
42640 + unsigned long oldflags;
42641 + bool is_textrel_rw, is_textrel_rx, is_relro;
42642 +
42643 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42644 + return;
42645 +
42646 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42647 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42648 +
42649 +#ifdef CONFIG_PAX_ELFRELOCS
42650 + /* possible TEXTREL */
42651 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42652 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42653 +#else
42654 + is_textrel_rw = false;
42655 + is_textrel_rx = false;
42656 +#endif
42657 +
42658 + /* possible RELRO */
42659 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42660 +
42661 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42662 + return;
42663 +
42664 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42665 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42666 +
42667 +#ifdef CONFIG_PAX_ETEXECRELOCS
42668 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42669 +#else
42670 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42671 +#endif
42672 +
42673 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42674 + !elf_check_arch(&elf_h) ||
42675 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42676 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42677 + return;
42678 +
42679 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42680 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42681 + return;
42682 + switch (elf_p.p_type) {
42683 + case PT_DYNAMIC:
42684 + if (!is_textrel_rw && !is_textrel_rx)
42685 + continue;
42686 + i = 0UL;
42687 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42688 + elf_dyn dyn;
42689 +
42690 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42691 + return;
42692 + if (dyn.d_tag == DT_NULL)
42693 + return;
42694 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42695 + gr_log_textrel(vma);
42696 + if (is_textrel_rw)
42697 + vma->vm_flags |= VM_MAYWRITE;
42698 + else
42699 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42700 + vma->vm_flags &= ~VM_MAYWRITE;
42701 + return;
42702 + }
42703 + i++;
42704 + }
42705 + return;
42706 +
42707 + case PT_GNU_RELRO:
42708 + if (!is_relro)
42709 + continue;
42710 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42711 + vma->vm_flags &= ~VM_MAYWRITE;
42712 + return;
42713 + }
42714 + }
42715 +}
42716 +#endif
42717 +
42718 static int __init init_elf_binfmt(void)
42719 {
42720 register_binfmt(&elf_format);
42721 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42722 index 6b2daf9..a70dccb 100644
42723 --- a/fs/binfmt_flat.c
42724 +++ b/fs/binfmt_flat.c
42725 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42726 realdatastart = (unsigned long) -ENOMEM;
42727 printk("Unable to allocate RAM for process data, errno %d\n",
42728 (int)-realdatastart);
42729 + down_write(&current->mm->mmap_sem);
42730 do_munmap(current->mm, textpos, text_len);
42731 + up_write(&current->mm->mmap_sem);
42732 ret = realdatastart;
42733 goto err;
42734 }
42735 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42736 }
42737 if (IS_ERR_VALUE(result)) {
42738 printk("Unable to read data+bss, errno %d\n", (int)-result);
42739 + down_write(&current->mm->mmap_sem);
42740 do_munmap(current->mm, textpos, text_len);
42741 do_munmap(current->mm, realdatastart, len);
42742 + up_write(&current->mm->mmap_sem);
42743 ret = result;
42744 goto err;
42745 }
42746 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42747 }
42748 if (IS_ERR_VALUE(result)) {
42749 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42750 + down_write(&current->mm->mmap_sem);
42751 do_munmap(current->mm, textpos, text_len + data_len + extra +
42752 MAX_SHARED_LIBS * sizeof(unsigned long));
42753 + up_write(&current->mm->mmap_sem);
42754 ret = result;
42755 goto err;
42756 }
42757 diff --git a/fs/bio.c b/fs/bio.c
42758 index 84da885..bac1d48 100644
42759 --- a/fs/bio.c
42760 +++ b/fs/bio.c
42761 @@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42762 /*
42763 * Overflow, abort
42764 */
42765 - if (end < start)
42766 + if (end < start || end - start > INT_MAX - nr_pages)
42767 return ERR_PTR(-EINVAL);
42768
42769 nr_pages += end - start;
42770 @@ -972,7 +972,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
42771 /*
42772 * Overflow, abort
42773 */
42774 - if (end < start)
42775 + if (end < start || end - start > INT_MAX - nr_pages)
42776 return ERR_PTR(-EINVAL);
42777
42778 nr_pages += end - start;
42779 @@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42780 const int read = bio_data_dir(bio) == READ;
42781 struct bio_map_data *bmd = bio->bi_private;
42782 int i;
42783 - char *p = bmd->sgvecs[0].iov_base;
42784 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42785
42786 __bio_for_each_segment(bvec, bio, i, 0) {
42787 char *addr = page_address(bvec->bv_page);
42788 diff --git a/fs/block_dev.c b/fs/block_dev.c
42789 index ba11c30..623d736 100644
42790 --- a/fs/block_dev.c
42791 +++ b/fs/block_dev.c
42792 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42793 else if (bdev->bd_contains == bdev)
42794 return true; /* is a whole device which isn't held */
42795
42796 - else if (whole->bd_holder == bd_may_claim)
42797 + else if (whole->bd_holder == (void *)bd_may_claim)
42798 return true; /* is a partition of a device that is being partitioned */
42799 else if (whole->bd_holder != NULL)
42800 return false; /* is a partition of a held device */
42801 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42802 index c053e90..e5f1afc 100644
42803 --- a/fs/btrfs/check-integrity.c
42804 +++ b/fs/btrfs/check-integrity.c
42805 @@ -156,7 +156,7 @@ struct btrfsic_block {
42806 union {
42807 bio_end_io_t *bio;
42808 bh_end_io_t *bh;
42809 - } orig_bio_bh_end_io;
42810 + } __no_const orig_bio_bh_end_io;
42811 int submit_bio_bh_rw;
42812 u64 flush_gen; /* only valid if !never_written */
42813 };
42814 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42815 index 4106264..8157ede 100644
42816 --- a/fs/btrfs/ctree.c
42817 +++ b/fs/btrfs/ctree.c
42818 @@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42819 free_extent_buffer(buf);
42820 add_root_to_dirty_list(root);
42821 } else {
42822 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42823 - parent_start = parent->start;
42824 - else
42825 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42826 + if (parent)
42827 + parent_start = parent->start;
42828 + else
42829 + parent_start = 0;
42830 + } else
42831 parent_start = 0;
42832
42833 WARN_ON(trans->transid != btrfs_header_generation(parent));
42834 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42835 index 0df0d1f..4bdcbfe 100644
42836 --- a/fs/btrfs/inode.c
42837 +++ b/fs/btrfs/inode.c
42838 @@ -7074,7 +7074,7 @@ fail:
42839 return -ENOMEM;
42840 }
42841
42842 -static int btrfs_getattr(struct vfsmount *mnt,
42843 +int btrfs_getattr(struct vfsmount *mnt,
42844 struct dentry *dentry, struct kstat *stat)
42845 {
42846 struct inode *inode = dentry->d_inode;
42847 @@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42848 return 0;
42849 }
42850
42851 +EXPORT_SYMBOL(btrfs_getattr);
42852 +
42853 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42854 +{
42855 + return BTRFS_I(inode)->root->anon_dev;
42856 +}
42857 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42858 +
42859 /*
42860 * If a file is moved, it will inherit the cow and compression flags of the new
42861 * directory.
42862 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42863 index 14f8e1f..ab8d81f 100644
42864 --- a/fs/btrfs/ioctl.c
42865 +++ b/fs/btrfs/ioctl.c
42866 @@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42867 for (i = 0; i < num_types; i++) {
42868 struct btrfs_space_info *tmp;
42869
42870 + /* Don't copy in more than we allocated */
42871 if (!slot_count)
42872 break;
42873
42874 + slot_count--;
42875 +
42876 info = NULL;
42877 rcu_read_lock();
42878 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42879 @@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42880 memcpy(dest, &space, sizeof(space));
42881 dest++;
42882 space_args.total_spaces++;
42883 - slot_count--;
42884 }
42885 - if (!slot_count)
42886 - break;
42887 }
42888 up_read(&info->groups_sem);
42889 }
42890
42891 - user_dest = (struct btrfs_ioctl_space_info *)
42892 + user_dest = (struct btrfs_ioctl_space_info __user *)
42893 (arg + sizeof(struct btrfs_ioctl_space_args));
42894
42895 if (copy_to_user(user_dest, dest_orig, alloc_size))
42896 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42897 index 646ee21..f020f87 100644
42898 --- a/fs/btrfs/relocation.c
42899 +++ b/fs/btrfs/relocation.c
42900 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42901 }
42902 spin_unlock(&rc->reloc_root_tree.lock);
42903
42904 - BUG_ON((struct btrfs_root *)node->data != root);
42905 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42906
42907 if (!del) {
42908 spin_lock(&rc->reloc_root_tree.lock);
42909 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42910 index 622f469..e8d2d55 100644
42911 --- a/fs/cachefiles/bind.c
42912 +++ b/fs/cachefiles/bind.c
42913 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42914 args);
42915
42916 /* start by checking things over */
42917 - ASSERT(cache->fstop_percent >= 0 &&
42918 - cache->fstop_percent < cache->fcull_percent &&
42919 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42920 cache->fcull_percent < cache->frun_percent &&
42921 cache->frun_percent < 100);
42922
42923 - ASSERT(cache->bstop_percent >= 0 &&
42924 - cache->bstop_percent < cache->bcull_percent &&
42925 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42926 cache->bcull_percent < cache->brun_percent &&
42927 cache->brun_percent < 100);
42928
42929 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42930 index 0a1467b..6a53245 100644
42931 --- a/fs/cachefiles/daemon.c
42932 +++ b/fs/cachefiles/daemon.c
42933 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42934 if (n > buflen)
42935 return -EMSGSIZE;
42936
42937 - if (copy_to_user(_buffer, buffer, n) != 0)
42938 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42939 return -EFAULT;
42940
42941 return n;
42942 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42943 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42944 return -EIO;
42945
42946 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42947 + if (datalen > PAGE_SIZE - 1)
42948 return -EOPNOTSUPP;
42949
42950 /* drag the command string into the kernel so we can parse it */
42951 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42952 if (args[0] != '%' || args[1] != '\0')
42953 return -EINVAL;
42954
42955 - if (fstop < 0 || fstop >= cache->fcull_percent)
42956 + if (fstop >= cache->fcull_percent)
42957 return cachefiles_daemon_range_error(cache, args);
42958
42959 cache->fstop_percent = fstop;
42960 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42961 if (args[0] != '%' || args[1] != '\0')
42962 return -EINVAL;
42963
42964 - if (bstop < 0 || bstop >= cache->bcull_percent)
42965 + if (bstop >= cache->bcull_percent)
42966 return cachefiles_daemon_range_error(cache, args);
42967
42968 cache->bstop_percent = bstop;
42969 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42970 index bd6bc1b..b627b53 100644
42971 --- a/fs/cachefiles/internal.h
42972 +++ b/fs/cachefiles/internal.h
42973 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42974 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42975 struct rb_root active_nodes; /* active nodes (can't be culled) */
42976 rwlock_t active_lock; /* lock for active_nodes */
42977 - atomic_t gravecounter; /* graveyard uniquifier */
42978 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42979 unsigned frun_percent; /* when to stop culling (% files) */
42980 unsigned fcull_percent; /* when to start culling (% files) */
42981 unsigned fstop_percent; /* when to stop allocating (% files) */
42982 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42983 * proc.c
42984 */
42985 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42986 -extern atomic_t cachefiles_lookup_histogram[HZ];
42987 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42988 -extern atomic_t cachefiles_create_histogram[HZ];
42989 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42990 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42991 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42992
42993 extern int __init cachefiles_proc_init(void);
42994 extern void cachefiles_proc_cleanup(void);
42995 static inline
42996 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42997 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42998 {
42999 unsigned long jif = jiffies - start_jif;
43000 if (jif >= HZ)
43001 jif = HZ - 1;
43002 - atomic_inc(&histogram[jif]);
43003 + atomic_inc_unchecked(&histogram[jif]);
43004 }
43005
43006 #else
43007 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43008 index 7f0771d..87d4f36 100644
43009 --- a/fs/cachefiles/namei.c
43010 +++ b/fs/cachefiles/namei.c
43011 @@ -318,7 +318,7 @@ try_again:
43012 /* first step is to make up a grave dentry in the graveyard */
43013 sprintf(nbuffer, "%08x%08x",
43014 (uint32_t) get_seconds(),
43015 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43016 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43017
43018 /* do the multiway lock magic */
43019 trap = lock_rename(cache->graveyard, dir);
43020 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43021 index eccd339..4c1d995 100644
43022 --- a/fs/cachefiles/proc.c
43023 +++ b/fs/cachefiles/proc.c
43024 @@ -14,9 +14,9 @@
43025 #include <linux/seq_file.h>
43026 #include "internal.h"
43027
43028 -atomic_t cachefiles_lookup_histogram[HZ];
43029 -atomic_t cachefiles_mkdir_histogram[HZ];
43030 -atomic_t cachefiles_create_histogram[HZ];
43031 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43032 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43033 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43034
43035 /*
43036 * display the latency histogram
43037 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43038 return 0;
43039 default:
43040 index = (unsigned long) v - 3;
43041 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43042 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43043 - z = atomic_read(&cachefiles_create_histogram[index]);
43044 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43045 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43046 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43047 if (x == 0 && y == 0 && z == 0)
43048 return 0;
43049
43050 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43051 index 0e3c092..818480e 100644
43052 --- a/fs/cachefiles/rdwr.c
43053 +++ b/fs/cachefiles/rdwr.c
43054 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43055 old_fs = get_fs();
43056 set_fs(KERNEL_DS);
43057 ret = file->f_op->write(
43058 - file, (const void __user *) data, len, &pos);
43059 + file, (const void __force_user *) data, len, &pos);
43060 set_fs(old_fs);
43061 kunmap(page);
43062 if (ret != len)
43063 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43064 index 3e8094b..cb3ff3d 100644
43065 --- a/fs/ceph/dir.c
43066 +++ b/fs/ceph/dir.c
43067 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43068 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43069 struct ceph_mds_client *mdsc = fsc->mdsc;
43070 unsigned frag = fpos_frag(filp->f_pos);
43071 - int off = fpos_off(filp->f_pos);
43072 + unsigned int off = fpos_off(filp->f_pos);
43073 int err;
43074 u32 ftype;
43075 struct ceph_mds_reply_info_parsed *rinfo;
43076 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43077 if (nd &&
43078 (nd->flags & LOOKUP_OPEN) &&
43079 !(nd->intent.open.flags & O_CREAT)) {
43080 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43081 + int mode = nd->intent.open.create_mode & ~current_umask();
43082 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43083 }
43084
43085 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43086 index 2704646..c581c91 100644
43087 --- a/fs/cifs/cifs_debug.c
43088 +++ b/fs/cifs/cifs_debug.c
43089 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43090
43091 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43092 #ifdef CONFIG_CIFS_STATS2
43093 - atomic_set(&totBufAllocCount, 0);
43094 - atomic_set(&totSmBufAllocCount, 0);
43095 + atomic_set_unchecked(&totBufAllocCount, 0);
43096 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43097 #endif /* CONFIG_CIFS_STATS2 */
43098 spin_lock(&cifs_tcp_ses_lock);
43099 list_for_each(tmp1, &cifs_tcp_ses_list) {
43100 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43101 tcon = list_entry(tmp3,
43102 struct cifs_tcon,
43103 tcon_list);
43104 - atomic_set(&tcon->num_smbs_sent, 0);
43105 - atomic_set(&tcon->num_writes, 0);
43106 - atomic_set(&tcon->num_reads, 0);
43107 - atomic_set(&tcon->num_oplock_brks, 0);
43108 - atomic_set(&tcon->num_opens, 0);
43109 - atomic_set(&tcon->num_posixopens, 0);
43110 - atomic_set(&tcon->num_posixmkdirs, 0);
43111 - atomic_set(&tcon->num_closes, 0);
43112 - atomic_set(&tcon->num_deletes, 0);
43113 - atomic_set(&tcon->num_mkdirs, 0);
43114 - atomic_set(&tcon->num_rmdirs, 0);
43115 - atomic_set(&tcon->num_renames, 0);
43116 - atomic_set(&tcon->num_t2renames, 0);
43117 - atomic_set(&tcon->num_ffirst, 0);
43118 - atomic_set(&tcon->num_fnext, 0);
43119 - atomic_set(&tcon->num_fclose, 0);
43120 - atomic_set(&tcon->num_hardlinks, 0);
43121 - atomic_set(&tcon->num_symlinks, 0);
43122 - atomic_set(&tcon->num_locks, 0);
43123 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43124 + atomic_set_unchecked(&tcon->num_writes, 0);
43125 + atomic_set_unchecked(&tcon->num_reads, 0);
43126 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43127 + atomic_set_unchecked(&tcon->num_opens, 0);
43128 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43129 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43130 + atomic_set_unchecked(&tcon->num_closes, 0);
43131 + atomic_set_unchecked(&tcon->num_deletes, 0);
43132 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43133 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43134 + atomic_set_unchecked(&tcon->num_renames, 0);
43135 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43136 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43137 + atomic_set_unchecked(&tcon->num_fnext, 0);
43138 + atomic_set_unchecked(&tcon->num_fclose, 0);
43139 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43140 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43141 + atomic_set_unchecked(&tcon->num_locks, 0);
43142 }
43143 }
43144 }
43145 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43146 smBufAllocCount.counter, cifs_min_small);
43147 #ifdef CONFIG_CIFS_STATS2
43148 seq_printf(m, "Total Large %d Small %d Allocations\n",
43149 - atomic_read(&totBufAllocCount),
43150 - atomic_read(&totSmBufAllocCount));
43151 + atomic_read_unchecked(&totBufAllocCount),
43152 + atomic_read_unchecked(&totSmBufAllocCount));
43153 #endif /* CONFIG_CIFS_STATS2 */
43154
43155 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43156 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43157 if (tcon->need_reconnect)
43158 seq_puts(m, "\tDISCONNECTED ");
43159 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43160 - atomic_read(&tcon->num_smbs_sent),
43161 - atomic_read(&tcon->num_oplock_brks));
43162 + atomic_read_unchecked(&tcon->num_smbs_sent),
43163 + atomic_read_unchecked(&tcon->num_oplock_brks));
43164 seq_printf(m, "\nReads: %d Bytes: %lld",
43165 - atomic_read(&tcon->num_reads),
43166 + atomic_read_unchecked(&tcon->num_reads),
43167 (long long)(tcon->bytes_read));
43168 seq_printf(m, "\nWrites: %d Bytes: %lld",
43169 - atomic_read(&tcon->num_writes),
43170 + atomic_read_unchecked(&tcon->num_writes),
43171 (long long)(tcon->bytes_written));
43172 seq_printf(m, "\nFlushes: %d",
43173 - atomic_read(&tcon->num_flushes));
43174 + atomic_read_unchecked(&tcon->num_flushes));
43175 seq_printf(m, "\nLocks: %d HardLinks: %d "
43176 "Symlinks: %d",
43177 - atomic_read(&tcon->num_locks),
43178 - atomic_read(&tcon->num_hardlinks),
43179 - atomic_read(&tcon->num_symlinks));
43180 + atomic_read_unchecked(&tcon->num_locks),
43181 + atomic_read_unchecked(&tcon->num_hardlinks),
43182 + atomic_read_unchecked(&tcon->num_symlinks));
43183 seq_printf(m, "\nOpens: %d Closes: %d "
43184 "Deletes: %d",
43185 - atomic_read(&tcon->num_opens),
43186 - atomic_read(&tcon->num_closes),
43187 - atomic_read(&tcon->num_deletes));
43188 + atomic_read_unchecked(&tcon->num_opens),
43189 + atomic_read_unchecked(&tcon->num_closes),
43190 + atomic_read_unchecked(&tcon->num_deletes));
43191 seq_printf(m, "\nPosix Opens: %d "
43192 "Posix Mkdirs: %d",
43193 - atomic_read(&tcon->num_posixopens),
43194 - atomic_read(&tcon->num_posixmkdirs));
43195 + atomic_read_unchecked(&tcon->num_posixopens),
43196 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43197 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43198 - atomic_read(&tcon->num_mkdirs),
43199 - atomic_read(&tcon->num_rmdirs));
43200 + atomic_read_unchecked(&tcon->num_mkdirs),
43201 + atomic_read_unchecked(&tcon->num_rmdirs));
43202 seq_printf(m, "\nRenames: %d T2 Renames %d",
43203 - atomic_read(&tcon->num_renames),
43204 - atomic_read(&tcon->num_t2renames));
43205 + atomic_read_unchecked(&tcon->num_renames),
43206 + atomic_read_unchecked(&tcon->num_t2renames));
43207 seq_printf(m, "\nFindFirst: %d FNext %d "
43208 "FClose %d",
43209 - atomic_read(&tcon->num_ffirst),
43210 - atomic_read(&tcon->num_fnext),
43211 - atomic_read(&tcon->num_fclose));
43212 + atomic_read_unchecked(&tcon->num_ffirst),
43213 + atomic_read_unchecked(&tcon->num_fnext),
43214 + atomic_read_unchecked(&tcon->num_fclose));
43215 }
43216 }
43217 }
43218 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43219 index 541ef81..a78deb8 100644
43220 --- a/fs/cifs/cifsfs.c
43221 +++ b/fs/cifs/cifsfs.c
43222 @@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
43223 cifs_req_cachep = kmem_cache_create("cifs_request",
43224 CIFSMaxBufSize +
43225 MAX_CIFS_HDR_SIZE, 0,
43226 - SLAB_HWCACHE_ALIGN, NULL);
43227 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43228 if (cifs_req_cachep == NULL)
43229 return -ENOMEM;
43230
43231 @@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
43232 efficient to alloc 1 per page off the slab compared to 17K (5page)
43233 alloc of large cifs buffers even when page debugging is on */
43234 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43235 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43236 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43237 NULL);
43238 if (cifs_sm_req_cachep == NULL) {
43239 mempool_destroy(cifs_req_poolp);
43240 @@ -1097,8 +1097,8 @@ init_cifs(void)
43241 atomic_set(&bufAllocCount, 0);
43242 atomic_set(&smBufAllocCount, 0);
43243 #ifdef CONFIG_CIFS_STATS2
43244 - atomic_set(&totBufAllocCount, 0);
43245 - atomic_set(&totSmBufAllocCount, 0);
43246 + atomic_set_unchecked(&totBufAllocCount, 0);
43247 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43248 #endif /* CONFIG_CIFS_STATS2 */
43249
43250 atomic_set(&midCount, 0);
43251 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43252 index 73fea28..b996b84 100644
43253 --- a/fs/cifs/cifsglob.h
43254 +++ b/fs/cifs/cifsglob.h
43255 @@ -439,28 +439,28 @@ struct cifs_tcon {
43256 __u16 Flags; /* optional support bits */
43257 enum statusEnum tidStatus;
43258 #ifdef CONFIG_CIFS_STATS
43259 - atomic_t num_smbs_sent;
43260 - atomic_t num_writes;
43261 - atomic_t num_reads;
43262 - atomic_t num_flushes;
43263 - atomic_t num_oplock_brks;
43264 - atomic_t num_opens;
43265 - atomic_t num_closes;
43266 - atomic_t num_deletes;
43267 - atomic_t num_mkdirs;
43268 - atomic_t num_posixopens;
43269 - atomic_t num_posixmkdirs;
43270 - atomic_t num_rmdirs;
43271 - atomic_t num_renames;
43272 - atomic_t num_t2renames;
43273 - atomic_t num_ffirst;
43274 - atomic_t num_fnext;
43275 - atomic_t num_fclose;
43276 - atomic_t num_hardlinks;
43277 - atomic_t num_symlinks;
43278 - atomic_t num_locks;
43279 - atomic_t num_acl_get;
43280 - atomic_t num_acl_set;
43281 + atomic_unchecked_t num_smbs_sent;
43282 + atomic_unchecked_t num_writes;
43283 + atomic_unchecked_t num_reads;
43284 + atomic_unchecked_t num_flushes;
43285 + atomic_unchecked_t num_oplock_brks;
43286 + atomic_unchecked_t num_opens;
43287 + atomic_unchecked_t num_closes;
43288 + atomic_unchecked_t num_deletes;
43289 + atomic_unchecked_t num_mkdirs;
43290 + atomic_unchecked_t num_posixopens;
43291 + atomic_unchecked_t num_posixmkdirs;
43292 + atomic_unchecked_t num_rmdirs;
43293 + atomic_unchecked_t num_renames;
43294 + atomic_unchecked_t num_t2renames;
43295 + atomic_unchecked_t num_ffirst;
43296 + atomic_unchecked_t num_fnext;
43297 + atomic_unchecked_t num_fclose;
43298 + atomic_unchecked_t num_hardlinks;
43299 + atomic_unchecked_t num_symlinks;
43300 + atomic_unchecked_t num_locks;
43301 + atomic_unchecked_t num_acl_get;
43302 + atomic_unchecked_t num_acl_set;
43303 #ifdef CONFIG_CIFS_STATS2
43304 unsigned long long time_writes;
43305 unsigned long long time_reads;
43306 @@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
43307 }
43308
43309 #ifdef CONFIG_CIFS_STATS
43310 -#define cifs_stats_inc atomic_inc
43311 +#define cifs_stats_inc atomic_inc_unchecked
43312
43313 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43314 unsigned int bytes)
43315 @@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43316 /* Various Debug counters */
43317 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43318 #ifdef CONFIG_CIFS_STATS2
43319 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43320 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43321 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43322 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43323 #endif
43324 GLOBAL_EXTERN atomic_t smBufAllocCount;
43325 GLOBAL_EXTERN atomic_t midCount;
43326 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43327 index 6b0e064..94e6c3c 100644
43328 --- a/fs/cifs/link.c
43329 +++ b/fs/cifs/link.c
43330 @@ -600,7 +600,7 @@ symlink_exit:
43331
43332 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43333 {
43334 - char *p = nd_get_link(nd);
43335 + const char *p = nd_get_link(nd);
43336 if (!IS_ERR(p))
43337 kfree(p);
43338 }
43339 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43340 index c29d1aa..58018da 100644
43341 --- a/fs/cifs/misc.c
43342 +++ b/fs/cifs/misc.c
43343 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43344 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43345 atomic_inc(&bufAllocCount);
43346 #ifdef CONFIG_CIFS_STATS2
43347 - atomic_inc(&totBufAllocCount);
43348 + atomic_inc_unchecked(&totBufAllocCount);
43349 #endif /* CONFIG_CIFS_STATS2 */
43350 }
43351
43352 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43353 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43354 atomic_inc(&smBufAllocCount);
43355 #ifdef CONFIG_CIFS_STATS2
43356 - atomic_inc(&totSmBufAllocCount);
43357 + atomic_inc_unchecked(&totSmBufAllocCount);
43358 #endif /* CONFIG_CIFS_STATS2 */
43359
43360 }
43361 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43362 index 6901578..d402eb5 100644
43363 --- a/fs/coda/cache.c
43364 +++ b/fs/coda/cache.c
43365 @@ -24,7 +24,7 @@
43366 #include "coda_linux.h"
43367 #include "coda_cache.h"
43368
43369 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43370 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43371
43372 /* replace or extend an acl cache hit */
43373 void coda_cache_enter(struct inode *inode, int mask)
43374 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43375 struct coda_inode_info *cii = ITOC(inode);
43376
43377 spin_lock(&cii->c_lock);
43378 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43379 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43380 if (cii->c_uid != current_fsuid()) {
43381 cii->c_uid = current_fsuid();
43382 cii->c_cached_perm = mask;
43383 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43384 {
43385 struct coda_inode_info *cii = ITOC(inode);
43386 spin_lock(&cii->c_lock);
43387 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43388 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43389 spin_unlock(&cii->c_lock);
43390 }
43391
43392 /* remove all acl caches */
43393 void coda_cache_clear_all(struct super_block *sb)
43394 {
43395 - atomic_inc(&permission_epoch);
43396 + atomic_inc_unchecked(&permission_epoch);
43397 }
43398
43399
43400 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43401 spin_lock(&cii->c_lock);
43402 hit = (mask & cii->c_cached_perm) == mask &&
43403 cii->c_uid == current_fsuid() &&
43404 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43405 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43406 spin_unlock(&cii->c_lock);
43407
43408 return hit;
43409 diff --git a/fs/compat.c b/fs/compat.c
43410 index f2944ac..62845d2 100644
43411 --- a/fs/compat.c
43412 +++ b/fs/compat.c
43413 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43414
43415 set_fs(KERNEL_DS);
43416 /* The __user pointer cast is valid because of the set_fs() */
43417 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43418 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43419 set_fs(oldfs);
43420 /* truncating is ok because it's a user address */
43421 if (!ret)
43422 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43423 goto out;
43424
43425 ret = -EINVAL;
43426 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43427 + if (nr_segs > UIO_MAXIOV)
43428 goto out;
43429 if (nr_segs > fast_segs) {
43430 ret = -ENOMEM;
43431 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43432
43433 struct compat_readdir_callback {
43434 struct compat_old_linux_dirent __user *dirent;
43435 + struct file * file;
43436 int result;
43437 };
43438
43439 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43440 buf->result = -EOVERFLOW;
43441 return -EOVERFLOW;
43442 }
43443 +
43444 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43445 + return 0;
43446 +
43447 buf->result++;
43448 dirent = buf->dirent;
43449 if (!access_ok(VERIFY_WRITE, dirent,
43450 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43451
43452 buf.result = 0;
43453 buf.dirent = dirent;
43454 + buf.file = file;
43455
43456 error = vfs_readdir(file, compat_fillonedir, &buf);
43457 if (buf.result)
43458 @@ -900,6 +906,7 @@ struct compat_linux_dirent {
43459 struct compat_getdents_callback {
43460 struct compat_linux_dirent __user *current_dir;
43461 struct compat_linux_dirent __user *previous;
43462 + struct file * file;
43463 int count;
43464 int error;
43465 };
43466 @@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43467 buf->error = -EOVERFLOW;
43468 return -EOVERFLOW;
43469 }
43470 +
43471 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43472 + return 0;
43473 +
43474 dirent = buf->previous;
43475 if (dirent) {
43476 if (__put_user(offset, &dirent->d_off))
43477 @@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43478 buf.previous = NULL;
43479 buf.count = count;
43480 buf.error = 0;
43481 + buf.file = file;
43482
43483 error = vfs_readdir(file, compat_filldir, &buf);
43484 if (error >= 0)
43485 @@ -989,6 +1001,7 @@ out:
43486 struct compat_getdents_callback64 {
43487 struct linux_dirent64 __user *current_dir;
43488 struct linux_dirent64 __user *previous;
43489 + struct file * file;
43490 int count;
43491 int error;
43492 };
43493 @@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43494 buf->error = -EINVAL; /* only used if we fail.. */
43495 if (reclen > buf->count)
43496 return -EINVAL;
43497 +
43498 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43499 + return 0;
43500 +
43501 dirent = buf->previous;
43502
43503 if (dirent) {
43504 @@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43505 buf.previous = NULL;
43506 buf.count = count;
43507 buf.error = 0;
43508 + buf.file = file;
43509
43510 error = vfs_readdir(file, compat_filldir64, &buf);
43511 if (error >= 0)
43512 error = buf.error;
43513 lastdirent = buf.previous;
43514 if (lastdirent) {
43515 - typeof(lastdirent->d_off) d_off = file->f_pos;
43516 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43517 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43518 error = -EFAULT;
43519 else
43520 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43521 index 112e45a..b59845b 100644
43522 --- a/fs/compat_binfmt_elf.c
43523 +++ b/fs/compat_binfmt_elf.c
43524 @@ -30,11 +30,13 @@
43525 #undef elf_phdr
43526 #undef elf_shdr
43527 #undef elf_note
43528 +#undef elf_dyn
43529 #undef elf_addr_t
43530 #define elfhdr elf32_hdr
43531 #define elf_phdr elf32_phdr
43532 #define elf_shdr elf32_shdr
43533 #define elf_note elf32_note
43534 +#define elf_dyn Elf32_Dyn
43535 #define elf_addr_t Elf32_Addr
43536
43537 /*
43538 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43539 index debdfe0..75d31d4 100644
43540 --- a/fs/compat_ioctl.c
43541 +++ b/fs/compat_ioctl.c
43542 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43543
43544 err = get_user(palp, &up->palette);
43545 err |= get_user(length, &up->length);
43546 + if (err)
43547 + return -EFAULT;
43548
43549 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43550 err = put_user(compat_ptr(palp), &up_native->palette);
43551 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43552 return -EFAULT;
43553 if (__get_user(udata, &ss32->iomem_base))
43554 return -EFAULT;
43555 - ss.iomem_base = compat_ptr(udata);
43556 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43557 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43558 __get_user(ss.port_high, &ss32->port_high))
43559 return -EFAULT;
43560 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43561 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43562 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43563 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43564 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43565 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43566 return -EFAULT;
43567
43568 return ioctl_preallocate(file, p);
43569 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43570 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43571 {
43572 unsigned int a, b;
43573 - a = *(unsigned int *)p;
43574 - b = *(unsigned int *)q;
43575 + a = *(const unsigned int *)p;
43576 + b = *(const unsigned int *)q;
43577 if (a > b)
43578 return 1;
43579 if (a < b)
43580 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43581 index 7e6c52d..94bc756 100644
43582 --- a/fs/configfs/dir.c
43583 +++ b/fs/configfs/dir.c
43584 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43585 }
43586 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43587 struct configfs_dirent *next;
43588 - const char * name;
43589 + const unsigned char * name;
43590 + char d_name[sizeof(next->s_dentry->d_iname)];
43591 int len;
43592 struct inode *inode = NULL;
43593
43594 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43595 continue;
43596
43597 name = configfs_get_name(next);
43598 - len = strlen(name);
43599 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43600 + len = next->s_dentry->d_name.len;
43601 + memcpy(d_name, name, len);
43602 + name = d_name;
43603 + } else
43604 + len = strlen(name);
43605
43606 /*
43607 * We'll have a dentry and an inode for
43608 diff --git a/fs/dcache.c b/fs/dcache.c
43609 index b80531c..8ca7e2d 100644
43610 --- a/fs/dcache.c
43611 +++ b/fs/dcache.c
43612 @@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43613 mempages -= reserve;
43614
43615 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43616 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43617 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43618
43619 dcache_init();
43620 inode_init();
43621 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43622 index b80bc84..0d46d1a 100644
43623 --- a/fs/debugfs/inode.c
43624 +++ b/fs/debugfs/inode.c
43625 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43626 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43627 {
43628 return debugfs_create_file(name,
43629 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43630 + S_IFDIR | S_IRWXU,
43631 +#else
43632 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43633 +#endif
43634 parent, NULL, NULL);
43635 }
43636 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43637 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43638 index ab35b11..b30af66 100644
43639 --- a/fs/ecryptfs/inode.c
43640 +++ b/fs/ecryptfs/inode.c
43641 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43642 old_fs = get_fs();
43643 set_fs(get_ds());
43644 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43645 - (char __user *)lower_buf,
43646 + (char __force_user *)lower_buf,
43647 lower_bufsiz);
43648 set_fs(old_fs);
43649 if (rc < 0)
43650 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43651 }
43652 old_fs = get_fs();
43653 set_fs(get_ds());
43654 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43655 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43656 set_fs(old_fs);
43657 if (rc < 0) {
43658 kfree(buf);
43659 @@ -733,7 +733,7 @@ out:
43660 static void
43661 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43662 {
43663 - char *buf = nd_get_link(nd);
43664 + const char *buf = nd_get_link(nd);
43665 if (!IS_ERR(buf)) {
43666 /* Free the char* */
43667 kfree(buf);
43668 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43669 index c0038f6..47ab347 100644
43670 --- a/fs/ecryptfs/miscdev.c
43671 +++ b/fs/ecryptfs/miscdev.c
43672 @@ -355,7 +355,7 @@ check_list:
43673 goto out_unlock_msg_ctx;
43674 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43675 if (msg_ctx->msg) {
43676 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43677 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43678 goto out_unlock_msg_ctx;
43679 i += packet_length_size;
43680 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43681 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43682 index b2a34a1..162fa69 100644
43683 --- a/fs/ecryptfs/read_write.c
43684 +++ b/fs/ecryptfs/read_write.c
43685 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43686 return -EIO;
43687 fs_save = get_fs();
43688 set_fs(get_ds());
43689 - rc = vfs_write(lower_file, data, size, &offset);
43690 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43691 set_fs(fs_save);
43692 mark_inode_dirty_sync(ecryptfs_inode);
43693 return rc;
43694 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43695 return -EIO;
43696 fs_save = get_fs();
43697 set_fs(get_ds());
43698 - rc = vfs_read(lower_file, data, size, &offset);
43699 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43700 set_fs(fs_save);
43701 return rc;
43702 }
43703 diff --git a/fs/exec.c b/fs/exec.c
43704 index 29e5f84..8bfc7cb 100644
43705 --- a/fs/exec.c
43706 +++ b/fs/exec.c
43707 @@ -55,6 +55,15 @@
43708 #include <linux/pipe_fs_i.h>
43709 #include <linux/oom.h>
43710 #include <linux/compat.h>
43711 +#include <linux/random.h>
43712 +#include <linux/seq_file.h>
43713 +
43714 +#ifdef CONFIG_PAX_REFCOUNT
43715 +#include <linux/kallsyms.h>
43716 +#include <linux/kdebug.h>
43717 +#endif
43718 +
43719 +#include <trace/events/fs.h>
43720
43721 #include <asm/uaccess.h>
43722 #include <asm/mmu_context.h>
43723 @@ -66,6 +75,18 @@
43724
43725 #include <trace/events/sched.h>
43726
43727 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43728 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43729 +{
43730 + WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43731 +}
43732 +#endif
43733 +
43734 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43735 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43736 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43737 +#endif
43738 +
43739 int core_uses_pid;
43740 char core_pattern[CORENAME_MAX_SIZE] = "core";
43741 unsigned int core_pipe_limit;
43742 @@ -75,7 +96,7 @@ struct core_name {
43743 char *corename;
43744 int used, size;
43745 };
43746 -static atomic_t call_count = ATOMIC_INIT(1);
43747 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43748
43749 /* The maximal length of core_pattern is also specified in sysctl.c */
43750
43751 @@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43752 int write)
43753 {
43754 struct page *page;
43755 - int ret;
43756
43757 -#ifdef CONFIG_STACK_GROWSUP
43758 - if (write) {
43759 - ret = expand_downwards(bprm->vma, pos);
43760 - if (ret < 0)
43761 - return NULL;
43762 - }
43763 -#endif
43764 - ret = get_user_pages(current, bprm->mm, pos,
43765 - 1, write, 1, &page, NULL);
43766 - if (ret <= 0)
43767 + if (0 > expand_downwards(bprm->vma, pos))
43768 + return NULL;
43769 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43770 return NULL;
43771
43772 if (write) {
43773 @@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43774 if (size <= ARG_MAX)
43775 return page;
43776
43777 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43778 + // only allow 512KB for argv+env on suid/sgid binaries
43779 + // to prevent easy ASLR exhaustion
43780 + if (((bprm->cred->euid != current_euid()) ||
43781 + (bprm->cred->egid != current_egid())) &&
43782 + (size > (512 * 1024))) {
43783 + put_page(page);
43784 + return NULL;
43785 + }
43786 +#endif
43787 +
43788 /*
43789 * Limit to 1/4-th the stack size for the argv+env strings.
43790 * This ensures that:
43791 @@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43792 vma->vm_end = STACK_TOP_MAX;
43793 vma->vm_start = vma->vm_end - PAGE_SIZE;
43794 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43795 +
43796 +#ifdef CONFIG_PAX_SEGMEXEC
43797 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43798 +#endif
43799 +
43800 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43801 INIT_LIST_HEAD(&vma->anon_vma_chain);
43802
43803 @@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43804 mm->stack_vm = mm->total_vm = 1;
43805 up_write(&mm->mmap_sem);
43806 bprm->p = vma->vm_end - sizeof(void *);
43807 +
43808 +#ifdef CONFIG_PAX_RANDUSTACK
43809 + if (randomize_va_space)
43810 + bprm->p ^= random32() & ~PAGE_MASK;
43811 +#endif
43812 +
43813 return 0;
43814 err:
43815 up_write(&mm->mmap_sem);
43816 @@ -399,19 +434,7 @@ err:
43817 return err;
43818 }
43819
43820 -struct user_arg_ptr {
43821 -#ifdef CONFIG_COMPAT
43822 - bool is_compat;
43823 -#endif
43824 - union {
43825 - const char __user *const __user *native;
43826 -#ifdef CONFIG_COMPAT
43827 - compat_uptr_t __user *compat;
43828 -#endif
43829 - } ptr;
43830 -};
43831 -
43832 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43833 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43834 {
43835 const char __user *native;
43836
43837 @@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43838 compat_uptr_t compat;
43839
43840 if (get_user(compat, argv.ptr.compat + nr))
43841 - return ERR_PTR(-EFAULT);
43842 + return (const char __force_user *)ERR_PTR(-EFAULT);
43843
43844 return compat_ptr(compat);
43845 }
43846 #endif
43847
43848 if (get_user(native, argv.ptr.native + nr))
43849 - return ERR_PTR(-EFAULT);
43850 + return (const char __force_user *)ERR_PTR(-EFAULT);
43851
43852 return native;
43853 }
43854 @@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43855 if (!p)
43856 break;
43857
43858 - if (IS_ERR(p))
43859 + if (IS_ERR((const char __force_kernel *)p))
43860 return -EFAULT;
43861
43862 if (i++ >= max)
43863 @@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43864
43865 ret = -EFAULT;
43866 str = get_user_arg_ptr(argv, argc);
43867 - if (IS_ERR(str))
43868 + if (IS_ERR((const char __force_kernel *)str))
43869 goto out;
43870
43871 len = strnlen_user(str, MAX_ARG_STRLEN);
43872 @@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43873 int r;
43874 mm_segment_t oldfs = get_fs();
43875 struct user_arg_ptr argv = {
43876 - .ptr.native = (const char __user *const __user *)__argv,
43877 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43878 };
43879
43880 set_fs(KERNEL_DS);
43881 @@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43882 unsigned long new_end = old_end - shift;
43883 struct mmu_gather tlb;
43884
43885 - BUG_ON(new_start > new_end);
43886 + if (new_start >= new_end || new_start < mmap_min_addr)
43887 + return -ENOMEM;
43888
43889 /*
43890 * ensure there are no vmas between where we want to go
43891 @@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43892 if (vma != find_vma(mm, new_start))
43893 return -EFAULT;
43894
43895 +#ifdef CONFIG_PAX_SEGMEXEC
43896 + BUG_ON(pax_find_mirror_vma(vma));
43897 +#endif
43898 +
43899 /*
43900 * cover the whole range: [new_start, old_end)
43901 */
43902 @@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43903 stack_top = arch_align_stack(stack_top);
43904 stack_top = PAGE_ALIGN(stack_top);
43905
43906 - if (unlikely(stack_top < mmap_min_addr) ||
43907 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43908 - return -ENOMEM;
43909 -
43910 stack_shift = vma->vm_end - stack_top;
43911
43912 bprm->p -= stack_shift;
43913 @@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43914 bprm->exec -= stack_shift;
43915
43916 down_write(&mm->mmap_sem);
43917 +
43918 + /* Move stack pages down in memory. */
43919 + if (stack_shift) {
43920 + ret = shift_arg_pages(vma, stack_shift);
43921 + if (ret)
43922 + goto out_unlock;
43923 + }
43924 +
43925 vm_flags = VM_STACK_FLAGS;
43926
43927 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43928 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43929 + vm_flags &= ~VM_EXEC;
43930 +
43931 +#ifdef CONFIG_PAX_MPROTECT
43932 + if (mm->pax_flags & MF_PAX_MPROTECT)
43933 + vm_flags &= ~VM_MAYEXEC;
43934 +#endif
43935 +
43936 + }
43937 +#endif
43938 +
43939 /*
43940 * Adjust stack execute permissions; explicitly enable for
43941 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43942 @@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43943 goto out_unlock;
43944 BUG_ON(prev != vma);
43945
43946 - /* Move stack pages down in memory. */
43947 - if (stack_shift) {
43948 - ret = shift_arg_pages(vma, stack_shift);
43949 - if (ret)
43950 - goto out_unlock;
43951 - }
43952 -
43953 /* mprotect_fixup is overkill to remove the temporary stack flags */
43954 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43955
43956 @@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43957
43958 fsnotify_open(file);
43959
43960 + trace_open_exec(name);
43961 +
43962 err = deny_write_access(file);
43963 if (err)
43964 goto exit;
43965 @@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43966 old_fs = get_fs();
43967 set_fs(get_ds());
43968 /* The cast to a user pointer is valid due to the set_fs() */
43969 - result = vfs_read(file, (void __user *)addr, count, &pos);
43970 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43971 set_fs(old_fs);
43972 return result;
43973 }
43974 @@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43975 }
43976 rcu_read_unlock();
43977
43978 - if (p->fs->users > n_fs) {
43979 + if (atomic_read(&p->fs->users) > n_fs) {
43980 bprm->unsafe |= LSM_UNSAFE_SHARE;
43981 } else {
43982 res = -EAGAIN;
43983 @@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43984
43985 EXPORT_SYMBOL(search_binary_handler);
43986
43987 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43988 +static DEFINE_PER_CPU(u64, exec_counter);
43989 +static int __init init_exec_counters(void)
43990 +{
43991 + unsigned int cpu;
43992 +
43993 + for_each_possible_cpu(cpu) {
43994 + per_cpu(exec_counter, cpu) = (u64)cpu;
43995 + }
43996 +
43997 + return 0;
43998 +}
43999 +early_initcall(init_exec_counters);
44000 +static inline void increment_exec_counter(void)
44001 +{
44002 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44003 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44004 +}
44005 +#else
44006 +static inline void increment_exec_counter(void) {}
44007 +#endif
44008 +
44009 /*
44010 * sys_execve() executes a new program.
44011 */
44012 @@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
44013 struct user_arg_ptr envp,
44014 struct pt_regs *regs)
44015 {
44016 +#ifdef CONFIG_GRKERNSEC
44017 + struct file *old_exec_file;
44018 + struct acl_subject_label *old_acl;
44019 + struct rlimit old_rlim[RLIM_NLIMITS];
44020 +#endif
44021 struct linux_binprm *bprm;
44022 struct file *file;
44023 struct files_struct *displaced;
44024 @@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
44025 int retval;
44026 const struct cred *cred = current_cred();
44027
44028 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44029 +
44030 /*
44031 * We move the actual failure in case of RLIMIT_NPROC excess from
44032 * set*uid() to execve() because too many poorly written programs
44033 @@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
44034 if (IS_ERR(file))
44035 goto out_unmark;
44036
44037 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44038 + retval = -EPERM;
44039 + goto out_file;
44040 + }
44041 +
44042 sched_exec();
44043
44044 bprm->file = file;
44045 bprm->filename = filename;
44046 bprm->interp = filename;
44047
44048 + if (gr_process_user_ban()) {
44049 + retval = -EPERM;
44050 + goto out_file;
44051 + }
44052 +
44053 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44054 + retval = -EACCES;
44055 + goto out_file;
44056 + }
44057 +
44058 retval = bprm_mm_init(bprm);
44059 if (retval)
44060 goto out_file;
44061 @@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
44062 if (retval < 0)
44063 goto out;
44064
44065 +#ifdef CONFIG_GRKERNSEC
44066 + old_acl = current->acl;
44067 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44068 + old_exec_file = current->exec_file;
44069 + get_file(file);
44070 + current->exec_file = file;
44071 +#endif
44072 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44073 + /* limit suid stack to 8MB
44074 + we saved the old limits above and will restore them if this exec fails
44075 + */
44076 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44077 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44078 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44079 +#endif
44080 +
44081 + if (!gr_tpe_allow(file)) {
44082 + retval = -EACCES;
44083 + goto out_fail;
44084 + }
44085 +
44086 + if (gr_check_crash_exec(file)) {
44087 + retval = -EACCES;
44088 + goto out_fail;
44089 + }
44090 +
44091 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44092 + bprm->unsafe);
44093 + if (retval < 0)
44094 + goto out_fail;
44095 +
44096 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44097 if (retval < 0)
44098 - goto out;
44099 + goto out_fail;
44100
44101 bprm->exec = bprm->p;
44102 retval = copy_strings(bprm->envc, envp, bprm);
44103 if (retval < 0)
44104 - goto out;
44105 + goto out_fail;
44106
44107 retval = copy_strings(bprm->argc, argv, bprm);
44108 if (retval < 0)
44109 - goto out;
44110 + goto out_fail;
44111 +
44112 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44113 +
44114 + gr_handle_exec_args(bprm, argv);
44115
44116 retval = search_binary_handler(bprm,regs);
44117 if (retval < 0)
44118 - goto out;
44119 + goto out_fail;
44120 +#ifdef CONFIG_GRKERNSEC
44121 + if (old_exec_file)
44122 + fput(old_exec_file);
44123 +#endif
44124
44125 /* execve succeeded */
44126 +
44127 + increment_exec_counter();
44128 current->fs->in_exec = 0;
44129 current->in_execve = 0;
44130 acct_update_integrals(current);
44131 @@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
44132 put_files_struct(displaced);
44133 return retval;
44134
44135 +out_fail:
44136 +#ifdef CONFIG_GRKERNSEC
44137 + current->acl = old_acl;
44138 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44139 + fput(current->exec_file);
44140 + current->exec_file = old_exec_file;
44141 +#endif
44142 +
44143 out:
44144 if (bprm->mm) {
44145 acct_arg_size(bprm, 0);
44146 @@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
44147 {
44148 char *old_corename = cn->corename;
44149
44150 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44151 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44152 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44153
44154 if (!cn->corename) {
44155 @@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
44156 int pid_in_pattern = 0;
44157 int err = 0;
44158
44159 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44160 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44161 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44162 cn->used = 0;
44163
44164 @@ -1821,6 +1953,250 @@ out:
44165 return ispipe;
44166 }
44167
44168 +int pax_check_flags(unsigned long *flags)
44169 +{
44170 + int retval = 0;
44171 +
44172 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44173 + if (*flags & MF_PAX_SEGMEXEC)
44174 + {
44175 + *flags &= ~MF_PAX_SEGMEXEC;
44176 + retval = -EINVAL;
44177 + }
44178 +#endif
44179 +
44180 + if ((*flags & MF_PAX_PAGEEXEC)
44181 +
44182 +#ifdef CONFIG_PAX_PAGEEXEC
44183 + && (*flags & MF_PAX_SEGMEXEC)
44184 +#endif
44185 +
44186 + )
44187 + {
44188 + *flags &= ~MF_PAX_PAGEEXEC;
44189 + retval = -EINVAL;
44190 + }
44191 +
44192 + if ((*flags & MF_PAX_MPROTECT)
44193 +
44194 +#ifdef CONFIG_PAX_MPROTECT
44195 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44196 +#endif
44197 +
44198 + )
44199 + {
44200 + *flags &= ~MF_PAX_MPROTECT;
44201 + retval = -EINVAL;
44202 + }
44203 +
44204 + if ((*flags & MF_PAX_EMUTRAMP)
44205 +
44206 +#ifdef CONFIG_PAX_EMUTRAMP
44207 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44208 +#endif
44209 +
44210 + )
44211 + {
44212 + *flags &= ~MF_PAX_EMUTRAMP;
44213 + retval = -EINVAL;
44214 + }
44215 +
44216 + return retval;
44217 +}
44218 +
44219 +EXPORT_SYMBOL(pax_check_flags);
44220 +
44221 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44222 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44223 +{
44224 + struct task_struct *tsk = current;
44225 + struct mm_struct *mm = current->mm;
44226 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44227 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44228 + char *path_exec = NULL;
44229 + char *path_fault = NULL;
44230 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44231 +
44232 + if (buffer_exec && buffer_fault) {
44233 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44234 +
44235 + down_read(&mm->mmap_sem);
44236 + vma = mm->mmap;
44237 + while (vma && (!vma_exec || !vma_fault)) {
44238 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44239 + vma_exec = vma;
44240 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44241 + vma_fault = vma;
44242 + vma = vma->vm_next;
44243 + }
44244 + if (vma_exec) {
44245 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44246 + if (IS_ERR(path_exec))
44247 + path_exec = "<path too long>";
44248 + else {
44249 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44250 + if (path_exec) {
44251 + *path_exec = 0;
44252 + path_exec = buffer_exec;
44253 + } else
44254 + path_exec = "<path too long>";
44255 + }
44256 + }
44257 + if (vma_fault) {
44258 + start = vma_fault->vm_start;
44259 + end = vma_fault->vm_end;
44260 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44261 + if (vma_fault->vm_file) {
44262 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44263 + if (IS_ERR(path_fault))
44264 + path_fault = "<path too long>";
44265 + else {
44266 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44267 + if (path_fault) {
44268 + *path_fault = 0;
44269 + path_fault = buffer_fault;
44270 + } else
44271 + path_fault = "<path too long>";
44272 + }
44273 + } else
44274 + path_fault = "<anonymous mapping>";
44275 + }
44276 + up_read(&mm->mmap_sem);
44277 + }
44278 + if (tsk->signal->curr_ip)
44279 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44280 + else
44281 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44282 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44283 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44284 + task_uid(tsk), task_euid(tsk), pc, sp);
44285 + free_page((unsigned long)buffer_exec);
44286 + free_page((unsigned long)buffer_fault);
44287 + pax_report_insns(regs, pc, sp);
44288 + do_coredump(SIGKILL, SIGKILL, regs);
44289 +}
44290 +#endif
44291 +
44292 +#ifdef CONFIG_PAX_REFCOUNT
44293 +void pax_report_refcount_overflow(struct pt_regs *regs)
44294 +{
44295 + if (current->signal->curr_ip)
44296 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44297 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44298 + else
44299 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44300 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44301 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44302 + show_regs(regs);
44303 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44304 +}
44305 +#endif
44306 +
44307 +#ifdef CONFIG_PAX_USERCOPY
44308 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44309 +static noinline int check_stack_object(const void *obj, unsigned long len)
44310 +{
44311 + const void * const stack = task_stack_page(current);
44312 + const void * const stackend = stack + THREAD_SIZE;
44313 +
44314 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44315 + const void *frame = NULL;
44316 + const void *oldframe;
44317 +#endif
44318 +
44319 + if (obj + len < obj)
44320 + return -1;
44321 +
44322 + if (obj + len <= stack || stackend <= obj)
44323 + return 0;
44324 +
44325 + if (obj < stack || stackend < obj + len)
44326 + return -1;
44327 +
44328 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44329 + oldframe = __builtin_frame_address(1);
44330 + if (oldframe)
44331 + frame = __builtin_frame_address(2);
44332 + /*
44333 + low ----------------------------------------------> high
44334 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44335 + ^----------------^
44336 + allow copies only within here
44337 + */
44338 + while (stack <= frame && frame < stackend) {
44339 + /* if obj + len extends past the last frame, this
44340 + check won't pass and the next frame will be 0,
44341 + causing us to bail out and correctly report
44342 + the copy as invalid
44343 + */
44344 + if (obj + len <= frame)
44345 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44346 + oldframe = frame;
44347 + frame = *(const void * const *)frame;
44348 + }
44349 + return -1;
44350 +#else
44351 + return 1;
44352 +#endif
44353 +}
44354 +
44355 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44356 +{
44357 + if (current->signal->curr_ip)
44358 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44359 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44360 + else
44361 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44362 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44363 + dump_stack();
44364 + gr_handle_kernel_exploit();
44365 + do_group_exit(SIGKILL);
44366 +}
44367 +#endif
44368 +
44369 +void check_object_size(const void *ptr, unsigned long n, bool to)
44370 +{
44371 +
44372 +#ifdef CONFIG_PAX_USERCOPY
44373 + const char *type;
44374 +
44375 + if (!n)
44376 + return;
44377 +
44378 + type = check_heap_object(ptr, n, to);
44379 + if (!type) {
44380 + if (check_stack_object(ptr, n) != -1)
44381 + return;
44382 + type = "<process stack>";
44383 + }
44384 +
44385 + pax_report_usercopy(ptr, n, to, type);
44386 +#endif
44387 +
44388 +}
44389 +EXPORT_SYMBOL(check_object_size);
44390 +
44391 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44392 +void pax_track_stack(void)
44393 +{
44394 + unsigned long sp = (unsigned long)&sp;
44395 + if (sp < current_thread_info()->lowest_stack &&
44396 + sp > (unsigned long)task_stack_page(current))
44397 + current_thread_info()->lowest_stack = sp;
44398 +}
44399 +EXPORT_SYMBOL(pax_track_stack);
44400 +#endif
44401 +
44402 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
44403 +void report_size_overflow(const char *file, unsigned int line, const char *func)
44404 +{
44405 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44406 + dump_stack();
44407 + do_group_exit(SIGKILL);
44408 +}
44409 +EXPORT_SYMBOL(report_size_overflow);
44410 +#endif
44411 +
44412 static int zap_process(struct task_struct *start, int exit_code)
44413 {
44414 struct task_struct *t;
44415 @@ -2018,17 +2394,17 @@ static void wait_for_dump_helpers(struct file *file)
44416 pipe = file->f_path.dentry->d_inode->i_pipe;
44417
44418 pipe_lock(pipe);
44419 - pipe->readers++;
44420 - pipe->writers--;
44421 + atomic_inc(&pipe->readers);
44422 + atomic_dec(&pipe->writers);
44423
44424 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44425 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44426 wake_up_interruptible_sync(&pipe->wait);
44427 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44428 pipe_wait(pipe);
44429 }
44430
44431 - pipe->readers--;
44432 - pipe->writers++;
44433 + atomic_dec(&pipe->readers);
44434 + atomic_inc(&pipe->writers);
44435 pipe_unlock(pipe);
44436
44437 }
44438 @@ -2089,7 +2465,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44439 int retval = 0;
44440 int flag = 0;
44441 int ispipe;
44442 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44443 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44444 struct coredump_params cprm = {
44445 .signr = signr,
44446 .regs = regs,
44447 @@ -2104,6 +2480,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44448
44449 audit_core_dumps(signr);
44450
44451 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44452 + gr_handle_brute_attach(current, cprm.mm_flags);
44453 +
44454 binfmt = mm->binfmt;
44455 if (!binfmt || !binfmt->core_dump)
44456 goto fail;
44457 @@ -2171,7 +2550,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44458 }
44459 cprm.limit = RLIM_INFINITY;
44460
44461 - dump_count = atomic_inc_return(&core_dump_count);
44462 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44463 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44464 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44465 task_tgid_vnr(current), current->comm);
44466 @@ -2198,6 +2577,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44467 } else {
44468 struct inode *inode;
44469
44470 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44471 +
44472 if (cprm.limit < binfmt->min_coredump)
44473 goto fail_unlock;
44474
44475 @@ -2241,7 +2622,7 @@ close_fail:
44476 filp_close(cprm.file, NULL);
44477 fail_dropcount:
44478 if (ispipe)
44479 - atomic_dec(&core_dump_count);
44480 + atomic_dec_unchecked(&core_dump_count);
44481 fail_unlock:
44482 kfree(cn.corename);
44483 fail_corename:
44484 @@ -2260,7 +2641,7 @@ fail:
44485 */
44486 int dump_write(struct file *file, const void *addr, int nr)
44487 {
44488 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44489 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44490 }
44491 EXPORT_SYMBOL(dump_write);
44492
44493 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44494 index a8cbe1b..fed04cb 100644
44495 --- a/fs/ext2/balloc.c
44496 +++ b/fs/ext2/balloc.c
44497 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44498
44499 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44500 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44501 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44502 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44503 sbi->s_resuid != current_fsuid() &&
44504 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44505 return 0;
44506 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44507 index baac1b1..1499b62 100644
44508 --- a/fs/ext3/balloc.c
44509 +++ b/fs/ext3/balloc.c
44510 @@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44511
44512 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44513 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44514 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44515 + if (free_blocks < root_blocks + 1 &&
44516 !use_reservation && sbi->s_resuid != current_fsuid() &&
44517 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44518 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44519 + !capable_nolog(CAP_SYS_RESOURCE)) {
44520 return 0;
44521 }
44522 return 1;
44523 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44524 index 8da837b..ed3835b 100644
44525 --- a/fs/ext4/balloc.c
44526 +++ b/fs/ext4/balloc.c
44527 @@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44528 /* Hm, nope. Are (enough) root reserved clusters available? */
44529 if (sbi->s_resuid == current_fsuid() ||
44530 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44531 - capable(CAP_SYS_RESOURCE) ||
44532 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44533 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44534 + capable_nolog(CAP_SYS_RESOURCE)) {
44535
44536 if (free_clusters >= (nclusters + dirty_clusters))
44537 return 1;
44538 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44539 index 0e01e90..ae2bd5e 100644
44540 --- a/fs/ext4/ext4.h
44541 +++ b/fs/ext4/ext4.h
44542 @@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44543 unsigned long s_mb_last_start;
44544
44545 /* stats for buddy allocator */
44546 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44547 - atomic_t s_bal_success; /* we found long enough chunks */
44548 - atomic_t s_bal_allocated; /* in blocks */
44549 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44550 - atomic_t s_bal_goals; /* goal hits */
44551 - atomic_t s_bal_breaks; /* too long searches */
44552 - atomic_t s_bal_2orders; /* 2^order hits */
44553 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44554 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44555 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44556 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44557 + atomic_unchecked_t s_bal_goals; /* goal hits */
44558 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44559 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44560 spinlock_t s_bal_lock;
44561 unsigned long s_mb_buddies_generated;
44562 unsigned long long s_mb_generation_time;
44563 - atomic_t s_mb_lost_chunks;
44564 - atomic_t s_mb_preallocated;
44565 - atomic_t s_mb_discarded;
44566 + atomic_unchecked_t s_mb_lost_chunks;
44567 + atomic_unchecked_t s_mb_preallocated;
44568 + atomic_unchecked_t s_mb_discarded;
44569 atomic_t s_lock_busy;
44570
44571 /* locality groups */
44572 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44573 index 1365903..9727522 100644
44574 --- a/fs/ext4/ioctl.c
44575 +++ b/fs/ext4/ioctl.c
44576 @@ -261,7 +261,6 @@ group_extend_out:
44577 err = ext4_move_extents(filp, donor_filp, me.orig_start,
44578 me.donor_start, me.len, &me.moved_len);
44579 mnt_drop_write_file(filp);
44580 - mnt_drop_write(filp->f_path.mnt);
44581
44582 if (copy_to_user((struct move_extent __user *)arg,
44583 &me, sizeof(me)))
44584 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44585 index 6b0a57e..1955a44 100644
44586 --- a/fs/ext4/mballoc.c
44587 +++ b/fs/ext4/mballoc.c
44588 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44589 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44590
44591 if (EXT4_SB(sb)->s_mb_stats)
44592 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44593 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44594
44595 break;
44596 }
44597 @@ -2041,7 +2041,7 @@ repeat:
44598 ac->ac_status = AC_STATUS_CONTINUE;
44599 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44600 cr = 3;
44601 - atomic_inc(&sbi->s_mb_lost_chunks);
44602 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44603 goto repeat;
44604 }
44605 }
44606 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44607 if (sbi->s_mb_stats) {
44608 ext4_msg(sb, KERN_INFO,
44609 "mballoc: %u blocks %u reqs (%u success)",
44610 - atomic_read(&sbi->s_bal_allocated),
44611 - atomic_read(&sbi->s_bal_reqs),
44612 - atomic_read(&sbi->s_bal_success));
44613 + atomic_read_unchecked(&sbi->s_bal_allocated),
44614 + atomic_read_unchecked(&sbi->s_bal_reqs),
44615 + atomic_read_unchecked(&sbi->s_bal_success));
44616 ext4_msg(sb, KERN_INFO,
44617 "mballoc: %u extents scanned, %u goal hits, "
44618 "%u 2^N hits, %u breaks, %u lost",
44619 - atomic_read(&sbi->s_bal_ex_scanned),
44620 - atomic_read(&sbi->s_bal_goals),
44621 - atomic_read(&sbi->s_bal_2orders),
44622 - atomic_read(&sbi->s_bal_breaks),
44623 - atomic_read(&sbi->s_mb_lost_chunks));
44624 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44625 + atomic_read_unchecked(&sbi->s_bal_goals),
44626 + atomic_read_unchecked(&sbi->s_bal_2orders),
44627 + atomic_read_unchecked(&sbi->s_bal_breaks),
44628 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44629 ext4_msg(sb, KERN_INFO,
44630 "mballoc: %lu generated and it took %Lu",
44631 sbi->s_mb_buddies_generated,
44632 sbi->s_mb_generation_time);
44633 ext4_msg(sb, KERN_INFO,
44634 "mballoc: %u preallocated, %u discarded",
44635 - atomic_read(&sbi->s_mb_preallocated),
44636 - atomic_read(&sbi->s_mb_discarded));
44637 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44638 + atomic_read_unchecked(&sbi->s_mb_discarded));
44639 }
44640
44641 free_percpu(sbi->s_locality_groups);
44642 @@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44643 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44644
44645 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44646 - atomic_inc(&sbi->s_bal_reqs);
44647 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44648 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44649 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44650 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44651 - atomic_inc(&sbi->s_bal_success);
44652 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44653 + atomic_inc_unchecked(&sbi->s_bal_success);
44654 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44655 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44656 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44657 - atomic_inc(&sbi->s_bal_goals);
44658 + atomic_inc_unchecked(&sbi->s_bal_goals);
44659 if (ac->ac_found > sbi->s_mb_max_to_scan)
44660 - atomic_inc(&sbi->s_bal_breaks);
44661 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44662 }
44663
44664 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44665 @@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44666 trace_ext4_mb_new_inode_pa(ac, pa);
44667
44668 ext4_mb_use_inode_pa(ac, pa);
44669 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44670 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44671
44672 ei = EXT4_I(ac->ac_inode);
44673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44674 @@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44675 trace_ext4_mb_new_group_pa(ac, pa);
44676
44677 ext4_mb_use_group_pa(ac, pa);
44678 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44679 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44680
44681 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44682 lg = ac->ac_lg;
44683 @@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44684 * from the bitmap and continue.
44685 */
44686 }
44687 - atomic_add(free, &sbi->s_mb_discarded);
44688 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44689
44690 return err;
44691 }
44692 @@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44693 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44694 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44695 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44696 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44697 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44698 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44699
44700 return 0;
44701 diff --git a/fs/fcntl.c b/fs/fcntl.c
44702 index 75e7c1f..1eb3e4d 100644
44703 --- a/fs/fcntl.c
44704 +++ b/fs/fcntl.c
44705 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44706 if (err)
44707 return err;
44708
44709 + if (gr_handle_chroot_fowner(pid, type))
44710 + return -ENOENT;
44711 + if (gr_check_protected_task_fowner(pid, type))
44712 + return -EACCES;
44713 +
44714 f_modown(filp, pid, type, force);
44715 return 0;
44716 }
44717 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44718
44719 static int f_setown_ex(struct file *filp, unsigned long arg)
44720 {
44721 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44722 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44723 struct f_owner_ex owner;
44724 struct pid *pid;
44725 int type;
44726 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44727
44728 static int f_getown_ex(struct file *filp, unsigned long arg)
44729 {
44730 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44731 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44732 struct f_owner_ex owner;
44733 int ret = 0;
44734
44735 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44736 switch (cmd) {
44737 case F_DUPFD:
44738 case F_DUPFD_CLOEXEC:
44739 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44740 if (arg >= rlimit(RLIMIT_NOFILE))
44741 break;
44742 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44743 diff --git a/fs/fifo.c b/fs/fifo.c
44744 index cf6f434..3d7942c 100644
44745 --- a/fs/fifo.c
44746 +++ b/fs/fifo.c
44747 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44748 */
44749 filp->f_op = &read_pipefifo_fops;
44750 pipe->r_counter++;
44751 - if (pipe->readers++ == 0)
44752 + if (atomic_inc_return(&pipe->readers) == 1)
44753 wake_up_partner(inode);
44754
44755 - if (!pipe->writers) {
44756 + if (!atomic_read(&pipe->writers)) {
44757 if ((filp->f_flags & O_NONBLOCK)) {
44758 /* suppress POLLHUP until we have
44759 * seen a writer */
44760 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44761 * errno=ENXIO when there is no process reading the FIFO.
44762 */
44763 ret = -ENXIO;
44764 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44765 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44766 goto err;
44767
44768 filp->f_op = &write_pipefifo_fops;
44769 pipe->w_counter++;
44770 - if (!pipe->writers++)
44771 + if (atomic_inc_return(&pipe->writers) == 1)
44772 wake_up_partner(inode);
44773
44774 - if (!pipe->readers) {
44775 + if (!atomic_read(&pipe->readers)) {
44776 if (wait_for_partner(inode, &pipe->r_counter))
44777 goto err_wr;
44778 }
44779 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44780 */
44781 filp->f_op = &rdwr_pipefifo_fops;
44782
44783 - pipe->readers++;
44784 - pipe->writers++;
44785 + atomic_inc(&pipe->readers);
44786 + atomic_inc(&pipe->writers);
44787 pipe->r_counter++;
44788 pipe->w_counter++;
44789 - if (pipe->readers == 1 || pipe->writers == 1)
44790 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44791 wake_up_partner(inode);
44792 break;
44793
44794 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44795 return 0;
44796
44797 err_rd:
44798 - if (!--pipe->readers)
44799 + if (atomic_dec_and_test(&pipe->readers))
44800 wake_up_interruptible(&pipe->wait);
44801 ret = -ERESTARTSYS;
44802 goto err;
44803
44804 err_wr:
44805 - if (!--pipe->writers)
44806 + if (atomic_dec_and_test(&pipe->writers))
44807 wake_up_interruptible(&pipe->wait);
44808 ret = -ERESTARTSYS;
44809 goto err;
44810
44811 err:
44812 - if (!pipe->readers && !pipe->writers)
44813 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44814 free_pipe_info(inode);
44815
44816 err_nocleanup:
44817 diff --git a/fs/file.c b/fs/file.c
44818 index ba3f605..fade102 100644
44819 --- a/fs/file.c
44820 +++ b/fs/file.c
44821 @@ -15,6 +15,7 @@
44822 #include <linux/slab.h>
44823 #include <linux/vmalloc.h>
44824 #include <linux/file.h>
44825 +#include <linux/security.h>
44826 #include <linux/fdtable.h>
44827 #include <linux/bitops.h>
44828 #include <linux/interrupt.h>
44829 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44830 * N.B. For clone tasks sharing a files structure, this test
44831 * will limit the total number of files that can be opened.
44832 */
44833 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44834 if (nr >= rlimit(RLIMIT_NOFILE))
44835 return -EMFILE;
44836
44837 diff --git a/fs/filesystems.c b/fs/filesystems.c
44838 index 96f2428..f5eeb8e 100644
44839 --- a/fs/filesystems.c
44840 +++ b/fs/filesystems.c
44841 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44842 int len = dot ? dot - name : strlen(name);
44843
44844 fs = __get_fs_type(name, len);
44845 +
44846 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44847 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44848 +#else
44849 if (!fs && (request_module("%.*s", len, name) == 0))
44850 +#endif
44851 fs = __get_fs_type(name, len);
44852
44853 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44854 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44855 index e159e68..e7d2a6f 100644
44856 --- a/fs/fs_struct.c
44857 +++ b/fs/fs_struct.c
44858 @@ -4,6 +4,7 @@
44859 #include <linux/path.h>
44860 #include <linux/slab.h>
44861 #include <linux/fs_struct.h>
44862 +#include <linux/grsecurity.h>
44863 #include "internal.h"
44864
44865 static inline void path_get_longterm(struct path *path)
44866 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44867 write_seqcount_begin(&fs->seq);
44868 old_root = fs->root;
44869 fs->root = *path;
44870 + gr_set_chroot_entries(current, path);
44871 write_seqcount_end(&fs->seq);
44872 spin_unlock(&fs->lock);
44873 if (old_root.dentry)
44874 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44875 return 1;
44876 }
44877
44878 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44879 +{
44880 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44881 + return 0;
44882 + *p = *new;
44883 +
44884 + gr_set_chroot_entries(task, new);
44885 +
44886 + return 1;
44887 +}
44888 +
44889 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44890 {
44891 struct task_struct *g, *p;
44892 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44893 int hits = 0;
44894 spin_lock(&fs->lock);
44895 write_seqcount_begin(&fs->seq);
44896 - hits += replace_path(&fs->root, old_root, new_root);
44897 + hits += replace_root_path(p, &fs->root, old_root, new_root);
44898 hits += replace_path(&fs->pwd, old_root, new_root);
44899 write_seqcount_end(&fs->seq);
44900 while (hits--) {
44901 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44902 task_lock(tsk);
44903 spin_lock(&fs->lock);
44904 tsk->fs = NULL;
44905 - kill = !--fs->users;
44906 + gr_clear_chroot_entries(tsk);
44907 + kill = !atomic_dec_return(&fs->users);
44908 spin_unlock(&fs->lock);
44909 task_unlock(tsk);
44910 if (kill)
44911 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44912 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44913 /* We don't need to lock fs - think why ;-) */
44914 if (fs) {
44915 - fs->users = 1;
44916 + atomic_set(&fs->users, 1);
44917 fs->in_exec = 0;
44918 spin_lock_init(&fs->lock);
44919 seqcount_init(&fs->seq);
44920 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44921 spin_lock(&old->lock);
44922 fs->root = old->root;
44923 path_get_longterm(&fs->root);
44924 + /* instead of calling gr_set_chroot_entries here,
44925 + we call it from every caller of this function
44926 + */
44927 fs->pwd = old->pwd;
44928 path_get_longterm(&fs->pwd);
44929 spin_unlock(&old->lock);
44930 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44931
44932 task_lock(current);
44933 spin_lock(&fs->lock);
44934 - kill = !--fs->users;
44935 + kill = !atomic_dec_return(&fs->users);
44936 current->fs = new_fs;
44937 + gr_set_chroot_entries(current, &new_fs->root);
44938 spin_unlock(&fs->lock);
44939 task_unlock(current);
44940
44941 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44942
44943 int current_umask(void)
44944 {
44945 - return current->fs->umask;
44946 + return current->fs->umask | gr_acl_umask();
44947 }
44948 EXPORT_SYMBOL(current_umask);
44949
44950 /* to be mentioned only in INIT_TASK */
44951 struct fs_struct init_fs = {
44952 - .users = 1,
44953 + .users = ATOMIC_INIT(1),
44954 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44955 .seq = SEQCNT_ZERO,
44956 .umask = 0022,
44957 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44958 task_lock(current);
44959
44960 spin_lock(&init_fs.lock);
44961 - init_fs.users++;
44962 + atomic_inc(&init_fs.users);
44963 spin_unlock(&init_fs.lock);
44964
44965 spin_lock(&fs->lock);
44966 current->fs = &init_fs;
44967 - kill = !--fs->users;
44968 + gr_set_chroot_entries(current, &current->fs->root);
44969 + kill = !atomic_dec_return(&fs->users);
44970 spin_unlock(&fs->lock);
44971
44972 task_unlock(current);
44973 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44974 index 9905350..02eaec4 100644
44975 --- a/fs/fscache/cookie.c
44976 +++ b/fs/fscache/cookie.c
44977 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44978 parent ? (char *) parent->def->name : "<no-parent>",
44979 def->name, netfs_data);
44980
44981 - fscache_stat(&fscache_n_acquires);
44982 + fscache_stat_unchecked(&fscache_n_acquires);
44983
44984 /* if there's no parent cookie, then we don't create one here either */
44985 if (!parent) {
44986 - fscache_stat(&fscache_n_acquires_null);
44987 + fscache_stat_unchecked(&fscache_n_acquires_null);
44988 _leave(" [no parent]");
44989 return NULL;
44990 }
44991 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44992 /* allocate and initialise a cookie */
44993 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44994 if (!cookie) {
44995 - fscache_stat(&fscache_n_acquires_oom);
44996 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44997 _leave(" [ENOMEM]");
44998 return NULL;
44999 }
45000 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45001
45002 switch (cookie->def->type) {
45003 case FSCACHE_COOKIE_TYPE_INDEX:
45004 - fscache_stat(&fscache_n_cookie_index);
45005 + fscache_stat_unchecked(&fscache_n_cookie_index);
45006 break;
45007 case FSCACHE_COOKIE_TYPE_DATAFILE:
45008 - fscache_stat(&fscache_n_cookie_data);
45009 + fscache_stat_unchecked(&fscache_n_cookie_data);
45010 break;
45011 default:
45012 - fscache_stat(&fscache_n_cookie_special);
45013 + fscache_stat_unchecked(&fscache_n_cookie_special);
45014 break;
45015 }
45016
45017 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45018 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45019 atomic_dec(&parent->n_children);
45020 __fscache_cookie_put(cookie);
45021 - fscache_stat(&fscache_n_acquires_nobufs);
45022 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45023 _leave(" = NULL");
45024 return NULL;
45025 }
45026 }
45027
45028 - fscache_stat(&fscache_n_acquires_ok);
45029 + fscache_stat_unchecked(&fscache_n_acquires_ok);
45030 _leave(" = %p", cookie);
45031 return cookie;
45032 }
45033 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45034 cache = fscache_select_cache_for_object(cookie->parent);
45035 if (!cache) {
45036 up_read(&fscache_addremove_sem);
45037 - fscache_stat(&fscache_n_acquires_no_cache);
45038 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45039 _leave(" = -ENOMEDIUM [no cache]");
45040 return -ENOMEDIUM;
45041 }
45042 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45043 object = cache->ops->alloc_object(cache, cookie);
45044 fscache_stat_d(&fscache_n_cop_alloc_object);
45045 if (IS_ERR(object)) {
45046 - fscache_stat(&fscache_n_object_no_alloc);
45047 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
45048 ret = PTR_ERR(object);
45049 goto error;
45050 }
45051
45052 - fscache_stat(&fscache_n_object_alloc);
45053 + fscache_stat_unchecked(&fscache_n_object_alloc);
45054
45055 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45056
45057 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45058 struct fscache_object *object;
45059 struct hlist_node *_p;
45060
45061 - fscache_stat(&fscache_n_updates);
45062 + fscache_stat_unchecked(&fscache_n_updates);
45063
45064 if (!cookie) {
45065 - fscache_stat(&fscache_n_updates_null);
45066 + fscache_stat_unchecked(&fscache_n_updates_null);
45067 _leave(" [no cookie]");
45068 return;
45069 }
45070 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45071 struct fscache_object *object;
45072 unsigned long event;
45073
45074 - fscache_stat(&fscache_n_relinquishes);
45075 + fscache_stat_unchecked(&fscache_n_relinquishes);
45076 if (retire)
45077 - fscache_stat(&fscache_n_relinquishes_retire);
45078 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45079
45080 if (!cookie) {
45081 - fscache_stat(&fscache_n_relinquishes_null);
45082 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
45083 _leave(" [no cookie]");
45084 return;
45085 }
45086 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45087
45088 /* wait for the cookie to finish being instantiated (or to fail) */
45089 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45090 - fscache_stat(&fscache_n_relinquishes_waitcrt);
45091 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45092 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45093 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45094 }
45095 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45096 index f6aad48..88dcf26 100644
45097 --- a/fs/fscache/internal.h
45098 +++ b/fs/fscache/internal.h
45099 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45100 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45101 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45102
45103 -extern atomic_t fscache_n_op_pend;
45104 -extern atomic_t fscache_n_op_run;
45105 -extern atomic_t fscache_n_op_enqueue;
45106 -extern atomic_t fscache_n_op_deferred_release;
45107 -extern atomic_t fscache_n_op_release;
45108 -extern atomic_t fscache_n_op_gc;
45109 -extern atomic_t fscache_n_op_cancelled;
45110 -extern atomic_t fscache_n_op_rejected;
45111 +extern atomic_unchecked_t fscache_n_op_pend;
45112 +extern atomic_unchecked_t fscache_n_op_run;
45113 +extern atomic_unchecked_t fscache_n_op_enqueue;
45114 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45115 +extern atomic_unchecked_t fscache_n_op_release;
45116 +extern atomic_unchecked_t fscache_n_op_gc;
45117 +extern atomic_unchecked_t fscache_n_op_cancelled;
45118 +extern atomic_unchecked_t fscache_n_op_rejected;
45119
45120 -extern atomic_t fscache_n_attr_changed;
45121 -extern atomic_t fscache_n_attr_changed_ok;
45122 -extern atomic_t fscache_n_attr_changed_nobufs;
45123 -extern atomic_t fscache_n_attr_changed_nomem;
45124 -extern atomic_t fscache_n_attr_changed_calls;
45125 +extern atomic_unchecked_t fscache_n_attr_changed;
45126 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45127 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45128 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45129 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45130
45131 -extern atomic_t fscache_n_allocs;
45132 -extern atomic_t fscache_n_allocs_ok;
45133 -extern atomic_t fscache_n_allocs_wait;
45134 -extern atomic_t fscache_n_allocs_nobufs;
45135 -extern atomic_t fscache_n_allocs_intr;
45136 -extern atomic_t fscache_n_allocs_object_dead;
45137 -extern atomic_t fscache_n_alloc_ops;
45138 -extern atomic_t fscache_n_alloc_op_waits;
45139 +extern atomic_unchecked_t fscache_n_allocs;
45140 +extern atomic_unchecked_t fscache_n_allocs_ok;
45141 +extern atomic_unchecked_t fscache_n_allocs_wait;
45142 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45143 +extern atomic_unchecked_t fscache_n_allocs_intr;
45144 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45145 +extern atomic_unchecked_t fscache_n_alloc_ops;
45146 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45147
45148 -extern atomic_t fscache_n_retrievals;
45149 -extern atomic_t fscache_n_retrievals_ok;
45150 -extern atomic_t fscache_n_retrievals_wait;
45151 -extern atomic_t fscache_n_retrievals_nodata;
45152 -extern atomic_t fscache_n_retrievals_nobufs;
45153 -extern atomic_t fscache_n_retrievals_intr;
45154 -extern atomic_t fscache_n_retrievals_nomem;
45155 -extern atomic_t fscache_n_retrievals_object_dead;
45156 -extern atomic_t fscache_n_retrieval_ops;
45157 -extern atomic_t fscache_n_retrieval_op_waits;
45158 +extern atomic_unchecked_t fscache_n_retrievals;
45159 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45160 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45161 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45162 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45163 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45164 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45165 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45166 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45167 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45168
45169 -extern atomic_t fscache_n_stores;
45170 -extern atomic_t fscache_n_stores_ok;
45171 -extern atomic_t fscache_n_stores_again;
45172 -extern atomic_t fscache_n_stores_nobufs;
45173 -extern atomic_t fscache_n_stores_oom;
45174 -extern atomic_t fscache_n_store_ops;
45175 -extern atomic_t fscache_n_store_calls;
45176 -extern atomic_t fscache_n_store_pages;
45177 -extern atomic_t fscache_n_store_radix_deletes;
45178 -extern atomic_t fscache_n_store_pages_over_limit;
45179 +extern atomic_unchecked_t fscache_n_stores;
45180 +extern atomic_unchecked_t fscache_n_stores_ok;
45181 +extern atomic_unchecked_t fscache_n_stores_again;
45182 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45183 +extern atomic_unchecked_t fscache_n_stores_oom;
45184 +extern atomic_unchecked_t fscache_n_store_ops;
45185 +extern atomic_unchecked_t fscache_n_store_calls;
45186 +extern atomic_unchecked_t fscache_n_store_pages;
45187 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45188 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45189
45190 -extern atomic_t fscache_n_store_vmscan_not_storing;
45191 -extern atomic_t fscache_n_store_vmscan_gone;
45192 -extern atomic_t fscache_n_store_vmscan_busy;
45193 -extern atomic_t fscache_n_store_vmscan_cancelled;
45194 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45195 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45196 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45197 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45198
45199 -extern atomic_t fscache_n_marks;
45200 -extern atomic_t fscache_n_uncaches;
45201 +extern atomic_unchecked_t fscache_n_marks;
45202 +extern atomic_unchecked_t fscache_n_uncaches;
45203
45204 -extern atomic_t fscache_n_acquires;
45205 -extern atomic_t fscache_n_acquires_null;
45206 -extern atomic_t fscache_n_acquires_no_cache;
45207 -extern atomic_t fscache_n_acquires_ok;
45208 -extern atomic_t fscache_n_acquires_nobufs;
45209 -extern atomic_t fscache_n_acquires_oom;
45210 +extern atomic_unchecked_t fscache_n_acquires;
45211 +extern atomic_unchecked_t fscache_n_acquires_null;
45212 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45213 +extern atomic_unchecked_t fscache_n_acquires_ok;
45214 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45215 +extern atomic_unchecked_t fscache_n_acquires_oom;
45216
45217 -extern atomic_t fscache_n_updates;
45218 -extern atomic_t fscache_n_updates_null;
45219 -extern atomic_t fscache_n_updates_run;
45220 +extern atomic_unchecked_t fscache_n_updates;
45221 +extern atomic_unchecked_t fscache_n_updates_null;
45222 +extern atomic_unchecked_t fscache_n_updates_run;
45223
45224 -extern atomic_t fscache_n_relinquishes;
45225 -extern atomic_t fscache_n_relinquishes_null;
45226 -extern atomic_t fscache_n_relinquishes_waitcrt;
45227 -extern atomic_t fscache_n_relinquishes_retire;
45228 +extern atomic_unchecked_t fscache_n_relinquishes;
45229 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45230 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45231 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45232
45233 -extern atomic_t fscache_n_cookie_index;
45234 -extern atomic_t fscache_n_cookie_data;
45235 -extern atomic_t fscache_n_cookie_special;
45236 +extern atomic_unchecked_t fscache_n_cookie_index;
45237 +extern atomic_unchecked_t fscache_n_cookie_data;
45238 +extern atomic_unchecked_t fscache_n_cookie_special;
45239
45240 -extern atomic_t fscache_n_object_alloc;
45241 -extern atomic_t fscache_n_object_no_alloc;
45242 -extern atomic_t fscache_n_object_lookups;
45243 -extern atomic_t fscache_n_object_lookups_negative;
45244 -extern atomic_t fscache_n_object_lookups_positive;
45245 -extern atomic_t fscache_n_object_lookups_timed_out;
45246 -extern atomic_t fscache_n_object_created;
45247 -extern atomic_t fscache_n_object_avail;
45248 -extern atomic_t fscache_n_object_dead;
45249 +extern atomic_unchecked_t fscache_n_object_alloc;
45250 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45251 +extern atomic_unchecked_t fscache_n_object_lookups;
45252 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45253 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45254 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45255 +extern atomic_unchecked_t fscache_n_object_created;
45256 +extern atomic_unchecked_t fscache_n_object_avail;
45257 +extern atomic_unchecked_t fscache_n_object_dead;
45258
45259 -extern atomic_t fscache_n_checkaux_none;
45260 -extern atomic_t fscache_n_checkaux_okay;
45261 -extern atomic_t fscache_n_checkaux_update;
45262 -extern atomic_t fscache_n_checkaux_obsolete;
45263 +extern atomic_unchecked_t fscache_n_checkaux_none;
45264 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45265 +extern atomic_unchecked_t fscache_n_checkaux_update;
45266 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45267
45268 extern atomic_t fscache_n_cop_alloc_object;
45269 extern atomic_t fscache_n_cop_lookup_object;
45270 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45271 atomic_inc(stat);
45272 }
45273
45274 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45275 +{
45276 + atomic_inc_unchecked(stat);
45277 +}
45278 +
45279 static inline void fscache_stat_d(atomic_t *stat)
45280 {
45281 atomic_dec(stat);
45282 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45283
45284 #define __fscache_stat(stat) (NULL)
45285 #define fscache_stat(stat) do {} while (0)
45286 +#define fscache_stat_unchecked(stat) do {} while (0)
45287 #define fscache_stat_d(stat) do {} while (0)
45288 #endif
45289
45290 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45291 index b6b897c..0ffff9c 100644
45292 --- a/fs/fscache/object.c
45293 +++ b/fs/fscache/object.c
45294 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45295 /* update the object metadata on disk */
45296 case FSCACHE_OBJECT_UPDATING:
45297 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45298 - fscache_stat(&fscache_n_updates_run);
45299 + fscache_stat_unchecked(&fscache_n_updates_run);
45300 fscache_stat(&fscache_n_cop_update_object);
45301 object->cache->ops->update_object(object);
45302 fscache_stat_d(&fscache_n_cop_update_object);
45303 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45304 spin_lock(&object->lock);
45305 object->state = FSCACHE_OBJECT_DEAD;
45306 spin_unlock(&object->lock);
45307 - fscache_stat(&fscache_n_object_dead);
45308 + fscache_stat_unchecked(&fscache_n_object_dead);
45309 goto terminal_transit;
45310
45311 /* handle the parent cache of this object being withdrawn from
45312 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45313 spin_lock(&object->lock);
45314 object->state = FSCACHE_OBJECT_DEAD;
45315 spin_unlock(&object->lock);
45316 - fscache_stat(&fscache_n_object_dead);
45317 + fscache_stat_unchecked(&fscache_n_object_dead);
45318 goto terminal_transit;
45319
45320 /* complain about the object being woken up once it is
45321 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45322 parent->cookie->def->name, cookie->def->name,
45323 object->cache->tag->name);
45324
45325 - fscache_stat(&fscache_n_object_lookups);
45326 + fscache_stat_unchecked(&fscache_n_object_lookups);
45327 fscache_stat(&fscache_n_cop_lookup_object);
45328 ret = object->cache->ops->lookup_object(object);
45329 fscache_stat_d(&fscache_n_cop_lookup_object);
45330 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45331 if (ret == -ETIMEDOUT) {
45332 /* probably stuck behind another object, so move this one to
45333 * the back of the queue */
45334 - fscache_stat(&fscache_n_object_lookups_timed_out);
45335 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45336 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45337 }
45338
45339 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45340
45341 spin_lock(&object->lock);
45342 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45343 - fscache_stat(&fscache_n_object_lookups_negative);
45344 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45345
45346 /* transit here to allow write requests to begin stacking up
45347 * and read requests to begin returning ENODATA */
45348 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45349 * result, in which case there may be data available */
45350 spin_lock(&object->lock);
45351 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45352 - fscache_stat(&fscache_n_object_lookups_positive);
45353 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45354
45355 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45356
45357 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45358 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45359 } else {
45360 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45361 - fscache_stat(&fscache_n_object_created);
45362 + fscache_stat_unchecked(&fscache_n_object_created);
45363
45364 object->state = FSCACHE_OBJECT_AVAILABLE;
45365 spin_unlock(&object->lock);
45366 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45367 fscache_enqueue_dependents(object);
45368
45369 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45370 - fscache_stat(&fscache_n_object_avail);
45371 + fscache_stat_unchecked(&fscache_n_object_avail);
45372
45373 _leave("");
45374 }
45375 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45376 enum fscache_checkaux result;
45377
45378 if (!object->cookie->def->check_aux) {
45379 - fscache_stat(&fscache_n_checkaux_none);
45380 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45381 return FSCACHE_CHECKAUX_OKAY;
45382 }
45383
45384 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45385 switch (result) {
45386 /* entry okay as is */
45387 case FSCACHE_CHECKAUX_OKAY:
45388 - fscache_stat(&fscache_n_checkaux_okay);
45389 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45390 break;
45391
45392 /* entry requires update */
45393 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45394 - fscache_stat(&fscache_n_checkaux_update);
45395 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45396 break;
45397
45398 /* entry requires deletion */
45399 case FSCACHE_CHECKAUX_OBSOLETE:
45400 - fscache_stat(&fscache_n_checkaux_obsolete);
45401 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45402 break;
45403
45404 default:
45405 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45406 index 30afdfa..2256596 100644
45407 --- a/fs/fscache/operation.c
45408 +++ b/fs/fscache/operation.c
45409 @@ -17,7 +17,7 @@
45410 #include <linux/slab.h>
45411 #include "internal.h"
45412
45413 -atomic_t fscache_op_debug_id;
45414 +atomic_unchecked_t fscache_op_debug_id;
45415 EXPORT_SYMBOL(fscache_op_debug_id);
45416
45417 /**
45418 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45419 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45420 ASSERTCMP(atomic_read(&op->usage), >, 0);
45421
45422 - fscache_stat(&fscache_n_op_enqueue);
45423 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45424 switch (op->flags & FSCACHE_OP_TYPE) {
45425 case FSCACHE_OP_ASYNC:
45426 _debug("queue async");
45427 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45428 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45429 if (op->processor)
45430 fscache_enqueue_operation(op);
45431 - fscache_stat(&fscache_n_op_run);
45432 + fscache_stat_unchecked(&fscache_n_op_run);
45433 }
45434
45435 /*
45436 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45437 if (object->n_ops > 1) {
45438 atomic_inc(&op->usage);
45439 list_add_tail(&op->pend_link, &object->pending_ops);
45440 - fscache_stat(&fscache_n_op_pend);
45441 + fscache_stat_unchecked(&fscache_n_op_pend);
45442 } else if (!list_empty(&object->pending_ops)) {
45443 atomic_inc(&op->usage);
45444 list_add_tail(&op->pend_link, &object->pending_ops);
45445 - fscache_stat(&fscache_n_op_pend);
45446 + fscache_stat_unchecked(&fscache_n_op_pend);
45447 fscache_start_operations(object);
45448 } else {
45449 ASSERTCMP(object->n_in_progress, ==, 0);
45450 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45451 object->n_exclusive++; /* reads and writes must wait */
45452 atomic_inc(&op->usage);
45453 list_add_tail(&op->pend_link, &object->pending_ops);
45454 - fscache_stat(&fscache_n_op_pend);
45455 + fscache_stat_unchecked(&fscache_n_op_pend);
45456 ret = 0;
45457 } else {
45458 /* not allowed to submit ops in any other state */
45459 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45460 if (object->n_exclusive > 0) {
45461 atomic_inc(&op->usage);
45462 list_add_tail(&op->pend_link, &object->pending_ops);
45463 - fscache_stat(&fscache_n_op_pend);
45464 + fscache_stat_unchecked(&fscache_n_op_pend);
45465 } else if (!list_empty(&object->pending_ops)) {
45466 atomic_inc(&op->usage);
45467 list_add_tail(&op->pend_link, &object->pending_ops);
45468 - fscache_stat(&fscache_n_op_pend);
45469 + fscache_stat_unchecked(&fscache_n_op_pend);
45470 fscache_start_operations(object);
45471 } else {
45472 ASSERTCMP(object->n_exclusive, ==, 0);
45473 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45474 object->n_ops++;
45475 atomic_inc(&op->usage);
45476 list_add_tail(&op->pend_link, &object->pending_ops);
45477 - fscache_stat(&fscache_n_op_pend);
45478 + fscache_stat_unchecked(&fscache_n_op_pend);
45479 ret = 0;
45480 } else if (object->state == FSCACHE_OBJECT_DYING ||
45481 object->state == FSCACHE_OBJECT_LC_DYING ||
45482 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45483 - fscache_stat(&fscache_n_op_rejected);
45484 + fscache_stat_unchecked(&fscache_n_op_rejected);
45485 ret = -ENOBUFS;
45486 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45487 fscache_report_unexpected_submission(object, op, ostate);
45488 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45489
45490 ret = -EBUSY;
45491 if (!list_empty(&op->pend_link)) {
45492 - fscache_stat(&fscache_n_op_cancelled);
45493 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45494 list_del_init(&op->pend_link);
45495 object->n_ops--;
45496 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45497 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45498 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45499 BUG();
45500
45501 - fscache_stat(&fscache_n_op_release);
45502 + fscache_stat_unchecked(&fscache_n_op_release);
45503
45504 if (op->release) {
45505 op->release(op);
45506 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45507 * lock, and defer it otherwise */
45508 if (!spin_trylock(&object->lock)) {
45509 _debug("defer put");
45510 - fscache_stat(&fscache_n_op_deferred_release);
45511 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45512
45513 cache = object->cache;
45514 spin_lock(&cache->op_gc_list_lock);
45515 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45516
45517 _debug("GC DEFERRED REL OBJ%x OP%x",
45518 object->debug_id, op->debug_id);
45519 - fscache_stat(&fscache_n_op_gc);
45520 + fscache_stat_unchecked(&fscache_n_op_gc);
45521
45522 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45523
45524 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45525 index 3f7a59b..cf196cc 100644
45526 --- a/fs/fscache/page.c
45527 +++ b/fs/fscache/page.c
45528 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45529 val = radix_tree_lookup(&cookie->stores, page->index);
45530 if (!val) {
45531 rcu_read_unlock();
45532 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45533 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45534 __fscache_uncache_page(cookie, page);
45535 return true;
45536 }
45537 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45538 spin_unlock(&cookie->stores_lock);
45539
45540 if (xpage) {
45541 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45542 - fscache_stat(&fscache_n_store_radix_deletes);
45543 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45544 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45545 ASSERTCMP(xpage, ==, page);
45546 } else {
45547 - fscache_stat(&fscache_n_store_vmscan_gone);
45548 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45549 }
45550
45551 wake_up_bit(&cookie->flags, 0);
45552 @@ -107,7 +107,7 @@ page_busy:
45553 /* we might want to wait here, but that could deadlock the allocator as
45554 * the work threads writing to the cache may all end up sleeping
45555 * on memory allocation */
45556 - fscache_stat(&fscache_n_store_vmscan_busy);
45557 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45558 return false;
45559 }
45560 EXPORT_SYMBOL(__fscache_maybe_release_page);
45561 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45562 FSCACHE_COOKIE_STORING_TAG);
45563 if (!radix_tree_tag_get(&cookie->stores, page->index,
45564 FSCACHE_COOKIE_PENDING_TAG)) {
45565 - fscache_stat(&fscache_n_store_radix_deletes);
45566 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45567 xpage = radix_tree_delete(&cookie->stores, page->index);
45568 }
45569 spin_unlock(&cookie->stores_lock);
45570 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45571
45572 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45573
45574 - fscache_stat(&fscache_n_attr_changed_calls);
45575 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45576
45577 if (fscache_object_is_active(object)) {
45578 fscache_stat(&fscache_n_cop_attr_changed);
45579 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45580
45581 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45582
45583 - fscache_stat(&fscache_n_attr_changed);
45584 + fscache_stat_unchecked(&fscache_n_attr_changed);
45585
45586 op = kzalloc(sizeof(*op), GFP_KERNEL);
45587 if (!op) {
45588 - fscache_stat(&fscache_n_attr_changed_nomem);
45589 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45590 _leave(" = -ENOMEM");
45591 return -ENOMEM;
45592 }
45593 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45594 if (fscache_submit_exclusive_op(object, op) < 0)
45595 goto nobufs;
45596 spin_unlock(&cookie->lock);
45597 - fscache_stat(&fscache_n_attr_changed_ok);
45598 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45599 fscache_put_operation(op);
45600 _leave(" = 0");
45601 return 0;
45602 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45603 nobufs:
45604 spin_unlock(&cookie->lock);
45605 kfree(op);
45606 - fscache_stat(&fscache_n_attr_changed_nobufs);
45607 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45608 _leave(" = %d", -ENOBUFS);
45609 return -ENOBUFS;
45610 }
45611 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45612 /* allocate a retrieval operation and attempt to submit it */
45613 op = kzalloc(sizeof(*op), GFP_NOIO);
45614 if (!op) {
45615 - fscache_stat(&fscache_n_retrievals_nomem);
45616 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45617 return NULL;
45618 }
45619
45620 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45621 return 0;
45622 }
45623
45624 - fscache_stat(&fscache_n_retrievals_wait);
45625 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45626
45627 jif = jiffies;
45628 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45629 fscache_wait_bit_interruptible,
45630 TASK_INTERRUPTIBLE) != 0) {
45631 - fscache_stat(&fscache_n_retrievals_intr);
45632 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45633 _leave(" = -ERESTARTSYS");
45634 return -ERESTARTSYS;
45635 }
45636 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45637 */
45638 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45639 struct fscache_retrieval *op,
45640 - atomic_t *stat_op_waits,
45641 - atomic_t *stat_object_dead)
45642 + atomic_unchecked_t *stat_op_waits,
45643 + atomic_unchecked_t *stat_object_dead)
45644 {
45645 int ret;
45646
45647 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45648 goto check_if_dead;
45649
45650 _debug(">>> WT");
45651 - fscache_stat(stat_op_waits);
45652 + fscache_stat_unchecked(stat_op_waits);
45653 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45654 fscache_wait_bit_interruptible,
45655 TASK_INTERRUPTIBLE) < 0) {
45656 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45657
45658 check_if_dead:
45659 if (unlikely(fscache_object_is_dead(object))) {
45660 - fscache_stat(stat_object_dead);
45661 + fscache_stat_unchecked(stat_object_dead);
45662 return -ENOBUFS;
45663 }
45664 return 0;
45665 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45666
45667 _enter("%p,%p,,,", cookie, page);
45668
45669 - fscache_stat(&fscache_n_retrievals);
45670 + fscache_stat_unchecked(&fscache_n_retrievals);
45671
45672 if (hlist_empty(&cookie->backing_objects))
45673 goto nobufs;
45674 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45675 goto nobufs_unlock;
45676 spin_unlock(&cookie->lock);
45677
45678 - fscache_stat(&fscache_n_retrieval_ops);
45679 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45680
45681 /* pin the netfs read context in case we need to do the actual netfs
45682 * read because we've encountered a cache read failure */
45683 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45684
45685 error:
45686 if (ret == -ENOMEM)
45687 - fscache_stat(&fscache_n_retrievals_nomem);
45688 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45689 else if (ret == -ERESTARTSYS)
45690 - fscache_stat(&fscache_n_retrievals_intr);
45691 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45692 else if (ret == -ENODATA)
45693 - fscache_stat(&fscache_n_retrievals_nodata);
45694 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45695 else if (ret < 0)
45696 - fscache_stat(&fscache_n_retrievals_nobufs);
45697 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45698 else
45699 - fscache_stat(&fscache_n_retrievals_ok);
45700 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45701
45702 fscache_put_retrieval(op);
45703 _leave(" = %d", ret);
45704 @@ -429,7 +429,7 @@ nobufs_unlock:
45705 spin_unlock(&cookie->lock);
45706 kfree(op);
45707 nobufs:
45708 - fscache_stat(&fscache_n_retrievals_nobufs);
45709 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45710 _leave(" = -ENOBUFS");
45711 return -ENOBUFS;
45712 }
45713 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45714
45715 _enter("%p,,%d,,,", cookie, *nr_pages);
45716
45717 - fscache_stat(&fscache_n_retrievals);
45718 + fscache_stat_unchecked(&fscache_n_retrievals);
45719
45720 if (hlist_empty(&cookie->backing_objects))
45721 goto nobufs;
45722 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45723 goto nobufs_unlock;
45724 spin_unlock(&cookie->lock);
45725
45726 - fscache_stat(&fscache_n_retrieval_ops);
45727 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45728
45729 /* pin the netfs read context in case we need to do the actual netfs
45730 * read because we've encountered a cache read failure */
45731 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45732
45733 error:
45734 if (ret == -ENOMEM)
45735 - fscache_stat(&fscache_n_retrievals_nomem);
45736 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45737 else if (ret == -ERESTARTSYS)
45738 - fscache_stat(&fscache_n_retrievals_intr);
45739 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45740 else if (ret == -ENODATA)
45741 - fscache_stat(&fscache_n_retrievals_nodata);
45742 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45743 else if (ret < 0)
45744 - fscache_stat(&fscache_n_retrievals_nobufs);
45745 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45746 else
45747 - fscache_stat(&fscache_n_retrievals_ok);
45748 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45749
45750 fscache_put_retrieval(op);
45751 _leave(" = %d", ret);
45752 @@ -545,7 +545,7 @@ nobufs_unlock:
45753 spin_unlock(&cookie->lock);
45754 kfree(op);
45755 nobufs:
45756 - fscache_stat(&fscache_n_retrievals_nobufs);
45757 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45758 _leave(" = -ENOBUFS");
45759 return -ENOBUFS;
45760 }
45761 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45762
45763 _enter("%p,%p,,,", cookie, page);
45764
45765 - fscache_stat(&fscache_n_allocs);
45766 + fscache_stat_unchecked(&fscache_n_allocs);
45767
45768 if (hlist_empty(&cookie->backing_objects))
45769 goto nobufs;
45770 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45771 goto nobufs_unlock;
45772 spin_unlock(&cookie->lock);
45773
45774 - fscache_stat(&fscache_n_alloc_ops);
45775 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45776
45777 ret = fscache_wait_for_retrieval_activation(
45778 object, op,
45779 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45780
45781 error:
45782 if (ret == -ERESTARTSYS)
45783 - fscache_stat(&fscache_n_allocs_intr);
45784 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45785 else if (ret < 0)
45786 - fscache_stat(&fscache_n_allocs_nobufs);
45787 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45788 else
45789 - fscache_stat(&fscache_n_allocs_ok);
45790 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45791
45792 fscache_put_retrieval(op);
45793 _leave(" = %d", ret);
45794 @@ -625,7 +625,7 @@ nobufs_unlock:
45795 spin_unlock(&cookie->lock);
45796 kfree(op);
45797 nobufs:
45798 - fscache_stat(&fscache_n_allocs_nobufs);
45799 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45800 _leave(" = -ENOBUFS");
45801 return -ENOBUFS;
45802 }
45803 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45804
45805 spin_lock(&cookie->stores_lock);
45806
45807 - fscache_stat(&fscache_n_store_calls);
45808 + fscache_stat_unchecked(&fscache_n_store_calls);
45809
45810 /* find a page to store */
45811 page = NULL;
45812 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45813 page = results[0];
45814 _debug("gang %d [%lx]", n, page->index);
45815 if (page->index > op->store_limit) {
45816 - fscache_stat(&fscache_n_store_pages_over_limit);
45817 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45818 goto superseded;
45819 }
45820
45821 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45822 spin_unlock(&cookie->stores_lock);
45823 spin_unlock(&object->lock);
45824
45825 - fscache_stat(&fscache_n_store_pages);
45826 + fscache_stat_unchecked(&fscache_n_store_pages);
45827 fscache_stat(&fscache_n_cop_write_page);
45828 ret = object->cache->ops->write_page(op, page);
45829 fscache_stat_d(&fscache_n_cop_write_page);
45830 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45831 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45832 ASSERT(PageFsCache(page));
45833
45834 - fscache_stat(&fscache_n_stores);
45835 + fscache_stat_unchecked(&fscache_n_stores);
45836
45837 op = kzalloc(sizeof(*op), GFP_NOIO);
45838 if (!op)
45839 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45840 spin_unlock(&cookie->stores_lock);
45841 spin_unlock(&object->lock);
45842
45843 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45844 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45845 op->store_limit = object->store_limit;
45846
45847 if (fscache_submit_op(object, &op->op) < 0)
45848 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45849
45850 spin_unlock(&cookie->lock);
45851 radix_tree_preload_end();
45852 - fscache_stat(&fscache_n_store_ops);
45853 - fscache_stat(&fscache_n_stores_ok);
45854 + fscache_stat_unchecked(&fscache_n_store_ops);
45855 + fscache_stat_unchecked(&fscache_n_stores_ok);
45856
45857 /* the work queue now carries its own ref on the object */
45858 fscache_put_operation(&op->op);
45859 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45860 return 0;
45861
45862 already_queued:
45863 - fscache_stat(&fscache_n_stores_again);
45864 + fscache_stat_unchecked(&fscache_n_stores_again);
45865 already_pending:
45866 spin_unlock(&cookie->stores_lock);
45867 spin_unlock(&object->lock);
45868 spin_unlock(&cookie->lock);
45869 radix_tree_preload_end();
45870 kfree(op);
45871 - fscache_stat(&fscache_n_stores_ok);
45872 + fscache_stat_unchecked(&fscache_n_stores_ok);
45873 _leave(" = 0");
45874 return 0;
45875
45876 @@ -851,14 +851,14 @@ nobufs:
45877 spin_unlock(&cookie->lock);
45878 radix_tree_preload_end();
45879 kfree(op);
45880 - fscache_stat(&fscache_n_stores_nobufs);
45881 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45882 _leave(" = -ENOBUFS");
45883 return -ENOBUFS;
45884
45885 nomem_free:
45886 kfree(op);
45887 nomem:
45888 - fscache_stat(&fscache_n_stores_oom);
45889 + fscache_stat_unchecked(&fscache_n_stores_oom);
45890 _leave(" = -ENOMEM");
45891 return -ENOMEM;
45892 }
45893 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45894 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45895 ASSERTCMP(page, !=, NULL);
45896
45897 - fscache_stat(&fscache_n_uncaches);
45898 + fscache_stat_unchecked(&fscache_n_uncaches);
45899
45900 /* cache withdrawal may beat us to it */
45901 if (!PageFsCache(page))
45902 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45903 unsigned long loop;
45904
45905 #ifdef CONFIG_FSCACHE_STATS
45906 - atomic_add(pagevec->nr, &fscache_n_marks);
45907 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45908 #endif
45909
45910 for (loop = 0; loop < pagevec->nr; loop++) {
45911 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45912 index 4765190..2a067f2 100644
45913 --- a/fs/fscache/stats.c
45914 +++ b/fs/fscache/stats.c
45915 @@ -18,95 +18,95 @@
45916 /*
45917 * operation counters
45918 */
45919 -atomic_t fscache_n_op_pend;
45920 -atomic_t fscache_n_op_run;
45921 -atomic_t fscache_n_op_enqueue;
45922 -atomic_t fscache_n_op_requeue;
45923 -atomic_t fscache_n_op_deferred_release;
45924 -atomic_t fscache_n_op_release;
45925 -atomic_t fscache_n_op_gc;
45926 -atomic_t fscache_n_op_cancelled;
45927 -atomic_t fscache_n_op_rejected;
45928 +atomic_unchecked_t fscache_n_op_pend;
45929 +atomic_unchecked_t fscache_n_op_run;
45930 +atomic_unchecked_t fscache_n_op_enqueue;
45931 +atomic_unchecked_t fscache_n_op_requeue;
45932 +atomic_unchecked_t fscache_n_op_deferred_release;
45933 +atomic_unchecked_t fscache_n_op_release;
45934 +atomic_unchecked_t fscache_n_op_gc;
45935 +atomic_unchecked_t fscache_n_op_cancelled;
45936 +atomic_unchecked_t fscache_n_op_rejected;
45937
45938 -atomic_t fscache_n_attr_changed;
45939 -atomic_t fscache_n_attr_changed_ok;
45940 -atomic_t fscache_n_attr_changed_nobufs;
45941 -atomic_t fscache_n_attr_changed_nomem;
45942 -atomic_t fscache_n_attr_changed_calls;
45943 +atomic_unchecked_t fscache_n_attr_changed;
45944 +atomic_unchecked_t fscache_n_attr_changed_ok;
45945 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45946 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45947 +atomic_unchecked_t fscache_n_attr_changed_calls;
45948
45949 -atomic_t fscache_n_allocs;
45950 -atomic_t fscache_n_allocs_ok;
45951 -atomic_t fscache_n_allocs_wait;
45952 -atomic_t fscache_n_allocs_nobufs;
45953 -atomic_t fscache_n_allocs_intr;
45954 -atomic_t fscache_n_allocs_object_dead;
45955 -atomic_t fscache_n_alloc_ops;
45956 -atomic_t fscache_n_alloc_op_waits;
45957 +atomic_unchecked_t fscache_n_allocs;
45958 +atomic_unchecked_t fscache_n_allocs_ok;
45959 +atomic_unchecked_t fscache_n_allocs_wait;
45960 +atomic_unchecked_t fscache_n_allocs_nobufs;
45961 +atomic_unchecked_t fscache_n_allocs_intr;
45962 +atomic_unchecked_t fscache_n_allocs_object_dead;
45963 +atomic_unchecked_t fscache_n_alloc_ops;
45964 +atomic_unchecked_t fscache_n_alloc_op_waits;
45965
45966 -atomic_t fscache_n_retrievals;
45967 -atomic_t fscache_n_retrievals_ok;
45968 -atomic_t fscache_n_retrievals_wait;
45969 -atomic_t fscache_n_retrievals_nodata;
45970 -atomic_t fscache_n_retrievals_nobufs;
45971 -atomic_t fscache_n_retrievals_intr;
45972 -atomic_t fscache_n_retrievals_nomem;
45973 -atomic_t fscache_n_retrievals_object_dead;
45974 -atomic_t fscache_n_retrieval_ops;
45975 -atomic_t fscache_n_retrieval_op_waits;
45976 +atomic_unchecked_t fscache_n_retrievals;
45977 +atomic_unchecked_t fscache_n_retrievals_ok;
45978 +atomic_unchecked_t fscache_n_retrievals_wait;
45979 +atomic_unchecked_t fscache_n_retrievals_nodata;
45980 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45981 +atomic_unchecked_t fscache_n_retrievals_intr;
45982 +atomic_unchecked_t fscache_n_retrievals_nomem;
45983 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45984 +atomic_unchecked_t fscache_n_retrieval_ops;
45985 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45986
45987 -atomic_t fscache_n_stores;
45988 -atomic_t fscache_n_stores_ok;
45989 -atomic_t fscache_n_stores_again;
45990 -atomic_t fscache_n_stores_nobufs;
45991 -atomic_t fscache_n_stores_oom;
45992 -atomic_t fscache_n_store_ops;
45993 -atomic_t fscache_n_store_calls;
45994 -atomic_t fscache_n_store_pages;
45995 -atomic_t fscache_n_store_radix_deletes;
45996 -atomic_t fscache_n_store_pages_over_limit;
45997 +atomic_unchecked_t fscache_n_stores;
45998 +atomic_unchecked_t fscache_n_stores_ok;
45999 +atomic_unchecked_t fscache_n_stores_again;
46000 +atomic_unchecked_t fscache_n_stores_nobufs;
46001 +atomic_unchecked_t fscache_n_stores_oom;
46002 +atomic_unchecked_t fscache_n_store_ops;
46003 +atomic_unchecked_t fscache_n_store_calls;
46004 +atomic_unchecked_t fscache_n_store_pages;
46005 +atomic_unchecked_t fscache_n_store_radix_deletes;
46006 +atomic_unchecked_t fscache_n_store_pages_over_limit;
46007
46008 -atomic_t fscache_n_store_vmscan_not_storing;
46009 -atomic_t fscache_n_store_vmscan_gone;
46010 -atomic_t fscache_n_store_vmscan_busy;
46011 -atomic_t fscache_n_store_vmscan_cancelled;
46012 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46013 +atomic_unchecked_t fscache_n_store_vmscan_gone;
46014 +atomic_unchecked_t fscache_n_store_vmscan_busy;
46015 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46016
46017 -atomic_t fscache_n_marks;
46018 -atomic_t fscache_n_uncaches;
46019 +atomic_unchecked_t fscache_n_marks;
46020 +atomic_unchecked_t fscache_n_uncaches;
46021
46022 -atomic_t fscache_n_acquires;
46023 -atomic_t fscache_n_acquires_null;
46024 -atomic_t fscache_n_acquires_no_cache;
46025 -atomic_t fscache_n_acquires_ok;
46026 -atomic_t fscache_n_acquires_nobufs;
46027 -atomic_t fscache_n_acquires_oom;
46028 +atomic_unchecked_t fscache_n_acquires;
46029 +atomic_unchecked_t fscache_n_acquires_null;
46030 +atomic_unchecked_t fscache_n_acquires_no_cache;
46031 +atomic_unchecked_t fscache_n_acquires_ok;
46032 +atomic_unchecked_t fscache_n_acquires_nobufs;
46033 +atomic_unchecked_t fscache_n_acquires_oom;
46034
46035 -atomic_t fscache_n_updates;
46036 -atomic_t fscache_n_updates_null;
46037 -atomic_t fscache_n_updates_run;
46038 +atomic_unchecked_t fscache_n_updates;
46039 +atomic_unchecked_t fscache_n_updates_null;
46040 +atomic_unchecked_t fscache_n_updates_run;
46041
46042 -atomic_t fscache_n_relinquishes;
46043 -atomic_t fscache_n_relinquishes_null;
46044 -atomic_t fscache_n_relinquishes_waitcrt;
46045 -atomic_t fscache_n_relinquishes_retire;
46046 +atomic_unchecked_t fscache_n_relinquishes;
46047 +atomic_unchecked_t fscache_n_relinquishes_null;
46048 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46049 +atomic_unchecked_t fscache_n_relinquishes_retire;
46050
46051 -atomic_t fscache_n_cookie_index;
46052 -atomic_t fscache_n_cookie_data;
46053 -atomic_t fscache_n_cookie_special;
46054 +atomic_unchecked_t fscache_n_cookie_index;
46055 +atomic_unchecked_t fscache_n_cookie_data;
46056 +atomic_unchecked_t fscache_n_cookie_special;
46057
46058 -atomic_t fscache_n_object_alloc;
46059 -atomic_t fscache_n_object_no_alloc;
46060 -atomic_t fscache_n_object_lookups;
46061 -atomic_t fscache_n_object_lookups_negative;
46062 -atomic_t fscache_n_object_lookups_positive;
46063 -atomic_t fscache_n_object_lookups_timed_out;
46064 -atomic_t fscache_n_object_created;
46065 -atomic_t fscache_n_object_avail;
46066 -atomic_t fscache_n_object_dead;
46067 +atomic_unchecked_t fscache_n_object_alloc;
46068 +atomic_unchecked_t fscache_n_object_no_alloc;
46069 +atomic_unchecked_t fscache_n_object_lookups;
46070 +atomic_unchecked_t fscache_n_object_lookups_negative;
46071 +atomic_unchecked_t fscache_n_object_lookups_positive;
46072 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
46073 +atomic_unchecked_t fscache_n_object_created;
46074 +atomic_unchecked_t fscache_n_object_avail;
46075 +atomic_unchecked_t fscache_n_object_dead;
46076
46077 -atomic_t fscache_n_checkaux_none;
46078 -atomic_t fscache_n_checkaux_okay;
46079 -atomic_t fscache_n_checkaux_update;
46080 -atomic_t fscache_n_checkaux_obsolete;
46081 +atomic_unchecked_t fscache_n_checkaux_none;
46082 +atomic_unchecked_t fscache_n_checkaux_okay;
46083 +atomic_unchecked_t fscache_n_checkaux_update;
46084 +atomic_unchecked_t fscache_n_checkaux_obsolete;
46085
46086 atomic_t fscache_n_cop_alloc_object;
46087 atomic_t fscache_n_cop_lookup_object;
46088 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46089 seq_puts(m, "FS-Cache statistics\n");
46090
46091 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46092 - atomic_read(&fscache_n_cookie_index),
46093 - atomic_read(&fscache_n_cookie_data),
46094 - atomic_read(&fscache_n_cookie_special));
46095 + atomic_read_unchecked(&fscache_n_cookie_index),
46096 + atomic_read_unchecked(&fscache_n_cookie_data),
46097 + atomic_read_unchecked(&fscache_n_cookie_special));
46098
46099 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46100 - atomic_read(&fscache_n_object_alloc),
46101 - atomic_read(&fscache_n_object_no_alloc),
46102 - atomic_read(&fscache_n_object_avail),
46103 - atomic_read(&fscache_n_object_dead));
46104 + atomic_read_unchecked(&fscache_n_object_alloc),
46105 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46106 + atomic_read_unchecked(&fscache_n_object_avail),
46107 + atomic_read_unchecked(&fscache_n_object_dead));
46108 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46109 - atomic_read(&fscache_n_checkaux_none),
46110 - atomic_read(&fscache_n_checkaux_okay),
46111 - atomic_read(&fscache_n_checkaux_update),
46112 - atomic_read(&fscache_n_checkaux_obsolete));
46113 + atomic_read_unchecked(&fscache_n_checkaux_none),
46114 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46115 + atomic_read_unchecked(&fscache_n_checkaux_update),
46116 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46117
46118 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46119 - atomic_read(&fscache_n_marks),
46120 - atomic_read(&fscache_n_uncaches));
46121 + atomic_read_unchecked(&fscache_n_marks),
46122 + atomic_read_unchecked(&fscache_n_uncaches));
46123
46124 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46125 " oom=%u\n",
46126 - atomic_read(&fscache_n_acquires),
46127 - atomic_read(&fscache_n_acquires_null),
46128 - atomic_read(&fscache_n_acquires_no_cache),
46129 - atomic_read(&fscache_n_acquires_ok),
46130 - atomic_read(&fscache_n_acquires_nobufs),
46131 - atomic_read(&fscache_n_acquires_oom));
46132 + atomic_read_unchecked(&fscache_n_acquires),
46133 + atomic_read_unchecked(&fscache_n_acquires_null),
46134 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46135 + atomic_read_unchecked(&fscache_n_acquires_ok),
46136 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46137 + atomic_read_unchecked(&fscache_n_acquires_oom));
46138
46139 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46140 - atomic_read(&fscache_n_object_lookups),
46141 - atomic_read(&fscache_n_object_lookups_negative),
46142 - atomic_read(&fscache_n_object_lookups_positive),
46143 - atomic_read(&fscache_n_object_created),
46144 - atomic_read(&fscache_n_object_lookups_timed_out));
46145 + atomic_read_unchecked(&fscache_n_object_lookups),
46146 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46147 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46148 + atomic_read_unchecked(&fscache_n_object_created),
46149 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46150
46151 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46152 - atomic_read(&fscache_n_updates),
46153 - atomic_read(&fscache_n_updates_null),
46154 - atomic_read(&fscache_n_updates_run));
46155 + atomic_read_unchecked(&fscache_n_updates),
46156 + atomic_read_unchecked(&fscache_n_updates_null),
46157 + atomic_read_unchecked(&fscache_n_updates_run));
46158
46159 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46160 - atomic_read(&fscache_n_relinquishes),
46161 - atomic_read(&fscache_n_relinquishes_null),
46162 - atomic_read(&fscache_n_relinquishes_waitcrt),
46163 - atomic_read(&fscache_n_relinquishes_retire));
46164 + atomic_read_unchecked(&fscache_n_relinquishes),
46165 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46166 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46167 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46168
46169 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46170 - atomic_read(&fscache_n_attr_changed),
46171 - atomic_read(&fscache_n_attr_changed_ok),
46172 - atomic_read(&fscache_n_attr_changed_nobufs),
46173 - atomic_read(&fscache_n_attr_changed_nomem),
46174 - atomic_read(&fscache_n_attr_changed_calls));
46175 + atomic_read_unchecked(&fscache_n_attr_changed),
46176 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46177 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46178 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46179 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46180
46181 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46182 - atomic_read(&fscache_n_allocs),
46183 - atomic_read(&fscache_n_allocs_ok),
46184 - atomic_read(&fscache_n_allocs_wait),
46185 - atomic_read(&fscache_n_allocs_nobufs),
46186 - atomic_read(&fscache_n_allocs_intr));
46187 + atomic_read_unchecked(&fscache_n_allocs),
46188 + atomic_read_unchecked(&fscache_n_allocs_ok),
46189 + atomic_read_unchecked(&fscache_n_allocs_wait),
46190 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46191 + atomic_read_unchecked(&fscache_n_allocs_intr));
46192 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46193 - atomic_read(&fscache_n_alloc_ops),
46194 - atomic_read(&fscache_n_alloc_op_waits),
46195 - atomic_read(&fscache_n_allocs_object_dead));
46196 + atomic_read_unchecked(&fscache_n_alloc_ops),
46197 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46198 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46199
46200 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46201 " int=%u oom=%u\n",
46202 - atomic_read(&fscache_n_retrievals),
46203 - atomic_read(&fscache_n_retrievals_ok),
46204 - atomic_read(&fscache_n_retrievals_wait),
46205 - atomic_read(&fscache_n_retrievals_nodata),
46206 - atomic_read(&fscache_n_retrievals_nobufs),
46207 - atomic_read(&fscache_n_retrievals_intr),
46208 - atomic_read(&fscache_n_retrievals_nomem));
46209 + atomic_read_unchecked(&fscache_n_retrievals),
46210 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46211 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46212 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46213 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46214 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46215 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46216 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46217 - atomic_read(&fscache_n_retrieval_ops),
46218 - atomic_read(&fscache_n_retrieval_op_waits),
46219 - atomic_read(&fscache_n_retrievals_object_dead));
46220 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46221 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46222 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46223
46224 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46225 - atomic_read(&fscache_n_stores),
46226 - atomic_read(&fscache_n_stores_ok),
46227 - atomic_read(&fscache_n_stores_again),
46228 - atomic_read(&fscache_n_stores_nobufs),
46229 - atomic_read(&fscache_n_stores_oom));
46230 + atomic_read_unchecked(&fscache_n_stores),
46231 + atomic_read_unchecked(&fscache_n_stores_ok),
46232 + atomic_read_unchecked(&fscache_n_stores_again),
46233 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46234 + atomic_read_unchecked(&fscache_n_stores_oom));
46235 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46236 - atomic_read(&fscache_n_store_ops),
46237 - atomic_read(&fscache_n_store_calls),
46238 - atomic_read(&fscache_n_store_pages),
46239 - atomic_read(&fscache_n_store_radix_deletes),
46240 - atomic_read(&fscache_n_store_pages_over_limit));
46241 + atomic_read_unchecked(&fscache_n_store_ops),
46242 + atomic_read_unchecked(&fscache_n_store_calls),
46243 + atomic_read_unchecked(&fscache_n_store_pages),
46244 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46245 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46246
46247 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46248 - atomic_read(&fscache_n_store_vmscan_not_storing),
46249 - atomic_read(&fscache_n_store_vmscan_gone),
46250 - atomic_read(&fscache_n_store_vmscan_busy),
46251 - atomic_read(&fscache_n_store_vmscan_cancelled));
46252 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46253 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46254 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46255 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46256
46257 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46258 - atomic_read(&fscache_n_op_pend),
46259 - atomic_read(&fscache_n_op_run),
46260 - atomic_read(&fscache_n_op_enqueue),
46261 - atomic_read(&fscache_n_op_cancelled),
46262 - atomic_read(&fscache_n_op_rejected));
46263 + atomic_read_unchecked(&fscache_n_op_pend),
46264 + atomic_read_unchecked(&fscache_n_op_run),
46265 + atomic_read_unchecked(&fscache_n_op_enqueue),
46266 + atomic_read_unchecked(&fscache_n_op_cancelled),
46267 + atomic_read_unchecked(&fscache_n_op_rejected));
46268 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46269 - atomic_read(&fscache_n_op_deferred_release),
46270 - atomic_read(&fscache_n_op_release),
46271 - atomic_read(&fscache_n_op_gc));
46272 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46273 + atomic_read_unchecked(&fscache_n_op_release),
46274 + atomic_read_unchecked(&fscache_n_op_gc));
46275
46276 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46277 atomic_read(&fscache_n_cop_alloc_object),
46278 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46279 index 3426521..3b75162 100644
46280 --- a/fs/fuse/cuse.c
46281 +++ b/fs/fuse/cuse.c
46282 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
46283 INIT_LIST_HEAD(&cuse_conntbl[i]);
46284
46285 /* inherit and extend fuse_dev_operations */
46286 - cuse_channel_fops = fuse_dev_operations;
46287 - cuse_channel_fops.owner = THIS_MODULE;
46288 - cuse_channel_fops.open = cuse_channel_open;
46289 - cuse_channel_fops.release = cuse_channel_release;
46290 + pax_open_kernel();
46291 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46292 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46293 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46294 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46295 + pax_close_kernel();
46296
46297 cuse_class = class_create(THIS_MODULE, "cuse");
46298 if (IS_ERR(cuse_class))
46299 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46300 index 7df2b5e..5804aa7 100644
46301 --- a/fs/fuse/dev.c
46302 +++ b/fs/fuse/dev.c
46303 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46304 ret = 0;
46305 pipe_lock(pipe);
46306
46307 - if (!pipe->readers) {
46308 + if (!atomic_read(&pipe->readers)) {
46309 send_sig(SIGPIPE, current, 0);
46310 if (!ret)
46311 ret = -EPIPE;
46312 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46313 index bc43832..0cfe5a6 100644
46314 --- a/fs/fuse/dir.c
46315 +++ b/fs/fuse/dir.c
46316 @@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
46317 return link;
46318 }
46319
46320 -static void free_link(char *link)
46321 +static void free_link(const char *link)
46322 {
46323 if (!IS_ERR(link))
46324 free_page((unsigned long) link);
46325 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46326 index a9ba244..d9df391 100644
46327 --- a/fs/gfs2/inode.c
46328 +++ b/fs/gfs2/inode.c
46329 @@ -1496,7 +1496,7 @@ out:
46330
46331 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46332 {
46333 - char *s = nd_get_link(nd);
46334 + const char *s = nd_get_link(nd);
46335 if (!IS_ERR(s))
46336 kfree(s);
46337 }
46338 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46339 index 001ef01..f7d5f07 100644
46340 --- a/fs/hugetlbfs/inode.c
46341 +++ b/fs/hugetlbfs/inode.c
46342 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46343 .kill_sb = kill_litter_super,
46344 };
46345
46346 -static struct vfsmount *hugetlbfs_vfsmount;
46347 +struct vfsmount *hugetlbfs_vfsmount;
46348
46349 static int can_do_hugetlb_shm(void)
46350 {
46351 diff --git a/fs/inode.c b/fs/inode.c
46352 index 9f4f5fe..6214688 100644
46353 --- a/fs/inode.c
46354 +++ b/fs/inode.c
46355 @@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
46356
46357 #ifdef CONFIG_SMP
46358 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46359 - static atomic_t shared_last_ino;
46360 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46361 + static atomic_unchecked_t shared_last_ino;
46362 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46363
46364 res = next - LAST_INO_BATCH;
46365 }
46366 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46367 index 4a6cf28..d3a29d3 100644
46368 --- a/fs/jffs2/erase.c
46369 +++ b/fs/jffs2/erase.c
46370 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46371 struct jffs2_unknown_node marker = {
46372 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46373 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46374 - .totlen = cpu_to_je32(c->cleanmarker_size)
46375 + .totlen = cpu_to_je32(c->cleanmarker_size),
46376 + .hdr_crc = cpu_to_je32(0)
46377 };
46378
46379 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46380 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46381 index 74d9be1..d5dd140 100644
46382 --- a/fs/jffs2/wbuf.c
46383 +++ b/fs/jffs2/wbuf.c
46384 @@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46385 {
46386 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46387 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46388 - .totlen = constant_cpu_to_je32(8)
46389 + .totlen = constant_cpu_to_je32(8),
46390 + .hdr_crc = constant_cpu_to_je32(0)
46391 };
46392
46393 /*
46394 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46395 index 4a82950..bcaa0cb 100644
46396 --- a/fs/jfs/super.c
46397 +++ b/fs/jfs/super.c
46398 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46399
46400 jfs_inode_cachep =
46401 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46402 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46403 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46404 init_once);
46405 if (jfs_inode_cachep == NULL)
46406 return -ENOMEM;
46407 diff --git a/fs/libfs.c b/fs/libfs.c
46408 index 18d08f5..fe3dc64 100644
46409 --- a/fs/libfs.c
46410 +++ b/fs/libfs.c
46411 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46412
46413 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46414 struct dentry *next;
46415 + char d_name[sizeof(next->d_iname)];
46416 + const unsigned char *name;
46417 +
46418 next = list_entry(p, struct dentry, d_u.d_child);
46419 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46420 if (!simple_positive(next)) {
46421 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46422
46423 spin_unlock(&next->d_lock);
46424 spin_unlock(&dentry->d_lock);
46425 - if (filldir(dirent, next->d_name.name,
46426 + name = next->d_name.name;
46427 + if (name == next->d_iname) {
46428 + memcpy(d_name, name, next->d_name.len);
46429 + name = d_name;
46430 + }
46431 + if (filldir(dirent, name,
46432 next->d_name.len, filp->f_pos,
46433 next->d_inode->i_ino,
46434 dt_type(next->d_inode)) < 0)
46435 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46436 index 8392cb8..80d6193 100644
46437 --- a/fs/lockd/clntproc.c
46438 +++ b/fs/lockd/clntproc.c
46439 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46440 /*
46441 * Cookie counter for NLM requests
46442 */
46443 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46444 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46445
46446 void nlmclnt_next_cookie(struct nlm_cookie *c)
46447 {
46448 - u32 cookie = atomic_inc_return(&nlm_cookie);
46449 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46450
46451 memcpy(c->data, &cookie, 4);
46452 c->len=4;
46453 diff --git a/fs/locks.c b/fs/locks.c
46454 index 6a64f15..c3dacf2 100644
46455 --- a/fs/locks.c
46456 +++ b/fs/locks.c
46457 @@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
46458 return 0;
46459 }
46460
46461 -static int assign_type(struct file_lock *fl, int type)
46462 +static int assign_type(struct file_lock *fl, long type)
46463 {
46464 switch (type) {
46465 case F_RDLCK:
46466 @@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
46467 /*
46468 * Initialize a lease, use the default lock manager operations
46469 */
46470 -static int lease_init(struct file *filp, int type, struct file_lock *fl)
46471 +static int lease_init(struct file *filp, long type, struct file_lock *fl)
46472 {
46473 if (assign_type(fl, type) != 0)
46474 return -EINVAL;
46475 @@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
46476 }
46477
46478 /* Allocate a file_lock initialised to this type of lease */
46479 -static struct file_lock *lease_alloc(struct file *filp, int type)
46480 +static struct file_lock *lease_alloc(struct file *filp, long type)
46481 {
46482 struct file_lock *fl = locks_alloc_lock();
46483 int error = -ENOMEM;
46484 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46485 return;
46486
46487 if (filp->f_op && filp->f_op->flock) {
46488 - struct file_lock fl = {
46489 + struct file_lock flock = {
46490 .fl_pid = current->tgid,
46491 .fl_file = filp,
46492 .fl_flags = FL_FLOCK,
46493 .fl_type = F_UNLCK,
46494 .fl_end = OFFSET_MAX,
46495 };
46496 - filp->f_op->flock(filp, F_SETLKW, &fl);
46497 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46498 - fl.fl_ops->fl_release_private(&fl);
46499 + filp->f_op->flock(filp, F_SETLKW, &flock);
46500 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46501 + flock.fl_ops->fl_release_private(&flock);
46502 }
46503
46504 lock_flocks();
46505 diff --git a/fs/namei.c b/fs/namei.c
46506 index c427919..232326c 100644
46507 --- a/fs/namei.c
46508 +++ b/fs/namei.c
46509 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46510 if (ret != -EACCES)
46511 return ret;
46512
46513 +#ifdef CONFIG_GRKERNSEC
46514 + /* we'll block if we have to log due to a denied capability use */
46515 + if (mask & MAY_NOT_BLOCK)
46516 + return -ECHILD;
46517 +#endif
46518 +
46519 if (S_ISDIR(inode->i_mode)) {
46520 /* DACs are overridable for directories */
46521 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46522 - return 0;
46523 if (!(mask & MAY_WRITE))
46524 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46525 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46526 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46527 return 0;
46528 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46529 + return 0;
46530 return -EACCES;
46531 }
46532 /*
46533 + * Searching includes executable on directories, else just read.
46534 + */
46535 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46536 + if (mask == MAY_READ)
46537 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46538 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46539 + return 0;
46540 +
46541 + /*
46542 * Read/write DACs are always overridable.
46543 * Executable DACs are overridable when there is
46544 * at least one exec bit set.
46545 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46546 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46547 return 0;
46548
46549 - /*
46550 - * Searching includes executable on directories, else just read.
46551 - */
46552 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46553 - if (mask == MAY_READ)
46554 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46555 - return 0;
46556 -
46557 return -EACCES;
46558 }
46559
46560 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46561 return error;
46562 }
46563
46564 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46565 + dentry->d_inode, dentry, nd->path.mnt)) {
46566 + error = -EACCES;
46567 + *p = ERR_PTR(error); /* no ->put_link(), please */
46568 + path_put(&nd->path);
46569 + return error;
46570 + }
46571 +
46572 nd->last_type = LAST_BIND;
46573 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46574 error = PTR_ERR(*p);
46575 if (!IS_ERR(*p)) {
46576 - char *s = nd_get_link(nd);
46577 + const char *s = nd_get_link(nd);
46578 error = 0;
46579 if (s)
46580 error = __vfs_follow_link(nd, s);
46581 @@ -1355,6 +1371,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
46582 if (!res)
46583 res = walk_component(nd, path, &nd->last,
46584 nd->last_type, LOOKUP_FOLLOW);
46585 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
46586 + res = -EACCES;
46587 + }
46588 put_link(nd, &link, cookie);
46589 } while (res > 0);
46590
46591 @@ -1746,6 +1765,9 @@ static int path_lookupat(int dfd, const char *name,
46592 err = follow_link(&link, nd, &cookie);
46593 if (!err)
46594 err = lookup_last(nd, &path);
46595 + if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
46596 + err = -EACCES;
46597 + }
46598 put_link(nd, &link, cookie);
46599 }
46600 }
46601 @@ -1753,6 +1775,21 @@ static int path_lookupat(int dfd, const char *name,
46602 if (!err)
46603 err = complete_walk(nd);
46604
46605 + if (!(nd->flags & LOOKUP_PARENT)) {
46606 +#ifdef CONFIG_GRKERNSEC
46607 + if (flags & LOOKUP_RCU) {
46608 + if (!err)
46609 + path_put(&nd->path);
46610 + err = -ECHILD;
46611 + } else
46612 +#endif
46613 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46614 + if (!err)
46615 + path_put(&nd->path);
46616 + err = -ENOENT;
46617 + }
46618 + }
46619 +
46620 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46621 if (!nd->inode->i_op->lookup) {
46622 path_put(&nd->path);
46623 @@ -1780,6 +1817,15 @@ static int do_path_lookup(int dfd, const char *name,
46624 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46625
46626 if (likely(!retval)) {
46627 + if (*name != '/' && nd->path.dentry && nd->inode) {
46628 +#ifdef CONFIG_GRKERNSEC
46629 + if (flags & LOOKUP_RCU)
46630 + return -ECHILD;
46631 +#endif
46632 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46633 + return -ENOENT;
46634 + }
46635 +
46636 if (unlikely(!audit_dummy_context())) {
46637 if (nd->path.dentry && nd->inode)
46638 audit_inode(name, nd->path.dentry);
46639 @@ -2126,6 +2172,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46640 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46641 return -EPERM;
46642
46643 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46644 + return -EPERM;
46645 + if (gr_handle_rawio(inode))
46646 + return -EPERM;
46647 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46648 + return -EACCES;
46649 +
46650 return 0;
46651 }
46652
46653 @@ -2187,6 +2240,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46654 error = complete_walk(nd);
46655 if (error)
46656 return ERR_PTR(error);
46657 +#ifdef CONFIG_GRKERNSEC
46658 + if (nd->flags & LOOKUP_RCU) {
46659 + error = -ECHILD;
46660 + goto exit;
46661 + }
46662 +#endif
46663 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46664 + error = -ENOENT;
46665 + goto exit;
46666 + }
46667 audit_inode(pathname, nd->path.dentry);
46668 if (open_flag & O_CREAT) {
46669 error = -EISDIR;
46670 @@ -2197,6 +2260,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46671 error = complete_walk(nd);
46672 if (error)
46673 return ERR_PTR(error);
46674 +#ifdef CONFIG_GRKERNSEC
46675 + if (nd->flags & LOOKUP_RCU) {
46676 + error = -ECHILD;
46677 + goto exit;
46678 + }
46679 +#endif
46680 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46681 + error = -ENOENT;
46682 + goto exit;
46683 + }
46684 audit_inode(pathname, dir);
46685 goto ok;
46686 }
46687 @@ -2218,6 +2291,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46688 error = complete_walk(nd);
46689 if (error)
46690 return ERR_PTR(error);
46691 +#ifdef CONFIG_GRKERNSEC
46692 + if (nd->flags & LOOKUP_RCU) {
46693 + error = -ECHILD;
46694 + goto exit;
46695 + }
46696 +#endif
46697 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46698 + error = -ENOENT;
46699 + goto exit;
46700 + }
46701
46702 error = -ENOTDIR;
46703 if (nd->flags & LOOKUP_DIRECTORY) {
46704 @@ -2258,6 +2341,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46705 /* Negative dentry, just create the file */
46706 if (!dentry->d_inode) {
46707 umode_t mode = op->mode;
46708 +
46709 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46710 + error = -EACCES;
46711 + goto exit_mutex_unlock;
46712 + }
46713 +
46714 if (!IS_POSIXACL(dir->d_inode))
46715 mode &= ~current_umask();
46716 /*
46717 @@ -2281,6 +2370,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46718 error = vfs_create(dir->d_inode, dentry, mode, nd);
46719 if (error)
46720 goto exit_mutex_unlock;
46721 + else
46722 + gr_handle_create(path->dentry, path->mnt);
46723 mutex_unlock(&dir->d_inode->i_mutex);
46724 dput(nd->path.dentry);
46725 nd->path.dentry = dentry;
46726 @@ -2290,6 +2381,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46727 /*
46728 * It already exists.
46729 */
46730 +
46731 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46732 + error = -ENOENT;
46733 + goto exit_mutex_unlock;
46734 + }
46735 +
46736 + /* only check if O_CREAT is specified, all other checks need to go
46737 + into may_open */
46738 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46739 + error = -EACCES;
46740 + goto exit_mutex_unlock;
46741 + }
46742 +
46743 mutex_unlock(&dir->d_inode->i_mutex);
46744 audit_inode(pathname, path->dentry);
46745
46746 @@ -2407,8 +2511,14 @@ static struct file *path_openat(int dfd, const char *pathname,
46747 error = follow_link(&link, nd, &cookie);
46748 if (unlikely(error))
46749 filp = ERR_PTR(error);
46750 - else
46751 + else {
46752 filp = do_last(nd, &path, op, pathname);
46753 + if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
46754 + if (filp)
46755 + fput(filp);
46756 + filp = ERR_PTR(-EACCES);
46757 + }
46758 + }
46759 put_link(nd, &link, cookie);
46760 }
46761 out:
46762 @@ -2502,6 +2612,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46763 *path = nd.path;
46764 return dentry;
46765 eexist:
46766 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46767 + dput(dentry);
46768 + dentry = ERR_PTR(-ENOENT);
46769 + goto fail;
46770 + }
46771 dput(dentry);
46772 dentry = ERR_PTR(-EEXIST);
46773 fail:
46774 @@ -2524,6 +2639,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46775 }
46776 EXPORT_SYMBOL(user_path_create);
46777
46778 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46779 +{
46780 + char *tmp = getname(pathname);
46781 + struct dentry *res;
46782 + if (IS_ERR(tmp))
46783 + return ERR_CAST(tmp);
46784 + res = kern_path_create(dfd, tmp, path, is_dir);
46785 + if (IS_ERR(res))
46786 + putname(tmp);
46787 + else
46788 + *to = tmp;
46789 + return res;
46790 +}
46791 +
46792 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46793 {
46794 int error = may_create(dir, dentry);
46795 @@ -2591,6 +2720,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46796 error = mnt_want_write(path.mnt);
46797 if (error)
46798 goto out_dput;
46799 +
46800 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46801 + error = -EPERM;
46802 + goto out_drop_write;
46803 + }
46804 +
46805 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46806 + error = -EACCES;
46807 + goto out_drop_write;
46808 + }
46809 +
46810 error = security_path_mknod(&path, dentry, mode, dev);
46811 if (error)
46812 goto out_drop_write;
46813 @@ -2608,6 +2748,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46814 }
46815 out_drop_write:
46816 mnt_drop_write(path.mnt);
46817 +
46818 + if (!error)
46819 + gr_handle_create(dentry, path.mnt);
46820 out_dput:
46821 dput(dentry);
46822 mutex_unlock(&path.dentry->d_inode->i_mutex);
46823 @@ -2661,12 +2804,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46824 error = mnt_want_write(path.mnt);
46825 if (error)
46826 goto out_dput;
46827 +
46828 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46829 + error = -EACCES;
46830 + goto out_drop_write;
46831 + }
46832 +
46833 error = security_path_mkdir(&path, dentry, mode);
46834 if (error)
46835 goto out_drop_write;
46836 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46837 out_drop_write:
46838 mnt_drop_write(path.mnt);
46839 +
46840 + if (!error)
46841 + gr_handle_create(dentry, path.mnt);
46842 out_dput:
46843 dput(dentry);
46844 mutex_unlock(&path.dentry->d_inode->i_mutex);
46845 @@ -2746,6 +2898,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46846 char * name;
46847 struct dentry *dentry;
46848 struct nameidata nd;
46849 + ino_t saved_ino = 0;
46850 + dev_t saved_dev = 0;
46851
46852 error = user_path_parent(dfd, pathname, &nd, &name);
46853 if (error)
46854 @@ -2774,6 +2928,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46855 error = -ENOENT;
46856 goto exit3;
46857 }
46858 +
46859 + saved_ino = dentry->d_inode->i_ino;
46860 + saved_dev = gr_get_dev_from_dentry(dentry);
46861 +
46862 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46863 + error = -EACCES;
46864 + goto exit3;
46865 + }
46866 +
46867 error = mnt_want_write(nd.path.mnt);
46868 if (error)
46869 goto exit3;
46870 @@ -2781,6 +2944,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46871 if (error)
46872 goto exit4;
46873 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46874 + if (!error && (saved_dev || saved_ino))
46875 + gr_handle_delete(saved_ino, saved_dev);
46876 exit4:
46877 mnt_drop_write(nd.path.mnt);
46878 exit3:
46879 @@ -2843,6 +3008,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46880 struct dentry *dentry;
46881 struct nameidata nd;
46882 struct inode *inode = NULL;
46883 + ino_t saved_ino = 0;
46884 + dev_t saved_dev = 0;
46885
46886 error = user_path_parent(dfd, pathname, &nd, &name);
46887 if (error)
46888 @@ -2865,6 +3032,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46889 if (!inode)
46890 goto slashes;
46891 ihold(inode);
46892 +
46893 + if (inode->i_nlink <= 1) {
46894 + saved_ino = inode->i_ino;
46895 + saved_dev = gr_get_dev_from_dentry(dentry);
46896 + }
46897 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46898 + error = -EACCES;
46899 + goto exit2;
46900 + }
46901 +
46902 error = mnt_want_write(nd.path.mnt);
46903 if (error)
46904 goto exit2;
46905 @@ -2872,6 +3049,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46906 if (error)
46907 goto exit3;
46908 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46909 + if (!error && (saved_ino || saved_dev))
46910 + gr_handle_delete(saved_ino, saved_dev);
46911 exit3:
46912 mnt_drop_write(nd.path.mnt);
46913 exit2:
46914 @@ -2947,10 +3126,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46915 error = mnt_want_write(path.mnt);
46916 if (error)
46917 goto out_dput;
46918 +
46919 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46920 + error = -EACCES;
46921 + goto out_drop_write;
46922 + }
46923 +
46924 error = security_path_symlink(&path, dentry, from);
46925 if (error)
46926 goto out_drop_write;
46927 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46928 + if (!error)
46929 + gr_handle_create(dentry, path.mnt);
46930 out_drop_write:
46931 mnt_drop_write(path.mnt);
46932 out_dput:
46933 @@ -3025,6 +3212,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46934 {
46935 struct dentry *new_dentry;
46936 struct path old_path, new_path;
46937 + char *to = NULL;
46938 int how = 0;
46939 int error;
46940
46941 @@ -3048,7 +3236,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46942 if (error)
46943 return error;
46944
46945 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46946 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46947 error = PTR_ERR(new_dentry);
46948 if (IS_ERR(new_dentry))
46949 goto out;
46950 @@ -3059,13 +3247,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46951 error = mnt_want_write(new_path.mnt);
46952 if (error)
46953 goto out_dput;
46954 +
46955 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46956 + old_path.dentry->d_inode,
46957 + old_path.dentry->d_inode->i_mode, to)) {
46958 + error = -EACCES;
46959 + goto out_drop_write;
46960 + }
46961 +
46962 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46963 + old_path.dentry, old_path.mnt, to)) {
46964 + error = -EACCES;
46965 + goto out_drop_write;
46966 + }
46967 +
46968 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46969 if (error)
46970 goto out_drop_write;
46971 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46972 + if (!error)
46973 + gr_handle_create(new_dentry, new_path.mnt);
46974 out_drop_write:
46975 mnt_drop_write(new_path.mnt);
46976 out_dput:
46977 + putname(to);
46978 dput(new_dentry);
46979 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46980 path_put(&new_path);
46981 @@ -3299,6 +3504,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46982 if (new_dentry == trap)
46983 goto exit5;
46984
46985 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46986 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46987 + to);
46988 + if (error)
46989 + goto exit5;
46990 +
46991 error = mnt_want_write(oldnd.path.mnt);
46992 if (error)
46993 goto exit5;
46994 @@ -3308,6 +3519,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46995 goto exit6;
46996 error = vfs_rename(old_dir->d_inode, old_dentry,
46997 new_dir->d_inode, new_dentry);
46998 + if (!error)
46999 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47000 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47001 exit6:
47002 mnt_drop_write(oldnd.path.mnt);
47003 exit5:
47004 @@ -3333,6 +3547,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47005
47006 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47007 {
47008 + char tmpbuf[64];
47009 + const char *newlink;
47010 int len;
47011
47012 len = PTR_ERR(link);
47013 @@ -3342,7 +3558,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47014 len = strlen(link);
47015 if (len > (unsigned) buflen)
47016 len = buflen;
47017 - if (copy_to_user(buffer, link, len))
47018 +
47019 + if (len < sizeof(tmpbuf)) {
47020 + memcpy(tmpbuf, link, len);
47021 + newlink = tmpbuf;
47022 + } else
47023 + newlink = link;
47024 +
47025 + if (copy_to_user(buffer, newlink, len))
47026 len = -EFAULT;
47027 out:
47028 return len;
47029 diff --git a/fs/namespace.c b/fs/namespace.c
47030 index 4e46539..b28253c 100644
47031 --- a/fs/namespace.c
47032 +++ b/fs/namespace.c
47033 @@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
47034 if (!(sb->s_flags & MS_RDONLY))
47035 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47036 up_write(&sb->s_umount);
47037 +
47038 + gr_log_remount(mnt->mnt_devname, retval);
47039 +
47040 return retval;
47041 }
47042
47043 @@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
47044 br_write_unlock(vfsmount_lock);
47045 up_write(&namespace_sem);
47046 release_mounts(&umount_list);
47047 +
47048 + gr_log_unmount(mnt->mnt_devname, retval);
47049 +
47050 return retval;
47051 }
47052
47053 @@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47054 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47055 MS_STRICTATIME);
47056
47057 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47058 + retval = -EPERM;
47059 + goto dput_out;
47060 + }
47061 +
47062 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47063 + retval = -EPERM;
47064 + goto dput_out;
47065 + }
47066 +
47067 if (flags & MS_REMOUNT)
47068 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47069 data_page);
47070 @@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47071 dev_name, data_page);
47072 dput_out:
47073 path_put(&path);
47074 +
47075 + gr_log_mount(dev_name, dir_name, retval);
47076 +
47077 return retval;
47078 }
47079
47080 @@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47081 if (error)
47082 goto out2;
47083
47084 + if (gr_handle_chroot_pivot()) {
47085 + error = -EPERM;
47086 + goto out2;
47087 + }
47088 +
47089 get_fs_root(current->fs, &root);
47090 error = lock_mount(&old);
47091 if (error)
47092 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47093 index e8bbfa5..864f936 100644
47094 --- a/fs/nfs/inode.c
47095 +++ b/fs/nfs/inode.c
47096 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47097 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47098 nfsi->attrtimeo_timestamp = jiffies;
47099
47100 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47101 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47102 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47103 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47104 else
47105 @@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47106 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47107 }
47108
47109 -static atomic_long_t nfs_attr_generation_counter;
47110 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47111
47112 static unsigned long nfs_read_attr_generation_counter(void)
47113 {
47114 - return atomic_long_read(&nfs_attr_generation_counter);
47115 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47116 }
47117
47118 unsigned long nfs_inc_attr_generation_counter(void)
47119 {
47120 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47121 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47122 }
47123
47124 void nfs_fattr_init(struct nfs_fattr *fattr)
47125 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47126 index 5686661..80a9a3a 100644
47127 --- a/fs/nfsd/vfs.c
47128 +++ b/fs/nfsd/vfs.c
47129 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47130 } else {
47131 oldfs = get_fs();
47132 set_fs(KERNEL_DS);
47133 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47134 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47135 set_fs(oldfs);
47136 }
47137
47138 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47139
47140 /* Write the data. */
47141 oldfs = get_fs(); set_fs(KERNEL_DS);
47142 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47143 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47144 set_fs(oldfs);
47145 if (host_err < 0)
47146 goto out_nfserr;
47147 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47148 */
47149
47150 oldfs = get_fs(); set_fs(KERNEL_DS);
47151 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
47152 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
47153 set_fs(oldfs);
47154
47155 if (host_err < 0)
47156 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47157 index 3568c8a..e0240d8 100644
47158 --- a/fs/notify/fanotify/fanotify_user.c
47159 +++ b/fs/notify/fanotify/fanotify_user.c
47160 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47161 goto out_close_fd;
47162
47163 ret = -EFAULT;
47164 - if (copy_to_user(buf, &fanotify_event_metadata,
47165 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47166 + copy_to_user(buf, &fanotify_event_metadata,
47167 fanotify_event_metadata.event_len))
47168 goto out_kill_access_response;
47169
47170 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47171 index c887b13..0fdf472 100644
47172 --- a/fs/notify/notification.c
47173 +++ b/fs/notify/notification.c
47174 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47175 * get set to 0 so it will never get 'freed'
47176 */
47177 static struct fsnotify_event *q_overflow_event;
47178 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47179 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47180
47181 /**
47182 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47183 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47184 */
47185 u32 fsnotify_get_cookie(void)
47186 {
47187 - return atomic_inc_return(&fsnotify_sync_cookie);
47188 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47189 }
47190 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47191
47192 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47193 index 99e3610..02c1068 100644
47194 --- a/fs/ntfs/dir.c
47195 +++ b/fs/ntfs/dir.c
47196 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47197 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47198 ~(s64)(ndir->itype.index.block_size - 1)));
47199 /* Bounds checks. */
47200 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47201 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47202 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47203 "inode 0x%lx or driver bug.", vdir->i_ino);
47204 goto err_out;
47205 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47206 index 8639169..76697aa 100644
47207 --- a/fs/ntfs/file.c
47208 +++ b/fs/ntfs/file.c
47209 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47210 #endif /* NTFS_RW */
47211 };
47212
47213 -const struct file_operations ntfs_empty_file_ops = {};
47214 +const struct file_operations ntfs_empty_file_ops __read_only;
47215
47216 -const struct inode_operations ntfs_empty_inode_ops = {};
47217 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47218 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47219 index 210c352..a174f83 100644
47220 --- a/fs/ocfs2/localalloc.c
47221 +++ b/fs/ocfs2/localalloc.c
47222 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47223 goto bail;
47224 }
47225
47226 - atomic_inc(&osb->alloc_stats.moves);
47227 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47228
47229 bail:
47230 if (handle)
47231 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47232 index d355e6e..578d905 100644
47233 --- a/fs/ocfs2/ocfs2.h
47234 +++ b/fs/ocfs2/ocfs2.h
47235 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47236
47237 struct ocfs2_alloc_stats
47238 {
47239 - atomic_t moves;
47240 - atomic_t local_data;
47241 - atomic_t bitmap_data;
47242 - atomic_t bg_allocs;
47243 - atomic_t bg_extends;
47244 + atomic_unchecked_t moves;
47245 + atomic_unchecked_t local_data;
47246 + atomic_unchecked_t bitmap_data;
47247 + atomic_unchecked_t bg_allocs;
47248 + atomic_unchecked_t bg_extends;
47249 };
47250
47251 enum ocfs2_local_alloc_state
47252 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47253 index f169da4..9112253 100644
47254 --- a/fs/ocfs2/suballoc.c
47255 +++ b/fs/ocfs2/suballoc.c
47256 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47257 mlog_errno(status);
47258 goto bail;
47259 }
47260 - atomic_inc(&osb->alloc_stats.bg_extends);
47261 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47262
47263 /* You should never ask for this much metadata */
47264 BUG_ON(bits_wanted >
47265 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47266 mlog_errno(status);
47267 goto bail;
47268 }
47269 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47270 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47271
47272 *suballoc_loc = res.sr_bg_blkno;
47273 *suballoc_bit_start = res.sr_bit_offset;
47274 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47275 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47276 res->sr_bits);
47277
47278 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47279 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47280
47281 BUG_ON(res->sr_bits != 1);
47282
47283 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47284 mlog_errno(status);
47285 goto bail;
47286 }
47287 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47288 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47289
47290 BUG_ON(res.sr_bits != 1);
47291
47292 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47293 cluster_start,
47294 num_clusters);
47295 if (!status)
47296 - atomic_inc(&osb->alloc_stats.local_data);
47297 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47298 } else {
47299 if (min_clusters > (osb->bitmap_cpg - 1)) {
47300 /* The only paths asking for contiguousness
47301 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47302 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47303 res.sr_bg_blkno,
47304 res.sr_bit_offset);
47305 - atomic_inc(&osb->alloc_stats.bitmap_data);
47306 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47307 *num_clusters = res.sr_bits;
47308 }
47309 }
47310 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47311 index 68f4541..89cfe6a 100644
47312 --- a/fs/ocfs2/super.c
47313 +++ b/fs/ocfs2/super.c
47314 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47315 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47316 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47317 "Stats",
47318 - atomic_read(&osb->alloc_stats.bitmap_data),
47319 - atomic_read(&osb->alloc_stats.local_data),
47320 - atomic_read(&osb->alloc_stats.bg_allocs),
47321 - atomic_read(&osb->alloc_stats.moves),
47322 - atomic_read(&osb->alloc_stats.bg_extends));
47323 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47324 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47325 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47326 + atomic_read_unchecked(&osb->alloc_stats.moves),
47327 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47328
47329 out += snprintf(buf + out, len - out,
47330 "%10s => State: %u Descriptor: %llu Size: %u bits "
47331 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47332 spin_lock_init(&osb->osb_xattr_lock);
47333 ocfs2_init_steal_slots(osb);
47334
47335 - atomic_set(&osb->alloc_stats.moves, 0);
47336 - atomic_set(&osb->alloc_stats.local_data, 0);
47337 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47338 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47339 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47340 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47341 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47342 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47343 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47344 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47345
47346 /* Copy the blockcheck stats from the superblock probe */
47347 osb->osb_ecc_stats = *stats;
47348 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47349 index 5d22872..523db20 100644
47350 --- a/fs/ocfs2/symlink.c
47351 +++ b/fs/ocfs2/symlink.c
47352 @@ -142,7 +142,7 @@ bail:
47353
47354 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47355 {
47356 - char *link = nd_get_link(nd);
47357 + const char *link = nd_get_link(nd);
47358 if (!IS_ERR(link))
47359 kfree(link);
47360 }
47361 diff --git a/fs/open.c b/fs/open.c
47362 index 3f1108b..822d7f7 100644
47363 --- a/fs/open.c
47364 +++ b/fs/open.c
47365 @@ -31,6 +31,8 @@
47366 #include <linux/ima.h>
47367 #include <linux/dnotify.h>
47368
47369 +#define CREATE_TRACE_POINTS
47370 +#include <trace/events/fs.h>
47371 #include "internal.h"
47372
47373 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
47374 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47375 error = locks_verify_truncate(inode, NULL, length);
47376 if (!error)
47377 error = security_path_truncate(&path);
47378 +
47379 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47380 + error = -EACCES;
47381 +
47382 if (!error)
47383 error = do_truncate(path.dentry, length, 0, NULL);
47384
47385 @@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47386 if (__mnt_is_readonly(path.mnt))
47387 res = -EROFS;
47388
47389 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47390 + res = -EACCES;
47391 +
47392 out_path_release:
47393 path_put(&path);
47394 out:
47395 @@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47396 if (error)
47397 goto dput_and_out;
47398
47399 + gr_log_chdir(path.dentry, path.mnt);
47400 +
47401 set_fs_pwd(current->fs, &path);
47402
47403 dput_and_out:
47404 @@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47405 goto out_putf;
47406
47407 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47408 +
47409 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47410 + error = -EPERM;
47411 +
47412 + if (!error)
47413 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47414 +
47415 if (!error)
47416 set_fs_pwd(current->fs, &file->f_path);
47417 out_putf:
47418 @@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47419 if (error)
47420 goto dput_and_out;
47421
47422 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47423 + goto dput_and_out;
47424 +
47425 set_fs_root(current->fs, &path);
47426 +
47427 + gr_handle_chroot_chdir(&path);
47428 +
47429 error = 0;
47430 dput_and_out:
47431 path_put(&path);
47432 @@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
47433 if (error)
47434 return error;
47435 mutex_lock(&inode->i_mutex);
47436 +
47437 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47438 + error = -EACCES;
47439 + goto out_unlock;
47440 + }
47441 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47442 + error = -EACCES;
47443 + goto out_unlock;
47444 + }
47445 +
47446 error = security_path_chmod(path, mode);
47447 if (error)
47448 goto out_unlock;
47449 @@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47450 int error;
47451 struct iattr newattrs;
47452
47453 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47454 + return -EACCES;
47455 +
47456 newattrs.ia_valid = ATTR_CTIME;
47457 if (user != (uid_t) -1) {
47458 newattrs.ia_valid |= ATTR_UID;
47459 @@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47460 } else {
47461 fsnotify_open(f);
47462 fd_install(fd, f);
47463 + trace_do_sys_open(tmp, flags, mode);
47464 }
47465 }
47466 putname(tmp);
47467 diff --git a/fs/pipe.c b/fs/pipe.c
47468 index fec5e4a..f4210f9 100644
47469 --- a/fs/pipe.c
47470 +++ b/fs/pipe.c
47471 @@ -438,9 +438,9 @@ redo:
47472 }
47473 if (bufs) /* More to do? */
47474 continue;
47475 - if (!pipe->writers)
47476 + if (!atomic_read(&pipe->writers))
47477 break;
47478 - if (!pipe->waiting_writers) {
47479 + if (!atomic_read(&pipe->waiting_writers)) {
47480 /* syscall merging: Usually we must not sleep
47481 * if O_NONBLOCK is set, or if we got some data.
47482 * But if a writer sleeps in kernel space, then
47483 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47484 mutex_lock(&inode->i_mutex);
47485 pipe = inode->i_pipe;
47486
47487 - if (!pipe->readers) {
47488 + if (!atomic_read(&pipe->readers)) {
47489 send_sig(SIGPIPE, current, 0);
47490 ret = -EPIPE;
47491 goto out;
47492 @@ -553,7 +553,7 @@ redo1:
47493 for (;;) {
47494 int bufs;
47495
47496 - if (!pipe->readers) {
47497 + if (!atomic_read(&pipe->readers)) {
47498 send_sig(SIGPIPE, current, 0);
47499 if (!ret)
47500 ret = -EPIPE;
47501 @@ -644,9 +644,9 @@ redo2:
47502 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47503 do_wakeup = 0;
47504 }
47505 - pipe->waiting_writers++;
47506 + atomic_inc(&pipe->waiting_writers);
47507 pipe_wait(pipe);
47508 - pipe->waiting_writers--;
47509 + atomic_dec(&pipe->waiting_writers);
47510 }
47511 out:
47512 mutex_unlock(&inode->i_mutex);
47513 @@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47514 mask = 0;
47515 if (filp->f_mode & FMODE_READ) {
47516 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47517 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47518 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47519 mask |= POLLHUP;
47520 }
47521
47522 @@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47523 * Most Unices do not set POLLERR for FIFOs but on Linux they
47524 * behave exactly like pipes for poll().
47525 */
47526 - if (!pipe->readers)
47527 + if (!atomic_read(&pipe->readers))
47528 mask |= POLLERR;
47529 }
47530
47531 @@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47532
47533 mutex_lock(&inode->i_mutex);
47534 pipe = inode->i_pipe;
47535 - pipe->readers -= decr;
47536 - pipe->writers -= decw;
47537 + atomic_sub(decr, &pipe->readers);
47538 + atomic_sub(decw, &pipe->writers);
47539
47540 - if (!pipe->readers && !pipe->writers) {
47541 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47542 free_pipe_info(inode);
47543 } else {
47544 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47545 @@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47546
47547 if (inode->i_pipe) {
47548 ret = 0;
47549 - inode->i_pipe->readers++;
47550 + atomic_inc(&inode->i_pipe->readers);
47551 }
47552
47553 mutex_unlock(&inode->i_mutex);
47554 @@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47555
47556 if (inode->i_pipe) {
47557 ret = 0;
47558 - inode->i_pipe->writers++;
47559 + atomic_inc(&inode->i_pipe->writers);
47560 }
47561
47562 mutex_unlock(&inode->i_mutex);
47563 @@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47564 if (inode->i_pipe) {
47565 ret = 0;
47566 if (filp->f_mode & FMODE_READ)
47567 - inode->i_pipe->readers++;
47568 + atomic_inc(&inode->i_pipe->readers);
47569 if (filp->f_mode & FMODE_WRITE)
47570 - inode->i_pipe->writers++;
47571 + atomic_inc(&inode->i_pipe->writers);
47572 }
47573
47574 mutex_unlock(&inode->i_mutex);
47575 @@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47576 inode->i_pipe = NULL;
47577 }
47578
47579 -static struct vfsmount *pipe_mnt __read_mostly;
47580 +struct vfsmount *pipe_mnt __read_mostly;
47581
47582 /*
47583 * pipefs_dname() is called from d_path().
47584 @@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47585 goto fail_iput;
47586 inode->i_pipe = pipe;
47587
47588 - pipe->readers = pipe->writers = 1;
47589 + atomic_set(&pipe->readers, 1);
47590 + atomic_set(&pipe->writers, 1);
47591 inode->i_fop = &rdwr_pipefifo_fops;
47592
47593 /*
47594 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47595 index 15af622..0e9f4467 100644
47596 --- a/fs/proc/Kconfig
47597 +++ b/fs/proc/Kconfig
47598 @@ -30,12 +30,12 @@ config PROC_FS
47599
47600 config PROC_KCORE
47601 bool "/proc/kcore support" if !ARM
47602 - depends on PROC_FS && MMU
47603 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47604
47605 config PROC_VMCORE
47606 bool "/proc/vmcore support"
47607 - depends on PROC_FS && CRASH_DUMP
47608 - default y
47609 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47610 + default n
47611 help
47612 Exports the dump image of crashed kernel in ELF format.
47613
47614 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47615 limited in memory.
47616
47617 config PROC_PAGE_MONITOR
47618 - default y
47619 - depends on PROC_FS && MMU
47620 + default n
47621 + depends on PROC_FS && MMU && !GRKERNSEC
47622 bool "Enable /proc page monitoring" if EXPERT
47623 help
47624 Various /proc files exist to monitor process memory utilization:
47625 diff --git a/fs/proc/array.c b/fs/proc/array.c
47626 index f9bd395..acb7847 100644
47627 --- a/fs/proc/array.c
47628 +++ b/fs/proc/array.c
47629 @@ -60,6 +60,7 @@
47630 #include <linux/tty.h>
47631 #include <linux/string.h>
47632 #include <linux/mman.h>
47633 +#include <linux/grsecurity.h>
47634 #include <linux/proc_fs.h>
47635 #include <linux/ioport.h>
47636 #include <linux/uaccess.h>
47637 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47638 seq_putc(m, '\n');
47639 }
47640
47641 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47642 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47643 +{
47644 + if (p->mm)
47645 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47646 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47647 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47648 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47649 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47650 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47651 + else
47652 + seq_printf(m, "PaX:\t-----\n");
47653 +}
47654 +#endif
47655 +
47656 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47657 struct pid *pid, struct task_struct *task)
47658 {
47659 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47660 task_cpus_allowed(m, task);
47661 cpuset_task_status_allowed(m, task);
47662 task_context_switch_counts(m, task);
47663 +
47664 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47665 + task_pax(m, task);
47666 +#endif
47667 +
47668 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47669 + task_grsec_rbac(m, task);
47670 +#endif
47671 +
47672 return 0;
47673 }
47674
47675 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47676 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47677 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47678 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47679 +#endif
47680 +
47681 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47682 struct pid *pid, struct task_struct *task, int whole)
47683 {
47684 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47685 char tcomm[sizeof(task->comm)];
47686 unsigned long flags;
47687
47688 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47689 + if (current->exec_id != m->exec_id) {
47690 + gr_log_badprocpid("stat");
47691 + return 0;
47692 + }
47693 +#endif
47694 +
47695 state = *get_task_state(task);
47696 vsize = eip = esp = 0;
47697 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47698 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47699 gtime = task->gtime;
47700 }
47701
47702 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47703 + if (PAX_RAND_FLAGS(mm)) {
47704 + eip = 0;
47705 + esp = 0;
47706 + wchan = 0;
47707 + }
47708 +#endif
47709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47710 + wchan = 0;
47711 + eip =0;
47712 + esp =0;
47713 +#endif
47714 +
47715 /* scale priority and nice values from timeslices to -20..20 */
47716 /* to make it look like a "normal" Unix priority/nice value */
47717 priority = task_prio(task);
47718 @@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47719 seq_put_decimal_ull(m, ' ', vsize);
47720 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47721 seq_put_decimal_ull(m, ' ', rsslim);
47722 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47723 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47724 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47725 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47726 +#else
47727 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47728 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47729 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47730 +#endif
47731 seq_put_decimal_ull(m, ' ', esp);
47732 seq_put_decimal_ull(m, ' ', eip);
47733 /* The signal information here is obsolete.
47734 @@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47735 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47736 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47737 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47738 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47739 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47740 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47741 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47742 +#else
47743 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47744 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47745 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47746 +#endif
47747 seq_putc(m, '\n');
47748 if (mm)
47749 mmput(mm);
47750 @@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47751 struct pid *pid, struct task_struct *task)
47752 {
47753 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47754 - struct mm_struct *mm = get_task_mm(task);
47755 + struct mm_struct *mm;
47756
47757 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47758 + if (current->exec_id != m->exec_id) {
47759 + gr_log_badprocpid("statm");
47760 + return 0;
47761 + }
47762 +#endif
47763 + mm = get_task_mm(task);
47764 if (mm) {
47765 size = task_statm(mm, &shared, &text, &data, &resident);
47766 mmput(mm);
47767 @@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47768
47769 return 0;
47770 }
47771 +
47772 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47773 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47774 +{
47775 + u32 curr_ip = 0;
47776 + unsigned long flags;
47777 +
47778 + if (lock_task_sighand(task, &flags)) {
47779 + curr_ip = task->signal->curr_ip;
47780 + unlock_task_sighand(task, &flags);
47781 + }
47782 +
47783 + return sprintf(buffer, "%pI4\n", &curr_ip);
47784 +}
47785 +#endif
47786 diff --git a/fs/proc/base.c b/fs/proc/base.c
47787 index 9fc77b4..4877d08 100644
47788 --- a/fs/proc/base.c
47789 +++ b/fs/proc/base.c
47790 @@ -109,6 +109,14 @@ struct pid_entry {
47791 union proc_op op;
47792 };
47793
47794 +struct getdents_callback {
47795 + struct linux_dirent __user * current_dir;
47796 + struct linux_dirent __user * previous;
47797 + struct file * file;
47798 + int count;
47799 + int error;
47800 +};
47801 +
47802 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47803 .name = (NAME), \
47804 .len = sizeof(NAME) - 1, \
47805 @@ -198,11 +206,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
47806 return result;
47807 }
47808
47809 -struct mm_struct *mm_for_maps(struct task_struct *task)
47810 -{
47811 - return mm_access(task, PTRACE_MODE_READ);
47812 -}
47813 -
47814 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47815 {
47816 int res = 0;
47817 @@ -213,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47818 if (!mm->arg_end)
47819 goto out_mm; /* Shh! No looking before we're done */
47820
47821 + if (gr_acl_handle_procpidmem(task))
47822 + goto out_mm;
47823 +
47824 len = mm->arg_end - mm->arg_start;
47825
47826 if (len > PAGE_SIZE)
47827 @@ -240,12 +246,28 @@ out:
47828 return res;
47829 }
47830
47831 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47832 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47833 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47834 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47835 +#endif
47836 +
47837 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47838 {
47839 - struct mm_struct *mm = mm_for_maps(task);
47840 + struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
47841 int res = PTR_ERR(mm);
47842 if (mm && !IS_ERR(mm)) {
47843 unsigned int nwords = 0;
47844 +
47845 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47846 + /* allow if we're currently ptracing this task */
47847 + if (PAX_RAND_FLAGS(mm) &&
47848 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47849 + mmput(mm);
47850 + return 0;
47851 + }
47852 +#endif
47853 +
47854 do {
47855 nwords += 2;
47856 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47857 @@ -259,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47858 }
47859
47860
47861 -#ifdef CONFIG_KALLSYMS
47862 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47863 /*
47864 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47865 * Returns the resolved symbol. If that fails, simply return the address.
47866 @@ -298,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
47867 mutex_unlock(&task->signal->cred_guard_mutex);
47868 }
47869
47870 -#ifdef CONFIG_STACKTRACE
47871 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47872
47873 #define MAX_STACK_TRACE_DEPTH 64
47874
47875 @@ -489,7 +511,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47876 return count;
47877 }
47878
47879 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47880 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47881 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47882 {
47883 long nr;
47884 @@ -518,7 +540,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47885 /************************************************************************/
47886
47887 /* permission checks */
47888 -static int proc_fd_access_allowed(struct inode *inode)
47889 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47890 {
47891 struct task_struct *task;
47892 int allowed = 0;
47893 @@ -528,7 +550,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47894 */
47895 task = get_proc_task(inode);
47896 if (task) {
47897 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47898 + if (log)
47899 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47900 + else
47901 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47902 put_task_struct(task);
47903 }
47904 return allowed;
47905 @@ -566,10 +591,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47906 struct task_struct *task,
47907 int hide_pid_min)
47908 {
47909 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47910 + return false;
47911 +
47912 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47913 + rcu_read_lock();
47914 + {
47915 + const struct cred *tmpcred = current_cred();
47916 + const struct cred *cred = __task_cred(task);
47917 +
47918 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47919 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47920 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47921 +#endif
47922 + ) {
47923 + rcu_read_unlock();
47924 + return true;
47925 + }
47926 + }
47927 + rcu_read_unlock();
47928 +
47929 + if (!pid->hide_pid)
47930 + return false;
47931 +#endif
47932 +
47933 if (pid->hide_pid < hide_pid_min)
47934 return true;
47935 if (in_group_p(pid->pid_gid))
47936 return true;
47937 +
47938 return ptrace_may_access(task, PTRACE_MODE_READ);
47939 }
47940
47941 @@ -587,7 +637,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47942 put_task_struct(task);
47943
47944 if (!has_perms) {
47945 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47946 + {
47947 +#else
47948 if (pid->hide_pid == 2) {
47949 +#endif
47950 /*
47951 * Let's make getdents(), stat(), and open()
47952 * consistent with each other. If a process
47953 @@ -677,7 +731,7 @@ static const struct file_operations proc_single_file_operations = {
47954 .release = single_release,
47955 };
47956
47957 -static int mem_open(struct inode* inode, struct file* file)
47958 +static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
47959 {
47960 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
47961 struct mm_struct *mm;
47962 @@ -685,7 +739,12 @@ static int mem_open(struct inode* inode, struct file* file)
47963 if (!task)
47964 return -ESRCH;
47965
47966 - mm = mm_access(task, PTRACE_MODE_ATTACH);
47967 + if (gr_acl_handle_procpidmem(task)) {
47968 + put_task_struct(task);
47969 + return -EPERM;
47970 + }
47971 +
47972 + mm = mm_access(task, mode);
47973 put_task_struct(task);
47974
47975 if (IS_ERR(mm))
47976 @@ -698,11 +757,24 @@ static int mem_open(struct inode* inode, struct file* file)
47977 mmput(mm);
47978 }
47979
47980 + file->private_data = mm;
47981 +
47982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47983 + file->f_version = current->exec_id;
47984 +#endif
47985 +
47986 + return 0;
47987 +}
47988 +
47989 +static int mem_open(struct inode *inode, struct file *file)
47990 +{
47991 + int ret;
47992 + ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
47993 +
47994 /* OK to pass negative loff_t, we can catch out-of-range */
47995 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47996 - file->private_data = mm;
47997
47998 - return 0;
47999 + return ret;
48000 }
48001
48002 static ssize_t mem_rw(struct file *file, char __user *buf,
48003 @@ -713,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48004 ssize_t copied;
48005 char *page;
48006
48007 +#ifdef CONFIG_GRKERNSEC
48008 + if (write)
48009 + return -EPERM;
48010 +#endif
48011 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48012 + if (file->f_version != current->exec_id) {
48013 + gr_log_badprocpid("mem");
48014 + return 0;
48015 + }
48016 +#endif
48017 +
48018 if (!mm)
48019 return 0;
48020
48021 @@ -801,42 +884,49 @@ static const struct file_operations proc_mem_operations = {
48022 .release = mem_release,
48023 };
48024
48025 +static int environ_open(struct inode *inode, struct file *file)
48026 +{
48027 + return __mem_open(inode, file, PTRACE_MODE_READ);
48028 +}
48029 +
48030 static ssize_t environ_read(struct file *file, char __user *buf,
48031 size_t count, loff_t *ppos)
48032 {
48033 - struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
48034 char *page;
48035 unsigned long src = *ppos;
48036 - int ret = -ESRCH;
48037 - struct mm_struct *mm;
48038 + int ret = 0;
48039 + struct mm_struct *mm = file->private_data;
48040
48041 - if (!task)
48042 - goto out_no_task;
48043 + if (!mm)
48044 + return 0;
48045 +
48046 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48047 + if (file->f_version != current->exec_id) {
48048 + gr_log_badprocpid("environ");
48049 + return 0;
48050 + }
48051 +#endif
48052
48053 - ret = -ENOMEM;
48054 page = (char *)__get_free_page(GFP_TEMPORARY);
48055 if (!page)
48056 - goto out;
48057 -
48058 -
48059 - mm = mm_for_maps(task);
48060 - ret = PTR_ERR(mm);
48061 - if (!mm || IS_ERR(mm))
48062 - goto out_free;
48063 + return -ENOMEM;
48064
48065 ret = 0;
48066 + if (!atomic_inc_not_zero(&mm->mm_users))
48067 + goto free;
48068 while (count > 0) {
48069 - int this_len, retval, max_len;
48070 + size_t this_len, max_len;
48071 + int retval;
48072 +
48073 + if (src >= (mm->env_end - mm->env_start))
48074 + break;
48075
48076 this_len = mm->env_end - (mm->env_start + src);
48077
48078 - if (this_len <= 0)
48079 - break;
48080 + max_len = min_t(size_t, PAGE_SIZE, count);
48081 + this_len = min(max_len, this_len);
48082
48083 - max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
48084 - this_len = (this_len > max_len) ? max_len : this_len;
48085 -
48086 - retval = access_process_vm(task, (mm->env_start + src),
48087 + retval = access_remote_vm(mm, (mm->env_start + src),
48088 page, this_len, 0);
48089
48090 if (retval <= 0) {
48091 @@ -855,19 +945,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48092 count -= retval;
48093 }
48094 *ppos = src;
48095 -
48096 mmput(mm);
48097 -out_free:
48098 +
48099 +free:
48100 free_page((unsigned long) page);
48101 -out:
48102 - put_task_struct(task);
48103 -out_no_task:
48104 return ret;
48105 }
48106
48107 static const struct file_operations proc_environ_operations = {
48108 + .open = environ_open,
48109 .read = environ_read,
48110 .llseek = generic_file_llseek,
48111 + .release = mem_release,
48112 };
48113
48114 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
48115 @@ -1433,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48116 path_put(&nd->path);
48117
48118 /* Are we allowed to snoop on the tasks file descriptors? */
48119 - if (!proc_fd_access_allowed(inode))
48120 + if (!proc_fd_access_allowed(inode, 0))
48121 goto out;
48122
48123 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48124 @@ -1472,8 +1561,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48125 struct path path;
48126
48127 /* Are we allowed to snoop on the tasks file descriptors? */
48128 - if (!proc_fd_access_allowed(inode))
48129 - goto out;
48130 + /* logging this is needed for learning on chromium to work properly,
48131 + but we don't want to flood the logs from 'ps' which does a readlink
48132 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48133 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
48134 + */
48135 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48136 + if (!proc_fd_access_allowed(inode,0))
48137 + goto out;
48138 + } else {
48139 + if (!proc_fd_access_allowed(inode,1))
48140 + goto out;
48141 + }
48142
48143 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48144 if (error)
48145 @@ -1538,7 +1637,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48146 rcu_read_lock();
48147 cred = __task_cred(task);
48148 inode->i_uid = cred->euid;
48149 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48150 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48151 +#else
48152 inode->i_gid = cred->egid;
48153 +#endif
48154 rcu_read_unlock();
48155 }
48156 security_task_to_inode(task, inode);
48157 @@ -1574,10 +1677,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48158 return -ENOENT;
48159 }
48160 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48161 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48162 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48163 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48164 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48165 +#endif
48166 task_dumpable(task)) {
48167 cred = __task_cred(task);
48168 stat->uid = cred->euid;
48169 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48170 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48171 +#else
48172 stat->gid = cred->egid;
48173 +#endif
48174 }
48175 }
48176 rcu_read_unlock();
48177 @@ -1615,11 +1727,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48178
48179 if (task) {
48180 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48181 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48182 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48183 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48184 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48185 +#endif
48186 task_dumpable(task)) {
48187 rcu_read_lock();
48188 cred = __task_cred(task);
48189 inode->i_uid = cred->euid;
48190 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48191 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48192 +#else
48193 inode->i_gid = cred->egid;
48194 +#endif
48195 rcu_read_unlock();
48196 } else {
48197 inode->i_uid = 0;
48198 @@ -1737,7 +1858,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48199 int fd = proc_fd(inode);
48200
48201 if (task) {
48202 - files = get_files_struct(task);
48203 + if (!gr_acl_handle_procpidmem(task))
48204 + files = get_files_struct(task);
48205 put_task_struct(task);
48206 }
48207 if (files) {
48208 @@ -2025,11 +2147,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
48209 if (!task)
48210 goto out_notask;
48211
48212 - if (!ptrace_may_access(task, PTRACE_MODE_READ))
48213 - goto out;
48214 -
48215 - mm = get_task_mm(task);
48216 - if (!mm)
48217 + mm = mm_access(task, PTRACE_MODE_READ);
48218 + if (IS_ERR_OR_NULL(mm))
48219 goto out;
48220
48221 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
48222 @@ -2338,11 +2457,21 @@ static const struct file_operations proc_map_files_operations = {
48223 */
48224 static int proc_fd_permission(struct inode *inode, int mask)
48225 {
48226 + struct task_struct *task;
48227 int rv = generic_permission(inode, mask);
48228 - if (rv == 0)
48229 - return 0;
48230 +
48231 if (task_pid(current) == proc_pid(inode))
48232 rv = 0;
48233 +
48234 + task = get_proc_task(inode);
48235 + if (task == NULL)
48236 + return rv;
48237 +
48238 + if (gr_acl_handle_procpidmem(task))
48239 + rv = -EACCES;
48240 +
48241 + put_task_struct(task);
48242 +
48243 return rv;
48244 }
48245
48246 @@ -2452,6 +2581,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48247 if (!task)
48248 goto out_no_task;
48249
48250 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48251 + goto out;
48252 +
48253 /*
48254 * Yes, it does not scale. And it should not. Don't add
48255 * new entries into /proc/<tgid>/ without very good reasons.
48256 @@ -2496,6 +2628,9 @@ static int proc_pident_readdir(struct file *filp,
48257 if (!task)
48258 goto out_no_task;
48259
48260 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48261 + goto out;
48262 +
48263 ret = 0;
48264 i = filp->f_pos;
48265 switch (i) {
48266 @@ -2766,7 +2901,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48267 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48268 void *cookie)
48269 {
48270 - char *s = nd_get_link(nd);
48271 + const char *s = nd_get_link(nd);
48272 if (!IS_ERR(s))
48273 __putname(s);
48274 }
48275 @@ -2967,7 +3102,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48276 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48277 #endif
48278 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48279 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48280 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48281 INF("syscall", S_IRUGO, proc_pid_syscall),
48282 #endif
48283 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48284 @@ -2992,10 +3127,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48285 #ifdef CONFIG_SECURITY
48286 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48287 #endif
48288 -#ifdef CONFIG_KALLSYMS
48289 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48290 INF("wchan", S_IRUGO, proc_pid_wchan),
48291 #endif
48292 -#ifdef CONFIG_STACKTRACE
48293 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48294 ONE("stack", S_IRUGO, proc_pid_stack),
48295 #endif
48296 #ifdef CONFIG_SCHEDSTATS
48297 @@ -3029,6 +3164,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48298 #ifdef CONFIG_HARDWALL
48299 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48300 #endif
48301 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48302 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48303 +#endif
48304 };
48305
48306 static int proc_tgid_base_readdir(struct file * filp,
48307 @@ -3155,7 +3293,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48308 if (!inode)
48309 goto out;
48310
48311 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48312 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48313 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48314 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48315 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48316 +#else
48317 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48318 +#endif
48319 inode->i_op = &proc_tgid_base_inode_operations;
48320 inode->i_fop = &proc_tgid_base_operations;
48321 inode->i_flags|=S_IMMUTABLE;
48322 @@ -3197,7 +3342,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48323 if (!task)
48324 goto out;
48325
48326 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48327 + goto out_put_task;
48328 +
48329 result = proc_pid_instantiate(dir, dentry, task, NULL);
48330 +out_put_task:
48331 put_task_struct(task);
48332 out:
48333 return result;
48334 @@ -3260,6 +3409,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48335 static int fake_filldir(void *buf, const char *name, int namelen,
48336 loff_t offset, u64 ino, unsigned d_type)
48337 {
48338 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
48339 + __buf->error = -EINVAL;
48340 return 0;
48341 }
48342
48343 @@ -3326,7 +3477,7 @@ static const struct pid_entry tid_base_stuff[] = {
48344 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48345 #endif
48346 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48347 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48348 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48349 INF("syscall", S_IRUGO, proc_pid_syscall),
48350 #endif
48351 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48352 @@ -3350,10 +3501,10 @@ static const struct pid_entry tid_base_stuff[] = {
48353 #ifdef CONFIG_SECURITY
48354 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48355 #endif
48356 -#ifdef CONFIG_KALLSYMS
48357 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48358 INF("wchan", S_IRUGO, proc_pid_wchan),
48359 #endif
48360 -#ifdef CONFIG_STACKTRACE
48361 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48362 ONE("stack", S_IRUGO, proc_pid_stack),
48363 #endif
48364 #ifdef CONFIG_SCHEDSTATS
48365 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48366 index 82676e3..5f8518a 100644
48367 --- a/fs/proc/cmdline.c
48368 +++ b/fs/proc/cmdline.c
48369 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48370
48371 static int __init proc_cmdline_init(void)
48372 {
48373 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48374 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48375 +#else
48376 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48377 +#endif
48378 return 0;
48379 }
48380 module_init(proc_cmdline_init);
48381 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48382 index b143471..bb105e5 100644
48383 --- a/fs/proc/devices.c
48384 +++ b/fs/proc/devices.c
48385 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48386
48387 static int __init proc_devices_init(void)
48388 {
48389 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48390 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48391 +#else
48392 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48393 +#endif
48394 return 0;
48395 }
48396 module_init(proc_devices_init);
48397 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48398 index 205c922..2ee4c57 100644
48399 --- a/fs/proc/inode.c
48400 +++ b/fs/proc/inode.c
48401 @@ -21,11 +21,17 @@
48402 #include <linux/seq_file.h>
48403 #include <linux/slab.h>
48404 #include <linux/mount.h>
48405 +#include <linux/grsecurity.h>
48406
48407 #include <asm/uaccess.h>
48408
48409 #include "internal.h"
48410
48411 +#ifdef CONFIG_PROC_SYSCTL
48412 +extern const struct inode_operations proc_sys_inode_operations;
48413 +extern const struct inode_operations proc_sys_dir_operations;
48414 +#endif
48415 +
48416 static void proc_evict_inode(struct inode *inode)
48417 {
48418 struct proc_dir_entry *de;
48419 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
48420 ns_ops = PROC_I(inode)->ns_ops;
48421 if (ns_ops && ns_ops->put)
48422 ns_ops->put(PROC_I(inode)->ns);
48423 +
48424 +#ifdef CONFIG_PROC_SYSCTL
48425 + if (inode->i_op == &proc_sys_inode_operations ||
48426 + inode->i_op == &proc_sys_dir_operations)
48427 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48428 +#endif
48429 +
48430 }
48431
48432 static struct kmem_cache * proc_inode_cachep;
48433 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48434 if (de->mode) {
48435 inode->i_mode = de->mode;
48436 inode->i_uid = de->uid;
48437 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48438 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48439 +#else
48440 inode->i_gid = de->gid;
48441 +#endif
48442 }
48443 if (de->size)
48444 inode->i_size = de->size;
48445 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48446 index 5f79bb8..e9ab85d 100644
48447 --- a/fs/proc/internal.h
48448 +++ b/fs/proc/internal.h
48449 @@ -31,8 +31,6 @@ struct vmalloc_info {
48450 unsigned long largest_chunk;
48451 };
48452
48453 -extern struct mm_struct *mm_for_maps(struct task_struct *);
48454 -
48455 #ifdef CONFIG_MMU
48456 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
48457 extern void get_vmalloc_info(struct vmalloc_info *vmi);
48458 @@ -54,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48459 struct pid *pid, struct task_struct *task);
48460 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48461 struct pid *pid, struct task_struct *task);
48462 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48463 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48464 +#endif
48465 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48466
48467 extern const struct file_operations proc_pid_maps_operations;
48468 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48469 index 86c67ee..cdca321 100644
48470 --- a/fs/proc/kcore.c
48471 +++ b/fs/proc/kcore.c
48472 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48473 * the addresses in the elf_phdr on our list.
48474 */
48475 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48476 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48477 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48478 + if (tsz > buflen)
48479 tsz = buflen;
48480 -
48481 +
48482 while (buflen) {
48483 struct kcore_list *m;
48484
48485 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48486 kfree(elf_buf);
48487 } else {
48488 if (kern_addr_valid(start)) {
48489 - unsigned long n;
48490 + char *elf_buf;
48491 + mm_segment_t oldfs;
48492
48493 - n = copy_to_user(buffer, (char *)start, tsz);
48494 - /*
48495 - * We cannot distinguish between fault on source
48496 - * and fault on destination. When this happens
48497 - * we clear too and hope it will trigger the
48498 - * EFAULT again.
48499 - */
48500 - if (n) {
48501 - if (clear_user(buffer + tsz - n,
48502 - n))
48503 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48504 + if (!elf_buf)
48505 + return -ENOMEM;
48506 + oldfs = get_fs();
48507 + set_fs(KERNEL_DS);
48508 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48509 + set_fs(oldfs);
48510 + if (copy_to_user(buffer, elf_buf, tsz)) {
48511 + kfree(elf_buf);
48512 return -EFAULT;
48513 + }
48514 }
48515 + set_fs(oldfs);
48516 + kfree(elf_buf);
48517 } else {
48518 if (clear_user(buffer, tsz))
48519 return -EFAULT;
48520 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48521
48522 static int open_kcore(struct inode *inode, struct file *filp)
48523 {
48524 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48525 + return -EPERM;
48526 +#endif
48527 if (!capable(CAP_SYS_RAWIO))
48528 return -EPERM;
48529 if (kcore_need_update)
48530 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48531 index 80e4645..53e5fcf 100644
48532 --- a/fs/proc/meminfo.c
48533 +++ b/fs/proc/meminfo.c
48534 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48535 vmi.used >> 10,
48536 vmi.largest_chunk >> 10
48537 #ifdef CONFIG_MEMORY_FAILURE
48538 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48539 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48540 #endif
48541 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48542 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48543 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48544 index b1822dd..df622cb 100644
48545 --- a/fs/proc/nommu.c
48546 +++ b/fs/proc/nommu.c
48547 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48548 if (len < 1)
48549 len = 1;
48550 seq_printf(m, "%*c", len, ' ');
48551 - seq_path(m, &file->f_path, "");
48552 + seq_path(m, &file->f_path, "\n\\");
48553 }
48554
48555 seq_putc(m, '\n');
48556 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48557 index 06e1cc1..177cd98 100644
48558 --- a/fs/proc/proc_net.c
48559 +++ b/fs/proc/proc_net.c
48560 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48561 struct task_struct *task;
48562 struct nsproxy *ns;
48563 struct net *net = NULL;
48564 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48565 + const struct cred *cred = current_cred();
48566 +#endif
48567 +
48568 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48569 + if (cred->fsuid)
48570 + return net;
48571 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48572 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48573 + return net;
48574 +#endif
48575
48576 rcu_read_lock();
48577 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48578 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48579 index 21d836f..bebf3ee 100644
48580 --- a/fs/proc/proc_sysctl.c
48581 +++ b/fs/proc/proc_sysctl.c
48582 @@ -12,11 +12,15 @@
48583 #include <linux/module.h>
48584 #include "internal.h"
48585
48586 +extern int gr_handle_chroot_sysctl(const int op);
48587 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48588 + const int op);
48589 +
48590 static const struct dentry_operations proc_sys_dentry_operations;
48591 static const struct file_operations proc_sys_file_operations;
48592 -static const struct inode_operations proc_sys_inode_operations;
48593 +const struct inode_operations proc_sys_inode_operations;
48594 static const struct file_operations proc_sys_dir_file_operations;
48595 -static const struct inode_operations proc_sys_dir_operations;
48596 +const struct inode_operations proc_sys_dir_operations;
48597
48598 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48599 {
48600 @@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48601
48602 err = NULL;
48603 d_set_d_op(dentry, &proc_sys_dentry_operations);
48604 +
48605 + gr_handle_proc_create(dentry, inode);
48606 +
48607 d_add(dentry, inode);
48608
48609 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
48610 + err = ERR_PTR(-ENOENT);
48611 +
48612 out:
48613 sysctl_head_finish(head);
48614 return err;
48615 @@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48616 struct inode *inode = filp->f_path.dentry->d_inode;
48617 struct ctl_table_header *head = grab_header(inode);
48618 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48619 + int op = write ? MAY_WRITE : MAY_READ;
48620 ssize_t error;
48621 size_t res;
48622
48623 if (IS_ERR(head))
48624 return PTR_ERR(head);
48625
48626 +
48627 /*
48628 * At this point we know that the sysctl was not unregistered
48629 * and won't be until we finish.
48630 */
48631 error = -EPERM;
48632 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48633 + if (sysctl_perm(head->root, table, op))
48634 goto out;
48635
48636 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48637 @@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48638 if (!table->proc_handler)
48639 goto out;
48640
48641 +#ifdef CONFIG_GRKERNSEC
48642 + error = -EPERM;
48643 + if (gr_handle_chroot_sysctl(op))
48644 + goto out;
48645 + dget(filp->f_path.dentry);
48646 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48647 + dput(filp->f_path.dentry);
48648 + goto out;
48649 + }
48650 + dput(filp->f_path.dentry);
48651 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48652 + goto out;
48653 + if (write && !capable(CAP_SYS_ADMIN))
48654 + goto out;
48655 +#endif
48656 +
48657 /* careful: calling conventions are nasty here */
48658 res = count;
48659 error = table->proc_handler(table, write, buf, &res, ppos);
48660 @@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48661 return -ENOMEM;
48662 } else {
48663 d_set_d_op(child, &proc_sys_dentry_operations);
48664 +
48665 + gr_handle_proc_create(child, inode);
48666 +
48667 d_add(child, inode);
48668 }
48669 } else {
48670 @@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48671 if ((*pos)++ < file->f_pos)
48672 return 0;
48673
48674 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48675 + return 0;
48676 +
48677 if (unlikely(S_ISLNK(table->mode)))
48678 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48679 else
48680 @@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48681 if (IS_ERR(head))
48682 return PTR_ERR(head);
48683
48684 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48685 + return -ENOENT;
48686 +
48687 generic_fillattr(inode, stat);
48688 if (table)
48689 stat->mode = (stat->mode & S_IFMT) | table->mode;
48690 @@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48691 .llseek = generic_file_llseek,
48692 };
48693
48694 -static const struct inode_operations proc_sys_inode_operations = {
48695 +const struct inode_operations proc_sys_inode_operations = {
48696 .permission = proc_sys_permission,
48697 .setattr = proc_sys_setattr,
48698 .getattr = proc_sys_getattr,
48699 };
48700
48701 -static const struct inode_operations proc_sys_dir_operations = {
48702 +const struct inode_operations proc_sys_dir_operations = {
48703 .lookup = proc_sys_lookup,
48704 .permission = proc_sys_permission,
48705 .setattr = proc_sys_setattr,
48706 diff --git a/fs/proc/root.c b/fs/proc/root.c
48707 index eed44bf..abeb499 100644
48708 --- a/fs/proc/root.c
48709 +++ b/fs/proc/root.c
48710 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
48711 #ifdef CONFIG_PROC_DEVICETREE
48712 proc_device_tree_init();
48713 #endif
48714 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48715 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48716 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48717 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48718 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48719 +#endif
48720 +#else
48721 proc_mkdir("bus", NULL);
48722 +#endif
48723 proc_sys_init();
48724 }
48725
48726 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48727 index 7faaf2a..7793015 100644
48728 --- a/fs/proc/task_mmu.c
48729 +++ b/fs/proc/task_mmu.c
48730 @@ -11,12 +11,19 @@
48731 #include <linux/rmap.h>
48732 #include <linux/swap.h>
48733 #include <linux/swapops.h>
48734 +#include <linux/grsecurity.h>
48735
48736 #include <asm/elf.h>
48737 #include <asm/uaccess.h>
48738 #include <asm/tlbflush.h>
48739 #include "internal.h"
48740
48741 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48742 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48743 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48744 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48745 +#endif
48746 +
48747 void task_mem(struct seq_file *m, struct mm_struct *mm)
48748 {
48749 unsigned long data, text, lib, swap;
48750 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48751 "VmExe:\t%8lu kB\n"
48752 "VmLib:\t%8lu kB\n"
48753 "VmPTE:\t%8lu kB\n"
48754 - "VmSwap:\t%8lu kB\n",
48755 - hiwater_vm << (PAGE_SHIFT-10),
48756 + "VmSwap:\t%8lu kB\n"
48757 +
48758 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48759 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48760 +#endif
48761 +
48762 + ,hiwater_vm << (PAGE_SHIFT-10),
48763 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48764 mm->locked_vm << (PAGE_SHIFT-10),
48765 mm->pinned_vm << (PAGE_SHIFT-10),
48766 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48767 data << (PAGE_SHIFT-10),
48768 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48769 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48770 - swap << (PAGE_SHIFT-10));
48771 + swap << (PAGE_SHIFT-10)
48772 +
48773 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48774 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48775 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48776 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48777 +#else
48778 + , mm->context.user_cs_base
48779 + , mm->context.user_cs_limit
48780 +#endif
48781 +#endif
48782 +
48783 + );
48784 }
48785
48786 unsigned long task_vsize(struct mm_struct *mm)
48787 @@ -125,7 +149,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48788 if (!priv->task)
48789 return ERR_PTR(-ESRCH);
48790
48791 - mm = mm_for_maps(priv->task);
48792 + mm = mm_access(priv->task, PTRACE_MODE_READ);
48793 if (!mm || IS_ERR(mm))
48794 return mm;
48795 down_read(&mm->mmap_sem);
48796 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48797 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48798 }
48799
48800 - /* We don't show the stack guard page in /proc/maps */
48801 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48802 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48803 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48804 +#else
48805 start = vma->vm_start;
48806 - if (stack_guard_page_start(vma, start))
48807 - start += PAGE_SIZE;
48808 end = vma->vm_end;
48809 - if (stack_guard_page_end(vma, end))
48810 - end -= PAGE_SIZE;
48811 +#endif
48812
48813 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48814 start,
48815 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48816 flags & VM_WRITE ? 'w' : '-',
48817 flags & VM_EXEC ? 'x' : '-',
48818 flags & VM_MAYSHARE ? 's' : 'p',
48819 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48820 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48821 +#else
48822 pgoff,
48823 +#endif
48824 MAJOR(dev), MINOR(dev), ino, &len);
48825
48826 /*
48827 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48828 */
48829 if (file) {
48830 pad_len_spaces(m, len);
48831 - seq_path(m, &file->f_path, "\n");
48832 + seq_path(m, &file->f_path, "\n\\");
48833 goto done;
48834 }
48835
48836 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48837 * Thread stack in /proc/PID/task/TID/maps or
48838 * the main process stack.
48839 */
48840 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
48841 - vma->vm_end >= mm->start_stack)) {
48842 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48843 + (vma->vm_start <= mm->start_stack &&
48844 + vma->vm_end >= mm->start_stack)) {
48845 name = "[stack]";
48846 } else {
48847 /* Thread stack in /proc/PID/maps */
48848 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48849 struct proc_maps_private *priv = m->private;
48850 struct task_struct *task = priv->task;
48851
48852 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48853 + if (current->exec_id != m->exec_id) {
48854 + gr_log_badprocpid("maps");
48855 + return 0;
48856 + }
48857 +#endif
48858 +
48859 show_map_vma(m, vma, is_pid);
48860
48861 if (m->count < m->size) /* vma is copied successfully */
48862 @@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48863 .private = &mss,
48864 };
48865
48866 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48867 + if (current->exec_id != m->exec_id) {
48868 + gr_log_badprocpid("smaps");
48869 + return 0;
48870 + }
48871 +#endif
48872 memset(&mss, 0, sizeof mss);
48873 - mss.vma = vma;
48874 - /* mmap_sem is held in m_start */
48875 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48876 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48877 -
48878 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48879 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48880 +#endif
48881 + mss.vma = vma;
48882 + /* mmap_sem is held in m_start */
48883 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48884 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48885 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48886 + }
48887 +#endif
48888 show_map_vma(m, vma, is_pid);
48889
48890 seq_printf(m,
48891 @@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48892 "KernelPageSize: %8lu kB\n"
48893 "MMUPageSize: %8lu kB\n"
48894 "Locked: %8lu kB\n",
48895 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48896 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48897 +#else
48898 (vma->vm_end - vma->vm_start) >> 10,
48899 +#endif
48900 mss.resident >> 10,
48901 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48902 mss.shared_clean >> 10,
48903 @@ -919,7 +970,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
48904 if (!pm.buffer)
48905 goto out_task;
48906
48907 - mm = mm_for_maps(task);
48908 + mm = mm_access(task, PTRACE_MODE_READ);
48909 ret = PTR_ERR(mm);
48910 if (!mm || IS_ERR(mm))
48911 goto out_free;
48912 @@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48913 int n;
48914 char buffer[50];
48915
48916 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48917 + if (current->exec_id != m->exec_id) {
48918 + gr_log_badprocpid("numa_maps");
48919 + return 0;
48920 + }
48921 +#endif
48922 +
48923 if (!mm)
48924 return 0;
48925
48926 @@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48927 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48928 mpol_cond_put(pol);
48929
48930 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48931 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48932 +#else
48933 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48934 +#endif
48935
48936 if (file) {
48937 seq_printf(m, " file=");
48938 - seq_path(m, &file->f_path, "\n\t= ");
48939 + seq_path(m, &file->f_path, "\n\t\\= ");
48940 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48941 seq_printf(m, " heap");
48942 } else {
48943 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48944 index 74fe164..0848f95 100644
48945 --- a/fs/proc/task_nommu.c
48946 +++ b/fs/proc/task_nommu.c
48947 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48948 else
48949 bytes += kobjsize(mm);
48950
48951 - if (current->fs && current->fs->users > 1)
48952 + if (current->fs && atomic_read(&current->fs->users) > 1)
48953 sbytes += kobjsize(current->fs);
48954 else
48955 bytes += kobjsize(current->fs);
48956 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48957
48958 if (file) {
48959 pad_len_spaces(m, len);
48960 - seq_path(m, &file->f_path, "");
48961 + seq_path(m, &file->f_path, "\n\\");
48962 } else if (mm) {
48963 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48964
48965 @@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48966 if (!priv->task)
48967 return ERR_PTR(-ESRCH);
48968
48969 - mm = mm_for_maps(priv->task);
48970 + mm = mm_access(priv->task, PTRACE_MODE_READ);
48971 if (!mm || IS_ERR(mm)) {
48972 put_task_struct(priv->task);
48973 priv->task = NULL;
48974 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48975 index d67908b..d13f6a6 100644
48976 --- a/fs/quota/netlink.c
48977 +++ b/fs/quota/netlink.c
48978 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48979 void quota_send_warning(short type, unsigned int id, dev_t dev,
48980 const char warntype)
48981 {
48982 - static atomic_t seq;
48983 + static atomic_unchecked_t seq;
48984 struct sk_buff *skb;
48985 void *msg_head;
48986 int ret;
48987 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48988 "VFS: Not enough memory to send quota warning.\n");
48989 return;
48990 }
48991 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48992 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48993 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48994 if (!msg_head) {
48995 printk(KERN_ERR
48996 diff --git a/fs/readdir.c b/fs/readdir.c
48997 index cc0a822..43cb195 100644
48998 --- a/fs/readdir.c
48999 +++ b/fs/readdir.c
49000 @@ -17,6 +17,7 @@
49001 #include <linux/security.h>
49002 #include <linux/syscalls.h>
49003 #include <linux/unistd.h>
49004 +#include <linux/namei.h>
49005
49006 #include <asm/uaccess.h>
49007
49008 @@ -67,6 +68,7 @@ struct old_linux_dirent {
49009
49010 struct readdir_callback {
49011 struct old_linux_dirent __user * dirent;
49012 + struct file * file;
49013 int result;
49014 };
49015
49016 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49017 buf->result = -EOVERFLOW;
49018 return -EOVERFLOW;
49019 }
49020 +
49021 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49022 + return 0;
49023 +
49024 buf->result++;
49025 dirent = buf->dirent;
49026 if (!access_ok(VERIFY_WRITE, dirent,
49027 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49028
49029 buf.result = 0;
49030 buf.dirent = dirent;
49031 + buf.file = file;
49032
49033 error = vfs_readdir(file, fillonedir, &buf);
49034 if (buf.result)
49035 @@ -142,6 +149,7 @@ struct linux_dirent {
49036 struct getdents_callback {
49037 struct linux_dirent __user * current_dir;
49038 struct linux_dirent __user * previous;
49039 + struct file * file;
49040 int count;
49041 int error;
49042 };
49043 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49044 buf->error = -EOVERFLOW;
49045 return -EOVERFLOW;
49046 }
49047 +
49048 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49049 + return 0;
49050 +
49051 dirent = buf->previous;
49052 if (dirent) {
49053 if (__put_user(offset, &dirent->d_off))
49054 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49055 buf.previous = NULL;
49056 buf.count = count;
49057 buf.error = 0;
49058 + buf.file = file;
49059
49060 error = vfs_readdir(file, filldir, &buf);
49061 if (error >= 0)
49062 @@ -229,6 +242,7 @@ out:
49063 struct getdents_callback64 {
49064 struct linux_dirent64 __user * current_dir;
49065 struct linux_dirent64 __user * previous;
49066 + struct file *file;
49067 int count;
49068 int error;
49069 };
49070 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49071 buf->error = -EINVAL; /* only used if we fail.. */
49072 if (reclen > buf->count)
49073 return -EINVAL;
49074 +
49075 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49076 + return 0;
49077 +
49078 dirent = buf->previous;
49079 if (dirent) {
49080 if (__put_user(offset, &dirent->d_off))
49081 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49082
49083 buf.current_dir = dirent;
49084 buf.previous = NULL;
49085 + buf.file = file;
49086 buf.count = count;
49087 buf.error = 0;
49088
49089 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49090 error = buf.error;
49091 lastdirent = buf.previous;
49092 if (lastdirent) {
49093 - typeof(lastdirent->d_off) d_off = file->f_pos;
49094 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49095 if (__put_user(d_off, &lastdirent->d_off))
49096 error = -EFAULT;
49097 else
49098 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49099 index 2b7882b..1c5ef48 100644
49100 --- a/fs/reiserfs/do_balan.c
49101 +++ b/fs/reiserfs/do_balan.c
49102 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49103 return;
49104 }
49105
49106 - atomic_inc(&(fs_generation(tb->tb_sb)));
49107 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49108 do_balance_starts(tb);
49109
49110 /* balance leaf returns 0 except if combining L R and S into
49111 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49112 index 2c1ade6..8c59d8d 100644
49113 --- a/fs/reiserfs/procfs.c
49114 +++ b/fs/reiserfs/procfs.c
49115 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49116 "SMALL_TAILS " : "NO_TAILS ",
49117 replay_only(sb) ? "REPLAY_ONLY " : "",
49118 convert_reiserfs(sb) ? "CONV " : "",
49119 - atomic_read(&r->s_generation_counter),
49120 + atomic_read_unchecked(&r->s_generation_counter),
49121 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49122 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49123 SF(s_good_search_by_key_reada), SF(s_bmaps),
49124 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
49125 index a59d271..e12d1cf 100644
49126 --- a/fs/reiserfs/reiserfs.h
49127 +++ b/fs/reiserfs/reiserfs.h
49128 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
49129 /* Comment? -Hans */
49130 wait_queue_head_t s_wait;
49131 /* To be obsoleted soon by per buffer seals.. -Hans */
49132 - atomic_t s_generation_counter; // increased by one every time the
49133 + atomic_unchecked_t s_generation_counter; // increased by one every time the
49134 // tree gets re-balanced
49135 unsigned long s_properties; /* File system properties. Currently holds
49136 on-disk FS format */
49137 @@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
49138 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
49139
49140 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
49141 -#define get_generation(s) atomic_read (&fs_generation(s))
49142 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
49143 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
49144 #define __fs_changed(gen,s) (gen != get_generation (s))
49145 #define fs_changed(gen,s) \
49146 diff --git a/fs/select.c b/fs/select.c
49147 index 17d33d0..da0bf5c 100644
49148 --- a/fs/select.c
49149 +++ b/fs/select.c
49150 @@ -20,6 +20,7 @@
49151 #include <linux/export.h>
49152 #include <linux/slab.h>
49153 #include <linux/poll.h>
49154 +#include <linux/security.h>
49155 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49156 #include <linux/file.h>
49157 #include <linux/fdtable.h>
49158 @@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49159 struct poll_list *walk = head;
49160 unsigned long todo = nfds;
49161
49162 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49163 if (nfds > rlimit(RLIMIT_NOFILE))
49164 return -EINVAL;
49165
49166 diff --git a/fs/seq_file.c b/fs/seq_file.c
49167 index 0cbd049..64e705c 100644
49168 --- a/fs/seq_file.c
49169 +++ b/fs/seq_file.c
49170 @@ -9,6 +9,7 @@
49171 #include <linux/export.h>
49172 #include <linux/seq_file.h>
49173 #include <linux/slab.h>
49174 +#include <linux/sched.h>
49175
49176 #include <asm/uaccess.h>
49177 #include <asm/page.h>
49178 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49179 memset(p, 0, sizeof(*p));
49180 mutex_init(&p->lock);
49181 p->op = op;
49182 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49183 + p->exec_id = current->exec_id;
49184 +#endif
49185
49186 /*
49187 * Wrappers around seq_open(e.g. swaps_open) need to be
49188 @@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49189 return 0;
49190 }
49191 if (!m->buf) {
49192 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49193 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49194 if (!m->buf)
49195 return -ENOMEM;
49196 }
49197 @@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49198 Eoverflow:
49199 m->op->stop(m, p);
49200 kfree(m->buf);
49201 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49202 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49203 return !m->buf ? -ENOMEM : -EAGAIN;
49204 }
49205
49206 @@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49207
49208 /* grab buffer if we didn't have one */
49209 if (!m->buf) {
49210 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49211 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49212 if (!m->buf)
49213 goto Enomem;
49214 }
49215 @@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49216 goto Fill;
49217 m->op->stop(m, p);
49218 kfree(m->buf);
49219 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49220 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49221 if (!m->buf)
49222 goto Enomem;
49223 m->count = 0;
49224 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
49225 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49226 void *data)
49227 {
49228 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49229 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49230 int res = -ENOMEM;
49231
49232 if (op) {
49233 diff --git a/fs/splice.c b/fs/splice.c
49234 index 5cac690..f833a99 100644
49235 --- a/fs/splice.c
49236 +++ b/fs/splice.c
49237 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49238 pipe_lock(pipe);
49239
49240 for (;;) {
49241 - if (!pipe->readers) {
49242 + if (!atomic_read(&pipe->readers)) {
49243 send_sig(SIGPIPE, current, 0);
49244 if (!ret)
49245 ret = -EPIPE;
49246 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49247 do_wakeup = 0;
49248 }
49249
49250 - pipe->waiting_writers++;
49251 + atomic_inc(&pipe->waiting_writers);
49252 pipe_wait(pipe);
49253 - pipe->waiting_writers--;
49254 + atomic_dec(&pipe->waiting_writers);
49255 }
49256
49257 pipe_unlock(pipe);
49258 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49259 old_fs = get_fs();
49260 set_fs(get_ds());
49261 /* The cast to a user pointer is valid due to the set_fs() */
49262 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49263 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49264 set_fs(old_fs);
49265
49266 return res;
49267 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49268 old_fs = get_fs();
49269 set_fs(get_ds());
49270 /* The cast to a user pointer is valid due to the set_fs() */
49271 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49272 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49273 set_fs(old_fs);
49274
49275 return res;
49276 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49277 goto err;
49278
49279 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49280 - vec[i].iov_base = (void __user *) page_address(page);
49281 + vec[i].iov_base = (void __force_user *) page_address(page);
49282 vec[i].iov_len = this_len;
49283 spd.pages[i] = page;
49284 spd.nr_pages++;
49285 @@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49286 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49287 {
49288 while (!pipe->nrbufs) {
49289 - if (!pipe->writers)
49290 + if (!atomic_read(&pipe->writers))
49291 return 0;
49292
49293 - if (!pipe->waiting_writers && sd->num_spliced)
49294 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49295 return 0;
49296
49297 if (sd->flags & SPLICE_F_NONBLOCK)
49298 @@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49299 * out of the pipe right after the splice_to_pipe(). So set
49300 * PIPE_READERS appropriately.
49301 */
49302 - pipe->readers = 1;
49303 + atomic_set(&pipe->readers, 1);
49304
49305 current->splice_pipe = pipe;
49306 }
49307 @@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49308 ret = -ERESTARTSYS;
49309 break;
49310 }
49311 - if (!pipe->writers)
49312 + if (!atomic_read(&pipe->writers))
49313 break;
49314 - if (!pipe->waiting_writers) {
49315 + if (!atomic_read(&pipe->waiting_writers)) {
49316 if (flags & SPLICE_F_NONBLOCK) {
49317 ret = -EAGAIN;
49318 break;
49319 @@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49320 pipe_lock(pipe);
49321
49322 while (pipe->nrbufs >= pipe->buffers) {
49323 - if (!pipe->readers) {
49324 + if (!atomic_read(&pipe->readers)) {
49325 send_sig(SIGPIPE, current, 0);
49326 ret = -EPIPE;
49327 break;
49328 @@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49329 ret = -ERESTARTSYS;
49330 break;
49331 }
49332 - pipe->waiting_writers++;
49333 + atomic_inc(&pipe->waiting_writers);
49334 pipe_wait(pipe);
49335 - pipe->waiting_writers--;
49336 + atomic_dec(&pipe->waiting_writers);
49337 }
49338
49339 pipe_unlock(pipe);
49340 @@ -1823,14 +1823,14 @@ retry:
49341 pipe_double_lock(ipipe, opipe);
49342
49343 do {
49344 - if (!opipe->readers) {
49345 + if (!atomic_read(&opipe->readers)) {
49346 send_sig(SIGPIPE, current, 0);
49347 if (!ret)
49348 ret = -EPIPE;
49349 break;
49350 }
49351
49352 - if (!ipipe->nrbufs && !ipipe->writers)
49353 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49354 break;
49355
49356 /*
49357 @@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49358 pipe_double_lock(ipipe, opipe);
49359
49360 do {
49361 - if (!opipe->readers) {
49362 + if (!atomic_read(&opipe->readers)) {
49363 send_sig(SIGPIPE, current, 0);
49364 if (!ret)
49365 ret = -EPIPE;
49366 @@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49367 * return EAGAIN if we have the potential of some data in the
49368 * future, otherwise just return 0
49369 */
49370 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49371 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49372 ret = -EAGAIN;
49373
49374 pipe_unlock(ipipe);
49375 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49376 index 35a36d3..23424b2 100644
49377 --- a/fs/sysfs/dir.c
49378 +++ b/fs/sysfs/dir.c
49379 @@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49380 struct sysfs_dirent *sd;
49381 int rc;
49382
49383 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49384 + const char *parent_name = parent_sd->s_name;
49385 +
49386 + mode = S_IFDIR | S_IRWXU;
49387 +
49388 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49389 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49390 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49391 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49392 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49393 +#endif
49394 +
49395 /* allocate */
49396 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49397 if (!sd)
49398 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49399 index 00012e3..8392349 100644
49400 --- a/fs/sysfs/file.c
49401 +++ b/fs/sysfs/file.c
49402 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49403
49404 struct sysfs_open_dirent {
49405 atomic_t refcnt;
49406 - atomic_t event;
49407 + atomic_unchecked_t event;
49408 wait_queue_head_t poll;
49409 struct list_head buffers; /* goes through sysfs_buffer.list */
49410 };
49411 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49412 if (!sysfs_get_active(attr_sd))
49413 return -ENODEV;
49414
49415 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49416 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49417 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49418
49419 sysfs_put_active(attr_sd);
49420 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49421 return -ENOMEM;
49422
49423 atomic_set(&new_od->refcnt, 0);
49424 - atomic_set(&new_od->event, 1);
49425 + atomic_set_unchecked(&new_od->event, 1);
49426 init_waitqueue_head(&new_od->poll);
49427 INIT_LIST_HEAD(&new_od->buffers);
49428 goto retry;
49429 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49430
49431 sysfs_put_active(attr_sd);
49432
49433 - if (buffer->event != atomic_read(&od->event))
49434 + if (buffer->event != atomic_read_unchecked(&od->event))
49435 goto trigger;
49436
49437 return DEFAULT_POLLMASK;
49438 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49439
49440 od = sd->s_attr.open;
49441 if (od) {
49442 - atomic_inc(&od->event);
49443 + atomic_inc_unchecked(&od->event);
49444 wake_up_interruptible(&od->poll);
49445 }
49446
49447 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49448 index a7ac78f..02158e1 100644
49449 --- a/fs/sysfs/symlink.c
49450 +++ b/fs/sysfs/symlink.c
49451 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49452
49453 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49454 {
49455 - char *page = nd_get_link(nd);
49456 + const char *page = nd_get_link(nd);
49457 if (!IS_ERR(page))
49458 free_page((unsigned long)page);
49459 }
49460 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49461 index c175b4d..8f36a16 100644
49462 --- a/fs/udf/misc.c
49463 +++ b/fs/udf/misc.c
49464 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49465
49466 u8 udf_tag_checksum(const struct tag *t)
49467 {
49468 - u8 *data = (u8 *)t;
49469 + const u8 *data = (const u8 *)t;
49470 u8 checksum = 0;
49471 int i;
49472 for (i = 0; i < sizeof(struct tag); ++i)
49473 diff --git a/fs/utimes.c b/fs/utimes.c
49474 index ba653f3..06ea4b1 100644
49475 --- a/fs/utimes.c
49476 +++ b/fs/utimes.c
49477 @@ -1,6 +1,7 @@
49478 #include <linux/compiler.h>
49479 #include <linux/file.h>
49480 #include <linux/fs.h>
49481 +#include <linux/security.h>
49482 #include <linux/linkage.h>
49483 #include <linux/mount.h>
49484 #include <linux/namei.h>
49485 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49486 goto mnt_drop_write_and_out;
49487 }
49488 }
49489 +
49490 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49491 + error = -EACCES;
49492 + goto mnt_drop_write_and_out;
49493 + }
49494 +
49495 mutex_lock(&inode->i_mutex);
49496 error = notify_change(path->dentry, &newattrs);
49497 mutex_unlock(&inode->i_mutex);
49498 diff --git a/fs/xattr.c b/fs/xattr.c
49499 index 3c8c1cc..a83c398 100644
49500 --- a/fs/xattr.c
49501 +++ b/fs/xattr.c
49502 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49503 * Extended attribute SET operations
49504 */
49505 static long
49506 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49507 +setxattr(struct path *path, const char __user *name, const void __user *value,
49508 size_t size, int flags)
49509 {
49510 int error;
49511 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49512 }
49513 }
49514
49515 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49516 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49517 + error = -EACCES;
49518 + goto out;
49519 + }
49520 +
49521 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49522 out:
49523 if (vvalue)
49524 vfree(vvalue);
49525 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49526 return error;
49527 error = mnt_want_write(path.mnt);
49528 if (!error) {
49529 - error = setxattr(path.dentry, name, value, size, flags);
49530 + error = setxattr(&path, name, value, size, flags);
49531 mnt_drop_write(path.mnt);
49532 }
49533 path_put(&path);
49534 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49535 return error;
49536 error = mnt_want_write(path.mnt);
49537 if (!error) {
49538 - error = setxattr(path.dentry, name, value, size, flags);
49539 + error = setxattr(&path, name, value, size, flags);
49540 mnt_drop_write(path.mnt);
49541 }
49542 path_put(&path);
49543 @@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49544 const void __user *,value, size_t, size, int, flags)
49545 {
49546 struct file *f;
49547 - struct dentry *dentry;
49548 int error = -EBADF;
49549
49550 f = fget(fd);
49551 if (!f)
49552 return error;
49553 - dentry = f->f_path.dentry;
49554 - audit_inode(NULL, dentry);
49555 + audit_inode(NULL, f->f_path.dentry);
49556 error = mnt_want_write_file(f);
49557 if (!error) {
49558 - error = setxattr(dentry, name, value, size, flags);
49559 + error = setxattr(&f->f_path, name, value, size, flags);
49560 mnt_drop_write_file(f);
49561 }
49562 fput(f);
49563 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49564 index 69d06b0..c0996e5 100644
49565 --- a/fs/xattr_acl.c
49566 +++ b/fs/xattr_acl.c
49567 @@ -17,8 +17,8 @@
49568 struct posix_acl *
49569 posix_acl_from_xattr(const void *value, size_t size)
49570 {
49571 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49572 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49573 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49574 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49575 int count;
49576 struct posix_acl *acl;
49577 struct posix_acl_entry *acl_e;
49578 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49579 index 85e7e32..5344e52 100644
49580 --- a/fs/xfs/xfs_bmap.c
49581 +++ b/fs/xfs/xfs_bmap.c
49582 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49583 int nmap,
49584 int ret_nmap);
49585 #else
49586 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49587 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49588 #endif /* DEBUG */
49589
49590 STATIC int
49591 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49592 index 79d05e8..e3e5861 100644
49593 --- a/fs/xfs/xfs_dir2_sf.c
49594 +++ b/fs/xfs/xfs_dir2_sf.c
49595 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49596 }
49597
49598 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49599 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49600 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49601 + char name[sfep->namelen];
49602 + memcpy(name, sfep->name, sfep->namelen);
49603 + if (filldir(dirent, name, sfep->namelen,
49604 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49605 + *offset = off & 0x7fffffff;
49606 + return 0;
49607 + }
49608 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49609 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49610 *offset = off & 0x7fffffff;
49611 return 0;
49612 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49613 index 91f8ff5..0ce68f9 100644
49614 --- a/fs/xfs/xfs_ioctl.c
49615 +++ b/fs/xfs/xfs_ioctl.c
49616 @@ -128,7 +128,7 @@ xfs_find_handle(
49617 }
49618
49619 error = -EFAULT;
49620 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49621 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49622 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49623 goto out_put;
49624
49625 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49626 index 3011b87..1ab03e9 100644
49627 --- a/fs/xfs/xfs_iops.c
49628 +++ b/fs/xfs/xfs_iops.c
49629 @@ -397,7 +397,7 @@ xfs_vn_put_link(
49630 struct nameidata *nd,
49631 void *p)
49632 {
49633 - char *s = nd_get_link(nd);
49634 + const char *s = nd_get_link(nd);
49635
49636 if (!IS_ERR(s))
49637 kfree(s);
49638 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49639 new file mode 100644
49640 index 0000000..4d533f1
49641 --- /dev/null
49642 +++ b/grsecurity/Kconfig
49643 @@ -0,0 +1,941 @@
49644 +#
49645 +# grsecurity configuration
49646 +#
49647 +menu "Memory Protections"
49648 +depends on GRKERNSEC
49649 +
49650 +config GRKERNSEC_KMEM
49651 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49652 + default y if GRKERNSEC_CONFIG_AUTO
49653 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49654 + help
49655 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49656 + be written to or read from to modify or leak the contents of the running
49657 + kernel. /dev/port will also not be allowed to be opened. If you have module
49658 + support disabled, enabling this will close up four ways that are
49659 + currently used to insert malicious code into the running kernel.
49660 + Even with all these features enabled, we still highly recommend that
49661 + you use the RBAC system, as it is still possible for an attacker to
49662 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49663 + If you are not using XFree86, you may be able to stop this additional
49664 + case by enabling the 'Disable privileged I/O' option. Though nothing
49665 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49666 + but only to video memory, which is the only writing we allow in this
49667 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49668 + not be allowed to mprotect it with PROT_WRITE later.
49669 + It is highly recommended that you say Y here if you meet all the
49670 + conditions above.
49671 +
49672 +config GRKERNSEC_VM86
49673 + bool "Restrict VM86 mode"
49674 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49675 + depends on X86_32
49676 +
49677 + help
49678 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49679 + make use of a special execution mode on 32bit x86 processors called
49680 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49681 + video cards and will still work with this option enabled. The purpose
49682 + of the option is to prevent exploitation of emulation errors in
49683 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49684 + Nearly all users should be able to enable this option.
49685 +
49686 +config GRKERNSEC_IO
49687 + bool "Disable privileged I/O"
49688 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49689 + depends on X86
49690 + select RTC_CLASS
49691 + select RTC_INTF_DEV
49692 + select RTC_DRV_CMOS
49693 +
49694 + help
49695 + If you say Y here, all ioperm and iopl calls will return an error.
49696 + Ioperm and iopl can be used to modify the running kernel.
49697 + Unfortunately, some programs need this access to operate properly,
49698 + the most notable of which are XFree86 and hwclock. hwclock can be
49699 + remedied by having RTC support in the kernel, so real-time
49700 + clock support is enabled if this option is enabled, to ensure
49701 + that hwclock operates correctly. XFree86 still will not
49702 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49703 + IF YOU USE XFree86. If you use XFree86 and you still want to
49704 + protect your kernel against modification, use the RBAC system.
49705 +
49706 +config GRKERNSEC_PROC_MEMMAP
49707 + bool "Harden ASLR against information leaks and entropy reduction"
49708 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
49709 + depends on PAX_NOEXEC || PAX_ASLR
49710 + help
49711 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49712 + give no information about the addresses of its mappings if
49713 + PaX features that rely on random addresses are enabled on the task.
49714 + In addition to sanitizing this information and disabling other
49715 + dangerous sources of information, this option causes reads of sensitive
49716 + dangerous sources of information, this option causes reads of sensitive
49717 + /proc/<pid> entries to be denied where the file descriptor was opened in a
49718 + different task than the one performing the read. Such attempts are logged.
49719 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49720 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49721 + binaries to prevent alternative mmap layouts from being abused.
49722 +
49723 + If you use PaX it is essential that you say Y here as it closes up
49724 + several holes that make full ASLR useless locally.
49725 +
49726 +config GRKERNSEC_BRUTE
49727 + bool "Deter exploit bruteforcing"
49728 + default y if GRKERNSEC_CONFIG_AUTO
49729 + help
49730 + If you say Y here, attempts to bruteforce exploits against forking
49731 + daemons such as apache or sshd, as well as against suid/sgid binaries
49732 + will be deterred. When a child of a forking daemon is killed by PaX
49733 + or crashes due to an illegal instruction or other suspicious signal,
49734 + the parent process will be delayed 30 seconds upon every subsequent
49735 + fork until the administrator is able to assess the situation and
49736 + restart the daemon.
49737 + In the suid/sgid case, the attempt is logged, the user has all their
49738 + processes terminated, and they are prevented from executing any further
49739 + processes for 15 minutes.
49740 + It is recommended that you also enable signal logging in the auditing
49741 + section so that logs are generated when a process triggers a suspicious
49742 + signal.
49743 + If the sysctl option is enabled, a sysctl option with name
49744 + "deter_bruteforce" is created.
49745 +
49746 +
49747 +config GRKERNSEC_MODHARDEN
49748 + bool "Harden module auto-loading"
49749 + default y if GRKERNSEC_CONFIG_AUTO
49750 + depends on MODULES
49751 + help
49752 + If you say Y here, module auto-loading in response to use of some
49753 + feature implemented by an unloaded module will be restricted to
49754 + root users. Enabling this option helps defend against attacks
49755 + by unprivileged users who abuse the auto-loading behavior to
49756 + cause a vulnerable module to load that is then exploited.
49757 +
49758 + If this option prevents a legitimate use of auto-loading for a
49759 + non-root user, the administrator can execute modprobe manually
49760 + with the exact name of the module mentioned in the alert log.
49761 + Alternatively, the administrator can add the module to the list
49762 + of modules loaded at boot by modifying init scripts.
49763 +
49764 + Modification of init scripts will most likely be needed on
49765 + Ubuntu servers with encrypted home directory support enabled,
49766 + as the first non-root user logging in will cause the ecb(aes),
49767 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49768 +
49769 +config GRKERNSEC_HIDESYM
49770 + bool "Hide kernel symbols"
49771 + default y if GRKERNSEC_CONFIG_AUTO
49772 + select PAX_USERCOPY_SLABS
49773 + help
49774 + If you say Y here, getting information on loaded modules, and
49775 + displaying all kernel symbols through a syscall will be restricted
49776 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49777 + /proc/kallsyms will be restricted to the root user. The RBAC
49778 + system can hide that entry even from root.
49779 +
49780 + This option also prevents leaking of kernel addresses through
49781 + several /proc entries.
49782 +
49783 + Note that this option is only effective provided the following
49784 + conditions are met:
49785 + 1) The kernel using grsecurity is not precompiled by some distribution
49786 + 2) You have also enabled GRKERNSEC_DMESG
49787 + 3) You are using the RBAC system and hiding other files such as your
49788 + kernel image and System.map. Alternatively, enabling this option
49789 + causes the permissions on /boot, /lib/modules, and the kernel
49790 + source directory to change at compile time to prevent
49791 + reading by non-root users.
49792 + If the above conditions are met, this option will aid in providing a
49793 + useful protection against local kernel exploitation of overflows
49794 + and arbitrary read/write vulnerabilities.
49795 +
49796 +config GRKERNSEC_KERN_LOCKOUT
49797 + bool "Active kernel exploit response"
49798 + default y if GRKERNSEC_CONFIG_AUTO
49799 + depends on X86 || ARM || PPC || SPARC
49800 + help
49801 + If you say Y here, when a PaX alert is triggered due to suspicious
49802 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49803 + or an OOPS occurs due to bad memory accesses, instead of just
49804 + terminating the offending process (and potentially allowing
49805 + a subsequent exploit from the same user), we will take one of two
49806 + actions:
49807 + If the user was root, we will panic the system
49808 + If the user was non-root, we will log the attempt, terminate
49809 + all processes owned by the user, then prevent them from creating
49810 + any new processes until the system is restarted
49811 + This deters repeated kernel exploitation/bruteforcing attempts
49812 + and is useful for later forensics.
49813 +
49814 +endmenu
49815 +menu "Role Based Access Control Options"
49816 +depends on GRKERNSEC
49817 +
49818 +config GRKERNSEC_RBAC_DEBUG
49819 + bool
49820 +
49821 +config GRKERNSEC_NO_RBAC
49822 + bool "Disable RBAC system"
49823 + help
49824 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49825 + preventing the RBAC system from being enabled. You should only say Y
49826 + here if you have no intention of using the RBAC system, so as to prevent
49827 + an attacker with root access from misusing the RBAC system to hide files
49828 + and processes when loadable module support and /dev/[k]mem have been
49829 + locked down.
49830 +
49831 +config GRKERNSEC_ACL_HIDEKERN
49832 + bool "Hide kernel processes"
49833 + help
49834 + If you say Y here, all kernel threads will be hidden to all
49835 + processes but those whose subject has the "view hidden processes"
49836 + flag.
49837 +
49838 +config GRKERNSEC_ACL_MAXTRIES
49839 + int "Maximum tries before password lockout"
49840 + default 3
49841 + help
49842 + This option enforces the maximum number of times a user can attempt
49843 + to authorize themselves with the grsecurity RBAC system before being
49844 + denied the ability to attempt authorization again for a specified time.
49845 + The lower the number, the harder it will be to brute-force a password.
49846 +
49847 +config GRKERNSEC_ACL_TIMEOUT
49848 + int "Time to wait after max password tries, in seconds"
49849 + default 30
49850 + help
49851 + This option specifies the time the user must wait after attempting to
49852 + authorize to the RBAC system with the maximum number of invalid
49853 + passwords. The higher the number, the harder it will be to brute-force
49854 + a password.
49855 +
49856 +endmenu
49857 +menu "Filesystem Protections"
49858 +depends on GRKERNSEC
49859 +
49860 +config GRKERNSEC_PROC
49861 + bool "Proc restrictions"
49862 + default y if GRKERNSEC_CONFIG_AUTO
49863 + help
49864 + If you say Y here, the permissions of the /proc filesystem
49865 + will be altered to enhance system security and privacy. You MUST
49866 + choose either a user only restriction or a user and group restriction.
49867 + Depending upon the option you choose, you can either restrict users to
49868 + see only the processes they themselves run, or choose a group that can
49869 + view all processes and files normally restricted to root if you choose
49870 + the "restrict to user only" option. NOTE: If you're running identd or
49871 + ntpd as a non-root user, you will have to run it as the group you
49872 + specify here.
49873 +
49874 +config GRKERNSEC_PROC_USER
49875 + bool "Restrict /proc to user only"
49876 + depends on GRKERNSEC_PROC
49877 + help
49878 + If you say Y here, non-root users will only be able to view their own
49879 + processes, and restricts them from viewing network-related information,
49880 + and viewing kernel symbol and module information.
49881 +
49882 +config GRKERNSEC_PROC_USERGROUP
49883 + bool "Allow special group"
49884 + default y if GRKERNSEC_CONFIG_AUTO
49885 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49886 + help
49887 + If you say Y here, you will be able to select a group that will be
49888 + able to view all processes and network-related information. If you've
49889 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49890 + remain hidden. This option is useful if you want to run identd as
49891 + a non-root user.
49892 +
49893 +config GRKERNSEC_PROC_GID
49894 + int "GID for special group"
49895 + depends on GRKERNSEC_PROC_USERGROUP
49896 + default 1001
49897 +
49898 +config GRKERNSEC_PROC_ADD
49899 + bool "Additional restrictions"
49900 + default y if GRKERNSEC_CONFIG_AUTO
49901 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49902 + help
49903 + If you say Y here, additional restrictions will be placed on
49904 + /proc that keep normal users from viewing device information and
49905 + slabinfo information that could be useful for exploits.
49906 +
49907 +config GRKERNSEC_LINK
49908 + bool "Linking restrictions"
49909 + default y if GRKERNSEC_CONFIG_AUTO
49910 + help
49911 + If you say Y here, /tmp race exploits will be prevented, since users
49912 + will no longer be able to follow symlinks owned by other users in
49913 + world-writable +t directories (e.g. /tmp), unless the owner of the
49914 + symlink is the owner of the directory. Users will also not be
49915 + able to hardlink to files they do not own. If the sysctl option is
49916 + enabled, a sysctl option with name "linking_restrictions" is created.
49917 +
49918 +config GRKERNSEC_SYMLINKOWN
49919 + bool "Kernel-enforced SymlinksIfOwnerMatch"
49920 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
49921 + help
49922 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
49923 + that prevents it from being used as a security feature. As Apache
49924 + verifies the symlink by performing a stat() against the target of
49925 + the symlink before it is followed, an attacker can setup a symlink
49926 + to point to a same-owned file, then replace the symlink with one
49927 + that targets another user's file just after Apache "validates" the
49928 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
49929 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
49930 + will be in place for the group you specify. If the sysctl option
49931 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
49932 + created.
49933 +
49934 +config GRKERNSEC_SYMLINKOWN_GID
49935 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
49936 + depends on GRKERNSEC_SYMLINKOWN
49937 + default 1006
49938 + help
49939 + Setting this GID determines what group kernel-enforced
49940 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
49941 + is enabled, a sysctl option with name "symlinkown_gid" is created.
49942 +
49943 +config GRKERNSEC_FIFO
49944 + bool "FIFO restrictions"
49945 + default y if GRKERNSEC_CONFIG_AUTO
49946 + help
49947 + If you say Y here, users will not be able to write to FIFOs they don't
49948 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49949 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
49950 + option is enabled, a sysctl option with name "fifo_restrictions" is
49951 + created.
49952 +
49953 +config GRKERNSEC_SYSFS_RESTRICT
49954 + bool "Sysfs/debugfs restriction"
49955 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49956 + depends on SYSFS
49957 + help
49958 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49959 + any filesystem normally mounted under it (e.g. debugfs) will be
49960 + mostly accessible only by root. These filesystems generally provide access
49961 + to hardware and debug information that isn't appropriate for unprivileged
49962 + users of the system. Sysfs and debugfs have also become a large source
49963 + of new vulnerabilities, ranging from infoleaks to local compromise.
49964 + There has been very little oversight with an eye toward security involved
49965 + in adding new exporters of information to these filesystems, so their
49966 + use is discouraged.
49967 + For reasons of compatibility, a few directories have been whitelisted
49968 + for access by non-root users:
49969 + /sys/fs/selinux
49970 + /sys/fs/fuse
49971 + /sys/devices/system/cpu
49972 +
49973 +config GRKERNSEC_ROFS
49974 + bool "Runtime read-only mount protection"
49975 + help
49976 + If you say Y here, a sysctl option with name "romount_protect" will
49977 + be created. By setting this option to 1 at runtime, filesystems
49978 + will be protected in the following ways:
49979 + * No new writable mounts will be allowed
49980 + * Existing read-only mounts won't be able to be remounted read/write
49981 + * Write operations will be denied on all block devices
49982 + This option acts independently of grsec_lock: once it is set to 1,
49983 + it cannot be turned off. Therefore, please be mindful of the resulting
49984 + behavior if this option is enabled in an init script on a read-only
49985 + filesystem. This feature is mainly intended for secure embedded systems.
49986 +
49987 +config GRKERNSEC_CHROOT
49988 + bool "Chroot jail restrictions"
49989 + default y if GRKERNSEC_CONFIG_AUTO
49990 + help
49991 + If you say Y here, you will be able to choose several options that will
49992 + make breaking out of a chrooted jail much more difficult. If you
49993 + encounter no software incompatibilities with the following options, it
49994 + is recommended that you enable each one.
49995 +
49996 +config GRKERNSEC_CHROOT_MOUNT
49997 + bool "Deny mounts"
49998 + default y if GRKERNSEC_CONFIG_AUTO
49999 + depends on GRKERNSEC_CHROOT
50000 + help
50001 + If you say Y here, processes inside a chroot will not be able to
50002 + mount or remount filesystems. If the sysctl option is enabled, a
50003 + sysctl option with name "chroot_deny_mount" is created.
50004 +
50005 +config GRKERNSEC_CHROOT_DOUBLE
50006 + bool "Deny double-chroots"
50007 + default y if GRKERNSEC_CONFIG_AUTO
50008 + depends on GRKERNSEC_CHROOT
50009 + help
50010 + If you say Y here, processes inside a chroot will not be able to chroot
50011 + again outside the chroot. This is a widely used method of breaking
50012 + out of a chroot jail and should not be allowed. If the sysctl
50013 + option is enabled, a sysctl option with name
50014 + "chroot_deny_chroot" is created.
50015 +
50016 +config GRKERNSEC_CHROOT_PIVOT
50017 + bool "Deny pivot_root in chroot"
50018 + default y if GRKERNSEC_CONFIG_AUTO
50019 + depends on GRKERNSEC_CHROOT
50020 + help
50021 + If you say Y here, processes inside a chroot will not be able to use
50022 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50023 + works similarly to chroot in that it changes the root filesystem. This
50024 + function could be misused in a chrooted process to attempt to break out
50025 + of the chroot, and therefore should not be allowed. If the sysctl
50026 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50027 + created.
50028 +
50029 +config GRKERNSEC_CHROOT_CHDIR
50030 + bool "Enforce chdir(\"/\") on all chroots"
50031 + default y if GRKERNSEC_CONFIG_AUTO
50032 + depends on GRKERNSEC_CHROOT
50033 + help
50034 + If you say Y here, the current working directory of all newly-chrooted
50035 + applications will be set to the root directory of the chroot.
50036 + The man page on chroot(2) states:
50037 + Note that this call does not change the current working
50038 + directory, so that `.' can be outside the tree rooted at
50039 + `/'. In particular, the super-user can escape from a
50040 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50041 +
50042 + It is recommended that you say Y here, since it's not known to break
50043 + any software. If the sysctl option is enabled, a sysctl option with
50044 + name "chroot_enforce_chdir" is created.
50045 +
50046 +config GRKERNSEC_CHROOT_CHMOD
50047 + bool "Deny (f)chmod +s"
50048 + default y if GRKERNSEC_CONFIG_AUTO
50049 + depends on GRKERNSEC_CHROOT
50050 + help
50051 + If you say Y here, processes inside a chroot will not be able to chmod
50052 + or fchmod files to make them have suid or sgid bits. This protects
50053 + against another published method of breaking a chroot. If the sysctl
50054 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50055 + created.
50056 +
50057 +config GRKERNSEC_CHROOT_FCHDIR
50058 + bool "Deny fchdir out of chroot"
50059 + default y if GRKERNSEC_CONFIG_AUTO
50060 + depends on GRKERNSEC_CHROOT
50061 + help
50062 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50063 + to a file descriptor of the chrooting process that points to a directory
50064 + outside the filesystem will be stopped. If the sysctl option
50065 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50066 +
50067 +config GRKERNSEC_CHROOT_MKNOD
50068 + bool "Deny mknod"
50069 + default y if GRKERNSEC_CONFIG_AUTO
50070 + depends on GRKERNSEC_CHROOT
50071 + help
50072 + If you say Y here, processes inside a chroot will not be allowed to
50073 + mknod. The problem with using mknod inside a chroot is that it
50074 + would allow an attacker to create a device entry that is the same
50075 + as one on the physical root of your system, which could range from
50076 + anything from the console device to a device for your harddrive (which
50077 + they could then use to wipe the drive or steal data). It is recommended
50078 + that you say Y here, unless you run into software incompatibilities.
50079 + If the sysctl option is enabled, a sysctl option with name
50080 + "chroot_deny_mknod" is created.
50081 +
50082 +config GRKERNSEC_CHROOT_SHMAT
50083 + bool "Deny shmat() out of chroot"
50084 + default y if GRKERNSEC_CONFIG_AUTO
50085 + depends on GRKERNSEC_CHROOT
50086 + help
50087 + If you say Y here, processes inside a chroot will not be able to attach
50088 + to shared memory segments that were created outside of the chroot jail.
50089 + It is recommended that you say Y here. If the sysctl option is enabled,
50090 + a sysctl option with name "chroot_deny_shmat" is created.
50091 +
50092 +config GRKERNSEC_CHROOT_UNIX
50093 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50094 + default y if GRKERNSEC_CONFIG_AUTO
50095 + depends on GRKERNSEC_CHROOT
50096 + help
50097 + If you say Y here, processes inside a chroot will not be able to
50098 + connect to abstract (meaning not belonging to a filesystem) Unix
50099 + domain sockets that were bound outside of a chroot. It is recommended
50100 + that you say Y here. If the sysctl option is enabled, a sysctl option
50101 + with name "chroot_deny_unix" is created.
50102 +
50103 +config GRKERNSEC_CHROOT_FINDTASK
50104 + bool "Protect outside processes"
50105 + default y if GRKERNSEC_CONFIG_AUTO
50106 + depends on GRKERNSEC_CHROOT
50107 + help
50108 + If you say Y here, processes inside a chroot will not be able to
50109 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50110 + getsid, or view any process outside of the chroot. If the sysctl
50111 + option is enabled, a sysctl option with name "chroot_findtask" is
50112 + created.
50113 +
50114 +config GRKERNSEC_CHROOT_NICE
50115 + bool "Restrict priority changes"
50116 + default y if GRKERNSEC_CONFIG_AUTO
50117 + depends on GRKERNSEC_CHROOT
50118 + help
50119 + If you say Y here, processes inside a chroot will not be able to raise
50120 + the priority of processes in the chroot, or alter the priority of
50121 + processes outside the chroot. This provides more security than simply
50122 + removing CAP_SYS_NICE from the process' capability set. If the
50123 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50124 + is created.
50125 +
50126 +config GRKERNSEC_CHROOT_SYSCTL
50127 + bool "Deny sysctl writes"
50128 + default y if GRKERNSEC_CONFIG_AUTO
50129 + depends on GRKERNSEC_CHROOT
50130 + help
50131 + If you say Y here, an attacker in a chroot will not be able to
50132 + write to sysctl entries, either by sysctl(2) or through a /proc
50133 + interface. It is strongly recommended that you say Y here. If the
50134 + sysctl option is enabled, a sysctl option with name
50135 + "chroot_deny_sysctl" is created.
50136 +
50137 +config GRKERNSEC_CHROOT_CAPS
50138 + bool "Capability restrictions"
50139 + default y if GRKERNSEC_CONFIG_AUTO
50140 + depends on GRKERNSEC_CHROOT
50141 + help
50142 + If you say Y here, the capabilities on all processes within a
50143 + chroot jail will be lowered to stop module insertion, raw i/o,
50144 + system and net admin tasks, rebooting the system, modifying immutable
50145 + files, modifying IPC owned by another, and changing the system time.
50146 + This is left an option because it can break some apps. Disable this
50147 + if your chrooted apps are having problems performing those kinds of
50148 + tasks. If the sysctl option is enabled, a sysctl option with
50149 + name "chroot_caps" is created.
50150 +
50151 +endmenu
50152 +menu "Kernel Auditing"
50153 +depends on GRKERNSEC
50154 +
50155 +config GRKERNSEC_AUDIT_GROUP
50156 + bool "Single group for auditing"
50157 + help
50158 + If you say Y here, the exec, chdir, and (un)mount logging features
50159 + will only operate on a group you specify. This option is recommended
50160 + if you only want to watch certain users instead of having a large
50161 + amount of logs from the entire system. If the sysctl option is enabled,
50162 + a sysctl option with name "audit_group" is created.
50163 +
50164 +config GRKERNSEC_AUDIT_GID
50165 + int "GID for auditing"
50166 + depends on GRKERNSEC_AUDIT_GROUP
50167 + default 1007
50168 +
50169 +config GRKERNSEC_EXECLOG
50170 + bool "Exec logging"
50171 + help
50172 + If you say Y here, all execve() calls will be logged (since the
50173 + other exec*() calls are frontends to execve(), all execution
50174 + will be logged). Useful for shell-servers that like to keep track
50175 + of their users. If the sysctl option is enabled, a sysctl option with
50176 + name "exec_logging" is created.
50177 + WARNING: This option when enabled will produce a LOT of logs, especially
50178 + on an active system.
50179 +
50180 +config GRKERNSEC_RESLOG
50181 + bool "Resource logging"
50182 + default y if GRKERNSEC_CONFIG_AUTO
50183 + help
50184 + If you say Y here, all attempts to overstep resource limits will
50185 + be logged with the resource name, the requested size, and the current
50186 + limit. It is highly recommended that you say Y here. If the sysctl
50187 + option is enabled, a sysctl option with name "resource_logging" is
50188 + created. If the RBAC system is enabled, the sysctl value is ignored.
50189 +
50190 +config GRKERNSEC_CHROOT_EXECLOG
50191 + bool "Log execs within chroot"
50192 + help
50193 + If you say Y here, all executions inside a chroot jail will be logged
50194 + to syslog. This can cause a large amount of logs if certain
50195 + applications (eg. djb's daemontools) are installed on the system, and
50196 + is therefore left as an option. If the sysctl option is enabled, a
50197 + sysctl option with name "chroot_execlog" is created.
50198 +
50199 +config GRKERNSEC_AUDIT_PTRACE
50200 + bool "Ptrace logging"
50201 + help
50202 + If you say Y here, all attempts to attach to a process via ptrace
50203 + will be logged. If the sysctl option is enabled, a sysctl option
50204 + with name "audit_ptrace" is created.
50205 +
50206 +config GRKERNSEC_AUDIT_CHDIR
50207 + bool "Chdir logging"
50208 + help
50209 + If you say Y here, all chdir() calls will be logged. If the sysctl
50210 + option is enabled, a sysctl option with name "audit_chdir" is created.
50211 +
50212 +config GRKERNSEC_AUDIT_MOUNT
50213 + bool "(Un)Mount logging"
50214 + help
50215 + If you say Y here, all mounts and unmounts will be logged. If the
50216 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50217 + created.
50218 +
50219 +config GRKERNSEC_SIGNAL
50220 + bool "Signal logging"
50221 + default y if GRKERNSEC_CONFIG_AUTO
50222 + help
50223 + If you say Y here, certain important signals will be logged, such as
50224 + SIGSEGV, which will as a result inform you of when an error in a program
50225 + occurred, which in some cases could mean a possible exploit attempt.
50226 + If the sysctl option is enabled, a sysctl option with name
50227 + "signal_logging" is created.
50228 +
50229 +config GRKERNSEC_FORKFAIL
50230 + bool "Fork failure logging"
50231 + help
50232 + If you say Y here, all failed fork() attempts will be logged.
50233 + This could suggest a fork bomb, or someone attempting to overstep
50234 + their process limit. If the sysctl option is enabled, a sysctl option
50235 + with name "forkfail_logging" is created.
50236 +
50237 +config GRKERNSEC_TIME
50238 + bool "Time change logging"
50239 + default y if GRKERNSEC_CONFIG_AUTO
50240 + help
50241 + If you say Y here, any changes of the system clock will be logged.
50242 + If the sysctl option is enabled, a sysctl option with name
50243 + "timechange_logging" is created.
50244 +
50245 +config GRKERNSEC_PROC_IPADDR
50246 + bool "/proc/<pid>/ipaddr support"
50247 + default y if GRKERNSEC_CONFIG_AUTO
50248 + help
50249 + If you say Y here, a new entry will be added to each /proc/<pid>
50250 + directory that contains the IP address of the person using the task.
50251 + The IP is carried across local TCP and AF_UNIX stream sockets.
50252 + This information can be useful for IDS/IPSes to perform remote response
50253 + to a local attack. The entry is readable by only the owner of the
50254 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50255 + the RBAC system), and thus does not create privacy concerns.
50256 +
50257 +config GRKERNSEC_RWXMAP_LOG
50258 + bool 'Denied RWX mmap/mprotect logging'
50259 + default y if GRKERNSEC_CONFIG_AUTO
50260 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50261 + help
50262 + If you say Y here, calls to mmap() and mprotect() with explicit
50263 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50264 + denied by the PAX_MPROTECT feature. If the sysctl option is
50265 + enabled, a sysctl option with name "rwxmap_logging" is created.
50266 +
50267 +config GRKERNSEC_AUDIT_TEXTREL
50268 + bool 'ELF text relocations logging (READ HELP)'
50269 + depends on PAX_MPROTECT
50270 + help
50271 + If you say Y here, text relocations will be logged with the filename
50272 + of the offending library or binary. The purpose of the feature is
50273 + to help Linux distribution developers get rid of libraries and
50274 + binaries that need text relocations which hinder the future progress
50275 + of PaX. Only Linux distribution developers should say Y here, and
50276 + never on a production machine, as this option creates an information
50277 + leak that could aid an attacker in defeating the randomization of
50278 + a single memory region. If the sysctl option is enabled, a sysctl
50279 + option with name "audit_textrel" is created.
50280 +
50281 +endmenu
50282 +
50283 +menu "Executable Protections"
50284 +depends on GRKERNSEC
50285 +
50286 +config GRKERNSEC_DMESG
50287 + bool "Dmesg(8) restriction"
50288 + default y if GRKERNSEC_CONFIG_AUTO
50289 + help
50290 + If you say Y here, non-root users will not be able to use dmesg(8)
50291 + to view up to the last 4kb of messages in the kernel's log buffer.
50292 + The kernel's log buffer often contains kernel addresses and other
50293 + identifying information useful to an attacker in fingerprinting a
50294 + system for a targeted exploit.
50295 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50296 + created.
50297 +
50298 +config GRKERNSEC_HARDEN_PTRACE
50299 + bool "Deter ptrace-based process snooping"
50300 + default y if GRKERNSEC_CONFIG_AUTO
50301 + help
50302 + If you say Y here, TTY sniffers and other malicious monitoring
50303 + programs implemented through ptrace will be defeated. If you
50304 + have been using the RBAC system, this option has already been
50305 + enabled for several years for all users, with the ability to make
50306 + fine-grained exceptions.
50307 +
50308 + This option only affects the ability of non-root users to ptrace
50309 + processes that are not a descendant of the ptracing process.
50310 + This means that strace ./binary and gdb ./binary will still work,
50311 + but attaching to arbitrary processes will not. If the sysctl
50312 + option is enabled, a sysctl option with name "harden_ptrace" is
50313 + created.
50314 +
50315 +config GRKERNSEC_PTRACE_READEXEC
50316 + bool "Require read access to ptrace sensitive binaries"
50317 + default y if GRKERNSEC_CONFIG_AUTO
50318 + help
50319 + If you say Y here, unprivileged users will not be able to ptrace unreadable
50320 + binaries. This option is useful in environments that
50321 + remove the read bits (e.g. file mode 4711) from suid binaries to
50322 + prevent infoleaking of their contents. This option adds
50323 + consistency to the use of that file mode, as the binary could normally
50324 + be read out when run without privileges while ptracing.
50325 +
50326 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50327 + is created.
50328 +
50329 +config GRKERNSEC_SETXID
50330 + bool "Enforce consistent multithreaded privileges"
50331 + default y if GRKERNSEC_CONFIG_AUTO
50332 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
50333 + help
50334 + If you say Y here, a change from a root uid to a non-root uid
50335 + in a multithreaded application will cause the resulting uids,
50336 + gids, supplementary groups, and capabilities in that thread
50337 + to be propagated to the other threads of the process. In most
50338 + cases this is unnecessary, as glibc will emulate this behavior
50339 + on behalf of the application. Other libcs do not act in the
50340 + same way, allowing the other threads of the process to continue
50341 + running with root privileges. If the sysctl option is enabled,
50342 + a sysctl option with name "consistent_setxid" is created.
50343 +
50344 +config GRKERNSEC_TPE
50345 + bool "Trusted Path Execution (TPE)"
50346 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50347 + help
50348 + If you say Y here, you will be able to choose a gid to add to the
50349 + supplementary groups of users you want to mark as "untrusted."
50350 + These users will not be able to execute any files that are not in
50351 + root-owned directories writable only by root. If the sysctl option
50352 + is enabled, a sysctl option with name "tpe" is created.
50353 +
50354 +config GRKERNSEC_TPE_ALL
50355 + bool "Partially restrict all non-root users"
50356 + depends on GRKERNSEC_TPE
50357 + help
50358 + If you say Y here, all non-root users will be covered under
50359 + a weaker TPE restriction. This is separate from, and in addition to,
50360 + the main TPE options that you have selected elsewhere. Thus, if a
50361 + "trusted" GID is chosen, this restriction applies to even that GID.
50362 + Under this restriction, all non-root users will only be allowed to
50363 + execute files in directories they own that are not group or
50364 + world-writable, or in directories owned by root and writable only by
50365 + root. If the sysctl option is enabled, a sysctl option with name
50366 + "tpe_restrict_all" is created.
50367 +
50368 +config GRKERNSEC_TPE_INVERT
50369 + bool "Invert GID option"
50370 + depends on GRKERNSEC_TPE
50371 + help
50372 + If you say Y here, the group you specify in the TPE configuration will
50373 + decide what group TPE restrictions will be *disabled* for. This
50374 + option is useful if you want TPE restrictions to be applied to most
50375 + users on the system. If the sysctl option is enabled, a sysctl option
50376 + with name "tpe_invert" is created. Unlike other sysctl options, this
50377 + entry will default to on for backward-compatibility.
50378 +
50379 +config GRKERNSEC_TPE_GID
50380 + int "GID for untrusted users"
50381 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50382 + default 1005
50383 + help
50384 + Setting this GID determines what group TPE restrictions will be
50385 + *enabled* for. If the sysctl option is enabled, a sysctl option
50386 + with name "tpe_gid" is created.
50387 +
50388 +config GRKERNSEC_TPE_GID
50389 + int "GID for trusted users"
50390 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50391 + default 1005
50392 + help
50393 + Setting this GID determines what group TPE restrictions will be
50394 + *disabled* for. If the sysctl option is enabled, a sysctl option
50395 + with name "tpe_gid" is created.
50396 +
50397 +endmenu
50398 +menu "Network Protections"
50399 +depends on GRKERNSEC
50400 +
50401 +config GRKERNSEC_RANDNET
50402 + bool "Larger entropy pools"
50403 + default y if GRKERNSEC_CONFIG_AUTO
50404 + help
50405 + If you say Y here, the entropy pools used for many features of Linux
50406 + and grsecurity will be doubled in size. Since several grsecurity
50407 + features use additional randomness, it is recommended that you say Y
50408 + here. Saying Y here has a similar effect as modifying
50409 + /proc/sys/kernel/random/poolsize.
50410 +
50411 +config GRKERNSEC_BLACKHOLE
50412 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50413 + default y if GRKERNSEC_CONFIG_AUTO
50414 + depends on NET
50415 + help
50416 + If you say Y here, neither TCP resets nor ICMP
50417 + destination-unreachable packets will be sent in response to packets
50418 + sent to ports for which no associated listening process exists.
50419 + This feature supports both IPv4 and IPv6 and exempts the
50420 + loopback interface from blackholing. Enabling this feature
50421 + makes a host more resilient to DoS attacks and reduces network
50422 + visibility against scanners.
50423 +
50424 + The blackhole feature as-implemented is equivalent to the FreeBSD
50425 + blackhole feature, as it prevents RST responses to all packets, not
50426 + just SYNs. Under most application behavior this causes no
50427 + problems, but applications (like haproxy) may not close certain
50428 + connections in a way that cleanly terminates them on the remote
50429 + end, leaving the remote host in LAST_ACK state. Because of this
50430 + side-effect and to prevent intentional LAST_ACK DoSes, this
50431 + feature also adds automatic mitigation against such attacks.
50432 + The mitigation drastically reduces the amount of time a socket
50433 + can spend in LAST_ACK state. If you're using haproxy and not
50434 + all servers it connects to have this option enabled, consider
50435 + disabling this feature on the haproxy host.
50436 +
50437 + If the sysctl option is enabled, two sysctl options with names
50438 + "ip_blackhole" and "lastack_retries" will be created.
50439 + While "ip_blackhole" takes the standard zero/non-zero on/off
50440 + toggle, "lastack_retries" uses the same kinds of values as
50441 + "tcp_retries1" and "tcp_retries2". The default value of 4
50442 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50443 + state.
50444 +
50445 +config GRKERNSEC_SOCKET
50446 + bool "Socket restrictions"
50447 + depends on NET
50448 + help
50449 + If you say Y here, you will be able to choose from several options.
50450 + If you assign a GID on your system and add it to the supplementary
50451 + groups of users you want to restrict socket access to, this patch
50452 + will perform up to three things, based on the option(s) you choose.
50453 +
50454 +config GRKERNSEC_SOCKET_ALL
50455 + bool "Deny any sockets to group"
50456 + depends on GRKERNSEC_SOCKET
50457 + help
50458 + If you say Y here, you will be able to choose a GID whose users will
50459 + be unable to connect to other hosts from your machine or run server
50460 + applications from your machine. If the sysctl option is enabled, a
50461 + sysctl option with name "socket_all" is created.
50462 +
50463 +config GRKERNSEC_SOCKET_ALL_GID
50464 + int "GID to deny all sockets for"
50465 + depends on GRKERNSEC_SOCKET_ALL
50466 + default 1004
50467 + help
50468 + Here you can choose the GID to disable socket access for. Remember to
50469 + add the users you want socket access disabled for to the GID
50470 + specified here. If the sysctl option is enabled, a sysctl option
50471 + with name "socket_all_gid" is created.
50472 +
50473 +config GRKERNSEC_SOCKET_CLIENT
50474 + bool "Deny client sockets to group"
50475 + depends on GRKERNSEC_SOCKET
50476 + help
50477 + If you say Y here, you will be able to choose a GID whose users will
50478 + be unable to connect to other hosts from your machine, but will be
50479 + able to run servers. If this option is enabled, all users in the group
50480 + you specify will have to use passive mode when initiating ftp transfers
50481 + from the shell on your machine. If the sysctl option is enabled, a
50482 + sysctl option with name "socket_client" is created.
50483 +
50484 +config GRKERNSEC_SOCKET_CLIENT_GID
50485 + int "GID to deny client sockets for"
50486 + depends on GRKERNSEC_SOCKET_CLIENT
50487 + default 1003
50488 + help
50489 + Here you can choose the GID to disable client socket access for.
50490 + Remember to add the users you want client socket access disabled for to
50491 + the GID specified here. If the sysctl option is enabled, a sysctl
50492 + option with name "socket_client_gid" is created.
50493 +
50494 +config GRKERNSEC_SOCKET_SERVER
50495 + bool "Deny server sockets to group"
50496 + depends on GRKERNSEC_SOCKET
50497 + help
50498 + If you say Y here, you will be able to choose a GID whose users will
50499 + be unable to run server applications from your machine. If the sysctl
50500 + option is enabled, a sysctl option with name "socket_server" is created.
50501 +
50502 +config GRKERNSEC_SOCKET_SERVER_GID
50503 + int "GID to deny server sockets for"
50504 + depends on GRKERNSEC_SOCKET_SERVER
50505 + default 1002
50506 + help
50507 + Here you can choose the GID to disable server socket access for.
50508 + Remember to add the users you want server socket access disabled for to
50509 + the GID specified here. If the sysctl option is enabled, a sysctl
50510 + option with name "socket_server_gid" is created.
50511 +
50512 +endmenu
50513 +menu "Sysctl Support"
50514 +depends on GRKERNSEC && SYSCTL
50515 +
50516 +config GRKERNSEC_SYSCTL
50517 + bool "Sysctl support"
50518 + default y if GRKERNSEC_CONFIG_AUTO
50519 + help
50520 + If you say Y here, you will be able to change the options that
50521 + grsecurity runs with at bootup, without having to recompile your
50522 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50523 + to enable (1) or disable (0) various features. All the sysctl entries
50524 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50525 + All features enabled in the kernel configuration are disabled at boot
50526 + if you do not say Y to the "Turn on features by default" option.
50527 + All options should be set at startup, and the grsec_lock entry should
50528 + be set to a non-zero value after all the options are set.
50529 + *THIS IS EXTREMELY IMPORTANT*
50530 +
50531 +config GRKERNSEC_SYSCTL_DISTRO
50532 + bool "Extra sysctl support for distro makers (READ HELP)"
50533 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50534 + help
50535 + If you say Y here, additional sysctl options will be created
50536 + for features that affect processes running as root. Therefore,
50537 + it is critical when using this option that the grsec_lock entry be
50538 + enabled after boot. Only distros with prebuilt kernel packages
50539 + with this option enabled that can ensure grsec_lock is enabled
50540 + after boot should use this option.
50541 + *Failure to set grsec_lock after boot makes all grsec features
50542 + this option covers useless*
50543 +
50544 + Currently this option creates the following sysctl entries:
50545 + "Disable Privileged I/O": "disable_priv_io"
50546 +
50547 +config GRKERNSEC_SYSCTL_ON
50548 + bool "Turn on features by default"
50549 + default y if GRKERNSEC_CONFIG_AUTO
50550 + depends on GRKERNSEC_SYSCTL
50551 + help
50552 + If you say Y here, instead of having all features enabled in the
50553 + kernel configuration disabled at boot time, the features will be
50554 + enabled at boot time. It is recommended you say Y here unless
50555 + there is some reason you would want all sysctl-tunable features to
50556 + be disabled by default. As mentioned elsewhere, it is important
50557 + to enable the grsec_lock entry once you have finished modifying
50558 + the sysctl entries.
50559 +
50560 +endmenu
50561 +menu "Logging Options"
50562 +depends on GRKERNSEC
50563 +
50564 +config GRKERNSEC_FLOODTIME
50565 + int "Seconds in between log messages (minimum)"
50566 + default 10
50567 + help
50568 + This option allows you to enforce the number of seconds between
50569 + grsecurity log messages. The default should be suitable for most
50570 + people, however, if you choose to change it, choose a value small enough
50571 + to allow informative logs to be produced, but large enough to
50572 + prevent flooding.
50573 +
50574 +config GRKERNSEC_FLOODBURST
50575 + int "Number of messages in a burst (maximum)"
50576 + default 6
50577 + help
50578 + This option allows you to choose the maximum number of messages allowed
50579 + within the flood time interval you chose in a separate option. The
50580 + default should be suitable for most people, however if you find that
50581 + many of your logs are being interpreted as flooding, you may want to
50582 + raise this value.
50583 +
50584 +endmenu
50585 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50586 new file mode 100644
50587 index 0000000..1b9afa9
50588 --- /dev/null
50589 +++ b/grsecurity/Makefile
50590 @@ -0,0 +1,38 @@
50591 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50592 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50593 +# into an RBAC system
50594 +#
50595 +# All code in this directory and various hooks inserted throughout the kernel
50596 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50597 +# under the GPL v2 or higher
50598 +
50599 +KBUILD_CFLAGS += -Werror
50600 +
50601 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50602 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50603 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50604 +
50605 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50606 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50607 + gracl_learn.o grsec_log.o
50608 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50609 +
50610 +ifdef CONFIG_NET
50611 +obj-y += grsec_sock.o
50612 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50613 +endif
50614 +
50615 +ifndef CONFIG_GRKERNSEC
50616 +obj-y += grsec_disabled.o
50617 +endif
50618 +
50619 +ifdef CONFIG_GRKERNSEC_HIDESYM
50620 +extra-y := grsec_hidesym.o
50621 +$(obj)/grsec_hidesym.o:
50622 + @-chmod -f 500 /boot
50623 + @-chmod -f 500 /lib/modules
50624 + @-chmod -f 500 /lib64/modules
50625 + @-chmod -f 500 /lib32/modules
50626 + @-chmod -f 700 .
50627 + @echo ' grsec: protected kernel image paths'
50628 +endif
50629 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50630 new file mode 100644
50631 index 0000000..7a5922f
50632 --- /dev/null
50633 +++ b/grsecurity/gracl.c
50634 @@ -0,0 +1,4016 @@
50635 +#include <linux/kernel.h>
50636 +#include <linux/module.h>
50637 +#include <linux/sched.h>
50638 +#include <linux/mm.h>
50639 +#include <linux/file.h>
50640 +#include <linux/fs.h>
50641 +#include <linux/namei.h>
50642 +#include <linux/mount.h>
50643 +#include <linux/tty.h>
50644 +#include <linux/proc_fs.h>
50645 +#include <linux/lglock.h>
50646 +#include <linux/slab.h>
50647 +#include <linux/vmalloc.h>
50648 +#include <linux/types.h>
50649 +#include <linux/sysctl.h>
50650 +#include <linux/netdevice.h>
50651 +#include <linux/ptrace.h>
50652 +#include <linux/gracl.h>
50653 +#include <linux/gralloc.h>
50654 +#include <linux/security.h>
50655 +#include <linux/grinternal.h>
50656 +#include <linux/pid_namespace.h>
50657 +#include <linux/stop_machine.h>
50658 +#include <linux/fdtable.h>
50659 +#include <linux/percpu.h>
50660 +#include "../fs/mount.h"
50661 +
50662 +#include <asm/uaccess.h>
50663 +#include <asm/errno.h>
50664 +#include <asm/mman.h>
50665 +
50666 +static struct acl_role_db acl_role_set;
50667 +static struct name_db name_set;
50668 +static struct inodev_db inodev_set;
50669 +
50670 +/* for keeping track of userspace pointers used for subjects, so we
50671 + can share references in the kernel as well
50672 +*/
50673 +
50674 +static struct path real_root;
50675 +
50676 +static struct acl_subj_map_db subj_map_set;
50677 +
50678 +static struct acl_role_label *default_role;
50679 +
50680 +static struct acl_role_label *role_list;
50681 +
50682 +static u16 acl_sp_role_value;
50683 +
50684 +extern char *gr_shared_page[4];
50685 +static DEFINE_MUTEX(gr_dev_mutex);
50686 +DEFINE_RWLOCK(gr_inode_lock);
50687 +
50688 +struct gr_arg *gr_usermode;
50689 +
50690 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50691 +
50692 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50693 +extern void gr_clear_learn_entries(void);
50694 +
50695 +#ifdef CONFIG_GRKERNSEC_RESLOG
50696 +extern void gr_log_resource(const struct task_struct *task,
50697 + const int res, const unsigned long wanted, const int gt);
50698 +#endif
50699 +
50700 +unsigned char *gr_system_salt;
50701 +unsigned char *gr_system_sum;
50702 +
50703 +static struct sprole_pw **acl_special_roles = NULL;
50704 +static __u16 num_sprole_pws = 0;
50705 +
50706 +static struct acl_role_label *kernel_role = NULL;
50707 +
50708 +static unsigned int gr_auth_attempts = 0;
50709 +static unsigned long gr_auth_expires = 0UL;
50710 +
50711 +#ifdef CONFIG_NET
50712 +extern struct vfsmount *sock_mnt;
50713 +#endif
50714 +
50715 +extern struct vfsmount *pipe_mnt;
50716 +extern struct vfsmount *shm_mnt;
50717 +#ifdef CONFIG_HUGETLBFS
50718 +extern struct vfsmount *hugetlbfs_vfsmount;
50719 +#endif
50720 +
50721 +static struct acl_object_label *fakefs_obj_rw;
50722 +static struct acl_object_label *fakefs_obj_rwx;
50723 +
50724 +extern int gr_init_uidset(void);
50725 +extern void gr_free_uidset(void);
50726 +extern void gr_remove_uid(uid_t uid);
50727 +extern int gr_find_uid(uid_t uid);
50728 +
50729 +DECLARE_BRLOCK(vfsmount_lock);
50730 +
50731 +__inline__ int
50732 +gr_acl_is_enabled(void)
50733 +{
50734 + return (gr_status & GR_READY);
50735 +}
50736 +
50737 +#ifdef CONFIG_BTRFS_FS
50738 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50739 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50740 +#endif
50741 +
50742 +static inline dev_t __get_dev(const struct dentry *dentry)
50743 +{
50744 +#ifdef CONFIG_BTRFS_FS
50745 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50746 + return get_btrfs_dev_from_inode(dentry->d_inode);
50747 + else
50748 +#endif
50749 + return dentry->d_inode->i_sb->s_dev;
50750 +}
50751 +
50752 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50753 +{
50754 + return __get_dev(dentry);
50755 +}
50756 +
50757 +static char gr_task_roletype_to_char(struct task_struct *task)
50758 +{
50759 + switch (task->role->roletype &
50760 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50761 + GR_ROLE_SPECIAL)) {
50762 + case GR_ROLE_DEFAULT:
50763 + return 'D';
50764 + case GR_ROLE_USER:
50765 + return 'U';
50766 + case GR_ROLE_GROUP:
50767 + return 'G';
50768 + case GR_ROLE_SPECIAL:
50769 + return 'S';
50770 + }
50771 +
50772 + return 'X';
50773 +}
50774 +
50775 +char gr_roletype_to_char(void)
50776 +{
50777 + return gr_task_roletype_to_char(current);
50778 +}
50779 +
50780 +__inline__ int
50781 +gr_acl_tpe_check(void)
50782 +{
50783 + if (unlikely(!(gr_status & GR_READY)))
50784 + return 0;
50785 + if (current->role->roletype & GR_ROLE_TPE)
50786 + return 1;
50787 + else
50788 + return 0;
50789 +}
50790 +
50791 +int
50792 +gr_handle_rawio(const struct inode *inode)
50793 +{
50794 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50795 + if (inode && S_ISBLK(inode->i_mode) &&
50796 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50797 + !capable(CAP_SYS_RAWIO))
50798 + return 1;
50799 +#endif
50800 + return 0;
50801 +}
50802 +
50803 +static int
50804 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50805 +{
50806 + if (likely(lena != lenb))
50807 + return 0;
50808 +
50809 + return !memcmp(a, b, lena);
50810 +}
50811 +
50812 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50813 +{
50814 + *buflen -= namelen;
50815 + if (*buflen < 0)
50816 + return -ENAMETOOLONG;
50817 + *buffer -= namelen;
50818 + memcpy(*buffer, str, namelen);
50819 + return 0;
50820 +}
50821 +
50822 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50823 +{
50824 + return prepend(buffer, buflen, name->name, name->len);
50825 +}
50826 +
50827 +static int prepend_path(const struct path *path, struct path *root,
50828 + char **buffer, int *buflen)
50829 +{
50830 + struct dentry *dentry = path->dentry;
50831 + struct vfsmount *vfsmnt = path->mnt;
50832 + struct mount *mnt = real_mount(vfsmnt);
50833 + bool slash = false;
50834 + int error = 0;
50835 +
50836 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50837 + struct dentry * parent;
50838 +
50839 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50840 + /* Global root? */
50841 + if (!mnt_has_parent(mnt)) {
50842 + goto out;
50843 + }
50844 + dentry = mnt->mnt_mountpoint;
50845 + mnt = mnt->mnt_parent;
50846 + vfsmnt = &mnt->mnt;
50847 + continue;
50848 + }
50849 + parent = dentry->d_parent;
50850 + prefetch(parent);
50851 + spin_lock(&dentry->d_lock);
50852 + error = prepend_name(buffer, buflen, &dentry->d_name);
50853 + spin_unlock(&dentry->d_lock);
50854 + if (!error)
50855 + error = prepend(buffer, buflen, "/", 1);
50856 + if (error)
50857 + break;
50858 +
50859 + slash = true;
50860 + dentry = parent;
50861 + }
50862 +
50863 +out:
50864 + if (!error && !slash)
50865 + error = prepend(buffer, buflen, "/", 1);
50866 +
50867 + return error;
50868 +}
50869 +
50870 +/* this must be called with vfsmount_lock and rename_lock held */
50871 +
50872 +static char *__our_d_path(const struct path *path, struct path *root,
50873 + char *buf, int buflen)
50874 +{
50875 + char *res = buf + buflen;
50876 + int error;
50877 +
50878 + prepend(&res, &buflen, "\0", 1);
50879 + error = prepend_path(path, root, &res, &buflen);
50880 + if (error)
50881 + return ERR_PTR(error);
50882 +
50883 + return res;
50884 +}
50885 +
50886 +static char *
50887 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50888 +{
50889 + char *retval;
50890 +
50891 + retval = __our_d_path(path, root, buf, buflen);
50892 + if (unlikely(IS_ERR(retval)))
50893 + retval = strcpy(buf, "<path too long>");
50894 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50895 + retval[1] = '\0';
50896 +
50897 + return retval;
50898 +}
50899 +
50900 +static char *
50901 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50902 + char *buf, int buflen)
50903 +{
50904 + struct path path;
50905 + char *res;
50906 +
50907 + path.dentry = (struct dentry *)dentry;
50908 + path.mnt = (struct vfsmount *)vfsmnt;
50909 +
50910 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50911 + by the RBAC system */
50912 + res = gen_full_path(&path, &real_root, buf, buflen);
50913 +
50914 + return res;
50915 +}
50916 +
50917 +static char *
50918 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50919 + char *buf, int buflen)
50920 +{
50921 + char *res;
50922 + struct path path;
50923 + struct path root;
50924 + struct task_struct *reaper = init_pid_ns.child_reaper;
50925 +
50926 + path.dentry = (struct dentry *)dentry;
50927 + path.mnt = (struct vfsmount *)vfsmnt;
50928 +
50929 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50930 + get_fs_root(reaper->fs, &root);
50931 +
50932 + write_seqlock(&rename_lock);
50933 + br_read_lock(vfsmount_lock);
50934 + res = gen_full_path(&path, &root, buf, buflen);
50935 + br_read_unlock(vfsmount_lock);
50936 + write_sequnlock(&rename_lock);
50937 +
50938 + path_put(&root);
50939 + return res;
50940 +}
50941 +
50942 +static char *
50943 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50944 +{
50945 + char *ret;
50946 + write_seqlock(&rename_lock);
50947 + br_read_lock(vfsmount_lock);
50948 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50949 + PAGE_SIZE);
50950 + br_read_unlock(vfsmount_lock);
50951 + write_sequnlock(&rename_lock);
50952 + return ret;
50953 +}
50954 +
50955 +static char *
50956 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50957 +{
50958 + char *ret;
50959 + char *buf;
50960 + int buflen;
50961 +
50962 + write_seqlock(&rename_lock);
50963 + br_read_lock(vfsmount_lock);
50964 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50965 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50966 + buflen = (int)(ret - buf);
50967 + if (buflen >= 5)
50968 + prepend(&ret, &buflen, "/proc", 5);
50969 + else
50970 + ret = strcpy(buf, "<path too long>");
50971 + br_read_unlock(vfsmount_lock);
50972 + write_sequnlock(&rename_lock);
50973 + return ret;
50974 +}
50975 +
50976 +char *
50977 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50978 +{
50979 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50980 + PAGE_SIZE);
50981 +}
50982 +
50983 +char *
50984 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50985 +{
50986 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50987 + PAGE_SIZE);
50988 +}
50989 +
50990 +char *
50991 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50992 +{
50993 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50994 + PAGE_SIZE);
50995 +}
50996 +
50997 +char *
50998 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50999 +{
51000 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51001 + PAGE_SIZE);
51002 +}
51003 +
51004 +char *
51005 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51006 +{
51007 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51008 + PAGE_SIZE);
51009 +}
51010 +
51011 +__inline__ __u32
51012 +to_gr_audit(const __u32 reqmode)
51013 +{
51014 + /* masks off auditable permission flags, then shifts them to create
51015 + auditing flags, and adds the special case of append auditing if
51016 + we're requesting write */
51017 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51018 +}
51019 +
51020 +struct acl_subject_label *
51021 +lookup_subject_map(const struct acl_subject_label *userp)
51022 +{
51023 + unsigned int index = shash(userp, subj_map_set.s_size);
51024 + struct subject_map *match;
51025 +
51026 + match = subj_map_set.s_hash[index];
51027 +
51028 + while (match && match->user != userp)
51029 + match = match->next;
51030 +
51031 + if (match != NULL)
51032 + return match->kernel;
51033 + else
51034 + return NULL;
51035 +}
51036 +
51037 +static void
51038 +insert_subj_map_entry(struct subject_map *subjmap)
51039 +{
51040 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51041 + struct subject_map **curr;
51042 +
51043 + subjmap->prev = NULL;
51044 +
51045 + curr = &subj_map_set.s_hash[index];
51046 + if (*curr != NULL)
51047 + (*curr)->prev = subjmap;
51048 +
51049 + subjmap->next = *curr;
51050 + *curr = subjmap;
51051 +
51052 + return;
51053 +}
51054 +
51055 +static struct acl_role_label *
51056 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51057 + const gid_t gid)
51058 +{
51059 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51060 + struct acl_role_label *match;
51061 + struct role_allowed_ip *ipp;
51062 + unsigned int x;
51063 + u32 curr_ip = task->signal->curr_ip;
51064 +
51065 + task->signal->saved_ip = curr_ip;
51066 +
51067 + match = acl_role_set.r_hash[index];
51068 +
51069 + while (match) {
51070 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51071 + for (x = 0; x < match->domain_child_num; x++) {
51072 + if (match->domain_children[x] == uid)
51073 + goto found;
51074 + }
51075 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51076 + break;
51077 + match = match->next;
51078 + }
51079 +found:
51080 + if (match == NULL) {
51081 + try_group:
51082 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51083 + match = acl_role_set.r_hash[index];
51084 +
51085 + while (match) {
51086 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51087 + for (x = 0; x < match->domain_child_num; x++) {
51088 + if (match->domain_children[x] == gid)
51089 + goto found2;
51090 + }
51091 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51092 + break;
51093 + match = match->next;
51094 + }
51095 +found2:
51096 + if (match == NULL)
51097 + match = default_role;
51098 + if (match->allowed_ips == NULL)
51099 + return match;
51100 + else {
51101 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51102 + if (likely
51103 + ((ntohl(curr_ip) & ipp->netmask) ==
51104 + (ntohl(ipp->addr) & ipp->netmask)))
51105 + return match;
51106 + }
51107 + match = default_role;
51108 + }
51109 + } else if (match->allowed_ips == NULL) {
51110 + return match;
51111 + } else {
51112 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51113 + if (likely
51114 + ((ntohl(curr_ip) & ipp->netmask) ==
51115 + (ntohl(ipp->addr) & ipp->netmask)))
51116 + return match;
51117 + }
51118 + goto try_group;
51119 + }
51120 +
51121 + return match;
51122 +}
51123 +
51124 +struct acl_subject_label *
51125 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51126 + const struct acl_role_label *role)
51127 +{
51128 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51129 + struct acl_subject_label *match;
51130 +
51131 + match = role->subj_hash[index];
51132 +
51133 + while (match && (match->inode != ino || match->device != dev ||
51134 + (match->mode & GR_DELETED))) {
51135 + match = match->next;
51136 + }
51137 +
51138 + if (match && !(match->mode & GR_DELETED))
51139 + return match;
51140 + else
51141 + return NULL;
51142 +}
51143 +
51144 +struct acl_subject_label *
51145 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51146 + const struct acl_role_label *role)
51147 +{
51148 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51149 + struct acl_subject_label *match;
51150 +
51151 + match = role->subj_hash[index];
51152 +
51153 + while (match && (match->inode != ino || match->device != dev ||
51154 + !(match->mode & GR_DELETED))) {
51155 + match = match->next;
51156 + }
51157 +
51158 + if (match && (match->mode & GR_DELETED))
51159 + return match;
51160 + else
51161 + return NULL;
51162 +}
51163 +
51164 +static struct acl_object_label *
51165 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51166 + const struct acl_subject_label *subj)
51167 +{
51168 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51169 + struct acl_object_label *match;
51170 +
51171 + match = subj->obj_hash[index];
51172 +
51173 + while (match && (match->inode != ino || match->device != dev ||
51174 + (match->mode & GR_DELETED))) {
51175 + match = match->next;
51176 + }
51177 +
51178 + if (match && !(match->mode & GR_DELETED))
51179 + return match;
51180 + else
51181 + return NULL;
51182 +}
51183 +
51184 +static struct acl_object_label *
51185 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51186 + const struct acl_subject_label *subj)
51187 +{
51188 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51189 + struct acl_object_label *match;
51190 +
51191 + match = subj->obj_hash[index];
51192 +
51193 + while (match && (match->inode != ino || match->device != dev ||
51194 + !(match->mode & GR_DELETED))) {
51195 + match = match->next;
51196 + }
51197 +
51198 + if (match && (match->mode & GR_DELETED))
51199 + return match;
51200 +
51201 + match = subj->obj_hash[index];
51202 +
51203 + while (match && (match->inode != ino || match->device != dev ||
51204 + (match->mode & GR_DELETED))) {
51205 + match = match->next;
51206 + }
51207 +
51208 + if (match && !(match->mode & GR_DELETED))
51209 + return match;
51210 + else
51211 + return NULL;
51212 +}
51213 +
51214 +static struct name_entry *
51215 +lookup_name_entry(const char *name)
51216 +{
51217 + unsigned int len = strlen(name);
51218 + unsigned int key = full_name_hash(name, len);
51219 + unsigned int index = key % name_set.n_size;
51220 + struct name_entry *match;
51221 +
51222 + match = name_set.n_hash[index];
51223 +
51224 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51225 + match = match->next;
51226 +
51227 + return match;
51228 +}
51229 +
51230 +static struct name_entry *
51231 +lookup_name_entry_create(const char *name)
51232 +{
51233 + unsigned int len = strlen(name);
51234 + unsigned int key = full_name_hash(name, len);
51235 + unsigned int index = key % name_set.n_size;
51236 + struct name_entry *match;
51237 +
51238 + match = name_set.n_hash[index];
51239 +
51240 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51241 + !match->deleted))
51242 + match = match->next;
51243 +
51244 + if (match && match->deleted)
51245 + return match;
51246 +
51247 + match = name_set.n_hash[index];
51248 +
51249 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51250 + match->deleted))
51251 + match = match->next;
51252 +
51253 + if (match && !match->deleted)
51254 + return match;
51255 + else
51256 + return NULL;
51257 +}
51258 +
51259 +static struct inodev_entry *
51260 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51261 +{
51262 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51263 + struct inodev_entry *match;
51264 +
51265 + match = inodev_set.i_hash[index];
51266 +
51267 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51268 + match = match->next;
51269 +
51270 + return match;
51271 +}
51272 +
51273 +static void
51274 +insert_inodev_entry(struct inodev_entry *entry)
51275 +{
51276 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51277 + inodev_set.i_size);
51278 + struct inodev_entry **curr;
51279 +
51280 + entry->prev = NULL;
51281 +
51282 + curr = &inodev_set.i_hash[index];
51283 + if (*curr != NULL)
51284 + (*curr)->prev = entry;
51285 +
51286 + entry->next = *curr;
51287 + *curr = entry;
51288 +
51289 + return;
51290 +}
51291 +
51292 +static void
51293 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51294 +{
51295 + unsigned int index =
51296 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51297 + struct acl_role_label **curr;
51298 + struct acl_role_label *tmp, *tmp2;
51299 +
51300 + curr = &acl_role_set.r_hash[index];
51301 +
51302 + /* simple case, slot is empty, just set it to our role */
51303 + if (*curr == NULL) {
51304 + *curr = role;
51305 + } else {
51306 + /* example:
51307 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
51308 + 2 -> 3
51309 + */
51310 + /* first check to see if we can already be reached via this slot */
51311 + tmp = *curr;
51312 + while (tmp && tmp != role)
51313 + tmp = tmp->next;
51314 + if (tmp == role) {
51315 + /* we don't need to add ourselves to this slot's chain */
51316 + return;
51317 + }
51318 + /* we need to add ourselves to this chain, two cases */
51319 + if (role->next == NULL) {
51320 + /* simple case, append the current chain to our role */
51321 + role->next = *curr;
51322 + *curr = role;
51323 + } else {
51324 + /* 1 -> 2 -> 3 -> 4
51325 + 2 -> 3 -> 4
51326 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51327 + */
51328 + /* trickier case: walk our role's chain until we find
51329 + the role for the start of the current slot's chain */
51330 + tmp = role;
51331 + tmp2 = *curr;
51332 + while (tmp->next && tmp->next != tmp2)
51333 + tmp = tmp->next;
51334 + if (tmp->next == tmp2) {
51335 + /* from example above, we found 3, so just
51336 + replace this slot's chain with ours */
51337 + *curr = role;
51338 + } else {
51339 + /* we didn't find a subset of our role's chain
51340 + in the current slot's chain, so append their
51341 + chain to ours, and set us as the first role in
51342 + the slot's chain
51343 +
51344 + we could fold this case with the case above,
51345 + but making it explicit for clarity
51346 + */
51347 + tmp->next = tmp2;
51348 + *curr = role;
51349 + }
51350 + }
51351 + }
51352 +
51353 + return;
51354 +}
51355 +
51356 +static void
51357 +insert_acl_role_label(struct acl_role_label *role)
51358 +{
51359 + int i;
51360 +
51361 + if (role_list == NULL) {
51362 + role_list = role;
51363 + role->prev = NULL;
51364 + } else {
51365 + role->prev = role_list;
51366 + role_list = role;
51367 + }
51368 +
51369 + /* used for hash chains */
51370 + role->next = NULL;
51371 +
51372 + if (role->roletype & GR_ROLE_DOMAIN) {
51373 + for (i = 0; i < role->domain_child_num; i++)
51374 + __insert_acl_role_label(role, role->domain_children[i]);
51375 + } else
51376 + __insert_acl_role_label(role, role->uidgid);
51377 +}
51378 +
51379 +static int
51380 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51381 +{
51382 + struct name_entry **curr, *nentry;
51383 + struct inodev_entry *ientry;
51384 + unsigned int len = strlen(name);
51385 + unsigned int key = full_name_hash(name, len);
51386 + unsigned int index = key % name_set.n_size;
51387 +
51388 + curr = &name_set.n_hash[index];
51389 +
51390 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51391 + curr = &((*curr)->next);
51392 +
51393 + if (*curr != NULL)
51394 + return 1;
51395 +
51396 + nentry = acl_alloc(sizeof (struct name_entry));
51397 + if (nentry == NULL)
51398 + return 0;
51399 + ientry = acl_alloc(sizeof (struct inodev_entry));
51400 + if (ientry == NULL)
51401 + return 0;
51402 + ientry->nentry = nentry;
51403 +
51404 + nentry->key = key;
51405 + nentry->name = name;
51406 + nentry->inode = inode;
51407 + nentry->device = device;
51408 + nentry->len = len;
51409 + nentry->deleted = deleted;
51410 +
51411 + nentry->prev = NULL;
51412 + curr = &name_set.n_hash[index];
51413 + if (*curr != NULL)
51414 + (*curr)->prev = nentry;
51415 + nentry->next = *curr;
51416 + *curr = nentry;
51417 +
51418 + /* insert us into the table searchable by inode/dev */
51419 + insert_inodev_entry(ientry);
51420 +
51421 + return 1;
51422 +}
51423 +
51424 +static void
51425 +insert_acl_obj_label(struct acl_object_label *obj,
51426 + struct acl_subject_label *subj)
51427 +{
51428 + unsigned int index =
51429 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51430 + struct acl_object_label **curr;
51431 +
51432 +
51433 + obj->prev = NULL;
51434 +
51435 + curr = &subj->obj_hash[index];
51436 + if (*curr != NULL)
51437 + (*curr)->prev = obj;
51438 +
51439 + obj->next = *curr;
51440 + *curr = obj;
51441 +
51442 + return;
51443 +}
51444 +
51445 +static void
51446 +insert_acl_subj_label(struct acl_subject_label *obj,
51447 + struct acl_role_label *role)
51448 +{
51449 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51450 + struct acl_subject_label **curr;
51451 +
51452 + obj->prev = NULL;
51453 +
51454 + curr = &role->subj_hash[index];
51455 + if (*curr != NULL)
51456 + (*curr)->prev = obj;
51457 +
51458 + obj->next = *curr;
51459 + *curr = obj;
51460 +
51461 + return;
51462 +}
51463 +
51464 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51465 +
51466 +static void *
51467 +create_table(__u32 * len, int elementsize)
51468 +{
51469 + unsigned int table_sizes[] = {
51470 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51471 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51472 + 4194301, 8388593, 16777213, 33554393, 67108859
51473 + };
51474 + void *newtable = NULL;
51475 + unsigned int pwr = 0;
51476 +
51477 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51478 + table_sizes[pwr] <= *len)
51479 + pwr++;
51480 +
51481 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51482 + return newtable;
51483 +
51484 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51485 + newtable =
51486 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51487 + else
51488 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51489 +
51490 + *len = table_sizes[pwr];
51491 +
51492 + return newtable;
51493 +}
51494 +
51495 +static int
51496 +init_variables(const struct gr_arg *arg)
51497 +{
51498 + struct task_struct *reaper = init_pid_ns.child_reaper;
51499 + unsigned int stacksize;
51500 +
51501 + subj_map_set.s_size = arg->role_db.num_subjects;
51502 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51503 + name_set.n_size = arg->role_db.num_objects;
51504 + inodev_set.i_size = arg->role_db.num_objects;
51505 +
51506 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51507 + !name_set.n_size || !inodev_set.i_size)
51508 + return 1;
51509 +
51510 + if (!gr_init_uidset())
51511 + return 1;
51512 +
51513 + /* set up the stack that holds allocation info */
51514 +
51515 + stacksize = arg->role_db.num_pointers + 5;
51516 +
51517 + if (!acl_alloc_stack_init(stacksize))
51518 + return 1;
51519 +
51520 + /* grab reference for the real root dentry and vfsmount */
51521 + get_fs_root(reaper->fs, &real_root);
51522 +
51523 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51524 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51525 +#endif
51526 +
51527 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51528 + if (fakefs_obj_rw == NULL)
51529 + return 1;
51530 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51531 +
51532 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51533 + if (fakefs_obj_rwx == NULL)
51534 + return 1;
51535 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51536 +
51537 + subj_map_set.s_hash =
51538 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51539 + acl_role_set.r_hash =
51540 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51541 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51542 + inodev_set.i_hash =
51543 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51544 +
51545 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51546 + !name_set.n_hash || !inodev_set.i_hash)
51547 + return 1;
51548 +
51549 + memset(subj_map_set.s_hash, 0,
51550 + sizeof(struct subject_map *) * subj_map_set.s_size);
51551 + memset(acl_role_set.r_hash, 0,
51552 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51553 + memset(name_set.n_hash, 0,
51554 + sizeof (struct name_entry *) * name_set.n_size);
51555 + memset(inodev_set.i_hash, 0,
51556 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51557 +
51558 + return 0;
51559 +}
51560 +
51561 +/* free information not needed after startup
51562 + currently contains user->kernel pointer mappings for subjects
51563 +*/
51564 +
51565 +static void
51566 +free_init_variables(void)
51567 +{
51568 + __u32 i;
51569 +
51570 + if (subj_map_set.s_hash) {
51571 + for (i = 0; i < subj_map_set.s_size; i++) {
51572 + if (subj_map_set.s_hash[i]) {
51573 + kfree(subj_map_set.s_hash[i]);
51574 + subj_map_set.s_hash[i] = NULL;
51575 + }
51576 + }
51577 +
51578 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51579 + PAGE_SIZE)
51580 + kfree(subj_map_set.s_hash);
51581 + else
51582 + vfree(subj_map_set.s_hash);
51583 + }
51584 +
51585 + return;
51586 +}
51587 +
51588 +static void
51589 +free_variables(void)
51590 +{
51591 + struct acl_subject_label *s;
51592 + struct acl_role_label *r;
51593 + struct task_struct *task, *task2;
51594 + unsigned int x;
51595 +
51596 + gr_clear_learn_entries();
51597 +
51598 + read_lock(&tasklist_lock);
51599 + do_each_thread(task2, task) {
51600 + task->acl_sp_role = 0;
51601 + task->acl_role_id = 0;
51602 + task->acl = NULL;
51603 + task->role = NULL;
51604 + } while_each_thread(task2, task);
51605 + read_unlock(&tasklist_lock);
51606 +
51607 + /* release the reference to the real root dentry and vfsmount */
51608 + path_put(&real_root);
51609 + memset(&real_root, 0, sizeof(real_root));
51610 +
51611 + /* free all object hash tables */
51612 +
51613 + FOR_EACH_ROLE_START(r)
51614 + if (r->subj_hash == NULL)
51615 + goto next_role;
51616 + FOR_EACH_SUBJECT_START(r, s, x)
51617 + if (s->obj_hash == NULL)
51618 + break;
51619 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51620 + kfree(s->obj_hash);
51621 + else
51622 + vfree(s->obj_hash);
51623 + FOR_EACH_SUBJECT_END(s, x)
51624 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51625 + if (s->obj_hash == NULL)
51626 + break;
51627 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51628 + kfree(s->obj_hash);
51629 + else
51630 + vfree(s->obj_hash);
51631 + FOR_EACH_NESTED_SUBJECT_END(s)
51632 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51633 + kfree(r->subj_hash);
51634 + else
51635 + vfree(r->subj_hash);
51636 + r->subj_hash = NULL;
51637 +next_role:
51638 + FOR_EACH_ROLE_END(r)
51639 +
51640 + acl_free_all();
51641 +
51642 + if (acl_role_set.r_hash) {
51643 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51644 + PAGE_SIZE)
51645 + kfree(acl_role_set.r_hash);
51646 + else
51647 + vfree(acl_role_set.r_hash);
51648 + }
51649 + if (name_set.n_hash) {
51650 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51651 + PAGE_SIZE)
51652 + kfree(name_set.n_hash);
51653 + else
51654 + vfree(name_set.n_hash);
51655 + }
51656 +
51657 + if (inodev_set.i_hash) {
51658 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51659 + PAGE_SIZE)
51660 + kfree(inodev_set.i_hash);
51661 + else
51662 + vfree(inodev_set.i_hash);
51663 + }
51664 +
51665 + gr_free_uidset();
51666 +
51667 + memset(&name_set, 0, sizeof (struct name_db));
51668 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51669 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51670 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51671 +
51672 + default_role = NULL;
51673 + kernel_role = NULL;
51674 + role_list = NULL;
51675 +
51676 + return;
51677 +}
51678 +
51679 +static __u32
51680 +count_user_objs(struct acl_object_label *userp)
51681 +{
51682 + struct acl_object_label o_tmp;
51683 + __u32 num = 0;
51684 +
51685 + while (userp) {
51686 + if (copy_from_user(&o_tmp, userp,
51687 + sizeof (struct acl_object_label)))
51688 + break;
51689 +
51690 + userp = o_tmp.prev;
51691 + num++;
51692 + }
51693 +
51694 + return num;
51695 +}
51696 +
51697 +static struct acl_subject_label *
51698 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51699 +
51700 +static int
51701 +copy_user_glob(struct acl_object_label *obj)
51702 +{
51703 + struct acl_object_label *g_tmp, **guser;
51704 + unsigned int len;
51705 + char *tmp;
51706 +
51707 + if (obj->globbed == NULL)
51708 + return 0;
51709 +
51710 + guser = &obj->globbed;
51711 + while (*guser) {
51712 + g_tmp = (struct acl_object_label *)
51713 + acl_alloc(sizeof (struct acl_object_label));
51714 + if (g_tmp == NULL)
51715 + return -ENOMEM;
51716 +
51717 + if (copy_from_user(g_tmp, *guser,
51718 + sizeof (struct acl_object_label)))
51719 + return -EFAULT;
51720 +
51721 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51722 +
51723 + if (!len || len >= PATH_MAX)
51724 + return -EINVAL;
51725 +
51726 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51727 + return -ENOMEM;
51728 +
51729 + if (copy_from_user(tmp, g_tmp->filename, len))
51730 + return -EFAULT;
51731 + tmp[len-1] = '\0';
51732 + g_tmp->filename = tmp;
51733 +
51734 + *guser = g_tmp;
51735 + guser = &(g_tmp->next);
51736 + }
51737 +
51738 + return 0;
51739 +}
51740 +
51741 +static int
51742 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51743 + struct acl_role_label *role)
51744 +{
51745 + struct acl_object_label *o_tmp;
51746 + unsigned int len;
51747 + int ret;
51748 + char *tmp;
51749 +
51750 + while (userp) {
51751 + if ((o_tmp = (struct acl_object_label *)
51752 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51753 + return -ENOMEM;
51754 +
51755 + if (copy_from_user(o_tmp, userp,
51756 + sizeof (struct acl_object_label)))
51757 + return -EFAULT;
51758 +
51759 + userp = o_tmp->prev;
51760 +
51761 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51762 +
51763 + if (!len || len >= PATH_MAX)
51764 + return -EINVAL;
51765 +
51766 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51767 + return -ENOMEM;
51768 +
51769 + if (copy_from_user(tmp, o_tmp->filename, len))
51770 + return -EFAULT;
51771 + tmp[len-1] = '\0';
51772 + o_tmp->filename = tmp;
51773 +
51774 + insert_acl_obj_label(o_tmp, subj);
51775 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51776 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51777 + return -ENOMEM;
51778 +
51779 + ret = copy_user_glob(o_tmp);
51780 + if (ret)
51781 + return ret;
51782 +
51783 + if (o_tmp->nested) {
51784 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51785 + if (IS_ERR(o_tmp->nested))
51786 + return PTR_ERR(o_tmp->nested);
51787 +
51788 + /* insert into nested subject list */
51789 + o_tmp->nested->next = role->hash->first;
51790 + role->hash->first = o_tmp->nested;
51791 + }
51792 + }
51793 +
51794 + return 0;
51795 +}
51796 +
51797 +static __u32
51798 +count_user_subjs(struct acl_subject_label *userp)
51799 +{
51800 + struct acl_subject_label s_tmp;
51801 + __u32 num = 0;
51802 +
51803 + while (userp) {
51804 + if (copy_from_user(&s_tmp, userp,
51805 + sizeof (struct acl_subject_label)))
51806 + break;
51807 +
51808 + userp = s_tmp.prev;
51809 + /* do not count nested subjects against this count, since
51810 + they are not included in the hash table, but are
51811 + attached to objects. We have already counted
51812 + the subjects in userspace for the allocation
51813 + stack
51814 + */
51815 + if (!(s_tmp.mode & GR_NESTED))
51816 + num++;
51817 + }
51818 +
51819 + return num;
51820 +}
51821 +
51822 +static int
51823 +copy_user_allowedips(struct acl_role_label *rolep)
51824 +{
51825 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51826 +
51827 + ruserip = rolep->allowed_ips;
51828 +
51829 + while (ruserip) {
51830 + rlast = rtmp;
51831 +
51832 + if ((rtmp = (struct role_allowed_ip *)
51833 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51834 + return -ENOMEM;
51835 +
51836 + if (copy_from_user(rtmp, ruserip,
51837 + sizeof (struct role_allowed_ip)))
51838 + return -EFAULT;
51839 +
51840 + ruserip = rtmp->prev;
51841 +
51842 + if (!rlast) {
51843 + rtmp->prev = NULL;
51844 + rolep->allowed_ips = rtmp;
51845 + } else {
51846 + rlast->next = rtmp;
51847 + rtmp->prev = rlast;
51848 + }
51849 +
51850 + if (!ruserip)
51851 + rtmp->next = NULL;
51852 + }
51853 +
51854 + return 0;
51855 +}
51856 +
51857 +static int
51858 +copy_user_transitions(struct acl_role_label *rolep)
51859 +{
51860 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51861 +
51862 + unsigned int len;
51863 + char *tmp;
51864 +
51865 + rusertp = rolep->transitions;
51866 +
51867 + while (rusertp) {
51868 + rlast = rtmp;
51869 +
51870 + if ((rtmp = (struct role_transition *)
51871 + acl_alloc(sizeof (struct role_transition))) == NULL)
51872 + return -ENOMEM;
51873 +
51874 + if (copy_from_user(rtmp, rusertp,
51875 + sizeof (struct role_transition)))
51876 + return -EFAULT;
51877 +
51878 + rusertp = rtmp->prev;
51879 +
51880 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51881 +
51882 + if (!len || len >= GR_SPROLE_LEN)
51883 + return -EINVAL;
51884 +
51885 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51886 + return -ENOMEM;
51887 +
51888 + if (copy_from_user(tmp, rtmp->rolename, len))
51889 + return -EFAULT;
51890 + tmp[len-1] = '\0';
51891 + rtmp->rolename = tmp;
51892 +
51893 + if (!rlast) {
51894 + rtmp->prev = NULL;
51895 + rolep->transitions = rtmp;
51896 + } else {
51897 + rlast->next = rtmp;
51898 + rtmp->prev = rlast;
51899 + }
51900 +
51901 + if (!rusertp)
51902 + rtmp->next = NULL;
51903 + }
51904 +
51905 + return 0;
51906 +}
51907 +
51908 +static struct acl_subject_label *
51909 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51910 +{
51911 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51912 + unsigned int len;
51913 + char *tmp;
51914 + __u32 num_objs;
51915 + struct acl_ip_label **i_tmp, *i_utmp2;
51916 + struct gr_hash_struct ghash;
51917 + struct subject_map *subjmap;
51918 + unsigned int i_num;
51919 + int err;
51920 +
51921 + s_tmp = lookup_subject_map(userp);
51922 +
51923 + /* we've already copied this subject into the kernel, just return
51924 + the reference to it, and don't copy it over again
51925 + */
51926 + if (s_tmp)
51927 + return(s_tmp);
51928 +
51929 + if ((s_tmp = (struct acl_subject_label *)
51930 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51931 + return ERR_PTR(-ENOMEM);
51932 +
51933 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51934 + if (subjmap == NULL)
51935 + return ERR_PTR(-ENOMEM);
51936 +
51937 + subjmap->user = userp;
51938 + subjmap->kernel = s_tmp;
51939 + insert_subj_map_entry(subjmap);
51940 +
51941 + if (copy_from_user(s_tmp, userp,
51942 + sizeof (struct acl_subject_label)))
51943 + return ERR_PTR(-EFAULT);
51944 +
51945 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51946 +
51947 + if (!len || len >= PATH_MAX)
51948 + return ERR_PTR(-EINVAL);
51949 +
51950 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51951 + return ERR_PTR(-ENOMEM);
51952 +
51953 + if (copy_from_user(tmp, s_tmp->filename, len))
51954 + return ERR_PTR(-EFAULT);
51955 + tmp[len-1] = '\0';
51956 + s_tmp->filename = tmp;
51957 +
51958 + if (!strcmp(s_tmp->filename, "/"))
51959 + role->root_label = s_tmp;
51960 +
51961 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51962 + return ERR_PTR(-EFAULT);
51963 +
51964 + /* copy user and group transition tables */
51965 +
51966 + if (s_tmp->user_trans_num) {
51967 + uid_t *uidlist;
51968 +
51969 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51970 + if (uidlist == NULL)
51971 + return ERR_PTR(-ENOMEM);
51972 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51973 + return ERR_PTR(-EFAULT);
51974 +
51975 + s_tmp->user_transitions = uidlist;
51976 + }
51977 +
51978 + if (s_tmp->group_trans_num) {
51979 + gid_t *gidlist;
51980 +
51981 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51982 + if (gidlist == NULL)
51983 + return ERR_PTR(-ENOMEM);
51984 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51985 + return ERR_PTR(-EFAULT);
51986 +
51987 + s_tmp->group_transitions = gidlist;
51988 + }
51989 +
51990 + /* set up object hash table */
51991 + num_objs = count_user_objs(ghash.first);
51992 +
51993 + s_tmp->obj_hash_size = num_objs;
51994 + s_tmp->obj_hash =
51995 + (struct acl_object_label **)
51996 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51997 +
51998 + if (!s_tmp->obj_hash)
51999 + return ERR_PTR(-ENOMEM);
52000 +
52001 + memset(s_tmp->obj_hash, 0,
52002 + s_tmp->obj_hash_size *
52003 + sizeof (struct acl_object_label *));
52004 +
52005 + /* add in objects */
52006 + err = copy_user_objs(ghash.first, s_tmp, role);
52007 +
52008 + if (err)
52009 + return ERR_PTR(err);
52010 +
52011 + /* set pointer for parent subject */
52012 + if (s_tmp->parent_subject) {
52013 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52014 +
52015 + if (IS_ERR(s_tmp2))
52016 + return s_tmp2;
52017 +
52018 + s_tmp->parent_subject = s_tmp2;
52019 + }
52020 +
52021 + /* add in ip acls */
52022 +
52023 + if (!s_tmp->ip_num) {
52024 + s_tmp->ips = NULL;
52025 + goto insert;
52026 + }
52027 +
52028 + i_tmp =
52029 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52030 + sizeof (struct acl_ip_label *));
52031 +
52032 + if (!i_tmp)
52033 + return ERR_PTR(-ENOMEM);
52034 +
52035 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52036 + *(i_tmp + i_num) =
52037 + (struct acl_ip_label *)
52038 + acl_alloc(sizeof (struct acl_ip_label));
52039 + if (!*(i_tmp + i_num))
52040 + return ERR_PTR(-ENOMEM);
52041 +
52042 + if (copy_from_user
52043 + (&i_utmp2, s_tmp->ips + i_num,
52044 + sizeof (struct acl_ip_label *)))
52045 + return ERR_PTR(-EFAULT);
52046 +
52047 + if (copy_from_user
52048 + (*(i_tmp + i_num), i_utmp2,
52049 + sizeof (struct acl_ip_label)))
52050 + return ERR_PTR(-EFAULT);
52051 +
52052 + if ((*(i_tmp + i_num))->iface == NULL)
52053 + continue;
52054 +
52055 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52056 + if (!len || len >= IFNAMSIZ)
52057 + return ERR_PTR(-EINVAL);
52058 + tmp = acl_alloc(len);
52059 + if (tmp == NULL)
52060 + return ERR_PTR(-ENOMEM);
52061 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52062 + return ERR_PTR(-EFAULT);
52063 + (*(i_tmp + i_num))->iface = tmp;
52064 + }
52065 +
52066 + s_tmp->ips = i_tmp;
52067 +
52068 +insert:
52069 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52070 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52071 + return ERR_PTR(-ENOMEM);
52072 +
52073 + return s_tmp;
52074 +}
52075 +
52076 +static int
52077 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52078 +{
52079 + struct acl_subject_label s_pre;
52080 + struct acl_subject_label * ret;
52081 + int err;
52082 +
52083 + while (userp) {
52084 + if (copy_from_user(&s_pre, userp,
52085 + sizeof (struct acl_subject_label)))
52086 + return -EFAULT;
52087 +
52088 + /* do not add nested subjects here, add
52089 + while parsing objects
52090 + */
52091 +
52092 + if (s_pre.mode & GR_NESTED) {
52093 + userp = s_pre.prev;
52094 + continue;
52095 + }
52096 +
52097 + ret = do_copy_user_subj(userp, role);
52098 +
52099 + err = PTR_ERR(ret);
52100 + if (IS_ERR(ret))
52101 + return err;
52102 +
52103 + insert_acl_subj_label(ret, role);
52104 +
52105 + userp = s_pre.prev;
52106 + }
52107 +
52108 + return 0;
52109 +}
52110 +
52111 +static int
52112 +copy_user_acl(struct gr_arg *arg)
52113 +{
52114 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52115 + struct sprole_pw *sptmp;
52116 + struct gr_hash_struct *ghash;
52117 + uid_t *domainlist;
52118 + unsigned int r_num;
52119 + unsigned int len;
52120 + char *tmp;
52121 + int err = 0;
52122 + __u16 i;
52123 + __u32 num_subjs;
52124 +
52125 + /* we need a default and kernel role */
52126 + if (arg->role_db.num_roles < 2)
52127 + return -EINVAL;
52128 +
52129 + /* copy special role authentication info from userspace */
52130 +
52131 + num_sprole_pws = arg->num_sprole_pws;
52132 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52133 +
52134 + if (!acl_special_roles && num_sprole_pws)
52135 + return -ENOMEM;
52136 +
52137 + for (i = 0; i < num_sprole_pws; i++) {
52138 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52139 + if (!sptmp)
52140 + return -ENOMEM;
52141 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52142 + sizeof (struct sprole_pw)))
52143 + return -EFAULT;
52144 +
52145 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52146 +
52147 + if (!len || len >= GR_SPROLE_LEN)
52148 + return -EINVAL;
52149 +
52150 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52151 + return -ENOMEM;
52152 +
52153 + if (copy_from_user(tmp, sptmp->rolename, len))
52154 + return -EFAULT;
52155 +
52156 + tmp[len-1] = '\0';
52157 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52158 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52159 +#endif
52160 + sptmp->rolename = tmp;
52161 + acl_special_roles[i] = sptmp;
52162 + }
52163 +
52164 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52165 +
52166 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52167 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52168 +
52169 + if (!r_tmp)
52170 + return -ENOMEM;
52171 +
52172 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52173 + sizeof (struct acl_role_label *)))
52174 + return -EFAULT;
52175 +
52176 + if (copy_from_user(r_tmp, r_utmp2,
52177 + sizeof (struct acl_role_label)))
52178 + return -EFAULT;
52179 +
52180 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52181 +
52182 + if (!len || len >= PATH_MAX)
52183 + return -EINVAL;
52184 +
52185 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52186 + return -ENOMEM;
52187 +
52188 + if (copy_from_user(tmp, r_tmp->rolename, len))
52189 + return -EFAULT;
52190 +
52191 + tmp[len-1] = '\0';
52192 + r_tmp->rolename = tmp;
52193 +
52194 + if (!strcmp(r_tmp->rolename, "default")
52195 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52196 + default_role = r_tmp;
52197 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52198 + kernel_role = r_tmp;
52199 + }
52200 +
52201 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52202 + return -ENOMEM;
52203 +
52204 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52205 + return -EFAULT;
52206 +
52207 + r_tmp->hash = ghash;
52208 +
52209 + num_subjs = count_user_subjs(r_tmp->hash->first);
52210 +
52211 + r_tmp->subj_hash_size = num_subjs;
52212 + r_tmp->subj_hash =
52213 + (struct acl_subject_label **)
52214 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52215 +
52216 + if (!r_tmp->subj_hash)
52217 + return -ENOMEM;
52218 +
52219 + err = copy_user_allowedips(r_tmp);
52220 + if (err)
52221 + return err;
52222 +
52223 + /* copy domain info */
52224 + if (r_tmp->domain_children != NULL) {
52225 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52226 + if (domainlist == NULL)
52227 + return -ENOMEM;
52228 +
52229 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52230 + return -EFAULT;
52231 +
52232 + r_tmp->domain_children = domainlist;
52233 + }
52234 +
52235 + err = copy_user_transitions(r_tmp);
52236 + if (err)
52237 + return err;
52238 +
52239 + memset(r_tmp->subj_hash, 0,
52240 + r_tmp->subj_hash_size *
52241 + sizeof (struct acl_subject_label *));
52242 +
52243 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52244 +
52245 + if (err)
52246 + return err;
52247 +
52248 + /* set nested subject list to null */
52249 + r_tmp->hash->first = NULL;
52250 +
52251 + insert_acl_role_label(r_tmp);
52252 + }
52253 +
52254 + if (default_role == NULL || kernel_role == NULL)
52255 + return -EINVAL;
52256 +
52257 + return err;
52258 +}
52259 +
52260 +static int
52261 +gracl_init(struct gr_arg *args)
52262 +{
52263 + int error = 0;
52264 +
52265 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52266 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52267 +
52268 + if (init_variables(args)) {
52269 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52270 + error = -ENOMEM;
52271 + free_variables();
52272 + goto out;
52273 + }
52274 +
52275 + error = copy_user_acl(args);
52276 + free_init_variables();
52277 + if (error) {
52278 + free_variables();
52279 + goto out;
52280 + }
52281 +
52282 + if ((error = gr_set_acls(0))) {
52283 + free_variables();
52284 + goto out;
52285 + }
52286 +
52287 + pax_open_kernel();
52288 + gr_status |= GR_READY;
52289 + pax_close_kernel();
52290 +
52291 + out:
52292 + return error;
52293 +}
52294 +
52295 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52296 +
52297 +static int
52298 +glob_match(const char *p, const char *n)
52299 +{
52300 + char c;
52301 +
52302 + while ((c = *p++) != '\0') {
52303 + switch (c) {
52304 + case '?':
52305 + if (*n == '\0')
52306 + return 1;
52307 + else if (*n == '/')
52308 + return 1;
52309 + break;
52310 + case '\\':
52311 + if (*n != c)
52312 + return 1;
52313 + break;
52314 + case '*':
52315 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52316 + if (*n == '/')
52317 + return 1;
52318 + else if (c == '?') {
52319 + if (*n == '\0')
52320 + return 1;
52321 + else
52322 + ++n;
52323 + }
52324 + }
52325 + if (c == '\0') {
52326 + return 0;
52327 + } else {
52328 + const char *endp;
52329 +
52330 + if ((endp = strchr(n, '/')) == NULL)
52331 + endp = n + strlen(n);
52332 +
52333 + if (c == '[') {
52334 + for (--p; n < endp; ++n)
52335 + if (!glob_match(p, n))
52336 + return 0;
52337 + } else if (c == '/') {
52338 + while (*n != '\0' && *n != '/')
52339 + ++n;
52340 + if (*n == '/' && !glob_match(p, n + 1))
52341 + return 0;
52342 + } else {
52343 + for (--p; n < endp; ++n)
52344 + if (*n == c && !glob_match(p, n))
52345 + return 0;
52346 + }
52347 +
52348 + return 1;
52349 + }
52350 + case '[':
52351 + {
52352 + int not;
52353 + char cold;
52354 +
52355 + if (*n == '\0' || *n == '/')
52356 + return 1;
52357 +
52358 + not = (*p == '!' || *p == '^');
52359 + if (not)
52360 + ++p;
52361 +
52362 + c = *p++;
52363 + for (;;) {
52364 + unsigned char fn = (unsigned char)*n;
52365 +
52366 + if (c == '\0')
52367 + return 1;
52368 + else {
52369 + if (c == fn)
52370 + goto matched;
52371 + cold = c;
52372 + c = *p++;
52373 +
52374 + if (c == '-' && *p != ']') {
52375 + unsigned char cend = *p++;
52376 +
52377 + if (cend == '\0')
52378 + return 1;
52379 +
52380 + if (cold <= fn && fn <= cend)
52381 + goto matched;
52382 +
52383 + c = *p++;
52384 + }
52385 + }
52386 +
52387 + if (c == ']')
52388 + break;
52389 + }
52390 + if (!not)
52391 + return 1;
52392 + break;
52393 + matched:
52394 + while (c != ']') {
52395 + if (c == '\0')
52396 + return 1;
52397 +
52398 + c = *p++;
52399 + }
52400 + if (not)
52401 + return 1;
52402 + }
52403 + break;
52404 + default:
52405 + if (c != *n)
52406 + return 1;
52407 + }
52408 +
52409 + ++n;
52410 + }
52411 +
52412 + if (*n == '\0')
52413 + return 0;
52414 +
52415 + if (*n == '/')
52416 + return 0;
52417 +
52418 + return 1;
52419 +}
52420 +
52421 +static struct acl_object_label *
52422 +chk_glob_label(struct acl_object_label *globbed,
52423 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52424 +{
52425 + struct acl_object_label *tmp;
52426 +
52427 + if (*path == NULL)
52428 + *path = gr_to_filename_nolock(dentry, mnt);
52429 +
52430 + tmp = globbed;
52431 +
52432 + while (tmp) {
52433 + if (!glob_match(tmp->filename, *path))
52434 + return tmp;
52435 + tmp = tmp->next;
52436 + }
52437 +
52438 + return NULL;
52439 +}
52440 +
52441 +static struct acl_object_label *
52442 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52443 + const ino_t curr_ino, const dev_t curr_dev,
52444 + const struct acl_subject_label *subj, char **path, const int checkglob)
52445 +{
52446 + struct acl_subject_label *tmpsubj;
52447 + struct acl_object_label *retval;
52448 + struct acl_object_label *retval2;
52449 +
52450 + tmpsubj = (struct acl_subject_label *) subj;
52451 + read_lock(&gr_inode_lock);
52452 + do {
52453 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52454 + if (retval) {
52455 + if (checkglob && retval->globbed) {
52456 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52457 + if (retval2)
52458 + retval = retval2;
52459 + }
52460 + break;
52461 + }
52462 + } while ((tmpsubj = tmpsubj->parent_subject));
52463 + read_unlock(&gr_inode_lock);
52464 +
52465 + return retval;
52466 +}
52467 +
52468 +static __inline__ struct acl_object_label *
52469 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52470 + struct dentry *curr_dentry,
52471 + const struct acl_subject_label *subj, char **path, const int checkglob)
52472 +{
52473 + int newglob = checkglob;
52474 + ino_t inode;
52475 + dev_t device;
52476 +
52477 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52478 + as we don't want a / * rule to match instead of the / object
52479 + don't do this for create lookups that call this function though, since they're looking up
52480 + on the parent and thus need globbing checks on all paths
52481 + */
52482 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52483 + newglob = GR_NO_GLOB;
52484 +
52485 + spin_lock(&curr_dentry->d_lock);
52486 + inode = curr_dentry->d_inode->i_ino;
52487 + device = __get_dev(curr_dentry);
52488 + spin_unlock(&curr_dentry->d_lock);
52489 +
52490 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52491 +}
52492 +
52493 +static struct acl_object_label *
52494 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52495 + const struct acl_subject_label *subj, char *path, const int checkglob)
52496 +{
52497 + struct dentry *dentry = (struct dentry *) l_dentry;
52498 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52499 + struct mount *real_mnt = real_mount(mnt);
52500 + struct acl_object_label *retval;
52501 + struct dentry *parent;
52502 +
52503 + write_seqlock(&rename_lock);
52504 + br_read_lock(vfsmount_lock);
52505 +
52506 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52507 +#ifdef CONFIG_NET
52508 + mnt == sock_mnt ||
52509 +#endif
52510 +#ifdef CONFIG_HUGETLBFS
52511 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52512 +#endif
52513 + /* ignore Eric Biederman */
52514 + IS_PRIVATE(l_dentry->d_inode))) {
52515 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52516 + goto out;
52517 + }
52518 +
52519 + for (;;) {
52520 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52521 + break;
52522 +
52523 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52524 + if (!mnt_has_parent(real_mnt))
52525 + break;
52526 +
52527 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52528 + if (retval != NULL)
52529 + goto out;
52530 +
52531 + dentry = real_mnt->mnt_mountpoint;
52532 + real_mnt = real_mnt->mnt_parent;
52533 + mnt = &real_mnt->mnt;
52534 + continue;
52535 + }
52536 +
52537 + parent = dentry->d_parent;
52538 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52539 + if (retval != NULL)
52540 + goto out;
52541 +
52542 + dentry = parent;
52543 + }
52544 +
52545 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52546 +
52547 + /* real_root is pinned so we don't have to hold a reference */
52548 + if (retval == NULL)
52549 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52550 +out:
52551 + br_read_unlock(vfsmount_lock);
52552 + write_sequnlock(&rename_lock);
52553 +
52554 + BUG_ON(retval == NULL);
52555 +
52556 + return retval;
52557 +}
52558 +
52559 +static __inline__ struct acl_object_label *
52560 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52561 + const struct acl_subject_label *subj)
52562 +{
52563 + char *path = NULL;
52564 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52565 +}
52566 +
52567 +static __inline__ struct acl_object_label *
52568 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52569 + const struct acl_subject_label *subj)
52570 +{
52571 + char *path = NULL;
52572 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52573 +}
52574 +
52575 +static __inline__ struct acl_object_label *
52576 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52577 + const struct acl_subject_label *subj, char *path)
52578 +{
52579 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52580 +}
52581 +
52582 +static struct acl_subject_label *
52583 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52584 + const struct acl_role_label *role)
52585 +{
52586 + struct dentry *dentry = (struct dentry *) l_dentry;
52587 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52588 + struct mount *real_mnt = real_mount(mnt);
52589 + struct acl_subject_label *retval;
52590 + struct dentry *parent;
52591 +
52592 + write_seqlock(&rename_lock);
52593 + br_read_lock(vfsmount_lock);
52594 +
52595 + for (;;) {
52596 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52597 + break;
52598 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52599 + if (!mnt_has_parent(real_mnt))
52600 + break;
52601 +
52602 + spin_lock(&dentry->d_lock);
52603 + read_lock(&gr_inode_lock);
52604 + retval =
52605 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52606 + __get_dev(dentry), role);
52607 + read_unlock(&gr_inode_lock);
52608 + spin_unlock(&dentry->d_lock);
52609 + if (retval != NULL)
52610 + goto out;
52611 +
52612 + dentry = real_mnt->mnt_mountpoint;
52613 + real_mnt = real_mnt->mnt_parent;
52614 + mnt = &real_mnt->mnt;
52615 + continue;
52616 + }
52617 +
52618 + spin_lock(&dentry->d_lock);
52619 + read_lock(&gr_inode_lock);
52620 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52621 + __get_dev(dentry), role);
52622 + read_unlock(&gr_inode_lock);
52623 + parent = dentry->d_parent;
52624 + spin_unlock(&dentry->d_lock);
52625 +
52626 + if (retval != NULL)
52627 + goto out;
52628 +
52629 + dentry = parent;
52630 + }
52631 +
52632 + spin_lock(&dentry->d_lock);
52633 + read_lock(&gr_inode_lock);
52634 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52635 + __get_dev(dentry), role);
52636 + read_unlock(&gr_inode_lock);
52637 + spin_unlock(&dentry->d_lock);
52638 +
52639 + if (unlikely(retval == NULL)) {
52640 + /* real_root is pinned, we don't need to hold a reference */
52641 + read_lock(&gr_inode_lock);
52642 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52643 + __get_dev(real_root.dentry), role);
52644 + read_unlock(&gr_inode_lock);
52645 + }
52646 +out:
52647 + br_read_unlock(vfsmount_lock);
52648 + write_sequnlock(&rename_lock);
52649 +
52650 + BUG_ON(retval == NULL);
52651 +
52652 + return retval;
52653 +}
52654 +
52655 +static void
52656 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52657 +{
52658 + struct task_struct *task = current;
52659 + const struct cred *cred = current_cred();
52660 +
52661 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52662 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52663 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52664 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52665 +
52666 + return;
52667 +}
52668 +
52669 +static void
52670 +gr_log_learn_id_change(const char type, const unsigned int real,
52671 + const unsigned int effective, const unsigned int fs)
52672 +{
52673 + struct task_struct *task = current;
52674 + const struct cred *cred = current_cred();
52675 +
52676 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52677 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52678 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52679 + type, real, effective, fs, &task->signal->saved_ip);
52680 +
52681 + return;
52682 +}
52683 +
52684 +__u32
52685 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52686 + const struct vfsmount * mnt)
52687 +{
52688 + __u32 retval = mode;
52689 + struct acl_subject_label *curracl;
52690 + struct acl_object_label *currobj;
52691 +
52692 + if (unlikely(!(gr_status & GR_READY)))
52693 + return (mode & ~GR_AUDITS);
52694 +
52695 + curracl = current->acl;
52696 +
52697 + currobj = chk_obj_label(dentry, mnt, curracl);
52698 + retval = currobj->mode & mode;
52699 +
52700 + /* if we're opening a specified transfer file for writing
52701 + (e.g. /dev/initctl), then transfer our role to init
52702 + */
52703 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52704 + current->role->roletype & GR_ROLE_PERSIST)) {
52705 + struct task_struct *task = init_pid_ns.child_reaper;
52706 +
52707 + if (task->role != current->role) {
52708 + task->acl_sp_role = 0;
52709 + task->acl_role_id = current->acl_role_id;
52710 + task->role = current->role;
52711 + rcu_read_lock();
52712 + read_lock(&grsec_exec_file_lock);
52713 + gr_apply_subject_to_task(task);
52714 + read_unlock(&grsec_exec_file_lock);
52715 + rcu_read_unlock();
52716 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52717 + }
52718 + }
52719 +
52720 + if (unlikely
52721 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52722 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52723 + __u32 new_mode = mode;
52724 +
52725 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52726 +
52727 + retval = new_mode;
52728 +
52729 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52730 + new_mode |= GR_INHERIT;
52731 +
52732 + if (!(mode & GR_NOLEARN))
52733 + gr_log_learn(dentry, mnt, new_mode);
52734 + }
52735 +
52736 + return retval;
52737 +}
52738 +
52739 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52740 + const struct dentry *parent,
52741 + const struct vfsmount *mnt)
52742 +{
52743 + struct name_entry *match;
52744 + struct acl_object_label *matchpo;
52745 + struct acl_subject_label *curracl;
52746 + char *path;
52747 +
52748 + if (unlikely(!(gr_status & GR_READY)))
52749 + return NULL;
52750 +
52751 + preempt_disable();
52752 + path = gr_to_filename_rbac(new_dentry, mnt);
52753 + match = lookup_name_entry_create(path);
52754 +
52755 + curracl = current->acl;
52756 +
52757 + if (match) {
52758 + read_lock(&gr_inode_lock);
52759 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52760 + read_unlock(&gr_inode_lock);
52761 +
52762 + if (matchpo) {
52763 + preempt_enable();
52764 + return matchpo;
52765 + }
52766 + }
52767 +
52768 + // lookup parent
52769 +
52770 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52771 +
52772 + preempt_enable();
52773 + return matchpo;
52774 +}
52775 +
52776 +__u32
52777 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52778 + const struct vfsmount * mnt, const __u32 mode)
52779 +{
52780 + struct acl_object_label *matchpo;
52781 + __u32 retval;
52782 +
52783 + if (unlikely(!(gr_status & GR_READY)))
52784 + return (mode & ~GR_AUDITS);
52785 +
52786 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52787 +
52788 + retval = matchpo->mode & mode;
52789 +
52790 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52791 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52792 + __u32 new_mode = mode;
52793 +
52794 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52795 +
52796 + gr_log_learn(new_dentry, mnt, new_mode);
52797 + return new_mode;
52798 + }
52799 +
52800 + return retval;
52801 +}
52802 +
52803 +__u32
52804 +gr_check_link(const struct dentry * new_dentry,
52805 + const struct dentry * parent_dentry,
52806 + const struct vfsmount * parent_mnt,
52807 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52808 +{
52809 + struct acl_object_label *obj;
52810 + __u32 oldmode, newmode;
52811 + __u32 needmode;
52812 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52813 + GR_DELETE | GR_INHERIT;
52814 +
52815 + if (unlikely(!(gr_status & GR_READY)))
52816 + return (GR_CREATE | GR_LINK);
52817 +
52818 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52819 + oldmode = obj->mode;
52820 +
52821 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52822 + newmode = obj->mode;
52823 +
52824 + needmode = newmode & checkmodes;
52825 +
52826 + // old name for hardlink must have at least the permissions of the new name
52827 + if ((oldmode & needmode) != needmode)
52828 + goto bad;
52829 +
52830 + // if old name had restrictions/auditing, make sure the new name does as well
52831 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52832 +
52833 + // don't allow hardlinking of suid/sgid files without permission
52834 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52835 + needmode |= GR_SETID;
52836 +
52837 + if ((newmode & needmode) != needmode)
52838 + goto bad;
52839 +
52840 + // enforce minimum permissions
52841 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52842 + return newmode;
52843 +bad:
52844 + needmode = oldmode;
52845 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52846 + needmode |= GR_SETID;
52847 +
52848 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52849 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52850 + return (GR_CREATE | GR_LINK);
52851 + } else if (newmode & GR_SUPPRESS)
52852 + return GR_SUPPRESS;
52853 + else
52854 + return 0;
52855 +}
52856 +
52857 +int
52858 +gr_check_hidden_task(const struct task_struct *task)
52859 +{
52860 + if (unlikely(!(gr_status & GR_READY)))
52861 + return 0;
52862 +
52863 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52864 + return 1;
52865 +
52866 + return 0;
52867 +}
52868 +
52869 +int
52870 +gr_check_protected_task(const struct task_struct *task)
52871 +{
52872 + if (unlikely(!(gr_status & GR_READY) || !task))
52873 + return 0;
52874 +
52875 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52876 + task->acl != current->acl)
52877 + return 1;
52878 +
52879 + return 0;
52880 +}
52881 +
52882 +int
52883 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52884 +{
52885 + struct task_struct *p;
52886 + int ret = 0;
52887 +
52888 + if (unlikely(!(gr_status & GR_READY) || !pid))
52889 + return ret;
52890 +
52891 + read_lock(&tasklist_lock);
52892 + do_each_pid_task(pid, type, p) {
52893 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52894 + p->acl != current->acl) {
52895 + ret = 1;
52896 + goto out;
52897 + }
52898 + } while_each_pid_task(pid, type, p);
52899 +out:
52900 + read_unlock(&tasklist_lock);
52901 +
52902 + return ret;
52903 +}
52904 +
52905 +void
52906 +gr_copy_label(struct task_struct *tsk)
52907 +{
52908 + tsk->signal->used_accept = 0;
52909 + tsk->acl_sp_role = 0;
52910 + tsk->acl_role_id = current->acl_role_id;
52911 + tsk->acl = current->acl;
52912 + tsk->role = current->role;
52913 + tsk->signal->curr_ip = current->signal->curr_ip;
52914 + tsk->signal->saved_ip = current->signal->saved_ip;
52915 + if (current->exec_file)
52916 + get_file(current->exec_file);
52917 + tsk->exec_file = current->exec_file;
52918 + tsk->is_writable = current->is_writable;
52919 + if (unlikely(current->signal->used_accept)) {
52920 + current->signal->curr_ip = 0;
52921 + current->signal->saved_ip = 0;
52922 + }
52923 +
52924 + return;
52925 +}
52926 +
52927 +static void
52928 +gr_set_proc_res(struct task_struct *task)
52929 +{
52930 + struct acl_subject_label *proc;
52931 + unsigned short i;
52932 +
52933 + proc = task->acl;
52934 +
52935 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52936 + return;
52937 +
52938 + for (i = 0; i < RLIM_NLIMITS; i++) {
52939 + if (!(proc->resmask & (1 << i)))
52940 + continue;
52941 +
52942 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52943 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52944 + }
52945 +
52946 + return;
52947 +}
52948 +
52949 +extern int __gr_process_user_ban(struct user_struct *user);
52950 +
52951 +int
52952 +gr_check_user_change(int real, int effective, int fs)
52953 +{
52954 + unsigned int i;
52955 + __u16 num;
52956 + uid_t *uidlist;
52957 + int curuid;
52958 + int realok = 0;
52959 + int effectiveok = 0;
52960 + int fsok = 0;
52961 +
52962 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52963 + struct user_struct *user;
52964 +
52965 + if (real == -1)
52966 + goto skipit;
52967 +
52968 + user = find_user(real);
52969 + if (user == NULL)
52970 + goto skipit;
52971 +
52972 + if (__gr_process_user_ban(user)) {
52973 + /* for find_user */
52974 + free_uid(user);
52975 + return 1;
52976 + }
52977 +
52978 + /* for find_user */
52979 + free_uid(user);
52980 +
52981 +skipit:
52982 +#endif
52983 +
52984 + if (unlikely(!(gr_status & GR_READY)))
52985 + return 0;
52986 +
52987 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52988 + gr_log_learn_id_change('u', real, effective, fs);
52989 +
52990 + num = current->acl->user_trans_num;
52991 + uidlist = current->acl->user_transitions;
52992 +
52993 + if (uidlist == NULL)
52994 + return 0;
52995 +
52996 + if (real == -1)
52997 + realok = 1;
52998 + if (effective == -1)
52999 + effectiveok = 1;
53000 + if (fs == -1)
53001 + fsok = 1;
53002 +
53003 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53004 + for (i = 0; i < num; i++) {
53005 + curuid = (int)uidlist[i];
53006 + if (real == curuid)
53007 + realok = 1;
53008 + if (effective == curuid)
53009 + effectiveok = 1;
53010 + if (fs == curuid)
53011 + fsok = 1;
53012 + }
53013 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53014 + for (i = 0; i < num; i++) {
53015 + curuid = (int)uidlist[i];
53016 + if (real == curuid)
53017 + break;
53018 + if (effective == curuid)
53019 + break;
53020 + if (fs == curuid)
53021 + break;
53022 + }
53023 + /* not in deny list */
53024 + if (i == num) {
53025 + realok = 1;
53026 + effectiveok = 1;
53027 + fsok = 1;
53028 + }
53029 + }
53030 +
53031 + if (realok && effectiveok && fsok)
53032 + return 0;
53033 + else {
53034 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53035 + return 1;
53036 + }
53037 +}
53038 +
53039 +int
53040 +gr_check_group_change(int real, int effective, int fs)
53041 +{
53042 + unsigned int i;
53043 + __u16 num;
53044 + gid_t *gidlist;
53045 + int curgid;
53046 + int realok = 0;
53047 + int effectiveok = 0;
53048 + int fsok = 0;
53049 +
53050 + if (unlikely(!(gr_status & GR_READY)))
53051 + return 0;
53052 +
53053 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53054 + gr_log_learn_id_change('g', real, effective, fs);
53055 +
53056 + num = current->acl->group_trans_num;
53057 + gidlist = current->acl->group_transitions;
53058 +
53059 + if (gidlist == NULL)
53060 + return 0;
53061 +
53062 + if (real == -1)
53063 + realok = 1;
53064 + if (effective == -1)
53065 + effectiveok = 1;
53066 + if (fs == -1)
53067 + fsok = 1;
53068 +
53069 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
53070 + for (i = 0; i < num; i++) {
53071 + curgid = (int)gidlist[i];
53072 + if (real == curgid)
53073 + realok = 1;
53074 + if (effective == curgid)
53075 + effectiveok = 1;
53076 + if (fs == curgid)
53077 + fsok = 1;
53078 + }
53079 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53080 + for (i = 0; i < num; i++) {
53081 + curgid = (int)gidlist[i];
53082 + if (real == curgid)
53083 + break;
53084 + if (effective == curgid)
53085 + break;
53086 + if (fs == curgid)
53087 + break;
53088 + }
53089 + /* not in deny list */
53090 + if (i == num) {
53091 + realok = 1;
53092 + effectiveok = 1;
53093 + fsok = 1;
53094 + }
53095 + }
53096 +
53097 + if (realok && effectiveok && fsok)
53098 + return 0;
53099 + else {
53100 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53101 + return 1;
53102 + }
53103 +}
53104 +
53105 +extern int gr_acl_is_capable(const int cap);
53106 +
53107 +void
53108 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53109 +{
53110 + struct acl_role_label *role = task->role;
53111 + struct acl_subject_label *subj = NULL;
53112 + struct acl_object_label *obj;
53113 + struct file *filp;
53114 +
53115 + if (unlikely(!(gr_status & GR_READY)))
53116 + return;
53117 +
53118 + filp = task->exec_file;
53119 +
53120 + /* kernel process, we'll give them the kernel role */
53121 + if (unlikely(!filp)) {
53122 + task->role = kernel_role;
53123 + task->acl = kernel_role->root_label;
53124 + return;
53125 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53126 + role = lookup_acl_role_label(task, uid, gid);
53127 +
53128 + /* don't change the role if we're not a privileged process */
53129 + if (role && task->role != role &&
53130 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53131 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53132 + return;
53133 +
53134 + /* perform subject lookup in possibly new role
53135 + we can use this result below in the case where role == task->role
53136 + */
53137 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53138 +
53139 + /* if we changed uid/gid, but result in the same role
53140 + and are using inheritance, don't lose the inherited subject
53141 + if current subject is other than what normal lookup
53142 + would result in, we arrived via inheritance, don't
53143 + lose subject
53144 + */
53145 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53146 + (subj == task->acl)))
53147 + task->acl = subj;
53148 +
53149 + task->role = role;
53150 +
53151 + task->is_writable = 0;
53152 +
53153 + /* ignore additional mmap checks for processes that are writable
53154 + by the default ACL */
53155 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53156 + if (unlikely(obj->mode & GR_WRITE))
53157 + task->is_writable = 1;
53158 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53159 + if (unlikely(obj->mode & GR_WRITE))
53160 + task->is_writable = 1;
53161 +
53162 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53163 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53164 +#endif
53165 +
53166 + gr_set_proc_res(task);
53167 +
53168 + return;
53169 +}
53170 +
53171 +int
53172 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53173 + const int unsafe_flags)
53174 +{
53175 + struct task_struct *task = current;
53176 + struct acl_subject_label *newacl;
53177 + struct acl_object_label *obj;
53178 + __u32 retmode;
53179 +
53180 + if (unlikely(!(gr_status & GR_READY)))
53181 + return 0;
53182 +
53183 + newacl = chk_subj_label(dentry, mnt, task->role);
53184 +
53185 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53186 + did an exec
53187 + */
53188 + rcu_read_lock();
53189 + read_lock(&tasklist_lock);
53190 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53191 + (task->parent->acl->mode & GR_POVERRIDE))) {
53192 + read_unlock(&tasklist_lock);
53193 + rcu_read_unlock();
53194 + goto skip_check;
53195 + }
53196 + read_unlock(&tasklist_lock);
53197 + rcu_read_unlock();
53198 +
53199 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53200 + !(task->role->roletype & GR_ROLE_GOD) &&
53201 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53202 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53203 + if (unsafe_flags & LSM_UNSAFE_SHARE)
53204 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53205 + else
53206 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53207 + return -EACCES;
53208 + }
53209 +
53210 +skip_check:
53211 +
53212 + obj = chk_obj_label(dentry, mnt, task->acl);
53213 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53214 +
53215 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53216 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53217 + if (obj->nested)
53218 + task->acl = obj->nested;
53219 + else
53220 + task->acl = newacl;
53221 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53222 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53223 +
53224 + task->is_writable = 0;
53225 +
53226 + /* ignore additional mmap checks for processes that are writable
53227 + by the default ACL */
53228 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53229 + if (unlikely(obj->mode & GR_WRITE))
53230 + task->is_writable = 1;
53231 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53232 + if (unlikely(obj->mode & GR_WRITE))
53233 + task->is_writable = 1;
53234 +
53235 + gr_set_proc_res(task);
53236 +
53237 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53238 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53239 +#endif
53240 + return 0;
53241 +}
53242 +
53243 +/* always called with valid inodev ptr */
53244 +static void
53245 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53246 +{
53247 + struct acl_object_label *matchpo;
53248 + struct acl_subject_label *matchps;
53249 + struct acl_subject_label *subj;
53250 + struct acl_role_label *role;
53251 + unsigned int x;
53252 +
53253 + FOR_EACH_ROLE_START(role)
53254 + FOR_EACH_SUBJECT_START(role, subj, x)
53255 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53256 + matchpo->mode |= GR_DELETED;
53257 + FOR_EACH_SUBJECT_END(subj,x)
53258 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53259 + if (subj->inode == ino && subj->device == dev)
53260 + subj->mode |= GR_DELETED;
53261 + FOR_EACH_NESTED_SUBJECT_END(subj)
53262 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53263 + matchps->mode |= GR_DELETED;
53264 + FOR_EACH_ROLE_END(role)
53265 +
53266 + inodev->nentry->deleted = 1;
53267 +
53268 + return;
53269 +}
53270 +
53271 +void
53272 +gr_handle_delete(const ino_t ino, const dev_t dev)
53273 +{
53274 + struct inodev_entry *inodev;
53275 +
53276 + if (unlikely(!(gr_status & GR_READY)))
53277 + return;
53278 +
53279 + write_lock(&gr_inode_lock);
53280 + inodev = lookup_inodev_entry(ino, dev);
53281 + if (inodev != NULL)
53282 + do_handle_delete(inodev, ino, dev);
53283 + write_unlock(&gr_inode_lock);
53284 +
53285 + return;
53286 +}
53287 +
53288 +static void
53289 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53290 + const ino_t newinode, const dev_t newdevice,
53291 + struct acl_subject_label *subj)
53292 +{
53293 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53294 + struct acl_object_label *match;
53295 +
53296 + match = subj->obj_hash[index];
53297 +
53298 + while (match && (match->inode != oldinode ||
53299 + match->device != olddevice ||
53300 + !(match->mode & GR_DELETED)))
53301 + match = match->next;
53302 +
53303 + if (match && (match->inode == oldinode)
53304 + && (match->device == olddevice)
53305 + && (match->mode & GR_DELETED)) {
53306 + if (match->prev == NULL) {
53307 + subj->obj_hash[index] = match->next;
53308 + if (match->next != NULL)
53309 + match->next->prev = NULL;
53310 + } else {
53311 + match->prev->next = match->next;
53312 + if (match->next != NULL)
53313 + match->next->prev = match->prev;
53314 + }
53315 + match->prev = NULL;
53316 + match->next = NULL;
53317 + match->inode = newinode;
53318 + match->device = newdevice;
53319 + match->mode &= ~GR_DELETED;
53320 +
53321 + insert_acl_obj_label(match, subj);
53322 + }
53323 +
53324 + return;
53325 +}
53326 +
53327 +static void
53328 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53329 + const ino_t newinode, const dev_t newdevice,
53330 + struct acl_role_label *role)
53331 +{
53332 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53333 + struct acl_subject_label *match;
53334 +
53335 + match = role->subj_hash[index];
53336 +
53337 + while (match && (match->inode != oldinode ||
53338 + match->device != olddevice ||
53339 + !(match->mode & GR_DELETED)))
53340 + match = match->next;
53341 +
53342 + if (match && (match->inode == oldinode)
53343 + && (match->device == olddevice)
53344 + && (match->mode & GR_DELETED)) {
53345 + if (match->prev == NULL) {
53346 + role->subj_hash[index] = match->next;
53347 + if (match->next != NULL)
53348 + match->next->prev = NULL;
53349 + } else {
53350 + match->prev->next = match->next;
53351 + if (match->next != NULL)
53352 + match->next->prev = match->prev;
53353 + }
53354 + match->prev = NULL;
53355 + match->next = NULL;
53356 + match->inode = newinode;
53357 + match->device = newdevice;
53358 + match->mode &= ~GR_DELETED;
53359 +
53360 + insert_acl_subj_label(match, role);
53361 + }
53362 +
53363 + return;
53364 +}
53365 +
53366 +static void
53367 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53368 + const ino_t newinode, const dev_t newdevice)
53369 +{
53370 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53371 + struct inodev_entry *match;
53372 +
53373 + match = inodev_set.i_hash[index];
53374 +
53375 + while (match && (match->nentry->inode != oldinode ||
53376 + match->nentry->device != olddevice || !match->nentry->deleted))
53377 + match = match->next;
53378 +
53379 + if (match && (match->nentry->inode == oldinode)
53380 + && (match->nentry->device == olddevice) &&
53381 + match->nentry->deleted) {
53382 + if (match->prev == NULL) {
53383 + inodev_set.i_hash[index] = match->next;
53384 + if (match->next != NULL)
53385 + match->next->prev = NULL;
53386 + } else {
53387 + match->prev->next = match->next;
53388 + if (match->next != NULL)
53389 + match->next->prev = match->prev;
53390 + }
53391 + match->prev = NULL;
53392 + match->next = NULL;
53393 + match->nentry->inode = newinode;
53394 + match->nentry->device = newdevice;
53395 + match->nentry->deleted = 0;
53396 +
53397 + insert_inodev_entry(match);
53398 + }
53399 +
53400 + return;
53401 +}
53402 +
53403 +static void
53404 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53405 +{
53406 + struct acl_subject_label *subj;
53407 + struct acl_role_label *role;
53408 + unsigned int x;
53409 +
53410 + FOR_EACH_ROLE_START(role)
53411 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53412 +
53413 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53414 + if ((subj->inode == ino) && (subj->device == dev)) {
53415 + subj->inode = ino;
53416 + subj->device = dev;
53417 + }
53418 + FOR_EACH_NESTED_SUBJECT_END(subj)
53419 + FOR_EACH_SUBJECT_START(role, subj, x)
53420 + update_acl_obj_label(matchn->inode, matchn->device,
53421 + ino, dev, subj);
53422 + FOR_EACH_SUBJECT_END(subj,x)
53423 + FOR_EACH_ROLE_END(role)
53424 +
53425 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53426 +
53427 + return;
53428 +}
53429 +
53430 +static void
53431 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53432 + const struct vfsmount *mnt)
53433 +{
53434 + ino_t ino = dentry->d_inode->i_ino;
53435 + dev_t dev = __get_dev(dentry);
53436 +
53437 + __do_handle_create(matchn, ino, dev);
53438 +
53439 + return;
53440 +}
53441 +
53442 +void
53443 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53444 +{
53445 + struct name_entry *matchn;
53446 +
53447 + if (unlikely(!(gr_status & GR_READY)))
53448 + return;
53449 +
53450 + preempt_disable();
53451 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53452 +
53453 + if (unlikely((unsigned long)matchn)) {
53454 + write_lock(&gr_inode_lock);
53455 + do_handle_create(matchn, dentry, mnt);
53456 + write_unlock(&gr_inode_lock);
53457 + }
53458 + preempt_enable();
53459 +
53460 + return;
53461 +}
53462 +
53463 +void
53464 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53465 +{
53466 + struct name_entry *matchn;
53467 +
53468 + if (unlikely(!(gr_status & GR_READY)))
53469 + return;
53470 +
53471 + preempt_disable();
53472 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53473 +
53474 + if (unlikely((unsigned long)matchn)) {
53475 + write_lock(&gr_inode_lock);
53476 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53477 + write_unlock(&gr_inode_lock);
53478 + }
53479 + preempt_enable();
53480 +
53481 + return;
53482 +}
53483 +
53484 +void
53485 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53486 + struct dentry *old_dentry,
53487 + struct dentry *new_dentry,
53488 + struct vfsmount *mnt, const __u8 replace)
53489 +{
53490 + struct name_entry *matchn;
53491 + struct inodev_entry *inodev;
53492 + struct inode *inode = new_dentry->d_inode;
53493 + ino_t old_ino = old_dentry->d_inode->i_ino;
53494 + dev_t old_dev = __get_dev(old_dentry);
53495 +
53496 + /* vfs_rename swaps the name and parent link for old_dentry and
53497 + new_dentry
53498 + at this point, old_dentry has the new name, parent link, and inode
53499 + for the renamed file
53500 + if a file is being replaced by a rename, new_dentry has the inode
53501 + and name for the replaced file
53502 + */
53503 +
53504 + if (unlikely(!(gr_status & GR_READY)))
53505 + return;
53506 +
53507 + preempt_disable();
53508 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53509 +
53510 + /* we wouldn't have to check d_inode if it weren't for
53511 + NFS silly-renaming
53512 + */
53513 +
53514 + write_lock(&gr_inode_lock);
53515 + if (unlikely(replace && inode)) {
53516 + ino_t new_ino = inode->i_ino;
53517 + dev_t new_dev = __get_dev(new_dentry);
53518 +
53519 + inodev = lookup_inodev_entry(new_ino, new_dev);
53520 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53521 + do_handle_delete(inodev, new_ino, new_dev);
53522 + }
53523 +
53524 + inodev = lookup_inodev_entry(old_ino, old_dev);
53525 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53526 + do_handle_delete(inodev, old_ino, old_dev);
53527 +
53528 + if (unlikely((unsigned long)matchn))
53529 + do_handle_create(matchn, old_dentry, mnt);
53530 +
53531 + write_unlock(&gr_inode_lock);
53532 + preempt_enable();
53533 +
53534 + return;
53535 +}
53536 +
53537 +static int
53538 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53539 + unsigned char **sum)
53540 +{
53541 + struct acl_role_label *r;
53542 + struct role_allowed_ip *ipp;
53543 + struct role_transition *trans;
53544 + unsigned int i;
53545 + int found = 0;
53546 + u32 curr_ip = current->signal->curr_ip;
53547 +
53548 + current->signal->saved_ip = curr_ip;
53549 +
53550 + /* check transition table */
53551 +
53552 + for (trans = current->role->transitions; trans; trans = trans->next) {
53553 + if (!strcmp(rolename, trans->rolename)) {
53554 + found = 1;
53555 + break;
53556 + }
53557 + }
53558 +
53559 + if (!found)
53560 + return 0;
53561 +
53562 + /* handle special roles that do not require authentication
53563 + and check ip */
53564 +
53565 + FOR_EACH_ROLE_START(r)
53566 + if (!strcmp(rolename, r->rolename) &&
53567 + (r->roletype & GR_ROLE_SPECIAL)) {
53568 + found = 0;
53569 + if (r->allowed_ips != NULL) {
53570 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53571 + if ((ntohl(curr_ip) & ipp->netmask) ==
53572 + (ntohl(ipp->addr) & ipp->netmask))
53573 + found = 1;
53574 + }
53575 + } else
53576 + found = 2;
53577 + if (!found)
53578 + return 0;
53579 +
53580 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53581 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53582 + *salt = NULL;
53583 + *sum = NULL;
53584 + return 1;
53585 + }
53586 + }
53587 + FOR_EACH_ROLE_END(r)
53588 +
53589 + for (i = 0; i < num_sprole_pws; i++) {
53590 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53591 + *salt = acl_special_roles[i]->salt;
53592 + *sum = acl_special_roles[i]->sum;
53593 + return 1;
53594 + }
53595 + }
53596 +
53597 + return 0;
53598 +}
53599 +
53600 +static void
53601 +assign_special_role(char *rolename)
53602 +{
53603 + struct acl_object_label *obj;
53604 + struct acl_role_label *r;
53605 + struct acl_role_label *assigned = NULL;
53606 + struct task_struct *tsk;
53607 + struct file *filp;
53608 +
53609 + FOR_EACH_ROLE_START(r)
53610 + if (!strcmp(rolename, r->rolename) &&
53611 + (r->roletype & GR_ROLE_SPECIAL)) {
53612 + assigned = r;
53613 + break;
53614 + }
53615 + FOR_EACH_ROLE_END(r)
53616 +
53617 + if (!assigned)
53618 + return;
53619 +
53620 + read_lock(&tasklist_lock);
53621 + read_lock(&grsec_exec_file_lock);
53622 +
53623 + tsk = current->real_parent;
53624 + if (tsk == NULL)
53625 + goto out_unlock;
53626 +
53627 + filp = tsk->exec_file;
53628 + if (filp == NULL)
53629 + goto out_unlock;
53630 +
53631 + tsk->is_writable = 0;
53632 +
53633 + tsk->acl_sp_role = 1;
53634 + tsk->acl_role_id = ++acl_sp_role_value;
53635 + tsk->role = assigned;
53636 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53637 +
53638 + /* ignore additional mmap checks for processes that are writable
53639 + by the default ACL */
53640 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53641 + if (unlikely(obj->mode & GR_WRITE))
53642 + tsk->is_writable = 1;
53643 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53644 + if (unlikely(obj->mode & GR_WRITE))
53645 + tsk->is_writable = 1;
53646 +
53647 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53648 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53649 +#endif
53650 +
53651 +out_unlock:
53652 + read_unlock(&grsec_exec_file_lock);
53653 + read_unlock(&tasklist_lock);
53654 + return;
53655 +}
53656 +
53657 +int gr_check_secure_terminal(struct task_struct *task)
53658 +{
53659 + struct task_struct *p, *p2, *p3;
53660 + struct files_struct *files;
53661 + struct fdtable *fdt;
53662 + struct file *our_file = NULL, *file;
53663 + int i;
53664 +
53665 + if (task->signal->tty == NULL)
53666 + return 1;
53667 +
53668 + files = get_files_struct(task);
53669 + if (files != NULL) {
53670 + rcu_read_lock();
53671 + fdt = files_fdtable(files);
53672 + for (i=0; i < fdt->max_fds; i++) {
53673 + file = fcheck_files(files, i);
53674 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53675 + get_file(file);
53676 + our_file = file;
53677 + }
53678 + }
53679 + rcu_read_unlock();
53680 + put_files_struct(files);
53681 + }
53682 +
53683 + if (our_file == NULL)
53684 + return 1;
53685 +
53686 + read_lock(&tasklist_lock);
53687 + do_each_thread(p2, p) {
53688 + files = get_files_struct(p);
53689 + if (files == NULL ||
53690 + (p->signal && p->signal->tty == task->signal->tty)) {
53691 + if (files != NULL)
53692 + put_files_struct(files);
53693 + continue;
53694 + }
53695 + rcu_read_lock();
53696 + fdt = files_fdtable(files);
53697 + for (i=0; i < fdt->max_fds; i++) {
53698 + file = fcheck_files(files, i);
53699 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53700 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53701 + p3 = task;
53702 + while (p3->pid > 0) {
53703 + if (p3 == p)
53704 + break;
53705 + p3 = p3->real_parent;
53706 + }
53707 + if (p3 == p)
53708 + break;
53709 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53710 + gr_handle_alertkill(p);
53711 + rcu_read_unlock();
53712 + put_files_struct(files);
53713 + read_unlock(&tasklist_lock);
53714 + fput(our_file);
53715 + return 0;
53716 + }
53717 + }
53718 + rcu_read_unlock();
53719 + put_files_struct(files);
53720 + } while_each_thread(p2, p);
53721 + read_unlock(&tasklist_lock);
53722 +
53723 + fput(our_file);
53724 + return 1;
53725 +}
53726 +
53727 +static int gr_rbac_disable(void *unused)
53728 +{
53729 + pax_open_kernel();
53730 + gr_status &= ~GR_READY;
53731 + pax_close_kernel();
53732 +
53733 + return 0;
53734 +}
53735 +
53736 +ssize_t
53737 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53738 +{
53739 + struct gr_arg_wrapper uwrap;
53740 + unsigned char *sprole_salt = NULL;
53741 + unsigned char *sprole_sum = NULL;
53742 + int error = sizeof (struct gr_arg_wrapper);
53743 + int error2 = 0;
53744 +
53745 + mutex_lock(&gr_dev_mutex);
53746 +
53747 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53748 + error = -EPERM;
53749 + goto out;
53750 + }
53751 +
53752 + if (count != sizeof (struct gr_arg_wrapper)) {
53753 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53754 + error = -EINVAL;
53755 + goto out;
53756 + }
53757 +
53758 +
53759 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53760 + gr_auth_expires = 0;
53761 + gr_auth_attempts = 0;
53762 + }
53763 +
53764 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53765 + error = -EFAULT;
53766 + goto out;
53767 + }
53768 +
53769 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53770 + error = -EINVAL;
53771 + goto out;
53772 + }
53773 +
53774 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53775 + error = -EFAULT;
53776 + goto out;
53777 + }
53778 +
53779 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53780 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53781 + time_after(gr_auth_expires, get_seconds())) {
53782 + error = -EBUSY;
53783 + goto out;
53784 + }
53785 +
53786 + /* if non-root trying to do anything other than use a special role,
53787 + do not attempt authentication, do not count towards authentication
53788 + locking
53789 + */
53790 +
53791 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53792 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53793 + current_uid()) {
53794 + error = -EPERM;
53795 + goto out;
53796 + }
53797 +
53798 + /* ensure pw and special role name are null terminated */
53799 +
53800 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53801 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53802 +
53803 + /* Okay.
53804 + * We have our enough of the argument structure..(we have yet
53805 + * to copy_from_user the tables themselves) . Copy the tables
53806 + * only if we need them, i.e. for loading operations. */
53807 +
53808 + switch (gr_usermode->mode) {
53809 + case GR_STATUS:
53810 + if (gr_status & GR_READY) {
53811 + error = 1;
53812 + if (!gr_check_secure_terminal(current))
53813 + error = 3;
53814 + } else
53815 + error = 2;
53816 + goto out;
53817 + case GR_SHUTDOWN:
53818 + if ((gr_status & GR_READY)
53819 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53820 + stop_machine(gr_rbac_disable, NULL, NULL);
53821 + free_variables();
53822 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53823 + memset(gr_system_salt, 0, GR_SALT_LEN);
53824 + memset(gr_system_sum, 0, GR_SHA_LEN);
53825 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53826 + } else if (gr_status & GR_READY) {
53827 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53828 + error = -EPERM;
53829 + } else {
53830 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53831 + error = -EAGAIN;
53832 + }
53833 + break;
53834 + case GR_ENABLE:
53835 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53836 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53837 + else {
53838 + if (gr_status & GR_READY)
53839 + error = -EAGAIN;
53840 + else
53841 + error = error2;
53842 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53843 + }
53844 + break;
53845 + case GR_RELOAD:
53846 + if (!(gr_status & GR_READY)) {
53847 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53848 + error = -EAGAIN;
53849 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53850 + stop_machine(gr_rbac_disable, NULL, NULL);
53851 + free_variables();
53852 + error2 = gracl_init(gr_usermode);
53853 + if (!error2)
53854 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53855 + else {
53856 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53857 + error = error2;
53858 + }
53859 + } else {
53860 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53861 + error = -EPERM;
53862 + }
53863 + break;
53864 + case GR_SEGVMOD:
53865 + if (unlikely(!(gr_status & GR_READY))) {
53866 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53867 + error = -EAGAIN;
53868 + break;
53869 + }
53870 +
53871 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53872 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53873 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53874 + struct acl_subject_label *segvacl;
53875 + segvacl =
53876 + lookup_acl_subj_label(gr_usermode->segv_inode,
53877 + gr_usermode->segv_device,
53878 + current->role);
53879 + if (segvacl) {
53880 + segvacl->crashes = 0;
53881 + segvacl->expires = 0;
53882 + }
53883 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53884 + gr_remove_uid(gr_usermode->segv_uid);
53885 + }
53886 + } else {
53887 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53888 + error = -EPERM;
53889 + }
53890 + break;
53891 + case GR_SPROLE:
53892 + case GR_SPROLEPAM:
53893 + if (unlikely(!(gr_status & GR_READY))) {
53894 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53895 + error = -EAGAIN;
53896 + break;
53897 + }
53898 +
53899 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53900 + current->role->expires = 0;
53901 + current->role->auth_attempts = 0;
53902 + }
53903 +
53904 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53905 + time_after(current->role->expires, get_seconds())) {
53906 + error = -EBUSY;
53907 + goto out;
53908 + }
53909 +
53910 + if (lookup_special_role_auth
53911 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53912 + && ((!sprole_salt && !sprole_sum)
53913 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53914 + char *p = "";
53915 + assign_special_role(gr_usermode->sp_role);
53916 + read_lock(&tasklist_lock);
53917 + if (current->real_parent)
53918 + p = current->real_parent->role->rolename;
53919 + read_unlock(&tasklist_lock);
53920 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53921 + p, acl_sp_role_value);
53922 + } else {
53923 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53924 + error = -EPERM;
53925 + if(!(current->role->auth_attempts++))
53926 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53927 +
53928 + goto out;
53929 + }
53930 + break;
53931 + case GR_UNSPROLE:
53932 + if (unlikely(!(gr_status & GR_READY))) {
53933 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53934 + error = -EAGAIN;
53935 + break;
53936 + }
53937 +
53938 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53939 + char *p = "";
53940 + int i = 0;
53941 +
53942 + read_lock(&tasklist_lock);
53943 + if (current->real_parent) {
53944 + p = current->real_parent->role->rolename;
53945 + i = current->real_parent->acl_role_id;
53946 + }
53947 + read_unlock(&tasklist_lock);
53948 +
53949 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53950 + gr_set_acls(1);
53951 + } else {
53952 + error = -EPERM;
53953 + goto out;
53954 + }
53955 + break;
53956 + default:
53957 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53958 + error = -EINVAL;
53959 + break;
53960 + }
53961 +
53962 + if (error != -EPERM)
53963 + goto out;
53964 +
53965 + if(!(gr_auth_attempts++))
53966 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53967 +
53968 + out:
53969 + mutex_unlock(&gr_dev_mutex);
53970 + return error;
53971 +}
53972 +
53973 +/* must be called with
53974 + rcu_read_lock();
53975 + read_lock(&tasklist_lock);
53976 + read_lock(&grsec_exec_file_lock);
53977 +*/
53978 +int gr_apply_subject_to_task(struct task_struct *task)
53979 +{
53980 + struct acl_object_label *obj;
53981 + char *tmpname;
53982 + struct acl_subject_label *tmpsubj;
53983 + struct file *filp;
53984 + struct name_entry *nmatch;
53985 +
53986 + filp = task->exec_file;
53987 + if (filp == NULL)
53988 + return 0;
53989 +
53990 + /* the following is to apply the correct subject
53991 + on binaries running when the RBAC system
53992 + is enabled, when the binaries have been
53993 + replaced or deleted since their execution
53994 + -----
53995 + when the RBAC system starts, the inode/dev
53996 + from exec_file will be one the RBAC system
53997 + is unaware of. It only knows the inode/dev
53998 + of the present file on disk, or the absence
53999 + of it.
54000 + */
54001 + preempt_disable();
54002 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54003 +
54004 + nmatch = lookup_name_entry(tmpname);
54005 + preempt_enable();
54006 + tmpsubj = NULL;
54007 + if (nmatch) {
54008 + if (nmatch->deleted)
54009 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54010 + else
54011 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54012 + if (tmpsubj != NULL)
54013 + task->acl = tmpsubj;
54014 + }
54015 + if (tmpsubj == NULL)
54016 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54017 + task->role);
54018 + if (task->acl) {
54019 + task->is_writable = 0;
54020 + /* ignore additional mmap checks for processes that are writable
54021 + by the default ACL */
54022 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54023 + if (unlikely(obj->mode & GR_WRITE))
54024 + task->is_writable = 1;
54025 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54026 + if (unlikely(obj->mode & GR_WRITE))
54027 + task->is_writable = 1;
54028 +
54029 + gr_set_proc_res(task);
54030 +
54031 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54032 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54033 +#endif
54034 + } else {
54035 + return 1;
54036 + }
54037 +
54038 + return 0;
54039 +}
54040 +
54041 +int
54042 +gr_set_acls(const int type)
54043 +{
54044 + struct task_struct *task, *task2;
54045 + struct acl_role_label *role = current->role;
54046 + __u16 acl_role_id = current->acl_role_id;
54047 + const struct cred *cred;
54048 + int ret;
54049 +
54050 + rcu_read_lock();
54051 + read_lock(&tasklist_lock);
54052 + read_lock(&grsec_exec_file_lock);
54053 + do_each_thread(task2, task) {
54054 + /* check to see if we're called from the exit handler,
54055 + if so, only replace ACLs that have inherited the admin
54056 + ACL */
54057 +
54058 + if (type && (task->role != role ||
54059 + task->acl_role_id != acl_role_id))
54060 + continue;
54061 +
54062 + task->acl_role_id = 0;
54063 + task->acl_sp_role = 0;
54064 +
54065 + if (task->exec_file) {
54066 + cred = __task_cred(task);
54067 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54068 + ret = gr_apply_subject_to_task(task);
54069 + if (ret) {
54070 + read_unlock(&grsec_exec_file_lock);
54071 + read_unlock(&tasklist_lock);
54072 + rcu_read_unlock();
54073 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54074 + return ret;
54075 + }
54076 + } else {
54077 + // it's a kernel process
54078 + task->role = kernel_role;
54079 + task->acl = kernel_role->root_label;
54080 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54081 + task->acl->mode &= ~GR_PROCFIND;
54082 +#endif
54083 + }
54084 + } while_each_thread(task2, task);
54085 + read_unlock(&grsec_exec_file_lock);
54086 + read_unlock(&tasklist_lock);
54087 + rcu_read_unlock();
54088 +
54089 + return 0;
54090 +}
54091 +
54092 +void
54093 +gr_learn_resource(const struct task_struct *task,
54094 + const int res, const unsigned long wanted, const int gt)
54095 +{
54096 + struct acl_subject_label *acl;
54097 + const struct cred *cred;
54098 +
54099 + if (unlikely((gr_status & GR_READY) &&
54100 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54101 + goto skip_reslog;
54102 +
54103 +#ifdef CONFIG_GRKERNSEC_RESLOG
54104 + gr_log_resource(task, res, wanted, gt);
54105 +#endif
54106 + skip_reslog:
54107 +
54108 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54109 + return;
54110 +
54111 + acl = task->acl;
54112 +
54113 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54114 + !(acl->resmask & (1 << (unsigned short) res))))
54115 + return;
54116 +
54117 + if (wanted >= acl->res[res].rlim_cur) {
54118 + unsigned long res_add;
54119 +
54120 + res_add = wanted;
54121 + switch (res) {
54122 + case RLIMIT_CPU:
54123 + res_add += GR_RLIM_CPU_BUMP;
54124 + break;
54125 + case RLIMIT_FSIZE:
54126 + res_add += GR_RLIM_FSIZE_BUMP;
54127 + break;
54128 + case RLIMIT_DATA:
54129 + res_add += GR_RLIM_DATA_BUMP;
54130 + break;
54131 + case RLIMIT_STACK:
54132 + res_add += GR_RLIM_STACK_BUMP;
54133 + break;
54134 + case RLIMIT_CORE:
54135 + res_add += GR_RLIM_CORE_BUMP;
54136 + break;
54137 + case RLIMIT_RSS:
54138 + res_add += GR_RLIM_RSS_BUMP;
54139 + break;
54140 + case RLIMIT_NPROC:
54141 + res_add += GR_RLIM_NPROC_BUMP;
54142 + break;
54143 + case RLIMIT_NOFILE:
54144 + res_add += GR_RLIM_NOFILE_BUMP;
54145 + break;
54146 + case RLIMIT_MEMLOCK:
54147 + res_add += GR_RLIM_MEMLOCK_BUMP;
54148 + break;
54149 + case RLIMIT_AS:
54150 + res_add += GR_RLIM_AS_BUMP;
54151 + break;
54152 + case RLIMIT_LOCKS:
54153 + res_add += GR_RLIM_LOCKS_BUMP;
54154 + break;
54155 + case RLIMIT_SIGPENDING:
54156 + res_add += GR_RLIM_SIGPENDING_BUMP;
54157 + break;
54158 + case RLIMIT_MSGQUEUE:
54159 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54160 + break;
54161 + case RLIMIT_NICE:
54162 + res_add += GR_RLIM_NICE_BUMP;
54163 + break;
54164 + case RLIMIT_RTPRIO:
54165 + res_add += GR_RLIM_RTPRIO_BUMP;
54166 + break;
54167 + case RLIMIT_RTTIME:
54168 + res_add += GR_RLIM_RTTIME_BUMP;
54169 + break;
54170 + }
54171 +
54172 + acl->res[res].rlim_cur = res_add;
54173 +
54174 + if (wanted > acl->res[res].rlim_max)
54175 + acl->res[res].rlim_max = res_add;
54176 +
54177 + /* only log the subject filename, since resource logging is supported for
54178 + single-subject learning only */
54179 + rcu_read_lock();
54180 + cred = __task_cred(task);
54181 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54182 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54183 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54184 + "", (unsigned long) res, &task->signal->saved_ip);
54185 + rcu_read_unlock();
54186 + }
54187 +
54188 + return;
54189 +}
54190 +
54191 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54192 +void
54193 +pax_set_initial_flags(struct linux_binprm *bprm)
54194 +{
54195 + struct task_struct *task = current;
54196 + struct acl_subject_label *proc;
54197 + unsigned long flags;
54198 +
54199 + if (unlikely(!(gr_status & GR_READY)))
54200 + return;
54201 +
54202 + flags = pax_get_flags(task);
54203 +
54204 + proc = task->acl;
54205 +
54206 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54207 + flags &= ~MF_PAX_PAGEEXEC;
54208 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54209 + flags &= ~MF_PAX_SEGMEXEC;
54210 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54211 + flags &= ~MF_PAX_RANDMMAP;
54212 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54213 + flags &= ~MF_PAX_EMUTRAMP;
54214 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54215 + flags &= ~MF_PAX_MPROTECT;
54216 +
54217 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54218 + flags |= MF_PAX_PAGEEXEC;
54219 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54220 + flags |= MF_PAX_SEGMEXEC;
54221 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54222 + flags |= MF_PAX_RANDMMAP;
54223 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54224 + flags |= MF_PAX_EMUTRAMP;
54225 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54226 + flags |= MF_PAX_MPROTECT;
54227 +
54228 + pax_set_flags(task, flags);
54229 +
54230 + return;
54231 +}
54232 +#endif
54233 +
54234 +int
54235 +gr_handle_proc_ptrace(struct task_struct *task)
54236 +{
54237 + struct file *filp;
54238 + struct task_struct *tmp = task;
54239 + struct task_struct *curtemp = current;
54240 + __u32 retmode;
54241 +
54242 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54243 + if (unlikely(!(gr_status & GR_READY)))
54244 + return 0;
54245 +#endif
54246 +
54247 + read_lock(&tasklist_lock);
54248 + read_lock(&grsec_exec_file_lock);
54249 + filp = task->exec_file;
54250 +
54251 + while (tmp->pid > 0) {
54252 + if (tmp == curtemp)
54253 + break;
54254 + tmp = tmp->real_parent;
54255 + }
54256 +
54257 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54258 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54259 + read_unlock(&grsec_exec_file_lock);
54260 + read_unlock(&tasklist_lock);
54261 + return 1;
54262 + }
54263 +
54264 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54265 + if (!(gr_status & GR_READY)) {
54266 + read_unlock(&grsec_exec_file_lock);
54267 + read_unlock(&tasklist_lock);
54268 + return 0;
54269 + }
54270 +#endif
54271 +
54272 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54273 + read_unlock(&grsec_exec_file_lock);
54274 + read_unlock(&tasklist_lock);
54275 +
54276 + if (retmode & GR_NOPTRACE)
54277 + return 1;
54278 +
54279 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54280 + && (current->acl != task->acl || (current->acl != current->role->root_label
54281 + && current->pid != task->pid)))
54282 + return 1;
54283 +
54284 + return 0;
54285 +}
54286 +
54287 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54288 +{
54289 + if (unlikely(!(gr_status & GR_READY)))
54290 + return;
54291 +
54292 + if (!(current->role->roletype & GR_ROLE_GOD))
54293 + return;
54294 +
54295 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54296 + p->role->rolename, gr_task_roletype_to_char(p),
54297 + p->acl->filename);
54298 +}
54299 +
54300 +int
54301 +gr_handle_ptrace(struct task_struct *task, const long request)
54302 +{
54303 + struct task_struct *tmp = task;
54304 + struct task_struct *curtemp = current;
54305 + __u32 retmode;
54306 +
54307 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54308 + if (unlikely(!(gr_status & GR_READY)))
54309 + return 0;
54310 +#endif
54311 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54312 + read_lock(&tasklist_lock);
54313 + while (tmp->pid > 0) {
54314 + if (tmp == curtemp)
54315 + break;
54316 + tmp = tmp->real_parent;
54317 + }
54318 +
54319 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54320 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54321 + read_unlock(&tasklist_lock);
54322 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54323 + return 1;
54324 + }
54325 + read_unlock(&tasklist_lock);
54326 + }
54327 +
54328 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54329 + if (!(gr_status & GR_READY))
54330 + return 0;
54331 +#endif
54332 +
54333 + read_lock(&grsec_exec_file_lock);
54334 + if (unlikely(!task->exec_file)) {
54335 + read_unlock(&grsec_exec_file_lock);
54336 + return 0;
54337 + }
54338 +
54339 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54340 + read_unlock(&grsec_exec_file_lock);
54341 +
54342 + if (retmode & GR_NOPTRACE) {
54343 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54344 + return 1;
54345 + }
54346 +
54347 + if (retmode & GR_PTRACERD) {
54348 + switch (request) {
54349 + case PTRACE_SEIZE:
54350 + case PTRACE_POKETEXT:
54351 + case PTRACE_POKEDATA:
54352 + case PTRACE_POKEUSR:
54353 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54354 + case PTRACE_SETREGS:
54355 + case PTRACE_SETFPREGS:
54356 +#endif
54357 +#ifdef CONFIG_X86
54358 + case PTRACE_SETFPXREGS:
54359 +#endif
54360 +#ifdef CONFIG_ALTIVEC
54361 + case PTRACE_SETVRREGS:
54362 +#endif
54363 + return 1;
54364 + default:
54365 + return 0;
54366 + }
54367 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54368 + !(current->role->roletype & GR_ROLE_GOD) &&
54369 + (current->acl != task->acl)) {
54370 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54371 + return 1;
54372 + }
54373 +
54374 + return 0;
54375 +}
54376 +
54377 +static int is_writable_mmap(const struct file *filp)
54378 +{
54379 + struct task_struct *task = current;
54380 + struct acl_object_label *obj, *obj2;
54381 +
54382 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54383 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54384 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54385 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54386 + task->role->root_label);
54387 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54388 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54389 + return 1;
54390 + }
54391 + }
54392 + return 0;
54393 +}
54394 +
54395 +int
54396 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54397 +{
54398 + __u32 mode;
54399 +
54400 + if (unlikely(!file || !(prot & PROT_EXEC)))
54401 + return 1;
54402 +
54403 + if (is_writable_mmap(file))
54404 + return 0;
54405 +
54406 + mode =
54407 + gr_search_file(file->f_path.dentry,
54408 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54409 + file->f_path.mnt);
54410 +
54411 + if (!gr_tpe_allow(file))
54412 + return 0;
54413 +
54414 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54415 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54416 + return 0;
54417 + } else if (unlikely(!(mode & GR_EXEC))) {
54418 + return 0;
54419 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54420 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54421 + return 1;
54422 + }
54423 +
54424 + return 1;
54425 +}
54426 +
54427 +int
54428 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54429 +{
54430 + __u32 mode;
54431 +
54432 + if (unlikely(!file || !(prot & PROT_EXEC)))
54433 + return 1;
54434 +
54435 + if (is_writable_mmap(file))
54436 + return 0;
54437 +
54438 + mode =
54439 + gr_search_file(file->f_path.dentry,
54440 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54441 + file->f_path.mnt);
54442 +
54443 + if (!gr_tpe_allow(file))
54444 + return 0;
54445 +
54446 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54447 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54448 + return 0;
54449 + } else if (unlikely(!(mode & GR_EXEC))) {
54450 + return 0;
54451 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54452 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54453 + return 1;
54454 + }
54455 +
54456 + return 1;
54457 +}
54458 +
54459 +void
54460 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54461 +{
54462 + unsigned long runtime;
54463 + unsigned long cputime;
54464 + unsigned int wday, cday;
54465 + __u8 whr, chr;
54466 + __u8 wmin, cmin;
54467 + __u8 wsec, csec;
54468 + struct timespec timeval;
54469 +
54470 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54471 + !(task->acl->mode & GR_PROCACCT)))
54472 + return;
54473 +
54474 + do_posix_clock_monotonic_gettime(&timeval);
54475 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54476 + wday = runtime / (3600 * 24);
54477 + runtime -= wday * (3600 * 24);
54478 + whr = runtime / 3600;
54479 + runtime -= whr * 3600;
54480 + wmin = runtime / 60;
54481 + runtime -= wmin * 60;
54482 + wsec = runtime;
54483 +
54484 + cputime = (task->utime + task->stime) / HZ;
54485 + cday = cputime / (3600 * 24);
54486 + cputime -= cday * (3600 * 24);
54487 + chr = cputime / 3600;
54488 + cputime -= chr * 3600;
54489 + cmin = cputime / 60;
54490 + cputime -= cmin * 60;
54491 + csec = cputime;
54492 +
54493 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54494 +
54495 + return;
54496 +}
54497 +
54498 +void gr_set_kernel_label(struct task_struct *task)
54499 +{
54500 + if (gr_status & GR_READY) {
54501 + task->role = kernel_role;
54502 + task->acl = kernel_role->root_label;
54503 + }
54504 + return;
54505 +}
54506 +
54507 +#ifdef CONFIG_TASKSTATS
54508 +int gr_is_taskstats_denied(int pid)
54509 +{
54510 + struct task_struct *task;
54511 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54512 + const struct cred *cred;
54513 +#endif
54514 + int ret = 0;
54515 +
54516 + /* restrict taskstats viewing to un-chrooted root users
54517 + who have the 'view' subject flag if the RBAC system is enabled
54518 + */
54519 +
54520 + rcu_read_lock();
54521 + read_lock(&tasklist_lock);
54522 + task = find_task_by_vpid(pid);
54523 + if (task) {
54524 +#ifdef CONFIG_GRKERNSEC_CHROOT
54525 + if (proc_is_chrooted(task))
54526 + ret = -EACCES;
54527 +#endif
54528 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54529 + cred = __task_cred(task);
54530 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54531 + if (cred->uid != 0)
54532 + ret = -EACCES;
54533 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54534 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54535 + ret = -EACCES;
54536 +#endif
54537 +#endif
54538 + if (gr_status & GR_READY) {
54539 + if (!(task->acl->mode & GR_VIEW))
54540 + ret = -EACCES;
54541 + }
54542 + } else
54543 + ret = -ENOENT;
54544 +
54545 + read_unlock(&tasklist_lock);
54546 + rcu_read_unlock();
54547 +
54548 + return ret;
54549 +}
54550 +#endif
54551 +
54552 +/* AUXV entries are filled via a descendant of search_binary_handler
54553 + after we've already applied the subject for the target
54554 +*/
54555 +int gr_acl_enable_at_secure(void)
54556 +{
54557 + if (unlikely(!(gr_status & GR_READY)))
54558 + return 0;
54559 +
54560 + if (current->acl->mode & GR_ATSECURE)
54561 + return 1;
54562 +
54563 + return 0;
54564 +}
54565 +
54566 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54567 +{
54568 + struct task_struct *task = current;
54569 + struct dentry *dentry = file->f_path.dentry;
54570 + struct vfsmount *mnt = file->f_path.mnt;
54571 + struct acl_object_label *obj, *tmp;
54572 + struct acl_subject_label *subj;
54573 + unsigned int bufsize;
54574 + int is_not_root;
54575 + char *path;
54576 + dev_t dev = __get_dev(dentry);
54577 +
54578 + if (unlikely(!(gr_status & GR_READY)))
54579 + return 1;
54580 +
54581 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54582 + return 1;
54583 +
54584 + /* ignore Eric Biederman */
54585 + if (IS_PRIVATE(dentry->d_inode))
54586 + return 1;
54587 +
54588 + subj = task->acl;
54589 + read_lock(&gr_inode_lock);
54590 + do {
54591 + obj = lookup_acl_obj_label(ino, dev, subj);
54592 + if (obj != NULL) {
54593 + read_unlock(&gr_inode_lock);
54594 + return (obj->mode & GR_FIND) ? 1 : 0;
54595 + }
54596 + } while ((subj = subj->parent_subject));
54597 + read_unlock(&gr_inode_lock);
54598 +
54599 + /* this is purely an optimization since we're looking for an object
54600 + for the directory we're doing a readdir on
54601 + if it's possible for any globbed object to match the entry we're
54602 + filling into the directory, then the object we find here will be
54603 + an anchor point with attached globbed objects
54604 + */
54605 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54606 + if (obj->globbed == NULL)
54607 + return (obj->mode & GR_FIND) ? 1 : 0;
54608 +
54609 + is_not_root = ((obj->filename[0] == '/') &&
54610 + (obj->filename[1] == '\0')) ? 0 : 1;
54611 + bufsize = PAGE_SIZE - namelen - is_not_root;
54612 +
54613 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54614 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54615 + return 1;
54616 +
54617 + preempt_disable();
54618 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54619 + bufsize);
54620 +
54621 + bufsize = strlen(path);
54622 +
54623 + /* if base is "/", don't append an additional slash */
54624 + if (is_not_root)
54625 + *(path + bufsize) = '/';
54626 + memcpy(path + bufsize + is_not_root, name, namelen);
54627 + *(path + bufsize + namelen + is_not_root) = '\0';
54628 +
54629 + tmp = obj->globbed;
54630 + while (tmp) {
54631 + if (!glob_match(tmp->filename, path)) {
54632 + preempt_enable();
54633 + return (tmp->mode & GR_FIND) ? 1 : 0;
54634 + }
54635 + tmp = tmp->next;
54636 + }
54637 + preempt_enable();
54638 + return (obj->mode & GR_FIND) ? 1 : 0;
54639 +}
54640 +
54641 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54642 +EXPORT_SYMBOL(gr_acl_is_enabled);
54643 +#endif
54644 +EXPORT_SYMBOL(gr_learn_resource);
54645 +EXPORT_SYMBOL(gr_set_kernel_label);
54646 +#ifdef CONFIG_SECURITY
54647 +EXPORT_SYMBOL(gr_check_user_change);
54648 +EXPORT_SYMBOL(gr_check_group_change);
54649 +#endif
54650 +
54651 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54652 new file mode 100644
54653 index 0000000..34fefda
54654 --- /dev/null
54655 +++ b/grsecurity/gracl_alloc.c
54656 @@ -0,0 +1,105 @@
54657 +#include <linux/kernel.h>
54658 +#include <linux/mm.h>
54659 +#include <linux/slab.h>
54660 +#include <linux/vmalloc.h>
54661 +#include <linux/gracl.h>
54662 +#include <linux/grsecurity.h>
54663 +
54664 +static unsigned long alloc_stack_next = 1;
54665 +static unsigned long alloc_stack_size = 1;
54666 +static void **alloc_stack;
54667 +
54668 +static __inline__ int
54669 +alloc_pop(void)
54670 +{
54671 + if (alloc_stack_next == 1)
54672 + return 0;
54673 +
54674 + kfree(alloc_stack[alloc_stack_next - 2]);
54675 +
54676 + alloc_stack_next--;
54677 +
54678 + return 1;
54679 +}
54680 +
54681 +static __inline__ int
54682 +alloc_push(void *buf)
54683 +{
54684 + if (alloc_stack_next >= alloc_stack_size)
54685 + return 1;
54686 +
54687 + alloc_stack[alloc_stack_next - 1] = buf;
54688 +
54689 + alloc_stack_next++;
54690 +
54691 + return 0;
54692 +}
54693 +
54694 +void *
54695 +acl_alloc(unsigned long len)
54696 +{
54697 + void *ret = NULL;
54698 +
54699 + if (!len || len > PAGE_SIZE)
54700 + goto out;
54701 +
54702 + ret = kmalloc(len, GFP_KERNEL);
54703 +
54704 + if (ret) {
54705 + if (alloc_push(ret)) {
54706 + kfree(ret);
54707 + ret = NULL;
54708 + }
54709 + }
54710 +
54711 +out:
54712 + return ret;
54713 +}
54714 +
54715 +void *
54716 +acl_alloc_num(unsigned long num, unsigned long len)
54717 +{
54718 + if (!len || (num > (PAGE_SIZE / len)))
54719 + return NULL;
54720 +
54721 + return acl_alloc(num * len);
54722 +}
54723 +
54724 +void
54725 +acl_free_all(void)
54726 +{
54727 + if (gr_acl_is_enabled() || !alloc_stack)
54728 + return;
54729 +
54730 + while (alloc_pop()) ;
54731 +
54732 + if (alloc_stack) {
54733 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54734 + kfree(alloc_stack);
54735 + else
54736 + vfree(alloc_stack);
54737 + }
54738 +
54739 + alloc_stack = NULL;
54740 + alloc_stack_size = 1;
54741 + alloc_stack_next = 1;
54742 +
54743 + return;
54744 +}
54745 +
54746 +int
54747 +acl_alloc_stack_init(unsigned long size)
54748 +{
54749 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54750 + alloc_stack =
54751 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54752 + else
54753 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54754 +
54755 + alloc_stack_size = size;
54756 +
54757 + if (!alloc_stack)
54758 + return 0;
54759 + else
54760 + return 1;
54761 +}
54762 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54763 new file mode 100644
54764 index 0000000..6d21049
54765 --- /dev/null
54766 +++ b/grsecurity/gracl_cap.c
54767 @@ -0,0 +1,110 @@
54768 +#include <linux/kernel.h>
54769 +#include <linux/module.h>
54770 +#include <linux/sched.h>
54771 +#include <linux/gracl.h>
54772 +#include <linux/grsecurity.h>
54773 +#include <linux/grinternal.h>
54774 +
54775 +extern const char *captab_log[];
54776 +extern int captab_log_entries;
54777 +
54778 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54779 +{
54780 + struct acl_subject_label *curracl;
54781 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54782 + kernel_cap_t cap_audit = __cap_empty_set;
54783 +
54784 + if (!gr_acl_is_enabled())
54785 + return 1;
54786 +
54787 + curracl = task->acl;
54788 +
54789 + cap_drop = curracl->cap_lower;
54790 + cap_mask = curracl->cap_mask;
54791 + cap_audit = curracl->cap_invert_audit;
54792 +
54793 + while ((curracl = curracl->parent_subject)) {
54794 + /* if the cap isn't specified in the current computed mask but is specified in the
54795 + current level subject, and is lowered in the current level subject, then add
54796 + it to the set of dropped capabilities
54797 + otherwise, add the current level subject's mask to the current computed mask
54798 + */
54799 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54800 + cap_raise(cap_mask, cap);
54801 + if (cap_raised(curracl->cap_lower, cap))
54802 + cap_raise(cap_drop, cap);
54803 + if (cap_raised(curracl->cap_invert_audit, cap))
54804 + cap_raise(cap_audit, cap);
54805 + }
54806 + }
54807 +
54808 + if (!cap_raised(cap_drop, cap)) {
54809 + if (cap_raised(cap_audit, cap))
54810 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54811 + return 1;
54812 + }
54813 +
54814 + curracl = task->acl;
54815 +
54816 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54817 + && cap_raised(cred->cap_effective, cap)) {
54818 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54819 + task->role->roletype, cred->uid,
54820 + cred->gid, task->exec_file ?
54821 + gr_to_filename(task->exec_file->f_path.dentry,
54822 + task->exec_file->f_path.mnt) : curracl->filename,
54823 + curracl->filename, 0UL,
54824 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54825 + return 1;
54826 + }
54827 +
54828 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54829 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54830 +
54831 + return 0;
54832 +}
54833 +
54834 +int
54835 +gr_acl_is_capable(const int cap)
54836 +{
54837 + return gr_task_acl_is_capable(current, current_cred(), cap);
54838 +}
54839 +
54840 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54841 +{
54842 + struct acl_subject_label *curracl;
54843 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54844 +
54845 + if (!gr_acl_is_enabled())
54846 + return 1;
54847 +
54848 + curracl = task->acl;
54849 +
54850 + cap_drop = curracl->cap_lower;
54851 + cap_mask = curracl->cap_mask;
54852 +
54853 + while ((curracl = curracl->parent_subject)) {
54854 + /* if the cap isn't specified in the current computed mask but is specified in the
54855 + current level subject, and is lowered in the current level subject, then add
54856 + it to the set of dropped capabilities
54857 + otherwise, add the current level subject's mask to the current computed mask
54858 + */
54859 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54860 + cap_raise(cap_mask, cap);
54861 + if (cap_raised(curracl->cap_lower, cap))
54862 + cap_raise(cap_drop, cap);
54863 + }
54864 + }
54865 +
54866 + if (!cap_raised(cap_drop, cap))
54867 + return 1;
54868 +
54869 + return 0;
54870 +}
54871 +
54872 +int
54873 +gr_acl_is_capable_nolog(const int cap)
54874 +{
54875 + return gr_task_acl_is_capable_nolog(current, cap);
54876 +}
54877 +
54878 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54879 new file mode 100644
54880 index 0000000..88d0e87
54881 --- /dev/null
54882 +++ b/grsecurity/gracl_fs.c
54883 @@ -0,0 +1,435 @@
54884 +#include <linux/kernel.h>
54885 +#include <linux/sched.h>
54886 +#include <linux/types.h>
54887 +#include <linux/fs.h>
54888 +#include <linux/file.h>
54889 +#include <linux/stat.h>
54890 +#include <linux/grsecurity.h>
54891 +#include <linux/grinternal.h>
54892 +#include <linux/gracl.h>
54893 +
54894 +umode_t
54895 +gr_acl_umask(void)
54896 +{
54897 + if (unlikely(!gr_acl_is_enabled()))
54898 + return 0;
54899 +
54900 + return current->role->umask;
54901 +}
54902 +
54903 +__u32
54904 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54905 + const struct vfsmount * mnt)
54906 +{
54907 + __u32 mode;
54908 +
54909 + if (unlikely(!dentry->d_inode))
54910 + return GR_FIND;
54911 +
54912 + mode =
54913 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54914 +
54915 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54916 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54917 + return mode;
54918 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54919 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54920 + return 0;
54921 + } else if (unlikely(!(mode & GR_FIND)))
54922 + return 0;
54923 +
54924 + return GR_FIND;
54925 +}
54926 +
54927 +__u32
54928 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54929 + int acc_mode)
54930 +{
54931 + __u32 reqmode = GR_FIND;
54932 + __u32 mode;
54933 +
54934 + if (unlikely(!dentry->d_inode))
54935 + return reqmode;
54936 +
54937 + if (acc_mode & MAY_APPEND)
54938 + reqmode |= GR_APPEND;
54939 + else if (acc_mode & MAY_WRITE)
54940 + reqmode |= GR_WRITE;
54941 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54942 + reqmode |= GR_READ;
54943 +
54944 + mode =
54945 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54946 + mnt);
54947 +
54948 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54949 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54950 + reqmode & GR_READ ? " reading" : "",
54951 + reqmode & GR_WRITE ? " writing" : reqmode &
54952 + GR_APPEND ? " appending" : "");
54953 + return reqmode;
54954 + } else
54955 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54956 + {
54957 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54958 + reqmode & GR_READ ? " reading" : "",
54959 + reqmode & GR_WRITE ? " writing" : reqmode &
54960 + GR_APPEND ? " appending" : "");
54961 + return 0;
54962 + } else if (unlikely((mode & reqmode) != reqmode))
54963 + return 0;
54964 +
54965 + return reqmode;
54966 +}
54967 +
54968 +__u32
54969 +gr_acl_handle_creat(const struct dentry * dentry,
54970 + const struct dentry * p_dentry,
54971 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54972 + const int imode)
54973 +{
54974 + __u32 reqmode = GR_WRITE | GR_CREATE;
54975 + __u32 mode;
54976 +
54977 + if (acc_mode & MAY_APPEND)
54978 + reqmode |= GR_APPEND;
54979 + // if a directory was required or the directory already exists, then
54980 + // don't count this open as a read
54981 + if ((acc_mode & MAY_READ) &&
54982 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54983 + reqmode |= GR_READ;
54984 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54985 + reqmode |= GR_SETID;
54986 +
54987 + mode =
54988 + gr_check_create(dentry, p_dentry, p_mnt,
54989 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54990 +
54991 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54992 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54993 + reqmode & GR_READ ? " reading" : "",
54994 + reqmode & GR_WRITE ? " writing" : reqmode &
54995 + GR_APPEND ? " appending" : "");
54996 + return reqmode;
54997 + } else
54998 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54999 + {
55000 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55001 + reqmode & GR_READ ? " reading" : "",
55002 + reqmode & GR_WRITE ? " writing" : reqmode &
55003 + GR_APPEND ? " appending" : "");
55004 + return 0;
55005 + } else if (unlikely((mode & reqmode) != reqmode))
55006 + return 0;
55007 +
55008 + return reqmode;
55009 +}
55010 +
55011 +__u32
55012 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55013 + const int fmode)
55014 +{
55015 + __u32 mode, reqmode = GR_FIND;
55016 +
55017 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55018 + reqmode |= GR_EXEC;
55019 + if (fmode & S_IWOTH)
55020 + reqmode |= GR_WRITE;
55021 + if (fmode & S_IROTH)
55022 + reqmode |= GR_READ;
55023 +
55024 + mode =
55025 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55026 + mnt);
55027 +
55028 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55029 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55030 + reqmode & GR_READ ? " reading" : "",
55031 + reqmode & GR_WRITE ? " writing" : "",
55032 + reqmode & GR_EXEC ? " executing" : "");
55033 + return reqmode;
55034 + } else
55035 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55036 + {
55037 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55038 + reqmode & GR_READ ? " reading" : "",
55039 + reqmode & GR_WRITE ? " writing" : "",
55040 + reqmode & GR_EXEC ? " executing" : "");
55041 + return 0;
55042 + } else if (unlikely((mode & reqmode) != reqmode))
55043 + return 0;
55044 +
55045 + return reqmode;
55046 +}
55047 +
55048 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55049 +{
55050 + __u32 mode;
55051 +
55052 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55053 +
55054 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55055 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55056 + return mode;
55057 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55058 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55059 + return 0;
55060 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55061 + return 0;
55062 +
55063 + return (reqmode);
55064 +}
55065 +
55066 +__u32
55067 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55068 +{
55069 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55070 +}
55071 +
55072 +__u32
55073 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55074 +{
55075 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55076 +}
55077 +
55078 +__u32
55079 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55080 +{
55081 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55082 +}
55083 +
55084 +__u32
55085 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55086 +{
55087 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55088 +}
55089 +
55090 +__u32
55091 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55092 + umode_t *modeptr)
55093 +{
55094 + umode_t mode;
55095 +
55096 + *modeptr &= ~gr_acl_umask();
55097 + mode = *modeptr;
55098 +
55099 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55100 + return 1;
55101 +
55102 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
55103 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55104 + GR_CHMOD_ACL_MSG);
55105 + } else {
55106 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55107 + }
55108 +}
55109 +
55110 +__u32
55111 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55112 +{
55113 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55114 +}
55115 +
55116 +__u32
55117 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55118 +{
55119 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55120 +}
55121 +
55122 +__u32
55123 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55124 +{
55125 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55126 +}
55127 +
55128 +__u32
55129 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55130 +{
55131 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55132 + GR_UNIXCONNECT_ACL_MSG);
55133 +}
55134 +
55135 +/* hardlinks require at minimum create and link permission,
55136 + any additional privilege required is based on the
55137 + privilege of the file being linked to
55138 +*/
55139 +__u32
55140 +gr_acl_handle_link(const struct dentry * new_dentry,
55141 + const struct dentry * parent_dentry,
55142 + const struct vfsmount * parent_mnt,
55143 + const struct dentry * old_dentry,
55144 + const struct vfsmount * old_mnt, const char *to)
55145 +{
55146 + __u32 mode;
55147 + __u32 needmode = GR_CREATE | GR_LINK;
55148 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55149 +
55150 + mode =
55151 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55152 + old_mnt);
55153 +
55154 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55155 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55156 + return mode;
55157 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55158 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55159 + return 0;
55160 + } else if (unlikely((mode & needmode) != needmode))
55161 + return 0;
55162 +
55163 + return 1;
55164 +}
55165 +
55166 +__u32
55167 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55168 + const struct dentry * parent_dentry,
55169 + const struct vfsmount * parent_mnt, const char *from)
55170 +{
55171 + __u32 needmode = GR_WRITE | GR_CREATE;
55172 + __u32 mode;
55173 +
55174 + mode =
55175 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55176 + GR_CREATE | GR_AUDIT_CREATE |
55177 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55178 +
55179 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55180 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55181 + return mode;
55182 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55183 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55184 + return 0;
55185 + } else if (unlikely((mode & needmode) != needmode))
55186 + return 0;
55187 +
55188 + return (GR_WRITE | GR_CREATE);
55189 +}
55190 +
55191 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55192 +{
55193 + __u32 mode;
55194 +
55195 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55196 +
55197 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55198 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55199 + return mode;
55200 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55201 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55202 + return 0;
55203 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55204 + return 0;
55205 +
55206 + return (reqmode);
55207 +}
55208 +
55209 +__u32
55210 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55211 + const struct dentry * parent_dentry,
55212 + const struct vfsmount * parent_mnt,
55213 + const int mode)
55214 +{
55215 + __u32 reqmode = GR_WRITE | GR_CREATE;
55216 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55217 + reqmode |= GR_SETID;
55218 +
55219 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55220 + reqmode, GR_MKNOD_ACL_MSG);
55221 +}
55222 +
55223 +__u32
55224 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55225 + const struct dentry *parent_dentry,
55226 + const struct vfsmount *parent_mnt)
55227 +{
55228 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55229 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55230 +}
55231 +
55232 +#define RENAME_CHECK_SUCCESS(old, new) \
55233 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55234 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55235 +
55236 +int
55237 +gr_acl_handle_rename(struct dentry *new_dentry,
55238 + struct dentry *parent_dentry,
55239 + const struct vfsmount *parent_mnt,
55240 + struct dentry *old_dentry,
55241 + struct inode *old_parent_inode,
55242 + struct vfsmount *old_mnt, const char *newname)
55243 +{
55244 + __u32 comp1, comp2;
55245 + int error = 0;
55246 +
55247 + if (unlikely(!gr_acl_is_enabled()))
55248 + return 0;
55249 +
55250 + if (!new_dentry->d_inode) {
55251 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55252 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55253 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55254 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55255 + GR_DELETE | GR_AUDIT_DELETE |
55256 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55257 + GR_SUPPRESS, old_mnt);
55258 + } else {
55259 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55260 + GR_CREATE | GR_DELETE |
55261 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55262 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55263 + GR_SUPPRESS, parent_mnt);
55264 + comp2 =
55265 + gr_search_file(old_dentry,
55266 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55267 + GR_DELETE | GR_AUDIT_DELETE |
55268 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55269 + }
55270 +
55271 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55272 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55273 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55274 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55275 + && !(comp2 & GR_SUPPRESS)) {
55276 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55277 + error = -EACCES;
55278 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55279 + error = -EACCES;
55280 +
55281 + return error;
55282 +}
55283 +
55284 +void
55285 +gr_acl_handle_exit(void)
55286 +{
55287 + u16 id;
55288 + char *rolename;
55289 + struct file *exec_file;
55290 +
55291 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55292 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55293 + id = current->acl_role_id;
55294 + rolename = current->role->rolename;
55295 + gr_set_acls(1);
55296 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55297 + }
55298 +
55299 + write_lock(&grsec_exec_file_lock);
55300 + exec_file = current->exec_file;
55301 + current->exec_file = NULL;
55302 + write_unlock(&grsec_exec_file_lock);
55303 +
55304 + if (exec_file)
55305 + fput(exec_file);
55306 +}
55307 +
55308 +int
55309 +gr_acl_handle_procpidmem(const struct task_struct *task)
55310 +{
55311 + if (unlikely(!gr_acl_is_enabled()))
55312 + return 0;
55313 +
55314 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55315 + return -EACCES;
55316 +
55317 + return 0;
55318 +}
55319 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55320 new file mode 100644
55321 index 0000000..58800a7
55322 --- /dev/null
55323 +++ b/grsecurity/gracl_ip.c
55324 @@ -0,0 +1,384 @@
55325 +#include <linux/kernel.h>
55326 +#include <asm/uaccess.h>
55327 +#include <asm/errno.h>
55328 +#include <net/sock.h>
55329 +#include <linux/file.h>
55330 +#include <linux/fs.h>
55331 +#include <linux/net.h>
55332 +#include <linux/in.h>
55333 +#include <linux/skbuff.h>
55334 +#include <linux/ip.h>
55335 +#include <linux/udp.h>
55336 +#include <linux/types.h>
55337 +#include <linux/sched.h>
55338 +#include <linux/netdevice.h>
55339 +#include <linux/inetdevice.h>
55340 +#include <linux/gracl.h>
55341 +#include <linux/grsecurity.h>
55342 +#include <linux/grinternal.h>
55343 +
55344 +#define GR_BIND 0x01
55345 +#define GR_CONNECT 0x02
55346 +#define GR_INVERT 0x04
55347 +#define GR_BINDOVERRIDE 0x08
55348 +#define GR_CONNECTOVERRIDE 0x10
55349 +#define GR_SOCK_FAMILY 0x20
55350 +
55351 +static const char * gr_protocols[IPPROTO_MAX] = {
55352 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55353 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55354 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55355 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55356 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55357 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55358 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55359 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55360 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55361 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55362 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55363 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55364 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55365 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55366 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55367 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55368 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55369 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55370 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55371 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55372 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55373 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55374 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55375 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55376 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55377 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55378 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55379 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55380 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55381 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55382 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55383 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55384 + };
55385 +
55386 +static const char * gr_socktypes[SOCK_MAX] = {
55387 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55388 + "unknown:7", "unknown:8", "unknown:9", "packet"
55389 + };
55390 +
55391 +static const char * gr_sockfamilies[AF_MAX+1] = {
55392 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55393 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55394 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55395 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55396 + };
55397 +
55398 +const char *
55399 +gr_proto_to_name(unsigned char proto)
55400 +{
55401 + return gr_protocols[proto];
55402 +}
55403 +
55404 +const char *
55405 +gr_socktype_to_name(unsigned char type)
55406 +{
55407 + return gr_socktypes[type];
55408 +}
55409 +
55410 +const char *
55411 +gr_sockfamily_to_name(unsigned char family)
55412 +{
55413 + return gr_sockfamilies[family];
55414 +}
55415 +
55416 +int
55417 +gr_search_socket(const int domain, const int type, const int protocol)
55418 +{
55419 + struct acl_subject_label *curr;
55420 + const struct cred *cred = current_cred();
55421 +
55422 + if (unlikely(!gr_acl_is_enabled()))
55423 + goto exit;
55424 +
55425 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55426 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55427 + goto exit; // let the kernel handle it
55428 +
55429 + curr = current->acl;
55430 +
55431 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55432 + /* the family is allowed, if this is PF_INET allow it only if
55433 + the extra sock type/protocol checks pass */
55434 + if (domain == PF_INET)
55435 + goto inet_check;
55436 + goto exit;
55437 + } else {
55438 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55439 + __u32 fakeip = 0;
55440 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55441 + current->role->roletype, cred->uid,
55442 + cred->gid, current->exec_file ?
55443 + gr_to_filename(current->exec_file->f_path.dentry,
55444 + current->exec_file->f_path.mnt) :
55445 + curr->filename, curr->filename,
55446 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55447 + &current->signal->saved_ip);
55448 + goto exit;
55449 + }
55450 + goto exit_fail;
55451 + }
55452 +
55453 +inet_check:
55454 + /* the rest of this checking is for IPv4 only */
55455 + if (!curr->ips)
55456 + goto exit;
55457 +
55458 + if ((curr->ip_type & (1 << type)) &&
55459 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55460 + goto exit;
55461 +
55462 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55463 + /* we don't place acls on raw sockets , and sometimes
55464 + dgram/ip sockets are opened for ioctl and not
55465 + bind/connect, so we'll fake a bind learn log */
55466 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55467 + __u32 fakeip = 0;
55468 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55469 + current->role->roletype, cred->uid,
55470 + cred->gid, current->exec_file ?
55471 + gr_to_filename(current->exec_file->f_path.dentry,
55472 + current->exec_file->f_path.mnt) :
55473 + curr->filename, curr->filename,
55474 + &fakeip, 0, type,
55475 + protocol, GR_CONNECT, &current->signal->saved_ip);
55476 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55477 + __u32 fakeip = 0;
55478 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55479 + current->role->roletype, cred->uid,
55480 + cred->gid, current->exec_file ?
55481 + gr_to_filename(current->exec_file->f_path.dentry,
55482 + current->exec_file->f_path.mnt) :
55483 + curr->filename, curr->filename,
55484 + &fakeip, 0, type,
55485 + protocol, GR_BIND, &current->signal->saved_ip);
55486 + }
55487 + /* we'll log when they use connect or bind */
55488 + goto exit;
55489 + }
55490 +
55491 +exit_fail:
55492 + if (domain == PF_INET)
55493 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55494 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55495 + else
55496 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55497 + gr_socktype_to_name(type), protocol);
55498 +
55499 + return 0;
55500 +exit:
55501 + return 1;
55502 +}
55503 +
55504 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55505 +{
55506 + if ((ip->mode & mode) &&
55507 + (ip_port >= ip->low) &&
55508 + (ip_port <= ip->high) &&
55509 + ((ntohl(ip_addr) & our_netmask) ==
55510 + (ntohl(our_addr) & our_netmask))
55511 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55512 + && (ip->type & (1 << type))) {
55513 + if (ip->mode & GR_INVERT)
55514 + return 2; // specifically denied
55515 + else
55516 + return 1; // allowed
55517 + }
55518 +
55519 + return 0; // not specifically allowed, may continue parsing
55520 +}
55521 +
55522 +static int
55523 +gr_search_connectbind(const int full_mode, struct sock *sk,
55524 + struct sockaddr_in *addr, const int type)
55525 +{
55526 + char iface[IFNAMSIZ] = {0};
55527 + struct acl_subject_label *curr;
55528 + struct acl_ip_label *ip;
55529 + struct inet_sock *isk;
55530 + struct net_device *dev;
55531 + struct in_device *idev;
55532 + unsigned long i;
55533 + int ret;
55534 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55535 + __u32 ip_addr = 0;
55536 + __u32 our_addr;
55537 + __u32 our_netmask;
55538 + char *p;
55539 + __u16 ip_port = 0;
55540 + const struct cred *cred = current_cred();
55541 +
55542 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55543 + return 0;
55544 +
55545 + curr = current->acl;
55546 + isk = inet_sk(sk);
55547 +
55548 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55549 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55550 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55551 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55552 + struct sockaddr_in saddr;
55553 + int err;
55554 +
55555 + saddr.sin_family = AF_INET;
55556 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55557 + saddr.sin_port = isk->inet_sport;
55558 +
55559 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55560 + if (err)
55561 + return err;
55562 +
55563 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55564 + if (err)
55565 + return err;
55566 + }
55567 +
55568 + if (!curr->ips)
55569 + return 0;
55570 +
55571 + ip_addr = addr->sin_addr.s_addr;
55572 + ip_port = ntohs(addr->sin_port);
55573 +
55574 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55575 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55576 + current->role->roletype, cred->uid,
55577 + cred->gid, current->exec_file ?
55578 + gr_to_filename(current->exec_file->f_path.dentry,
55579 + current->exec_file->f_path.mnt) :
55580 + curr->filename, curr->filename,
55581 + &ip_addr, ip_port, type,
55582 + sk->sk_protocol, mode, &current->signal->saved_ip);
55583 + return 0;
55584 + }
55585 +
55586 + for (i = 0; i < curr->ip_num; i++) {
55587 + ip = *(curr->ips + i);
55588 + if (ip->iface != NULL) {
55589 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55590 + p = strchr(iface, ':');
55591 + if (p != NULL)
55592 + *p = '\0';
55593 + dev = dev_get_by_name(sock_net(sk), iface);
55594 + if (dev == NULL)
55595 + continue;
55596 + idev = in_dev_get(dev);
55597 + if (idev == NULL) {
55598 + dev_put(dev);
55599 + continue;
55600 + }
55601 + rcu_read_lock();
55602 + for_ifa(idev) {
55603 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55604 + our_addr = ifa->ifa_address;
55605 + our_netmask = 0xffffffff;
55606 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55607 + if (ret == 1) {
55608 + rcu_read_unlock();
55609 + in_dev_put(idev);
55610 + dev_put(dev);
55611 + return 0;
55612 + } else if (ret == 2) {
55613 + rcu_read_unlock();
55614 + in_dev_put(idev);
55615 + dev_put(dev);
55616 + goto denied;
55617 + }
55618 + }
55619 + } endfor_ifa(idev);
55620 + rcu_read_unlock();
55621 + in_dev_put(idev);
55622 + dev_put(dev);
55623 + } else {
55624 + our_addr = ip->addr;
55625 + our_netmask = ip->netmask;
55626 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55627 + if (ret == 1)
55628 + return 0;
55629 + else if (ret == 2)
55630 + goto denied;
55631 + }
55632 + }
55633 +
55634 +denied:
55635 + if (mode == GR_BIND)
55636 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55637 + else if (mode == GR_CONNECT)
55638 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55639 +
55640 + return -EACCES;
55641 +}
55642 +
55643 +int
55644 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55645 +{
55646 + /* always allow disconnection of dgram sockets with connect */
55647 + if (addr->sin_family == AF_UNSPEC)
55648 + return 0;
55649 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55650 +}
55651 +
55652 +int
55653 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55654 +{
55655 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55656 +}
55657 +
55658 +int gr_search_listen(struct socket *sock)
55659 +{
55660 + struct sock *sk = sock->sk;
55661 + struct sockaddr_in addr;
55662 +
55663 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55664 + addr.sin_port = inet_sk(sk)->inet_sport;
55665 +
55666 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55667 +}
55668 +
55669 +int gr_search_accept(struct socket *sock)
55670 +{
55671 + struct sock *sk = sock->sk;
55672 + struct sockaddr_in addr;
55673 +
55674 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55675 + addr.sin_port = inet_sk(sk)->inet_sport;
55676 +
55677 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55678 +}
55679 +
55680 +int
55681 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55682 +{
55683 + if (addr)
55684 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55685 + else {
55686 + struct sockaddr_in sin;
55687 + const struct inet_sock *inet = inet_sk(sk);
55688 +
55689 + sin.sin_addr.s_addr = inet->inet_daddr;
55690 + sin.sin_port = inet->inet_dport;
55691 +
55692 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55693 + }
55694 +}
55695 +
55696 +int
55697 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55698 +{
55699 + struct sockaddr_in sin;
55700 +
55701 + if (unlikely(skb->len < sizeof (struct udphdr)))
55702 + return 0; // skip this packet
55703 +
55704 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55705 + sin.sin_port = udp_hdr(skb)->source;
55706 +
55707 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55708 +}
55709 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55710 new file mode 100644
55711 index 0000000..25f54ef
55712 --- /dev/null
55713 +++ b/grsecurity/gracl_learn.c
55714 @@ -0,0 +1,207 @@
55715 +#include <linux/kernel.h>
55716 +#include <linux/mm.h>
55717 +#include <linux/sched.h>
55718 +#include <linux/poll.h>
55719 +#include <linux/string.h>
55720 +#include <linux/file.h>
55721 +#include <linux/types.h>
55722 +#include <linux/vmalloc.h>
55723 +#include <linux/grinternal.h>
55724 +
55725 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55726 + size_t count, loff_t *ppos);
55727 +extern int gr_acl_is_enabled(void);
55728 +
55729 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55730 +static int gr_learn_attached;
55731 +
55732 +/* use a 512k buffer */
55733 +#define LEARN_BUFFER_SIZE (512 * 1024)
55734 +
55735 +static DEFINE_SPINLOCK(gr_learn_lock);
55736 +static DEFINE_MUTEX(gr_learn_user_mutex);
55737 +
55738 +/* we need to maintain two buffers, so that the kernel context of grlearn
55739 + uses a semaphore around the userspace copying, and the other kernel contexts
55740 + use a spinlock when copying into the buffer, since they cannot sleep
55741 +*/
55742 +static char *learn_buffer;
55743 +static char *learn_buffer_user;
55744 +static int learn_buffer_len;
55745 +static int learn_buffer_user_len;
55746 +
55747 +static ssize_t
55748 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55749 +{
55750 + DECLARE_WAITQUEUE(wait, current);
55751 + ssize_t retval = 0;
55752 +
55753 + add_wait_queue(&learn_wait, &wait);
55754 + set_current_state(TASK_INTERRUPTIBLE);
55755 + do {
55756 + mutex_lock(&gr_learn_user_mutex);
55757 + spin_lock(&gr_learn_lock);
55758 + if (learn_buffer_len)
55759 + break;
55760 + spin_unlock(&gr_learn_lock);
55761 + mutex_unlock(&gr_learn_user_mutex);
55762 + if (file->f_flags & O_NONBLOCK) {
55763 + retval = -EAGAIN;
55764 + goto out;
55765 + }
55766 + if (signal_pending(current)) {
55767 + retval = -ERESTARTSYS;
55768 + goto out;
55769 + }
55770 +
55771 + schedule();
55772 + } while (1);
55773 +
55774 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55775 + learn_buffer_user_len = learn_buffer_len;
55776 + retval = learn_buffer_len;
55777 + learn_buffer_len = 0;
55778 +
55779 + spin_unlock(&gr_learn_lock);
55780 +
55781 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55782 + retval = -EFAULT;
55783 +
55784 + mutex_unlock(&gr_learn_user_mutex);
55785 +out:
55786 + set_current_state(TASK_RUNNING);
55787 + remove_wait_queue(&learn_wait, &wait);
55788 + return retval;
55789 +}
55790 +
55791 +static unsigned int
55792 +poll_learn(struct file * file, poll_table * wait)
55793 +{
55794 + poll_wait(file, &learn_wait, wait);
55795 +
55796 + if (learn_buffer_len)
55797 + return (POLLIN | POLLRDNORM);
55798 +
55799 + return 0;
55800 +}
55801 +
55802 +void
55803 +gr_clear_learn_entries(void)
55804 +{
55805 + char *tmp;
55806 +
55807 + mutex_lock(&gr_learn_user_mutex);
55808 + spin_lock(&gr_learn_lock);
55809 + tmp = learn_buffer;
55810 + learn_buffer = NULL;
55811 + spin_unlock(&gr_learn_lock);
55812 + if (tmp)
55813 + vfree(tmp);
55814 + if (learn_buffer_user != NULL) {
55815 + vfree(learn_buffer_user);
55816 + learn_buffer_user = NULL;
55817 + }
55818 + learn_buffer_len = 0;
55819 + mutex_unlock(&gr_learn_user_mutex);
55820 +
55821 + return;
55822 +}
55823 +
55824 +void
55825 +gr_add_learn_entry(const char *fmt, ...)
55826 +{
55827 + va_list args;
55828 + unsigned int len;
55829 +
55830 + if (!gr_learn_attached)
55831 + return;
55832 +
55833 + spin_lock(&gr_learn_lock);
55834 +
55835 + /* leave a gap at the end so we know when it's "full" but don't have to
55836 + compute the exact length of the string we're trying to append
55837 + */
55838 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55839 + spin_unlock(&gr_learn_lock);
55840 + wake_up_interruptible(&learn_wait);
55841 + return;
55842 + }
55843 + if (learn_buffer == NULL) {
55844 + spin_unlock(&gr_learn_lock);
55845 + return;
55846 + }
55847 +
55848 + va_start(args, fmt);
55849 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55850 + va_end(args);
55851 +
55852 + learn_buffer_len += len + 1;
55853 +
55854 + spin_unlock(&gr_learn_lock);
55855 + wake_up_interruptible(&learn_wait);
55856 +
55857 + return;
55858 +}
55859 +
55860 +static int
55861 +open_learn(struct inode *inode, struct file *file)
55862 +{
55863 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55864 + return -EBUSY;
55865 + if (file->f_mode & FMODE_READ) {
55866 + int retval = 0;
55867 + mutex_lock(&gr_learn_user_mutex);
55868 + if (learn_buffer == NULL)
55869 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55870 + if (learn_buffer_user == NULL)
55871 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55872 + if (learn_buffer == NULL) {
55873 + retval = -ENOMEM;
55874 + goto out_error;
55875 + }
55876 + if (learn_buffer_user == NULL) {
55877 + retval = -ENOMEM;
55878 + goto out_error;
55879 + }
55880 + learn_buffer_len = 0;
55881 + learn_buffer_user_len = 0;
55882 + gr_learn_attached = 1;
55883 +out_error:
55884 + mutex_unlock(&gr_learn_user_mutex);
55885 + return retval;
55886 + }
55887 + return 0;
55888 +}
55889 +
55890 +static int
55891 +close_learn(struct inode *inode, struct file *file)
55892 +{
55893 + if (file->f_mode & FMODE_READ) {
55894 + char *tmp = NULL;
55895 + mutex_lock(&gr_learn_user_mutex);
55896 + spin_lock(&gr_learn_lock);
55897 + tmp = learn_buffer;
55898 + learn_buffer = NULL;
55899 + spin_unlock(&gr_learn_lock);
55900 + if (tmp)
55901 + vfree(tmp);
55902 + if (learn_buffer_user != NULL) {
55903 + vfree(learn_buffer_user);
55904 + learn_buffer_user = NULL;
55905 + }
55906 + learn_buffer_len = 0;
55907 + learn_buffer_user_len = 0;
55908 + gr_learn_attached = 0;
55909 + mutex_unlock(&gr_learn_user_mutex);
55910 + }
55911 +
55912 + return 0;
55913 +}
55914 +
55915 +const struct file_operations grsec_fops = {
55916 + .read = read_learn,
55917 + .write = write_grsec_handler,
55918 + .open = open_learn,
55919 + .release = close_learn,
55920 + .poll = poll_learn,
55921 +};
55922 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55923 new file mode 100644
55924 index 0000000..39645c9
55925 --- /dev/null
55926 +++ b/grsecurity/gracl_res.c
55927 @@ -0,0 +1,68 @@
55928 +#include <linux/kernel.h>
55929 +#include <linux/sched.h>
55930 +#include <linux/gracl.h>
55931 +#include <linux/grinternal.h>
55932 +
55933 +static const char *restab_log[] = {
55934 + [RLIMIT_CPU] = "RLIMIT_CPU",
55935 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55936 + [RLIMIT_DATA] = "RLIMIT_DATA",
55937 + [RLIMIT_STACK] = "RLIMIT_STACK",
55938 + [RLIMIT_CORE] = "RLIMIT_CORE",
55939 + [RLIMIT_RSS] = "RLIMIT_RSS",
55940 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55941 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55942 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55943 + [RLIMIT_AS] = "RLIMIT_AS",
55944 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55945 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55946 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55947 + [RLIMIT_NICE] = "RLIMIT_NICE",
55948 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55949 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55950 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55951 +};
55952 +
55953 +void
55954 +gr_log_resource(const struct task_struct *task,
55955 + const int res, const unsigned long wanted, const int gt)
55956 +{
55957 + const struct cred *cred;
55958 + unsigned long rlim;
55959 +
55960 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55961 + return;
55962 +
55963 + // not yet supported resource
55964 + if (unlikely(!restab_log[res]))
55965 + return;
55966 +
55967 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55968 + rlim = task_rlimit_max(task, res);
55969 + else
55970 + rlim = task_rlimit(task, res);
55971 +
55972 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55973 + return;
55974 +
55975 + rcu_read_lock();
55976 + cred = __task_cred(task);
55977 +
55978 + if (res == RLIMIT_NPROC &&
55979 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55980 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55981 + goto out_rcu_unlock;
55982 + else if (res == RLIMIT_MEMLOCK &&
55983 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55984 + goto out_rcu_unlock;
55985 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55986 + goto out_rcu_unlock;
55987 + rcu_read_unlock();
55988 +
55989 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55990 +
55991 + return;
55992 +out_rcu_unlock:
55993 + rcu_read_unlock();
55994 + return;
55995 +}
55996 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55997 new file mode 100644
55998 index 0000000..5556be3
55999 --- /dev/null
56000 +++ b/grsecurity/gracl_segv.c
56001 @@ -0,0 +1,299 @@
56002 +#include <linux/kernel.h>
56003 +#include <linux/mm.h>
56004 +#include <asm/uaccess.h>
56005 +#include <asm/errno.h>
56006 +#include <asm/mman.h>
56007 +#include <net/sock.h>
56008 +#include <linux/file.h>
56009 +#include <linux/fs.h>
56010 +#include <linux/net.h>
56011 +#include <linux/in.h>
56012 +#include <linux/slab.h>
56013 +#include <linux/types.h>
56014 +#include <linux/sched.h>
56015 +#include <linux/timer.h>
56016 +#include <linux/gracl.h>
56017 +#include <linux/grsecurity.h>
56018 +#include <linux/grinternal.h>
56019 +
56020 +static struct crash_uid *uid_set;
56021 +static unsigned short uid_used;
56022 +static DEFINE_SPINLOCK(gr_uid_lock);
56023 +extern rwlock_t gr_inode_lock;
56024 +extern struct acl_subject_label *
56025 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56026 + struct acl_role_label *role);
56027 +
56028 +#ifdef CONFIG_BTRFS_FS
56029 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56030 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56031 +#endif
56032 +
56033 +static inline dev_t __get_dev(const struct dentry *dentry)
56034 +{
56035 +#ifdef CONFIG_BTRFS_FS
56036 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56037 + return get_btrfs_dev_from_inode(dentry->d_inode);
56038 + else
56039 +#endif
56040 + return dentry->d_inode->i_sb->s_dev;
56041 +}
56042 +
56043 +int
56044 +gr_init_uidset(void)
56045 +{
56046 + uid_set =
56047 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56048 + uid_used = 0;
56049 +
56050 + return uid_set ? 1 : 0;
56051 +}
56052 +
56053 +void
56054 +gr_free_uidset(void)
56055 +{
56056 + if (uid_set)
56057 + kfree(uid_set);
56058 +
56059 + return;
56060 +}
56061 +
56062 +int
56063 +gr_find_uid(const uid_t uid)
56064 +{
56065 + struct crash_uid *tmp = uid_set;
56066 + uid_t buid;
56067 + int low = 0, high = uid_used - 1, mid;
56068 +
56069 + while (high >= low) {
56070 + mid = (low + high) >> 1;
56071 + buid = tmp[mid].uid;
56072 + if (buid == uid)
56073 + return mid;
56074 + if (buid > uid)
56075 + high = mid - 1;
56076 + if (buid < uid)
56077 + low = mid + 1;
56078 + }
56079 +
56080 + return -1;
56081 +}
56082 +
56083 +static __inline__ void
56084 +gr_insertsort(void)
56085 +{
56086 + unsigned short i, j;
56087 + struct crash_uid index;
56088 +
56089 + for (i = 1; i < uid_used; i++) {
56090 + index = uid_set[i];
56091 + j = i;
56092 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56093 + uid_set[j] = uid_set[j - 1];
56094 + j--;
56095 + }
56096 + uid_set[j] = index;
56097 + }
56098 +
56099 + return;
56100 +}
56101 +
56102 +static __inline__ void
56103 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56104 +{
56105 + int loc;
56106 +
56107 + if (uid_used == GR_UIDTABLE_MAX)
56108 + return;
56109 +
56110 + loc = gr_find_uid(uid);
56111 +
56112 + if (loc >= 0) {
56113 + uid_set[loc].expires = expires;
56114 + return;
56115 + }
56116 +
56117 + uid_set[uid_used].uid = uid;
56118 + uid_set[uid_used].expires = expires;
56119 + uid_used++;
56120 +
56121 + gr_insertsort();
56122 +
56123 + return;
56124 +}
56125 +
56126 +void
56127 +gr_remove_uid(const unsigned short loc)
56128 +{
56129 + unsigned short i;
56130 +
56131 + for (i = loc + 1; i < uid_used; i++)
56132 + uid_set[i - 1] = uid_set[i];
56133 +
56134 + uid_used--;
56135 +
56136 + return;
56137 +}
56138 +
56139 +int
56140 +gr_check_crash_uid(const uid_t uid)
56141 +{
56142 + int loc;
56143 + int ret = 0;
56144 +
56145 + if (unlikely(!gr_acl_is_enabled()))
56146 + return 0;
56147 +
56148 + spin_lock(&gr_uid_lock);
56149 + loc = gr_find_uid(uid);
56150 +
56151 + if (loc < 0)
56152 + goto out_unlock;
56153 +
56154 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56155 + gr_remove_uid(loc);
56156 + else
56157 + ret = 1;
56158 +
56159 +out_unlock:
56160 + spin_unlock(&gr_uid_lock);
56161 + return ret;
56162 +}
56163 +
56164 +static __inline__ int
56165 +proc_is_setxid(const struct cred *cred)
56166 +{
56167 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56168 + cred->uid != cred->fsuid)
56169 + return 1;
56170 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56171 + cred->gid != cred->fsgid)
56172 + return 1;
56173 +
56174 + return 0;
56175 +}
56176 +
56177 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56178 +
56179 +void
56180 +gr_handle_crash(struct task_struct *task, const int sig)
56181 +{
56182 + struct acl_subject_label *curr;
56183 + struct task_struct *tsk, *tsk2;
56184 + const struct cred *cred;
56185 + const struct cred *cred2;
56186 +
56187 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56188 + return;
56189 +
56190 + if (unlikely(!gr_acl_is_enabled()))
56191 + return;
56192 +
56193 + curr = task->acl;
56194 +
56195 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56196 + return;
56197 +
56198 + if (time_before_eq(curr->expires, get_seconds())) {
56199 + curr->expires = 0;
56200 + curr->crashes = 0;
56201 + }
56202 +
56203 + curr->crashes++;
56204 +
56205 + if (!curr->expires)
56206 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56207 +
56208 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56209 + time_after(curr->expires, get_seconds())) {
56210 + rcu_read_lock();
56211 + cred = __task_cred(task);
56212 + if (cred->uid && proc_is_setxid(cred)) {
56213 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56214 + spin_lock(&gr_uid_lock);
56215 + gr_insert_uid(cred->uid, curr->expires);
56216 + spin_unlock(&gr_uid_lock);
56217 + curr->expires = 0;
56218 + curr->crashes = 0;
56219 + read_lock(&tasklist_lock);
56220 + do_each_thread(tsk2, tsk) {
56221 + cred2 = __task_cred(tsk);
56222 + if (tsk != task && cred2->uid == cred->uid)
56223 + gr_fake_force_sig(SIGKILL, tsk);
56224 + } while_each_thread(tsk2, tsk);
56225 + read_unlock(&tasklist_lock);
56226 + } else {
56227 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56228 + read_lock(&tasklist_lock);
56229 + read_lock(&grsec_exec_file_lock);
56230 + do_each_thread(tsk2, tsk) {
56231 + if (likely(tsk != task)) {
56232 + // if this thread has the same subject as the one that triggered
56233 + // RES_CRASH and it's the same binary, kill it
56234 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56235 + gr_fake_force_sig(SIGKILL, tsk);
56236 + }
56237 + } while_each_thread(tsk2, tsk);
56238 + read_unlock(&grsec_exec_file_lock);
56239 + read_unlock(&tasklist_lock);
56240 + }
56241 + rcu_read_unlock();
56242 + }
56243 +
56244 + return;
56245 +}
56246 +
56247 +int
56248 +gr_check_crash_exec(const struct file *filp)
56249 +{
56250 + struct acl_subject_label *curr;
56251 +
56252 + if (unlikely(!gr_acl_is_enabled()))
56253 + return 0;
56254 +
56255 + read_lock(&gr_inode_lock);
56256 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56257 + __get_dev(filp->f_path.dentry),
56258 + current->role);
56259 + read_unlock(&gr_inode_lock);
56260 +
56261 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56262 + (!curr->crashes && !curr->expires))
56263 + return 0;
56264 +
56265 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56266 + time_after(curr->expires, get_seconds()))
56267 + return 1;
56268 + else if (time_before_eq(curr->expires, get_seconds())) {
56269 + curr->crashes = 0;
56270 + curr->expires = 0;
56271 + }
56272 +
56273 + return 0;
56274 +}
56275 +
56276 +void
56277 +gr_handle_alertkill(struct task_struct *task)
56278 +{
56279 + struct acl_subject_label *curracl;
56280 + __u32 curr_ip;
56281 + struct task_struct *p, *p2;
56282 +
56283 + if (unlikely(!gr_acl_is_enabled()))
56284 + return;
56285 +
56286 + curracl = task->acl;
56287 + curr_ip = task->signal->curr_ip;
56288 +
56289 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56290 + read_lock(&tasklist_lock);
56291 + do_each_thread(p2, p) {
56292 + if (p->signal->curr_ip == curr_ip)
56293 + gr_fake_force_sig(SIGKILL, p);
56294 + } while_each_thread(p2, p);
56295 + read_unlock(&tasklist_lock);
56296 + } else if (curracl->mode & GR_KILLPROC)
56297 + gr_fake_force_sig(SIGKILL, task);
56298 +
56299 + return;
56300 +}
56301 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56302 new file mode 100644
56303 index 0000000..9d83a69
56304 --- /dev/null
56305 +++ b/grsecurity/gracl_shm.c
56306 @@ -0,0 +1,40 @@
56307 +#include <linux/kernel.h>
56308 +#include <linux/mm.h>
56309 +#include <linux/sched.h>
56310 +#include <linux/file.h>
56311 +#include <linux/ipc.h>
56312 +#include <linux/gracl.h>
56313 +#include <linux/grsecurity.h>
56314 +#include <linux/grinternal.h>
56315 +
56316 +int
56317 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56318 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56319 +{
56320 + struct task_struct *task;
56321 +
56322 + if (!gr_acl_is_enabled())
56323 + return 1;
56324 +
56325 + rcu_read_lock();
56326 + read_lock(&tasklist_lock);
56327 +
56328 + task = find_task_by_vpid(shm_cprid);
56329 +
56330 + if (unlikely(!task))
56331 + task = find_task_by_vpid(shm_lapid);
56332 +
56333 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56334 + (task->pid == shm_lapid)) &&
56335 + (task->acl->mode & GR_PROTSHM) &&
56336 + (task->acl != current->acl))) {
56337 + read_unlock(&tasklist_lock);
56338 + rcu_read_unlock();
56339 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56340 + return 0;
56341 + }
56342 + read_unlock(&tasklist_lock);
56343 + rcu_read_unlock();
56344 +
56345 + return 1;
56346 +}
56347 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56348 new file mode 100644
56349 index 0000000..bc0be01
56350 --- /dev/null
56351 +++ b/grsecurity/grsec_chdir.c
56352 @@ -0,0 +1,19 @@
56353 +#include <linux/kernel.h>
56354 +#include <linux/sched.h>
56355 +#include <linux/fs.h>
56356 +#include <linux/file.h>
56357 +#include <linux/grsecurity.h>
56358 +#include <linux/grinternal.h>
56359 +
56360 +void
56361 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56362 +{
56363 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56364 + if ((grsec_enable_chdir && grsec_enable_group &&
56365 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56366 + !grsec_enable_group)) {
56367 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56368 + }
56369 +#endif
56370 + return;
56371 +}
56372 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56373 new file mode 100644
56374 index 0000000..9807ee2
56375 --- /dev/null
56376 +++ b/grsecurity/grsec_chroot.c
56377 @@ -0,0 +1,368 @@
56378 +#include <linux/kernel.h>
56379 +#include <linux/module.h>
56380 +#include <linux/sched.h>
56381 +#include <linux/file.h>
56382 +#include <linux/fs.h>
56383 +#include <linux/mount.h>
56384 +#include <linux/types.h>
56385 +#include "../fs/mount.h"
56386 +#include <linux/grsecurity.h>
56387 +#include <linux/grinternal.h>
56388 +
56389 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56390 +{
56391 +#ifdef CONFIG_GRKERNSEC
56392 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56393 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56394 + task->gr_is_chrooted = 1;
56395 + else
56396 + task->gr_is_chrooted = 0;
56397 +
56398 + task->gr_chroot_dentry = path->dentry;
56399 +#endif
56400 + return;
56401 +}
56402 +
56403 +void gr_clear_chroot_entries(struct task_struct *task)
56404 +{
56405 +#ifdef CONFIG_GRKERNSEC
56406 + task->gr_is_chrooted = 0;
56407 + task->gr_chroot_dentry = NULL;
56408 +#endif
56409 + return;
56410 +}
56411 +
56412 +int
56413 +gr_handle_chroot_unix(const pid_t pid)
56414 +{
56415 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56416 + struct task_struct *p;
56417 +
56418 + if (unlikely(!grsec_enable_chroot_unix))
56419 + return 1;
56420 +
56421 + if (likely(!proc_is_chrooted(current)))
56422 + return 1;
56423 +
56424 + rcu_read_lock();
56425 + read_lock(&tasklist_lock);
56426 + p = find_task_by_vpid_unrestricted(pid);
56427 + if (unlikely(p && !have_same_root(current, p))) {
56428 + read_unlock(&tasklist_lock);
56429 + rcu_read_unlock();
56430 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56431 + return 0;
56432 + }
56433 + read_unlock(&tasklist_lock);
56434 + rcu_read_unlock();
56435 +#endif
56436 + return 1;
56437 +}
56438 +
56439 +int
56440 +gr_handle_chroot_nice(void)
56441 +{
56442 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56443 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56444 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56445 + return -EPERM;
56446 + }
56447 +#endif
56448 + return 0;
56449 +}
56450 +
56451 +int
56452 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56453 +{
56454 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56455 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56456 + && proc_is_chrooted(current)) {
56457 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56458 + return -EACCES;
56459 + }
56460 +#endif
56461 + return 0;
56462 +}
56463 +
56464 +int
56465 +gr_handle_chroot_rawio(const struct inode *inode)
56466 +{
56467 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56468 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56469 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56470 + return 1;
56471 +#endif
56472 + return 0;
56473 +}
56474 +
56475 +int
56476 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56477 +{
56478 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56479 + struct task_struct *p;
56480 + int ret = 0;
56481 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56482 + return ret;
56483 +
56484 + read_lock(&tasklist_lock);
56485 + do_each_pid_task(pid, type, p) {
56486 + if (!have_same_root(current, p)) {
56487 + ret = 1;
56488 + goto out;
56489 + }
56490 + } while_each_pid_task(pid, type, p);
56491 +out:
56492 + read_unlock(&tasklist_lock);
56493 + return ret;
56494 +#endif
56495 + return 0;
56496 +}
56497 +
56498 +int
56499 +gr_pid_is_chrooted(struct task_struct *p)
56500 +{
56501 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56502 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56503 + return 0;
56504 +
56505 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56506 + !have_same_root(current, p)) {
56507 + return 1;
56508 + }
56509 +#endif
56510 + return 0;
56511 +}
56512 +
56513 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56514 +
56515 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56516 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56517 +{
56518 + struct path path, currentroot;
56519 + int ret = 0;
56520 +
56521 + path.dentry = (struct dentry *)u_dentry;
56522 + path.mnt = (struct vfsmount *)u_mnt;
56523 + get_fs_root(current->fs, &currentroot);
56524 + if (path_is_under(&path, &currentroot))
56525 + ret = 1;
56526 + path_put(&currentroot);
56527 +
56528 + return ret;
56529 +}
56530 +#endif
56531 +
56532 +int
56533 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56534 +{
56535 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56536 + if (!grsec_enable_chroot_fchdir)
56537 + return 1;
56538 +
56539 + if (!proc_is_chrooted(current))
56540 + return 1;
56541 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56542 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56543 + return 0;
56544 + }
56545 +#endif
56546 + return 1;
56547 +}
56548 +
56549 +int
56550 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56551 + const time_t shm_createtime)
56552 +{
56553 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56554 + struct task_struct *p;
56555 + time_t starttime;
56556 +
56557 + if (unlikely(!grsec_enable_chroot_shmat))
56558 + return 1;
56559 +
56560 + if (likely(!proc_is_chrooted(current)))
56561 + return 1;
56562 +
56563 + rcu_read_lock();
56564 + read_lock(&tasklist_lock);
56565 +
56566 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56567 + starttime = p->start_time.tv_sec;
56568 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56569 + if (have_same_root(current, p)) {
56570 + goto allow;
56571 + } else {
56572 + read_unlock(&tasklist_lock);
56573 + rcu_read_unlock();
56574 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56575 + return 0;
56576 + }
56577 + }
56578 + /* creator exited, pid reuse, fall through to next check */
56579 + }
56580 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56581 + if (unlikely(!have_same_root(current, p))) {
56582 + read_unlock(&tasklist_lock);
56583 + rcu_read_unlock();
56584 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56585 + return 0;
56586 + }
56587 + }
56588 +
56589 +allow:
56590 + read_unlock(&tasklist_lock);
56591 + rcu_read_unlock();
56592 +#endif
56593 + return 1;
56594 +}
56595 +
56596 +void
56597 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56598 +{
56599 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56600 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56601 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56602 +#endif
56603 + return;
56604 +}
56605 +
56606 +int
56607 +gr_handle_chroot_mknod(const struct dentry *dentry,
56608 + const struct vfsmount *mnt, const int mode)
56609 +{
56610 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56611 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56612 + proc_is_chrooted(current)) {
56613 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56614 + return -EPERM;
56615 + }
56616 +#endif
56617 + return 0;
56618 +}
56619 +
56620 +int
56621 +gr_handle_chroot_mount(const struct dentry *dentry,
56622 + const struct vfsmount *mnt, const char *dev_name)
56623 +{
56624 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56625 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56626 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56627 + return -EPERM;
56628 + }
56629 +#endif
56630 + return 0;
56631 +}
56632 +
56633 +int
56634 +gr_handle_chroot_pivot(void)
56635 +{
56636 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56637 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56638 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56639 + return -EPERM;
56640 + }
56641 +#endif
56642 + return 0;
56643 +}
56644 +
56645 +int
56646 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56647 +{
56648 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56649 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56650 + !gr_is_outside_chroot(dentry, mnt)) {
56651 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56652 + return -EPERM;
56653 + }
56654 +#endif
56655 + return 0;
56656 +}
56657 +
56658 +extern const char *captab_log[];
56659 +extern int captab_log_entries;
56660 +
56661 +int
56662 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56663 +{
56664 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56665 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56666 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56667 + if (cap_raised(chroot_caps, cap)) {
56668 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56669 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56670 + }
56671 + return 0;
56672 + }
56673 + }
56674 +#endif
56675 + return 1;
56676 +}
56677 +
56678 +int
56679 +gr_chroot_is_capable(const int cap)
56680 +{
56681 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56682 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56683 +#endif
56684 + return 1;
56685 +}
56686 +
56687 +int
56688 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56689 +{
56690 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56691 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56692 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56693 + if (cap_raised(chroot_caps, cap)) {
56694 + return 0;
56695 + }
56696 + }
56697 +#endif
56698 + return 1;
56699 +}
56700 +
56701 +int
56702 +gr_chroot_is_capable_nolog(const int cap)
56703 +{
56704 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56705 + return gr_task_chroot_is_capable_nolog(current, cap);
56706 +#endif
56707 + return 1;
56708 +}
56709 +
56710 +int
56711 +gr_handle_chroot_sysctl(const int op)
56712 +{
56713 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56714 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56715 + proc_is_chrooted(current))
56716 + return -EACCES;
56717 +#endif
56718 + return 0;
56719 +}
56720 +
56721 +void
56722 +gr_handle_chroot_chdir(struct path *path)
56723 +{
56724 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56725 + if (grsec_enable_chroot_chdir)
56726 + set_fs_pwd(current->fs, path);
56727 +#endif
56728 + return;
56729 +}
56730 +
56731 +int
56732 +gr_handle_chroot_chmod(const struct dentry *dentry,
56733 + const struct vfsmount *mnt, const int mode)
56734 +{
56735 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56736 + /* allow chmod +s on directories, but not files */
56737 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56738 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56739 + proc_is_chrooted(current)) {
56740 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56741 + return -EPERM;
56742 + }
56743 +#endif
56744 + return 0;
56745 +}
56746 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56747 new file mode 100644
56748 index 0000000..213ad8b
56749 --- /dev/null
56750 +++ b/grsecurity/grsec_disabled.c
56751 @@ -0,0 +1,437 @@
56752 +#include <linux/kernel.h>
56753 +#include <linux/module.h>
56754 +#include <linux/sched.h>
56755 +#include <linux/file.h>
56756 +#include <linux/fs.h>
56757 +#include <linux/kdev_t.h>
56758 +#include <linux/net.h>
56759 +#include <linux/in.h>
56760 +#include <linux/ip.h>
56761 +#include <linux/skbuff.h>
56762 +#include <linux/sysctl.h>
56763 +
56764 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56765 +void
56766 +pax_set_initial_flags(struct linux_binprm *bprm)
56767 +{
56768 + return;
56769 +}
56770 +#endif
56771 +
56772 +#ifdef CONFIG_SYSCTL
56773 +__u32
56774 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56775 +{
56776 + return 0;
56777 +}
56778 +#endif
56779 +
56780 +#ifdef CONFIG_TASKSTATS
56781 +int gr_is_taskstats_denied(int pid)
56782 +{
56783 + return 0;
56784 +}
56785 +#endif
56786 +
56787 +int
56788 +gr_acl_is_enabled(void)
56789 +{
56790 + return 0;
56791 +}
56792 +
56793 +void
56794 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56795 +{
56796 + return;
56797 +}
56798 +
56799 +int
56800 +gr_handle_rawio(const struct inode *inode)
56801 +{
56802 + return 0;
56803 +}
56804 +
56805 +void
56806 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56807 +{
56808 + return;
56809 +}
56810 +
56811 +int
56812 +gr_handle_ptrace(struct task_struct *task, const long request)
56813 +{
56814 + return 0;
56815 +}
56816 +
56817 +int
56818 +gr_handle_proc_ptrace(struct task_struct *task)
56819 +{
56820 + return 0;
56821 +}
56822 +
56823 +void
56824 +gr_learn_resource(const struct task_struct *task,
56825 + const int res, const unsigned long wanted, const int gt)
56826 +{
56827 + return;
56828 +}
56829 +
56830 +int
56831 +gr_set_acls(const int type)
56832 +{
56833 + return 0;
56834 +}
56835 +
56836 +int
56837 +gr_check_hidden_task(const struct task_struct *tsk)
56838 +{
56839 + return 0;
56840 +}
56841 +
56842 +int
56843 +gr_check_protected_task(const struct task_struct *task)
56844 +{
56845 + return 0;
56846 +}
56847 +
56848 +int
56849 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56850 +{
56851 + return 0;
56852 +}
56853 +
56854 +void
56855 +gr_copy_label(struct task_struct *tsk)
56856 +{
56857 + return;
56858 +}
56859 +
56860 +void
56861 +gr_set_pax_flags(struct task_struct *task)
56862 +{
56863 + return;
56864 +}
56865 +
56866 +int
56867 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56868 + const int unsafe_share)
56869 +{
56870 + return 0;
56871 +}
56872 +
56873 +void
56874 +gr_handle_delete(const ino_t ino, const dev_t dev)
56875 +{
56876 + return;
56877 +}
56878 +
56879 +void
56880 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56881 +{
56882 + return;
56883 +}
56884 +
56885 +void
56886 +gr_handle_crash(struct task_struct *task, const int sig)
56887 +{
56888 + return;
56889 +}
56890 +
56891 +int
56892 +gr_check_crash_exec(const struct file *filp)
56893 +{
56894 + return 0;
56895 +}
56896 +
56897 +int
56898 +gr_check_crash_uid(const uid_t uid)
56899 +{
56900 + return 0;
56901 +}
56902 +
56903 +void
56904 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56905 + struct dentry *old_dentry,
56906 + struct dentry *new_dentry,
56907 + struct vfsmount *mnt, const __u8 replace)
56908 +{
56909 + return;
56910 +}
56911 +
56912 +int
56913 +gr_search_socket(const int family, const int type, const int protocol)
56914 +{
56915 + return 1;
56916 +}
56917 +
56918 +int
56919 +gr_search_connectbind(const int mode, const struct socket *sock,
56920 + const struct sockaddr_in *addr)
56921 +{
56922 + return 0;
56923 +}
56924 +
56925 +void
56926 +gr_handle_alertkill(struct task_struct *task)
56927 +{
56928 + return;
56929 +}
56930 +
56931 +__u32
56932 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56933 +{
56934 + return 1;
56935 +}
56936 +
56937 +__u32
56938 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56939 + const struct vfsmount * mnt)
56940 +{
56941 + return 1;
56942 +}
56943 +
56944 +__u32
56945 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56946 + int acc_mode)
56947 +{
56948 + return 1;
56949 +}
56950 +
56951 +__u32
56952 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56953 +{
56954 + return 1;
56955 +}
56956 +
56957 +__u32
56958 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56959 +{
56960 + return 1;
56961 +}
56962 +
56963 +int
56964 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56965 + unsigned int *vm_flags)
56966 +{
56967 + return 1;
56968 +}
56969 +
56970 +__u32
56971 +gr_acl_handle_truncate(const struct dentry * dentry,
56972 + const struct vfsmount * mnt)
56973 +{
56974 + return 1;
56975 +}
56976 +
56977 +__u32
56978 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56979 +{
56980 + return 1;
56981 +}
56982 +
56983 +__u32
56984 +gr_acl_handle_access(const struct dentry * dentry,
56985 + const struct vfsmount * mnt, const int fmode)
56986 +{
56987 + return 1;
56988 +}
56989 +
56990 +__u32
56991 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56992 + umode_t *mode)
56993 +{
56994 + return 1;
56995 +}
56996 +
56997 +__u32
56998 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56999 +{
57000 + return 1;
57001 +}
57002 +
57003 +__u32
57004 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57005 +{
57006 + return 1;
57007 +}
57008 +
57009 +void
57010 +grsecurity_init(void)
57011 +{
57012 + return;
57013 +}
57014 +
57015 +umode_t gr_acl_umask(void)
57016 +{
57017 + return 0;
57018 +}
57019 +
57020 +__u32
57021 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57022 + const struct dentry * parent_dentry,
57023 + const struct vfsmount * parent_mnt,
57024 + const int mode)
57025 +{
57026 + return 1;
57027 +}
57028 +
57029 +__u32
57030 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57031 + const struct dentry * parent_dentry,
57032 + const struct vfsmount * parent_mnt)
57033 +{
57034 + return 1;
57035 +}
57036 +
57037 +__u32
57038 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57039 + const struct dentry * parent_dentry,
57040 + const struct vfsmount * parent_mnt, const char *from)
57041 +{
57042 + return 1;
57043 +}
57044 +
57045 +__u32
57046 +gr_acl_handle_link(const struct dentry * new_dentry,
57047 + const struct dentry * parent_dentry,
57048 + const struct vfsmount * parent_mnt,
57049 + const struct dentry * old_dentry,
57050 + const struct vfsmount * old_mnt, const char *to)
57051 +{
57052 + return 1;
57053 +}
57054 +
57055 +int
57056 +gr_acl_handle_rename(const struct dentry *new_dentry,
57057 + const struct dentry *parent_dentry,
57058 + const struct vfsmount *parent_mnt,
57059 + const struct dentry *old_dentry,
57060 + const struct inode *old_parent_inode,
57061 + const struct vfsmount *old_mnt, const char *newname)
57062 +{
57063 + return 0;
57064 +}
57065 +
57066 +int
57067 +gr_acl_handle_filldir(const struct file *file, const char *name,
57068 + const int namelen, const ino_t ino)
57069 +{
57070 + return 1;
57071 +}
57072 +
57073 +int
57074 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57075 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57076 +{
57077 + return 1;
57078 +}
57079 +
57080 +int
57081 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57082 +{
57083 + return 0;
57084 +}
57085 +
57086 +int
57087 +gr_search_accept(const struct socket *sock)
57088 +{
57089 + return 0;
57090 +}
57091 +
57092 +int
57093 +gr_search_listen(const struct socket *sock)
57094 +{
57095 + return 0;
57096 +}
57097 +
57098 +int
57099 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57100 +{
57101 + return 0;
57102 +}
57103 +
57104 +__u32
57105 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57106 +{
57107 + return 1;
57108 +}
57109 +
57110 +__u32
57111 +gr_acl_handle_creat(const struct dentry * dentry,
57112 + const struct dentry * p_dentry,
57113 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57114 + const int imode)
57115 +{
57116 + return 1;
57117 +}
57118 +
57119 +void
57120 +gr_acl_handle_exit(void)
57121 +{
57122 + return;
57123 +}
57124 +
57125 +int
57126 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57127 +{
57128 + return 1;
57129 +}
57130 +
57131 +void
57132 +gr_set_role_label(const uid_t uid, const gid_t gid)
57133 +{
57134 + return;
57135 +}
57136 +
57137 +int
57138 +gr_acl_handle_procpidmem(const struct task_struct *task)
57139 +{
57140 + return 0;
57141 +}
57142 +
57143 +int
57144 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57145 +{
57146 + return 0;
57147 +}
57148 +
57149 +int
57150 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57151 +{
57152 + return 0;
57153 +}
57154 +
57155 +void
57156 +gr_set_kernel_label(struct task_struct *task)
57157 +{
57158 + return;
57159 +}
57160 +
57161 +int
57162 +gr_check_user_change(int real, int effective, int fs)
57163 +{
57164 + return 0;
57165 +}
57166 +
57167 +int
57168 +gr_check_group_change(int real, int effective, int fs)
57169 +{
57170 + return 0;
57171 +}
57172 +
57173 +int gr_acl_enable_at_secure(void)
57174 +{
57175 + return 0;
57176 +}
57177 +
57178 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57179 +{
57180 + return dentry->d_inode->i_sb->s_dev;
57181 +}
57182 +
57183 +EXPORT_SYMBOL(gr_learn_resource);
57184 +EXPORT_SYMBOL(gr_set_kernel_label);
57185 +#ifdef CONFIG_SECURITY
57186 +EXPORT_SYMBOL(gr_check_user_change);
57187 +EXPORT_SYMBOL(gr_check_group_change);
57188 +#endif
57189 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57190 new file mode 100644
57191 index 0000000..abfa971
57192 --- /dev/null
57193 +++ b/grsecurity/grsec_exec.c
57194 @@ -0,0 +1,174 @@
57195 +#include <linux/kernel.h>
57196 +#include <linux/sched.h>
57197 +#include <linux/file.h>
57198 +#include <linux/binfmts.h>
57199 +#include <linux/fs.h>
57200 +#include <linux/types.h>
57201 +#include <linux/grdefs.h>
57202 +#include <linux/grsecurity.h>
57203 +#include <linux/grinternal.h>
57204 +#include <linux/capability.h>
57205 +#include <linux/module.h>
57206 +
57207 +#include <asm/uaccess.h>
57208 +
57209 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57210 +static char gr_exec_arg_buf[132];
57211 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57212 +#endif
57213 +
57214 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57215 +
57216 +void
57217 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57218 +{
57219 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57220 + char *grarg = gr_exec_arg_buf;
57221 + unsigned int i, x, execlen = 0;
57222 + char c;
57223 +
57224 + if (!((grsec_enable_execlog && grsec_enable_group &&
57225 + in_group_p(grsec_audit_gid))
57226 + || (grsec_enable_execlog && !grsec_enable_group)))
57227 + return;
57228 +
57229 + mutex_lock(&gr_exec_arg_mutex);
57230 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57231 +
57232 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57233 + const char __user *p;
57234 + unsigned int len;
57235 +
57236 + p = get_user_arg_ptr(argv, i);
57237 + if (IS_ERR(p))
57238 + goto log;
57239 +
57240 + len = strnlen_user(p, 128 - execlen);
57241 + if (len > 128 - execlen)
57242 + len = 128 - execlen;
57243 + else if (len > 0)
57244 + len--;
57245 + if (copy_from_user(grarg + execlen, p, len))
57246 + goto log;
57247 +
57248 + /* rewrite unprintable characters */
57249 + for (x = 0; x < len; x++) {
57250 + c = *(grarg + execlen + x);
57251 + if (c < 32 || c > 126)
57252 + *(grarg + execlen + x) = ' ';
57253 + }
57254 +
57255 + execlen += len;
57256 + *(grarg + execlen) = ' ';
57257 + *(grarg + execlen + 1) = '\0';
57258 + execlen++;
57259 + }
57260 +
57261 + log:
57262 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57263 + bprm->file->f_path.mnt, grarg);
57264 + mutex_unlock(&gr_exec_arg_mutex);
57265 +#endif
57266 + return;
57267 +}
57268 +
57269 +#ifdef CONFIG_GRKERNSEC
57270 +extern int gr_acl_is_capable(const int cap);
57271 +extern int gr_acl_is_capable_nolog(const int cap);
57272 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57273 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57274 +extern int gr_chroot_is_capable(const int cap);
57275 +extern int gr_chroot_is_capable_nolog(const int cap);
57276 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57277 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57278 +#endif
57279 +
57280 +const char *captab_log[] = {
57281 + "CAP_CHOWN",
57282 + "CAP_DAC_OVERRIDE",
57283 + "CAP_DAC_READ_SEARCH",
57284 + "CAP_FOWNER",
57285 + "CAP_FSETID",
57286 + "CAP_KILL",
57287 + "CAP_SETGID",
57288 + "CAP_SETUID",
57289 + "CAP_SETPCAP",
57290 + "CAP_LINUX_IMMUTABLE",
57291 + "CAP_NET_BIND_SERVICE",
57292 + "CAP_NET_BROADCAST",
57293 + "CAP_NET_ADMIN",
57294 + "CAP_NET_RAW",
57295 + "CAP_IPC_LOCK",
57296 + "CAP_IPC_OWNER",
57297 + "CAP_SYS_MODULE",
57298 + "CAP_SYS_RAWIO",
57299 + "CAP_SYS_CHROOT",
57300 + "CAP_SYS_PTRACE",
57301 + "CAP_SYS_PACCT",
57302 + "CAP_SYS_ADMIN",
57303 + "CAP_SYS_BOOT",
57304 + "CAP_SYS_NICE",
57305 + "CAP_SYS_RESOURCE",
57306 + "CAP_SYS_TIME",
57307 + "CAP_SYS_TTY_CONFIG",
57308 + "CAP_MKNOD",
57309 + "CAP_LEASE",
57310 + "CAP_AUDIT_WRITE",
57311 + "CAP_AUDIT_CONTROL",
57312 + "CAP_SETFCAP",
57313 + "CAP_MAC_OVERRIDE",
57314 + "CAP_MAC_ADMIN",
57315 + "CAP_SYSLOG",
57316 + "CAP_WAKE_ALARM"
57317 +};
57318 +
57319 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57320 +
57321 +int gr_is_capable(const int cap)
57322 +{
57323 +#ifdef CONFIG_GRKERNSEC
57324 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57325 + return 1;
57326 + return 0;
57327 +#else
57328 + return 1;
57329 +#endif
57330 +}
57331 +
57332 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57333 +{
57334 +#ifdef CONFIG_GRKERNSEC
57335 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57336 + return 1;
57337 + return 0;
57338 +#else
57339 + return 1;
57340 +#endif
57341 +}
57342 +
57343 +int gr_is_capable_nolog(const int cap)
57344 +{
57345 +#ifdef CONFIG_GRKERNSEC
57346 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57347 + return 1;
57348 + return 0;
57349 +#else
57350 + return 1;
57351 +#endif
57352 +}
57353 +
57354 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57355 +{
57356 +#ifdef CONFIG_GRKERNSEC
57357 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57358 + return 1;
57359 + return 0;
57360 +#else
57361 + return 1;
57362 +#endif
57363 +}
57364 +
57365 +EXPORT_SYMBOL(gr_is_capable);
57366 +EXPORT_SYMBOL(gr_is_capable_nolog);
57367 +EXPORT_SYMBOL(gr_task_is_capable);
57368 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
57369 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57370 new file mode 100644
57371 index 0000000..d3ee748
57372 --- /dev/null
57373 +++ b/grsecurity/grsec_fifo.c
57374 @@ -0,0 +1,24 @@
57375 +#include <linux/kernel.h>
57376 +#include <linux/sched.h>
57377 +#include <linux/fs.h>
57378 +#include <linux/file.h>
57379 +#include <linux/grinternal.h>
57380 +
57381 +int
57382 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57383 + const struct dentry *dir, const int flag, const int acc_mode)
57384 +{
57385 +#ifdef CONFIG_GRKERNSEC_FIFO
57386 + const struct cred *cred = current_cred();
57387 +
57388 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57389 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57390 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57391 + (cred->fsuid != dentry->d_inode->i_uid)) {
57392 + if (!inode_permission(dentry->d_inode, acc_mode))
57393 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57394 + return -EACCES;
57395 + }
57396 +#endif
57397 + return 0;
57398 +}
57399 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57400 new file mode 100644
57401 index 0000000..8ca18bf
57402 --- /dev/null
57403 +++ b/grsecurity/grsec_fork.c
57404 @@ -0,0 +1,23 @@
57405 +#include <linux/kernel.h>
57406 +#include <linux/sched.h>
57407 +#include <linux/grsecurity.h>
57408 +#include <linux/grinternal.h>
57409 +#include <linux/errno.h>
57410 +
57411 +void
57412 +gr_log_forkfail(const int retval)
57413 +{
57414 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57415 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57416 + switch (retval) {
57417 + case -EAGAIN:
57418 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57419 + break;
57420 + case -ENOMEM:
57421 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57422 + break;
57423 + }
57424 + }
57425 +#endif
57426 + return;
57427 +}
57428 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57429 new file mode 100644
57430 index 0000000..05a6015
57431 --- /dev/null
57432 +++ b/grsecurity/grsec_init.c
57433 @@ -0,0 +1,283 @@
57434 +#include <linux/kernel.h>
57435 +#include <linux/sched.h>
57436 +#include <linux/mm.h>
57437 +#include <linux/gracl.h>
57438 +#include <linux/slab.h>
57439 +#include <linux/vmalloc.h>
57440 +#include <linux/percpu.h>
57441 +#include <linux/module.h>
57442 +
57443 +int grsec_enable_ptrace_readexec;
57444 +int grsec_enable_setxid;
57445 +int grsec_enable_symlinkown;
57446 +int grsec_symlinkown_gid;
57447 +int grsec_enable_brute;
57448 +int grsec_enable_link;
57449 +int grsec_enable_dmesg;
57450 +int grsec_enable_harden_ptrace;
57451 +int grsec_enable_fifo;
57452 +int grsec_enable_execlog;
57453 +int grsec_enable_signal;
57454 +int grsec_enable_forkfail;
57455 +int grsec_enable_audit_ptrace;
57456 +int grsec_enable_time;
57457 +int grsec_enable_audit_textrel;
57458 +int grsec_enable_group;
57459 +int grsec_audit_gid;
57460 +int grsec_enable_chdir;
57461 +int grsec_enable_mount;
57462 +int grsec_enable_rofs;
57463 +int grsec_enable_chroot_findtask;
57464 +int grsec_enable_chroot_mount;
57465 +int grsec_enable_chroot_shmat;
57466 +int grsec_enable_chroot_fchdir;
57467 +int grsec_enable_chroot_double;
57468 +int grsec_enable_chroot_pivot;
57469 +int grsec_enable_chroot_chdir;
57470 +int grsec_enable_chroot_chmod;
57471 +int grsec_enable_chroot_mknod;
57472 +int grsec_enable_chroot_nice;
57473 +int grsec_enable_chroot_execlog;
57474 +int grsec_enable_chroot_caps;
57475 +int grsec_enable_chroot_sysctl;
57476 +int grsec_enable_chroot_unix;
57477 +int grsec_enable_tpe;
57478 +int grsec_tpe_gid;
57479 +int grsec_enable_blackhole;
57480 +#ifdef CONFIG_IPV6_MODULE
57481 +EXPORT_SYMBOL(grsec_enable_blackhole);
57482 +#endif
57483 +int grsec_lastack_retries;
57484 +int grsec_enable_tpe_all;
57485 +int grsec_enable_tpe_invert;
57486 +int grsec_enable_socket_all;
57487 +int grsec_socket_all_gid;
57488 +int grsec_enable_socket_client;
57489 +int grsec_socket_client_gid;
57490 +int grsec_enable_socket_server;
57491 +int grsec_socket_server_gid;
57492 +int grsec_resource_logging;
57493 +int grsec_disable_privio;
57494 +int grsec_enable_log_rwxmaps;
57495 +int grsec_lock;
57496 +
57497 +DEFINE_SPINLOCK(grsec_alert_lock);
57498 +unsigned long grsec_alert_wtime = 0;
57499 +unsigned long grsec_alert_fyet = 0;
57500 +
57501 +DEFINE_SPINLOCK(grsec_audit_lock);
57502 +
57503 +DEFINE_RWLOCK(grsec_exec_file_lock);
57504 +
57505 +char *gr_shared_page[4];
57506 +
57507 +char *gr_alert_log_fmt;
57508 +char *gr_audit_log_fmt;
57509 +char *gr_alert_log_buf;
57510 +char *gr_audit_log_buf;
57511 +
57512 +extern struct gr_arg *gr_usermode;
57513 +extern unsigned char *gr_system_salt;
57514 +extern unsigned char *gr_system_sum;
57515 +
57516 +void __init
57517 +grsecurity_init(void)
57518 +{
57519 + int j;
57520 + /* create the per-cpu shared pages */
57521 +
57522 +#ifdef CONFIG_X86
57523 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57524 +#endif
57525 +
57526 + for (j = 0; j < 4; j++) {
57527 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57528 + if (gr_shared_page[j] == NULL) {
57529 + panic("Unable to allocate grsecurity shared page");
57530 + return;
57531 + }
57532 + }
57533 +
57534 + /* allocate log buffers */
57535 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57536 + if (!gr_alert_log_fmt) {
57537 + panic("Unable to allocate grsecurity alert log format buffer");
57538 + return;
57539 + }
57540 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57541 + if (!gr_audit_log_fmt) {
57542 + panic("Unable to allocate grsecurity audit log format buffer");
57543 + return;
57544 + }
57545 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57546 + if (!gr_alert_log_buf) {
57547 + panic("Unable to allocate grsecurity alert log buffer");
57548 + return;
57549 + }
57550 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57551 + if (!gr_audit_log_buf) {
57552 + panic("Unable to allocate grsecurity audit log buffer");
57553 + return;
57554 + }
57555 +
57556 + /* allocate memory for authentication structure */
57557 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57558 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57559 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57560 +
57561 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57562 + panic("Unable to allocate grsecurity authentication structure");
57563 + return;
57564 + }
57565 +
57566 +
57567 +#ifdef CONFIG_GRKERNSEC_IO
57568 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57569 + grsec_disable_privio = 1;
57570 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57571 + grsec_disable_privio = 1;
57572 +#else
57573 + grsec_disable_privio = 0;
57574 +#endif
57575 +#endif
57576 +
57577 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57578 + /* for backward compatibility, tpe_invert always defaults to on if
57579 + enabled in the kernel
57580 + */
57581 + grsec_enable_tpe_invert = 1;
57582 +#endif
57583 +
57584 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57585 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57586 + grsec_lock = 1;
57587 +#endif
57588 +
57589 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57590 + grsec_enable_audit_textrel = 1;
57591 +#endif
57592 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57593 + grsec_enable_log_rwxmaps = 1;
57594 +#endif
57595 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57596 + grsec_enable_group = 1;
57597 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57598 +#endif
57599 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57600 + grsec_enable_ptrace_readexec = 1;
57601 +#endif
57602 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57603 + grsec_enable_chdir = 1;
57604 +#endif
57605 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57606 + grsec_enable_harden_ptrace = 1;
57607 +#endif
57608 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57609 + grsec_enable_mount = 1;
57610 +#endif
57611 +#ifdef CONFIG_GRKERNSEC_LINK
57612 + grsec_enable_link = 1;
57613 +#endif
57614 +#ifdef CONFIG_GRKERNSEC_BRUTE
57615 + grsec_enable_brute = 1;
57616 +#endif
57617 +#ifdef CONFIG_GRKERNSEC_DMESG
57618 + grsec_enable_dmesg = 1;
57619 +#endif
57620 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57621 + grsec_enable_blackhole = 1;
57622 + grsec_lastack_retries = 4;
57623 +#endif
57624 +#ifdef CONFIG_GRKERNSEC_FIFO
57625 + grsec_enable_fifo = 1;
57626 +#endif
57627 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57628 + grsec_enable_execlog = 1;
57629 +#endif
57630 +#ifdef CONFIG_GRKERNSEC_SETXID
57631 + grsec_enable_setxid = 1;
57632 +#endif
57633 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57634 + grsec_enable_signal = 1;
57635 +#endif
57636 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57637 + grsec_enable_forkfail = 1;
57638 +#endif
57639 +#ifdef CONFIG_GRKERNSEC_TIME
57640 + grsec_enable_time = 1;
57641 +#endif
57642 +#ifdef CONFIG_GRKERNSEC_RESLOG
57643 + grsec_resource_logging = 1;
57644 +#endif
57645 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57646 + grsec_enable_chroot_findtask = 1;
57647 +#endif
57648 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57649 + grsec_enable_chroot_unix = 1;
57650 +#endif
57651 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57652 + grsec_enable_chroot_mount = 1;
57653 +#endif
57654 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57655 + grsec_enable_chroot_fchdir = 1;
57656 +#endif
57657 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57658 + grsec_enable_chroot_shmat = 1;
57659 +#endif
57660 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57661 + grsec_enable_audit_ptrace = 1;
57662 +#endif
57663 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57664 + grsec_enable_chroot_double = 1;
57665 +#endif
57666 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57667 + grsec_enable_chroot_pivot = 1;
57668 +#endif
57669 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57670 + grsec_enable_chroot_chdir = 1;
57671 +#endif
57672 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57673 + grsec_enable_chroot_chmod = 1;
57674 +#endif
57675 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57676 + grsec_enable_chroot_mknod = 1;
57677 +#endif
57678 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57679 + grsec_enable_chroot_nice = 1;
57680 +#endif
57681 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57682 + grsec_enable_chroot_execlog = 1;
57683 +#endif
57684 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57685 + grsec_enable_chroot_caps = 1;
57686 +#endif
57687 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57688 + grsec_enable_chroot_sysctl = 1;
57689 +#endif
57690 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57691 + grsec_enable_symlinkown = 1;
57692 + grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
57693 +#endif
57694 +#ifdef CONFIG_GRKERNSEC_TPE
57695 + grsec_enable_tpe = 1;
57696 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57697 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57698 + grsec_enable_tpe_all = 1;
57699 +#endif
57700 +#endif
57701 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57702 + grsec_enable_socket_all = 1;
57703 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57704 +#endif
57705 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57706 + grsec_enable_socket_client = 1;
57707 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57708 +#endif
57709 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57710 + grsec_enable_socket_server = 1;
57711 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57712 +#endif
57713 +#endif
57714 +
57715 + return;
57716 +}
57717 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57718 new file mode 100644
57719 index 0000000..35a96d1
57720 --- /dev/null
57721 +++ b/grsecurity/grsec_link.c
57722 @@ -0,0 +1,59 @@
57723 +#include <linux/kernel.h>
57724 +#include <linux/sched.h>
57725 +#include <linux/fs.h>
57726 +#include <linux/file.h>
57727 +#include <linux/grinternal.h>
57728 +
57729 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
57730 +{
57731 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57732 + const struct inode *link_inode = link->dentry->d_inode;
57733 +
57734 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
57735 + /* ignore root-owned links, e.g. /proc/self */
57736 + link_inode->i_uid &&
57737 + link_inode->i_uid != target->i_uid) {
57738 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
57739 + return 1;
57740 + }
57741 +#endif
57742 + return 0;
57743 +}
57744 +
57745 +int
57746 +gr_handle_follow_link(const struct inode *parent,
57747 + const struct inode *inode,
57748 + const struct dentry *dentry, const struct vfsmount *mnt)
57749 +{
57750 +#ifdef CONFIG_GRKERNSEC_LINK
57751 + const struct cred *cred = current_cred();
57752 +
57753 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57754 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57755 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57756 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57757 + return -EACCES;
57758 + }
57759 +#endif
57760 + return 0;
57761 +}
57762 +
57763 +int
57764 +gr_handle_hardlink(const struct dentry *dentry,
57765 + const struct vfsmount *mnt,
57766 + struct inode *inode, const int mode, const char *to)
57767 +{
57768 +#ifdef CONFIG_GRKERNSEC_LINK
57769 + const struct cred *cred = current_cred();
57770 +
57771 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57772 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57773 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57774 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57775 + !capable(CAP_FOWNER) && cred->uid) {
57776 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57777 + return -EPERM;
57778 + }
57779 +#endif
57780 + return 0;
57781 +}
57782 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57783 new file mode 100644
57784 index 0000000..a45d2e9
57785 --- /dev/null
57786 +++ b/grsecurity/grsec_log.c
57787 @@ -0,0 +1,322 @@
57788 +#include <linux/kernel.h>
57789 +#include <linux/sched.h>
57790 +#include <linux/file.h>
57791 +#include <linux/tty.h>
57792 +#include <linux/fs.h>
57793 +#include <linux/grinternal.h>
57794 +
57795 +#ifdef CONFIG_TREE_PREEMPT_RCU
57796 +#define DISABLE_PREEMPT() preempt_disable()
57797 +#define ENABLE_PREEMPT() preempt_enable()
57798 +#else
57799 +#define DISABLE_PREEMPT()
57800 +#define ENABLE_PREEMPT()
57801 +#endif
57802 +
57803 +#define BEGIN_LOCKS(x) \
57804 + DISABLE_PREEMPT(); \
57805 + rcu_read_lock(); \
57806 + read_lock(&tasklist_lock); \
57807 + read_lock(&grsec_exec_file_lock); \
57808 + if (x != GR_DO_AUDIT) \
57809 + spin_lock(&grsec_alert_lock); \
57810 + else \
57811 + spin_lock(&grsec_audit_lock)
57812 +
57813 +#define END_LOCKS(x) \
57814 + if (x != GR_DO_AUDIT) \
57815 + spin_unlock(&grsec_alert_lock); \
57816 + else \
57817 + spin_unlock(&grsec_audit_lock); \
57818 + read_unlock(&grsec_exec_file_lock); \
57819 + read_unlock(&tasklist_lock); \
57820 + rcu_read_unlock(); \
57821 + ENABLE_PREEMPT(); \
57822 + if (x == GR_DONT_AUDIT) \
57823 + gr_handle_alertkill(current)
57824 +
57825 +enum {
57826 + FLOODING,
57827 + NO_FLOODING
57828 +};
57829 +
57830 +extern char *gr_alert_log_fmt;
57831 +extern char *gr_audit_log_fmt;
57832 +extern char *gr_alert_log_buf;
57833 +extern char *gr_audit_log_buf;
57834 +
57835 +static int gr_log_start(int audit)
57836 +{
57837 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57838 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57839 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57840 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57841 + unsigned long curr_secs = get_seconds();
57842 +
57843 + if (audit == GR_DO_AUDIT)
57844 + goto set_fmt;
57845 +
57846 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57847 + grsec_alert_wtime = curr_secs;
57848 + grsec_alert_fyet = 0;
57849 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57850 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57851 + grsec_alert_fyet++;
57852 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57853 + grsec_alert_wtime = curr_secs;
57854 + grsec_alert_fyet++;
57855 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57856 + return FLOODING;
57857 + }
57858 + else return FLOODING;
57859 +
57860 +set_fmt:
57861 +#endif
57862 + memset(buf, 0, PAGE_SIZE);
57863 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57864 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57865 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57866 + } else if (current->signal->curr_ip) {
57867 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57868 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57869 + } else if (gr_acl_is_enabled()) {
57870 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57871 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57872 + } else {
57873 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57874 + strcpy(buf, fmt);
57875 + }
57876 +
57877 + return NO_FLOODING;
57878 +}
57879 +
57880 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57881 + __attribute__ ((format (printf, 2, 0)));
57882 +
57883 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57884 +{
57885 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57886 + unsigned int len = strlen(buf);
57887 +
57888 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57889 +
57890 + return;
57891 +}
57892 +
57893 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57894 + __attribute__ ((format (printf, 2, 3)));
57895 +
57896 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57897 +{
57898 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57899 + unsigned int len = strlen(buf);
57900 + va_list ap;
57901 +
57902 + va_start(ap, msg);
57903 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57904 + va_end(ap);
57905 +
57906 + return;
57907 +}
57908 +
57909 +static void gr_log_end(int audit, int append_default)
57910 +{
57911 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57912 +
57913 + if (append_default) {
57914 + unsigned int len = strlen(buf);
57915 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57916 + }
57917 +
57918 + printk("%s\n", buf);
57919 +
57920 + return;
57921 +}
57922 +
57923 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57924 +{
57925 + int logtype;
57926 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57927 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57928 + void *voidptr = NULL;
57929 + int num1 = 0, num2 = 0;
57930 + unsigned long ulong1 = 0, ulong2 = 0;
57931 + struct dentry *dentry = NULL;
57932 + struct vfsmount *mnt = NULL;
57933 + struct file *file = NULL;
57934 + struct task_struct *task = NULL;
57935 + const struct cred *cred, *pcred;
57936 + va_list ap;
57937 +
57938 + BEGIN_LOCKS(audit);
57939 + logtype = gr_log_start(audit);
57940 + if (logtype == FLOODING) {
57941 + END_LOCKS(audit);
57942 + return;
57943 + }
57944 + va_start(ap, argtypes);
57945 + switch (argtypes) {
57946 + case GR_TTYSNIFF:
57947 + task = va_arg(ap, struct task_struct *);
57948 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57949 + break;
57950 + case GR_SYSCTL_HIDDEN:
57951 + str1 = va_arg(ap, char *);
57952 + gr_log_middle_varargs(audit, msg, result, str1);
57953 + break;
57954 + case GR_RBAC:
57955 + dentry = va_arg(ap, struct dentry *);
57956 + mnt = va_arg(ap, struct vfsmount *);
57957 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57958 + break;
57959 + case GR_RBAC_STR:
57960 + dentry = va_arg(ap, struct dentry *);
57961 + mnt = va_arg(ap, struct vfsmount *);
57962 + str1 = va_arg(ap, char *);
57963 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57964 + break;
57965 + case GR_STR_RBAC:
57966 + str1 = va_arg(ap, char *);
57967 + dentry = va_arg(ap, struct dentry *);
57968 + mnt = va_arg(ap, struct vfsmount *);
57969 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57970 + break;
57971 + case GR_RBAC_MODE2:
57972 + dentry = va_arg(ap, struct dentry *);
57973 + mnt = va_arg(ap, struct vfsmount *);
57974 + str1 = va_arg(ap, char *);
57975 + str2 = va_arg(ap, char *);
57976 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57977 + break;
57978 + case GR_RBAC_MODE3:
57979 + dentry = va_arg(ap, struct dentry *);
57980 + mnt = va_arg(ap, struct vfsmount *);
57981 + str1 = va_arg(ap, char *);
57982 + str2 = va_arg(ap, char *);
57983 + str3 = va_arg(ap, char *);
57984 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57985 + break;
57986 + case GR_FILENAME:
57987 + dentry = va_arg(ap, struct dentry *);
57988 + mnt = va_arg(ap, struct vfsmount *);
57989 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57990 + break;
57991 + case GR_STR_FILENAME:
57992 + str1 = va_arg(ap, char *);
57993 + dentry = va_arg(ap, struct dentry *);
57994 + mnt = va_arg(ap, struct vfsmount *);
57995 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57996 + break;
57997 + case GR_FILENAME_STR:
57998 + dentry = va_arg(ap, struct dentry *);
57999 + mnt = va_arg(ap, struct vfsmount *);
58000 + str1 = va_arg(ap, char *);
58001 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58002 + break;
58003 + case GR_FILENAME_TWO_INT:
58004 + dentry = va_arg(ap, struct dentry *);
58005 + mnt = va_arg(ap, struct vfsmount *);
58006 + num1 = va_arg(ap, int);
58007 + num2 = va_arg(ap, int);
58008 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58009 + break;
58010 + case GR_FILENAME_TWO_INT_STR:
58011 + dentry = va_arg(ap, struct dentry *);
58012 + mnt = va_arg(ap, struct vfsmount *);
58013 + num1 = va_arg(ap, int);
58014 + num2 = va_arg(ap, int);
58015 + str1 = va_arg(ap, char *);
58016 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58017 + break;
58018 + case GR_TEXTREL:
58019 + file = va_arg(ap, struct file *);
58020 + ulong1 = va_arg(ap, unsigned long);
58021 + ulong2 = va_arg(ap, unsigned long);
58022 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58023 + break;
58024 + case GR_PTRACE:
58025 + task = va_arg(ap, struct task_struct *);
58026 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58027 + break;
58028 + case GR_RESOURCE:
58029 + task = va_arg(ap, struct task_struct *);
58030 + cred = __task_cred(task);
58031 + pcred = __task_cred(task->real_parent);
58032 + ulong1 = va_arg(ap, unsigned long);
58033 + str1 = va_arg(ap, char *);
58034 + ulong2 = va_arg(ap, unsigned long);
58035 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58036 + break;
58037 + case GR_CAP:
58038 + task = va_arg(ap, struct task_struct *);
58039 + cred = __task_cred(task);
58040 + pcred = __task_cred(task->real_parent);
58041 + str1 = va_arg(ap, char *);
58042 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58043 + break;
58044 + case GR_SIG:
58045 + str1 = va_arg(ap, char *);
58046 + voidptr = va_arg(ap, void *);
58047 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58048 + break;
58049 + case GR_SIG2:
58050 + task = va_arg(ap, struct task_struct *);
58051 + cred = __task_cred(task);
58052 + pcred = __task_cred(task->real_parent);
58053 + num1 = va_arg(ap, int);
58054 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58055 + break;
58056 + case GR_CRASH1:
58057 + task = va_arg(ap, struct task_struct *);
58058 + cred = __task_cred(task);
58059 + pcred = __task_cred(task->real_parent);
58060 + ulong1 = va_arg(ap, unsigned long);
58061 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58062 + break;
58063 + case GR_CRASH2:
58064 + task = va_arg(ap, struct task_struct *);
58065 + cred = __task_cred(task);
58066 + pcred = __task_cred(task->real_parent);
58067 + ulong1 = va_arg(ap, unsigned long);
58068 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58069 + break;
58070 + case GR_RWXMAP:
58071 + file = va_arg(ap, struct file *);
58072 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58073 + break;
58074 + case GR_PSACCT:
58075 + {
58076 + unsigned int wday, cday;
58077 + __u8 whr, chr;
58078 + __u8 wmin, cmin;
58079 + __u8 wsec, csec;
58080 + char cur_tty[64] = { 0 };
58081 + char parent_tty[64] = { 0 };
58082 +
58083 + task = va_arg(ap, struct task_struct *);
58084 + wday = va_arg(ap, unsigned int);
58085 + cday = va_arg(ap, unsigned int);
58086 + whr = va_arg(ap, int);
58087 + chr = va_arg(ap, int);
58088 + wmin = va_arg(ap, int);
58089 + cmin = va_arg(ap, int);
58090 + wsec = va_arg(ap, int);
58091 + csec = va_arg(ap, int);
58092 + ulong1 = va_arg(ap, unsigned long);
58093 + cred = __task_cred(task);
58094 + pcred = __task_cred(task->real_parent);
58095 +
58096 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58097 + }
58098 + break;
58099 + default:
58100 + gr_log_middle(audit, msg, ap);
58101 + }
58102 + va_end(ap);
58103 + // these don't need DEFAULTSECARGS printed on the end
58104 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58105 + gr_log_end(audit, 0);
58106 + else
58107 + gr_log_end(audit, 1);
58108 + END_LOCKS(audit);
58109 +}
58110 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58111 new file mode 100644
58112 index 0000000..f536303
58113 --- /dev/null
58114 +++ b/grsecurity/grsec_mem.c
58115 @@ -0,0 +1,40 @@
58116 +#include <linux/kernel.h>
58117 +#include <linux/sched.h>
58118 +#include <linux/mm.h>
58119 +#include <linux/mman.h>
58120 +#include <linux/grinternal.h>
58121 +
58122 +void
58123 +gr_handle_ioperm(void)
58124 +{
58125 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58126 + return;
58127 +}
58128 +
58129 +void
58130 +gr_handle_iopl(void)
58131 +{
58132 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58133 + return;
58134 +}
58135 +
58136 +void
58137 +gr_handle_mem_readwrite(u64 from, u64 to)
58138 +{
58139 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58140 + return;
58141 +}
58142 +
58143 +void
58144 +gr_handle_vm86(void)
58145 +{
58146 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58147 + return;
58148 +}
58149 +
58150 +void
58151 +gr_log_badprocpid(const char *entry)
58152 +{
58153 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58154 + return;
58155 +}
58156 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58157 new file mode 100644
58158 index 0000000..2131422
58159 --- /dev/null
58160 +++ b/grsecurity/grsec_mount.c
58161 @@ -0,0 +1,62 @@
58162 +#include <linux/kernel.h>
58163 +#include <linux/sched.h>
58164 +#include <linux/mount.h>
58165 +#include <linux/grsecurity.h>
58166 +#include <linux/grinternal.h>
58167 +
58168 +void
58169 +gr_log_remount(const char *devname, const int retval)
58170 +{
58171 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58172 + if (grsec_enable_mount && (retval >= 0))
58173 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58174 +#endif
58175 + return;
58176 +}
58177 +
58178 +void
58179 +gr_log_unmount(const char *devname, const int retval)
58180 +{
58181 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58182 + if (grsec_enable_mount && (retval >= 0))
58183 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58184 +#endif
58185 + return;
58186 +}
58187 +
58188 +void
58189 +gr_log_mount(const char *from, const char *to, const int retval)
58190 +{
58191 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58192 + if (grsec_enable_mount && (retval >= 0))
58193 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58194 +#endif
58195 + return;
58196 +}
58197 +
58198 +int
58199 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58200 +{
58201 +#ifdef CONFIG_GRKERNSEC_ROFS
58202 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58203 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58204 + return -EPERM;
58205 + } else
58206 + return 0;
58207 +#endif
58208 + return 0;
58209 +}
58210 +
58211 +int
58212 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58213 +{
58214 +#ifdef CONFIG_GRKERNSEC_ROFS
58215 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58216 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58217 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58218 + return -EPERM;
58219 + } else
58220 + return 0;
58221 +#endif
58222 + return 0;
58223 +}
58224 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58225 new file mode 100644
58226 index 0000000..a3b12a0
58227 --- /dev/null
58228 +++ b/grsecurity/grsec_pax.c
58229 @@ -0,0 +1,36 @@
58230 +#include <linux/kernel.h>
58231 +#include <linux/sched.h>
58232 +#include <linux/mm.h>
58233 +#include <linux/file.h>
58234 +#include <linux/grinternal.h>
58235 +#include <linux/grsecurity.h>
58236 +
58237 +void
58238 +gr_log_textrel(struct vm_area_struct * vma)
58239 +{
58240 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58241 + if (grsec_enable_audit_textrel)
58242 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58243 +#endif
58244 + return;
58245 +}
58246 +
58247 +void
58248 +gr_log_rwxmmap(struct file *file)
58249 +{
58250 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58251 + if (grsec_enable_log_rwxmaps)
58252 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58253 +#endif
58254 + return;
58255 +}
58256 +
58257 +void
58258 +gr_log_rwxmprotect(struct file *file)
58259 +{
58260 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58261 + if (grsec_enable_log_rwxmaps)
58262 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58263 +#endif
58264 + return;
58265 +}
58266 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58267 new file mode 100644
58268 index 0000000..f7f29aa
58269 --- /dev/null
58270 +++ b/grsecurity/grsec_ptrace.c
58271 @@ -0,0 +1,30 @@
58272 +#include <linux/kernel.h>
58273 +#include <linux/sched.h>
58274 +#include <linux/grinternal.h>
58275 +#include <linux/security.h>
58276 +
58277 +void
58278 +gr_audit_ptrace(struct task_struct *task)
58279 +{
58280 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58281 + if (grsec_enable_audit_ptrace)
58282 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58283 +#endif
58284 + return;
58285 +}
58286 +
58287 +int
58288 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
58289 +{
58290 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58291 + const struct dentry *dentry = file->f_path.dentry;
58292 + const struct vfsmount *mnt = file->f_path.mnt;
58293 +
58294 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58295 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58296 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58297 + return -EACCES;
58298 + }
58299 +#endif
58300 + return 0;
58301 +}
58302 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58303 new file mode 100644
58304 index 0000000..7a5b2de
58305 --- /dev/null
58306 +++ b/grsecurity/grsec_sig.c
58307 @@ -0,0 +1,207 @@
58308 +#include <linux/kernel.h>
58309 +#include <linux/sched.h>
58310 +#include <linux/delay.h>
58311 +#include <linux/grsecurity.h>
58312 +#include <linux/grinternal.h>
58313 +#include <linux/hardirq.h>
58314 +
58315 +char *signames[] = {
58316 + [SIGSEGV] = "Segmentation fault",
58317 + [SIGILL] = "Illegal instruction",
58318 + [SIGABRT] = "Abort",
58319 + [SIGBUS] = "Invalid alignment/Bus error"
58320 +};
58321 +
58322 +void
58323 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58324 +{
58325 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58326 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58327 + (sig == SIGABRT) || (sig == SIGBUS))) {
58328 + if (t->pid == current->pid) {
58329 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58330 + } else {
58331 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58332 + }
58333 + }
58334 +#endif
58335 + return;
58336 +}
58337 +
58338 +int
58339 +gr_handle_signal(const struct task_struct *p, const int sig)
58340 +{
58341 +#ifdef CONFIG_GRKERNSEC
58342 + /* ignore the 0 signal for protected task checks */
58343 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58344 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58345 + return -EPERM;
58346 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58347 + return -EPERM;
58348 + }
58349 +#endif
58350 + return 0;
58351 +}
58352 +
58353 +#ifdef CONFIG_GRKERNSEC
58354 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58355 +
58356 +int gr_fake_force_sig(int sig, struct task_struct *t)
58357 +{
58358 + unsigned long int flags;
58359 + int ret, blocked, ignored;
58360 + struct k_sigaction *action;
58361 +
58362 + spin_lock_irqsave(&t->sighand->siglock, flags);
58363 + action = &t->sighand->action[sig-1];
58364 + ignored = action->sa.sa_handler == SIG_IGN;
58365 + blocked = sigismember(&t->blocked, sig);
58366 + if (blocked || ignored) {
58367 + action->sa.sa_handler = SIG_DFL;
58368 + if (blocked) {
58369 + sigdelset(&t->blocked, sig);
58370 + recalc_sigpending_and_wake(t);
58371 + }
58372 + }
58373 + if (action->sa.sa_handler == SIG_DFL)
58374 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58375 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58376 +
58377 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58378 +
58379 + return ret;
58380 +}
58381 +#endif
58382 +
58383 +#ifdef CONFIG_GRKERNSEC_BRUTE
58384 +#define GR_USER_BAN_TIME (15 * 60)
58385 +
58386 +static int __get_dumpable(unsigned long mm_flags)
58387 +{
58388 + int ret;
58389 +
58390 + ret = mm_flags & MMF_DUMPABLE_MASK;
58391 + return (ret >= 2) ? 2 : ret;
58392 +}
58393 +#endif
58394 +
58395 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58396 +{
58397 +#ifdef CONFIG_GRKERNSEC_BRUTE
58398 + uid_t uid = 0;
58399 +
58400 + if (!grsec_enable_brute)
58401 + return;
58402 +
58403 + rcu_read_lock();
58404 + read_lock(&tasklist_lock);
58405 + read_lock(&grsec_exec_file_lock);
58406 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58407 + p->real_parent->brute = 1;
58408 + else {
58409 + const struct cred *cred = __task_cred(p), *cred2;
58410 + struct task_struct *tsk, *tsk2;
58411 +
58412 + if (!__get_dumpable(mm_flags) && cred->uid) {
58413 + struct user_struct *user;
58414 +
58415 + uid = cred->uid;
58416 +
58417 + /* this is put upon execution past expiration */
58418 + user = find_user(uid);
58419 + if (user == NULL)
58420 + goto unlock;
58421 + user->banned = 1;
58422 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58423 + if (user->ban_expires == ~0UL)
58424 + user->ban_expires--;
58425 +
58426 + do_each_thread(tsk2, tsk) {
58427 + cred2 = __task_cred(tsk);
58428 + if (tsk != p && cred2->uid == uid)
58429 + gr_fake_force_sig(SIGKILL, tsk);
58430 + } while_each_thread(tsk2, tsk);
58431 + }
58432 + }
58433 +unlock:
58434 + read_unlock(&grsec_exec_file_lock);
58435 + read_unlock(&tasklist_lock);
58436 + rcu_read_unlock();
58437 +
58438 + if (uid)
58439 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58440 +
58441 +#endif
58442 + return;
58443 +}
58444 +
58445 +void gr_handle_brute_check(void)
58446 +{
58447 +#ifdef CONFIG_GRKERNSEC_BRUTE
58448 + if (current->brute)
58449 + msleep(30 * 1000);
58450 +#endif
58451 + return;
58452 +}
58453 +
58454 +void gr_handle_kernel_exploit(void)
58455 +{
58456 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58457 + const struct cred *cred;
58458 + struct task_struct *tsk, *tsk2;
58459 + struct user_struct *user;
58460 + uid_t uid;
58461 +
58462 + if (in_irq() || in_serving_softirq() || in_nmi())
58463 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58464 +
58465 + uid = current_uid();
58466 +
58467 + if (uid == 0)
58468 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58469 + else {
58470 + /* kill all the processes of this user, hold a reference
58471 + to their creds struct, and prevent them from creating
58472 + another process until system reset
58473 + */
58474 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58475 + /* we intentionally leak this ref */
58476 + user = get_uid(current->cred->user);
58477 + if (user) {
58478 + user->banned = 1;
58479 + user->ban_expires = ~0UL;
58480 + }
58481 +
58482 + read_lock(&tasklist_lock);
58483 + do_each_thread(tsk2, tsk) {
58484 + cred = __task_cred(tsk);
58485 + if (cred->uid == uid)
58486 + gr_fake_force_sig(SIGKILL, tsk);
58487 + } while_each_thread(tsk2, tsk);
58488 + read_unlock(&tasklist_lock);
58489 + }
58490 +#endif
58491 +}
58492 +
58493 +int __gr_process_user_ban(struct user_struct *user)
58494 +{
58495 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58496 + if (unlikely(user->banned)) {
58497 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58498 + user->banned = 0;
58499 + user->ban_expires = 0;
58500 + free_uid(user);
58501 + } else
58502 + return -EPERM;
58503 + }
58504 +#endif
58505 + return 0;
58506 +}
58507 +
58508 +int gr_process_user_ban(void)
58509 +{
58510 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58511 + return __gr_process_user_ban(current->cred->user);
58512 +#endif
58513 + return 0;
58514 +}
58515 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58516 new file mode 100644
58517 index 0000000..4030d57
58518 --- /dev/null
58519 +++ b/grsecurity/grsec_sock.c
58520 @@ -0,0 +1,244 @@
58521 +#include <linux/kernel.h>
58522 +#include <linux/module.h>
58523 +#include <linux/sched.h>
58524 +#include <linux/file.h>
58525 +#include <linux/net.h>
58526 +#include <linux/in.h>
58527 +#include <linux/ip.h>
58528 +#include <net/sock.h>
58529 +#include <net/inet_sock.h>
58530 +#include <linux/grsecurity.h>
58531 +#include <linux/grinternal.h>
58532 +#include <linux/gracl.h>
58533 +
58534 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58535 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58536 +
58537 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58538 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58539 +
58540 +#ifdef CONFIG_UNIX_MODULE
58541 +EXPORT_SYMBOL(gr_acl_handle_unix);
58542 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58543 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58544 +EXPORT_SYMBOL(gr_handle_create);
58545 +#endif
58546 +
58547 +#ifdef CONFIG_GRKERNSEC
58548 +#define gr_conn_table_size 32749
58549 +struct conn_table_entry {
58550 + struct conn_table_entry *next;
58551 + struct signal_struct *sig;
58552 +};
58553 +
58554 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58555 +DEFINE_SPINLOCK(gr_conn_table_lock);
58556 +
58557 +extern const char * gr_socktype_to_name(unsigned char type);
58558 +extern const char * gr_proto_to_name(unsigned char proto);
58559 +extern const char * gr_sockfamily_to_name(unsigned char family);
58560 +
58561 +static __inline__ int
58562 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58563 +{
58564 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58565 +}
58566 +
58567 +static __inline__ int
58568 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58569 + __u16 sport, __u16 dport)
58570 +{
58571 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58572 + sig->gr_sport == sport && sig->gr_dport == dport))
58573 + return 1;
58574 + else
58575 + return 0;
58576 +}
58577 +
58578 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58579 +{
58580 + struct conn_table_entry **match;
58581 + unsigned int index;
58582 +
58583 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58584 + sig->gr_sport, sig->gr_dport,
58585 + gr_conn_table_size);
58586 +
58587 + newent->sig = sig;
58588 +
58589 + match = &gr_conn_table[index];
58590 + newent->next = *match;
58591 + *match = newent;
58592 +
58593 + return;
58594 +}
58595 +
58596 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58597 +{
58598 + struct conn_table_entry *match, *last = NULL;
58599 + unsigned int index;
58600 +
58601 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58602 + sig->gr_sport, sig->gr_dport,
58603 + gr_conn_table_size);
58604 +
58605 + match = gr_conn_table[index];
58606 + while (match && !conn_match(match->sig,
58607 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58608 + sig->gr_dport)) {
58609 + last = match;
58610 + match = match->next;
58611 + }
58612 +
58613 + if (match) {
58614 + if (last)
58615 + last->next = match->next;
58616 + else
58617 + gr_conn_table[index] = NULL;
58618 + kfree(match);
58619 + }
58620 +
58621 + return;
58622 +}
58623 +
58624 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58625 + __u16 sport, __u16 dport)
58626 +{
58627 + struct conn_table_entry *match;
58628 + unsigned int index;
58629 +
58630 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58631 +
58632 + match = gr_conn_table[index];
58633 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58634 + match = match->next;
58635 +
58636 + if (match)
58637 + return match->sig;
58638 + else
58639 + return NULL;
58640 +}
58641 +
58642 +#endif
58643 +
58644 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58645 +{
58646 +#ifdef CONFIG_GRKERNSEC
58647 + struct signal_struct *sig = task->signal;
58648 + struct conn_table_entry *newent;
58649 +
58650 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58651 + if (newent == NULL)
58652 + return;
58653 + /* no bh lock needed since we are called with bh disabled */
58654 + spin_lock(&gr_conn_table_lock);
58655 + gr_del_task_from_ip_table_nolock(sig);
58656 + sig->gr_saddr = inet->inet_rcv_saddr;
58657 + sig->gr_daddr = inet->inet_daddr;
58658 + sig->gr_sport = inet->inet_sport;
58659 + sig->gr_dport = inet->inet_dport;
58660 + gr_add_to_task_ip_table_nolock(sig, newent);
58661 + spin_unlock(&gr_conn_table_lock);
58662 +#endif
58663 + return;
58664 +}
58665 +
58666 +void gr_del_task_from_ip_table(struct task_struct *task)
58667 +{
58668 +#ifdef CONFIG_GRKERNSEC
58669 + spin_lock_bh(&gr_conn_table_lock);
58670 + gr_del_task_from_ip_table_nolock(task->signal);
58671 + spin_unlock_bh(&gr_conn_table_lock);
58672 +#endif
58673 + return;
58674 +}
58675 +
58676 +void
58677 +gr_attach_curr_ip(const struct sock *sk)
58678 +{
58679 +#ifdef CONFIG_GRKERNSEC
58680 + struct signal_struct *p, *set;
58681 + const struct inet_sock *inet = inet_sk(sk);
58682 +
58683 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58684 + return;
58685 +
58686 + set = current->signal;
58687 +
58688 + spin_lock_bh(&gr_conn_table_lock);
58689 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58690 + inet->inet_dport, inet->inet_sport);
58691 + if (unlikely(p != NULL)) {
58692 + set->curr_ip = p->curr_ip;
58693 + set->used_accept = 1;
58694 + gr_del_task_from_ip_table_nolock(p);
58695 + spin_unlock_bh(&gr_conn_table_lock);
58696 + return;
58697 + }
58698 + spin_unlock_bh(&gr_conn_table_lock);
58699 +
58700 + set->curr_ip = inet->inet_daddr;
58701 + set->used_accept = 1;
58702 +#endif
58703 + return;
58704 +}
58705 +
58706 +int
58707 +gr_handle_sock_all(const int family, const int type, const int protocol)
58708 +{
58709 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58710 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58711 + (family != AF_UNIX)) {
58712 + if (family == AF_INET)
58713 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58714 + else
58715 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58716 + return -EACCES;
58717 + }
58718 +#endif
58719 + return 0;
58720 +}
58721 +
58722 +int
58723 +gr_handle_sock_server(const struct sockaddr *sck)
58724 +{
58725 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58726 + if (grsec_enable_socket_server &&
58727 + in_group_p(grsec_socket_server_gid) &&
58728 + sck && (sck->sa_family != AF_UNIX) &&
58729 + (sck->sa_family != AF_LOCAL)) {
58730 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58731 + return -EACCES;
58732 + }
58733 +#endif
58734 + return 0;
58735 +}
58736 +
58737 +int
58738 +gr_handle_sock_server_other(const struct sock *sck)
58739 +{
58740 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58741 + if (grsec_enable_socket_server &&
58742 + in_group_p(grsec_socket_server_gid) &&
58743 + sck && (sck->sk_family != AF_UNIX) &&
58744 + (sck->sk_family != AF_LOCAL)) {
58745 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58746 + return -EACCES;
58747 + }
58748 +#endif
58749 + return 0;
58750 +}
58751 +
58752 +int
58753 +gr_handle_sock_client(const struct sockaddr *sck)
58754 +{
58755 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58756 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58757 + sck && (sck->sa_family != AF_UNIX) &&
58758 + (sck->sa_family != AF_LOCAL)) {
58759 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58760 + return -EACCES;
58761 + }
58762 +#endif
58763 + return 0;
58764 +}
58765 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58766 new file mode 100644
58767 index 0000000..f55ef0f
58768 --- /dev/null
58769 +++ b/grsecurity/grsec_sysctl.c
58770 @@ -0,0 +1,469 @@
58771 +#include <linux/kernel.h>
58772 +#include <linux/sched.h>
58773 +#include <linux/sysctl.h>
58774 +#include <linux/grsecurity.h>
58775 +#include <linux/grinternal.h>
58776 +
58777 +int
58778 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58779 +{
58780 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58781 + if (dirname == NULL || name == NULL)
58782 + return 0;
58783 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58784 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58785 + return -EACCES;
58786 + }
58787 +#endif
58788 + return 0;
58789 +}
58790 +
58791 +#ifdef CONFIG_GRKERNSEC_ROFS
58792 +static int __maybe_unused one = 1;
58793 +#endif
58794 +
58795 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58796 +struct ctl_table grsecurity_table[] = {
58797 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58798 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58799 +#ifdef CONFIG_GRKERNSEC_IO
58800 + {
58801 + .procname = "disable_priv_io",
58802 + .data = &grsec_disable_privio,
58803 + .maxlen = sizeof(int),
58804 + .mode = 0600,
58805 + .proc_handler = &proc_dointvec,
58806 + },
58807 +#endif
58808 +#endif
58809 +#ifdef CONFIG_GRKERNSEC_LINK
58810 + {
58811 + .procname = "linking_restrictions",
58812 + .data = &grsec_enable_link,
58813 + .maxlen = sizeof(int),
58814 + .mode = 0600,
58815 + .proc_handler = &proc_dointvec,
58816 + },
58817 +#endif
58818 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58819 + {
58820 + .procname = "enforce_symlinksifowner",
58821 + .data = &grsec_enable_symlinkown,
58822 + .maxlen = sizeof(int),
58823 + .mode = 0600,
58824 + .proc_handler = &proc_dointvec,
58825 + },
58826 + {
58827 + .procname = "symlinkown_gid",
58828 + .data = &grsec_symlinkown_gid,
58829 + .maxlen = sizeof(int),
58830 + .mode = 0600,
58831 + .proc_handler = &proc_dointvec,
58832 + },
58833 +#endif
58834 +#ifdef CONFIG_GRKERNSEC_BRUTE
58835 + {
58836 + .procname = "deter_bruteforce",
58837 + .data = &grsec_enable_brute,
58838 + .maxlen = sizeof(int),
58839 + .mode = 0600,
58840 + .proc_handler = &proc_dointvec,
58841 + },
58842 +#endif
58843 +#ifdef CONFIG_GRKERNSEC_FIFO
58844 + {
58845 + .procname = "fifo_restrictions",
58846 + .data = &grsec_enable_fifo,
58847 + .maxlen = sizeof(int),
58848 + .mode = 0600,
58849 + .proc_handler = &proc_dointvec,
58850 + },
58851 +#endif
58852 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58853 + {
58854 + .procname = "ptrace_readexec",
58855 + .data = &grsec_enable_ptrace_readexec,
58856 + .maxlen = sizeof(int),
58857 + .mode = 0600,
58858 + .proc_handler = &proc_dointvec,
58859 + },
58860 +#endif
58861 +#ifdef CONFIG_GRKERNSEC_SETXID
58862 + {
58863 + .procname = "consistent_setxid",
58864 + .data = &grsec_enable_setxid,
58865 + .maxlen = sizeof(int),
58866 + .mode = 0600,
58867 + .proc_handler = &proc_dointvec,
58868 + },
58869 +#endif
58870 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58871 + {
58872 + .procname = "ip_blackhole",
58873 + .data = &grsec_enable_blackhole,
58874 + .maxlen = sizeof(int),
58875 + .mode = 0600,
58876 + .proc_handler = &proc_dointvec,
58877 + },
58878 + {
58879 + .procname = "lastack_retries",
58880 + .data = &grsec_lastack_retries,
58881 + .maxlen = sizeof(int),
58882 + .mode = 0600,
58883 + .proc_handler = &proc_dointvec,
58884 + },
58885 +#endif
58886 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58887 + {
58888 + .procname = "exec_logging",
58889 + .data = &grsec_enable_execlog,
58890 + .maxlen = sizeof(int),
58891 + .mode = 0600,
58892 + .proc_handler = &proc_dointvec,
58893 + },
58894 +#endif
58895 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58896 + {
58897 + .procname = "rwxmap_logging",
58898 + .data = &grsec_enable_log_rwxmaps,
58899 + .maxlen = sizeof(int),
58900 + .mode = 0600,
58901 + .proc_handler = &proc_dointvec,
58902 + },
58903 +#endif
58904 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58905 + {
58906 + .procname = "signal_logging",
58907 + .data = &grsec_enable_signal,
58908 + .maxlen = sizeof(int),
58909 + .mode = 0600,
58910 + .proc_handler = &proc_dointvec,
58911 + },
58912 +#endif
58913 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58914 + {
58915 + .procname = "forkfail_logging",
58916 + .data = &grsec_enable_forkfail,
58917 + .maxlen = sizeof(int),
58918 + .mode = 0600,
58919 + .proc_handler = &proc_dointvec,
58920 + },
58921 +#endif
58922 +#ifdef CONFIG_GRKERNSEC_TIME
58923 + {
58924 + .procname = "timechange_logging",
58925 + .data = &grsec_enable_time,
58926 + .maxlen = sizeof(int),
58927 + .mode = 0600,
58928 + .proc_handler = &proc_dointvec,
58929 + },
58930 +#endif
58931 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58932 + {
58933 + .procname = "chroot_deny_shmat",
58934 + .data = &grsec_enable_chroot_shmat,
58935 + .maxlen = sizeof(int),
58936 + .mode = 0600,
58937 + .proc_handler = &proc_dointvec,
58938 + },
58939 +#endif
58940 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58941 + {
58942 + .procname = "chroot_deny_unix",
58943 + .data = &grsec_enable_chroot_unix,
58944 + .maxlen = sizeof(int),
58945 + .mode = 0600,
58946 + .proc_handler = &proc_dointvec,
58947 + },
58948 +#endif
58949 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58950 + {
58951 + .procname = "chroot_deny_mount",
58952 + .data = &grsec_enable_chroot_mount,
58953 + .maxlen = sizeof(int),
58954 + .mode = 0600,
58955 + .proc_handler = &proc_dointvec,
58956 + },
58957 +#endif
58958 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58959 + {
58960 + .procname = "chroot_deny_fchdir",
58961 + .data = &grsec_enable_chroot_fchdir,
58962 + .maxlen = sizeof(int),
58963 + .mode = 0600,
58964 + .proc_handler = &proc_dointvec,
58965 + },
58966 +#endif
58967 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58968 + {
58969 + .procname = "chroot_deny_chroot",
58970 + .data = &grsec_enable_chroot_double,
58971 + .maxlen = sizeof(int),
58972 + .mode = 0600,
58973 + .proc_handler = &proc_dointvec,
58974 + },
58975 +#endif
58976 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58977 + {
58978 + .procname = "chroot_deny_pivot",
58979 + .data = &grsec_enable_chroot_pivot,
58980 + .maxlen = sizeof(int),
58981 + .mode = 0600,
58982 + .proc_handler = &proc_dointvec,
58983 + },
58984 +#endif
58985 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58986 + {
58987 + .procname = "chroot_enforce_chdir",
58988 + .data = &grsec_enable_chroot_chdir,
58989 + .maxlen = sizeof(int),
58990 + .mode = 0600,
58991 + .proc_handler = &proc_dointvec,
58992 + },
58993 +#endif
58994 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58995 + {
58996 + .procname = "chroot_deny_chmod",
58997 + .data = &grsec_enable_chroot_chmod,
58998 + .maxlen = sizeof(int),
58999 + .mode = 0600,
59000 + .proc_handler = &proc_dointvec,
59001 + },
59002 +#endif
59003 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59004 + {
59005 + .procname = "chroot_deny_mknod",
59006 + .data = &grsec_enable_chroot_mknod,
59007 + .maxlen = sizeof(int),
59008 + .mode = 0600,
59009 + .proc_handler = &proc_dointvec,
59010 + },
59011 +#endif
59012 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59013 + {
59014 + .procname = "chroot_restrict_nice",
59015 + .data = &grsec_enable_chroot_nice,
59016 + .maxlen = sizeof(int),
59017 + .mode = 0600,
59018 + .proc_handler = &proc_dointvec,
59019 + },
59020 +#endif
59021 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59022 + {
59023 + .procname = "chroot_execlog",
59024 + .data = &grsec_enable_chroot_execlog,
59025 + .maxlen = sizeof(int),
59026 + .mode = 0600,
59027 + .proc_handler = &proc_dointvec,
59028 + },
59029 +#endif
59030 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59031 + {
59032 + .procname = "chroot_caps",
59033 + .data = &grsec_enable_chroot_caps,
59034 + .maxlen = sizeof(int),
59035 + .mode = 0600,
59036 + .proc_handler = &proc_dointvec,
59037 + },
59038 +#endif
59039 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59040 + {
59041 + .procname = "chroot_deny_sysctl",
59042 + .data = &grsec_enable_chroot_sysctl,
59043 + .maxlen = sizeof(int),
59044 + .mode = 0600,
59045 + .proc_handler = &proc_dointvec,
59046 + },
59047 +#endif
59048 +#ifdef CONFIG_GRKERNSEC_TPE
59049 + {
59050 + .procname = "tpe",
59051 + .data = &grsec_enable_tpe,
59052 + .maxlen = sizeof(int),
59053 + .mode = 0600,
59054 + .proc_handler = &proc_dointvec,
59055 + },
59056 + {
59057 + .procname = "tpe_gid",
59058 + .data = &grsec_tpe_gid,
59059 + .maxlen = sizeof(int),
59060 + .mode = 0600,
59061 + .proc_handler = &proc_dointvec,
59062 + },
59063 +#endif
59064 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59065 + {
59066 + .procname = "tpe_invert",
59067 + .data = &grsec_enable_tpe_invert,
59068 + .maxlen = sizeof(int),
59069 + .mode = 0600,
59070 + .proc_handler = &proc_dointvec,
59071 + },
59072 +#endif
59073 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59074 + {
59075 + .procname = "tpe_restrict_all",
59076 + .data = &grsec_enable_tpe_all,
59077 + .maxlen = sizeof(int),
59078 + .mode = 0600,
59079 + .proc_handler = &proc_dointvec,
59080 + },
59081 +#endif
59082 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59083 + {
59084 + .procname = "socket_all",
59085 + .data = &grsec_enable_socket_all,
59086 + .maxlen = sizeof(int),
59087 + .mode = 0600,
59088 + .proc_handler = &proc_dointvec,
59089 + },
59090 + {
59091 + .procname = "socket_all_gid",
59092 + .data = &grsec_socket_all_gid,
59093 + .maxlen = sizeof(int),
59094 + .mode = 0600,
59095 + .proc_handler = &proc_dointvec,
59096 + },
59097 +#endif
59098 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59099 + {
59100 + .procname = "socket_client",
59101 + .data = &grsec_enable_socket_client,
59102 + .maxlen = sizeof(int),
59103 + .mode = 0600,
59104 + .proc_handler = &proc_dointvec,
59105 + },
59106 + {
59107 + .procname = "socket_client_gid",
59108 + .data = &grsec_socket_client_gid,
59109 + .maxlen = sizeof(int),
59110 + .mode = 0600,
59111 + .proc_handler = &proc_dointvec,
59112 + },
59113 +#endif
59114 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59115 + {
59116 + .procname = "socket_server",
59117 + .data = &grsec_enable_socket_server,
59118 + .maxlen = sizeof(int),
59119 + .mode = 0600,
59120 + .proc_handler = &proc_dointvec,
59121 + },
59122 + {
59123 + .procname = "socket_server_gid",
59124 + .data = &grsec_socket_server_gid,
59125 + .maxlen = sizeof(int),
59126 + .mode = 0600,
59127 + .proc_handler = &proc_dointvec,
59128 + },
59129 +#endif
59130 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59131 + {
59132 + .procname = "audit_group",
59133 + .data = &grsec_enable_group,
59134 + .maxlen = sizeof(int),
59135 + .mode = 0600,
59136 + .proc_handler = &proc_dointvec,
59137 + },
59138 + {
59139 + .procname = "audit_gid",
59140 + .data = &grsec_audit_gid,
59141 + .maxlen = sizeof(int),
59142 + .mode = 0600,
59143 + .proc_handler = &proc_dointvec,
59144 + },
59145 +#endif
59146 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59147 + {
59148 + .procname = "audit_chdir",
59149 + .data = &grsec_enable_chdir,
59150 + .maxlen = sizeof(int),
59151 + .mode = 0600,
59152 + .proc_handler = &proc_dointvec,
59153 + },
59154 +#endif
59155 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59156 + {
59157 + .procname = "audit_mount",
59158 + .data = &grsec_enable_mount,
59159 + .maxlen = sizeof(int),
59160 + .mode = 0600,
59161 + .proc_handler = &proc_dointvec,
59162 + },
59163 +#endif
59164 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59165 + {
59166 + .procname = "audit_textrel",
59167 + .data = &grsec_enable_audit_textrel,
59168 + .maxlen = sizeof(int),
59169 + .mode = 0600,
59170 + .proc_handler = &proc_dointvec,
59171 + },
59172 +#endif
59173 +#ifdef CONFIG_GRKERNSEC_DMESG
59174 + {
59175 + .procname = "dmesg",
59176 + .data = &grsec_enable_dmesg,
59177 + .maxlen = sizeof(int),
59178 + .mode = 0600,
59179 + .proc_handler = &proc_dointvec,
59180 + },
59181 +#endif
59182 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59183 + {
59184 + .procname = "chroot_findtask",
59185 + .data = &grsec_enable_chroot_findtask,
59186 + .maxlen = sizeof(int),
59187 + .mode = 0600,
59188 + .proc_handler = &proc_dointvec,
59189 + },
59190 +#endif
59191 +#ifdef CONFIG_GRKERNSEC_RESLOG
59192 + {
59193 + .procname = "resource_logging",
59194 + .data = &grsec_resource_logging,
59195 + .maxlen = sizeof(int),
59196 + .mode = 0600,
59197 + .proc_handler = &proc_dointvec,
59198 + },
59199 +#endif
59200 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59201 + {
59202 + .procname = "audit_ptrace",
59203 + .data = &grsec_enable_audit_ptrace,
59204 + .maxlen = sizeof(int),
59205 + .mode = 0600,
59206 + .proc_handler = &proc_dointvec,
59207 + },
59208 +#endif
59209 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59210 + {
59211 + .procname = "harden_ptrace",
59212 + .data = &grsec_enable_harden_ptrace,
59213 + .maxlen = sizeof(int),
59214 + .mode = 0600,
59215 + .proc_handler = &proc_dointvec,
59216 + },
59217 +#endif
59218 + {
59219 + .procname = "grsec_lock",
59220 + .data = &grsec_lock,
59221 + .maxlen = sizeof(int),
59222 + .mode = 0600,
59223 + .proc_handler = &proc_dointvec,
59224 + },
59225 +#endif
59226 +#ifdef CONFIG_GRKERNSEC_ROFS
59227 + {
59228 + .procname = "romount_protect",
59229 + .data = &grsec_enable_rofs,
59230 + .maxlen = sizeof(int),
59231 + .mode = 0600,
59232 + .proc_handler = &proc_dointvec_minmax,
59233 + .extra1 = &one,
59234 + .extra2 = &one,
59235 + },
59236 +#endif
59237 + { }
59238 +};
59239 +#endif
59240 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59241 new file mode 100644
59242 index 0000000..0dc13c3
59243 --- /dev/null
59244 +++ b/grsecurity/grsec_time.c
59245 @@ -0,0 +1,16 @@
59246 +#include <linux/kernel.h>
59247 +#include <linux/sched.h>
59248 +#include <linux/grinternal.h>
59249 +#include <linux/module.h>
59250 +
59251 +void
59252 +gr_log_timechange(void)
59253 +{
59254 +#ifdef CONFIG_GRKERNSEC_TIME
59255 + if (grsec_enable_time)
59256 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59257 +#endif
59258 + return;
59259 +}
59260 +
59261 +EXPORT_SYMBOL(gr_log_timechange);
59262 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59263 new file mode 100644
59264 index 0000000..07e0dc0
59265 --- /dev/null
59266 +++ b/grsecurity/grsec_tpe.c
59267 @@ -0,0 +1,73 @@
59268 +#include <linux/kernel.h>
59269 +#include <linux/sched.h>
59270 +#include <linux/file.h>
59271 +#include <linux/fs.h>
59272 +#include <linux/grinternal.h>
59273 +
59274 +extern int gr_acl_tpe_check(void);
59275 +
59276 +int
59277 +gr_tpe_allow(const struct file *file)
59278 +{
59279 +#ifdef CONFIG_GRKERNSEC
59280 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59281 + const struct cred *cred = current_cred();
59282 + char *msg = NULL;
59283 + char *msg2 = NULL;
59284 +
59285 + // never restrict root
59286 + if (!cred->uid)
59287 + return 1;
59288 +
59289 + if (grsec_enable_tpe) {
59290 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59291 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59292 + msg = "not being in trusted group";
59293 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59294 + msg = "being in untrusted group";
59295 +#else
59296 + if (in_group_p(grsec_tpe_gid))
59297 + msg = "being in untrusted group";
59298 +#endif
59299 + }
59300 + if (!msg && gr_acl_tpe_check())
59301 + msg = "being in untrusted role";
59302 +
59303 + // not in any affected group/role
59304 + if (!msg)
59305 + goto next_check;
59306 +
59307 + if (inode->i_uid)
59308 + msg2 = "file in non-root-owned directory";
59309 + else if (inode->i_mode & S_IWOTH)
59310 + msg2 = "file in world-writable directory";
59311 + else if (inode->i_mode & S_IWGRP)
59312 + msg2 = "file in group-writable directory";
59313 +
59314 + if (msg && msg2) {
59315 + char fullmsg[70] = {0};
59316 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59317 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59318 + return 0;
59319 + }
59320 + msg = NULL;
59321 +next_check:
59322 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59323 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59324 + return 1;
59325 +
59326 + if (inode->i_uid && (inode->i_uid != cred->uid))
59327 + msg = "directory not owned by user";
59328 + else if (inode->i_mode & S_IWOTH)
59329 + msg = "file in world-writable directory";
59330 + else if (inode->i_mode & S_IWGRP)
59331 + msg = "file in group-writable directory";
59332 +
59333 + if (msg) {
59334 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59335 + return 0;
59336 + }
59337 +#endif
59338 +#endif
59339 + return 1;
59340 +}
59341 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59342 new file mode 100644
59343 index 0000000..9f7b1ac
59344 --- /dev/null
59345 +++ b/grsecurity/grsum.c
59346 @@ -0,0 +1,61 @@
59347 +#include <linux/err.h>
59348 +#include <linux/kernel.h>
59349 +#include <linux/sched.h>
59350 +#include <linux/mm.h>
59351 +#include <linux/scatterlist.h>
59352 +#include <linux/crypto.h>
59353 +#include <linux/gracl.h>
59354 +
59355 +
59356 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59357 +#error "crypto and sha256 must be built into the kernel"
59358 +#endif
59359 +
59360 +int
59361 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59362 +{
59363 + char *p;
59364 + struct crypto_hash *tfm;
59365 + struct hash_desc desc;
59366 + struct scatterlist sg;
59367 + unsigned char temp_sum[GR_SHA_LEN];
59368 + volatile int retval = 0;
59369 + volatile int dummy = 0;
59370 + unsigned int i;
59371 +
59372 + sg_init_table(&sg, 1);
59373 +
59374 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59375 + if (IS_ERR(tfm)) {
59376 + /* should never happen, since sha256 should be built in */
59377 + return 1;
59378 + }
59379 +
59380 + desc.tfm = tfm;
59381 + desc.flags = 0;
59382 +
59383 + crypto_hash_init(&desc);
59384 +
59385 + p = salt;
59386 + sg_set_buf(&sg, p, GR_SALT_LEN);
59387 + crypto_hash_update(&desc, &sg, sg.length);
59388 +
59389 + p = entry->pw;
59390 + sg_set_buf(&sg, p, strlen(p));
59391 +
59392 + crypto_hash_update(&desc, &sg, sg.length);
59393 +
59394 + crypto_hash_final(&desc, temp_sum);
59395 +
59396 + memset(entry->pw, 0, GR_PW_LEN);
59397 +
59398 + for (i = 0; i < GR_SHA_LEN; i++)
59399 + if (sum[i] != temp_sum[i])
59400 + retval = 1;
59401 + else
59402 + dummy = 1; // waste a cycle
59403 +
59404 + crypto_free_hash(tfm);
59405 +
59406 + return retval;
59407 +}
59408 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59409 index f1c8ca6..b5c1cc7 100644
59410 --- a/include/acpi/acpi_bus.h
59411 +++ b/include/acpi/acpi_bus.h
59412 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59413 acpi_op_bind bind;
59414 acpi_op_unbind unbind;
59415 acpi_op_notify notify;
59416 -};
59417 +} __no_const;
59418
59419 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59420
59421 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59422 index b7babf0..3ba8aee 100644
59423 --- a/include/asm-generic/atomic-long.h
59424 +++ b/include/asm-generic/atomic-long.h
59425 @@ -22,6 +22,12 @@
59426
59427 typedef atomic64_t atomic_long_t;
59428
59429 +#ifdef CONFIG_PAX_REFCOUNT
59430 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59431 +#else
59432 +typedef atomic64_t atomic_long_unchecked_t;
59433 +#endif
59434 +
59435 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59436
59437 static inline long atomic_long_read(atomic_long_t *l)
59438 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59439 return (long)atomic64_read(v);
59440 }
59441
59442 +#ifdef CONFIG_PAX_REFCOUNT
59443 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59444 +{
59445 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59446 +
59447 + return (long)atomic64_read_unchecked(v);
59448 +}
59449 +#endif
59450 +
59451 static inline void atomic_long_set(atomic_long_t *l, long i)
59452 {
59453 atomic64_t *v = (atomic64_t *)l;
59454 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59455 atomic64_set(v, i);
59456 }
59457
59458 +#ifdef CONFIG_PAX_REFCOUNT
59459 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59460 +{
59461 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59462 +
59463 + atomic64_set_unchecked(v, i);
59464 +}
59465 +#endif
59466 +
59467 static inline void atomic_long_inc(atomic_long_t *l)
59468 {
59469 atomic64_t *v = (atomic64_t *)l;
59470 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59471 atomic64_inc(v);
59472 }
59473
59474 +#ifdef CONFIG_PAX_REFCOUNT
59475 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59476 +{
59477 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59478 +
59479 + atomic64_inc_unchecked(v);
59480 +}
59481 +#endif
59482 +
59483 static inline void atomic_long_dec(atomic_long_t *l)
59484 {
59485 atomic64_t *v = (atomic64_t *)l;
59486 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59487 atomic64_dec(v);
59488 }
59489
59490 +#ifdef CONFIG_PAX_REFCOUNT
59491 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59492 +{
59493 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59494 +
59495 + atomic64_dec_unchecked(v);
59496 +}
59497 +#endif
59498 +
59499 static inline void atomic_long_add(long i, atomic_long_t *l)
59500 {
59501 atomic64_t *v = (atomic64_t *)l;
59502 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59503 atomic64_add(i, v);
59504 }
59505
59506 +#ifdef CONFIG_PAX_REFCOUNT
59507 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59508 +{
59509 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59510 +
59511 + atomic64_add_unchecked(i, v);
59512 +}
59513 +#endif
59514 +
59515 static inline void atomic_long_sub(long i, atomic_long_t *l)
59516 {
59517 atomic64_t *v = (atomic64_t *)l;
59518 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59519 atomic64_sub(i, v);
59520 }
59521
59522 +#ifdef CONFIG_PAX_REFCOUNT
59523 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59524 +{
59525 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59526 +
59527 + atomic64_sub_unchecked(i, v);
59528 +}
59529 +#endif
59530 +
59531 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59532 {
59533 atomic64_t *v = (atomic64_t *)l;
59534 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59535 return (long)atomic64_inc_return(v);
59536 }
59537
59538 +#ifdef CONFIG_PAX_REFCOUNT
59539 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59540 +{
59541 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59542 +
59543 + return (long)atomic64_inc_return_unchecked(v);
59544 +}
59545 +#endif
59546 +
59547 static inline long atomic_long_dec_return(atomic_long_t *l)
59548 {
59549 atomic64_t *v = (atomic64_t *)l;
59550 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59551
59552 typedef atomic_t atomic_long_t;
59553
59554 +#ifdef CONFIG_PAX_REFCOUNT
59555 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59556 +#else
59557 +typedef atomic_t atomic_long_unchecked_t;
59558 +#endif
59559 +
59560 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59561 static inline long atomic_long_read(atomic_long_t *l)
59562 {
59563 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59564 return (long)atomic_read(v);
59565 }
59566
59567 +#ifdef CONFIG_PAX_REFCOUNT
59568 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59569 +{
59570 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59571 +
59572 + return (long)atomic_read_unchecked(v);
59573 +}
59574 +#endif
59575 +
59576 static inline void atomic_long_set(atomic_long_t *l, long i)
59577 {
59578 atomic_t *v = (atomic_t *)l;
59579 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59580 atomic_set(v, i);
59581 }
59582
59583 +#ifdef CONFIG_PAX_REFCOUNT
59584 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59585 +{
59586 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59587 +
59588 + atomic_set_unchecked(v, i);
59589 +}
59590 +#endif
59591 +
59592 static inline void atomic_long_inc(atomic_long_t *l)
59593 {
59594 atomic_t *v = (atomic_t *)l;
59595 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59596 atomic_inc(v);
59597 }
59598
59599 +#ifdef CONFIG_PAX_REFCOUNT
59600 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59601 +{
59602 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59603 +
59604 + atomic_inc_unchecked(v);
59605 +}
59606 +#endif
59607 +
59608 static inline void atomic_long_dec(atomic_long_t *l)
59609 {
59610 atomic_t *v = (atomic_t *)l;
59611 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59612 atomic_dec(v);
59613 }
59614
59615 +#ifdef CONFIG_PAX_REFCOUNT
59616 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59617 +{
59618 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59619 +
59620 + atomic_dec_unchecked(v);
59621 +}
59622 +#endif
59623 +
59624 static inline void atomic_long_add(long i, atomic_long_t *l)
59625 {
59626 atomic_t *v = (atomic_t *)l;
59627 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59628 atomic_add(i, v);
59629 }
59630
59631 +#ifdef CONFIG_PAX_REFCOUNT
59632 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59633 +{
59634 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59635 +
59636 + atomic_add_unchecked(i, v);
59637 +}
59638 +#endif
59639 +
59640 static inline void atomic_long_sub(long i, atomic_long_t *l)
59641 {
59642 atomic_t *v = (atomic_t *)l;
59643 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59644 atomic_sub(i, v);
59645 }
59646
59647 +#ifdef CONFIG_PAX_REFCOUNT
59648 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59649 +{
59650 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59651 +
59652 + atomic_sub_unchecked(i, v);
59653 +}
59654 +#endif
59655 +
59656 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59657 {
59658 atomic_t *v = (atomic_t *)l;
59659 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59660 return (long)atomic_inc_return(v);
59661 }
59662
59663 +#ifdef CONFIG_PAX_REFCOUNT
59664 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59665 +{
59666 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59667 +
59668 + return (long)atomic_inc_return_unchecked(v);
59669 +}
59670 +#endif
59671 +
59672 static inline long atomic_long_dec_return(atomic_long_t *l)
59673 {
59674 atomic_t *v = (atomic_t *)l;
59675 @@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59676
59677 #endif /* BITS_PER_LONG == 64 */
59678
59679 +#ifdef CONFIG_PAX_REFCOUNT
59680 +static inline void pax_refcount_needs_these_functions(void)
59681 +{
59682 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59683 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59684 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59685 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59686 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59687 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59688 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59689 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59690 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59691 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59692 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59693 +#ifdef CONFIG_X86
59694 + atomic_clear_mask_unchecked(0, NULL);
59695 + atomic_set_mask_unchecked(0, NULL);
59696 +#endif
59697 +
59698 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59699 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59700 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59701 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59702 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59703 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59704 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59705 +}
59706 +#else
59707 +#define atomic_read_unchecked(v) atomic_read(v)
59708 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59709 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59710 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59711 +#define atomic_inc_unchecked(v) atomic_inc(v)
59712 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59713 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59714 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59715 +#define atomic_dec_unchecked(v) atomic_dec(v)
59716 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59717 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59718 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
59719 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
59720 +
59721 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59722 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59723 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59724 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59725 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59726 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59727 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59728 +#endif
59729 +
59730 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59731 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
59732 index 1ced641..c896ee8 100644
59733 --- a/include/asm-generic/atomic.h
59734 +++ b/include/asm-generic/atomic.h
59735 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
59736 * Atomically clears the bits set in @mask from @v
59737 */
59738 #ifndef atomic_clear_mask
59739 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
59740 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
59741 {
59742 unsigned long flags;
59743
59744 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59745 index b18ce4f..2ee2843 100644
59746 --- a/include/asm-generic/atomic64.h
59747 +++ b/include/asm-generic/atomic64.h
59748 @@ -16,6 +16,8 @@ typedef struct {
59749 long long counter;
59750 } atomic64_t;
59751
59752 +typedef atomic64_t atomic64_unchecked_t;
59753 +
59754 #define ATOMIC64_INIT(i) { (i) }
59755
59756 extern long long atomic64_read(const atomic64_t *v);
59757 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59758 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59759 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59760
59761 +#define atomic64_read_unchecked(v) atomic64_read(v)
59762 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59763 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59764 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59765 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59766 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59767 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59768 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59769 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59770 +
59771 #endif /* _ASM_GENERIC_ATOMIC64_H */
59772 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59773 index 1bfcfe5..e04c5c9 100644
59774 --- a/include/asm-generic/cache.h
59775 +++ b/include/asm-generic/cache.h
59776 @@ -6,7 +6,7 @@
59777 * cache lines need to provide their own cache.h.
59778 */
59779
59780 -#define L1_CACHE_SHIFT 5
59781 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59782 +#define L1_CACHE_SHIFT 5UL
59783 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59784
59785 #endif /* __ASM_GENERIC_CACHE_H */
59786 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59787 index 0d68a1e..b74a761 100644
59788 --- a/include/asm-generic/emergency-restart.h
59789 +++ b/include/asm-generic/emergency-restart.h
59790 @@ -1,7 +1,7 @@
59791 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59792 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59793
59794 -static inline void machine_emergency_restart(void)
59795 +static inline __noreturn void machine_emergency_restart(void)
59796 {
59797 machine_restart(NULL);
59798 }
59799 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59800 index 0232ccb..13d9165 100644
59801 --- a/include/asm-generic/kmap_types.h
59802 +++ b/include/asm-generic/kmap_types.h
59803 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59804 KMAP_D(17) KM_NMI,
59805 KMAP_D(18) KM_NMI_PTE,
59806 KMAP_D(19) KM_KDB,
59807 +KMAP_D(20) KM_CLEARPAGE,
59808 /*
59809 * Remember to update debug_kmap_atomic() when adding new kmap types!
59810 */
59811 -KMAP_D(20) KM_TYPE_NR
59812 +KMAP_D(21) KM_TYPE_NR
59813 };
59814
59815 #undef KMAP_D
59816 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59817 index 9ceb03b..2efbcbd 100644
59818 --- a/include/asm-generic/local.h
59819 +++ b/include/asm-generic/local.h
59820 @@ -39,6 +39,7 @@ typedef struct
59821 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59822 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59823 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59824 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59825
59826 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59827 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59828 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59829 index 725612b..9cc513a 100644
59830 --- a/include/asm-generic/pgtable-nopmd.h
59831 +++ b/include/asm-generic/pgtable-nopmd.h
59832 @@ -1,14 +1,19 @@
59833 #ifndef _PGTABLE_NOPMD_H
59834 #define _PGTABLE_NOPMD_H
59835
59836 -#ifndef __ASSEMBLY__
59837 -
59838 #include <asm-generic/pgtable-nopud.h>
59839
59840 -struct mm_struct;
59841 -
59842 #define __PAGETABLE_PMD_FOLDED
59843
59844 +#define PMD_SHIFT PUD_SHIFT
59845 +#define PTRS_PER_PMD 1
59846 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59847 +#define PMD_MASK (~(PMD_SIZE-1))
59848 +
59849 +#ifndef __ASSEMBLY__
59850 +
59851 +struct mm_struct;
59852 +
59853 /*
59854 * Having the pmd type consist of a pud gets the size right, and allows
59855 * us to conceptually access the pud entry that this pmd is folded into
59856 @@ -16,11 +21,6 @@ struct mm_struct;
59857 */
59858 typedef struct { pud_t pud; } pmd_t;
59859
59860 -#define PMD_SHIFT PUD_SHIFT
59861 -#define PTRS_PER_PMD 1
59862 -#define PMD_SIZE (1UL << PMD_SHIFT)
59863 -#define PMD_MASK (~(PMD_SIZE-1))
59864 -
59865 /*
59866 * The "pud_xxx()" functions here are trivial for a folded two-level
59867 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59868 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59869 index 810431d..0ec4804f 100644
59870 --- a/include/asm-generic/pgtable-nopud.h
59871 +++ b/include/asm-generic/pgtable-nopud.h
59872 @@ -1,10 +1,15 @@
59873 #ifndef _PGTABLE_NOPUD_H
59874 #define _PGTABLE_NOPUD_H
59875
59876 -#ifndef __ASSEMBLY__
59877 -
59878 #define __PAGETABLE_PUD_FOLDED
59879
59880 +#define PUD_SHIFT PGDIR_SHIFT
59881 +#define PTRS_PER_PUD 1
59882 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59883 +#define PUD_MASK (~(PUD_SIZE-1))
59884 +
59885 +#ifndef __ASSEMBLY__
59886 +
59887 /*
59888 * Having the pud type consist of a pgd gets the size right, and allows
59889 * us to conceptually access the pgd entry that this pud is folded into
59890 @@ -12,11 +17,6 @@
59891 */
59892 typedef struct { pgd_t pgd; } pud_t;
59893
59894 -#define PUD_SHIFT PGDIR_SHIFT
59895 -#define PTRS_PER_PUD 1
59896 -#define PUD_SIZE (1UL << PUD_SHIFT)
59897 -#define PUD_MASK (~(PUD_SIZE-1))
59898 -
59899 /*
59900 * The "pgd_xxx()" functions here are trivial for a folded two-level
59901 * setup: the pud is never bad, and a pud always exists (as it's folded
59902 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59903 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59904
59905 #define pgd_populate(mm, pgd, pud) do { } while (0)
59906 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59907 /*
59908 * (puds are folded into pgds so this doesn't get actually called,
59909 * but the define is needed for a generic inline function.)
59910 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59911 index c7ec2cd..909d125 100644
59912 --- a/include/asm-generic/pgtable.h
59913 +++ b/include/asm-generic/pgtable.h
59914 @@ -531,6 +531,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59915 #endif
59916 }
59917
59918 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59919 +static inline unsigned long pax_open_kernel(void) { return 0; }
59920 +#endif
59921 +
59922 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59923 +static inline unsigned long pax_close_kernel(void) { return 0; }
59924 +#endif
59925 +
59926 #endif /* CONFIG_MMU */
59927
59928 #endif /* !__ASSEMBLY__ */
59929 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59930 index 8aeadf6..f1dc019 100644
59931 --- a/include/asm-generic/vmlinux.lds.h
59932 +++ b/include/asm-generic/vmlinux.lds.h
59933 @@ -218,6 +218,7 @@
59934 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59935 VMLINUX_SYMBOL(__start_rodata) = .; \
59936 *(.rodata) *(.rodata.*) \
59937 + *(.data..read_only) \
59938 *(__vermagic) /* Kernel version magic */ \
59939 . = ALIGN(8); \
59940 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59941 @@ -716,17 +717,18 @@
59942 * section in the linker script will go there too. @phdr should have
59943 * a leading colon.
59944 *
59945 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59946 + * Note that this macros defines per_cpu_load as an absolute symbol.
59947 * If there is no need to put the percpu section at a predetermined
59948 * address, use PERCPU_SECTION.
59949 */
59950 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59951 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59952 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59953 + per_cpu_load = .; \
59954 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59955 - LOAD_OFFSET) { \
59956 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59957 PERCPU_INPUT(cacheline) \
59958 } phdr \
59959 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59960 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59961
59962 /**
59963 * PERCPU_SECTION - define output section for percpu area, simple version
59964 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59965 index dd73104..fde86bd 100644
59966 --- a/include/drm/drmP.h
59967 +++ b/include/drm/drmP.h
59968 @@ -72,6 +72,7 @@
59969 #include <linux/workqueue.h>
59970 #include <linux/poll.h>
59971 #include <asm/pgalloc.h>
59972 +#include <asm/local.h>
59973 #include "drm.h"
59974
59975 #include <linux/idr.h>
59976 @@ -1074,7 +1075,7 @@ struct drm_device {
59977
59978 /** \name Usage Counters */
59979 /*@{ */
59980 - int open_count; /**< Outstanding files open */
59981 + local_t open_count; /**< Outstanding files open */
59982 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59983 atomic_t vma_count; /**< Outstanding vma areas open */
59984 int buf_use; /**< Buffers in use -- cannot alloc */
59985 @@ -1085,7 +1086,7 @@ struct drm_device {
59986 /*@{ */
59987 unsigned long counters;
59988 enum drm_stat_type types[15];
59989 - atomic_t counts[15];
59990 + atomic_unchecked_t counts[15];
59991 /*@} */
59992
59993 struct list_head filelist;
59994 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59995 index 37515d1..34fa8b0 100644
59996 --- a/include/drm/drm_crtc_helper.h
59997 +++ b/include/drm/drm_crtc_helper.h
59998 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59999
60000 /* disable crtc when not in use - more explicit than dpms off */
60001 void (*disable)(struct drm_crtc *crtc);
60002 -};
60003 +} __no_const;
60004
60005 struct drm_encoder_helper_funcs {
60006 void (*dpms)(struct drm_encoder *encoder, int mode);
60007 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60008 struct drm_connector *connector);
60009 /* disable encoder when not in use - more explicit than dpms off */
60010 void (*disable)(struct drm_encoder *encoder);
60011 -};
60012 +} __no_const;
60013
60014 struct drm_connector_helper_funcs {
60015 int (*get_modes)(struct drm_connector *connector);
60016 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60017 index d6d1da4..fdd1ac5 100644
60018 --- a/include/drm/ttm/ttm_memory.h
60019 +++ b/include/drm/ttm/ttm_memory.h
60020 @@ -48,7 +48,7 @@
60021
60022 struct ttm_mem_shrink {
60023 int (*do_shrink) (struct ttm_mem_shrink *);
60024 -};
60025 +} __no_const;
60026
60027 /**
60028 * struct ttm_mem_global - Global memory accounting structure.
60029 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60030 index e86dfca..40cc55f 100644
60031 --- a/include/linux/a.out.h
60032 +++ b/include/linux/a.out.h
60033 @@ -39,6 +39,14 @@ enum machine_type {
60034 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60035 };
60036
60037 +/* Constants for the N_FLAGS field */
60038 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60039 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60040 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60041 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60042 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60043 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60044 +
60045 #if !defined (N_MAGIC)
60046 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60047 #endif
60048 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60049 index 06fd4bb..1caec0d 100644
60050 --- a/include/linux/atmdev.h
60051 +++ b/include/linux/atmdev.h
60052 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60053 #endif
60054
60055 struct k_atm_aal_stats {
60056 -#define __HANDLE_ITEM(i) atomic_t i
60057 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60058 __AAL_STAT_ITEMS
60059 #undef __HANDLE_ITEM
60060 };
60061 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60062 index 366422b..1fa7f84 100644
60063 --- a/include/linux/binfmts.h
60064 +++ b/include/linux/binfmts.h
60065 @@ -89,6 +89,7 @@ struct linux_binfmt {
60066 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60067 int (*load_shlib)(struct file *);
60068 int (*core_dump)(struct coredump_params *cprm);
60069 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60070 unsigned long min_coredump; /* minimal dump size */
60071 };
60072
60073 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60074 index 4d4ac24..2c3ccce 100644
60075 --- a/include/linux/blkdev.h
60076 +++ b/include/linux/blkdev.h
60077 @@ -1376,7 +1376,7 @@ struct block_device_operations {
60078 /* this callback is with swap_lock and sometimes page table lock held */
60079 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60080 struct module *owner;
60081 -};
60082 +} __do_const;
60083
60084 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60085 unsigned long);
60086 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60087 index 4d1a074..88f929a 100644
60088 --- a/include/linux/blktrace_api.h
60089 +++ b/include/linux/blktrace_api.h
60090 @@ -162,7 +162,7 @@ struct blk_trace {
60091 struct dentry *dir;
60092 struct dentry *dropped_file;
60093 struct dentry *msg_file;
60094 - atomic_t dropped;
60095 + atomic_unchecked_t dropped;
60096 };
60097
60098 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60099 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60100 index 83195fb..0b0f77d 100644
60101 --- a/include/linux/byteorder/little_endian.h
60102 +++ b/include/linux/byteorder/little_endian.h
60103 @@ -42,51 +42,51 @@
60104
60105 static inline __le64 __cpu_to_le64p(const __u64 *p)
60106 {
60107 - return (__force __le64)*p;
60108 + return (__force const __le64)*p;
60109 }
60110 static inline __u64 __le64_to_cpup(const __le64 *p)
60111 {
60112 - return (__force __u64)*p;
60113 + return (__force const __u64)*p;
60114 }
60115 static inline __le32 __cpu_to_le32p(const __u32 *p)
60116 {
60117 - return (__force __le32)*p;
60118 + return (__force const __le32)*p;
60119 }
60120 static inline __u32 __le32_to_cpup(const __le32 *p)
60121 {
60122 - return (__force __u32)*p;
60123 + return (__force const __u32)*p;
60124 }
60125 static inline __le16 __cpu_to_le16p(const __u16 *p)
60126 {
60127 - return (__force __le16)*p;
60128 + return (__force const __le16)*p;
60129 }
60130 static inline __u16 __le16_to_cpup(const __le16 *p)
60131 {
60132 - return (__force __u16)*p;
60133 + return (__force const __u16)*p;
60134 }
60135 static inline __be64 __cpu_to_be64p(const __u64 *p)
60136 {
60137 - return (__force __be64)__swab64p(p);
60138 + return (__force const __be64)__swab64p(p);
60139 }
60140 static inline __u64 __be64_to_cpup(const __be64 *p)
60141 {
60142 - return __swab64p((__u64 *)p);
60143 + return __swab64p((const __u64 *)p);
60144 }
60145 static inline __be32 __cpu_to_be32p(const __u32 *p)
60146 {
60147 - return (__force __be32)__swab32p(p);
60148 + return (__force const __be32)__swab32p(p);
60149 }
60150 static inline __u32 __be32_to_cpup(const __be32 *p)
60151 {
60152 - return __swab32p((__u32 *)p);
60153 + return __swab32p((const __u32 *)p);
60154 }
60155 static inline __be16 __cpu_to_be16p(const __u16 *p)
60156 {
60157 - return (__force __be16)__swab16p(p);
60158 + return (__force const __be16)__swab16p(p);
60159 }
60160 static inline __u16 __be16_to_cpup(const __be16 *p)
60161 {
60162 - return __swab16p((__u16 *)p);
60163 + return __swab16p((const __u16 *)p);
60164 }
60165 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60166 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60167 diff --git a/include/linux/cache.h b/include/linux/cache.h
60168 index 4c57065..4307975 100644
60169 --- a/include/linux/cache.h
60170 +++ b/include/linux/cache.h
60171 @@ -16,6 +16,10 @@
60172 #define __read_mostly
60173 #endif
60174
60175 +#ifndef __read_only
60176 +#define __read_only __read_mostly
60177 +#endif
60178 +
60179 #ifndef ____cacheline_aligned
60180 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60181 #endif
60182 diff --git a/include/linux/capability.h b/include/linux/capability.h
60183 index 12d52de..b5f7fa7 100644
60184 --- a/include/linux/capability.h
60185 +++ b/include/linux/capability.h
60186 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60187 extern bool capable(int cap);
60188 extern bool ns_capable(struct user_namespace *ns, int cap);
60189 extern bool nsown_capable(int cap);
60190 +extern bool capable_nolog(int cap);
60191 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60192
60193 /* audit system wants to get cap info from files as well */
60194 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60195 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60196 index 42e55de..1cd0e66 100644
60197 --- a/include/linux/cleancache.h
60198 +++ b/include/linux/cleancache.h
60199 @@ -31,7 +31,7 @@ struct cleancache_ops {
60200 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
60201 void (*invalidate_inode)(int, struct cleancache_filekey);
60202 void (*invalidate_fs)(int);
60203 -};
60204 +} __no_const;
60205
60206 extern struct cleancache_ops
60207 cleancache_register_ops(struct cleancache_ops *ops);
60208 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60209 index 2f40791..9c9e13c 100644
60210 --- a/include/linux/compiler-gcc4.h
60211 +++ b/include/linux/compiler-gcc4.h
60212 @@ -32,6 +32,20 @@
60213 #define __linktime_error(message) __attribute__((__error__(message)))
60214
60215 #if __GNUC_MINOR__ >= 5
60216 +
60217 +#ifdef CONSTIFY_PLUGIN
60218 +#define __no_const __attribute__((no_const))
60219 +#define __do_const __attribute__((do_const))
60220 +#endif
60221 +
60222 +#ifdef SIZE_OVERFLOW_PLUGIN
60223 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60224 +#endif
60225 +
60226 +#ifdef LATENT_ENTROPY_PLUGIN
60227 +#define __latent_entropy __attribute__((latent_entropy))
60228 +#endif
60229 +
60230 /*
60231 * Mark a position in code as unreachable. This can be used to
60232 * suppress control flow warnings after asm blocks that transfer
60233 @@ -47,6 +61,11 @@
60234 #define __noclone __attribute__((__noclone__))
60235
60236 #endif
60237 +
60238 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60239 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60240 +#define __bos0(ptr) __bos((ptr), 0)
60241 +#define __bos1(ptr) __bos((ptr), 1)
60242 #endif
60243
60244 #if __GNUC_MINOR__ > 0
60245 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60246 index 923d093..1fef491 100644
60247 --- a/include/linux/compiler.h
60248 +++ b/include/linux/compiler.h
60249 @@ -5,31 +5,62 @@
60250
60251 #ifdef __CHECKER__
60252 # define __user __attribute__((noderef, address_space(1)))
60253 +# define __force_user __force __user
60254 # define __kernel __attribute__((address_space(0)))
60255 +# define __force_kernel __force __kernel
60256 # define __safe __attribute__((safe))
60257 # define __force __attribute__((force))
60258 # define __nocast __attribute__((nocast))
60259 # define __iomem __attribute__((noderef, address_space(2)))
60260 +# define __force_iomem __force __iomem
60261 # define __acquires(x) __attribute__((context(x,0,1)))
60262 # define __releases(x) __attribute__((context(x,1,0)))
60263 # define __acquire(x) __context__(x,1)
60264 # define __release(x) __context__(x,-1)
60265 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60266 # define __percpu __attribute__((noderef, address_space(3)))
60267 +# define __force_percpu __force __percpu
60268 #ifdef CONFIG_SPARSE_RCU_POINTER
60269 # define __rcu __attribute__((noderef, address_space(4)))
60270 +# define __force_rcu __force __rcu
60271 #else
60272 # define __rcu
60273 +# define __force_rcu
60274 #endif
60275 extern void __chk_user_ptr(const volatile void __user *);
60276 extern void __chk_io_ptr(const volatile void __iomem *);
60277 +#elif defined(CHECKER_PLUGIN)
60278 +//# define __user
60279 +//# define __force_user
60280 +//# define __kernel
60281 +//# define __force_kernel
60282 +# define __safe
60283 +# define __force
60284 +# define __nocast
60285 +# define __iomem
60286 +# define __force_iomem
60287 +# define __chk_user_ptr(x) (void)0
60288 +# define __chk_io_ptr(x) (void)0
60289 +# define __builtin_warning(x, y...) (1)
60290 +# define __acquires(x)
60291 +# define __releases(x)
60292 +# define __acquire(x) (void)0
60293 +# define __release(x) (void)0
60294 +# define __cond_lock(x,c) (c)
60295 +# define __percpu
60296 +# define __force_percpu
60297 +# define __rcu
60298 +# define __force_rcu
60299 #else
60300 # define __user
60301 +# define __force_user
60302 # define __kernel
60303 +# define __force_kernel
60304 # define __safe
60305 # define __force
60306 # define __nocast
60307 # define __iomem
60308 +# define __force_iomem
60309 # define __chk_user_ptr(x) (void)0
60310 # define __chk_io_ptr(x) (void)0
60311 # define __builtin_warning(x, y...) (1)
60312 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60313 # define __release(x) (void)0
60314 # define __cond_lock(x,c) (c)
60315 # define __percpu
60316 +# define __force_percpu
60317 # define __rcu
60318 +# define __force_rcu
60319 #endif
60320
60321 #ifdef __KERNEL__
60322 @@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60323 # define __attribute_const__ /* unimplemented */
60324 #endif
60325
60326 +#ifndef __no_const
60327 +# define __no_const
60328 +#endif
60329 +
60330 +#ifndef __do_const
60331 +# define __do_const
60332 +#endif
60333 +
60334 +#ifndef __size_overflow
60335 +# define __size_overflow(...)
60336 +#endif
60337 +
60338 +#ifndef __latent_entropy
60339 +# define __latent_entropy
60340 +#endif
60341 +
60342 /*
60343 * Tell gcc if a function is cold. The compiler will assume any path
60344 * directly leading to the call is unlikely.
60345 @@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60346 #define __cold
60347 #endif
60348
60349 +#ifndef __alloc_size
60350 +#define __alloc_size(...)
60351 +#endif
60352 +
60353 +#ifndef __bos
60354 +#define __bos(ptr, arg)
60355 +#endif
60356 +
60357 +#ifndef __bos0
60358 +#define __bos0(ptr)
60359 +#endif
60360 +
60361 +#ifndef __bos1
60362 +#define __bos1(ptr)
60363 +#endif
60364 +
60365 /* Simple shorthand for a section definition */
60366 #ifndef __section
60367 # define __section(S) __attribute__ ((__section__(#S)))
60368 @@ -308,6 +373,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60369 * use is to mediate communication between process-level code and irq/NMI
60370 * handlers, all running on the same CPU.
60371 */
60372 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60373 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60374 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60375
60376 #endif /* __LINUX_COMPILER_H */
60377 diff --git a/include/linux/cred.h b/include/linux/cred.h
60378 index adadf71..6af5560 100644
60379 --- a/include/linux/cred.h
60380 +++ b/include/linux/cred.h
60381 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60382 static inline void validate_process_creds(void)
60383 {
60384 }
60385 +static inline void validate_task_creds(struct task_struct *task)
60386 +{
60387 +}
60388 #endif
60389
60390 /**
60391 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60392 index b92eadf..b4ecdc1 100644
60393 --- a/include/linux/crypto.h
60394 +++ b/include/linux/crypto.h
60395 @@ -373,7 +373,7 @@ struct cipher_tfm {
60396 const u8 *key, unsigned int keylen);
60397 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60398 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60399 -};
60400 +} __no_const;
60401
60402 struct hash_tfm {
60403 int (*init)(struct hash_desc *desc);
60404 @@ -394,13 +394,13 @@ struct compress_tfm {
60405 int (*cot_decompress)(struct crypto_tfm *tfm,
60406 const u8 *src, unsigned int slen,
60407 u8 *dst, unsigned int *dlen);
60408 -};
60409 +} __no_const;
60410
60411 struct rng_tfm {
60412 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60413 unsigned int dlen);
60414 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60415 -};
60416 +} __no_const;
60417
60418 #define crt_ablkcipher crt_u.ablkcipher
60419 #define crt_aead crt_u.aead
60420 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60421 index 7925bf0..d5143d2 100644
60422 --- a/include/linux/decompress/mm.h
60423 +++ b/include/linux/decompress/mm.h
60424 @@ -77,7 +77,7 @@ static void free(void *where)
60425 * warnings when not needed (indeed large_malloc / large_free are not
60426 * needed by inflate */
60427
60428 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60429 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60430 #define free(a) kfree(a)
60431
60432 #define large_malloc(a) vmalloc(a)
60433 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60434 index dfc099e..e583e66 100644
60435 --- a/include/linux/dma-mapping.h
60436 +++ b/include/linux/dma-mapping.h
60437 @@ -51,7 +51,7 @@ struct dma_map_ops {
60438 u64 (*get_required_mask)(struct device *dev);
60439 #endif
60440 int is_phys;
60441 -};
60442 +} __do_const;
60443
60444 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60445
60446 diff --git a/include/linux/efi.h b/include/linux/efi.h
60447 index ec45ccd..9923c32 100644
60448 --- a/include/linux/efi.h
60449 +++ b/include/linux/efi.h
60450 @@ -635,7 +635,7 @@ struct efivar_operations {
60451 efi_get_variable_t *get_variable;
60452 efi_get_next_variable_t *get_next_variable;
60453 efi_set_variable_t *set_variable;
60454 -};
60455 +} __no_const;
60456
60457 struct efivars {
60458 /*
60459 diff --git a/include/linux/elf.h b/include/linux/elf.h
60460 index 999b4f5..57753b4 100644
60461 --- a/include/linux/elf.h
60462 +++ b/include/linux/elf.h
60463 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60464 #define PT_GNU_EH_FRAME 0x6474e550
60465
60466 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60467 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60468 +
60469 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60470 +
60471 +/* Constants for the e_flags field */
60472 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60473 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60474 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60475 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60476 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60477 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60478
60479 /*
60480 * Extended Numbering
60481 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60482 #define DT_DEBUG 21
60483 #define DT_TEXTREL 22
60484 #define DT_JMPREL 23
60485 +#define DT_FLAGS 30
60486 + #define DF_TEXTREL 0x00000004
60487 #define DT_ENCODING 32
60488 #define OLD_DT_LOOS 0x60000000
60489 #define DT_LOOS 0x6000000d
60490 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60491 #define PF_W 0x2
60492 #define PF_X 0x1
60493
60494 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60495 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60496 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60497 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60498 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60499 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60500 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60501 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60502 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60503 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60504 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60505 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60506 +
60507 typedef struct elf32_phdr{
60508 Elf32_Word p_type;
60509 Elf32_Off p_offset;
60510 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60511 #define EI_OSABI 7
60512 #define EI_PAD 8
60513
60514 +#define EI_PAX 14
60515 +
60516 #define ELFMAG0 0x7f /* EI_MAG */
60517 #define ELFMAG1 'E'
60518 #define ELFMAG2 'L'
60519 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60520 #define elf_note elf32_note
60521 #define elf_addr_t Elf32_Off
60522 #define Elf_Half Elf32_Half
60523 +#define elf_dyn Elf32_Dyn
60524
60525 #else
60526
60527 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60528 #define elf_note elf64_note
60529 #define elf_addr_t Elf64_Off
60530 #define Elf_Half Elf64_Half
60531 +#define elf_dyn Elf64_Dyn
60532
60533 #endif
60534
60535 diff --git a/include/linux/filter.h b/include/linux/filter.h
60536 index 8eeb205..d59bfa2 100644
60537 --- a/include/linux/filter.h
60538 +++ b/include/linux/filter.h
60539 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60540
60541 struct sk_buff;
60542 struct sock;
60543 +struct bpf_jit_work;
60544
60545 struct sk_filter
60546 {
60547 @@ -141,6 +142,9 @@ struct sk_filter
60548 unsigned int len; /* Number of filter blocks */
60549 unsigned int (*bpf_func)(const struct sk_buff *skb,
60550 const struct sock_filter *filter);
60551 +#ifdef CONFIG_BPF_JIT
60552 + struct bpf_jit_work *work;
60553 +#endif
60554 struct rcu_head rcu;
60555 struct sock_filter insns[0];
60556 };
60557 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60558 index cdc9b71..ce69fb5 100644
60559 --- a/include/linux/firewire.h
60560 +++ b/include/linux/firewire.h
60561 @@ -413,7 +413,7 @@ struct fw_iso_context {
60562 union {
60563 fw_iso_callback_t sc;
60564 fw_iso_mc_callback_t mc;
60565 - } callback;
60566 + } __no_const callback;
60567 void *callback_data;
60568 };
60569
60570 diff --git a/include/linux/fs.h b/include/linux/fs.h
60571 index 25c40b9..1bfd4f4 100644
60572 --- a/include/linux/fs.h
60573 +++ b/include/linux/fs.h
60574 @@ -1634,7 +1634,8 @@ struct file_operations {
60575 int (*setlease)(struct file *, long, struct file_lock **);
60576 long (*fallocate)(struct file *file, int mode, loff_t offset,
60577 loff_t len);
60578 -};
60579 +} __do_const;
60580 +typedef struct file_operations __no_const file_operations_no_const;
60581
60582 struct inode_operations {
60583 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60584 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60585 index 003dc0f..3c4ea97 100644
60586 --- a/include/linux/fs_struct.h
60587 +++ b/include/linux/fs_struct.h
60588 @@ -6,7 +6,7 @@
60589 #include <linux/seqlock.h>
60590
60591 struct fs_struct {
60592 - int users;
60593 + atomic_t users;
60594 spinlock_t lock;
60595 seqcount_t seq;
60596 int umask;
60597 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60598 index ce31408..b1ad003 100644
60599 --- a/include/linux/fscache-cache.h
60600 +++ b/include/linux/fscache-cache.h
60601 @@ -102,7 +102,7 @@ struct fscache_operation {
60602 fscache_operation_release_t release;
60603 };
60604
60605 -extern atomic_t fscache_op_debug_id;
60606 +extern atomic_unchecked_t fscache_op_debug_id;
60607 extern void fscache_op_work_func(struct work_struct *work);
60608
60609 extern void fscache_enqueue_operation(struct fscache_operation *);
60610 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60611 {
60612 INIT_WORK(&op->work, fscache_op_work_func);
60613 atomic_set(&op->usage, 1);
60614 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60615 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60616 op->processor = processor;
60617 op->release = release;
60618 INIT_LIST_HEAD(&op->pend_link);
60619 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60620 index a6dfe69..569586df 100644
60621 --- a/include/linux/fsnotify.h
60622 +++ b/include/linux/fsnotify.h
60623 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60624 */
60625 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60626 {
60627 - return kstrdup(name, GFP_KERNEL);
60628 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60629 }
60630
60631 /*
60632 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60633 index 91d0e0a3..035666b 100644
60634 --- a/include/linux/fsnotify_backend.h
60635 +++ b/include/linux/fsnotify_backend.h
60636 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60637 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60638 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60639 };
60640 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60641
60642 /*
60643 * A group is a "thing" that wants to receive notification about filesystem
60644 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60645 index 176a939..1462211 100644
60646 --- a/include/linux/ftrace_event.h
60647 +++ b/include/linux/ftrace_event.h
60648 @@ -97,7 +97,7 @@ struct trace_event_functions {
60649 trace_print_func raw;
60650 trace_print_func hex;
60651 trace_print_func binary;
60652 -};
60653 +} __no_const;
60654
60655 struct trace_event {
60656 struct hlist_node node;
60657 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60658 extern int trace_add_event_call(struct ftrace_event_call *call);
60659 extern void trace_remove_event_call(struct ftrace_event_call *call);
60660
60661 -#define is_signed_type(type) (((type)(-1)) < 0)
60662 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60663
60664 int trace_set_clr_event(const char *system, const char *event, int set);
60665
60666 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60667 index 017a7fb..33a8507 100644
60668 --- a/include/linux/genhd.h
60669 +++ b/include/linux/genhd.h
60670 @@ -185,7 +185,7 @@ struct gendisk {
60671 struct kobject *slave_dir;
60672
60673 struct timer_rand_state *random;
60674 - atomic_t sync_io; /* RAID */
60675 + atomic_unchecked_t sync_io; /* RAID */
60676 struct disk_events *ev;
60677 #ifdef CONFIG_BLK_DEV_INTEGRITY
60678 struct blk_integrity *integrity;
60679 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
60680 index 581e74b..8c34a24 100644
60681 --- a/include/linux/gfp.h
60682 +++ b/include/linux/gfp.h
60683 @@ -38,6 +38,12 @@ struct vm_area_struct;
60684 #define ___GFP_OTHER_NODE 0x800000u
60685 #define ___GFP_WRITE 0x1000000u
60686
60687 +#ifdef CONFIG_PAX_USERCOPY_SLABS
60688 +#define ___GFP_USERCOPY 0x2000000u
60689 +#else
60690 +#define ___GFP_USERCOPY 0
60691 +#endif
60692 +
60693 /*
60694 * GFP bitmasks..
60695 *
60696 @@ -87,6 +93,7 @@ struct vm_area_struct;
60697 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
60698 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
60699 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
60700 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
60701
60702 /*
60703 * This may seem redundant, but it's a way of annotating false positives vs.
60704 @@ -94,7 +101,7 @@ struct vm_area_struct;
60705 */
60706 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
60707
60708 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
60709 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
60710 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
60711
60712 /* This equals 0, but use constants in case they ever change */
60713 @@ -148,6 +155,8 @@ struct vm_area_struct;
60714 /* 4GB DMA on some platforms */
60715 #define GFP_DMA32 __GFP_DMA32
60716
60717 +#define GFP_USERCOPY __GFP_USERCOPY
60718 +
60719 /* Convert GFP flags to their corresponding migrate type */
60720 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
60721 {
60722 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60723 new file mode 100644
60724 index 0000000..c938b1f
60725 --- /dev/null
60726 +++ b/include/linux/gracl.h
60727 @@ -0,0 +1,319 @@
60728 +#ifndef GR_ACL_H
60729 +#define GR_ACL_H
60730 +
60731 +#include <linux/grdefs.h>
60732 +#include <linux/resource.h>
60733 +#include <linux/capability.h>
60734 +#include <linux/dcache.h>
60735 +#include <asm/resource.h>
60736 +
60737 +/* Major status information */
60738 +
60739 +#define GR_VERSION "grsecurity 2.9.1"
60740 +#define GRSECURITY_VERSION 0x2901
60741 +
60742 +enum {
60743 + GR_SHUTDOWN = 0,
60744 + GR_ENABLE = 1,
60745 + GR_SPROLE = 2,
60746 + GR_RELOAD = 3,
60747 + GR_SEGVMOD = 4,
60748 + GR_STATUS = 5,
60749 + GR_UNSPROLE = 6,
60750 + GR_PASSSET = 7,
60751 + GR_SPROLEPAM = 8,
60752 +};
60753 +
60754 +/* Password setup definitions
60755 + * kernel/grhash.c */
60756 +enum {
60757 + GR_PW_LEN = 128,
60758 + GR_SALT_LEN = 16,
60759 + GR_SHA_LEN = 32,
60760 +};
60761 +
60762 +enum {
60763 + GR_SPROLE_LEN = 64,
60764 +};
60765 +
60766 +enum {
60767 + GR_NO_GLOB = 0,
60768 + GR_REG_GLOB,
60769 + GR_CREATE_GLOB
60770 +};
60771 +
60772 +#define GR_NLIMITS 32
60773 +
60774 +/* Begin Data Structures */
60775 +
60776 +struct sprole_pw {
60777 + unsigned char *rolename;
60778 + unsigned char salt[GR_SALT_LEN];
60779 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60780 +};
60781 +
60782 +struct name_entry {
60783 + __u32 key;
60784 + ino_t inode;
60785 + dev_t device;
60786 + char *name;
60787 + __u16 len;
60788 + __u8 deleted;
60789 + struct name_entry *prev;
60790 + struct name_entry *next;
60791 +};
60792 +
60793 +struct inodev_entry {
60794 + struct name_entry *nentry;
60795 + struct inodev_entry *prev;
60796 + struct inodev_entry *next;
60797 +};
60798 +
60799 +struct acl_role_db {
60800 + struct acl_role_label **r_hash;
60801 + __u32 r_size;
60802 +};
60803 +
60804 +struct inodev_db {
60805 + struct inodev_entry **i_hash;
60806 + __u32 i_size;
60807 +};
60808 +
60809 +struct name_db {
60810 + struct name_entry **n_hash;
60811 + __u32 n_size;
60812 +};
60813 +
60814 +struct crash_uid {
60815 + uid_t uid;
60816 + unsigned long expires;
60817 +};
60818 +
60819 +struct gr_hash_struct {
60820 + void **table;
60821 + void **nametable;
60822 + void *first;
60823 + __u32 table_size;
60824 + __u32 used_size;
60825 + int type;
60826 +};
60827 +
60828 +/* Userspace Grsecurity ACL data structures */
60829 +
60830 +struct acl_subject_label {
60831 + char *filename;
60832 + ino_t inode;
60833 + dev_t device;
60834 + __u32 mode;
60835 + kernel_cap_t cap_mask;
60836 + kernel_cap_t cap_lower;
60837 + kernel_cap_t cap_invert_audit;
60838 +
60839 + struct rlimit res[GR_NLIMITS];
60840 + __u32 resmask;
60841 +
60842 + __u8 user_trans_type;
60843 + __u8 group_trans_type;
60844 + uid_t *user_transitions;
60845 + gid_t *group_transitions;
60846 + __u16 user_trans_num;
60847 + __u16 group_trans_num;
60848 +
60849 + __u32 sock_families[2];
60850 + __u32 ip_proto[8];
60851 + __u32 ip_type;
60852 + struct acl_ip_label **ips;
60853 + __u32 ip_num;
60854 + __u32 inaddr_any_override;
60855 +
60856 + __u32 crashes;
60857 + unsigned long expires;
60858 +
60859 + struct acl_subject_label *parent_subject;
60860 + struct gr_hash_struct *hash;
60861 + struct acl_subject_label *prev;
60862 + struct acl_subject_label *next;
60863 +
60864 + struct acl_object_label **obj_hash;
60865 + __u32 obj_hash_size;
60866 + __u16 pax_flags;
60867 +};
60868 +
60869 +struct role_allowed_ip {
60870 + __u32 addr;
60871 + __u32 netmask;
60872 +
60873 + struct role_allowed_ip *prev;
60874 + struct role_allowed_ip *next;
60875 +};
60876 +
60877 +struct role_transition {
60878 + char *rolename;
60879 +
60880 + struct role_transition *prev;
60881 + struct role_transition *next;
60882 +};
60883 +
60884 +struct acl_role_label {
60885 + char *rolename;
60886 + uid_t uidgid;
60887 + __u16 roletype;
60888 +
60889 + __u16 auth_attempts;
60890 + unsigned long expires;
60891 +
60892 + struct acl_subject_label *root_label;
60893 + struct gr_hash_struct *hash;
60894 +
60895 + struct acl_role_label *prev;
60896 + struct acl_role_label *next;
60897 +
60898 + struct role_transition *transitions;
60899 + struct role_allowed_ip *allowed_ips;
60900 + uid_t *domain_children;
60901 + __u16 domain_child_num;
60902 +
60903 + umode_t umask;
60904 +
60905 + struct acl_subject_label **subj_hash;
60906 + __u32 subj_hash_size;
60907 +};
60908 +
60909 +struct user_acl_role_db {
60910 + struct acl_role_label **r_table;
60911 + __u32 num_pointers; /* Number of allocations to track */
60912 + __u32 num_roles; /* Number of roles */
60913 + __u32 num_domain_children; /* Number of domain children */
60914 + __u32 num_subjects; /* Number of subjects */
60915 + __u32 num_objects; /* Number of objects */
60916 +};
60917 +
60918 +struct acl_object_label {
60919 + char *filename;
60920 + ino_t inode;
60921 + dev_t device;
60922 + __u32 mode;
60923 +
60924 + struct acl_subject_label *nested;
60925 + struct acl_object_label *globbed;
60926 +
60927 + /* next two structures not used */
60928 +
60929 + struct acl_object_label *prev;
60930 + struct acl_object_label *next;
60931 +};
60932 +
60933 +struct acl_ip_label {
60934 + char *iface;
60935 + __u32 addr;
60936 + __u32 netmask;
60937 + __u16 low, high;
60938 + __u8 mode;
60939 + __u32 type;
60940 + __u32 proto[8];
60941 +
60942 + /* next two structures not used */
60943 +
60944 + struct acl_ip_label *prev;
60945 + struct acl_ip_label *next;
60946 +};
60947 +
60948 +struct gr_arg {
60949 + struct user_acl_role_db role_db;
60950 + unsigned char pw[GR_PW_LEN];
60951 + unsigned char salt[GR_SALT_LEN];
60952 + unsigned char sum[GR_SHA_LEN];
60953 + unsigned char sp_role[GR_SPROLE_LEN];
60954 + struct sprole_pw *sprole_pws;
60955 + dev_t segv_device;
60956 + ino_t segv_inode;
60957 + uid_t segv_uid;
60958 + __u16 num_sprole_pws;
60959 + __u16 mode;
60960 +};
60961 +
60962 +struct gr_arg_wrapper {
60963 + struct gr_arg *arg;
60964 + __u32 version;
60965 + __u32 size;
60966 +};
60967 +
60968 +struct subject_map {
60969 + struct acl_subject_label *user;
60970 + struct acl_subject_label *kernel;
60971 + struct subject_map *prev;
60972 + struct subject_map *next;
60973 +};
60974 +
60975 +struct acl_subj_map_db {
60976 + struct subject_map **s_hash;
60977 + __u32 s_size;
60978 +};
60979 +
60980 +/* End Data Structures Section */
60981 +
60982 +/* Hash functions generated by empirical testing by Brad Spengler
60983 + Makes good use of the low bits of the inode. Generally 0-1 times
60984 + in loop for successful match. 0-3 for unsuccessful match.
60985 + Shift/add algorithm with modulus of table size and an XOR*/
60986 +
60987 +static __inline__ unsigned int
60988 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60989 +{
60990 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60991 +}
60992 +
60993 + static __inline__ unsigned int
60994 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60995 +{
60996 + return ((const unsigned long)userp % sz);
60997 +}
60998 +
60999 +static __inline__ unsigned int
61000 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61001 +{
61002 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61003 +}
61004 +
61005 +static __inline__ unsigned int
61006 +nhash(const char *name, const __u16 len, const unsigned int sz)
61007 +{
61008 + return full_name_hash((const unsigned char *)name, len) % sz;
61009 +}
61010 +
61011 +#define FOR_EACH_ROLE_START(role) \
61012 + role = role_list; \
61013 + while (role) {
61014 +
61015 +#define FOR_EACH_ROLE_END(role) \
61016 + role = role->prev; \
61017 + }
61018 +
61019 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61020 + subj = NULL; \
61021 + iter = 0; \
61022 + while (iter < role->subj_hash_size) { \
61023 + if (subj == NULL) \
61024 + subj = role->subj_hash[iter]; \
61025 + if (subj == NULL) { \
61026 + iter++; \
61027 + continue; \
61028 + }
61029 +
61030 +#define FOR_EACH_SUBJECT_END(subj,iter) \
61031 + subj = subj->next; \
61032 + if (subj == NULL) \
61033 + iter++; \
61034 + }
61035 +
61036 +
61037 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61038 + subj = role->hash->first; \
61039 + while (subj != NULL) {
61040 +
61041 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61042 + subj = subj->next; \
61043 + }
61044 +
61045 +#endif
61046 +
61047 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61048 new file mode 100644
61049 index 0000000..323ecf2
61050 --- /dev/null
61051 +++ b/include/linux/gralloc.h
61052 @@ -0,0 +1,9 @@
61053 +#ifndef __GRALLOC_H
61054 +#define __GRALLOC_H
61055 +
61056 +void acl_free_all(void);
61057 +int acl_alloc_stack_init(unsigned long size);
61058 +void *acl_alloc(unsigned long len);
61059 +void *acl_alloc_num(unsigned long num, unsigned long len);
61060 +
61061 +#endif
61062 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61063 new file mode 100644
61064 index 0000000..b30e9bc
61065 --- /dev/null
61066 +++ b/include/linux/grdefs.h
61067 @@ -0,0 +1,140 @@
61068 +#ifndef GRDEFS_H
61069 +#define GRDEFS_H
61070 +
61071 +/* Begin grsecurity status declarations */
61072 +
61073 +enum {
61074 + GR_READY = 0x01,
61075 + GR_STATUS_INIT = 0x00 // disabled state
61076 +};
61077 +
61078 +/* Begin ACL declarations */
61079 +
61080 +/* Role flags */
61081 +
61082 +enum {
61083 + GR_ROLE_USER = 0x0001,
61084 + GR_ROLE_GROUP = 0x0002,
61085 + GR_ROLE_DEFAULT = 0x0004,
61086 + GR_ROLE_SPECIAL = 0x0008,
61087 + GR_ROLE_AUTH = 0x0010,
61088 + GR_ROLE_NOPW = 0x0020,
61089 + GR_ROLE_GOD = 0x0040,
61090 + GR_ROLE_LEARN = 0x0080,
61091 + GR_ROLE_TPE = 0x0100,
61092 + GR_ROLE_DOMAIN = 0x0200,
61093 + GR_ROLE_PAM = 0x0400,
61094 + GR_ROLE_PERSIST = 0x0800
61095 +};
61096 +
61097 +/* ACL Subject and Object mode flags */
61098 +enum {
61099 + GR_DELETED = 0x80000000
61100 +};
61101 +
61102 +/* ACL Object-only mode flags */
61103 +enum {
61104 + GR_READ = 0x00000001,
61105 + GR_APPEND = 0x00000002,
61106 + GR_WRITE = 0x00000004,
61107 + GR_EXEC = 0x00000008,
61108 + GR_FIND = 0x00000010,
61109 + GR_INHERIT = 0x00000020,
61110 + GR_SETID = 0x00000040,
61111 + GR_CREATE = 0x00000080,
61112 + GR_DELETE = 0x00000100,
61113 + GR_LINK = 0x00000200,
61114 + GR_AUDIT_READ = 0x00000400,
61115 + GR_AUDIT_APPEND = 0x00000800,
61116 + GR_AUDIT_WRITE = 0x00001000,
61117 + GR_AUDIT_EXEC = 0x00002000,
61118 + GR_AUDIT_FIND = 0x00004000,
61119 + GR_AUDIT_INHERIT= 0x00008000,
61120 + GR_AUDIT_SETID = 0x00010000,
61121 + GR_AUDIT_CREATE = 0x00020000,
61122 + GR_AUDIT_DELETE = 0x00040000,
61123 + GR_AUDIT_LINK = 0x00080000,
61124 + GR_PTRACERD = 0x00100000,
61125 + GR_NOPTRACE = 0x00200000,
61126 + GR_SUPPRESS = 0x00400000,
61127 + GR_NOLEARN = 0x00800000,
61128 + GR_INIT_TRANSFER= 0x01000000
61129 +};
61130 +
61131 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61132 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61133 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61134 +
61135 +/* ACL subject-only mode flags */
61136 +enum {
61137 + GR_KILL = 0x00000001,
61138 + GR_VIEW = 0x00000002,
61139 + GR_PROTECTED = 0x00000004,
61140 + GR_LEARN = 0x00000008,
61141 + GR_OVERRIDE = 0x00000010,
61142 + /* just a placeholder, this mode is only used in userspace */
61143 + GR_DUMMY = 0x00000020,
61144 + GR_PROTSHM = 0x00000040,
61145 + GR_KILLPROC = 0x00000080,
61146 + GR_KILLIPPROC = 0x00000100,
61147 + /* just a placeholder, this mode is only used in userspace */
61148 + GR_NOTROJAN = 0x00000200,
61149 + GR_PROTPROCFD = 0x00000400,
61150 + GR_PROCACCT = 0x00000800,
61151 + GR_RELAXPTRACE = 0x00001000,
61152 + GR_NESTED = 0x00002000,
61153 + GR_INHERITLEARN = 0x00004000,
61154 + GR_PROCFIND = 0x00008000,
61155 + GR_POVERRIDE = 0x00010000,
61156 + GR_KERNELAUTH = 0x00020000,
61157 + GR_ATSECURE = 0x00040000,
61158 + GR_SHMEXEC = 0x00080000
61159 +};
61160 +
61161 +enum {
61162 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61163 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61164 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61165 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61166 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61167 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61168 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61169 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61170 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61171 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61172 +};
61173 +
61174 +enum {
61175 + GR_ID_USER = 0x01,
61176 + GR_ID_GROUP = 0x02,
61177 +};
61178 +
61179 +enum {
61180 + GR_ID_ALLOW = 0x01,
61181 + GR_ID_DENY = 0x02,
61182 +};
61183 +
61184 +#define GR_CRASH_RES 31
61185 +#define GR_UIDTABLE_MAX 500
61186 +
61187 +/* begin resource learning section */
61188 +enum {
61189 + GR_RLIM_CPU_BUMP = 60,
61190 + GR_RLIM_FSIZE_BUMP = 50000,
61191 + GR_RLIM_DATA_BUMP = 10000,
61192 + GR_RLIM_STACK_BUMP = 1000,
61193 + GR_RLIM_CORE_BUMP = 10000,
61194 + GR_RLIM_RSS_BUMP = 500000,
61195 + GR_RLIM_NPROC_BUMP = 1,
61196 + GR_RLIM_NOFILE_BUMP = 5,
61197 + GR_RLIM_MEMLOCK_BUMP = 50000,
61198 + GR_RLIM_AS_BUMP = 500000,
61199 + GR_RLIM_LOCKS_BUMP = 2,
61200 + GR_RLIM_SIGPENDING_BUMP = 5,
61201 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61202 + GR_RLIM_NICE_BUMP = 1,
61203 + GR_RLIM_RTPRIO_BUMP = 1,
61204 + GR_RLIM_RTTIME_BUMP = 1000000
61205 +};
61206 +
61207 +#endif
61208 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61209 new file mode 100644
61210 index 0000000..c9292f7
61211 --- /dev/null
61212 +++ b/include/linux/grinternal.h
61213 @@ -0,0 +1,223 @@
61214 +#ifndef __GRINTERNAL_H
61215 +#define __GRINTERNAL_H
61216 +
61217 +#ifdef CONFIG_GRKERNSEC
61218 +
61219 +#include <linux/fs.h>
61220 +#include <linux/mnt_namespace.h>
61221 +#include <linux/nsproxy.h>
61222 +#include <linux/gracl.h>
61223 +#include <linux/grdefs.h>
61224 +#include <linux/grmsg.h>
61225 +
61226 +void gr_add_learn_entry(const char *fmt, ...)
61227 + __attribute__ ((format (printf, 1, 2)));
61228 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61229 + const struct vfsmount *mnt);
61230 +__u32 gr_check_create(const struct dentry *new_dentry,
61231 + const struct dentry *parent,
61232 + const struct vfsmount *mnt, const __u32 mode);
61233 +int gr_check_protected_task(const struct task_struct *task);
61234 +__u32 to_gr_audit(const __u32 reqmode);
61235 +int gr_set_acls(const int type);
61236 +int gr_apply_subject_to_task(struct task_struct *task);
61237 +int gr_acl_is_enabled(void);
61238 +char gr_roletype_to_char(void);
61239 +
61240 +void gr_handle_alertkill(struct task_struct *task);
61241 +char *gr_to_filename(const struct dentry *dentry,
61242 + const struct vfsmount *mnt);
61243 +char *gr_to_filename1(const struct dentry *dentry,
61244 + const struct vfsmount *mnt);
61245 +char *gr_to_filename2(const struct dentry *dentry,
61246 + const struct vfsmount *mnt);
61247 +char *gr_to_filename3(const struct dentry *dentry,
61248 + const struct vfsmount *mnt);
61249 +
61250 +extern int grsec_enable_ptrace_readexec;
61251 +extern int grsec_enable_harden_ptrace;
61252 +extern int grsec_enable_link;
61253 +extern int grsec_enable_fifo;
61254 +extern int grsec_enable_execve;
61255 +extern int grsec_enable_shm;
61256 +extern int grsec_enable_execlog;
61257 +extern int grsec_enable_signal;
61258 +extern int grsec_enable_audit_ptrace;
61259 +extern int grsec_enable_forkfail;
61260 +extern int grsec_enable_time;
61261 +extern int grsec_enable_rofs;
61262 +extern int grsec_enable_chroot_shmat;
61263 +extern int grsec_enable_chroot_mount;
61264 +extern int grsec_enable_chroot_double;
61265 +extern int grsec_enable_chroot_pivot;
61266 +extern int grsec_enable_chroot_chdir;
61267 +extern int grsec_enable_chroot_chmod;
61268 +extern int grsec_enable_chroot_mknod;
61269 +extern int grsec_enable_chroot_fchdir;
61270 +extern int grsec_enable_chroot_nice;
61271 +extern int grsec_enable_chroot_execlog;
61272 +extern int grsec_enable_chroot_caps;
61273 +extern int grsec_enable_chroot_sysctl;
61274 +extern int grsec_enable_chroot_unix;
61275 +extern int grsec_enable_symlinkown;
61276 +extern int grsec_symlinkown_gid;
61277 +extern int grsec_enable_tpe;
61278 +extern int grsec_tpe_gid;
61279 +extern int grsec_enable_tpe_all;
61280 +extern int grsec_enable_tpe_invert;
61281 +extern int grsec_enable_socket_all;
61282 +extern int grsec_socket_all_gid;
61283 +extern int grsec_enable_socket_client;
61284 +extern int grsec_socket_client_gid;
61285 +extern int grsec_enable_socket_server;
61286 +extern int grsec_socket_server_gid;
61287 +extern int grsec_audit_gid;
61288 +extern int grsec_enable_group;
61289 +extern int grsec_enable_audit_textrel;
61290 +extern int grsec_enable_log_rwxmaps;
61291 +extern int grsec_enable_mount;
61292 +extern int grsec_enable_chdir;
61293 +extern int grsec_resource_logging;
61294 +extern int grsec_enable_blackhole;
61295 +extern int grsec_lastack_retries;
61296 +extern int grsec_enable_brute;
61297 +extern int grsec_lock;
61298 +
61299 +extern spinlock_t grsec_alert_lock;
61300 +extern unsigned long grsec_alert_wtime;
61301 +extern unsigned long grsec_alert_fyet;
61302 +
61303 +extern spinlock_t grsec_audit_lock;
61304 +
61305 +extern rwlock_t grsec_exec_file_lock;
61306 +
61307 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61308 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61309 + (tsk)->exec_file->f_vfsmnt) : "/")
61310 +
61311 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61312 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61313 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61314 +
61315 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61316 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61317 + (tsk)->exec_file->f_vfsmnt) : "/")
61318 +
61319 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61320 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61321 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61322 +
61323 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61324 +
61325 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61326 +
61327 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61328 + (task)->pid, (cred)->uid, \
61329 + (cred)->euid, (cred)->gid, (cred)->egid, \
61330 + gr_parent_task_fullpath(task), \
61331 + (task)->real_parent->comm, (task)->real_parent->pid, \
61332 + (pcred)->uid, (pcred)->euid, \
61333 + (pcred)->gid, (pcred)->egid
61334 +
61335 +#define GR_CHROOT_CAPS {{ \
61336 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61337 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61338 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61339 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61340 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61341 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61342 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61343 +
61344 +#define security_learn(normal_msg,args...) \
61345 +({ \
61346 + read_lock(&grsec_exec_file_lock); \
61347 + gr_add_learn_entry(normal_msg "\n", ## args); \
61348 + read_unlock(&grsec_exec_file_lock); \
61349 +})
61350 +
61351 +enum {
61352 + GR_DO_AUDIT,
61353 + GR_DONT_AUDIT,
61354 + /* used for non-audit messages that we shouldn't kill the task on */
61355 + GR_DONT_AUDIT_GOOD
61356 +};
61357 +
61358 +enum {
61359 + GR_TTYSNIFF,
61360 + GR_RBAC,
61361 + GR_RBAC_STR,
61362 + GR_STR_RBAC,
61363 + GR_RBAC_MODE2,
61364 + GR_RBAC_MODE3,
61365 + GR_FILENAME,
61366 + GR_SYSCTL_HIDDEN,
61367 + GR_NOARGS,
61368 + GR_ONE_INT,
61369 + GR_ONE_INT_TWO_STR,
61370 + GR_ONE_STR,
61371 + GR_STR_INT,
61372 + GR_TWO_STR_INT,
61373 + GR_TWO_INT,
61374 + GR_TWO_U64,
61375 + GR_THREE_INT,
61376 + GR_FIVE_INT_TWO_STR,
61377 + GR_TWO_STR,
61378 + GR_THREE_STR,
61379 + GR_FOUR_STR,
61380 + GR_STR_FILENAME,
61381 + GR_FILENAME_STR,
61382 + GR_FILENAME_TWO_INT,
61383 + GR_FILENAME_TWO_INT_STR,
61384 + GR_TEXTREL,
61385 + GR_PTRACE,
61386 + GR_RESOURCE,
61387 + GR_CAP,
61388 + GR_SIG,
61389 + GR_SIG2,
61390 + GR_CRASH1,
61391 + GR_CRASH2,
61392 + GR_PSACCT,
61393 + GR_RWXMAP
61394 +};
61395 +
61396 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61397 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61398 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61399 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61400 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61401 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61402 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61403 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61404 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61405 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61406 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61407 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61408 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61409 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61410 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61411 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61412 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61413 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61414 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61415 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61416 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61417 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61418 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61419 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61420 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61421 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61422 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61423 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61424 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61425 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61426 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61427 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61428 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61429 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61430 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61431 +
61432 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61433 +
61434 +#endif
61435 +
61436 +#endif
61437 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61438 new file mode 100644
61439 index 0000000..54f4e85
61440 --- /dev/null
61441 +++ b/include/linux/grmsg.h
61442 @@ -0,0 +1,110 @@
61443 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61444 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61445 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61446 +#define GR_STOPMOD_MSG "denied modification of module state by "
61447 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61448 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61449 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61450 +#define GR_IOPL_MSG "denied use of iopl() by "
61451 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61452 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61453 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61454 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61455 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61456 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61457 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61458 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61459 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61460 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61461 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61462 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61463 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61464 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61465 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61466 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61467 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61468 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61469 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61470 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61471 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61472 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61473 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61474 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61475 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61476 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61477 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61478 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61479 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61480 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61481 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61482 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61483 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61484 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61485 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61486 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61487 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61488 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61489 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61490 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61491 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61492 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61493 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61494 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61495 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61496 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61497 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61498 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61499 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61500 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61501 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61502 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61503 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61504 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61505 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61506 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61507 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61508 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61509 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61510 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61511 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61512 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61513 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61514 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61515 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61516 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61517 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61518 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61519 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61520 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61521 +#define GR_TIME_MSG "time set by "
61522 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61523 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61524 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61525 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61526 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61527 +#define GR_BIND_MSG "denied bind() by "
61528 +#define GR_CONNECT_MSG "denied connect() by "
61529 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61530 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61531 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61532 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61533 +#define GR_CAP_ACL_MSG "use of %s denied for "
61534 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61535 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61536 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61537 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61538 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61539 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61540 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61541 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61542 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61543 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61544 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61545 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61546 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61547 +#define GR_VM86_MSG "denied use of vm86 by "
61548 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61549 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61550 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61551 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61552 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
61553 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61554 new file mode 100644
61555 index 0000000..38bfb04
61556 --- /dev/null
61557 +++ b/include/linux/grsecurity.h
61558 @@ -0,0 +1,233 @@
61559 +#ifndef GR_SECURITY_H
61560 +#define GR_SECURITY_H
61561 +#include <linux/fs.h>
61562 +#include <linux/fs_struct.h>
61563 +#include <linux/binfmts.h>
61564 +#include <linux/gracl.h>
61565 +
61566 +/* notify of brain-dead configs */
61567 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61568 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61569 +#endif
61570 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61571 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61572 +#endif
61573 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61574 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61575 +#endif
61576 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61577 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61578 +#endif
61579 +
61580 +#include <linux/compat.h>
61581 +
61582 +struct user_arg_ptr {
61583 +#ifdef CONFIG_COMPAT
61584 + bool is_compat;
61585 +#endif
61586 + union {
61587 + const char __user *const __user *native;
61588 +#ifdef CONFIG_COMPAT
61589 + compat_uptr_t __user *compat;
61590 +#endif
61591 + } ptr;
61592 +};
61593 +
61594 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61595 +void gr_handle_brute_check(void);
61596 +void gr_handle_kernel_exploit(void);
61597 +int gr_process_user_ban(void);
61598 +
61599 +char gr_roletype_to_char(void);
61600 +
61601 +int gr_acl_enable_at_secure(void);
61602 +
61603 +int gr_check_user_change(int real, int effective, int fs);
61604 +int gr_check_group_change(int real, int effective, int fs);
61605 +
61606 +void gr_del_task_from_ip_table(struct task_struct *p);
61607 +
61608 +int gr_pid_is_chrooted(struct task_struct *p);
61609 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61610 +int gr_handle_chroot_nice(void);
61611 +int gr_handle_chroot_sysctl(const int op);
61612 +int gr_handle_chroot_setpriority(struct task_struct *p,
61613 + const int niceval);
61614 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61615 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61616 + const struct vfsmount *mnt);
61617 +void gr_handle_chroot_chdir(struct path *path);
61618 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61619 + const struct vfsmount *mnt, const int mode);
61620 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61621 + const struct vfsmount *mnt, const int mode);
61622 +int gr_handle_chroot_mount(const struct dentry *dentry,
61623 + const struct vfsmount *mnt,
61624 + const char *dev_name);
61625 +int gr_handle_chroot_pivot(void);
61626 +int gr_handle_chroot_unix(const pid_t pid);
61627 +
61628 +int gr_handle_rawio(const struct inode *inode);
61629 +
61630 +void gr_handle_ioperm(void);
61631 +void gr_handle_iopl(void);
61632 +
61633 +umode_t gr_acl_umask(void);
61634 +
61635 +int gr_tpe_allow(const struct file *file);
61636 +
61637 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61638 +void gr_clear_chroot_entries(struct task_struct *task);
61639 +
61640 +void gr_log_forkfail(const int retval);
61641 +void gr_log_timechange(void);
61642 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61643 +void gr_log_chdir(const struct dentry *dentry,
61644 + const struct vfsmount *mnt);
61645 +void gr_log_chroot_exec(const struct dentry *dentry,
61646 + const struct vfsmount *mnt);
61647 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61648 +void gr_log_remount(const char *devname, const int retval);
61649 +void gr_log_unmount(const char *devname, const int retval);
61650 +void gr_log_mount(const char *from, const char *to, const int retval);
61651 +void gr_log_textrel(struct vm_area_struct *vma);
61652 +void gr_log_rwxmmap(struct file *file);
61653 +void gr_log_rwxmprotect(struct file *file);
61654 +
61655 +int gr_handle_follow_link(const struct inode *parent,
61656 + const struct inode *inode,
61657 + const struct dentry *dentry,
61658 + const struct vfsmount *mnt);
61659 +int gr_handle_fifo(const struct dentry *dentry,
61660 + const struct vfsmount *mnt,
61661 + const struct dentry *dir, const int flag,
61662 + const int acc_mode);
61663 +int gr_handle_hardlink(const struct dentry *dentry,
61664 + const struct vfsmount *mnt,
61665 + struct inode *inode,
61666 + const int mode, const char *to);
61667 +
61668 +int gr_is_capable(const int cap);
61669 +int gr_is_capable_nolog(const int cap);
61670 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61671 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61672 +
61673 +void gr_learn_resource(const struct task_struct *task, const int limit,
61674 + const unsigned long wanted, const int gt);
61675 +void gr_copy_label(struct task_struct *tsk);
61676 +void gr_handle_crash(struct task_struct *task, const int sig);
61677 +int gr_handle_signal(const struct task_struct *p, const int sig);
61678 +int gr_check_crash_uid(const uid_t uid);
61679 +int gr_check_protected_task(const struct task_struct *task);
61680 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61681 +int gr_acl_handle_mmap(const struct file *file,
61682 + const unsigned long prot);
61683 +int gr_acl_handle_mprotect(const struct file *file,
61684 + const unsigned long prot);
61685 +int gr_check_hidden_task(const struct task_struct *tsk);
61686 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61687 + const struct vfsmount *mnt);
61688 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61689 + const struct vfsmount *mnt);
61690 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61691 + const struct vfsmount *mnt, const int fmode);
61692 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61693 + const struct vfsmount *mnt, umode_t *mode);
61694 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61695 + const struct vfsmount *mnt);
61696 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61697 + const struct vfsmount *mnt);
61698 +int gr_handle_ptrace(struct task_struct *task, const long request);
61699 +int gr_handle_proc_ptrace(struct task_struct *task);
61700 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61701 + const struct vfsmount *mnt);
61702 +int gr_check_crash_exec(const struct file *filp);
61703 +int gr_acl_is_enabled(void);
61704 +void gr_set_kernel_label(struct task_struct *task);
61705 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61706 + const gid_t gid);
61707 +int gr_set_proc_label(const struct dentry *dentry,
61708 + const struct vfsmount *mnt,
61709 + const int unsafe_flags);
61710 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61711 + const struct vfsmount *mnt);
61712 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61713 + const struct vfsmount *mnt, int acc_mode);
61714 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61715 + const struct dentry *p_dentry,
61716 + const struct vfsmount *p_mnt,
61717 + int open_flags, int acc_mode, const int imode);
61718 +void gr_handle_create(const struct dentry *dentry,
61719 + const struct vfsmount *mnt);
61720 +void gr_handle_proc_create(const struct dentry *dentry,
61721 + const struct inode *inode);
61722 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61723 + const struct dentry *parent_dentry,
61724 + const struct vfsmount *parent_mnt,
61725 + const int mode);
61726 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61727 + const struct dentry *parent_dentry,
61728 + const struct vfsmount *parent_mnt);
61729 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61730 + const struct vfsmount *mnt);
61731 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61732 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61733 + const struct vfsmount *mnt);
61734 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61735 + const struct dentry *parent_dentry,
61736 + const struct vfsmount *parent_mnt,
61737 + const char *from);
61738 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61739 + const struct dentry *parent_dentry,
61740 + const struct vfsmount *parent_mnt,
61741 + const struct dentry *old_dentry,
61742 + const struct vfsmount *old_mnt, const char *to);
61743 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
61744 +int gr_acl_handle_rename(struct dentry *new_dentry,
61745 + struct dentry *parent_dentry,
61746 + const struct vfsmount *parent_mnt,
61747 + struct dentry *old_dentry,
61748 + struct inode *old_parent_inode,
61749 + struct vfsmount *old_mnt, const char *newname);
61750 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61751 + struct dentry *old_dentry,
61752 + struct dentry *new_dentry,
61753 + struct vfsmount *mnt, const __u8 replace);
61754 +__u32 gr_check_link(const struct dentry *new_dentry,
61755 + const struct dentry *parent_dentry,
61756 + const struct vfsmount *parent_mnt,
61757 + const struct dentry *old_dentry,
61758 + const struct vfsmount *old_mnt);
61759 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61760 + const unsigned int namelen, const ino_t ino);
61761 +
61762 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61763 + const struct vfsmount *mnt);
61764 +void gr_acl_handle_exit(void);
61765 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61766 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61767 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61768 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61769 +void gr_audit_ptrace(struct task_struct *task);
61770 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61771 +
61772 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61773 +
61774 +#ifdef CONFIG_GRKERNSEC
61775 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61776 +void gr_handle_vm86(void);
61777 +void gr_handle_mem_readwrite(u64 from, u64 to);
61778 +
61779 +void gr_log_badprocpid(const char *entry);
61780 +
61781 +extern int grsec_enable_dmesg;
61782 +extern int grsec_disable_privio;
61783 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61784 +extern int grsec_enable_chroot_findtask;
61785 +#endif
61786 +#ifdef CONFIG_GRKERNSEC_SETXID
61787 +extern int grsec_enable_setxid;
61788 +#endif
61789 +#endif
61790 +
61791 +#endif
61792 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61793 new file mode 100644
61794 index 0000000..e7ffaaf
61795 --- /dev/null
61796 +++ b/include/linux/grsock.h
61797 @@ -0,0 +1,19 @@
61798 +#ifndef __GRSOCK_H
61799 +#define __GRSOCK_H
61800 +
61801 +extern void gr_attach_curr_ip(const struct sock *sk);
61802 +extern int gr_handle_sock_all(const int family, const int type,
61803 + const int protocol);
61804 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61805 +extern int gr_handle_sock_server_other(const struct sock *sck);
61806 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61807 +extern int gr_search_connect(struct socket * sock,
61808 + struct sockaddr_in * addr);
61809 +extern int gr_search_bind(struct socket * sock,
61810 + struct sockaddr_in * addr);
61811 +extern int gr_search_listen(struct socket * sock);
61812 +extern int gr_search_accept(struct socket * sock);
61813 +extern int gr_search_socket(const int domain, const int type,
61814 + const int protocol);
61815 +
61816 +#endif
61817 diff --git a/include/linux/hid.h b/include/linux/hid.h
61818 index 3a95da6..51986f1 100644
61819 --- a/include/linux/hid.h
61820 +++ b/include/linux/hid.h
61821 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61822 unsigned int code, int value);
61823
61824 int (*parse)(struct hid_device *hdev);
61825 -};
61826 +} __no_const;
61827
61828 #define PM_HINT_FULLON 1<<5
61829 #define PM_HINT_NORMAL 1<<1
61830 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61831 index d3999b4..1304cb4 100644
61832 --- a/include/linux/highmem.h
61833 +++ b/include/linux/highmem.h
61834 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61835 kunmap_atomic(kaddr);
61836 }
61837
61838 +static inline void sanitize_highpage(struct page *page)
61839 +{
61840 + void *kaddr;
61841 + unsigned long flags;
61842 +
61843 + local_irq_save(flags);
61844 + kaddr = kmap_atomic(page);
61845 + clear_page(kaddr);
61846 + kunmap_atomic(kaddr);
61847 + local_irq_restore(flags);
61848 +}
61849 +
61850 static inline void zero_user_segments(struct page *page,
61851 unsigned start1, unsigned end1,
61852 unsigned start2, unsigned end2)
61853 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61854 index 195d8b3..e20cfab 100644
61855 --- a/include/linux/i2c.h
61856 +++ b/include/linux/i2c.h
61857 @@ -365,6 +365,7 @@ struct i2c_algorithm {
61858 /* To determine what the adapter supports */
61859 u32 (*functionality) (struct i2c_adapter *);
61860 };
61861 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61862
61863 /*
61864 * i2c_adapter is the structure used to identify a physical i2c bus along
61865 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61866 index d23c3c2..eb63c81 100644
61867 --- a/include/linux/i2o.h
61868 +++ b/include/linux/i2o.h
61869 @@ -565,7 +565,7 @@ struct i2o_controller {
61870 struct i2o_device *exec; /* Executive */
61871 #if BITS_PER_LONG == 64
61872 spinlock_t context_list_lock; /* lock for context_list */
61873 - atomic_t context_list_counter; /* needed for unique contexts */
61874 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61875 struct list_head context_list; /* list of context id's
61876 and pointers */
61877 #endif
61878 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61879 index 58404b0..439ed95 100644
61880 --- a/include/linux/if_team.h
61881 +++ b/include/linux/if_team.h
61882 @@ -64,6 +64,7 @@ struct team_mode_ops {
61883 void (*port_leave)(struct team *team, struct team_port *port);
61884 void (*port_change_mac)(struct team *team, struct team_port *port);
61885 };
61886 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61887
61888 enum team_option_type {
61889 TEAM_OPTION_TYPE_U32,
61890 @@ -112,7 +113,7 @@ struct team {
61891 struct list_head option_list;
61892
61893 const struct team_mode *mode;
61894 - struct team_mode_ops ops;
61895 + team_mode_ops_no_const ops;
61896 long mode_priv[TEAM_MODE_PRIV_LONGS];
61897 };
61898
61899 diff --git a/include/linux/init.h b/include/linux/init.h
61900 index 6b95109..bcbdd68 100644
61901 --- a/include/linux/init.h
61902 +++ b/include/linux/init.h
61903 @@ -39,9 +39,15 @@
61904 * Also note, that this data cannot be "const".
61905 */
61906
61907 +#ifdef MODULE
61908 +#define add_latent_entropy
61909 +#else
61910 +#define add_latent_entropy __latent_entropy
61911 +#endif
61912 +
61913 /* These are for everybody (although not all archs will actually
61914 discard it in modules) */
61915 -#define __init __section(.init.text) __cold notrace
61916 +#define __init __section(.init.text) __cold notrace add_latent_entropy
61917 #define __initdata __section(.init.data)
61918 #define __initconst __section(.init.rodata)
61919 #define __exitdata __section(.exit.data)
61920 @@ -83,7 +89,7 @@
61921 #define __exit __section(.exit.text) __exitused __cold notrace
61922
61923 /* Used for HOTPLUG */
61924 -#define __devinit __section(.devinit.text) __cold notrace
61925 +#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
61926 #define __devinitdata __section(.devinit.data)
61927 #define __devinitconst __section(.devinit.rodata)
61928 #define __devexit __section(.devexit.text) __exitused __cold notrace
61929 @@ -91,7 +97,7 @@
61930 #define __devexitconst __section(.devexit.rodata)
61931
61932 /* Used for HOTPLUG_CPU */
61933 -#define __cpuinit __section(.cpuinit.text) __cold notrace
61934 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
61935 #define __cpuinitdata __section(.cpuinit.data)
61936 #define __cpuinitconst __section(.cpuinit.rodata)
61937 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
61938 @@ -99,7 +105,7 @@
61939 #define __cpuexitconst __section(.cpuexit.rodata)
61940
61941 /* Used for MEMORY_HOTPLUG */
61942 -#define __meminit __section(.meminit.text) __cold notrace
61943 +#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
61944 #define __meminitdata __section(.meminit.data)
61945 #define __meminitconst __section(.meminit.rodata)
61946 #define __memexit __section(.memexit.text) __exitused __cold notrace
61947 @@ -294,13 +300,13 @@ void __init parse_early_options(char *cmdline);
61948
61949 /* Each module must use one module_init(). */
61950 #define module_init(initfn) \
61951 - static inline initcall_t __inittest(void) \
61952 + static inline __used initcall_t __inittest(void) \
61953 { return initfn; } \
61954 int init_module(void) __attribute__((alias(#initfn)));
61955
61956 /* This is only required if you want to be unloadable. */
61957 #define module_exit(exitfn) \
61958 - static inline exitcall_t __exittest(void) \
61959 + static inline __used exitcall_t __exittest(void) \
61960 { return exitfn; } \
61961 void cleanup_module(void) __attribute__((alias(#exitfn)));
61962
61963 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61964 index e4baff5..83bb175 100644
61965 --- a/include/linux/init_task.h
61966 +++ b/include/linux/init_task.h
61967 @@ -134,6 +134,12 @@ extern struct cred init_cred;
61968
61969 #define INIT_TASK_COMM "swapper"
61970
61971 +#ifdef CONFIG_X86
61972 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61973 +#else
61974 +#define INIT_TASK_THREAD_INFO
61975 +#endif
61976 +
61977 /*
61978 * INIT_TASK is used to set up the first task table, touch at
61979 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61980 @@ -172,6 +178,7 @@ extern struct cred init_cred;
61981 RCU_INIT_POINTER(.cred, &init_cred), \
61982 .comm = INIT_TASK_COMM, \
61983 .thread = INIT_THREAD, \
61984 + INIT_TASK_THREAD_INFO \
61985 .fs = &init_fs, \
61986 .files = &init_files, \
61987 .signal = &init_signals, \
61988 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61989 index e6ca56d..8583707 100644
61990 --- a/include/linux/intel-iommu.h
61991 +++ b/include/linux/intel-iommu.h
61992 @@ -296,7 +296,7 @@ struct iommu_flush {
61993 u8 fm, u64 type);
61994 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61995 unsigned int size_order, u64 type);
61996 -};
61997 +} __no_const;
61998
61999 enum {
62000 SR_DMAR_FECTL_REG,
62001 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62002 index 2aea5d2..0b82f0c 100644
62003 --- a/include/linux/interrupt.h
62004 +++ b/include/linux/interrupt.h
62005 @@ -439,7 +439,7 @@ enum
62006 /* map softirq index to softirq name. update 'softirq_to_name' in
62007 * kernel/softirq.c when adding a new softirq.
62008 */
62009 -extern char *softirq_to_name[NR_SOFTIRQS];
62010 +extern const char * const softirq_to_name[NR_SOFTIRQS];
62011
62012 /* softirq mask and active fields moved to irq_cpustat_t in
62013 * asm/hardirq.h to get better cache usage. KAO
62014 @@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62015
62016 struct softirq_action
62017 {
62018 - void (*action)(struct softirq_action *);
62019 + void (*action)(void);
62020 };
62021
62022 asmlinkage void do_softirq(void);
62023 asmlinkage void __do_softirq(void);
62024 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62025 +extern void open_softirq(int nr, void (*action)(void));
62026 extern void softirq_init(void);
62027 extern void __raise_softirq_irqoff(unsigned int nr);
62028
62029 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62030 index 3875719..4cd454c 100644
62031 --- a/include/linux/kallsyms.h
62032 +++ b/include/linux/kallsyms.h
62033 @@ -15,7 +15,8 @@
62034
62035 struct module;
62036
62037 -#ifdef CONFIG_KALLSYMS
62038 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62039 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62040 /* Lookup the address for a symbol. Returns 0 if not found. */
62041 unsigned long kallsyms_lookup_name(const char *name);
62042
62043 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62044 /* Stupid that this does nothing, but I didn't create this mess. */
62045 #define __print_symbol(fmt, addr)
62046 #endif /*CONFIG_KALLSYMS*/
62047 +#else /* when included by kallsyms.c, vsnprintf.c, or
62048 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62049 +extern void __print_symbol(const char *fmt, unsigned long address);
62050 +extern int sprint_backtrace(char *buffer, unsigned long address);
62051 +extern int sprint_symbol(char *buffer, unsigned long address);
62052 +const char *kallsyms_lookup(unsigned long addr,
62053 + unsigned long *symbolsize,
62054 + unsigned long *offset,
62055 + char **modname, char *namebuf);
62056 +#endif
62057
62058 /* This macro allows us to keep printk typechecking */
62059 static __printf(1, 2)
62060 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62061 index c4d2fc1..5df9c19 100644
62062 --- a/include/linux/kgdb.h
62063 +++ b/include/linux/kgdb.h
62064 @@ -53,7 +53,7 @@ extern int kgdb_connected;
62065 extern int kgdb_io_module_registered;
62066
62067 extern atomic_t kgdb_setting_breakpoint;
62068 -extern atomic_t kgdb_cpu_doing_single_step;
62069 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62070
62071 extern struct task_struct *kgdb_usethread;
62072 extern struct task_struct *kgdb_contthread;
62073 @@ -252,7 +252,7 @@ struct kgdb_arch {
62074 void (*disable_hw_break)(struct pt_regs *regs);
62075 void (*remove_all_hw_break)(void);
62076 void (*correct_hw_break)(void);
62077 -};
62078 +} __do_const;
62079
62080 /**
62081 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62082 @@ -277,7 +277,7 @@ struct kgdb_io {
62083 void (*pre_exception) (void);
62084 void (*post_exception) (void);
62085 int is_console;
62086 -};
62087 +} __do_const;
62088
62089 extern struct kgdb_arch arch_kgdb_ops;
62090
62091 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62092 index dd99c32..da06047 100644
62093 --- a/include/linux/kmod.h
62094 +++ b/include/linux/kmod.h
62095 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62096 * usually useless though. */
62097 extern __printf(2, 3)
62098 int __request_module(bool wait, const char *name, ...);
62099 +extern __printf(3, 4)
62100 +int ___request_module(bool wait, char *param_name, const char *name, ...);
62101 #define request_module(mod...) __request_module(true, mod)
62102 #define request_module_nowait(mod...) __request_module(false, mod)
62103 #define try_then_request_module(x, mod...) \
62104 diff --git a/include/linux/kref.h b/include/linux/kref.h
62105 index 9c07dce..a92fa71 100644
62106 --- a/include/linux/kref.h
62107 +++ b/include/linux/kref.h
62108 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62109 static inline int kref_sub(struct kref *kref, unsigned int count,
62110 void (*release)(struct kref *kref))
62111 {
62112 - WARN_ON(release == NULL);
62113 + BUG_ON(release == NULL);
62114
62115 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62116 release(kref);
62117 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62118 index 72cbf08..dd0201d 100644
62119 --- a/include/linux/kvm_host.h
62120 +++ b/include/linux/kvm_host.h
62121 @@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62122 void vcpu_load(struct kvm_vcpu *vcpu);
62123 void vcpu_put(struct kvm_vcpu *vcpu);
62124
62125 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62126 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62127 struct module *module);
62128 void kvm_exit(void);
62129
62130 @@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62131 struct kvm_guest_debug *dbg);
62132 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62133
62134 -int kvm_arch_init(void *opaque);
62135 +int kvm_arch_init(const void *opaque);
62136 void kvm_arch_exit(void);
62137
62138 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62139 diff --git a/include/linux/libata.h b/include/linux/libata.h
62140 index 6e887c7..4539601 100644
62141 --- a/include/linux/libata.h
62142 +++ b/include/linux/libata.h
62143 @@ -910,7 +910,7 @@ struct ata_port_operations {
62144 * fields must be pointers.
62145 */
62146 const struct ata_port_operations *inherits;
62147 -};
62148 +} __do_const;
62149
62150 struct ata_port_info {
62151 unsigned long flags;
62152 diff --git a/include/linux/mca.h b/include/linux/mca.h
62153 index 3797270..7765ede 100644
62154 --- a/include/linux/mca.h
62155 +++ b/include/linux/mca.h
62156 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62157 int region);
62158 void * (*mca_transform_memory)(struct mca_device *,
62159 void *memory);
62160 -};
62161 +} __no_const;
62162
62163 struct mca_bus {
62164 u64 default_dma_mask;
62165 diff --git a/include/linux/memory.h b/include/linux/memory.h
62166 index 1ac7f6e..a5794d0 100644
62167 --- a/include/linux/memory.h
62168 +++ b/include/linux/memory.h
62169 @@ -143,7 +143,7 @@ struct memory_accessor {
62170 size_t count);
62171 ssize_t (*write)(struct memory_accessor *, const char *buf,
62172 off_t offset, size_t count);
62173 -};
62174 +} __no_const;
62175
62176 /*
62177 * Kernel text modification mutex, used for code patching. Users of this lock
62178 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62179 index ee96cd5..7823c3a 100644
62180 --- a/include/linux/mfd/abx500.h
62181 +++ b/include/linux/mfd/abx500.h
62182 @@ -455,6 +455,7 @@ struct abx500_ops {
62183 int (*event_registers_startup_state_get) (struct device *, u8 *);
62184 int (*startup_irq_enabled) (struct device *, unsigned int);
62185 };
62186 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62187
62188 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62189 void abx500_remove_ops(struct device *dev);
62190 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
62191 index 9b07725..3d55001 100644
62192 --- a/include/linux/mfd/abx500/ux500_chargalg.h
62193 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
62194 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
62195 int (*enable) (struct ux500_charger *, int, int, int);
62196 int (*kick_wd) (struct ux500_charger *);
62197 int (*update_curr) (struct ux500_charger *, int);
62198 -};
62199 +} __no_const;
62200
62201 /**
62202 * struct ux500_charger - power supply ux500 charger sub class
62203 diff --git a/include/linux/mm.h b/include/linux/mm.h
62204 index 74aa71b..4ae97ba 100644
62205 --- a/include/linux/mm.h
62206 +++ b/include/linux/mm.h
62207 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
62208
62209 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62210 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62211 +
62212 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62213 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62214 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62215 +#else
62216 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62217 +#endif
62218 +
62219 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62220 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62221
62222 @@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
62223 int set_page_dirty_lock(struct page *page);
62224 int clear_page_dirty_for_io(struct page *page);
62225
62226 -/* Is the vma a continuation of the stack vma above it? */
62227 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62228 -{
62229 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62230 -}
62231 -
62232 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62233 - unsigned long addr)
62234 -{
62235 - return (vma->vm_flags & VM_GROWSDOWN) &&
62236 - (vma->vm_start == addr) &&
62237 - !vma_growsdown(vma->vm_prev, addr);
62238 -}
62239 -
62240 -/* Is the vma a continuation of the stack vma below it? */
62241 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62242 -{
62243 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62244 -}
62245 -
62246 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62247 - unsigned long addr)
62248 -{
62249 - return (vma->vm_flags & VM_GROWSUP) &&
62250 - (vma->vm_end == addr) &&
62251 - !vma_growsup(vma->vm_next, addr);
62252 -}
62253 -
62254 extern pid_t
62255 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
62256
62257 @@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
62258 }
62259 #endif
62260
62261 +#ifdef CONFIG_MMU
62262 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62263 +#else
62264 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62265 +{
62266 + return __pgprot(0);
62267 +}
62268 +#endif
62269 +
62270 int vma_wants_writenotify(struct vm_area_struct *vma);
62271
62272 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62273 @@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
62274 {
62275 return 0;
62276 }
62277 +
62278 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62279 + unsigned long address)
62280 +{
62281 + return 0;
62282 +}
62283 #else
62284 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62285 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62286 #endif
62287
62288 #ifdef __PAGETABLE_PMD_FOLDED
62289 @@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
62290 {
62291 return 0;
62292 }
62293 +
62294 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62295 + unsigned long address)
62296 +{
62297 + return 0;
62298 +}
62299 #else
62300 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62301 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62302 #endif
62303
62304 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
62305 @@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
62306 NULL: pud_offset(pgd, address);
62307 }
62308
62309 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62310 +{
62311 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62312 + NULL: pud_offset(pgd, address);
62313 +}
62314 +
62315 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62316 {
62317 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62318 NULL: pmd_offset(pud, address);
62319 }
62320 +
62321 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62322 +{
62323 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62324 + NULL: pmd_offset(pud, address);
62325 +}
62326 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62327
62328 #if USE_SPLIT_PTLOCKS
62329 @@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
62330 unsigned long, unsigned long,
62331 unsigned long, unsigned long);
62332 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62333 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62334
62335 /* These take the mm semaphore themselves */
62336 extern unsigned long vm_brk(unsigned long, unsigned long);
62337 @@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62338 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62339 struct vm_area_struct **pprev);
62340
62341 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62342 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62343 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62344 +
62345 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62346 NULL if none. Assume start_addr < end_addr. */
62347 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62348 @@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62349 return vma;
62350 }
62351
62352 -#ifdef CONFIG_MMU
62353 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62354 -#else
62355 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62356 -{
62357 - return __pgprot(0);
62358 -}
62359 -#endif
62360 -
62361 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62362 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62363 unsigned long pfn, unsigned long size, pgprot_t);
62364 @@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
62365 extern int sysctl_memory_failure_early_kill;
62366 extern int sysctl_memory_failure_recovery;
62367 extern void shake_page(struct page *p, int access);
62368 -extern atomic_long_t mce_bad_pages;
62369 +extern atomic_long_unchecked_t mce_bad_pages;
62370 extern int soft_offline_page(struct page *page, int flags);
62371
62372 extern void dump_page(struct page *page);
62373 @@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62374 static inline bool page_is_guard(struct page *page) { return false; }
62375 #endif /* CONFIG_DEBUG_PAGEALLOC */
62376
62377 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62378 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62379 +#else
62380 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62381 +#endif
62382 +
62383 #endif /* __KERNEL__ */
62384 #endif /* _LINUX_MM_H */
62385 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62386 index b35752f..41075a0 100644
62387 --- a/include/linux/mm_types.h
62388 +++ b/include/linux/mm_types.h
62389 @@ -262,6 +262,8 @@ struct vm_area_struct {
62390 #ifdef CONFIG_NUMA
62391 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62392 #endif
62393 +
62394 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62395 };
62396
62397 struct core_thread {
62398 @@ -336,7 +338,7 @@ struct mm_struct {
62399 unsigned long def_flags;
62400 unsigned long nr_ptes; /* Page table pages */
62401 unsigned long start_code, end_code, start_data, end_data;
62402 - unsigned long start_brk, brk, start_stack;
62403 + unsigned long brk_gap, start_brk, brk, start_stack;
62404 unsigned long arg_start, arg_end, env_start, env_end;
62405
62406 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62407 @@ -398,6 +400,24 @@ struct mm_struct {
62408 #ifdef CONFIG_CPUMASK_OFFSTACK
62409 struct cpumask cpumask_allocation;
62410 #endif
62411 +
62412 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62413 + unsigned long pax_flags;
62414 +#endif
62415 +
62416 +#ifdef CONFIG_PAX_DLRESOLVE
62417 + unsigned long call_dl_resolve;
62418 +#endif
62419 +
62420 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62421 + unsigned long call_syscall;
62422 +#endif
62423 +
62424 +#ifdef CONFIG_PAX_ASLR
62425 + unsigned long delta_mmap; /* randomized offset */
62426 + unsigned long delta_stack; /* randomized offset */
62427 +#endif
62428 +
62429 };
62430
62431 static inline void mm_init_cpumask(struct mm_struct *mm)
62432 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62433 index 1d1b1e1..2a13c78 100644
62434 --- a/include/linux/mmu_notifier.h
62435 +++ b/include/linux/mmu_notifier.h
62436 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62437 */
62438 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62439 ({ \
62440 - pte_t __pte; \
62441 + pte_t ___pte; \
62442 struct vm_area_struct *___vma = __vma; \
62443 unsigned long ___address = __address; \
62444 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62445 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62446 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62447 - __pte; \
62448 + ___pte; \
62449 })
62450
62451 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62452 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62453 index 5f6806b..49db2b2 100644
62454 --- a/include/linux/mmzone.h
62455 +++ b/include/linux/mmzone.h
62456 @@ -380,7 +380,7 @@ struct zone {
62457 unsigned long flags; /* zone flags, see below */
62458
62459 /* Zone statistics */
62460 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62461 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62462
62463 /*
62464 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62465 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62466 index 501da4c..ba79bb4 100644
62467 --- a/include/linux/mod_devicetable.h
62468 +++ b/include/linux/mod_devicetable.h
62469 @@ -12,7 +12,7 @@
62470 typedef unsigned long kernel_ulong_t;
62471 #endif
62472
62473 -#define PCI_ANY_ID (~0)
62474 +#define PCI_ANY_ID ((__u16)~0)
62475
62476 struct pci_device_id {
62477 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62478 @@ -131,7 +131,7 @@ struct usb_device_id {
62479 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62480 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62481
62482 -#define HID_ANY_ID (~0)
62483 +#define HID_ANY_ID (~0U)
62484
62485 struct hid_device_id {
62486 __u16 bus;
62487 diff --git a/include/linux/module.h b/include/linux/module.h
62488 index fbcafe2..e5d9587 100644
62489 --- a/include/linux/module.h
62490 +++ b/include/linux/module.h
62491 @@ -17,6 +17,7 @@
62492 #include <linux/moduleparam.h>
62493 #include <linux/tracepoint.h>
62494 #include <linux/export.h>
62495 +#include <linux/fs.h>
62496
62497 #include <linux/percpu.h>
62498 #include <asm/module.h>
62499 @@ -273,19 +274,16 @@ struct module
62500 int (*init)(void);
62501
62502 /* If this is non-NULL, vfree after init() returns */
62503 - void *module_init;
62504 + void *module_init_rx, *module_init_rw;
62505
62506 /* Here is the actual code + data, vfree'd on unload. */
62507 - void *module_core;
62508 + void *module_core_rx, *module_core_rw;
62509
62510 /* Here are the sizes of the init and core sections */
62511 - unsigned int init_size, core_size;
62512 + unsigned int init_size_rw, core_size_rw;
62513
62514 /* The size of the executable code in each section. */
62515 - unsigned int init_text_size, core_text_size;
62516 -
62517 - /* Size of RO sections of the module (text+rodata) */
62518 - unsigned int init_ro_size, core_ro_size;
62519 + unsigned int init_size_rx, core_size_rx;
62520
62521 /* Arch-specific module values */
62522 struct mod_arch_specific arch;
62523 @@ -341,6 +339,10 @@ struct module
62524 #ifdef CONFIG_EVENT_TRACING
62525 struct ftrace_event_call **trace_events;
62526 unsigned int num_trace_events;
62527 + struct file_operations trace_id;
62528 + struct file_operations trace_enable;
62529 + struct file_operations trace_format;
62530 + struct file_operations trace_filter;
62531 #endif
62532 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62533 unsigned int num_ftrace_callsites;
62534 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
62535 bool is_module_percpu_address(unsigned long addr);
62536 bool is_module_text_address(unsigned long addr);
62537
62538 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62539 +{
62540 +
62541 +#ifdef CONFIG_PAX_KERNEXEC
62542 + if (ktla_ktva(addr) >= (unsigned long)start &&
62543 + ktla_ktva(addr) < (unsigned long)start + size)
62544 + return 1;
62545 +#endif
62546 +
62547 + return ((void *)addr >= start && (void *)addr < start + size);
62548 +}
62549 +
62550 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62551 +{
62552 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62553 +}
62554 +
62555 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62556 +{
62557 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62558 +}
62559 +
62560 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62561 +{
62562 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62563 +}
62564 +
62565 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62566 +{
62567 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62568 +}
62569 +
62570 static inline int within_module_core(unsigned long addr, struct module *mod)
62571 {
62572 - return (unsigned long)mod->module_core <= addr &&
62573 - addr < (unsigned long)mod->module_core + mod->core_size;
62574 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62575 }
62576
62577 static inline int within_module_init(unsigned long addr, struct module *mod)
62578 {
62579 - return (unsigned long)mod->module_init <= addr &&
62580 - addr < (unsigned long)mod->module_init + mod->init_size;
62581 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62582 }
62583
62584 /* Search for module by name: must hold module_mutex. */
62585 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62586 index b2be02e..72d2f78 100644
62587 --- a/include/linux/moduleloader.h
62588 +++ b/include/linux/moduleloader.h
62589 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62590
62591 /* Allocator used for allocating struct module, core sections and init
62592 sections. Returns NULL on failure. */
62593 -void *module_alloc(unsigned long size);
62594 +void *module_alloc(unsigned long size) __size_overflow(1);
62595 +
62596 +#ifdef CONFIG_PAX_KERNEXEC
62597 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
62598 +#else
62599 +#define module_alloc_exec(x) module_alloc(x)
62600 +#endif
62601
62602 /* Free memory returned from module_alloc. */
62603 void module_free(struct module *mod, void *module_region);
62604
62605 +#ifdef CONFIG_PAX_KERNEXEC
62606 +void module_free_exec(struct module *mod, void *module_region);
62607 +#else
62608 +#define module_free_exec(x, y) module_free((x), (y))
62609 +#endif
62610 +
62611 /* Apply the given relocation to the (simplified) ELF. Return -error
62612 or 0. */
62613 int apply_relocate(Elf_Shdr *sechdrs,
62614 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62615 index 944bc18..042d291 100644
62616 --- a/include/linux/moduleparam.h
62617 +++ b/include/linux/moduleparam.h
62618 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
62619 * @len is usually just sizeof(string).
62620 */
62621 #define module_param_string(name, string, len, perm) \
62622 - static const struct kparam_string __param_string_##name \
62623 + static const struct kparam_string __param_string_##name __used \
62624 = { len, string }; \
62625 __module_param_call(MODULE_PARAM_PREFIX, name, \
62626 &param_ops_string, \
62627 @@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62628 */
62629 #define module_param_array_named(name, array, type, nump, perm) \
62630 param_check_##type(name, &(array)[0]); \
62631 - static const struct kparam_array __param_arr_##name \
62632 + static const struct kparam_array __param_arr_##name __used \
62633 = { .max = ARRAY_SIZE(array), .num = nump, \
62634 .ops = &param_ops_##type, \
62635 .elemsize = sizeof(array[0]), .elem = array }; \
62636 diff --git a/include/linux/namei.h b/include/linux/namei.h
62637 index ffc0213..2c1f2cb 100644
62638 --- a/include/linux/namei.h
62639 +++ b/include/linux/namei.h
62640 @@ -24,7 +24,7 @@ struct nameidata {
62641 unsigned seq;
62642 int last_type;
62643 unsigned depth;
62644 - char *saved_names[MAX_NESTED_LINKS + 1];
62645 + const char *saved_names[MAX_NESTED_LINKS + 1];
62646
62647 /* Intent data */
62648 union {
62649 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62650 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62651 extern void unlock_rename(struct dentry *, struct dentry *);
62652
62653 -static inline void nd_set_link(struct nameidata *nd, char *path)
62654 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62655 {
62656 nd->saved_names[nd->depth] = path;
62657 }
62658
62659 -static inline char *nd_get_link(struct nameidata *nd)
62660 +static inline const char *nd_get_link(const struct nameidata *nd)
62661 {
62662 return nd->saved_names[nd->depth];
62663 }
62664 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62665 index 33900a5..2072000 100644
62666 --- a/include/linux/netdevice.h
62667 +++ b/include/linux/netdevice.h
62668 @@ -1003,6 +1003,7 @@ struct net_device_ops {
62669 int (*ndo_neigh_construct)(struct neighbour *n);
62670 void (*ndo_neigh_destroy)(struct neighbour *n);
62671 };
62672 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62673
62674 /*
62675 * The DEVICE structure.
62676 @@ -1064,7 +1065,7 @@ struct net_device {
62677 int iflink;
62678
62679 struct net_device_stats stats;
62680 - atomic_long_t rx_dropped; /* dropped packets by core network
62681 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62682 * Do not use this in drivers.
62683 */
62684
62685 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62686 new file mode 100644
62687 index 0000000..33f4af8
62688 --- /dev/null
62689 +++ b/include/linux/netfilter/xt_gradm.h
62690 @@ -0,0 +1,9 @@
62691 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62692 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62693 +
62694 +struct xt_gradm_mtinfo {
62695 + __u16 flags;
62696 + __u16 invflags;
62697 +};
62698 +
62699 +#endif
62700 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62701 index c65a18a..0c05f3a 100644
62702 --- a/include/linux/of_pdt.h
62703 +++ b/include/linux/of_pdt.h
62704 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62705
62706 /* return 0 on success; fill in 'len' with number of bytes in path */
62707 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62708 -};
62709 +} __no_const;
62710
62711 extern void *prom_early_alloc(unsigned long size);
62712
62713 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62714 index a4c5624..79d6d88 100644
62715 --- a/include/linux/oprofile.h
62716 +++ b/include/linux/oprofile.h
62717 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62718 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62719 char const * name, ulong * val);
62720
62721 -/** Create a file for read-only access to an atomic_t. */
62722 +/** Create a file for read-only access to an atomic_unchecked_t. */
62723 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62724 - char const * name, atomic_t * val);
62725 + char const * name, atomic_unchecked_t * val);
62726
62727 /** create a directory */
62728 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62729 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62730 index ddbb6a9..be1680e 100644
62731 --- a/include/linux/perf_event.h
62732 +++ b/include/linux/perf_event.h
62733 @@ -879,8 +879,8 @@ struct perf_event {
62734
62735 enum perf_event_active_state state;
62736 unsigned int attach_state;
62737 - local64_t count;
62738 - atomic64_t child_count;
62739 + local64_t count; /* PaX: fix it one day */
62740 + atomic64_unchecked_t child_count;
62741
62742 /*
62743 * These are the total time in nanoseconds that the event
62744 @@ -931,8 +931,8 @@ struct perf_event {
62745 * These accumulate total time (in nanoseconds) that children
62746 * events have been enabled and running, respectively.
62747 */
62748 - atomic64_t child_total_time_enabled;
62749 - atomic64_t child_total_time_running;
62750 + atomic64_unchecked_t child_total_time_enabled;
62751 + atomic64_unchecked_t child_total_time_running;
62752
62753 /*
62754 * Protect attach/detach and child_list:
62755 diff --git a/include/linux/personality.h b/include/linux/personality.h
62756 index 8fc7dd1a..c19d89e 100644
62757 --- a/include/linux/personality.h
62758 +++ b/include/linux/personality.h
62759 @@ -44,6 +44,7 @@ enum {
62760 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62761 ADDR_NO_RANDOMIZE | \
62762 ADDR_COMPAT_LAYOUT | \
62763 + ADDR_LIMIT_3GB | \
62764 MMAP_PAGE_ZERO)
62765
62766 /*
62767 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62768 index e1ac1ce..0675fed 100644
62769 --- a/include/linux/pipe_fs_i.h
62770 +++ b/include/linux/pipe_fs_i.h
62771 @@ -45,9 +45,9 @@ struct pipe_buffer {
62772 struct pipe_inode_info {
62773 wait_queue_head_t wait;
62774 unsigned int nrbufs, curbuf, buffers;
62775 - unsigned int readers;
62776 - unsigned int writers;
62777 - unsigned int waiting_writers;
62778 + atomic_t readers;
62779 + atomic_t writers;
62780 + atomic_t waiting_writers;
62781 unsigned int r_counter;
62782 unsigned int w_counter;
62783 struct page *tmp_page;
62784 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62785 index 609daae..5392427 100644
62786 --- a/include/linux/pm_runtime.h
62787 +++ b/include/linux/pm_runtime.h
62788 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62789
62790 static inline void pm_runtime_mark_last_busy(struct device *dev)
62791 {
62792 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62793 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62794 }
62795
62796 #else /* !CONFIG_PM_RUNTIME */
62797 diff --git a/include/linux/poison.h b/include/linux/poison.h
62798 index 2110a81..13a11bb 100644
62799 --- a/include/linux/poison.h
62800 +++ b/include/linux/poison.h
62801 @@ -19,8 +19,8 @@
62802 * under normal circumstances, used to verify that nobody uses
62803 * non-initialized list entries.
62804 */
62805 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62806 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62807 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62808 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62809
62810 /********** include/linux/timer.h **********/
62811 /*
62812 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62813 index 5a710b9..0b0dab9 100644
62814 --- a/include/linux/preempt.h
62815 +++ b/include/linux/preempt.h
62816 @@ -126,7 +126,7 @@ struct preempt_ops {
62817 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62818 void (*sched_out)(struct preempt_notifier *notifier,
62819 struct task_struct *next);
62820 -};
62821 +} __no_const;
62822
62823 /**
62824 * preempt_notifier - key for installing preemption notifiers
62825 diff --git a/include/linux/printk.h b/include/linux/printk.h
62826 index 0525927..a5388b6 100644
62827 --- a/include/linux/printk.h
62828 +++ b/include/linux/printk.h
62829 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62830 extern int printk_needs_cpu(int cpu);
62831 extern void printk_tick(void);
62832
62833 +extern int kptr_restrict;
62834 +
62835 #ifdef CONFIG_PRINTK
62836 asmlinkage __printf(1, 0)
62837 int vprintk(const char *fmt, va_list args);
62838 @@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62839
62840 extern int printk_delay_msec;
62841 extern int dmesg_restrict;
62842 -extern int kptr_restrict;
62843
62844 void log_buf_kexec_setup(void);
62845 void __init setup_log_buf(int early);
62846 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62847 index 85c5073..51fac8b 100644
62848 --- a/include/linux/proc_fs.h
62849 +++ b/include/linux/proc_fs.h
62850 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62851 return proc_create_data(name, mode, parent, proc_fops, NULL);
62852 }
62853
62854 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62855 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62856 +{
62857 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62858 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62859 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62860 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62861 +#else
62862 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62863 +#endif
62864 +}
62865 +
62866 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62867 umode_t mode, struct proc_dir_entry *base,
62868 read_proc_t *read_proc, void * data)
62869 @@ -258,7 +270,7 @@ union proc_op {
62870 int (*proc_show)(struct seq_file *m,
62871 struct pid_namespace *ns, struct pid *pid,
62872 struct task_struct *task);
62873 -};
62874 +} __no_const;
62875
62876 struct ctl_table_header;
62877 struct ctl_table;
62878 diff --git a/include/linux/random.h b/include/linux/random.h
62879 index 8f74538..de61694 100644
62880 --- a/include/linux/random.h
62881 +++ b/include/linux/random.h
62882 @@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
62883 unsigned int value);
62884 extern void add_interrupt_randomness(int irq);
62885
62886 +#ifdef CONFIG_PAX_LATENT_ENTROPY
62887 +extern void transfer_latent_entropy(void);
62888 +#endif
62889 +
62890 extern void get_random_bytes(void *buf, int nbytes);
62891 void generate_random_uuid(unsigned char uuid_out[16]);
62892
62893 @@ -69,12 +73,17 @@ void srandom32(u32 seed);
62894
62895 u32 prandom32(struct rnd_state *);
62896
62897 +static inline unsigned long pax_get_random_long(void)
62898 +{
62899 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62900 +}
62901 +
62902 /*
62903 * Handle minimum values for seeds
62904 */
62905 static inline u32 __seed(u32 x, u32 m)
62906 {
62907 - return (x < m) ? x + m : x;
62908 + return (x <= m) ? x + m + 1 : x;
62909 }
62910
62911 /**
62912 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62913 index e0879a7..a12f962 100644
62914 --- a/include/linux/reboot.h
62915 +++ b/include/linux/reboot.h
62916 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62917 * Architecture-specific implementations of sys_reboot commands.
62918 */
62919
62920 -extern void machine_restart(char *cmd);
62921 -extern void machine_halt(void);
62922 -extern void machine_power_off(void);
62923 +extern void machine_restart(char *cmd) __noreturn;
62924 +extern void machine_halt(void) __noreturn;
62925 +extern void machine_power_off(void) __noreturn;
62926
62927 extern void machine_shutdown(void);
62928 struct pt_regs;
62929 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62930 */
62931
62932 extern void kernel_restart_prepare(char *cmd);
62933 -extern void kernel_restart(char *cmd);
62934 -extern void kernel_halt(void);
62935 -extern void kernel_power_off(void);
62936 +extern void kernel_restart(char *cmd) __noreturn;
62937 +extern void kernel_halt(void) __noreturn;
62938 +extern void kernel_power_off(void) __noreturn;
62939
62940 extern int C_A_D; /* for sysctl */
62941 void ctrl_alt_del(void);
62942 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62943 * Emergency restart, callable from an interrupt handler.
62944 */
62945
62946 -extern void emergency_restart(void);
62947 +extern void emergency_restart(void) __noreturn;
62948 #include <asm/emergency-restart.h>
62949
62950 #endif
62951 diff --git a/include/linux/relay.h b/include/linux/relay.h
62952 index 91cacc3..b55ff74 100644
62953 --- a/include/linux/relay.h
62954 +++ b/include/linux/relay.h
62955 @@ -160,7 +160,7 @@ struct rchan_callbacks
62956 * The callback should return 0 if successful, negative if not.
62957 */
62958 int (*remove_buf_file)(struct dentry *dentry);
62959 -};
62960 +} __no_const;
62961
62962 /*
62963 * CONFIG_RELAY kernel API, kernel/relay.c
62964 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62965 index 6fdf027..ff72610 100644
62966 --- a/include/linux/rfkill.h
62967 +++ b/include/linux/rfkill.h
62968 @@ -147,6 +147,7 @@ struct rfkill_ops {
62969 void (*query)(struct rfkill *rfkill, void *data);
62970 int (*set_block)(void *data, bool blocked);
62971 };
62972 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62973
62974 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62975 /**
62976 diff --git a/include/linux/rio.h b/include/linux/rio.h
62977 index 4d50611..c6858a2 100644
62978 --- a/include/linux/rio.h
62979 +++ b/include/linux/rio.h
62980 @@ -315,7 +315,7 @@ struct rio_ops {
62981 int mbox, void *buffer, size_t len);
62982 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62983 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62984 -};
62985 +} __no_const;
62986
62987 #define RIO_RESOURCE_MEM 0x00000100
62988 #define RIO_RESOURCE_DOORBELL 0x00000200
62989 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62990 index fd07c45..4676b8e 100644
62991 --- a/include/linux/rmap.h
62992 +++ b/include/linux/rmap.h
62993 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62994 void anon_vma_init(void); /* create anon_vma_cachep */
62995 int anon_vma_prepare(struct vm_area_struct *);
62996 void unlink_anon_vmas(struct vm_area_struct *);
62997 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62998 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62999 void anon_vma_moveto_tail(struct vm_area_struct *);
63000 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63001 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63002
63003 static inline void anon_vma_merge(struct vm_area_struct *vma,
63004 struct vm_area_struct *next)
63005 diff --git a/include/linux/sched.h b/include/linux/sched.h
63006 index 7b06169..c92adbe 100644
63007 --- a/include/linux/sched.h
63008 +++ b/include/linux/sched.h
63009 @@ -100,6 +100,7 @@ struct bio_list;
63010 struct fs_struct;
63011 struct perf_event_context;
63012 struct blk_plug;
63013 +struct linux_binprm;
63014
63015 /*
63016 * List of flags we want to share for kernel threads,
63017 @@ -382,10 +383,13 @@ struct user_namespace;
63018 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63019
63020 extern int sysctl_max_map_count;
63021 +extern unsigned long sysctl_heap_stack_gap;
63022
63023 #include <linux/aio.h>
63024
63025 #ifdef CONFIG_MMU
63026 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63027 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63028 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63029 extern unsigned long
63030 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63031 @@ -643,6 +647,17 @@ struct signal_struct {
63032 #ifdef CONFIG_TASKSTATS
63033 struct taskstats *stats;
63034 #endif
63035 +
63036 +#ifdef CONFIG_GRKERNSEC
63037 + u32 curr_ip;
63038 + u32 saved_ip;
63039 + u32 gr_saddr;
63040 + u32 gr_daddr;
63041 + u16 gr_sport;
63042 + u16 gr_dport;
63043 + u8 used_accept:1;
63044 +#endif
63045 +
63046 #ifdef CONFIG_AUDIT
63047 unsigned audit_tty;
63048 struct tty_audit_buf *tty_audit_buf;
63049 @@ -726,6 +741,11 @@ struct user_struct {
63050 struct key *session_keyring; /* UID's default session keyring */
63051 #endif
63052
63053 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63054 + unsigned int banned;
63055 + unsigned long ban_expires;
63056 +#endif
63057 +
63058 /* Hash table maintenance information */
63059 struct hlist_node uidhash_node;
63060 uid_t uid;
63061 @@ -1386,8 +1406,8 @@ struct task_struct {
63062 struct list_head thread_group;
63063
63064 struct completion *vfork_done; /* for vfork() */
63065 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63066 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63067 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63068 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63069
63070 cputime_t utime, stime, utimescaled, stimescaled;
63071 cputime_t gtime;
63072 @@ -1403,13 +1423,6 @@ struct task_struct {
63073 struct task_cputime cputime_expires;
63074 struct list_head cpu_timers[3];
63075
63076 -/* process credentials */
63077 - const struct cred __rcu *real_cred; /* objective and real subjective task
63078 - * credentials (COW) */
63079 - const struct cred __rcu *cred; /* effective (overridable) subjective task
63080 - * credentials (COW) */
63081 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63082 -
63083 char comm[TASK_COMM_LEN]; /* executable name excluding path
63084 - access with [gs]et_task_comm (which lock
63085 it with task_lock())
63086 @@ -1426,8 +1439,16 @@ struct task_struct {
63087 #endif
63088 /* CPU-specific state of this task */
63089 struct thread_struct thread;
63090 +/* thread_info moved to task_struct */
63091 +#ifdef CONFIG_X86
63092 + struct thread_info tinfo;
63093 +#endif
63094 /* filesystem information */
63095 struct fs_struct *fs;
63096 +
63097 + const struct cred __rcu *cred; /* effective (overridable) subjective task
63098 + * credentials (COW) */
63099 +
63100 /* open file information */
63101 struct files_struct *files;
63102 /* namespaces */
63103 @@ -1469,6 +1490,11 @@ struct task_struct {
63104 struct rt_mutex_waiter *pi_blocked_on;
63105 #endif
63106
63107 +/* process credentials */
63108 + const struct cred __rcu *real_cred; /* objective and real subjective task
63109 + * credentials (COW) */
63110 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63111 +
63112 #ifdef CONFIG_DEBUG_MUTEXES
63113 /* mutex deadlock detection */
63114 struct mutex_waiter *blocked_on;
63115 @@ -1585,6 +1611,27 @@ struct task_struct {
63116 unsigned long default_timer_slack_ns;
63117
63118 struct list_head *scm_work_list;
63119 +
63120 +#ifdef CONFIG_GRKERNSEC
63121 + /* grsecurity */
63122 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63123 + u64 exec_id;
63124 +#endif
63125 +#ifdef CONFIG_GRKERNSEC_SETXID
63126 + const struct cred *delayed_cred;
63127 +#endif
63128 + struct dentry *gr_chroot_dentry;
63129 + struct acl_subject_label *acl;
63130 + struct acl_role_label *role;
63131 + struct file *exec_file;
63132 + u16 acl_role_id;
63133 + /* is this the task that authenticated to the special role */
63134 + u8 acl_sp_role;
63135 + u8 is_writable;
63136 + u8 brute;
63137 + u8 gr_is_chrooted;
63138 +#endif
63139 +
63140 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63141 /* Index of current stored address in ret_stack */
63142 int curr_ret_stack;
63143 @@ -1619,6 +1666,51 @@ struct task_struct {
63144 #endif
63145 };
63146
63147 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63148 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63149 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63150 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63151 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63152 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63153 +
63154 +#ifdef CONFIG_PAX_SOFTMODE
63155 +extern int pax_softmode;
63156 +#endif
63157 +
63158 +extern int pax_check_flags(unsigned long *);
63159 +
63160 +/* if tsk != current then task_lock must be held on it */
63161 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63162 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
63163 +{
63164 + if (likely(tsk->mm))
63165 + return tsk->mm->pax_flags;
63166 + else
63167 + return 0UL;
63168 +}
63169 +
63170 +/* if tsk != current then task_lock must be held on it */
63171 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63172 +{
63173 + if (likely(tsk->mm)) {
63174 + tsk->mm->pax_flags = flags;
63175 + return 0;
63176 + }
63177 + return -EINVAL;
63178 +}
63179 +#endif
63180 +
63181 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63182 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
63183 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63184 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63185 +#endif
63186 +
63187 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63188 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63189 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
63190 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
63191 +
63192 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63193 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63194
63195 @@ -2146,7 +2238,9 @@ void yield(void);
63196 extern struct exec_domain default_exec_domain;
63197
63198 union thread_union {
63199 +#ifndef CONFIG_X86
63200 struct thread_info thread_info;
63201 +#endif
63202 unsigned long stack[THREAD_SIZE/sizeof(long)];
63203 };
63204
63205 @@ -2179,6 +2273,7 @@ extern struct pid_namespace init_pid_ns;
63206 */
63207
63208 extern struct task_struct *find_task_by_vpid(pid_t nr);
63209 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63210 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63211 struct pid_namespace *ns);
63212
63213 @@ -2322,7 +2417,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63214 extern void exit_itimers(struct signal_struct *);
63215 extern void flush_itimer_signals(void);
63216
63217 -extern void do_group_exit(int);
63218 +extern __noreturn void do_group_exit(int);
63219
63220 extern void daemonize(const char *, ...);
63221 extern int allow_signal(int);
63222 @@ -2523,9 +2618,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63223
63224 #endif
63225
63226 -static inline int object_is_on_stack(void *obj)
63227 +static inline int object_starts_on_stack(void *obj)
63228 {
63229 - void *stack = task_stack_page(current);
63230 + const void *stack = task_stack_page(current);
63231
63232 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63233 }
63234 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63235 index 899fbb4..1cb4138 100644
63236 --- a/include/linux/screen_info.h
63237 +++ b/include/linux/screen_info.h
63238 @@ -43,7 +43,8 @@ struct screen_info {
63239 __u16 pages; /* 0x32 */
63240 __u16 vesa_attributes; /* 0x34 */
63241 __u32 capabilities; /* 0x36 */
63242 - __u8 _reserved[6]; /* 0x3a */
63243 + __u16 vesapm_size; /* 0x3a */
63244 + __u8 _reserved[4]; /* 0x3c */
63245 } __attribute__((packed));
63246
63247 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63248 diff --git a/include/linux/security.h b/include/linux/security.h
63249 index 673afbb..2b7454b 100644
63250 --- a/include/linux/security.h
63251 +++ b/include/linux/security.h
63252 @@ -26,6 +26,7 @@
63253 #include <linux/capability.h>
63254 #include <linux/slab.h>
63255 #include <linux/err.h>
63256 +#include <linux/grsecurity.h>
63257
63258 struct linux_binprm;
63259 struct cred;
63260 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63261 index fc61854..d7c490b 100644
63262 --- a/include/linux/seq_file.h
63263 +++ b/include/linux/seq_file.h
63264 @@ -25,6 +25,9 @@ struct seq_file {
63265 struct mutex lock;
63266 const struct seq_operations *op;
63267 int poll_event;
63268 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63269 + u64 exec_id;
63270 +#endif
63271 void *private;
63272 };
63273
63274 @@ -34,6 +37,7 @@ struct seq_operations {
63275 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63276 int (*show) (struct seq_file *m, void *v);
63277 };
63278 +typedef struct seq_operations __no_const seq_operations_no_const;
63279
63280 #define SEQ_SKIP 1
63281
63282 diff --git a/include/linux/shm.h b/include/linux/shm.h
63283 index 92808b8..c28cac4 100644
63284 --- a/include/linux/shm.h
63285 +++ b/include/linux/shm.h
63286 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63287
63288 /* The task created the shm object. NULL if the task is dead. */
63289 struct task_struct *shm_creator;
63290 +#ifdef CONFIG_GRKERNSEC
63291 + time_t shm_createtime;
63292 + pid_t shm_lapid;
63293 +#endif
63294 };
63295
63296 /* shm_mode upper byte flags */
63297 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63298 index c1bae8d..2dbcd31 100644
63299 --- a/include/linux/skbuff.h
63300 +++ b/include/linux/skbuff.h
63301 @@ -663,7 +663,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63302 */
63303 static inline int skb_queue_empty(const struct sk_buff_head *list)
63304 {
63305 - return list->next == (struct sk_buff *)list;
63306 + return list->next == (const struct sk_buff *)list;
63307 }
63308
63309 /**
63310 @@ -676,7 +676,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63311 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63312 const struct sk_buff *skb)
63313 {
63314 - return skb->next == (struct sk_buff *)list;
63315 + return skb->next == (const struct sk_buff *)list;
63316 }
63317
63318 /**
63319 @@ -689,7 +689,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63320 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63321 const struct sk_buff *skb)
63322 {
63323 - return skb->prev == (struct sk_buff *)list;
63324 + return skb->prev == (const struct sk_buff *)list;
63325 }
63326
63327 /**
63328 @@ -1584,7 +1584,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63329 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63330 */
63331 #ifndef NET_SKB_PAD
63332 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63333 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63334 #endif
63335
63336 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63337 diff --git a/include/linux/slab.h b/include/linux/slab.h
63338 index a595dce..dfab0d2 100644
63339 --- a/include/linux/slab.h
63340 +++ b/include/linux/slab.h
63341 @@ -11,12 +11,20 @@
63342
63343 #include <linux/gfp.h>
63344 #include <linux/types.h>
63345 +#include <linux/err.h>
63346
63347 /*
63348 * Flags to pass to kmem_cache_create().
63349 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63350 */
63351 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63352 +
63353 +#ifdef CONFIG_PAX_USERCOPY_SLABS
63354 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63355 +#else
63356 +#define SLAB_USERCOPY 0x00000000UL
63357 +#endif
63358 +
63359 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63360 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63361 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63362 @@ -87,10 +95,13 @@
63363 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63364 * Both make kfree a no-op.
63365 */
63366 -#define ZERO_SIZE_PTR ((void *)16)
63367 +#define ZERO_SIZE_PTR \
63368 +({ \
63369 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63370 + (void *)(-MAX_ERRNO-1L); \
63371 +})
63372
63373 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63374 - (unsigned long)ZERO_SIZE_PTR)
63375 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63376
63377 /*
63378 * struct kmem_cache related prototypes
63379 @@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63380 void kfree(const void *);
63381 void kzfree(const void *);
63382 size_t ksize(const void *);
63383 +const char *check_heap_object(const void *ptr, unsigned long n, bool to);
63384 +bool is_usercopy_object(const void *ptr);
63385
63386 /*
63387 * Allocator specific definitions. These are mainly used to establish optimized
63388 @@ -240,6 +253,7 @@ size_t ksize(const void *);
63389 * for general use, and so are not documented here. For a full list of
63390 * potential flags, always refer to linux/gfp.h.
63391 */
63392 +static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
63393 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
63394 {
63395 if (size != 0 && n > ULONG_MAX / size)
63396 @@ -298,7 +312,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63397 */
63398 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63399 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63400 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63401 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63402 #define kmalloc_track_caller(size, flags) \
63403 __kmalloc_track_caller(size, flags, _RET_IP_)
63404 #else
63405 @@ -317,7 +331,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63406 */
63407 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63408 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63409 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63410 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63411 #define kmalloc_node_track_caller(size, flags, node) \
63412 __kmalloc_node_track_caller(size, flags, node, \
63413 _RET_IP_)
63414 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63415 index fbd1117..0a3d314 100644
63416 --- a/include/linux/slab_def.h
63417 +++ b/include/linux/slab_def.h
63418 @@ -66,10 +66,10 @@ struct kmem_cache {
63419 unsigned long node_allocs;
63420 unsigned long node_frees;
63421 unsigned long node_overflow;
63422 - atomic_t allochit;
63423 - atomic_t allocmiss;
63424 - atomic_t freehit;
63425 - atomic_t freemiss;
63426 + atomic_unchecked_t allochit;
63427 + atomic_unchecked_t allocmiss;
63428 + atomic_unchecked_t freehit;
63429 + atomic_unchecked_t freemiss;
63430
63431 /*
63432 * If debugging is enabled, then the allocator can add additional
63433 @@ -103,11 +103,16 @@ struct cache_sizes {
63434 #ifdef CONFIG_ZONE_DMA
63435 struct kmem_cache *cs_dmacachep;
63436 #endif
63437 +
63438 +#ifdef CONFIG_PAX_USERCOPY_SLABS
63439 + struct kmem_cache *cs_usercopycachep;
63440 +#endif
63441 +
63442 };
63443 extern struct cache_sizes malloc_sizes[];
63444
63445 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63446 -void *__kmalloc(size_t size, gfp_t flags);
63447 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63448
63449 #ifdef CONFIG_TRACING
63450 extern void *kmem_cache_alloc_trace(size_t size,
63451 @@ -150,6 +155,13 @@ found:
63452 cachep = malloc_sizes[i].cs_dmacachep;
63453 else
63454 #endif
63455 +
63456 +#ifdef CONFIG_PAX_USERCOPY_SLABS
63457 + if (flags & GFP_USERCOPY)
63458 + cachep = malloc_sizes[i].cs_usercopycachep;
63459 + else
63460 +#endif
63461 +
63462 cachep = malloc_sizes[i].cs_cachep;
63463
63464 ret = kmem_cache_alloc_trace(size, cachep, flags);
63465 @@ -160,7 +172,7 @@ found:
63466 }
63467
63468 #ifdef CONFIG_NUMA
63469 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63470 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63471 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63472
63473 #ifdef CONFIG_TRACING
63474 @@ -203,6 +215,13 @@ found:
63475 cachep = malloc_sizes[i].cs_dmacachep;
63476 else
63477 #endif
63478 +
63479 +#ifdef CONFIG_PAX_USERCOPY_SLABS
63480 + if (flags & GFP_USERCOPY)
63481 + cachep = malloc_sizes[i].cs_usercopycachep;
63482 + else
63483 +#endif
63484 +
63485 cachep = malloc_sizes[i].cs_cachep;
63486
63487 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
63488 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63489 index 0ec00b3..39cb7fc 100644
63490 --- a/include/linux/slob_def.h
63491 +++ b/include/linux/slob_def.h
63492 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63493 return kmem_cache_alloc_node(cachep, flags, -1);
63494 }
63495
63496 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63497 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63498
63499 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63500 {
63501 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63502 return __kmalloc_node(size, flags, -1);
63503 }
63504
63505 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63506 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63507 {
63508 return kmalloc(size, flags);
63509 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63510 index c2f8c8b..be9e036 100644
63511 --- a/include/linux/slub_def.h
63512 +++ b/include/linux/slub_def.h
63513 @@ -92,7 +92,7 @@ struct kmem_cache {
63514 struct kmem_cache_order_objects max;
63515 struct kmem_cache_order_objects min;
63516 gfp_t allocflags; /* gfp flags to use on each alloc */
63517 - int refcount; /* Refcount for slab cache destroy */
63518 + atomic_t refcount; /* Refcount for slab cache destroy */
63519 void (*ctor)(void *);
63520 int inuse; /* Offset to metadata */
63521 int align; /* Alignment */
63522 @@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
63523 * Sorry that the following has to be that ugly but some versions of GCC
63524 * have trouble with constant propagation and loops.
63525 */
63526 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
63527 static __always_inline int kmalloc_index(size_t size)
63528 {
63529 if (!size)
63530 @@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63531 }
63532
63533 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63534 -void *__kmalloc(size_t size, gfp_t flags);
63535 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63536
63537 static __always_inline void *
63538 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63539 @@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63540 }
63541 #endif
63542
63543 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63544 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63545 {
63546 unsigned int order = get_order(size);
63547 @@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63548 }
63549
63550 #ifdef CONFIG_NUMA
63551 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63552 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63553 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63554
63555 #ifdef CONFIG_TRACING
63556 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63557 index de8832d..0147b46 100644
63558 --- a/include/linux/sonet.h
63559 +++ b/include/linux/sonet.h
63560 @@ -61,7 +61,7 @@ struct sonet_stats {
63561 #include <linux/atomic.h>
63562
63563 struct k_sonet_stats {
63564 -#define __HANDLE_ITEM(i) atomic_t i
63565 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63566 __SONET_ITEMS
63567 #undef __HANDLE_ITEM
63568 };
63569 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63570 index 523547e..2cb7140 100644
63571 --- a/include/linux/sunrpc/clnt.h
63572 +++ b/include/linux/sunrpc/clnt.h
63573 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63574 {
63575 switch (sap->sa_family) {
63576 case AF_INET:
63577 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63578 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63579 case AF_INET6:
63580 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63581 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63582 }
63583 return 0;
63584 }
63585 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63586 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63587 const struct sockaddr *src)
63588 {
63589 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63590 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63591 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63592
63593 dsin->sin_family = ssin->sin_family;
63594 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63595 if (sa->sa_family != AF_INET6)
63596 return 0;
63597
63598 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63599 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63600 }
63601
63602 #endif /* __KERNEL__ */
63603 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63604 index dc0c3cc..8503fb6 100644
63605 --- a/include/linux/sunrpc/sched.h
63606 +++ b/include/linux/sunrpc/sched.h
63607 @@ -106,6 +106,7 @@ struct rpc_call_ops {
63608 void (*rpc_count_stats)(struct rpc_task *, void *);
63609 void (*rpc_release)(void *);
63610 };
63611 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63612
63613 struct rpc_task_setup {
63614 struct rpc_task *task;
63615 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63616 index 0b8e3e6..33e0a01 100644
63617 --- a/include/linux/sunrpc/svc_rdma.h
63618 +++ b/include/linux/sunrpc/svc_rdma.h
63619 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63620 extern unsigned int svcrdma_max_requests;
63621 extern unsigned int svcrdma_max_req_size;
63622
63623 -extern atomic_t rdma_stat_recv;
63624 -extern atomic_t rdma_stat_read;
63625 -extern atomic_t rdma_stat_write;
63626 -extern atomic_t rdma_stat_sq_starve;
63627 -extern atomic_t rdma_stat_rq_starve;
63628 -extern atomic_t rdma_stat_rq_poll;
63629 -extern atomic_t rdma_stat_rq_prod;
63630 -extern atomic_t rdma_stat_sq_poll;
63631 -extern atomic_t rdma_stat_sq_prod;
63632 +extern atomic_unchecked_t rdma_stat_recv;
63633 +extern atomic_unchecked_t rdma_stat_read;
63634 +extern atomic_unchecked_t rdma_stat_write;
63635 +extern atomic_unchecked_t rdma_stat_sq_starve;
63636 +extern atomic_unchecked_t rdma_stat_rq_starve;
63637 +extern atomic_unchecked_t rdma_stat_rq_poll;
63638 +extern atomic_unchecked_t rdma_stat_rq_prod;
63639 +extern atomic_unchecked_t rdma_stat_sq_poll;
63640 +extern atomic_unchecked_t rdma_stat_sq_prod;
63641
63642 #define RPCRDMA_VERSION 1
63643
63644 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63645 index c34b4c8..a65b67d 100644
63646 --- a/include/linux/sysctl.h
63647 +++ b/include/linux/sysctl.h
63648 @@ -155,7 +155,11 @@ enum
63649 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63650 };
63651
63652 -
63653 +#ifdef CONFIG_PAX_SOFTMODE
63654 +enum {
63655 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63656 +};
63657 +#endif
63658
63659 /* CTL_VM names: */
63660 enum
63661 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63662
63663 extern int proc_dostring(struct ctl_table *, int,
63664 void __user *, size_t *, loff_t *);
63665 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63666 + void __user *, size_t *, loff_t *);
63667 extern int proc_dointvec(struct ctl_table *, int,
63668 void __user *, size_t *, loff_t *);
63669 extern int proc_dointvec_minmax(struct ctl_table *, int,
63670 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63671 index ff7dc08..893e1bd 100644
63672 --- a/include/linux/tty_ldisc.h
63673 +++ b/include/linux/tty_ldisc.h
63674 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63675
63676 struct module *owner;
63677
63678 - int refcount;
63679 + atomic_t refcount;
63680 };
63681
63682 struct tty_ldisc {
63683 diff --git a/include/linux/types.h b/include/linux/types.h
63684 index 7f480db..175c256 100644
63685 --- a/include/linux/types.h
63686 +++ b/include/linux/types.h
63687 @@ -220,10 +220,26 @@ typedef struct {
63688 int counter;
63689 } atomic_t;
63690
63691 +#ifdef CONFIG_PAX_REFCOUNT
63692 +typedef struct {
63693 + int counter;
63694 +} atomic_unchecked_t;
63695 +#else
63696 +typedef atomic_t atomic_unchecked_t;
63697 +#endif
63698 +
63699 #ifdef CONFIG_64BIT
63700 typedef struct {
63701 long counter;
63702 } atomic64_t;
63703 +
63704 +#ifdef CONFIG_PAX_REFCOUNT
63705 +typedef struct {
63706 + long counter;
63707 +} atomic64_unchecked_t;
63708 +#else
63709 +typedef atomic64_t atomic64_unchecked_t;
63710 +#endif
63711 #endif
63712
63713 struct list_head {
63714 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63715 index 5ca0951..ab496a5 100644
63716 --- a/include/linux/uaccess.h
63717 +++ b/include/linux/uaccess.h
63718 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63719 long ret; \
63720 mm_segment_t old_fs = get_fs(); \
63721 \
63722 - set_fs(KERNEL_DS); \
63723 pagefault_disable(); \
63724 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63725 - pagefault_enable(); \
63726 + set_fs(KERNEL_DS); \
63727 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63728 set_fs(old_fs); \
63729 + pagefault_enable(); \
63730 ret; \
63731 })
63732
63733 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63734 index 99c1b4d..bb94261 100644
63735 --- a/include/linux/unaligned/access_ok.h
63736 +++ b/include/linux/unaligned/access_ok.h
63737 @@ -6,32 +6,32 @@
63738
63739 static inline u16 get_unaligned_le16(const void *p)
63740 {
63741 - return le16_to_cpup((__le16 *)p);
63742 + return le16_to_cpup((const __le16 *)p);
63743 }
63744
63745 static inline u32 get_unaligned_le32(const void *p)
63746 {
63747 - return le32_to_cpup((__le32 *)p);
63748 + return le32_to_cpup((const __le32 *)p);
63749 }
63750
63751 static inline u64 get_unaligned_le64(const void *p)
63752 {
63753 - return le64_to_cpup((__le64 *)p);
63754 + return le64_to_cpup((const __le64 *)p);
63755 }
63756
63757 static inline u16 get_unaligned_be16(const void *p)
63758 {
63759 - return be16_to_cpup((__be16 *)p);
63760 + return be16_to_cpup((const __be16 *)p);
63761 }
63762
63763 static inline u32 get_unaligned_be32(const void *p)
63764 {
63765 - return be32_to_cpup((__be32 *)p);
63766 + return be32_to_cpup((const __be32 *)p);
63767 }
63768
63769 static inline u64 get_unaligned_be64(const void *p)
63770 {
63771 - return be64_to_cpup((__be64 *)p);
63772 + return be64_to_cpup((const __be64 *)p);
63773 }
63774
63775 static inline void put_unaligned_le16(u16 val, void *p)
63776 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63777 index 547e59c..db6ad19 100644
63778 --- a/include/linux/usb/renesas_usbhs.h
63779 +++ b/include/linux/usb/renesas_usbhs.h
63780 @@ -39,7 +39,7 @@ enum {
63781 */
63782 struct renesas_usbhs_driver_callback {
63783 int (*notify_hotplug)(struct platform_device *pdev);
63784 -};
63785 +} __no_const;
63786
63787 /*
63788 * callback functions for platform
63789 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63790 * VBUS control is needed for Host
63791 */
63792 int (*set_vbus)(struct platform_device *pdev, int enable);
63793 -};
63794 +} __no_const;
63795
63796 /*
63797 * parameters for renesas usbhs
63798 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63799 index 6f8fbcf..8259001 100644
63800 --- a/include/linux/vermagic.h
63801 +++ b/include/linux/vermagic.h
63802 @@ -25,9 +25,35 @@
63803 #define MODULE_ARCH_VERMAGIC ""
63804 #endif
63805
63806 +#ifdef CONFIG_PAX_REFCOUNT
63807 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63808 +#else
63809 +#define MODULE_PAX_REFCOUNT ""
63810 +#endif
63811 +
63812 +#ifdef CONSTIFY_PLUGIN
63813 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63814 +#else
63815 +#define MODULE_CONSTIFY_PLUGIN ""
63816 +#endif
63817 +
63818 +#ifdef STACKLEAK_PLUGIN
63819 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63820 +#else
63821 +#define MODULE_STACKLEAK_PLUGIN ""
63822 +#endif
63823 +
63824 +#ifdef CONFIG_GRKERNSEC
63825 +#define MODULE_GRSEC "GRSEC "
63826 +#else
63827 +#define MODULE_GRSEC ""
63828 +#endif
63829 +
63830 #define VERMAGIC_STRING \
63831 UTS_RELEASE " " \
63832 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63833 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63834 - MODULE_ARCH_VERMAGIC
63835 + MODULE_ARCH_VERMAGIC \
63836 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63837 + MODULE_GRSEC
63838
63839 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63840 index dcdfc2b..ec79ab5 100644
63841 --- a/include/linux/vmalloc.h
63842 +++ b/include/linux/vmalloc.h
63843 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63844 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63845 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63846 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63847 +
63848 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63849 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63850 +#endif
63851 +
63852 /* bits [20..32] reserved for arch specific ioremap internals */
63853
63854 /*
63855 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63856 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63857 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63858 unsigned long start, unsigned long end, gfp_t gfp_mask,
63859 - pgprot_t prot, int node, void *caller);
63860 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63861 extern void vfree(const void *addr);
63862
63863 extern void *vmap(struct page **pages, unsigned int count,
63864 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63865 extern void free_vm_area(struct vm_struct *area);
63866
63867 /* for /dev/kmem */
63868 -extern long vread(char *buf, char *addr, unsigned long count);
63869 -extern long vwrite(char *buf, char *addr, unsigned long count);
63870 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63871 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63872
63873 /*
63874 * Internals. Dont't use..
63875 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63876 index 65efb92..137adbb 100644
63877 --- a/include/linux/vmstat.h
63878 +++ b/include/linux/vmstat.h
63879 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63880 /*
63881 * Zone based page accounting with per cpu differentials.
63882 */
63883 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63884 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63885
63886 static inline void zone_page_state_add(long x, struct zone *zone,
63887 enum zone_stat_item item)
63888 {
63889 - atomic_long_add(x, &zone->vm_stat[item]);
63890 - atomic_long_add(x, &vm_stat[item]);
63891 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63892 + atomic_long_add_unchecked(x, &vm_stat[item]);
63893 }
63894
63895 static inline unsigned long global_page_state(enum zone_stat_item item)
63896 {
63897 - long x = atomic_long_read(&vm_stat[item]);
63898 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63899 #ifdef CONFIG_SMP
63900 if (x < 0)
63901 x = 0;
63902 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63903 static inline unsigned long zone_page_state(struct zone *zone,
63904 enum zone_stat_item item)
63905 {
63906 - long x = atomic_long_read(&zone->vm_stat[item]);
63907 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63908 #ifdef CONFIG_SMP
63909 if (x < 0)
63910 x = 0;
63911 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63912 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63913 enum zone_stat_item item)
63914 {
63915 - long x = atomic_long_read(&zone->vm_stat[item]);
63916 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63917
63918 #ifdef CONFIG_SMP
63919 int cpu;
63920 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63921
63922 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63923 {
63924 - atomic_long_inc(&zone->vm_stat[item]);
63925 - atomic_long_inc(&vm_stat[item]);
63926 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63927 + atomic_long_inc_unchecked(&vm_stat[item]);
63928 }
63929
63930 static inline void __inc_zone_page_state(struct page *page,
63931 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63932
63933 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63934 {
63935 - atomic_long_dec(&zone->vm_stat[item]);
63936 - atomic_long_dec(&vm_stat[item]);
63937 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63938 + atomic_long_dec_unchecked(&vm_stat[item]);
63939 }
63940
63941 static inline void __dec_zone_page_state(struct page *page,
63942 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63943 index e5d1220..ef6e406 100644
63944 --- a/include/linux/xattr.h
63945 +++ b/include/linux/xattr.h
63946 @@ -57,6 +57,11 @@
63947 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63948 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63949
63950 +/* User namespace */
63951 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63952 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63953 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63954 +
63955 #ifdef __KERNEL__
63956
63957 #include <linux/types.h>
63958 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63959 index 4aeff96..b378cdc 100644
63960 --- a/include/media/saa7146_vv.h
63961 +++ b/include/media/saa7146_vv.h
63962 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63963 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63964
63965 /* the extension can override this */
63966 - struct v4l2_ioctl_ops ops;
63967 + v4l2_ioctl_ops_no_const ops;
63968 /* pointer to the saa7146 core ops */
63969 const struct v4l2_ioctl_ops *core_ops;
63970
63971 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63972 index 96d2221..2292f89 100644
63973 --- a/include/media/v4l2-dev.h
63974 +++ b/include/media/v4l2-dev.h
63975 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63976
63977
63978 struct v4l2_file_operations {
63979 - struct module *owner;
63980 + struct module * const owner;
63981 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63982 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63983 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63984 @@ -71,6 +71,7 @@ struct v4l2_file_operations {
63985 int (*open) (struct file *);
63986 int (*release) (struct file *);
63987 };
63988 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63989
63990 /*
63991 * Newer version of video_device, handled by videodev2.c
63992 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63993 index 3cb939c..f23c6bb 100644
63994 --- a/include/media/v4l2-ioctl.h
63995 +++ b/include/media/v4l2-ioctl.h
63996 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63997 long (*vidioc_default) (struct file *file, void *fh,
63998 bool valid_prio, int cmd, void *arg);
63999 };
64000 -
64001 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64002
64003 /* v4l debugging and diagnostics */
64004
64005 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64006 index 6db8ecf..8c23861 100644
64007 --- a/include/net/caif/caif_hsi.h
64008 +++ b/include/net/caif/caif_hsi.h
64009 @@ -98,7 +98,7 @@ struct cfhsi_drv {
64010 void (*rx_done_cb) (struct cfhsi_drv *drv);
64011 void (*wake_up_cb) (struct cfhsi_drv *drv);
64012 void (*wake_down_cb) (struct cfhsi_drv *drv);
64013 -};
64014 +} __no_const;
64015
64016 /* Structure implemented by HSI device. */
64017 struct cfhsi_dev {
64018 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64019 index 9e5425b..8136ffc 100644
64020 --- a/include/net/caif/cfctrl.h
64021 +++ b/include/net/caif/cfctrl.h
64022 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
64023 void (*radioset_rsp)(void);
64024 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64025 struct cflayer *client_layer);
64026 -};
64027 +} __no_const;
64028
64029 /* Link Setup Parameters for CAIF-Links. */
64030 struct cfctrl_link_param {
64031 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
64032 struct cfctrl {
64033 struct cfsrvl serv;
64034 struct cfctrl_rsp res;
64035 - atomic_t req_seq_no;
64036 - atomic_t rsp_seq_no;
64037 + atomic_unchecked_t req_seq_no;
64038 + atomic_unchecked_t rsp_seq_no;
64039 struct list_head list;
64040 /* Protects from simultaneous access to first_req list */
64041 spinlock_t info_list_lock;
64042 diff --git a/include/net/flow.h b/include/net/flow.h
64043 index 6c469db..7743b8e 100644
64044 --- a/include/net/flow.h
64045 +++ b/include/net/flow.h
64046 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64047
64048 extern void flow_cache_flush(void);
64049 extern void flow_cache_flush_deferred(void);
64050 -extern atomic_t flow_cache_genid;
64051 +extern atomic_unchecked_t flow_cache_genid;
64052
64053 #endif
64054 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64055 index 2040bff..f4c0733 100644
64056 --- a/include/net/inetpeer.h
64057 +++ b/include/net/inetpeer.h
64058 @@ -51,8 +51,8 @@ struct inet_peer {
64059 */
64060 union {
64061 struct {
64062 - atomic_t rid; /* Frag reception counter */
64063 - atomic_t ip_id_count; /* IP ID for the next packet */
64064 + atomic_unchecked_t rid; /* Frag reception counter */
64065 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64066 __u32 tcp_ts;
64067 __u32 tcp_ts_stamp;
64068 };
64069 @@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64070 more++;
64071 inet_peer_refcheck(p);
64072 do {
64073 - old = atomic_read(&p->ip_id_count);
64074 + old = atomic_read_unchecked(&p->ip_id_count);
64075 new = old + more;
64076 if (!new)
64077 new = 1;
64078 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64079 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64080 return new;
64081 }
64082
64083 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64084 index 10422ef..662570f 100644
64085 --- a/include/net/ip_fib.h
64086 +++ b/include/net/ip_fib.h
64087 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64088
64089 #define FIB_RES_SADDR(net, res) \
64090 ((FIB_RES_NH(res).nh_saddr_genid == \
64091 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64092 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64093 FIB_RES_NH(res).nh_saddr : \
64094 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64095 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64096 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64097 index 72522f0..2965e05 100644
64098 --- a/include/net/ip_vs.h
64099 +++ b/include/net/ip_vs.h
64100 @@ -510,7 +510,7 @@ struct ip_vs_conn {
64101 struct ip_vs_conn *control; /* Master control connection */
64102 atomic_t n_control; /* Number of controlled ones */
64103 struct ip_vs_dest *dest; /* real server */
64104 - atomic_t in_pkts; /* incoming packet counter */
64105 + atomic_unchecked_t in_pkts; /* incoming packet counter */
64106
64107 /* packet transmitter for different forwarding methods. If it
64108 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64109 @@ -648,7 +648,7 @@ struct ip_vs_dest {
64110 __be16 port; /* port number of the server */
64111 union nf_inet_addr addr; /* IP address of the server */
64112 volatile unsigned flags; /* dest status flags */
64113 - atomic_t conn_flags; /* flags to copy to conn */
64114 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
64115 atomic_t weight; /* server weight */
64116
64117 atomic_t refcnt; /* reference counter */
64118 @@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
64119 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
64120
64121 if (!ct || !nf_ct_is_untracked(ct)) {
64122 - nf_reset(skb);
64123 + nf_conntrack_put(skb->nfct);
64124 skb->nfct = &nf_ct_untracked_get()->ct_general;
64125 skb->nfctinfo = IP_CT_NEW;
64126 nf_conntrack_get(skb->nfct);
64127 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64128 index 69b610a..fe3962c 100644
64129 --- a/include/net/irda/ircomm_core.h
64130 +++ b/include/net/irda/ircomm_core.h
64131 @@ -51,7 +51,7 @@ typedef struct {
64132 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64133 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64134 struct ircomm_info *);
64135 -} call_t;
64136 +} __no_const call_t;
64137
64138 struct ircomm_cb {
64139 irda_queue_t queue;
64140 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64141 index 59ba38bc..d515662 100644
64142 --- a/include/net/irda/ircomm_tty.h
64143 +++ b/include/net/irda/ircomm_tty.h
64144 @@ -35,6 +35,7 @@
64145 #include <linux/termios.h>
64146 #include <linux/timer.h>
64147 #include <linux/tty.h> /* struct tty_struct */
64148 +#include <asm/local.h>
64149
64150 #include <net/irda/irias_object.h>
64151 #include <net/irda/ircomm_core.h>
64152 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64153 unsigned short close_delay;
64154 unsigned short closing_wait; /* time to wait before closing */
64155
64156 - int open_count;
64157 - int blocked_open; /* # of blocked opens */
64158 + local_t open_count;
64159 + local_t blocked_open; /* # of blocked opens */
64160
64161 /* Protect concurent access to :
64162 * o self->open_count
64163 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64164 index cc7c197..9f2da2a 100644
64165 --- a/include/net/iucv/af_iucv.h
64166 +++ b/include/net/iucv/af_iucv.h
64167 @@ -141,7 +141,7 @@ struct iucv_sock {
64168 struct iucv_sock_list {
64169 struct hlist_head head;
64170 rwlock_t lock;
64171 - atomic_t autobind_name;
64172 + atomic_unchecked_t autobind_name;
64173 };
64174
64175 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64176 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64177 index 34c996f..bb3b4d4 100644
64178 --- a/include/net/neighbour.h
64179 +++ b/include/net/neighbour.h
64180 @@ -123,7 +123,7 @@ struct neigh_ops {
64181 void (*error_report)(struct neighbour *, struct sk_buff *);
64182 int (*output)(struct neighbour *, struct sk_buff *);
64183 int (*connected_output)(struct neighbour *, struct sk_buff *);
64184 -};
64185 +} __do_const;
64186
64187 struct pneigh_entry {
64188 struct pneigh_entry *next;
64189 diff --git a/include/net/netlink.h b/include/net/netlink.h
64190 index f394fe5..fd073f9 100644
64191 --- a/include/net/netlink.h
64192 +++ b/include/net/netlink.h
64193 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64194 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64195 {
64196 if (mark)
64197 - skb_trim(skb, (unsigned char *) mark - skb->data);
64198 + skb_trim(skb, (const unsigned char *) mark - skb->data);
64199 }
64200
64201 /**
64202 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64203 index bbd023a..97c6d0d 100644
64204 --- a/include/net/netns/ipv4.h
64205 +++ b/include/net/netns/ipv4.h
64206 @@ -57,8 +57,8 @@ struct netns_ipv4 {
64207 unsigned int sysctl_ping_group_range[2];
64208 long sysctl_tcp_mem[3];
64209
64210 - atomic_t rt_genid;
64211 - atomic_t dev_addr_genid;
64212 + atomic_unchecked_t rt_genid;
64213 + atomic_unchecked_t dev_addr_genid;
64214
64215 #ifdef CONFIG_IP_MROUTE
64216 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64217 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64218 index a2ef814..31a8e3f 100644
64219 --- a/include/net/sctp/sctp.h
64220 +++ b/include/net/sctp/sctp.h
64221 @@ -318,9 +318,9 @@ do { \
64222
64223 #else /* SCTP_DEBUG */
64224
64225 -#define SCTP_DEBUG_PRINTK(whatever...)
64226 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64227 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64228 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64229 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64230 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64231 #define SCTP_ENABLE_DEBUG
64232 #define SCTP_DISABLE_DEBUG
64233 #define SCTP_ASSERT(expr, str, func)
64234 diff --git a/include/net/sock.h b/include/net/sock.h
64235 index 5a0a58a..2e3d4d0 100644
64236 --- a/include/net/sock.h
64237 +++ b/include/net/sock.h
64238 @@ -302,7 +302,7 @@ struct sock {
64239 #ifdef CONFIG_RPS
64240 __u32 sk_rxhash;
64241 #endif
64242 - atomic_t sk_drops;
64243 + atomic_unchecked_t sk_drops;
64244 int sk_rcvbuf;
64245
64246 struct sk_filter __rcu *sk_filter;
64247 @@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64248 }
64249
64250 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64251 - char __user *from, char *to,
64252 + char __user *from, unsigned char *to,
64253 int copy, int offset)
64254 {
64255 if (skb->ip_summed == CHECKSUM_NONE) {
64256 diff --git a/include/net/tcp.h b/include/net/tcp.h
64257 index f75a04d..702cf06 100644
64258 --- a/include/net/tcp.h
64259 +++ b/include/net/tcp.h
64260 @@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
64261 char *name;
64262 sa_family_t family;
64263 const struct file_operations *seq_fops;
64264 - struct seq_operations seq_ops;
64265 + seq_operations_no_const seq_ops;
64266 };
64267
64268 struct tcp_iter_state {
64269 diff --git a/include/net/udp.h b/include/net/udp.h
64270 index 5d606d9..e879f7b 100644
64271 --- a/include/net/udp.h
64272 +++ b/include/net/udp.h
64273 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
64274 sa_family_t family;
64275 struct udp_table *udp_table;
64276 const struct file_operations *seq_fops;
64277 - struct seq_operations seq_ops;
64278 + seq_operations_no_const seq_ops;
64279 };
64280
64281 struct udp_iter_state {
64282 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64283 index 96239e7..c85b032 100644
64284 --- a/include/net/xfrm.h
64285 +++ b/include/net/xfrm.h
64286 @@ -505,7 +505,7 @@ struct xfrm_policy {
64287 struct timer_list timer;
64288
64289 struct flow_cache_object flo;
64290 - atomic_t genid;
64291 + atomic_unchecked_t genid;
64292 u32 priority;
64293 u32 index;
64294 struct xfrm_mark mark;
64295 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64296 index 1a046b1..ee0bef0 100644
64297 --- a/include/rdma/iw_cm.h
64298 +++ b/include/rdma/iw_cm.h
64299 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
64300 int backlog);
64301
64302 int (*destroy_listen)(struct iw_cm_id *cm_id);
64303 -};
64304 +} __no_const;
64305
64306 /**
64307 * iw_create_cm_id - Create an IW CM identifier.
64308 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64309 index 8f9dfba..610ab6c 100644
64310 --- a/include/scsi/libfc.h
64311 +++ b/include/scsi/libfc.h
64312 @@ -756,6 +756,7 @@ struct libfc_function_template {
64313 */
64314 void (*disc_stop_final) (struct fc_lport *);
64315 };
64316 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64317
64318 /**
64319 * struct fc_disc - Discovery context
64320 @@ -861,7 +862,7 @@ struct fc_lport {
64321 struct fc_vport *vport;
64322
64323 /* Operational Information */
64324 - struct libfc_function_template tt;
64325 + libfc_function_template_no_const tt;
64326 u8 link_up;
64327 u8 qfull;
64328 enum fc_lport_state state;
64329 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64330 index ba96988..ecf2eb9 100644
64331 --- a/include/scsi/scsi_device.h
64332 +++ b/include/scsi/scsi_device.h
64333 @@ -163,9 +163,9 @@ struct scsi_device {
64334 unsigned int max_device_blocked; /* what device_blocked counts down from */
64335 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64336
64337 - atomic_t iorequest_cnt;
64338 - atomic_t iodone_cnt;
64339 - atomic_t ioerr_cnt;
64340 + atomic_unchecked_t iorequest_cnt;
64341 + atomic_unchecked_t iodone_cnt;
64342 + atomic_unchecked_t ioerr_cnt;
64343
64344 struct device sdev_gendev,
64345 sdev_dev;
64346 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64347 index 719faf1..d1154d4 100644
64348 --- a/include/scsi/scsi_transport_fc.h
64349 +++ b/include/scsi/scsi_transport_fc.h
64350 @@ -739,7 +739,7 @@ struct fc_function_template {
64351 unsigned long show_host_system_hostname:1;
64352
64353 unsigned long disable_target_scan:1;
64354 -};
64355 +} __do_const;
64356
64357
64358 /**
64359 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64360 index 030b87c..98a6954 100644
64361 --- a/include/sound/ak4xxx-adda.h
64362 +++ b/include/sound/ak4xxx-adda.h
64363 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64364 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64365 unsigned char val);
64366 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64367 -};
64368 +} __no_const;
64369
64370 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64371
64372 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64373 index 8c05e47..2b5df97 100644
64374 --- a/include/sound/hwdep.h
64375 +++ b/include/sound/hwdep.h
64376 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64377 struct snd_hwdep_dsp_status *status);
64378 int (*dsp_load)(struct snd_hwdep *hw,
64379 struct snd_hwdep_dsp_image *image);
64380 -};
64381 +} __no_const;
64382
64383 struct snd_hwdep {
64384 struct snd_card *card;
64385 diff --git a/include/sound/info.h b/include/sound/info.h
64386 index 9ca1a49..aba1728 100644
64387 --- a/include/sound/info.h
64388 +++ b/include/sound/info.h
64389 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64390 struct snd_info_buffer *buffer);
64391 void (*write)(struct snd_info_entry *entry,
64392 struct snd_info_buffer *buffer);
64393 -};
64394 +} __no_const;
64395
64396 struct snd_info_entry_ops {
64397 int (*open)(struct snd_info_entry *entry,
64398 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64399 index 0d11128..814178e 100644
64400 --- a/include/sound/pcm.h
64401 +++ b/include/sound/pcm.h
64402 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64403 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64404 int (*ack)(struct snd_pcm_substream *substream);
64405 };
64406 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64407
64408 /*
64409 *
64410 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64411 index af1b49e..a5d55a5 100644
64412 --- a/include/sound/sb16_csp.h
64413 +++ b/include/sound/sb16_csp.h
64414 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64415 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64416 int (*csp_stop) (struct snd_sb_csp * p);
64417 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64418 -};
64419 +} __no_const;
64420
64421 /*
64422 * CSP private data
64423 diff --git a/include/sound/soc.h b/include/sound/soc.h
64424 index 2ebf787..0276839 100644
64425 --- a/include/sound/soc.h
64426 +++ b/include/sound/soc.h
64427 @@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
64428 /* platform IO - used for platform DAPM */
64429 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64430 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64431 -};
64432 +} __do_const;
64433
64434 struct snd_soc_platform {
64435 const char *name;
64436 @@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
64437 struct snd_soc_dai_link *dai_link;
64438 struct mutex pcm_mutex;
64439 enum snd_soc_pcm_subclass pcm_subclass;
64440 - struct snd_pcm_ops ops;
64441 + snd_pcm_ops_no_const ops;
64442
64443 unsigned int complete:1;
64444 unsigned int dev_registered:1;
64445 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64446 index 4119966..1a4671c 100644
64447 --- a/include/sound/ymfpci.h
64448 +++ b/include/sound/ymfpci.h
64449 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64450 spinlock_t reg_lock;
64451 spinlock_t voice_lock;
64452 wait_queue_head_t interrupt_sleep;
64453 - atomic_t interrupt_sleep_count;
64454 + atomic_unchecked_t interrupt_sleep_count;
64455 struct snd_info_entry *proc_entry;
64456 const struct firmware *dsp_microcode;
64457 const struct firmware *controller_microcode;
64458 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64459 index aaccc5f..092d568 100644
64460 --- a/include/target/target_core_base.h
64461 +++ b/include/target/target_core_base.h
64462 @@ -447,7 +447,7 @@ struct t10_reservation_ops {
64463 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64464 int (*t10_pr_register)(struct se_cmd *);
64465 int (*t10_pr_clear)(struct se_cmd *);
64466 -};
64467 +} __no_const;
64468
64469 struct t10_reservation {
64470 /* Reservation effects all target ports */
64471 @@ -576,7 +576,7 @@ struct se_cmd {
64472 atomic_t t_se_count;
64473 atomic_t t_task_cdbs_left;
64474 atomic_t t_task_cdbs_ex_left;
64475 - atomic_t t_task_cdbs_sent;
64476 + atomic_unchecked_t t_task_cdbs_sent;
64477 unsigned int transport_state;
64478 #define CMD_T_ABORTED (1 << 0)
64479 #define CMD_T_ACTIVE (1 << 1)
64480 @@ -802,7 +802,7 @@ struct se_device {
64481 spinlock_t stats_lock;
64482 /* Active commands on this virtual SE device */
64483 atomic_t simple_cmds;
64484 - atomic_t dev_ordered_id;
64485 + atomic_unchecked_t dev_ordered_id;
64486 atomic_t execute_tasks;
64487 atomic_t dev_ordered_sync;
64488 atomic_t dev_qf_count;
64489 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
64490 new file mode 100644
64491 index 0000000..2efe49d
64492 --- /dev/null
64493 +++ b/include/trace/events/fs.h
64494 @@ -0,0 +1,53 @@
64495 +#undef TRACE_SYSTEM
64496 +#define TRACE_SYSTEM fs
64497 +
64498 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
64499 +#define _TRACE_FS_H
64500 +
64501 +#include <linux/fs.h>
64502 +#include <linux/tracepoint.h>
64503 +
64504 +TRACE_EVENT(do_sys_open,
64505 +
64506 + TP_PROTO(char *filename, int flags, int mode),
64507 +
64508 + TP_ARGS(filename, flags, mode),
64509 +
64510 + TP_STRUCT__entry(
64511 + __string( filename, filename )
64512 + __field( int, flags )
64513 + __field( int, mode )
64514 + ),
64515 +
64516 + TP_fast_assign(
64517 + __assign_str(filename, filename);
64518 + __entry->flags = flags;
64519 + __entry->mode = mode;
64520 + ),
64521 +
64522 + TP_printk("\"%s\" %x %o",
64523 + __get_str(filename), __entry->flags, __entry->mode)
64524 +);
64525 +
64526 +TRACE_EVENT(open_exec,
64527 +
64528 + TP_PROTO(const char *filename),
64529 +
64530 + TP_ARGS(filename),
64531 +
64532 + TP_STRUCT__entry(
64533 + __string( filename, filename )
64534 + ),
64535 +
64536 + TP_fast_assign(
64537 + __assign_str(filename, filename);
64538 + ),
64539 +
64540 + TP_printk("\"%s\"",
64541 + __get_str(filename))
64542 +);
64543 +
64544 +#endif /* _TRACE_FS_H */
64545 +
64546 +/* This part must be outside protection */
64547 +#include <trace/define_trace.h>
64548 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64549 index 1c09820..7f5ec79 100644
64550 --- a/include/trace/events/irq.h
64551 +++ b/include/trace/events/irq.h
64552 @@ -36,7 +36,7 @@ struct softirq_action;
64553 */
64554 TRACE_EVENT(irq_handler_entry,
64555
64556 - TP_PROTO(int irq, struct irqaction *action),
64557 + TP_PROTO(int irq, const struct irqaction *action),
64558
64559 TP_ARGS(irq, action),
64560
64561 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64562 */
64563 TRACE_EVENT(irq_handler_exit,
64564
64565 - TP_PROTO(int irq, struct irqaction *action, int ret),
64566 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64567
64568 TP_ARGS(irq, action, ret),
64569
64570 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64571 index f9466fa..f4e2b81 100644
64572 --- a/include/video/udlfb.h
64573 +++ b/include/video/udlfb.h
64574 @@ -53,10 +53,10 @@ struct dlfb_data {
64575 u32 pseudo_palette[256];
64576 int blank_mode; /*one of FB_BLANK_ */
64577 /* blit-only rendering path metrics, exposed through sysfs */
64578 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64579 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64580 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64581 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64582 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64583 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64584 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64585 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64586 };
64587
64588 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64589 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64590 index 0993a22..32ba2fe 100644
64591 --- a/include/video/uvesafb.h
64592 +++ b/include/video/uvesafb.h
64593 @@ -177,6 +177,7 @@ struct uvesafb_par {
64594 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64595 u8 pmi_setpal; /* PMI for palette changes */
64596 u16 *pmi_base; /* protected mode interface location */
64597 + u8 *pmi_code; /* protected mode code location */
64598 void *pmi_start;
64599 void *pmi_pal;
64600 u8 *vbe_state_orig; /*
64601 diff --git a/init/Kconfig b/init/Kconfig
64602 index 6cfd71d..16006e6 100644
64603 --- a/init/Kconfig
64604 +++ b/init/Kconfig
64605 @@ -790,6 +790,7 @@ endif # CGROUPS
64606
64607 config CHECKPOINT_RESTORE
64608 bool "Checkpoint/restore support" if EXPERT
64609 + depends on !GRKERNSEC
64610 default n
64611 help
64612 Enables additional kernel features in a sake of checkpoint/restore.
64613 @@ -1240,7 +1241,7 @@ config SLUB_DEBUG
64614
64615 config COMPAT_BRK
64616 bool "Disable heap randomization"
64617 - default y
64618 + default n
64619 help
64620 Randomizing heap placement makes heap exploits harder, but it
64621 also breaks ancient binaries (including anything libc5 based).
64622 @@ -1423,7 +1424,7 @@ config INIT_ALL_POSSIBLE
64623 config STOP_MACHINE
64624 bool
64625 default y
64626 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
64627 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
64628 help
64629 Need stop_machine() primitive.
64630
64631 diff --git a/init/do_mounts.c b/init/do_mounts.c
64632 index 42b0707..c06eef4 100644
64633 --- a/init/do_mounts.c
64634 +++ b/init/do_mounts.c
64635 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64636 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64637 {
64638 struct super_block *s;
64639 - int err = sys_mount(name, "/root", fs, flags, data);
64640 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64641 if (err)
64642 return err;
64643
64644 - sys_chdir((const char __user __force *)"/root");
64645 + sys_chdir((const char __force_user *)"/root");
64646 s = current->fs->pwd.dentry->d_sb;
64647 ROOT_DEV = s->s_dev;
64648 printk(KERN_INFO
64649 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64650 va_start(args, fmt);
64651 vsprintf(buf, fmt, args);
64652 va_end(args);
64653 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64654 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64655 if (fd >= 0) {
64656 sys_ioctl(fd, FDEJECT, 0);
64657 sys_close(fd);
64658 }
64659 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64660 - fd = sys_open("/dev/console", O_RDWR, 0);
64661 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64662 if (fd >= 0) {
64663 sys_ioctl(fd, TCGETS, (long)&termios);
64664 termios.c_lflag &= ~ICANON;
64665 sys_ioctl(fd, TCSETSF, (long)&termios);
64666 - sys_read(fd, &c, 1);
64667 + sys_read(fd, (char __user *)&c, 1);
64668 termios.c_lflag |= ICANON;
64669 sys_ioctl(fd, TCSETSF, (long)&termios);
64670 sys_close(fd);
64671 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64672 mount_root();
64673 out:
64674 devtmpfs_mount("dev");
64675 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64676 - sys_chroot((const char __user __force *)".");
64677 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64678 + sys_chroot((const char __force_user *)".");
64679 }
64680 diff --git a/init/do_mounts.h b/init/do_mounts.h
64681 index f5b978a..69dbfe8 100644
64682 --- a/init/do_mounts.h
64683 +++ b/init/do_mounts.h
64684 @@ -15,15 +15,15 @@ extern int root_mountflags;
64685
64686 static inline int create_dev(char *name, dev_t dev)
64687 {
64688 - sys_unlink(name);
64689 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64690 + sys_unlink((char __force_user *)name);
64691 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64692 }
64693
64694 #if BITS_PER_LONG == 32
64695 static inline u32 bstat(char *name)
64696 {
64697 struct stat64 stat;
64698 - if (sys_stat64(name, &stat) != 0)
64699 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64700 return 0;
64701 if (!S_ISBLK(stat.st_mode))
64702 return 0;
64703 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64704 static inline u32 bstat(char *name)
64705 {
64706 struct stat stat;
64707 - if (sys_newstat(name, &stat) != 0)
64708 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64709 return 0;
64710 if (!S_ISBLK(stat.st_mode))
64711 return 0;
64712 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64713 index 9047330..de0d1fb 100644
64714 --- a/init/do_mounts_initrd.c
64715 +++ b/init/do_mounts_initrd.c
64716 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64717 create_dev("/dev/root.old", Root_RAM0);
64718 /* mount initrd on rootfs' /root */
64719 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64720 - sys_mkdir("/old", 0700);
64721 - root_fd = sys_open("/", 0, 0);
64722 - old_fd = sys_open("/old", 0, 0);
64723 + sys_mkdir((const char __force_user *)"/old", 0700);
64724 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64725 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64726 /* move initrd over / and chdir/chroot in initrd root */
64727 - sys_chdir("/root");
64728 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64729 - sys_chroot(".");
64730 + sys_chdir((const char __force_user *)"/root");
64731 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64732 + sys_chroot((const char __force_user *)".");
64733
64734 /*
64735 * In case that a resume from disk is carried out by linuxrc or one of
64736 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64737
64738 /* move initrd to rootfs' /old */
64739 sys_fchdir(old_fd);
64740 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64741 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64742 /* switch root and cwd back to / of rootfs */
64743 sys_fchdir(root_fd);
64744 - sys_chroot(".");
64745 + sys_chroot((const char __force_user *)".");
64746 sys_close(old_fd);
64747 sys_close(root_fd);
64748
64749 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64750 - sys_chdir("/old");
64751 + sys_chdir((const char __force_user *)"/old");
64752 return;
64753 }
64754
64755 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64756 mount_root();
64757
64758 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64759 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64760 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64761 if (!error)
64762 printk("okay\n");
64763 else {
64764 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64765 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64766 if (error == -ENOENT)
64767 printk("/initrd does not exist. Ignored.\n");
64768 else
64769 printk("failed\n");
64770 printk(KERN_NOTICE "Unmounting old root\n");
64771 - sys_umount("/old", MNT_DETACH);
64772 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64773 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64774 if (fd < 0) {
64775 error = fd;
64776 @@ -115,11 +115,11 @@ int __init initrd_load(void)
64777 * mounted in the normal path.
64778 */
64779 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64780 - sys_unlink("/initrd.image");
64781 + sys_unlink((const char __force_user *)"/initrd.image");
64782 handle_initrd();
64783 return 1;
64784 }
64785 }
64786 - sys_unlink("/initrd.image");
64787 + sys_unlink((const char __force_user *)"/initrd.image");
64788 return 0;
64789 }
64790 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64791 index 32c4799..c27ee74 100644
64792 --- a/init/do_mounts_md.c
64793 +++ b/init/do_mounts_md.c
64794 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64795 partitioned ? "_d" : "", minor,
64796 md_setup_args[ent].device_names);
64797
64798 - fd = sys_open(name, 0, 0);
64799 + fd = sys_open((char __force_user *)name, 0, 0);
64800 if (fd < 0) {
64801 printk(KERN_ERR "md: open failed - cannot start "
64802 "array %s\n", name);
64803 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64804 * array without it
64805 */
64806 sys_close(fd);
64807 - fd = sys_open(name, 0, 0);
64808 + fd = sys_open((char __force_user *)name, 0, 0);
64809 sys_ioctl(fd, BLKRRPART, 0);
64810 }
64811 sys_close(fd);
64812 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64813
64814 wait_for_device_probe();
64815
64816 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64817 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64818 if (fd >= 0) {
64819 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64820 sys_close(fd);
64821 diff --git a/init/initramfs.c b/init/initramfs.c
64822 index 8216c30..25e8e32 100644
64823 --- a/init/initramfs.c
64824 +++ b/init/initramfs.c
64825 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64826 }
64827 }
64828
64829 -static long __init do_utime(char __user *filename, time_t mtime)
64830 +static long __init do_utime(__force char __user *filename, time_t mtime)
64831 {
64832 struct timespec t[2];
64833
64834 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64835 struct dir_entry *de, *tmp;
64836 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64837 list_del(&de->list);
64838 - do_utime(de->name, de->mtime);
64839 + do_utime((char __force_user *)de->name, de->mtime);
64840 kfree(de->name);
64841 kfree(de);
64842 }
64843 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64844 if (nlink >= 2) {
64845 char *old = find_link(major, minor, ino, mode, collected);
64846 if (old)
64847 - return (sys_link(old, collected) < 0) ? -1 : 1;
64848 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64849 }
64850 return 0;
64851 }
64852 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64853 {
64854 struct stat st;
64855
64856 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64857 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64858 if (S_ISDIR(st.st_mode))
64859 - sys_rmdir(path);
64860 + sys_rmdir((char __force_user *)path);
64861 else
64862 - sys_unlink(path);
64863 + sys_unlink((char __force_user *)path);
64864 }
64865 }
64866
64867 @@ -305,7 +305,7 @@ static int __init do_name(void)
64868 int openflags = O_WRONLY|O_CREAT;
64869 if (ml != 1)
64870 openflags |= O_TRUNC;
64871 - wfd = sys_open(collected, openflags, mode);
64872 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64873
64874 if (wfd >= 0) {
64875 sys_fchown(wfd, uid, gid);
64876 @@ -317,17 +317,17 @@ static int __init do_name(void)
64877 }
64878 }
64879 } else if (S_ISDIR(mode)) {
64880 - sys_mkdir(collected, mode);
64881 - sys_chown(collected, uid, gid);
64882 - sys_chmod(collected, mode);
64883 + sys_mkdir((char __force_user *)collected, mode);
64884 + sys_chown((char __force_user *)collected, uid, gid);
64885 + sys_chmod((char __force_user *)collected, mode);
64886 dir_add(collected, mtime);
64887 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64888 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64889 if (maybe_link() == 0) {
64890 - sys_mknod(collected, mode, rdev);
64891 - sys_chown(collected, uid, gid);
64892 - sys_chmod(collected, mode);
64893 - do_utime(collected, mtime);
64894 + sys_mknod((char __force_user *)collected, mode, rdev);
64895 + sys_chown((char __force_user *)collected, uid, gid);
64896 + sys_chmod((char __force_user *)collected, mode);
64897 + do_utime((char __force_user *)collected, mtime);
64898 }
64899 }
64900 return 0;
64901 @@ -336,15 +336,15 @@ static int __init do_name(void)
64902 static int __init do_copy(void)
64903 {
64904 if (count >= body_len) {
64905 - sys_write(wfd, victim, body_len);
64906 + sys_write(wfd, (char __force_user *)victim, body_len);
64907 sys_close(wfd);
64908 - do_utime(vcollected, mtime);
64909 + do_utime((char __force_user *)vcollected, mtime);
64910 kfree(vcollected);
64911 eat(body_len);
64912 state = SkipIt;
64913 return 0;
64914 } else {
64915 - sys_write(wfd, victim, count);
64916 + sys_write(wfd, (char __force_user *)victim, count);
64917 body_len -= count;
64918 eat(count);
64919 return 1;
64920 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64921 {
64922 collected[N_ALIGN(name_len) + body_len] = '\0';
64923 clean_path(collected, 0);
64924 - sys_symlink(collected + N_ALIGN(name_len), collected);
64925 - sys_lchown(collected, uid, gid);
64926 - do_utime(collected, mtime);
64927 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64928 + sys_lchown((char __force_user *)collected, uid, gid);
64929 + do_utime((char __force_user *)collected, mtime);
64930 state = SkipIt;
64931 next_state = Reset;
64932 return 0;
64933 diff --git a/init/main.c b/init/main.c
64934 index b08c5f7..bf65a52 100644
64935 --- a/init/main.c
64936 +++ b/init/main.c
64937 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64938 extern void tc_init(void);
64939 #endif
64940
64941 +extern void grsecurity_init(void);
64942 +
64943 /*
64944 * Debug helper: via this flag we know that we are in 'early bootup code'
64945 * where only the boot processor is running with IRQ disabled. This means
64946 @@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64947
64948 __setup("reset_devices", set_reset_devices);
64949
64950 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64951 +extern char pax_enter_kernel_user[];
64952 +extern char pax_exit_kernel_user[];
64953 +extern pgdval_t clone_pgd_mask;
64954 +#endif
64955 +
64956 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64957 +static int __init setup_pax_nouderef(char *str)
64958 +{
64959 +#ifdef CONFIG_X86_32
64960 + unsigned int cpu;
64961 + struct desc_struct *gdt;
64962 +
64963 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64964 + gdt = get_cpu_gdt_table(cpu);
64965 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64966 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64967 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64968 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64969 + }
64970 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64971 +#else
64972 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64973 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64974 + clone_pgd_mask = ~(pgdval_t)0UL;
64975 +#endif
64976 +
64977 + return 0;
64978 +}
64979 +early_param("pax_nouderef", setup_pax_nouderef);
64980 +#endif
64981 +
64982 +#ifdef CONFIG_PAX_SOFTMODE
64983 +int pax_softmode;
64984 +
64985 +static int __init setup_pax_softmode(char *str)
64986 +{
64987 + get_option(&str, &pax_softmode);
64988 + return 1;
64989 +}
64990 +__setup("pax_softmode=", setup_pax_softmode);
64991 +#endif
64992 +
64993 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64994 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64995 static const char *panic_later, *panic_param;
64996 @@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64997 {
64998 int count = preempt_count();
64999 int ret;
65000 + const char *msg1 = "", *msg2 = "";
65001
65002 if (initcall_debug)
65003 ret = do_one_initcall_debug(fn);
65004 @@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65005 sprintf(msgbuf, "error code %d ", ret);
65006
65007 if (preempt_count() != count) {
65008 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65009 + msg1 = " preemption imbalance";
65010 preempt_count() = count;
65011 }
65012 if (irqs_disabled()) {
65013 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65014 + msg2 = " disabled interrupts";
65015 local_irq_enable();
65016 }
65017 - if (msgbuf[0]) {
65018 - printk("initcall %pF returned with %s\n", fn, msgbuf);
65019 + if (msgbuf[0] || *msg1 || *msg2) {
65020 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65021 }
65022
65023 return ret;
65024 @@ -747,8 +793,14 @@ static void __init do_initcall_level(int level)
65025 level, level,
65026 repair_env_string);
65027
65028 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
65029 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
65030 do_one_initcall(*fn);
65031 +
65032 +#ifdef CONFIG_PAX_LATENT_ENTROPY
65033 + transfer_latent_entropy();
65034 +#endif
65035 +
65036 + }
65037 }
65038
65039 static void __init do_initcalls(void)
65040 @@ -782,8 +834,14 @@ static void __init do_pre_smp_initcalls(void)
65041 {
65042 initcall_t *fn;
65043
65044 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
65045 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
65046 do_one_initcall(*fn);
65047 +
65048 +#ifdef CONFIG_PAX_LATENT_ENTROPY
65049 + transfer_latent_entropy();
65050 +#endif
65051 +
65052 + }
65053 }
65054
65055 static void run_init_process(const char *init_filename)
65056 @@ -865,7 +923,7 @@ static int __init kernel_init(void * unused)
65057 do_basic_setup();
65058
65059 /* Open the /dev/console on the rootfs, this should never fail */
65060 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65061 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65062 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65063
65064 (void) sys_dup(0);
65065 @@ -878,11 +936,13 @@ static int __init kernel_init(void * unused)
65066 if (!ramdisk_execute_command)
65067 ramdisk_execute_command = "/init";
65068
65069 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65070 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65071 ramdisk_execute_command = NULL;
65072 prepare_namespace();
65073 }
65074
65075 + grsecurity_init();
65076 +
65077 /*
65078 * Ok, we have completed the initial bootup, and
65079 * we're essentially up and running. Get rid of the
65080 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65081 index 28bd64d..c66b72a 100644
65082 --- a/ipc/mqueue.c
65083 +++ b/ipc/mqueue.c
65084 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65085 mq_bytes = (mq_msg_tblsz +
65086 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65087
65088 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65089 spin_lock(&mq_lock);
65090 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65091 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
65092 diff --git a/ipc/msg.c b/ipc/msg.c
65093 index 7385de2..a8180e08 100644
65094 --- a/ipc/msg.c
65095 +++ b/ipc/msg.c
65096 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65097 return security_msg_queue_associate(msq, msgflg);
65098 }
65099
65100 +static struct ipc_ops msg_ops = {
65101 + .getnew = newque,
65102 + .associate = msg_security,
65103 + .more_checks = NULL
65104 +};
65105 +
65106 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65107 {
65108 struct ipc_namespace *ns;
65109 - struct ipc_ops msg_ops;
65110 struct ipc_params msg_params;
65111
65112 ns = current->nsproxy->ipc_ns;
65113
65114 - msg_ops.getnew = newque;
65115 - msg_ops.associate = msg_security;
65116 - msg_ops.more_checks = NULL;
65117 -
65118 msg_params.key = key;
65119 msg_params.flg = msgflg;
65120
65121 diff --git a/ipc/sem.c b/ipc/sem.c
65122 index 5215a81..cfc0cac 100644
65123 --- a/ipc/sem.c
65124 +++ b/ipc/sem.c
65125 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65126 return 0;
65127 }
65128
65129 +static struct ipc_ops sem_ops = {
65130 + .getnew = newary,
65131 + .associate = sem_security,
65132 + .more_checks = sem_more_checks
65133 +};
65134 +
65135 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65136 {
65137 struct ipc_namespace *ns;
65138 - struct ipc_ops sem_ops;
65139 struct ipc_params sem_params;
65140
65141 ns = current->nsproxy->ipc_ns;
65142 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65143 if (nsems < 0 || nsems > ns->sc_semmsl)
65144 return -EINVAL;
65145
65146 - sem_ops.getnew = newary;
65147 - sem_ops.associate = sem_security;
65148 - sem_ops.more_checks = sem_more_checks;
65149 -
65150 sem_params.key = key;
65151 sem_params.flg = semflg;
65152 sem_params.u.nsems = nsems;
65153 diff --git a/ipc/shm.c b/ipc/shm.c
65154 index 406c5b2..bc66d67 100644
65155 --- a/ipc/shm.c
65156 +++ b/ipc/shm.c
65157 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65158 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65159 #endif
65160
65161 +#ifdef CONFIG_GRKERNSEC
65162 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65163 + const time_t shm_createtime, const uid_t cuid,
65164 + const int shmid);
65165 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65166 + const time_t shm_createtime);
65167 +#endif
65168 +
65169 void shm_init_ns(struct ipc_namespace *ns)
65170 {
65171 ns->shm_ctlmax = SHMMAX;
65172 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65173 shp->shm_lprid = 0;
65174 shp->shm_atim = shp->shm_dtim = 0;
65175 shp->shm_ctim = get_seconds();
65176 +#ifdef CONFIG_GRKERNSEC
65177 + {
65178 + struct timespec timeval;
65179 + do_posix_clock_monotonic_gettime(&timeval);
65180 +
65181 + shp->shm_createtime = timeval.tv_sec;
65182 + }
65183 +#endif
65184 shp->shm_segsz = size;
65185 shp->shm_nattch = 0;
65186 shp->shm_file = file;
65187 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65188 return 0;
65189 }
65190
65191 +static struct ipc_ops shm_ops = {
65192 + .getnew = newseg,
65193 + .associate = shm_security,
65194 + .more_checks = shm_more_checks
65195 +};
65196 +
65197 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65198 {
65199 struct ipc_namespace *ns;
65200 - struct ipc_ops shm_ops;
65201 struct ipc_params shm_params;
65202
65203 ns = current->nsproxy->ipc_ns;
65204
65205 - shm_ops.getnew = newseg;
65206 - shm_ops.associate = shm_security;
65207 - shm_ops.more_checks = shm_more_checks;
65208 -
65209 shm_params.key = key;
65210 shm_params.flg = shmflg;
65211 shm_params.u.size = size;
65212 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65213 f_mode = FMODE_READ | FMODE_WRITE;
65214 }
65215 if (shmflg & SHM_EXEC) {
65216 +
65217 +#ifdef CONFIG_PAX_MPROTECT
65218 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
65219 + goto out;
65220 +#endif
65221 +
65222 prot |= PROT_EXEC;
65223 acc_mode |= S_IXUGO;
65224 }
65225 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65226 if (err)
65227 goto out_unlock;
65228
65229 +#ifdef CONFIG_GRKERNSEC
65230 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65231 + shp->shm_perm.cuid, shmid) ||
65232 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65233 + err = -EACCES;
65234 + goto out_unlock;
65235 + }
65236 +#endif
65237 +
65238 path = shp->shm_file->f_path;
65239 path_get(&path);
65240 shp->shm_nattch++;
65241 +#ifdef CONFIG_GRKERNSEC
65242 + shp->shm_lapid = current->pid;
65243 +#endif
65244 size = i_size_read(path.dentry->d_inode);
65245 shm_unlock(shp);
65246
65247 diff --git a/kernel/acct.c b/kernel/acct.c
65248 index 02e6167..54824f7 100644
65249 --- a/kernel/acct.c
65250 +++ b/kernel/acct.c
65251 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65252 */
65253 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65254 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65255 - file->f_op->write(file, (char *)&ac,
65256 + file->f_op->write(file, (char __force_user *)&ac,
65257 sizeof(acct_t), &file->f_pos);
65258 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65259 set_fs(fs);
65260 diff --git a/kernel/audit.c b/kernel/audit.c
65261 index 1c7f2c6..9ba5359 100644
65262 --- a/kernel/audit.c
65263 +++ b/kernel/audit.c
65264 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65265 3) suppressed due to audit_rate_limit
65266 4) suppressed due to audit_backlog_limit
65267 */
65268 -static atomic_t audit_lost = ATOMIC_INIT(0);
65269 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65270
65271 /* The netlink socket. */
65272 static struct sock *audit_sock;
65273 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65274 unsigned long now;
65275 int print;
65276
65277 - atomic_inc(&audit_lost);
65278 + atomic_inc_unchecked(&audit_lost);
65279
65280 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65281
65282 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65283 printk(KERN_WARNING
65284 "audit: audit_lost=%d audit_rate_limit=%d "
65285 "audit_backlog_limit=%d\n",
65286 - atomic_read(&audit_lost),
65287 + atomic_read_unchecked(&audit_lost),
65288 audit_rate_limit,
65289 audit_backlog_limit);
65290 audit_panic(message);
65291 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65292 status_set.pid = audit_pid;
65293 status_set.rate_limit = audit_rate_limit;
65294 status_set.backlog_limit = audit_backlog_limit;
65295 - status_set.lost = atomic_read(&audit_lost);
65296 + status_set.lost = atomic_read_unchecked(&audit_lost);
65297 status_set.backlog = skb_queue_len(&audit_skb_queue);
65298 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65299 &status_set, sizeof(status_set));
65300 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65301 index af1de0f..06dfe57 100644
65302 --- a/kernel/auditsc.c
65303 +++ b/kernel/auditsc.c
65304 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65305 }
65306
65307 /* global counter which is incremented every time something logs in */
65308 -static atomic_t session_id = ATOMIC_INIT(0);
65309 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65310
65311 /**
65312 * audit_set_loginuid - set current task's audit_context loginuid
65313 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65314 return -EPERM;
65315 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65316
65317 - sessionid = atomic_inc_return(&session_id);
65318 + sessionid = atomic_inc_return_unchecked(&session_id);
65319 if (context && context->in_syscall) {
65320 struct audit_buffer *ab;
65321
65322 diff --git a/kernel/capability.c b/kernel/capability.c
65323 index 3f1adb6..c564db0 100644
65324 --- a/kernel/capability.c
65325 +++ b/kernel/capability.c
65326 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65327 * before modification is attempted and the application
65328 * fails.
65329 */
65330 + if (tocopy > ARRAY_SIZE(kdata))
65331 + return -EFAULT;
65332 +
65333 if (copy_to_user(dataptr, kdata, tocopy
65334 * sizeof(struct __user_cap_data_struct))) {
65335 return -EFAULT;
65336 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65337 int ret;
65338
65339 rcu_read_lock();
65340 - ret = security_capable(__task_cred(t), ns, cap);
65341 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65342 + gr_task_is_capable(t, __task_cred(t), cap);
65343 rcu_read_unlock();
65344
65345 - return (ret == 0);
65346 + return ret;
65347 }
65348
65349 /**
65350 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65351 int ret;
65352
65353 rcu_read_lock();
65354 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
65355 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65356 rcu_read_unlock();
65357
65358 - return (ret == 0);
65359 + return ret;
65360 }
65361
65362 /**
65363 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65364 BUG();
65365 }
65366
65367 - if (security_capable(current_cred(), ns, cap) == 0) {
65368 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65369 current->flags |= PF_SUPERPRIV;
65370 return true;
65371 }
65372 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65373 }
65374 EXPORT_SYMBOL(ns_capable);
65375
65376 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65377 +{
65378 + if (unlikely(!cap_valid(cap))) {
65379 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65380 + BUG();
65381 + }
65382 +
65383 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65384 + current->flags |= PF_SUPERPRIV;
65385 + return true;
65386 + }
65387 + return false;
65388 +}
65389 +EXPORT_SYMBOL(ns_capable_nolog);
65390 +
65391 /**
65392 * capable - Determine if the current task has a superior capability in effect
65393 * @cap: The capability to be tested for
65394 @@ -408,6 +427,12 @@ bool capable(int cap)
65395 }
65396 EXPORT_SYMBOL(capable);
65397
65398 +bool capable_nolog(int cap)
65399 +{
65400 + return ns_capable_nolog(&init_user_ns, cap);
65401 +}
65402 +EXPORT_SYMBOL(capable_nolog);
65403 +
65404 /**
65405 * nsown_capable - Check superior capability to one's own user_ns
65406 * @cap: The capability in question
65407 diff --git a/kernel/compat.c b/kernel/compat.c
65408 index d2c67aa..a629b2e 100644
65409 --- a/kernel/compat.c
65410 +++ b/kernel/compat.c
65411 @@ -13,6 +13,7 @@
65412
65413 #include <linux/linkage.h>
65414 #include <linux/compat.h>
65415 +#include <linux/module.h>
65416 #include <linux/errno.h>
65417 #include <linux/time.h>
65418 #include <linux/signal.h>
65419 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65420 mm_segment_t oldfs;
65421 long ret;
65422
65423 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65424 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65425 oldfs = get_fs();
65426 set_fs(KERNEL_DS);
65427 ret = hrtimer_nanosleep_restart(restart);
65428 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65429 oldfs = get_fs();
65430 set_fs(KERNEL_DS);
65431 ret = hrtimer_nanosleep(&tu,
65432 - rmtp ? (struct timespec __user *)&rmt : NULL,
65433 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65434 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65435 set_fs(oldfs);
65436
65437 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65438 mm_segment_t old_fs = get_fs();
65439
65440 set_fs(KERNEL_DS);
65441 - ret = sys_sigpending((old_sigset_t __user *) &s);
65442 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65443 set_fs(old_fs);
65444 if (ret == 0)
65445 ret = put_user(s, set);
65446 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65447 mm_segment_t old_fs = get_fs();
65448
65449 set_fs(KERNEL_DS);
65450 - ret = sys_old_getrlimit(resource, &r);
65451 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65452 set_fs(old_fs);
65453
65454 if (!ret) {
65455 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65456 mm_segment_t old_fs = get_fs();
65457
65458 set_fs(KERNEL_DS);
65459 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65460 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65461 set_fs(old_fs);
65462
65463 if (ret)
65464 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65465 set_fs (KERNEL_DS);
65466 ret = sys_wait4(pid,
65467 (stat_addr ?
65468 - (unsigned int __user *) &status : NULL),
65469 - options, (struct rusage __user *) &r);
65470 + (unsigned int __force_user *) &status : NULL),
65471 + options, (struct rusage __force_user *) &r);
65472 set_fs (old_fs);
65473
65474 if (ret > 0) {
65475 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65476 memset(&info, 0, sizeof(info));
65477
65478 set_fs(KERNEL_DS);
65479 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65480 - uru ? (struct rusage __user *)&ru : NULL);
65481 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65482 + uru ? (struct rusage __force_user *)&ru : NULL);
65483 set_fs(old_fs);
65484
65485 if ((ret < 0) || (info.si_signo == 0))
65486 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65487 oldfs = get_fs();
65488 set_fs(KERNEL_DS);
65489 err = sys_timer_settime(timer_id, flags,
65490 - (struct itimerspec __user *) &newts,
65491 - (struct itimerspec __user *) &oldts);
65492 + (struct itimerspec __force_user *) &newts,
65493 + (struct itimerspec __force_user *) &oldts);
65494 set_fs(oldfs);
65495 if (!err && old && put_compat_itimerspec(old, &oldts))
65496 return -EFAULT;
65497 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65498 oldfs = get_fs();
65499 set_fs(KERNEL_DS);
65500 err = sys_timer_gettime(timer_id,
65501 - (struct itimerspec __user *) &ts);
65502 + (struct itimerspec __force_user *) &ts);
65503 set_fs(oldfs);
65504 if (!err && put_compat_itimerspec(setting, &ts))
65505 return -EFAULT;
65506 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65507 oldfs = get_fs();
65508 set_fs(KERNEL_DS);
65509 err = sys_clock_settime(which_clock,
65510 - (struct timespec __user *) &ts);
65511 + (struct timespec __force_user *) &ts);
65512 set_fs(oldfs);
65513 return err;
65514 }
65515 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65516 oldfs = get_fs();
65517 set_fs(KERNEL_DS);
65518 err = sys_clock_gettime(which_clock,
65519 - (struct timespec __user *) &ts);
65520 + (struct timespec __force_user *) &ts);
65521 set_fs(oldfs);
65522 if (!err && put_compat_timespec(&ts, tp))
65523 return -EFAULT;
65524 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65525
65526 oldfs = get_fs();
65527 set_fs(KERNEL_DS);
65528 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65529 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65530 set_fs(oldfs);
65531
65532 err = compat_put_timex(utp, &txc);
65533 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65534 oldfs = get_fs();
65535 set_fs(KERNEL_DS);
65536 err = sys_clock_getres(which_clock,
65537 - (struct timespec __user *) &ts);
65538 + (struct timespec __force_user *) &ts);
65539 set_fs(oldfs);
65540 if (!err && tp && put_compat_timespec(&ts, tp))
65541 return -EFAULT;
65542 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65543 long err;
65544 mm_segment_t oldfs;
65545 struct timespec tu;
65546 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65547 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65548
65549 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65550 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65551 oldfs = get_fs();
65552 set_fs(KERNEL_DS);
65553 err = clock_nanosleep_restart(restart);
65554 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65555 oldfs = get_fs();
65556 set_fs(KERNEL_DS);
65557 err = sys_clock_nanosleep(which_clock, flags,
65558 - (struct timespec __user *) &in,
65559 - (struct timespec __user *) &out);
65560 + (struct timespec __force_user *) &in,
65561 + (struct timespec __force_user *) &out);
65562 set_fs(oldfs);
65563
65564 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65565 diff --git a/kernel/configs.c b/kernel/configs.c
65566 index 42e8fa0..9e7406b 100644
65567 --- a/kernel/configs.c
65568 +++ b/kernel/configs.c
65569 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65570 struct proc_dir_entry *entry;
65571
65572 /* create the current config file */
65573 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65574 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65575 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65576 + &ikconfig_file_ops);
65577 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65578 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65579 + &ikconfig_file_ops);
65580 +#endif
65581 +#else
65582 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65583 &ikconfig_file_ops);
65584 +#endif
65585 +
65586 if (!entry)
65587 return -ENOMEM;
65588
65589 diff --git a/kernel/cred.c b/kernel/cred.c
65590 index e70683d..27761b6 100644
65591 --- a/kernel/cred.c
65592 +++ b/kernel/cred.c
65593 @@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
65594 validate_creds(cred);
65595 put_cred(cred);
65596 }
65597 +
65598 +#ifdef CONFIG_GRKERNSEC_SETXID
65599 + cred = (struct cred *) tsk->delayed_cred;
65600 + if (cred) {
65601 + tsk->delayed_cred = NULL;
65602 + validate_creds(cred);
65603 + put_cred(cred);
65604 + }
65605 +#endif
65606 }
65607
65608 /**
65609 @@ -473,7 +482,7 @@ error_put:
65610 * Always returns 0 thus allowing this function to be tail-called at the end
65611 * of, say, sys_setgid().
65612 */
65613 -int commit_creds(struct cred *new)
65614 +static int __commit_creds(struct cred *new)
65615 {
65616 struct task_struct *task = current;
65617 const struct cred *old = task->real_cred;
65618 @@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
65619
65620 get_cred(new); /* we will require a ref for the subj creds too */
65621
65622 + gr_set_role_label(task, new->uid, new->gid);
65623 +
65624 /* dumpability changes */
65625 if (old->euid != new->euid ||
65626 old->egid != new->egid ||
65627 @@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
65628 put_cred(old);
65629 return 0;
65630 }
65631 +#ifdef CONFIG_GRKERNSEC_SETXID
65632 +extern int set_user(struct cred *new);
65633 +
65634 +void gr_delayed_cred_worker(void)
65635 +{
65636 + const struct cred *new = current->delayed_cred;
65637 + struct cred *ncred;
65638 +
65639 + current->delayed_cred = NULL;
65640 +
65641 + if (current_uid() && new != NULL) {
65642 + // from doing get_cred on it when queueing this
65643 + put_cred(new);
65644 + return;
65645 + } else if (new == NULL)
65646 + return;
65647 +
65648 + ncred = prepare_creds();
65649 + if (!ncred)
65650 + goto die;
65651 + // uids
65652 + ncred->uid = new->uid;
65653 + ncred->euid = new->euid;
65654 + ncred->suid = new->suid;
65655 + ncred->fsuid = new->fsuid;
65656 + // gids
65657 + ncred->gid = new->gid;
65658 + ncred->egid = new->egid;
65659 + ncred->sgid = new->sgid;
65660 + ncred->fsgid = new->fsgid;
65661 + // groups
65662 + if (set_groups(ncred, new->group_info) < 0) {
65663 + abort_creds(ncred);
65664 + goto die;
65665 + }
65666 + // caps
65667 + ncred->securebits = new->securebits;
65668 + ncred->cap_inheritable = new->cap_inheritable;
65669 + ncred->cap_permitted = new->cap_permitted;
65670 + ncred->cap_effective = new->cap_effective;
65671 + ncred->cap_bset = new->cap_bset;
65672 +
65673 + if (set_user(ncred)) {
65674 + abort_creds(ncred);
65675 + goto die;
65676 + }
65677 +
65678 + // from doing get_cred on it when queueing this
65679 + put_cred(new);
65680 +
65681 + __commit_creds(ncred);
65682 + return;
65683 +die:
65684 + // from doing get_cred on it when queueing this
65685 + put_cred(new);
65686 + do_group_exit(SIGKILL);
65687 +}
65688 +#endif
65689 +
65690 +int commit_creds(struct cred *new)
65691 +{
65692 +#ifdef CONFIG_GRKERNSEC_SETXID
65693 + int ret;
65694 + int schedule_it = 0;
65695 + struct task_struct *t;
65696 +
65697 + /* we won't get called with tasklist_lock held for writing
65698 + and interrupts disabled as the cred struct in that case is
65699 + init_cred
65700 + */
65701 + if (grsec_enable_setxid && !current_is_single_threaded() &&
65702 + !current_uid() && new->uid) {
65703 + schedule_it = 1;
65704 + }
65705 + ret = __commit_creds(new);
65706 + if (schedule_it) {
65707 + rcu_read_lock();
65708 + read_lock(&tasklist_lock);
65709 + for (t = next_thread(current); t != current;
65710 + t = next_thread(t)) {
65711 + if (t->delayed_cred == NULL) {
65712 + t->delayed_cred = get_cred(new);
65713 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65714 + set_tsk_need_resched(t);
65715 + }
65716 + }
65717 + read_unlock(&tasklist_lock);
65718 + rcu_read_unlock();
65719 + }
65720 + return ret;
65721 +#else
65722 + return __commit_creds(new);
65723 +#endif
65724 +}
65725 +
65726 EXPORT_SYMBOL(commit_creds);
65727
65728 /**
65729 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65730 index 0557f24..1a00d9a 100644
65731 --- a/kernel/debug/debug_core.c
65732 +++ b/kernel/debug/debug_core.c
65733 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65734 */
65735 static atomic_t masters_in_kgdb;
65736 static atomic_t slaves_in_kgdb;
65737 -static atomic_t kgdb_break_tasklet_var;
65738 +static atomic_unchecked_t kgdb_break_tasklet_var;
65739 atomic_t kgdb_setting_breakpoint;
65740
65741 struct task_struct *kgdb_usethread;
65742 @@ -132,7 +132,7 @@ int kgdb_single_step;
65743 static pid_t kgdb_sstep_pid;
65744
65745 /* to keep track of the CPU which is doing the single stepping*/
65746 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65747 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65748
65749 /*
65750 * If you are debugging a problem where roundup (the collection of
65751 @@ -540,7 +540,7 @@ return_normal:
65752 * kernel will only try for the value of sstep_tries before
65753 * giving up and continuing on.
65754 */
65755 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65756 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65757 (kgdb_info[cpu].task &&
65758 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65759 atomic_set(&kgdb_active, -1);
65760 @@ -634,8 +634,8 @@ cpu_master_loop:
65761 }
65762
65763 kgdb_restore:
65764 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65765 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65766 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65767 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65768 if (kgdb_info[sstep_cpu].task)
65769 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65770 else
65771 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65772 static void kgdb_tasklet_bpt(unsigned long ing)
65773 {
65774 kgdb_breakpoint();
65775 - atomic_set(&kgdb_break_tasklet_var, 0);
65776 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65777 }
65778
65779 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65780
65781 void kgdb_schedule_breakpoint(void)
65782 {
65783 - if (atomic_read(&kgdb_break_tasklet_var) ||
65784 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65785 atomic_read(&kgdb_active) != -1 ||
65786 atomic_read(&kgdb_setting_breakpoint))
65787 return;
65788 - atomic_inc(&kgdb_break_tasklet_var);
65789 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65790 tasklet_schedule(&kgdb_tasklet_breakpoint);
65791 }
65792 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65793 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65794 index 67b847d..93834dd 100644
65795 --- a/kernel/debug/kdb/kdb_main.c
65796 +++ b/kernel/debug/kdb/kdb_main.c
65797 @@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65798 list_for_each_entry(mod, kdb_modules, list) {
65799
65800 kdb_printf("%-20s%8u 0x%p ", mod->name,
65801 - mod->core_size, (void *)mod);
65802 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65803 #ifdef CONFIG_MODULE_UNLOAD
65804 kdb_printf("%4ld ", module_refcount(mod));
65805 #endif
65806 @@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65807 kdb_printf(" (Loading)");
65808 else
65809 kdb_printf(" (Live)");
65810 - kdb_printf(" 0x%p", mod->module_core);
65811 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65812
65813 #ifdef CONFIG_MODULE_UNLOAD
65814 {
65815 diff --git a/kernel/events/core.c b/kernel/events/core.c
65816 index fd126f8..70b755b 100644
65817 --- a/kernel/events/core.c
65818 +++ b/kernel/events/core.c
65819 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65820 return 0;
65821 }
65822
65823 -static atomic64_t perf_event_id;
65824 +static atomic64_unchecked_t perf_event_id;
65825
65826 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65827 enum event_type_t event_type);
65828 @@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65829
65830 static inline u64 perf_event_count(struct perf_event *event)
65831 {
65832 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65833 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65834 }
65835
65836 static u64 perf_event_read(struct perf_event *event)
65837 @@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65838 mutex_lock(&event->child_mutex);
65839 total += perf_event_read(event);
65840 *enabled += event->total_time_enabled +
65841 - atomic64_read(&event->child_total_time_enabled);
65842 + atomic64_read_unchecked(&event->child_total_time_enabled);
65843 *running += event->total_time_running +
65844 - atomic64_read(&event->child_total_time_running);
65845 + atomic64_read_unchecked(&event->child_total_time_running);
65846
65847 list_for_each_entry(child, &event->child_list, child_list) {
65848 total += perf_event_read(child);
65849 @@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65850 userpg->offset -= local64_read(&event->hw.prev_count);
65851
65852 userpg->time_enabled = enabled +
65853 - atomic64_read(&event->child_total_time_enabled);
65854 + atomic64_read_unchecked(&event->child_total_time_enabled);
65855
65856 userpg->time_running = running +
65857 - atomic64_read(&event->child_total_time_running);
65858 + atomic64_read_unchecked(&event->child_total_time_running);
65859
65860 arch_perf_update_userpage(userpg, now);
65861
65862 @@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65863 values[n++] = perf_event_count(event);
65864 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65865 values[n++] = enabled +
65866 - atomic64_read(&event->child_total_time_enabled);
65867 + atomic64_read_unchecked(&event->child_total_time_enabled);
65868 }
65869 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65870 values[n++] = running +
65871 - atomic64_read(&event->child_total_time_running);
65872 + atomic64_read_unchecked(&event->child_total_time_running);
65873 }
65874 if (read_format & PERF_FORMAT_ID)
65875 values[n++] = primary_event_id(event);
65876 @@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65877 * need to add enough zero bytes after the string to handle
65878 * the 64bit alignment we do later.
65879 */
65880 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65881 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65882 if (!buf) {
65883 name = strncpy(tmp, "//enomem", sizeof(tmp));
65884 goto got_name;
65885 }
65886 - name = d_path(&file->f_path, buf, PATH_MAX);
65887 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65888 if (IS_ERR(name)) {
65889 name = strncpy(tmp, "//toolong", sizeof(tmp));
65890 goto got_name;
65891 @@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65892 event->parent = parent_event;
65893
65894 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65895 - event->id = atomic64_inc_return(&perf_event_id);
65896 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65897
65898 event->state = PERF_EVENT_STATE_INACTIVE;
65899
65900 @@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65901 /*
65902 * Add back the child's count to the parent's count:
65903 */
65904 - atomic64_add(child_val, &parent_event->child_count);
65905 - atomic64_add(child_event->total_time_enabled,
65906 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65907 + atomic64_add_unchecked(child_event->total_time_enabled,
65908 &parent_event->child_total_time_enabled);
65909 - atomic64_add(child_event->total_time_running,
65910 + atomic64_add_unchecked(child_event->total_time_running,
65911 &parent_event->child_total_time_running);
65912
65913 /*
65914 diff --git a/kernel/exit.c b/kernel/exit.c
65915 index 9d81012..d7911f1 100644
65916 --- a/kernel/exit.c
65917 +++ b/kernel/exit.c
65918 @@ -59,6 +59,10 @@
65919 #include <asm/pgtable.h>
65920 #include <asm/mmu_context.h>
65921
65922 +#ifdef CONFIG_GRKERNSEC
65923 +extern rwlock_t grsec_exec_file_lock;
65924 +#endif
65925 +
65926 static void exit_mm(struct task_struct * tsk);
65927
65928 static void __unhash_process(struct task_struct *p, bool group_dead)
65929 @@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65930 struct task_struct *leader;
65931 int zap_leader;
65932 repeat:
65933 +#ifdef CONFIG_NET
65934 + gr_del_task_from_ip_table(p);
65935 +#endif
65936 +
65937 /* don't need to get the RCU readlock here - the process is dead and
65938 * can't be modifying its own credentials. But shut RCU-lockdep up */
65939 rcu_read_lock();
65940 @@ -382,7 +390,7 @@ int allow_signal(int sig)
65941 * know it'll be handled, so that they don't get converted to
65942 * SIGKILL or just silently dropped.
65943 */
65944 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65945 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65946 recalc_sigpending();
65947 spin_unlock_irq(&current->sighand->siglock);
65948 return 0;
65949 @@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65950 vsnprintf(current->comm, sizeof(current->comm), name, args);
65951 va_end(args);
65952
65953 +#ifdef CONFIG_GRKERNSEC
65954 + write_lock(&grsec_exec_file_lock);
65955 + if (current->exec_file) {
65956 + fput(current->exec_file);
65957 + current->exec_file = NULL;
65958 + }
65959 + write_unlock(&grsec_exec_file_lock);
65960 +#endif
65961 +
65962 + gr_set_kernel_label(current);
65963 +
65964 /*
65965 * If we were started as result of loading a module, close all of the
65966 * user space pages. We don't need them, and if we didn't close them
65967 @@ -901,6 +920,8 @@ void do_exit(long code)
65968 struct task_struct *tsk = current;
65969 int group_dead;
65970
65971 + set_fs(USER_DS);
65972 +
65973 profile_task_exit(tsk);
65974
65975 WARN_ON(blk_needs_flush_plug(tsk));
65976 @@ -917,7 +938,6 @@ void do_exit(long code)
65977 * mm_release()->clear_child_tid() from writing to a user-controlled
65978 * kernel address.
65979 */
65980 - set_fs(USER_DS);
65981
65982 ptrace_event(PTRACE_EVENT_EXIT, code);
65983
65984 @@ -978,6 +998,9 @@ void do_exit(long code)
65985 tsk->exit_code = code;
65986 taskstats_exit(tsk, group_dead);
65987
65988 + gr_acl_handle_psacct(tsk, code);
65989 + gr_acl_handle_exit();
65990 +
65991 exit_mm(tsk);
65992
65993 if (group_dead)
65994 @@ -1094,7 +1117,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65995 * Take down every thread in the group. This is called by fatal signals
65996 * as well as by sys_exit_group (below).
65997 */
65998 -void
65999 +__noreturn void
66000 do_group_exit(int exit_code)
66001 {
66002 struct signal_struct *sig = current->signal;
66003 diff --git a/kernel/fork.c b/kernel/fork.c
66004 index 8163333..aee97f3 100644
66005 --- a/kernel/fork.c
66006 +++ b/kernel/fork.c
66007 @@ -274,19 +274,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66008 }
66009
66010 err = arch_dup_task_struct(tsk, orig);
66011 - if (err)
66012 - goto out;
66013
66014 + /*
66015 + * We defer looking at err, because we will need this setup
66016 + * for the clean up path to work correctly.
66017 + */
66018 tsk->stack = ti;
66019 -
66020 setup_thread_stack(tsk, orig);
66021 +
66022 + if (err)
66023 + goto out;
66024 +
66025 clear_user_return_notifier(tsk);
66026 clear_tsk_need_resched(tsk);
66027 stackend = end_of_stack(tsk);
66028 *stackend = STACK_END_MAGIC; /* for overflow detection */
66029
66030 #ifdef CONFIG_CC_STACKPROTECTOR
66031 - tsk->stack_canary = get_random_int();
66032 + tsk->stack_canary = pax_get_random_long();
66033 #endif
66034
66035 /*
66036 @@ -310,13 +315,78 @@ out:
66037 }
66038
66039 #ifdef CONFIG_MMU
66040 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
66041 +{
66042 + struct vm_area_struct *tmp;
66043 + unsigned long charge;
66044 + struct mempolicy *pol;
66045 + struct file *file;
66046 +
66047 + charge = 0;
66048 + if (mpnt->vm_flags & VM_ACCOUNT) {
66049 + unsigned long len;
66050 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66051 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
66052 + goto fail_nomem;
66053 + charge = len;
66054 + }
66055 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66056 + if (!tmp)
66057 + goto fail_nomem;
66058 + *tmp = *mpnt;
66059 + tmp->vm_mm = mm;
66060 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
66061 + pol = mpol_dup(vma_policy(mpnt));
66062 + if (IS_ERR(pol))
66063 + goto fail_nomem_policy;
66064 + vma_set_policy(tmp, pol);
66065 + if (anon_vma_fork(tmp, mpnt))
66066 + goto fail_nomem_anon_vma_fork;
66067 + tmp->vm_flags &= ~VM_LOCKED;
66068 + tmp->vm_next = tmp->vm_prev = NULL;
66069 + tmp->vm_mirror = NULL;
66070 + file = tmp->vm_file;
66071 + if (file) {
66072 + struct inode *inode = file->f_path.dentry->d_inode;
66073 + struct address_space *mapping = file->f_mapping;
66074 +
66075 + get_file(file);
66076 + if (tmp->vm_flags & VM_DENYWRITE)
66077 + atomic_dec(&inode->i_writecount);
66078 + mutex_lock(&mapping->i_mmap_mutex);
66079 + if (tmp->vm_flags & VM_SHARED)
66080 + mapping->i_mmap_writable++;
66081 + flush_dcache_mmap_lock(mapping);
66082 + /* insert tmp into the share list, just after mpnt */
66083 + vma_prio_tree_add(tmp, mpnt);
66084 + flush_dcache_mmap_unlock(mapping);
66085 + mutex_unlock(&mapping->i_mmap_mutex);
66086 + }
66087 +
66088 + /*
66089 + * Clear hugetlb-related page reserves for children. This only
66090 + * affects MAP_PRIVATE mappings. Faults generated by the child
66091 + * are not guaranteed to succeed, even if read-only
66092 + */
66093 + if (is_vm_hugetlb_page(tmp))
66094 + reset_vma_resv_huge_pages(tmp);
66095 +
66096 + return tmp;
66097 +
66098 +fail_nomem_anon_vma_fork:
66099 + mpol_put(pol);
66100 +fail_nomem_policy:
66101 + kmem_cache_free(vm_area_cachep, tmp);
66102 +fail_nomem:
66103 + vm_unacct_memory(charge);
66104 + return NULL;
66105 +}
66106 +
66107 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66108 {
66109 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66110 struct rb_node **rb_link, *rb_parent;
66111 int retval;
66112 - unsigned long charge;
66113 - struct mempolicy *pol;
66114
66115 down_write(&oldmm->mmap_sem);
66116 flush_cache_dup_mm(oldmm);
66117 @@ -328,8 +398,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66118 mm->locked_vm = 0;
66119 mm->mmap = NULL;
66120 mm->mmap_cache = NULL;
66121 - mm->free_area_cache = oldmm->mmap_base;
66122 - mm->cached_hole_size = ~0UL;
66123 + mm->free_area_cache = oldmm->free_area_cache;
66124 + mm->cached_hole_size = oldmm->cached_hole_size;
66125 mm->map_count = 0;
66126 cpumask_clear(mm_cpumask(mm));
66127 mm->mm_rb = RB_ROOT;
66128 @@ -345,8 +415,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66129
66130 prev = NULL;
66131 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66132 - struct file *file;
66133 -
66134 if (mpnt->vm_flags & VM_DONTCOPY) {
66135 long pages = vma_pages(mpnt);
66136 mm->total_vm -= pages;
66137 @@ -354,54 +422,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66138 -pages);
66139 continue;
66140 }
66141 - charge = 0;
66142 - if (mpnt->vm_flags & VM_ACCOUNT) {
66143 - unsigned long len;
66144 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66145 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
66146 - goto fail_nomem;
66147 - charge = len;
66148 + tmp = dup_vma(mm, oldmm, mpnt);
66149 + if (!tmp) {
66150 + retval = -ENOMEM;
66151 + goto out;
66152 }
66153 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66154 - if (!tmp)
66155 - goto fail_nomem;
66156 - *tmp = *mpnt;
66157 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
66158 - pol = mpol_dup(vma_policy(mpnt));
66159 - retval = PTR_ERR(pol);
66160 - if (IS_ERR(pol))
66161 - goto fail_nomem_policy;
66162 - vma_set_policy(tmp, pol);
66163 - tmp->vm_mm = mm;
66164 - if (anon_vma_fork(tmp, mpnt))
66165 - goto fail_nomem_anon_vma_fork;
66166 - tmp->vm_flags &= ~VM_LOCKED;
66167 - tmp->vm_next = tmp->vm_prev = NULL;
66168 - file = tmp->vm_file;
66169 - if (file) {
66170 - struct inode *inode = file->f_path.dentry->d_inode;
66171 - struct address_space *mapping = file->f_mapping;
66172 -
66173 - get_file(file);
66174 - if (tmp->vm_flags & VM_DENYWRITE)
66175 - atomic_dec(&inode->i_writecount);
66176 - mutex_lock(&mapping->i_mmap_mutex);
66177 - if (tmp->vm_flags & VM_SHARED)
66178 - mapping->i_mmap_writable++;
66179 - flush_dcache_mmap_lock(mapping);
66180 - /* insert tmp into the share list, just after mpnt */
66181 - vma_prio_tree_add(tmp, mpnt);
66182 - flush_dcache_mmap_unlock(mapping);
66183 - mutex_unlock(&mapping->i_mmap_mutex);
66184 - }
66185 -
66186 - /*
66187 - * Clear hugetlb-related page reserves for children. This only
66188 - * affects MAP_PRIVATE mappings. Faults generated by the child
66189 - * are not guaranteed to succeed, even if read-only
66190 - */
66191 - if (is_vm_hugetlb_page(tmp))
66192 - reset_vma_resv_huge_pages(tmp);
66193
66194 /*
66195 * Link in the new vma and copy the page table entries.
66196 @@ -424,6 +449,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66197 if (retval)
66198 goto out;
66199 }
66200 +
66201 +#ifdef CONFIG_PAX_SEGMEXEC
66202 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66203 + struct vm_area_struct *mpnt_m;
66204 +
66205 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66206 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66207 +
66208 + if (!mpnt->vm_mirror)
66209 + continue;
66210 +
66211 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66212 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66213 + mpnt->vm_mirror = mpnt_m;
66214 + } else {
66215 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66216 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66217 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66218 + mpnt->vm_mirror->vm_mirror = mpnt;
66219 + }
66220 + }
66221 + BUG_ON(mpnt_m);
66222 + }
66223 +#endif
66224 +
66225 /* a new mm has just been created */
66226 arch_dup_mmap(oldmm, mm);
66227 retval = 0;
66228 @@ -432,14 +482,6 @@ out:
66229 flush_tlb_mm(oldmm);
66230 up_write(&oldmm->mmap_sem);
66231 return retval;
66232 -fail_nomem_anon_vma_fork:
66233 - mpol_put(pol);
66234 -fail_nomem_policy:
66235 - kmem_cache_free(vm_area_cachep, tmp);
66236 -fail_nomem:
66237 - retval = -ENOMEM;
66238 - vm_unacct_memory(charge);
66239 - goto out;
66240 }
66241
66242 static inline int mm_alloc_pgd(struct mm_struct *mm)
66243 @@ -676,8 +718,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66244 return ERR_PTR(err);
66245
66246 mm = get_task_mm(task);
66247 - if (mm && mm != current->mm &&
66248 - !ptrace_may_access(task, mode)) {
66249 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66250 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66251 mmput(mm);
66252 mm = ERR_PTR(-EACCES);
66253 }
66254 @@ -899,13 +941,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66255 spin_unlock(&fs->lock);
66256 return -EAGAIN;
66257 }
66258 - fs->users++;
66259 + atomic_inc(&fs->users);
66260 spin_unlock(&fs->lock);
66261 return 0;
66262 }
66263 tsk->fs = copy_fs_struct(fs);
66264 if (!tsk->fs)
66265 return -ENOMEM;
66266 + gr_set_chroot_entries(tsk, &tsk->fs->root);
66267 return 0;
66268 }
66269
66270 @@ -1172,6 +1215,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66271 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66272 #endif
66273 retval = -EAGAIN;
66274 +
66275 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66276 +
66277 if (atomic_read(&p->real_cred->user->processes) >=
66278 task_rlimit(p, RLIMIT_NPROC)) {
66279 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66280 @@ -1392,6 +1438,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66281 /* Need tasklist lock for parent etc handling! */
66282 write_lock_irq(&tasklist_lock);
66283
66284 + /* synchronizes with gr_set_acls() */
66285 + gr_copy_label(p);
66286 +
66287 /* CLONE_PARENT re-uses the old parent */
66288 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
66289 p->real_parent = current->real_parent;
66290 @@ -1502,6 +1551,8 @@ bad_fork_cleanup_count:
66291 bad_fork_free:
66292 free_task(p);
66293 fork_out:
66294 + gr_log_forkfail(retval);
66295 +
66296 return ERR_PTR(retval);
66297 }
66298
66299 @@ -1602,6 +1653,8 @@ long do_fork(unsigned long clone_flags,
66300 if (clone_flags & CLONE_PARENT_SETTID)
66301 put_user(nr, parent_tidptr);
66302
66303 + gr_handle_brute_check();
66304 +
66305 if (clone_flags & CLONE_VFORK) {
66306 p->vfork_done = &vfork;
66307 init_completion(&vfork);
66308 @@ -1700,7 +1753,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66309 return 0;
66310
66311 /* don't need lock here; in the worst case we'll do useless copy */
66312 - if (fs->users == 1)
66313 + if (atomic_read(&fs->users) == 1)
66314 return 0;
66315
66316 *new_fsp = copy_fs_struct(fs);
66317 @@ -1789,7 +1842,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66318 fs = current->fs;
66319 spin_lock(&fs->lock);
66320 current->fs = new_fs;
66321 - if (--fs->users)
66322 + gr_set_chroot_entries(current, &current->fs->root);
66323 + if (atomic_dec_return(&fs->users))
66324 new_fs = NULL;
66325 else
66326 new_fs = fs;
66327 diff --git a/kernel/futex.c b/kernel/futex.c
66328 index e2b0fb9..db818ac 100644
66329 --- a/kernel/futex.c
66330 +++ b/kernel/futex.c
66331 @@ -54,6 +54,7 @@
66332 #include <linux/mount.h>
66333 #include <linux/pagemap.h>
66334 #include <linux/syscalls.h>
66335 +#include <linux/ptrace.h>
66336 #include <linux/signal.h>
66337 #include <linux/export.h>
66338 #include <linux/magic.h>
66339 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66340 struct page *page, *page_head;
66341 int err, ro = 0;
66342
66343 +#ifdef CONFIG_PAX_SEGMEXEC
66344 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66345 + return -EFAULT;
66346 +#endif
66347 +
66348 /*
66349 * The futex address must be "naturally" aligned.
66350 */
66351 @@ -2711,6 +2717,7 @@ static int __init futex_init(void)
66352 {
66353 u32 curval;
66354 int i;
66355 + mm_segment_t oldfs;
66356
66357 /*
66358 * This will fail and we want it. Some arch implementations do
66359 @@ -2722,8 +2729,11 @@ static int __init futex_init(void)
66360 * implementation, the non-functional ones will return
66361 * -ENOSYS.
66362 */
66363 + oldfs = get_fs();
66364 + set_fs(USER_DS);
66365 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66366 futex_cmpxchg_enabled = 1;
66367 + set_fs(oldfs);
66368
66369 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66370 plist_head_init(&futex_queues[i].chain);
66371 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66372 index 9b22d03..6295b62 100644
66373 --- a/kernel/gcov/base.c
66374 +++ b/kernel/gcov/base.c
66375 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66376 }
66377
66378 #ifdef CONFIG_MODULES
66379 -static inline int within(void *addr, void *start, unsigned long size)
66380 -{
66381 - return ((addr >= start) && (addr < start + size));
66382 -}
66383 -
66384 /* Update list and generate events when modules are unloaded. */
66385 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66386 void *data)
66387 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66388 prev = NULL;
66389 /* Remove entries located in module from linked list. */
66390 for (info = gcov_info_head; info; info = info->next) {
66391 - if (within(info, mod->module_core, mod->core_size)) {
66392 + if (within_module_core_rw((unsigned long)info, mod)) {
66393 if (prev)
66394 prev->next = info->next;
66395 else
66396 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66397 index 6db7a5e..25b6648 100644
66398 --- a/kernel/hrtimer.c
66399 +++ b/kernel/hrtimer.c
66400 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
66401 local_irq_restore(flags);
66402 }
66403
66404 -static void run_hrtimer_softirq(struct softirq_action *h)
66405 +static void run_hrtimer_softirq(void)
66406 {
66407 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
66408
66409 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66410 index 4304919..408c4c0 100644
66411 --- a/kernel/jump_label.c
66412 +++ b/kernel/jump_label.c
66413 @@ -13,6 +13,7 @@
66414 #include <linux/sort.h>
66415 #include <linux/err.h>
66416 #include <linux/static_key.h>
66417 +#include <linux/mm.h>
66418
66419 #ifdef HAVE_JUMP_LABEL
66420
66421 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66422
66423 size = (((unsigned long)stop - (unsigned long)start)
66424 / sizeof(struct jump_entry));
66425 + pax_open_kernel();
66426 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66427 + pax_close_kernel();
66428 }
66429
66430 static void jump_label_update(struct static_key *key, int enable);
66431 @@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66432 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66433 struct jump_entry *iter;
66434
66435 + pax_open_kernel();
66436 for (iter = iter_start; iter < iter_stop; iter++) {
66437 if (within_module_init(iter->code, mod))
66438 iter->code = 0;
66439 }
66440 + pax_close_kernel();
66441 }
66442
66443 static int
66444 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66445 index 079f1d3..4e80e69 100644
66446 --- a/kernel/kallsyms.c
66447 +++ b/kernel/kallsyms.c
66448 @@ -11,6 +11,9 @@
66449 * Changed the compression method from stem compression to "table lookup"
66450 * compression (see scripts/kallsyms.c for a more complete description)
66451 */
66452 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66453 +#define __INCLUDED_BY_HIDESYM 1
66454 +#endif
66455 #include <linux/kallsyms.h>
66456 #include <linux/module.h>
66457 #include <linux/init.h>
66458 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66459
66460 static inline int is_kernel_inittext(unsigned long addr)
66461 {
66462 + if (system_state != SYSTEM_BOOTING)
66463 + return 0;
66464 +
66465 if (addr >= (unsigned long)_sinittext
66466 && addr <= (unsigned long)_einittext)
66467 return 1;
66468 return 0;
66469 }
66470
66471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66472 +#ifdef CONFIG_MODULES
66473 +static inline int is_module_text(unsigned long addr)
66474 +{
66475 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66476 + return 1;
66477 +
66478 + addr = ktla_ktva(addr);
66479 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66480 +}
66481 +#else
66482 +static inline int is_module_text(unsigned long addr)
66483 +{
66484 + return 0;
66485 +}
66486 +#endif
66487 +#endif
66488 +
66489 static inline int is_kernel_text(unsigned long addr)
66490 {
66491 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66492 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66493
66494 static inline int is_kernel(unsigned long addr)
66495 {
66496 +
66497 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66498 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66499 + return 1;
66500 +
66501 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66502 +#else
66503 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66504 +#endif
66505 +
66506 return 1;
66507 return in_gate_area_no_mm(addr);
66508 }
66509
66510 static int is_ksym_addr(unsigned long addr)
66511 {
66512 +
66513 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66514 + if (is_module_text(addr))
66515 + return 0;
66516 +#endif
66517 +
66518 if (all_var)
66519 return is_kernel(addr);
66520
66521 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66522
66523 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66524 {
66525 - iter->name[0] = '\0';
66526 iter->nameoff = get_symbol_offset(new_pos);
66527 iter->pos = new_pos;
66528 }
66529 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66530 {
66531 struct kallsym_iter *iter = m->private;
66532
66533 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66534 + if (current_uid())
66535 + return 0;
66536 +#endif
66537 +
66538 /* Some debugging symbols have no name. Ignore them. */
66539 if (!iter->name[0])
66540 return 0;
66541 @@ -515,11 +558,22 @@ static int s_show(struct seq_file *m, void *p)
66542 */
66543 type = iter->exported ? toupper(iter->type) :
66544 tolower(iter->type);
66545 +
66546 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66547 + seq_printf(m, "%pP %c %s\t[%s]\n", (void *)iter->value,
66548 + type, iter->name, iter->module_name);
66549 +#else
66550 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
66551 type, iter->name, iter->module_name);
66552 +#endif
66553 } else
66554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66555 + seq_printf(m, "%pP %c %s\n", (void *)iter->value,
66556 + iter->type, iter->name);
66557 +#else
66558 seq_printf(m, "%pK %c %s\n", (void *)iter->value,
66559 iter->type, iter->name);
66560 +#endif
66561 return 0;
66562 }
66563
66564 @@ -540,7 +594,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66565 struct kallsym_iter *iter;
66566 int ret;
66567
66568 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66569 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66570 if (!iter)
66571 return -ENOMEM;
66572 reset_iter(iter, 0);
66573 diff --git a/kernel/kexec.c b/kernel/kexec.c
66574 index 4e2e472..cd0c7ae 100644
66575 --- a/kernel/kexec.c
66576 +++ b/kernel/kexec.c
66577 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66578 unsigned long flags)
66579 {
66580 struct compat_kexec_segment in;
66581 - struct kexec_segment out, __user *ksegments;
66582 + struct kexec_segment out;
66583 + struct kexec_segment __user *ksegments;
66584 unsigned long i, result;
66585
66586 /* Don't allow clients that don't understand the native
66587 diff --git a/kernel/kmod.c b/kernel/kmod.c
66588 index 05698a7..a4c1e3a 100644
66589 --- a/kernel/kmod.c
66590 +++ b/kernel/kmod.c
66591 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
66592 kfree(info->argv);
66593 }
66594
66595 -static int call_modprobe(char *module_name, int wait)
66596 +static int call_modprobe(char *module_name, char *module_param, int wait)
66597 {
66598 static char *envp[] = {
66599 "HOME=/",
66600 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
66601 NULL
66602 };
66603
66604 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
66605 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
66606 if (!argv)
66607 goto out;
66608
66609 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
66610 argv[1] = "-q";
66611 argv[2] = "--";
66612 argv[3] = module_name; /* check free_modprobe_argv() */
66613 - argv[4] = NULL;
66614 + argv[4] = module_param;
66615 + argv[5] = NULL;
66616
66617 return call_usermodehelper_fns(modprobe_path, argv, envp,
66618 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
66619 @@ -112,9 +113,8 @@ out:
66620 * If module auto-loading support is disabled then this function
66621 * becomes a no-operation.
66622 */
66623 -int __request_module(bool wait, const char *fmt, ...)
66624 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66625 {
66626 - va_list args;
66627 char module_name[MODULE_NAME_LEN];
66628 unsigned int max_modprobes;
66629 int ret;
66630 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
66631 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66632 static int kmod_loop_msg;
66633
66634 - va_start(args, fmt);
66635 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66636 - va_end(args);
66637 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66638 if (ret >= MODULE_NAME_LEN)
66639 return -ENAMETOOLONG;
66640
66641 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
66642 if (ret)
66643 return ret;
66644
66645 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66646 + if (!current_uid()) {
66647 + /* hack to workaround consolekit/udisks stupidity */
66648 + read_lock(&tasklist_lock);
66649 + if (!strcmp(current->comm, "mount") &&
66650 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66651 + read_unlock(&tasklist_lock);
66652 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66653 + return -EPERM;
66654 + }
66655 + read_unlock(&tasklist_lock);
66656 + }
66657 +#endif
66658 +
66659 /* If modprobe needs a service that is in a module, we get a recursive
66660 * loop. Limit the number of running kmod threads to max_threads/2 or
66661 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66662 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
66663
66664 trace_module_request(module_name, wait, _RET_IP_);
66665
66666 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66667 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66668
66669 atomic_dec(&kmod_concurrent);
66670 return ret;
66671 }
66672 +
66673 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66674 +{
66675 + va_list args;
66676 + int ret;
66677 +
66678 + va_start(args, fmt);
66679 + ret = ____request_module(wait, module_param, fmt, args);
66680 + va_end(args);
66681 +
66682 + return ret;
66683 +}
66684 +
66685 +int __request_module(bool wait, const char *fmt, ...)
66686 +{
66687 + va_list args;
66688 + int ret;
66689 +
66690 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66691 + if (current_uid()) {
66692 + char module_param[MODULE_NAME_LEN];
66693 +
66694 + memset(module_param, 0, sizeof(module_param));
66695 +
66696 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66697 +
66698 + va_start(args, fmt);
66699 + ret = ____request_module(wait, module_param, fmt, args);
66700 + va_end(args);
66701 +
66702 + return ret;
66703 + }
66704 +#endif
66705 +
66706 + va_start(args, fmt);
66707 + ret = ____request_module(wait, NULL, fmt, args);
66708 + va_end(args);
66709 +
66710 + return ret;
66711 +}
66712 +
66713 EXPORT_SYMBOL(__request_module);
66714 #endif /* CONFIG_MODULES */
66715
66716 @@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
66717 *
66718 * Thus the __user pointer cast is valid here.
66719 */
66720 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66721 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66722
66723 /*
66724 * If ret is 0, either ____call_usermodehelper failed and the
66725 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66726 index c62b854..cb67968 100644
66727 --- a/kernel/kprobes.c
66728 +++ b/kernel/kprobes.c
66729 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66730 * kernel image and loaded module images reside. This is required
66731 * so x86_64 can correctly handle the %rip-relative fixups.
66732 */
66733 - kip->insns = module_alloc(PAGE_SIZE);
66734 + kip->insns = module_alloc_exec(PAGE_SIZE);
66735 if (!kip->insns) {
66736 kfree(kip);
66737 return NULL;
66738 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66739 */
66740 if (!list_is_singular(&kip->list)) {
66741 list_del(&kip->list);
66742 - module_free(NULL, kip->insns);
66743 + module_free_exec(NULL, kip->insns);
66744 kfree(kip);
66745 }
66746 return 1;
66747 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66748 {
66749 int i, err = 0;
66750 unsigned long offset = 0, size = 0;
66751 - char *modname, namebuf[128];
66752 + char *modname, namebuf[KSYM_NAME_LEN];
66753 const char *symbol_name;
66754 void *addr;
66755 struct kprobe_blackpoint *kb;
66756 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66757 const char *sym = NULL;
66758 unsigned int i = *(loff_t *) v;
66759 unsigned long offset = 0;
66760 - char *modname, namebuf[128];
66761 + char *modname, namebuf[KSYM_NAME_LEN];
66762
66763 head = &kprobe_table[i];
66764 preempt_disable();
66765 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66766 index 4e316e1..5501eef 100644
66767 --- a/kernel/ksysfs.c
66768 +++ b/kernel/ksysfs.c
66769 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66770 {
66771 if (count+1 > UEVENT_HELPER_PATH_LEN)
66772 return -ENOENT;
66773 + if (!capable(CAP_SYS_ADMIN))
66774 + return -EPERM;
66775 memcpy(uevent_helper, buf, count);
66776 uevent_helper[count] = '\0';
66777 if (count && uevent_helper[count-1] == '\n')
66778 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66779 index ea9ee45..67ebc8f 100644
66780 --- a/kernel/lockdep.c
66781 +++ b/kernel/lockdep.c
66782 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
66783 end = (unsigned long) &_end,
66784 addr = (unsigned long) obj;
66785
66786 +#ifdef CONFIG_PAX_KERNEXEC
66787 + start = ktla_ktva(start);
66788 +#endif
66789 +
66790 /*
66791 * static variable?
66792 */
66793 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66794 if (!static_obj(lock->key)) {
66795 debug_locks_off();
66796 printk("INFO: trying to register non-static key.\n");
66797 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66798 printk("the code is fine but needs lockdep annotation.\n");
66799 printk("turning off the locking correctness validator.\n");
66800 dump_stack();
66801 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66802 if (!class)
66803 return 0;
66804 }
66805 - atomic_inc((atomic_t *)&class->ops);
66806 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66807 if (very_verbose(class)) {
66808 printk("\nacquire class [%p] %s", class->key, class->name);
66809 if (class->name_version > 1)
66810 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66811 index 91c32a0..b2c71c5 100644
66812 --- a/kernel/lockdep_proc.c
66813 +++ b/kernel/lockdep_proc.c
66814 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66815
66816 static void print_name(struct seq_file *m, struct lock_class *class)
66817 {
66818 - char str[128];
66819 + char str[KSYM_NAME_LEN];
66820 const char *name = class->name;
66821
66822 if (!name) {
66823 diff --git a/kernel/module.c b/kernel/module.c
66824 index 78ac6ec..e87db0e 100644
66825 --- a/kernel/module.c
66826 +++ b/kernel/module.c
66827 @@ -58,6 +58,7 @@
66828 #include <linux/jump_label.h>
66829 #include <linux/pfn.h>
66830 #include <linux/bsearch.h>
66831 +#include <linux/grsecurity.h>
66832
66833 #define CREATE_TRACE_POINTS
66834 #include <trace/events/module.h>
66835 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66836
66837 /* Bounds of module allocation, for speeding __module_address.
66838 * Protected by module_mutex. */
66839 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66840 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66841 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66842
66843 int register_module_notifier(struct notifier_block * nb)
66844 {
66845 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66846 return true;
66847
66848 list_for_each_entry_rcu(mod, &modules, list) {
66849 - struct symsearch arr[] = {
66850 + struct symsearch modarr[] = {
66851 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66852 NOT_GPL_ONLY, false },
66853 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66854 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66855 #endif
66856 };
66857
66858 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66859 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66860 return true;
66861 }
66862 return false;
66863 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66864 static int percpu_modalloc(struct module *mod,
66865 unsigned long size, unsigned long align)
66866 {
66867 - if (align > PAGE_SIZE) {
66868 + if (align-1 >= PAGE_SIZE) {
66869 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66870 mod->name, align, PAGE_SIZE);
66871 align = PAGE_SIZE;
66872 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66873 static ssize_t show_coresize(struct module_attribute *mattr,
66874 struct module_kobject *mk, char *buffer)
66875 {
66876 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66877 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66878 }
66879
66880 static struct module_attribute modinfo_coresize =
66881 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66882 static ssize_t show_initsize(struct module_attribute *mattr,
66883 struct module_kobject *mk, char *buffer)
66884 {
66885 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66886 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66887 }
66888
66889 static struct module_attribute modinfo_initsize =
66890 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66891 */
66892 #ifdef CONFIG_SYSFS
66893
66894 -#ifdef CONFIG_KALLSYMS
66895 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66896 static inline bool sect_empty(const Elf_Shdr *sect)
66897 {
66898 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66899 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66900
66901 static void unset_module_core_ro_nx(struct module *mod)
66902 {
66903 - set_page_attributes(mod->module_core + mod->core_text_size,
66904 - mod->module_core + mod->core_size,
66905 + set_page_attributes(mod->module_core_rw,
66906 + mod->module_core_rw + mod->core_size_rw,
66907 set_memory_x);
66908 - set_page_attributes(mod->module_core,
66909 - mod->module_core + mod->core_ro_size,
66910 + set_page_attributes(mod->module_core_rx,
66911 + mod->module_core_rx + mod->core_size_rx,
66912 set_memory_rw);
66913 }
66914
66915 static void unset_module_init_ro_nx(struct module *mod)
66916 {
66917 - set_page_attributes(mod->module_init + mod->init_text_size,
66918 - mod->module_init + mod->init_size,
66919 + set_page_attributes(mod->module_init_rw,
66920 + mod->module_init_rw + mod->init_size_rw,
66921 set_memory_x);
66922 - set_page_attributes(mod->module_init,
66923 - mod->module_init + mod->init_ro_size,
66924 + set_page_attributes(mod->module_init_rx,
66925 + mod->module_init_rx + mod->init_size_rx,
66926 set_memory_rw);
66927 }
66928
66929 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66930
66931 mutex_lock(&module_mutex);
66932 list_for_each_entry_rcu(mod, &modules, list) {
66933 - if ((mod->module_core) && (mod->core_text_size)) {
66934 - set_page_attributes(mod->module_core,
66935 - mod->module_core + mod->core_text_size,
66936 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66937 + set_page_attributes(mod->module_core_rx,
66938 + mod->module_core_rx + mod->core_size_rx,
66939 set_memory_rw);
66940 }
66941 - if ((mod->module_init) && (mod->init_text_size)) {
66942 - set_page_attributes(mod->module_init,
66943 - mod->module_init + mod->init_text_size,
66944 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66945 + set_page_attributes(mod->module_init_rx,
66946 + mod->module_init_rx + mod->init_size_rx,
66947 set_memory_rw);
66948 }
66949 }
66950 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66951
66952 mutex_lock(&module_mutex);
66953 list_for_each_entry_rcu(mod, &modules, list) {
66954 - if ((mod->module_core) && (mod->core_text_size)) {
66955 - set_page_attributes(mod->module_core,
66956 - mod->module_core + mod->core_text_size,
66957 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66958 + set_page_attributes(mod->module_core_rx,
66959 + mod->module_core_rx + mod->core_size_rx,
66960 set_memory_ro);
66961 }
66962 - if ((mod->module_init) && (mod->init_text_size)) {
66963 - set_page_attributes(mod->module_init,
66964 - mod->module_init + mod->init_text_size,
66965 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66966 + set_page_attributes(mod->module_init_rx,
66967 + mod->module_init_rx + mod->init_size_rx,
66968 set_memory_ro);
66969 }
66970 }
66971 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66972
66973 /* This may be NULL, but that's OK */
66974 unset_module_init_ro_nx(mod);
66975 - module_free(mod, mod->module_init);
66976 + module_free(mod, mod->module_init_rw);
66977 + module_free_exec(mod, mod->module_init_rx);
66978 kfree(mod->args);
66979 percpu_modfree(mod);
66980
66981 /* Free lock-classes: */
66982 - lockdep_free_key_range(mod->module_core, mod->core_size);
66983 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66984 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66985
66986 /* Finally, free the core (containing the module structure) */
66987 unset_module_core_ro_nx(mod);
66988 - module_free(mod, mod->module_core);
66989 + module_free_exec(mod, mod->module_core_rx);
66990 + module_free(mod, mod->module_core_rw);
66991
66992 #ifdef CONFIG_MPU
66993 update_protections(current->mm);
66994 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66995 int ret = 0;
66996 const struct kernel_symbol *ksym;
66997
66998 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66999 + int is_fs_load = 0;
67000 + int register_filesystem_found = 0;
67001 + char *p;
67002 +
67003 + p = strstr(mod->args, "grsec_modharden_fs");
67004 + if (p) {
67005 + char *endptr = p + strlen("grsec_modharden_fs");
67006 + /* copy \0 as well */
67007 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67008 + is_fs_load = 1;
67009 + }
67010 +#endif
67011 +
67012 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67013 const char *name = info->strtab + sym[i].st_name;
67014
67015 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67016 + /* it's a real shame this will never get ripped and copied
67017 + upstream! ;(
67018 + */
67019 + if (is_fs_load && !strcmp(name, "register_filesystem"))
67020 + register_filesystem_found = 1;
67021 +#endif
67022 +
67023 switch (sym[i].st_shndx) {
67024 case SHN_COMMON:
67025 /* We compiled with -fno-common. These are not
67026 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67027 ksym = resolve_symbol_wait(mod, info, name);
67028 /* Ok if resolved. */
67029 if (ksym && !IS_ERR(ksym)) {
67030 + pax_open_kernel();
67031 sym[i].st_value = ksym->value;
67032 + pax_close_kernel();
67033 break;
67034 }
67035
67036 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67037 secbase = (unsigned long)mod_percpu(mod);
67038 else
67039 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67040 + pax_open_kernel();
67041 sym[i].st_value += secbase;
67042 + pax_close_kernel();
67043 break;
67044 }
67045 }
67046
67047 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67048 + if (is_fs_load && !register_filesystem_found) {
67049 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67050 + ret = -EPERM;
67051 + }
67052 +#endif
67053 +
67054 return ret;
67055 }
67056
67057 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67058 || s->sh_entsize != ~0UL
67059 || strstarts(sname, ".init"))
67060 continue;
67061 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67062 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67063 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67064 + else
67065 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67066 pr_debug("\t%s\n", sname);
67067 }
67068 - switch (m) {
67069 - case 0: /* executable */
67070 - mod->core_size = debug_align(mod->core_size);
67071 - mod->core_text_size = mod->core_size;
67072 - break;
67073 - case 1: /* RO: text and ro-data */
67074 - mod->core_size = debug_align(mod->core_size);
67075 - mod->core_ro_size = mod->core_size;
67076 - break;
67077 - case 3: /* whole core */
67078 - mod->core_size = debug_align(mod->core_size);
67079 - break;
67080 - }
67081 }
67082
67083 pr_debug("Init section allocation order:\n");
67084 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67085 || s->sh_entsize != ~0UL
67086 || !strstarts(sname, ".init"))
67087 continue;
67088 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67089 - | INIT_OFFSET_MASK);
67090 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67091 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67092 + else
67093 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67094 + s->sh_entsize |= INIT_OFFSET_MASK;
67095 pr_debug("\t%s\n", sname);
67096 }
67097 - switch (m) {
67098 - case 0: /* executable */
67099 - mod->init_size = debug_align(mod->init_size);
67100 - mod->init_text_size = mod->init_size;
67101 - break;
67102 - case 1: /* RO: text and ro-data */
67103 - mod->init_size = debug_align(mod->init_size);
67104 - mod->init_ro_size = mod->init_size;
67105 - break;
67106 - case 3: /* whole init */
67107 - mod->init_size = debug_align(mod->init_size);
67108 - break;
67109 - }
67110 }
67111 }
67112
67113 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67114
67115 /* Put symbol section at end of init part of module. */
67116 symsect->sh_flags |= SHF_ALLOC;
67117 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67118 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67119 info->index.sym) | INIT_OFFSET_MASK;
67120 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67121
67122 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67123 }
67124
67125 /* Append room for core symbols at end of core part. */
67126 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67127 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67128 - mod->core_size += strtab_size;
67129 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67130 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67131 + mod->core_size_rx += strtab_size;
67132
67133 /* Put string table section at end of init part of module. */
67134 strsect->sh_flags |= SHF_ALLOC;
67135 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67136 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67137 info->index.str) | INIT_OFFSET_MASK;
67138 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67139 }
67140 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67141 /* Make sure we get permanent strtab: don't use info->strtab. */
67142 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67143
67144 + pax_open_kernel();
67145 +
67146 /* Set types up while we still have access to sections. */
67147 for (i = 0; i < mod->num_symtab; i++)
67148 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67149
67150 - mod->core_symtab = dst = mod->module_core + info->symoffs;
67151 - mod->core_strtab = s = mod->module_core + info->stroffs;
67152 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67153 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67154 src = mod->symtab;
67155 *dst = *src;
67156 *s++ = 0;
67157 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67158 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
67159 }
67160 mod->core_num_syms = ndst;
67161 +
67162 + pax_close_kernel();
67163 }
67164 #else
67165 static inline void layout_symtab(struct module *mod, struct load_info *info)
67166 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
67167 return size == 0 ? NULL : vmalloc_exec(size);
67168 }
67169
67170 -static void *module_alloc_update_bounds(unsigned long size)
67171 +static void *module_alloc_update_bounds_rw(unsigned long size)
67172 {
67173 void *ret = module_alloc(size);
67174
67175 if (ret) {
67176 mutex_lock(&module_mutex);
67177 /* Update module bounds. */
67178 - if ((unsigned long)ret < module_addr_min)
67179 - module_addr_min = (unsigned long)ret;
67180 - if ((unsigned long)ret + size > module_addr_max)
67181 - module_addr_max = (unsigned long)ret + size;
67182 + if ((unsigned long)ret < module_addr_min_rw)
67183 + module_addr_min_rw = (unsigned long)ret;
67184 + if ((unsigned long)ret + size > module_addr_max_rw)
67185 + module_addr_max_rw = (unsigned long)ret + size;
67186 + mutex_unlock(&module_mutex);
67187 + }
67188 + return ret;
67189 +}
67190 +
67191 +static void *module_alloc_update_bounds_rx(unsigned long size)
67192 +{
67193 + void *ret = module_alloc_exec(size);
67194 +
67195 + if (ret) {
67196 + mutex_lock(&module_mutex);
67197 + /* Update module bounds. */
67198 + if ((unsigned long)ret < module_addr_min_rx)
67199 + module_addr_min_rx = (unsigned long)ret;
67200 + if ((unsigned long)ret + size > module_addr_max_rx)
67201 + module_addr_max_rx = (unsigned long)ret + size;
67202 mutex_unlock(&module_mutex);
67203 }
67204 return ret;
67205 @@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
67206 static int check_modinfo(struct module *mod, struct load_info *info)
67207 {
67208 const char *modmagic = get_modinfo(info, "vermagic");
67209 + const char *license = get_modinfo(info, "license");
67210 int err;
67211
67212 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67213 + if (!license || !license_is_gpl_compatible(license))
67214 + return -ENOEXEC;
67215 +#endif
67216 +
67217 /* This is allowed: modprobe --force will invalidate it. */
67218 if (!modmagic) {
67219 err = try_to_force_load(mod, "bad vermagic");
67220 @@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67221 }
67222
67223 /* Set up license info based on the info section */
67224 - set_license(mod, get_modinfo(info, "license"));
67225 + set_license(mod, license);
67226
67227 return 0;
67228 }
67229 @@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
67230 void *ptr;
67231
67232 /* Do the allocs. */
67233 - ptr = module_alloc_update_bounds(mod->core_size);
67234 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67235 /*
67236 * The pointer to this block is stored in the module structure
67237 * which is inside the block. Just mark it as not being a
67238 @@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
67239 if (!ptr)
67240 return -ENOMEM;
67241
67242 - memset(ptr, 0, mod->core_size);
67243 - mod->module_core = ptr;
67244 + memset(ptr, 0, mod->core_size_rw);
67245 + mod->module_core_rw = ptr;
67246
67247 - ptr = module_alloc_update_bounds(mod->init_size);
67248 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67249 /*
67250 * The pointer to this block is stored in the module structure
67251 * which is inside the block. This block doesn't need to be
67252 * scanned as it contains data and code that will be freed
67253 * after the module is initialized.
67254 */
67255 - kmemleak_ignore(ptr);
67256 - if (!ptr && mod->init_size) {
67257 - module_free(mod, mod->module_core);
67258 + kmemleak_not_leak(ptr);
67259 + if (!ptr && mod->init_size_rw) {
67260 + module_free(mod, mod->module_core_rw);
67261 return -ENOMEM;
67262 }
67263 - memset(ptr, 0, mod->init_size);
67264 - mod->module_init = ptr;
67265 + memset(ptr, 0, mod->init_size_rw);
67266 + mod->module_init_rw = ptr;
67267 +
67268 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67269 + kmemleak_not_leak(ptr);
67270 + if (!ptr) {
67271 + module_free(mod, mod->module_init_rw);
67272 + module_free(mod, mod->module_core_rw);
67273 + return -ENOMEM;
67274 + }
67275 +
67276 + pax_open_kernel();
67277 + memset(ptr, 0, mod->core_size_rx);
67278 + pax_close_kernel();
67279 + mod->module_core_rx = ptr;
67280 +
67281 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67282 + kmemleak_not_leak(ptr);
67283 + if (!ptr && mod->init_size_rx) {
67284 + module_free_exec(mod, mod->module_core_rx);
67285 + module_free(mod, mod->module_init_rw);
67286 + module_free(mod, mod->module_core_rw);
67287 + return -ENOMEM;
67288 + }
67289 +
67290 + pax_open_kernel();
67291 + memset(ptr, 0, mod->init_size_rx);
67292 + pax_close_kernel();
67293 + mod->module_init_rx = ptr;
67294
67295 /* Transfer each section which specifies SHF_ALLOC */
67296 pr_debug("final section addresses:\n");
67297 @@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
67298 if (!(shdr->sh_flags & SHF_ALLOC))
67299 continue;
67300
67301 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67302 - dest = mod->module_init
67303 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67304 - else
67305 - dest = mod->module_core + shdr->sh_entsize;
67306 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67307 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67308 + dest = mod->module_init_rw
67309 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67310 + else
67311 + dest = mod->module_init_rx
67312 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67313 + } else {
67314 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67315 + dest = mod->module_core_rw + shdr->sh_entsize;
67316 + else
67317 + dest = mod->module_core_rx + shdr->sh_entsize;
67318 + }
67319 +
67320 + if (shdr->sh_type != SHT_NOBITS) {
67321 +
67322 +#ifdef CONFIG_PAX_KERNEXEC
67323 +#ifdef CONFIG_X86_64
67324 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67325 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67326 +#endif
67327 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67328 + pax_open_kernel();
67329 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67330 + pax_close_kernel();
67331 + } else
67332 +#endif
67333
67334 - if (shdr->sh_type != SHT_NOBITS)
67335 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67336 + }
67337 /* Update sh_addr to point to copy in image. */
67338 - shdr->sh_addr = (unsigned long)dest;
67339 +
67340 +#ifdef CONFIG_PAX_KERNEXEC
67341 + if (shdr->sh_flags & SHF_EXECINSTR)
67342 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67343 + else
67344 +#endif
67345 +
67346 + shdr->sh_addr = (unsigned long)dest;
67347 pr_debug("\t0x%lx %s\n",
67348 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67349 }
67350 @@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
67351 * Do it before processing of module parameters, so the module
67352 * can provide parameter accessor functions of its own.
67353 */
67354 - if (mod->module_init)
67355 - flush_icache_range((unsigned long)mod->module_init,
67356 - (unsigned long)mod->module_init
67357 - + mod->init_size);
67358 - flush_icache_range((unsigned long)mod->module_core,
67359 - (unsigned long)mod->module_core + mod->core_size);
67360 + if (mod->module_init_rx)
67361 + flush_icache_range((unsigned long)mod->module_init_rx,
67362 + (unsigned long)mod->module_init_rx
67363 + + mod->init_size_rx);
67364 + flush_icache_range((unsigned long)mod->module_core_rx,
67365 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67366
67367 set_fs(old_fs);
67368 }
67369 @@ -2833,8 +2933,10 @@ out:
67370 static void module_deallocate(struct module *mod, struct load_info *info)
67371 {
67372 percpu_modfree(mod);
67373 - module_free(mod, mod->module_init);
67374 - module_free(mod, mod->module_core);
67375 + module_free_exec(mod, mod->module_init_rx);
67376 + module_free_exec(mod, mod->module_core_rx);
67377 + module_free(mod, mod->module_init_rw);
67378 + module_free(mod, mod->module_core_rw);
67379 }
67380
67381 int __weak module_finalize(const Elf_Ehdr *hdr,
67382 @@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
67383 if (err)
67384 goto free_unload;
67385
67386 + /* Now copy in args */
67387 + mod->args = strndup_user(uargs, ~0UL >> 1);
67388 + if (IS_ERR(mod->args)) {
67389 + err = PTR_ERR(mod->args);
67390 + goto free_unload;
67391 + }
67392 +
67393 /* Set up MODINFO_ATTR fields */
67394 setup_modinfo(mod, &info);
67395
67396 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67397 + {
67398 + char *p, *p2;
67399 +
67400 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67401 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67402 + err = -EPERM;
67403 + goto free_modinfo;
67404 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67405 + p += strlen("grsec_modharden_normal");
67406 + p2 = strstr(p, "_");
67407 + if (p2) {
67408 + *p2 = '\0';
67409 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67410 + *p2 = '_';
67411 + }
67412 + err = -EPERM;
67413 + goto free_modinfo;
67414 + }
67415 + }
67416 +#endif
67417 +
67418 /* Fix up syms, so that st_value is a pointer to location. */
67419 err = simplify_symbols(mod, &info);
67420 if (err < 0)
67421 @@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
67422
67423 flush_module_icache(mod);
67424
67425 - /* Now copy in args */
67426 - mod->args = strndup_user(uargs, ~0UL >> 1);
67427 - if (IS_ERR(mod->args)) {
67428 - err = PTR_ERR(mod->args);
67429 - goto free_arch_cleanup;
67430 - }
67431 -
67432 /* Mark state as coming so strong_try_module_get() ignores us. */
67433 mod->state = MODULE_STATE_COMING;
67434
67435 @@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
67436 unlock:
67437 mutex_unlock(&module_mutex);
67438 synchronize_sched();
67439 - kfree(mod->args);
67440 - free_arch_cleanup:
67441 module_arch_cleanup(mod);
67442 free_modinfo:
67443 free_modinfo(mod);
67444 + kfree(mod->args);
67445 free_unload:
67446 module_unload_free(mod);
67447 free_module:
67448 @@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67449 MODULE_STATE_COMING, mod);
67450
67451 /* Set RO and NX regions for core */
67452 - set_section_ro_nx(mod->module_core,
67453 - mod->core_text_size,
67454 - mod->core_ro_size,
67455 - mod->core_size);
67456 + set_section_ro_nx(mod->module_core_rx,
67457 + mod->core_size_rx,
67458 + mod->core_size_rx,
67459 + mod->core_size_rx);
67460
67461 /* Set RO and NX regions for init */
67462 - set_section_ro_nx(mod->module_init,
67463 - mod->init_text_size,
67464 - mod->init_ro_size,
67465 - mod->init_size);
67466 + set_section_ro_nx(mod->module_init_rx,
67467 + mod->init_size_rx,
67468 + mod->init_size_rx,
67469 + mod->init_size_rx);
67470
67471 do_mod_ctors(mod);
67472 /* Start the module */
67473 @@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67474 mod->strtab = mod->core_strtab;
67475 #endif
67476 unset_module_init_ro_nx(mod);
67477 - module_free(mod, mod->module_init);
67478 - mod->module_init = NULL;
67479 - mod->init_size = 0;
67480 - mod->init_ro_size = 0;
67481 - mod->init_text_size = 0;
67482 + module_free(mod, mod->module_init_rw);
67483 + module_free_exec(mod, mod->module_init_rx);
67484 + mod->module_init_rw = NULL;
67485 + mod->module_init_rx = NULL;
67486 + mod->init_size_rw = 0;
67487 + mod->init_size_rx = 0;
67488 mutex_unlock(&module_mutex);
67489
67490 return 0;
67491 @@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
67492 unsigned long nextval;
67493
67494 /* At worse, next value is at end of module */
67495 - if (within_module_init(addr, mod))
67496 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67497 + if (within_module_init_rx(addr, mod))
67498 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67499 + else if (within_module_init_rw(addr, mod))
67500 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67501 + else if (within_module_core_rx(addr, mod))
67502 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67503 + else if (within_module_core_rw(addr, mod))
67504 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67505 else
67506 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67507 + return NULL;
67508
67509 /* Scan for closest preceding symbol, and next symbol. (ELF
67510 starts real symbols at 1). */
67511 @@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
67512 char buf[8];
67513
67514 seq_printf(m, "%s %u",
67515 - mod->name, mod->init_size + mod->core_size);
67516 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67517 print_unload_info(m, mod);
67518
67519 /* Informative for users. */
67520 @@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
67521 mod->state == MODULE_STATE_COMING ? "Loading":
67522 "Live");
67523 /* Used by oprofile and other similar tools. */
67524 - seq_printf(m, " 0x%pK", mod->module_core);
67525 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67526
67527 /* Taints info */
67528 if (mod->taints)
67529 @@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
67530
67531 static int __init proc_modules_init(void)
67532 {
67533 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67534 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67535 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67536 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67537 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67538 +#else
67539 proc_create("modules", 0, NULL, &proc_modules_operations);
67540 +#endif
67541 +#else
67542 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67543 +#endif
67544 return 0;
67545 }
67546 module_init(proc_modules_init);
67547 @@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
67548 {
67549 struct module *mod;
67550
67551 - if (addr < module_addr_min || addr > module_addr_max)
67552 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67553 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67554 return NULL;
67555
67556 list_for_each_entry_rcu(mod, &modules, list)
67557 - if (within_module_core(addr, mod)
67558 - || within_module_init(addr, mod))
67559 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67560 return mod;
67561 return NULL;
67562 }
67563 @@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
67564 */
67565 struct module *__module_text_address(unsigned long addr)
67566 {
67567 - struct module *mod = __module_address(addr);
67568 + struct module *mod;
67569 +
67570 +#ifdef CONFIG_X86_32
67571 + addr = ktla_ktva(addr);
67572 +#endif
67573 +
67574 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67575 + return NULL;
67576 +
67577 + mod = __module_address(addr);
67578 +
67579 if (mod) {
67580 /* Make sure it's within the text section. */
67581 - if (!within(addr, mod->module_init, mod->init_text_size)
67582 - && !within(addr, mod->module_core, mod->core_text_size))
67583 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67584 mod = NULL;
67585 }
67586 return mod;
67587 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67588 index 7e3443f..b2a1e6b 100644
67589 --- a/kernel/mutex-debug.c
67590 +++ b/kernel/mutex-debug.c
67591 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67592 }
67593
67594 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67595 - struct thread_info *ti)
67596 + struct task_struct *task)
67597 {
67598 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67599
67600 /* Mark the current thread as blocked on the lock: */
67601 - ti->task->blocked_on = waiter;
67602 + task->blocked_on = waiter;
67603 }
67604
67605 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67606 - struct thread_info *ti)
67607 + struct task_struct *task)
67608 {
67609 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67610 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67611 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67612 - ti->task->blocked_on = NULL;
67613 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67614 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67615 + task->blocked_on = NULL;
67616
67617 list_del_init(&waiter->list);
67618 waiter->task = NULL;
67619 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67620 index 0799fd3..d06ae3b 100644
67621 --- a/kernel/mutex-debug.h
67622 +++ b/kernel/mutex-debug.h
67623 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67624 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67625 extern void debug_mutex_add_waiter(struct mutex *lock,
67626 struct mutex_waiter *waiter,
67627 - struct thread_info *ti);
67628 + struct task_struct *task);
67629 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67630 - struct thread_info *ti);
67631 + struct task_struct *task);
67632 extern void debug_mutex_unlock(struct mutex *lock);
67633 extern void debug_mutex_init(struct mutex *lock, const char *name,
67634 struct lock_class_key *key);
67635 diff --git a/kernel/mutex.c b/kernel/mutex.c
67636 index a307cc9..27fd2e9 100644
67637 --- a/kernel/mutex.c
67638 +++ b/kernel/mutex.c
67639 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67640 spin_lock_mutex(&lock->wait_lock, flags);
67641
67642 debug_mutex_lock_common(lock, &waiter);
67643 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67644 + debug_mutex_add_waiter(lock, &waiter, task);
67645
67646 /* add waiting tasks to the end of the waitqueue (FIFO): */
67647 list_add_tail(&waiter.list, &lock->wait_list);
67648 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67649 * TASK_UNINTERRUPTIBLE case.)
67650 */
67651 if (unlikely(signal_pending_state(state, task))) {
67652 - mutex_remove_waiter(lock, &waiter,
67653 - task_thread_info(task));
67654 + mutex_remove_waiter(lock, &waiter, task);
67655 mutex_release(&lock->dep_map, 1, ip);
67656 spin_unlock_mutex(&lock->wait_lock, flags);
67657
67658 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67659 done:
67660 lock_acquired(&lock->dep_map, ip);
67661 /* got the lock - rejoice! */
67662 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67663 + mutex_remove_waiter(lock, &waiter, task);
67664 mutex_set_owner(lock);
67665
67666 /* set it to 0 if there are no waiters left: */
67667 diff --git a/kernel/panic.c b/kernel/panic.c
67668 index 9ed023b..e49543e 100644
67669 --- a/kernel/panic.c
67670 +++ b/kernel/panic.c
67671 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67672 const char *board;
67673
67674 printk(KERN_WARNING "------------[ cut here ]------------\n");
67675 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67676 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67677 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67678 if (board)
67679 printk(KERN_WARNING "Hardware name: %s\n", board);
67680 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67681 */
67682 void __stack_chk_fail(void)
67683 {
67684 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67685 + dump_stack();
67686 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67687 __builtin_return_address(0));
67688 }
67689 EXPORT_SYMBOL(__stack_chk_fail);
67690 diff --git a/kernel/pid.c b/kernel/pid.c
67691 index 9f08dfa..6765c40 100644
67692 --- a/kernel/pid.c
67693 +++ b/kernel/pid.c
67694 @@ -33,6 +33,7 @@
67695 #include <linux/rculist.h>
67696 #include <linux/bootmem.h>
67697 #include <linux/hash.h>
67698 +#include <linux/security.h>
67699 #include <linux/pid_namespace.h>
67700 #include <linux/init_task.h>
67701 #include <linux/syscalls.h>
67702 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67703
67704 int pid_max = PID_MAX_DEFAULT;
67705
67706 -#define RESERVED_PIDS 300
67707 +#define RESERVED_PIDS 500
67708
67709 int pid_max_min = RESERVED_PIDS + 1;
67710 int pid_max_max = PID_MAX_LIMIT;
67711 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67712 */
67713 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67714 {
67715 + struct task_struct *task;
67716 +
67717 rcu_lockdep_assert(rcu_read_lock_held(),
67718 "find_task_by_pid_ns() needs rcu_read_lock()"
67719 " protection");
67720 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67721 +
67722 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67723 +
67724 + if (gr_pid_is_chrooted(task))
67725 + return NULL;
67726 +
67727 + return task;
67728 }
67729
67730 struct task_struct *find_task_by_vpid(pid_t vnr)
67731 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67732 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67733 }
67734
67735 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67736 +{
67737 + rcu_lockdep_assert(rcu_read_lock_held(),
67738 + "find_task_by_pid_ns() needs rcu_read_lock()"
67739 + " protection");
67740 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67741 +}
67742 +
67743 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67744 {
67745 struct pid *pid;
67746 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67747 index 125cb67..a4d1c30 100644
67748 --- a/kernel/posix-cpu-timers.c
67749 +++ b/kernel/posix-cpu-timers.c
67750 @@ -6,6 +6,7 @@
67751 #include <linux/posix-timers.h>
67752 #include <linux/errno.h>
67753 #include <linux/math64.h>
67754 +#include <linux/security.h>
67755 #include <asm/uaccess.h>
67756 #include <linux/kernel_stat.h>
67757 #include <trace/events/timer.h>
67758 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67759
67760 static __init int init_posix_cpu_timers(void)
67761 {
67762 - struct k_clock process = {
67763 + static struct k_clock process = {
67764 .clock_getres = process_cpu_clock_getres,
67765 .clock_get = process_cpu_clock_get,
67766 .timer_create = process_cpu_timer_create,
67767 .nsleep = process_cpu_nsleep,
67768 .nsleep_restart = process_cpu_nsleep_restart,
67769 };
67770 - struct k_clock thread = {
67771 + static struct k_clock thread = {
67772 .clock_getres = thread_cpu_clock_getres,
67773 .clock_get = thread_cpu_clock_get,
67774 .timer_create = thread_cpu_timer_create,
67775 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67776 index 69185ae..cc2847a 100644
67777 --- a/kernel/posix-timers.c
67778 +++ b/kernel/posix-timers.c
67779 @@ -43,6 +43,7 @@
67780 #include <linux/idr.h>
67781 #include <linux/posix-clock.h>
67782 #include <linux/posix-timers.h>
67783 +#include <linux/grsecurity.h>
67784 #include <linux/syscalls.h>
67785 #include <linux/wait.h>
67786 #include <linux/workqueue.h>
67787 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67788 * which we beg off on and pass to do_sys_settimeofday().
67789 */
67790
67791 -static struct k_clock posix_clocks[MAX_CLOCKS];
67792 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67793
67794 /*
67795 * These ones are defined below.
67796 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67797 */
67798 static __init int init_posix_timers(void)
67799 {
67800 - struct k_clock clock_realtime = {
67801 + static struct k_clock clock_realtime = {
67802 .clock_getres = hrtimer_get_res,
67803 .clock_get = posix_clock_realtime_get,
67804 .clock_set = posix_clock_realtime_set,
67805 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67806 .timer_get = common_timer_get,
67807 .timer_del = common_timer_del,
67808 };
67809 - struct k_clock clock_monotonic = {
67810 + static struct k_clock clock_monotonic = {
67811 .clock_getres = hrtimer_get_res,
67812 .clock_get = posix_ktime_get_ts,
67813 .nsleep = common_nsleep,
67814 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67815 .timer_get = common_timer_get,
67816 .timer_del = common_timer_del,
67817 };
67818 - struct k_clock clock_monotonic_raw = {
67819 + static struct k_clock clock_monotonic_raw = {
67820 .clock_getres = hrtimer_get_res,
67821 .clock_get = posix_get_monotonic_raw,
67822 };
67823 - struct k_clock clock_realtime_coarse = {
67824 + static struct k_clock clock_realtime_coarse = {
67825 .clock_getres = posix_get_coarse_res,
67826 .clock_get = posix_get_realtime_coarse,
67827 };
67828 - struct k_clock clock_monotonic_coarse = {
67829 + static struct k_clock clock_monotonic_coarse = {
67830 .clock_getres = posix_get_coarse_res,
67831 .clock_get = posix_get_monotonic_coarse,
67832 };
67833 - struct k_clock clock_boottime = {
67834 + static struct k_clock clock_boottime = {
67835 .clock_getres = hrtimer_get_res,
67836 .clock_get = posix_get_boottime,
67837 .nsleep = common_nsleep,
67838 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67839 return;
67840 }
67841
67842 - posix_clocks[clock_id] = *new_clock;
67843 + posix_clocks[clock_id] = new_clock;
67844 }
67845 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67846
67847 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67848 return (id & CLOCKFD_MASK) == CLOCKFD ?
67849 &clock_posix_dynamic : &clock_posix_cpu;
67850
67851 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67852 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67853 return NULL;
67854 - return &posix_clocks[id];
67855 + return posix_clocks[id];
67856 }
67857
67858 static int common_timer_create(struct k_itimer *new_timer)
67859 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67860 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67861 return -EFAULT;
67862
67863 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67864 + have their clock_set fptr set to a nosettime dummy function
67865 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67866 + call common_clock_set, which calls do_sys_settimeofday, which
67867 + we hook
67868 + */
67869 +
67870 return kc->clock_set(which_clock, &new_tp);
67871 }
67872
67873 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67874 index d523593..68197a4 100644
67875 --- a/kernel/power/poweroff.c
67876 +++ b/kernel/power/poweroff.c
67877 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67878 .enable_mask = SYSRQ_ENABLE_BOOT,
67879 };
67880
67881 -static int pm_sysrq_init(void)
67882 +static int __init pm_sysrq_init(void)
67883 {
67884 register_sysrq_key('o', &sysrq_poweroff_op);
67885 return 0;
67886 diff --git a/kernel/power/process.c b/kernel/power/process.c
67887 index 19db29f..33b52b6 100644
67888 --- a/kernel/power/process.c
67889 +++ b/kernel/power/process.c
67890 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67891 u64 elapsed_csecs64;
67892 unsigned int elapsed_csecs;
67893 bool wakeup = false;
67894 + bool timedout = false;
67895
67896 do_gettimeofday(&start);
67897
67898 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67899
67900 while (true) {
67901 todo = 0;
67902 + if (time_after(jiffies, end_time))
67903 + timedout = true;
67904 read_lock(&tasklist_lock);
67905 do_each_thread(g, p) {
67906 if (p == current || !freeze_task(p))
67907 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67908 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67909 * transition can't race with task state testing here.
67910 */
67911 - if (!task_is_stopped_or_traced(p) &&
67912 - !freezer_should_skip(p))
67913 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67914 todo++;
67915 + if (timedout) {
67916 + printk(KERN_ERR "Task refusing to freeze:\n");
67917 + sched_show_task(p);
67918 + }
67919 + }
67920 } while_each_thread(g, p);
67921 read_unlock(&tasklist_lock);
67922
67923 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67924 todo += wq_busy;
67925 }
67926
67927 - if (!todo || time_after(jiffies, end_time))
67928 + if (!todo || timedout)
67929 break;
67930
67931 if (pm_wakeup_pending()) {
67932 diff --git a/kernel/printk.c b/kernel/printk.c
67933 index b663c2c..1d6ba7a 100644
67934 --- a/kernel/printk.c
67935 +++ b/kernel/printk.c
67936 @@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67937 if (from_file && type != SYSLOG_ACTION_OPEN)
67938 return 0;
67939
67940 +#ifdef CONFIG_GRKERNSEC_DMESG
67941 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67942 + return -EPERM;
67943 +#endif
67944 +
67945 if (syslog_action_restricted(type)) {
67946 if (capable(CAP_SYSLOG))
67947 return 0;
67948 diff --git a/kernel/profile.c b/kernel/profile.c
67949 index 76b8e77..a2930e8 100644
67950 --- a/kernel/profile.c
67951 +++ b/kernel/profile.c
67952 @@ -39,7 +39,7 @@ struct profile_hit {
67953 /* Oprofile timer tick hook */
67954 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67955
67956 -static atomic_t *prof_buffer;
67957 +static atomic_unchecked_t *prof_buffer;
67958 static unsigned long prof_len, prof_shift;
67959
67960 int prof_on __read_mostly;
67961 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67962 hits[i].pc = 0;
67963 continue;
67964 }
67965 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67966 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67967 hits[i].hits = hits[i].pc = 0;
67968 }
67969 }
67970 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67971 * Add the current hit(s) and flush the write-queue out
67972 * to the global buffer:
67973 */
67974 - atomic_add(nr_hits, &prof_buffer[pc]);
67975 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67976 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67977 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67978 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67979 hits[i].pc = hits[i].hits = 0;
67980 }
67981 out:
67982 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67983 {
67984 unsigned long pc;
67985 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67986 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67987 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67988 }
67989 #endif /* !CONFIG_SMP */
67990
67991 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67992 return -EFAULT;
67993 buf++; p++; count--; read++;
67994 }
67995 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67996 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67997 if (copy_to_user(buf, (void *)pnt, count))
67998 return -EFAULT;
67999 read += count;
68000 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
68001 }
68002 #endif
68003 profile_discard_flip_buffers();
68004 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68005 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68006 return count;
68007 }
68008
68009 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68010 index ee8d49b..bd3d790 100644
68011 --- a/kernel/ptrace.c
68012 +++ b/kernel/ptrace.c
68013 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68014
68015 if (seize)
68016 flags |= PT_SEIZED;
68017 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68018 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
68019 flags |= PT_PTRACE_CAP;
68020 task->ptrace = flags;
68021
68022 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68023 break;
68024 return -EIO;
68025 }
68026 - if (copy_to_user(dst, buf, retval))
68027 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68028 return -EFAULT;
68029 copied += retval;
68030 src += retval;
68031 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
68032 bool seized = child->ptrace & PT_SEIZED;
68033 int ret = -EIO;
68034 siginfo_t siginfo, *si;
68035 - void __user *datavp = (void __user *) data;
68036 + void __user *datavp = (__force void __user *) data;
68037 unsigned long __user *datalp = datavp;
68038 unsigned long flags;
68039
68040 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68041 goto out;
68042 }
68043
68044 + if (gr_handle_ptrace(child, request)) {
68045 + ret = -EPERM;
68046 + goto out_put_task_struct;
68047 + }
68048 +
68049 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68050 ret = ptrace_attach(child, request, addr, data);
68051 /*
68052 * Some architectures need to do book-keeping after
68053 * a ptrace attach.
68054 */
68055 - if (!ret)
68056 + if (!ret) {
68057 arch_ptrace_attach(child);
68058 + gr_audit_ptrace(child);
68059 + }
68060 goto out_put_task_struct;
68061 }
68062
68063 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68064 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68065 if (copied != sizeof(tmp))
68066 return -EIO;
68067 - return put_user(tmp, (unsigned long __user *)data);
68068 + return put_user(tmp, (__force unsigned long __user *)data);
68069 }
68070
68071 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68072 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68073 goto out;
68074 }
68075
68076 + if (gr_handle_ptrace(child, request)) {
68077 + ret = -EPERM;
68078 + goto out_put_task_struct;
68079 + }
68080 +
68081 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68082 ret = ptrace_attach(child, request, addr, data);
68083 /*
68084 * Some architectures need to do book-keeping after
68085 * a ptrace attach.
68086 */
68087 - if (!ret)
68088 + if (!ret) {
68089 arch_ptrace_attach(child);
68090 + gr_audit_ptrace(child);
68091 + }
68092 goto out_put_task_struct;
68093 }
68094
68095 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68096 index 37a5444..eec170a 100644
68097 --- a/kernel/rcutiny.c
68098 +++ b/kernel/rcutiny.c
68099 @@ -46,7 +46,7 @@
68100 struct rcu_ctrlblk;
68101 static void invoke_rcu_callbacks(void);
68102 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68103 -static void rcu_process_callbacks(struct softirq_action *unused);
68104 +static void rcu_process_callbacks(void);
68105 static void __call_rcu(struct rcu_head *head,
68106 void (*func)(struct rcu_head *rcu),
68107 struct rcu_ctrlblk *rcp);
68108 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68109 rcu_is_callbacks_kthread()));
68110 }
68111
68112 -static void rcu_process_callbacks(struct softirq_action *unused)
68113 +static void rcu_process_callbacks(void)
68114 {
68115 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68116 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68117 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68118 index 22ecea0..3789898 100644
68119 --- a/kernel/rcutiny_plugin.h
68120 +++ b/kernel/rcutiny_plugin.h
68121 @@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
68122 have_rcu_kthread_work = morework;
68123 local_irq_restore(flags);
68124 if (work)
68125 - rcu_process_callbacks(NULL);
68126 + rcu_process_callbacks();
68127 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68128 }
68129
68130 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68131 index a89b381..efdcad8 100644
68132 --- a/kernel/rcutorture.c
68133 +++ b/kernel/rcutorture.c
68134 @@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68135 { 0 };
68136 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68137 { 0 };
68138 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68139 -static atomic_t n_rcu_torture_alloc;
68140 -static atomic_t n_rcu_torture_alloc_fail;
68141 -static atomic_t n_rcu_torture_free;
68142 -static atomic_t n_rcu_torture_mberror;
68143 -static atomic_t n_rcu_torture_error;
68144 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68145 +static atomic_unchecked_t n_rcu_torture_alloc;
68146 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
68147 +static atomic_unchecked_t n_rcu_torture_free;
68148 +static atomic_unchecked_t n_rcu_torture_mberror;
68149 +static atomic_unchecked_t n_rcu_torture_error;
68150 static long n_rcu_torture_boost_ktrerror;
68151 static long n_rcu_torture_boost_rterror;
68152 static long n_rcu_torture_boost_failure;
68153 @@ -253,11 +253,11 @@ rcu_torture_alloc(void)
68154
68155 spin_lock_bh(&rcu_torture_lock);
68156 if (list_empty(&rcu_torture_freelist)) {
68157 - atomic_inc(&n_rcu_torture_alloc_fail);
68158 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68159 spin_unlock_bh(&rcu_torture_lock);
68160 return NULL;
68161 }
68162 - atomic_inc(&n_rcu_torture_alloc);
68163 + atomic_inc_unchecked(&n_rcu_torture_alloc);
68164 p = rcu_torture_freelist.next;
68165 list_del_init(p);
68166 spin_unlock_bh(&rcu_torture_lock);
68167 @@ -270,7 +270,7 @@ rcu_torture_alloc(void)
68168 static void
68169 rcu_torture_free(struct rcu_torture *p)
68170 {
68171 - atomic_inc(&n_rcu_torture_free);
68172 + atomic_inc_unchecked(&n_rcu_torture_free);
68173 spin_lock_bh(&rcu_torture_lock);
68174 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68175 spin_unlock_bh(&rcu_torture_lock);
68176 @@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
68177 i = rp->rtort_pipe_count;
68178 if (i > RCU_TORTURE_PIPE_LEN)
68179 i = RCU_TORTURE_PIPE_LEN;
68180 - atomic_inc(&rcu_torture_wcount[i]);
68181 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68182 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68183 rp->rtort_mbtest = 0;
68184 rcu_torture_free(rp);
68185 @@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68186 i = rp->rtort_pipe_count;
68187 if (i > RCU_TORTURE_PIPE_LEN)
68188 i = RCU_TORTURE_PIPE_LEN;
68189 - atomic_inc(&rcu_torture_wcount[i]);
68190 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68191 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68192 rp->rtort_mbtest = 0;
68193 list_del(&rp->rtort_free);
68194 @@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
68195 i = old_rp->rtort_pipe_count;
68196 if (i > RCU_TORTURE_PIPE_LEN)
68197 i = RCU_TORTURE_PIPE_LEN;
68198 - atomic_inc(&rcu_torture_wcount[i]);
68199 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68200 old_rp->rtort_pipe_count++;
68201 cur_ops->deferred_free(old_rp);
68202 }
68203 @@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
68204 }
68205 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
68206 if (p->rtort_mbtest == 0)
68207 - atomic_inc(&n_rcu_torture_mberror);
68208 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68209 spin_lock(&rand_lock);
68210 cur_ops->read_delay(&rand);
68211 n_rcu_torture_timers++;
68212 @@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
68213 }
68214 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
68215 if (p->rtort_mbtest == 0)
68216 - atomic_inc(&n_rcu_torture_mberror);
68217 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68218 cur_ops->read_delay(&rand);
68219 preempt_disable();
68220 pipe_count = p->rtort_pipe_count;
68221 @@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
68222 rcu_torture_current,
68223 rcu_torture_current_version,
68224 list_empty(&rcu_torture_freelist),
68225 - atomic_read(&n_rcu_torture_alloc),
68226 - atomic_read(&n_rcu_torture_alloc_fail),
68227 - atomic_read(&n_rcu_torture_free),
68228 - atomic_read(&n_rcu_torture_mberror),
68229 + atomic_read_unchecked(&n_rcu_torture_alloc),
68230 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68231 + atomic_read_unchecked(&n_rcu_torture_free),
68232 + atomic_read_unchecked(&n_rcu_torture_mberror),
68233 n_rcu_torture_boost_ktrerror,
68234 n_rcu_torture_boost_rterror,
68235 n_rcu_torture_boost_failure,
68236 @@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
68237 n_online_attempts,
68238 n_offline_successes,
68239 n_offline_attempts);
68240 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68241 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68242 n_rcu_torture_boost_ktrerror != 0 ||
68243 n_rcu_torture_boost_rterror != 0 ||
68244 n_rcu_torture_boost_failure != 0)
68245 @@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
68246 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68247 if (i > 1) {
68248 cnt += sprintf(&page[cnt], "!!! ");
68249 - atomic_inc(&n_rcu_torture_error);
68250 + atomic_inc_unchecked(&n_rcu_torture_error);
68251 WARN_ON_ONCE(1);
68252 }
68253 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68254 @@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
68255 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68256 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68257 cnt += sprintf(&page[cnt], " %d",
68258 - atomic_read(&rcu_torture_wcount[i]));
68259 + atomic_read_unchecked(&rcu_torture_wcount[i]));
68260 }
68261 cnt += sprintf(&page[cnt], "\n");
68262 if (cur_ops->stats)
68263 @@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
68264
68265 if (cur_ops->cleanup)
68266 cur_ops->cleanup();
68267 - if (atomic_read(&n_rcu_torture_error))
68268 + if (atomic_read_unchecked(&n_rcu_torture_error))
68269 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68270 else if (n_online_successes != n_online_attempts ||
68271 n_offline_successes != n_offline_attempts)
68272 @@ -1744,17 +1744,17 @@ rcu_torture_init(void)
68273
68274 rcu_torture_current = NULL;
68275 rcu_torture_current_version = 0;
68276 - atomic_set(&n_rcu_torture_alloc, 0);
68277 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68278 - atomic_set(&n_rcu_torture_free, 0);
68279 - atomic_set(&n_rcu_torture_mberror, 0);
68280 - atomic_set(&n_rcu_torture_error, 0);
68281 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68282 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68283 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68284 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68285 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68286 n_rcu_torture_boost_ktrerror = 0;
68287 n_rcu_torture_boost_rterror = 0;
68288 n_rcu_torture_boost_failure = 0;
68289 n_rcu_torture_boosts = 0;
68290 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68291 - atomic_set(&rcu_torture_wcount[i], 0);
68292 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68293 for_each_possible_cpu(cpu) {
68294 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68295 per_cpu(rcu_torture_count, cpu)[i] = 0;
68296 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68297 index d0c5baf..109b2e7 100644
68298 --- a/kernel/rcutree.c
68299 +++ b/kernel/rcutree.c
68300 @@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68301 rcu_prepare_for_idle(smp_processor_id());
68302 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68303 smp_mb__before_atomic_inc(); /* See above. */
68304 - atomic_inc(&rdtp->dynticks);
68305 + atomic_inc_unchecked(&rdtp->dynticks);
68306 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68307 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68308 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68309
68310 /*
68311 * The idle task is not permitted to enter the idle loop while
68312 @@ -448,10 +448,10 @@ void rcu_irq_exit(void)
68313 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68314 {
68315 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68316 - atomic_inc(&rdtp->dynticks);
68317 + atomic_inc_unchecked(&rdtp->dynticks);
68318 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68319 smp_mb__after_atomic_inc(); /* See above. */
68320 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68321 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68322 rcu_cleanup_after_idle(smp_processor_id());
68323 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68324 if (!is_idle_task(current)) {
68325 @@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
68326 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68327
68328 if (rdtp->dynticks_nmi_nesting == 0 &&
68329 - (atomic_read(&rdtp->dynticks) & 0x1))
68330 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68331 return;
68332 rdtp->dynticks_nmi_nesting++;
68333 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68334 - atomic_inc(&rdtp->dynticks);
68335 + atomic_inc_unchecked(&rdtp->dynticks);
68336 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68337 smp_mb__after_atomic_inc(); /* See above. */
68338 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68339 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68340 }
68341
68342 /**
68343 @@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
68344 return;
68345 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68346 smp_mb__before_atomic_inc(); /* See above. */
68347 - atomic_inc(&rdtp->dynticks);
68348 + atomic_inc_unchecked(&rdtp->dynticks);
68349 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68350 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68351 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68352 }
68353
68354 #ifdef CONFIG_PROVE_RCU
68355 @@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
68356 int ret;
68357
68358 preempt_disable();
68359 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68360 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68361 preempt_enable();
68362 return ret;
68363 }
68364 @@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68365 */
68366 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68367 {
68368 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68369 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68370 return (rdp->dynticks_snap & 0x1) == 0;
68371 }
68372
68373 @@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68374 unsigned int curr;
68375 unsigned int snap;
68376
68377 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68378 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68379 snap = (unsigned int)rdp->dynticks_snap;
68380
68381 /*
68382 @@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
68383 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
68384 */
68385 if (till_stall_check < 3) {
68386 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
68387 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
68388 till_stall_check = 3;
68389 } else if (till_stall_check > 300) {
68390 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
68391 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
68392 till_stall_check = 300;
68393 }
68394 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
68395 @@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68396 /*
68397 * Do RCU core processing for the current CPU.
68398 */
68399 -static void rcu_process_callbacks(struct softirq_action *unused)
68400 +static void rcu_process_callbacks(void)
68401 {
68402 trace_rcu_utilization("Start RCU core");
68403 __rcu_process_callbacks(&rcu_sched_state,
68404 @@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
68405 }
68406 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
68407
68408 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68409 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68410 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68411 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68412
68413 static int synchronize_sched_expedited_cpu_stop(void *data)
68414 {
68415 @@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
68416 int firstsnap, s, snap, trycount = 0;
68417
68418 /* Note that atomic_inc_return() implies full memory barrier. */
68419 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68420 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68421 get_online_cpus();
68422 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
68423
68424 @@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
68425 }
68426
68427 /* Check to see if someone else did our work for us. */
68428 - s = atomic_read(&sync_sched_expedited_done);
68429 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68430 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68431 smp_mb(); /* ensure test happens before caller kfree */
68432 return;
68433 @@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
68434 * grace period works for us.
68435 */
68436 get_online_cpus();
68437 - snap = atomic_read(&sync_sched_expedited_started);
68438 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
68439 smp_mb(); /* ensure read is before try_stop_cpus(). */
68440 }
68441
68442 @@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
68443 * than we did beat us to the punch.
68444 */
68445 do {
68446 - s = atomic_read(&sync_sched_expedited_done);
68447 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68448 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68449 smp_mb(); /* ensure test happens before caller kfree */
68450 break;
68451 }
68452 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68453 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68454
68455 put_online_cpus();
68456 }
68457 @@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68458 rdp->qlen = 0;
68459 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68460 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
68461 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68462 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68463 rdp->cpu = cpu;
68464 rdp->rsp = rsp;
68465 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68466 @@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68467 rdp->n_force_qs_snap = rsp->n_force_qs;
68468 rdp->blimit = blimit;
68469 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
68470 - atomic_set(&rdp->dynticks->dynticks,
68471 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68472 + atomic_set_unchecked(&rdp->dynticks->dynticks,
68473 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68474 rcu_prepare_for_idle_init(cpu);
68475 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68476
68477 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68478 index cdd1be0..5b2efb4 100644
68479 --- a/kernel/rcutree.h
68480 +++ b/kernel/rcutree.h
68481 @@ -87,7 +87,7 @@ struct rcu_dynticks {
68482 long long dynticks_nesting; /* Track irq/process nesting level. */
68483 /* Process level is worth LLONG_MAX/2. */
68484 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68485 - atomic_t dynticks; /* Even value for idle, else odd. */
68486 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68487 };
68488
68489 /* RCU's kthread states for tracing. */
68490 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68491 index c023464..7f57225 100644
68492 --- a/kernel/rcutree_plugin.h
68493 +++ b/kernel/rcutree_plugin.h
68494 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
68495
68496 /* Clean up and exit. */
68497 smp_mb(); /* ensure expedited GP seen before counter increment. */
68498 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68499 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68500 unlock_mb_ret:
68501 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68502 mb_ret:
68503 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68504 index ed459ed..a03c3fa 100644
68505 --- a/kernel/rcutree_trace.c
68506 +++ b/kernel/rcutree_trace.c
68507 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68508 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68509 rdp->qs_pending);
68510 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68511 - atomic_read(&rdp->dynticks->dynticks),
68512 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68513 rdp->dynticks->dynticks_nesting,
68514 rdp->dynticks->dynticks_nmi_nesting,
68515 rdp->dynticks_fqs);
68516 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68517 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68518 rdp->qs_pending);
68519 seq_printf(m, ",%d,%llx,%d,%lu",
68520 - atomic_read(&rdp->dynticks->dynticks),
68521 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68522 rdp->dynticks->dynticks_nesting,
68523 rdp->dynticks->dynticks_nmi_nesting,
68524 rdp->dynticks_fqs);
68525 diff --git a/kernel/resource.c b/kernel/resource.c
68526 index 7e8ea66..1efd11f 100644
68527 --- a/kernel/resource.c
68528 +++ b/kernel/resource.c
68529 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68530
68531 static int __init ioresources_init(void)
68532 {
68533 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68534 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68535 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68536 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68537 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68538 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68539 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68540 +#endif
68541 +#else
68542 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68543 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68544 +#endif
68545 return 0;
68546 }
68547 __initcall(ioresources_init);
68548 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68549 index 98ec494..4241d6d 100644
68550 --- a/kernel/rtmutex-tester.c
68551 +++ b/kernel/rtmutex-tester.c
68552 @@ -20,7 +20,7 @@
68553 #define MAX_RT_TEST_MUTEXES 8
68554
68555 static spinlock_t rttest_lock;
68556 -static atomic_t rttest_event;
68557 +static atomic_unchecked_t rttest_event;
68558
68559 struct test_thread_data {
68560 int opcode;
68561 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68562
68563 case RTTEST_LOCKCONT:
68564 td->mutexes[td->opdata] = 1;
68565 - td->event = atomic_add_return(1, &rttest_event);
68566 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68567 return 0;
68568
68569 case RTTEST_RESET:
68570 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68571 return 0;
68572
68573 case RTTEST_RESETEVENT:
68574 - atomic_set(&rttest_event, 0);
68575 + atomic_set_unchecked(&rttest_event, 0);
68576 return 0;
68577
68578 default:
68579 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68580 return ret;
68581
68582 td->mutexes[id] = 1;
68583 - td->event = atomic_add_return(1, &rttest_event);
68584 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68585 rt_mutex_lock(&mutexes[id]);
68586 - td->event = atomic_add_return(1, &rttest_event);
68587 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68588 td->mutexes[id] = 4;
68589 return 0;
68590
68591 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68592 return ret;
68593
68594 td->mutexes[id] = 1;
68595 - td->event = atomic_add_return(1, &rttest_event);
68596 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68597 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68598 - td->event = atomic_add_return(1, &rttest_event);
68599 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68600 td->mutexes[id] = ret ? 0 : 4;
68601 return ret ? -EINTR : 0;
68602
68603 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68604 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68605 return ret;
68606
68607 - td->event = atomic_add_return(1, &rttest_event);
68608 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68609 rt_mutex_unlock(&mutexes[id]);
68610 - td->event = atomic_add_return(1, &rttest_event);
68611 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68612 td->mutexes[id] = 0;
68613 return 0;
68614
68615 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68616 break;
68617
68618 td->mutexes[dat] = 2;
68619 - td->event = atomic_add_return(1, &rttest_event);
68620 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68621 break;
68622
68623 default:
68624 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68625 return;
68626
68627 td->mutexes[dat] = 3;
68628 - td->event = atomic_add_return(1, &rttest_event);
68629 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68630 break;
68631
68632 case RTTEST_LOCKNOWAIT:
68633 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68634 return;
68635
68636 td->mutexes[dat] = 1;
68637 - td->event = atomic_add_return(1, &rttest_event);
68638 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68639 return;
68640
68641 default:
68642 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68643 index 0984a21..939f183 100644
68644 --- a/kernel/sched/auto_group.c
68645 +++ b/kernel/sched/auto_group.c
68646 @@ -11,7 +11,7 @@
68647
68648 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68649 static struct autogroup autogroup_default;
68650 -static atomic_t autogroup_seq_nr;
68651 +static atomic_unchecked_t autogroup_seq_nr;
68652
68653 void __init autogroup_init(struct task_struct *init_task)
68654 {
68655 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68656
68657 kref_init(&ag->kref);
68658 init_rwsem(&ag->lock);
68659 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68660 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68661 ag->tg = tg;
68662 #ifdef CONFIG_RT_GROUP_SCHED
68663 /*
68664 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68665 index 817bf70..9099fb4 100644
68666 --- a/kernel/sched/core.c
68667 +++ b/kernel/sched/core.c
68668 @@ -4038,6 +4038,8 @@ int can_nice(const struct task_struct *p, const int nice)
68669 /* convert nice value [19,-20] to rlimit style value [1,40] */
68670 int nice_rlim = 20 - nice;
68671
68672 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68673 +
68674 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68675 capable(CAP_SYS_NICE));
68676 }
68677 @@ -4071,7 +4073,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68678 if (nice > 19)
68679 nice = 19;
68680
68681 - if (increment < 0 && !can_nice(current, nice))
68682 + if (increment < 0 && (!can_nice(current, nice) ||
68683 + gr_handle_chroot_nice()))
68684 return -EPERM;
68685
68686 retval = security_task_setnice(current, nice);
68687 @@ -4228,6 +4231,7 @@ recheck:
68688 unsigned long rlim_rtprio =
68689 task_rlimit(p, RLIMIT_RTPRIO);
68690
68691 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68692 /* can't set/change the rt policy */
68693 if (policy != p->policy && !rlim_rtprio)
68694 return -EPERM;
68695 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68696 index e955364..eacd2a4 100644
68697 --- a/kernel/sched/fair.c
68698 +++ b/kernel/sched/fair.c
68699 @@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68700 * run_rebalance_domains is triggered when needed from the scheduler tick.
68701 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68702 */
68703 -static void run_rebalance_domains(struct softirq_action *h)
68704 +static void run_rebalance_domains(void)
68705 {
68706 int this_cpu = smp_processor_id();
68707 struct rq *this_rq = cpu_rq(this_cpu);
68708 diff --git a/kernel/signal.c b/kernel/signal.c
68709 index 17afcaf..4500b05 100644
68710 --- a/kernel/signal.c
68711 +++ b/kernel/signal.c
68712 @@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
68713
68714 int print_fatal_signals __read_mostly;
68715
68716 -static void __user *sig_handler(struct task_struct *t, int sig)
68717 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68718 {
68719 return t->sighand->action[sig - 1].sa.sa_handler;
68720 }
68721
68722 -static int sig_handler_ignored(void __user *handler, int sig)
68723 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68724 {
68725 /* Is it explicitly or implicitly ignored? */
68726 return handler == SIG_IGN ||
68727 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68728
68729 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
68730 {
68731 - void __user *handler;
68732 + __sighandler_t handler;
68733
68734 handler = sig_handler(t, sig);
68735
68736 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68737 atomic_inc(&user->sigpending);
68738 rcu_read_unlock();
68739
68740 + if (!override_rlimit)
68741 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68742 +
68743 if (override_rlimit ||
68744 atomic_read(&user->sigpending) <=
68745 task_rlimit(t, RLIMIT_SIGPENDING)) {
68746 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68747
68748 int unhandled_signal(struct task_struct *tsk, int sig)
68749 {
68750 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68751 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68752 if (is_global_init(tsk))
68753 return 1;
68754 if (handler != SIG_IGN && handler != SIG_DFL)
68755 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68756 }
68757 }
68758
68759 + /* allow glibc communication via tgkill to other threads in our
68760 + thread group */
68761 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68762 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68763 + && gr_handle_signal(t, sig))
68764 + return -EPERM;
68765 +
68766 return security_task_kill(t, info, sig, 0);
68767 }
68768
68769 @@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68770 return send_signal(sig, info, p, 1);
68771 }
68772
68773 -static int
68774 +int
68775 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68776 {
68777 return send_signal(sig, info, t, 0);
68778 @@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68779 unsigned long int flags;
68780 int ret, blocked, ignored;
68781 struct k_sigaction *action;
68782 + int is_unhandled = 0;
68783
68784 spin_lock_irqsave(&t->sighand->siglock, flags);
68785 action = &t->sighand->action[sig-1];
68786 @@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68787 }
68788 if (action->sa.sa_handler == SIG_DFL)
68789 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68790 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68791 + is_unhandled = 1;
68792 ret = specific_send_sig_info(sig, info, t);
68793 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68794
68795 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68796 + normal operation */
68797 + if (is_unhandled) {
68798 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68799 + gr_handle_crash(t, sig);
68800 + }
68801 +
68802 return ret;
68803 }
68804
68805 @@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68806 ret = check_kill_permission(sig, info, p);
68807 rcu_read_unlock();
68808
68809 - if (!ret && sig)
68810 + if (!ret && sig) {
68811 ret = do_send_sig_info(sig, info, p, true);
68812 + if (!ret)
68813 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68814 + }
68815
68816 return ret;
68817 }
68818 @@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68819 int error = -ESRCH;
68820
68821 rcu_read_lock();
68822 - p = find_task_by_vpid(pid);
68823 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68824 + /* allow glibc communication via tgkill to other threads in our
68825 + thread group */
68826 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68827 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68828 + p = find_task_by_vpid_unrestricted(pid);
68829 + else
68830 +#endif
68831 + p = find_task_by_vpid(pid);
68832 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68833 error = check_kill_permission(sig, info, p);
68834 /*
68835 diff --git a/kernel/smp.c b/kernel/smp.c
68836 index 2f8b10e..a41bc14 100644
68837 --- a/kernel/smp.c
68838 +++ b/kernel/smp.c
68839 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68840 }
68841 EXPORT_SYMBOL(smp_call_function);
68842
68843 -void ipi_call_lock(void)
68844 +void ipi_call_lock(void) __acquires(call_function.lock)
68845 {
68846 raw_spin_lock(&call_function.lock);
68847 }
68848
68849 -void ipi_call_unlock(void)
68850 +void ipi_call_unlock(void) __releases(call_function.lock)
68851 {
68852 raw_spin_unlock(&call_function.lock);
68853 }
68854
68855 -void ipi_call_lock_irq(void)
68856 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68857 {
68858 raw_spin_lock_irq(&call_function.lock);
68859 }
68860
68861 -void ipi_call_unlock_irq(void)
68862 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68863 {
68864 raw_spin_unlock_irq(&call_function.lock);
68865 }
68866 diff --git a/kernel/softirq.c b/kernel/softirq.c
68867 index 671f959..91c51cb 100644
68868 --- a/kernel/softirq.c
68869 +++ b/kernel/softirq.c
68870 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68871
68872 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68873
68874 -char *softirq_to_name[NR_SOFTIRQS] = {
68875 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68876 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68877 "TASKLET", "SCHED", "HRTIMER", "RCU"
68878 };
68879 @@ -235,7 +235,7 @@ restart:
68880 kstat_incr_softirqs_this_cpu(vec_nr);
68881
68882 trace_softirq_entry(vec_nr);
68883 - h->action(h);
68884 + h->action();
68885 trace_softirq_exit(vec_nr);
68886 if (unlikely(prev_count != preempt_count())) {
68887 printk(KERN_ERR "huh, entered softirq %u %s %p"
68888 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68889 or_softirq_pending(1UL << nr);
68890 }
68891
68892 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68893 +void open_softirq(int nr, void (*action)(void))
68894 {
68895 - softirq_vec[nr].action = action;
68896 + pax_open_kernel();
68897 + *(void **)&softirq_vec[nr].action = action;
68898 + pax_close_kernel();
68899 }
68900
68901 /*
68902 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68903
68904 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68905
68906 -static void tasklet_action(struct softirq_action *a)
68907 +static void tasklet_action(void)
68908 {
68909 struct tasklet_struct *list;
68910
68911 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68912 }
68913 }
68914
68915 -static void tasklet_hi_action(struct softirq_action *a)
68916 +static void tasklet_hi_action(void)
68917 {
68918 struct tasklet_struct *list;
68919
68920 diff --git a/kernel/sys.c b/kernel/sys.c
68921 index e7006eb..8fb7c51 100644
68922 --- a/kernel/sys.c
68923 +++ b/kernel/sys.c
68924 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68925 error = -EACCES;
68926 goto out;
68927 }
68928 +
68929 + if (gr_handle_chroot_setpriority(p, niceval)) {
68930 + error = -EACCES;
68931 + goto out;
68932 + }
68933 +
68934 no_nice = security_task_setnice(p, niceval);
68935 if (no_nice) {
68936 error = no_nice;
68937 @@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68938 goto error;
68939 }
68940
68941 + if (gr_check_group_change(new->gid, new->egid, -1))
68942 + goto error;
68943 +
68944 if (rgid != (gid_t) -1 ||
68945 (egid != (gid_t) -1 && egid != old->gid))
68946 new->sgid = new->egid;
68947 @@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68948 old = current_cred();
68949
68950 retval = -EPERM;
68951 +
68952 + if (gr_check_group_change(gid, gid, gid))
68953 + goto error;
68954 +
68955 if (nsown_capable(CAP_SETGID))
68956 new->gid = new->egid = new->sgid = new->fsgid = gid;
68957 else if (gid == old->gid || gid == old->sgid)
68958 @@ -627,7 +640,7 @@ error:
68959 /*
68960 * change the user struct in a credentials set to match the new UID
68961 */
68962 -static int set_user(struct cred *new)
68963 +int set_user(struct cred *new)
68964 {
68965 struct user_struct *new_user;
68966
68967 @@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68968 goto error;
68969 }
68970
68971 + if (gr_check_user_change(new->uid, new->euid, -1))
68972 + goto error;
68973 +
68974 if (new->uid != old->uid) {
68975 retval = set_user(new);
68976 if (retval < 0)
68977 @@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68978 old = current_cred();
68979
68980 retval = -EPERM;
68981 +
68982 + if (gr_check_crash_uid(uid))
68983 + goto error;
68984 + if (gr_check_user_change(uid, uid, uid))
68985 + goto error;
68986 +
68987 if (nsown_capable(CAP_SETUID)) {
68988 new->suid = new->uid = uid;
68989 if (uid != old->uid) {
68990 @@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68991 goto error;
68992 }
68993
68994 + if (gr_check_user_change(ruid, euid, -1))
68995 + goto error;
68996 +
68997 if (ruid != (uid_t) -1) {
68998 new->uid = ruid;
68999 if (ruid != old->uid) {
69000 @@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69001 goto error;
69002 }
69003
69004 + if (gr_check_group_change(rgid, egid, -1))
69005 + goto error;
69006 +
69007 if (rgid != (gid_t) -1)
69008 new->gid = rgid;
69009 if (egid != (gid_t) -1)
69010 @@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69011 old = current_cred();
69012 old_fsuid = old->fsuid;
69013
69014 + if (gr_check_user_change(-1, -1, uid))
69015 + goto error;
69016 +
69017 if (uid == old->uid || uid == old->euid ||
69018 uid == old->suid || uid == old->fsuid ||
69019 nsown_capable(CAP_SETUID)) {
69020 @@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69021 }
69022 }
69023
69024 +error:
69025 abort_creds(new);
69026 return old_fsuid;
69027
69028 @@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69029 if (gid == old->gid || gid == old->egid ||
69030 gid == old->sgid || gid == old->fsgid ||
69031 nsown_capable(CAP_SETGID)) {
69032 + if (gr_check_group_change(-1, -1, gid))
69033 + goto error;
69034 +
69035 if (gid != old_fsgid) {
69036 new->fsgid = gid;
69037 goto change_okay;
69038 }
69039 }
69040
69041 +error:
69042 abort_creds(new);
69043 return old_fsgid;
69044
69045 @@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
69046 }
69047 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69048 snprintf(buf, len, "2.6.%u%s", v, rest);
69049 - ret = copy_to_user(release, buf, len);
69050 + if (len > sizeof(buf))
69051 + ret = -EFAULT;
69052 + else
69053 + ret = copy_to_user(release, buf, len);
69054 }
69055 return ret;
69056 }
69057 @@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69058 return -EFAULT;
69059
69060 down_read(&uts_sem);
69061 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
69062 + error = __copy_to_user(name->sysname, &utsname()->sysname,
69063 __OLD_UTS_LEN);
69064 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69065 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69066 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
69067 __OLD_UTS_LEN);
69068 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69069 - error |= __copy_to_user(&name->release, &utsname()->release,
69070 + error |= __copy_to_user(name->release, &utsname()->release,
69071 __OLD_UTS_LEN);
69072 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69073 - error |= __copy_to_user(&name->version, &utsname()->version,
69074 + error |= __copy_to_user(name->version, &utsname()->version,
69075 __OLD_UTS_LEN);
69076 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69077 - error |= __copy_to_user(&name->machine, &utsname()->machine,
69078 + error |= __copy_to_user(name->machine, &utsname()->machine,
69079 __OLD_UTS_LEN);
69080 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69081 up_read(&uts_sem);
69082 @@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69083 error = get_dumpable(me->mm);
69084 break;
69085 case PR_SET_DUMPABLE:
69086 - if (arg2 < 0 || arg2 > 1) {
69087 + if (arg2 > 1) {
69088 error = -EINVAL;
69089 break;
69090 }
69091 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69092 index 4ab1187..0b75ced 100644
69093 --- a/kernel/sysctl.c
69094 +++ b/kernel/sysctl.c
69095 @@ -91,7 +91,6 @@
69096
69097
69098 #if defined(CONFIG_SYSCTL)
69099 -
69100 /* External variables not in a header file. */
69101 extern int sysctl_overcommit_memory;
69102 extern int sysctl_overcommit_ratio;
69103 @@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
69104 void __user *buffer, size_t *lenp, loff_t *ppos);
69105 #endif
69106
69107 -#ifdef CONFIG_PRINTK
69108 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69109 void __user *buffer, size_t *lenp, loff_t *ppos);
69110 -#endif
69111
69112 #ifdef CONFIG_MAGIC_SYSRQ
69113 /* Note: sysrq code uses it's own private copy */
69114 @@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69115
69116 #endif
69117
69118 +extern struct ctl_table grsecurity_table[];
69119 +
69120 static struct ctl_table kern_table[];
69121 static struct ctl_table vm_table[];
69122 static struct ctl_table fs_table[];
69123 @@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
69124 int sysctl_legacy_va_layout;
69125 #endif
69126
69127 +#ifdef CONFIG_PAX_SOFTMODE
69128 +static ctl_table pax_table[] = {
69129 + {
69130 + .procname = "softmode",
69131 + .data = &pax_softmode,
69132 + .maxlen = sizeof(unsigned int),
69133 + .mode = 0600,
69134 + .proc_handler = &proc_dointvec,
69135 + },
69136 +
69137 + { }
69138 +};
69139 +#endif
69140 +
69141 /* The default sysctl tables: */
69142
69143 static struct ctl_table sysctl_base_table[] = {
69144 @@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
69145 #endif
69146
69147 static struct ctl_table kern_table[] = {
69148 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69149 + {
69150 + .procname = "grsecurity",
69151 + .mode = 0500,
69152 + .child = grsecurity_table,
69153 + },
69154 +#endif
69155 +
69156 +#ifdef CONFIG_PAX_SOFTMODE
69157 + {
69158 + .procname = "pax",
69159 + .mode = 0500,
69160 + .child = pax_table,
69161 + },
69162 +#endif
69163 +
69164 {
69165 .procname = "sched_child_runs_first",
69166 .data = &sysctl_sched_child_runs_first,
69167 @@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
69168 .data = &modprobe_path,
69169 .maxlen = KMOD_PATH_LEN,
69170 .mode = 0644,
69171 - .proc_handler = proc_dostring,
69172 + .proc_handler = proc_dostring_modpriv,
69173 },
69174 {
69175 .procname = "modules_disabled",
69176 @@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
69177 .extra1 = &zero,
69178 .extra2 = &one,
69179 },
69180 +#endif
69181 {
69182 .procname = "kptr_restrict",
69183 .data = &kptr_restrict,
69184 .maxlen = sizeof(int),
69185 .mode = 0644,
69186 .proc_handler = proc_dointvec_minmax_sysadmin,
69187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69188 + .extra1 = &two,
69189 +#else
69190 .extra1 = &zero,
69191 +#endif
69192 .extra2 = &two,
69193 },
69194 -#endif
69195 {
69196 .procname = "ngroups_max",
69197 .data = &ngroups_max,
69198 @@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
69199 .proc_handler = proc_dointvec_minmax,
69200 .extra1 = &zero,
69201 },
69202 + {
69203 + .procname = "heap_stack_gap",
69204 + .data = &sysctl_heap_stack_gap,
69205 + .maxlen = sizeof(sysctl_heap_stack_gap),
69206 + .mode = 0644,
69207 + .proc_handler = proc_doulongvec_minmax,
69208 + },
69209 #else
69210 {
69211 .procname = "nr_trim_pages",
69212 @@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
69213 buffer, lenp, ppos);
69214 }
69215
69216 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69217 + void __user *buffer, size_t *lenp, loff_t *ppos)
69218 +{
69219 + if (write && !capable(CAP_SYS_MODULE))
69220 + return -EPERM;
69221 +
69222 + return _proc_do_string(table->data, table->maxlen, write,
69223 + buffer, lenp, ppos);
69224 +}
69225 +
69226 static size_t proc_skip_spaces(char **buf)
69227 {
69228 size_t ret;
69229 @@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69230 len = strlen(tmp);
69231 if (len > *size)
69232 len = *size;
69233 + if (len > sizeof(tmp))
69234 + len = sizeof(tmp);
69235 if (copy_to_user(*buf, tmp, len))
69236 return -EFAULT;
69237 *size -= len;
69238 @@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
69239 return err;
69240 }
69241
69242 -#ifdef CONFIG_PRINTK
69243 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69244 void __user *buffer, size_t *lenp, loff_t *ppos)
69245 {
69246 @@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69247
69248 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
69249 }
69250 -#endif
69251
69252 struct do_proc_dointvec_minmax_conv_param {
69253 int *min;
69254 @@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69255 *i = val;
69256 } else {
69257 val = convdiv * (*i) / convmul;
69258 - if (!first)
69259 + if (!first) {
69260 err = proc_put_char(&buffer, &left, '\t');
69261 + if (err)
69262 + break;
69263 + }
69264 err = proc_put_long(&buffer, &left, val, false);
69265 if (err)
69266 break;
69267 @@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
69268 return -ENOSYS;
69269 }
69270
69271 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69272 + void __user *buffer, size_t *lenp, loff_t *ppos)
69273 +{
69274 + return -ENOSYS;
69275 +}
69276 +
69277 int proc_dointvec(struct ctl_table *table, int write,
69278 void __user *buffer, size_t *lenp, loff_t *ppos)
69279 {
69280 @@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69281 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69282 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69283 EXPORT_SYMBOL(proc_dostring);
69284 +EXPORT_SYMBOL(proc_dostring_modpriv);
69285 EXPORT_SYMBOL(proc_doulongvec_minmax);
69286 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69287 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69288 index a650694..aaeeb20 100644
69289 --- a/kernel/sysctl_binary.c
69290 +++ b/kernel/sysctl_binary.c
69291 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69292 int i;
69293
69294 set_fs(KERNEL_DS);
69295 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69296 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69297 set_fs(old_fs);
69298 if (result < 0)
69299 goto out_kfree;
69300 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69301 }
69302
69303 set_fs(KERNEL_DS);
69304 - result = vfs_write(file, buffer, str - buffer, &pos);
69305 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69306 set_fs(old_fs);
69307 if (result < 0)
69308 goto out_kfree;
69309 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69310 int i;
69311
69312 set_fs(KERNEL_DS);
69313 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69314 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69315 set_fs(old_fs);
69316 if (result < 0)
69317 goto out_kfree;
69318 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69319 }
69320
69321 set_fs(KERNEL_DS);
69322 - result = vfs_write(file, buffer, str - buffer, &pos);
69323 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69324 set_fs(old_fs);
69325 if (result < 0)
69326 goto out_kfree;
69327 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69328 int i;
69329
69330 set_fs(KERNEL_DS);
69331 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69332 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69333 set_fs(old_fs);
69334 if (result < 0)
69335 goto out;
69336 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69337 __le16 dnaddr;
69338
69339 set_fs(KERNEL_DS);
69340 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69341 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69342 set_fs(old_fs);
69343 if (result < 0)
69344 goto out;
69345 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69346 le16_to_cpu(dnaddr) & 0x3ff);
69347
69348 set_fs(KERNEL_DS);
69349 - result = vfs_write(file, buf, len, &pos);
69350 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69351 set_fs(old_fs);
69352 if (result < 0)
69353 goto out;
69354 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69355 index e660464..c8b9e67 100644
69356 --- a/kernel/taskstats.c
69357 +++ b/kernel/taskstats.c
69358 @@ -27,9 +27,12 @@
69359 #include <linux/cgroup.h>
69360 #include <linux/fs.h>
69361 #include <linux/file.h>
69362 +#include <linux/grsecurity.h>
69363 #include <net/genetlink.h>
69364 #include <linux/atomic.h>
69365
69366 +extern int gr_is_taskstats_denied(int pid);
69367 +
69368 /*
69369 * Maximum length of a cpumask that can be specified in
69370 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69371 @@ -556,6 +559,9 @@ err:
69372
69373 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69374 {
69375 + if (gr_is_taskstats_denied(current->pid))
69376 + return -EACCES;
69377 +
69378 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69379 return cmd_attr_register_cpumask(info);
69380 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69381 diff --git a/kernel/time.c b/kernel/time.c
69382 index ba744cf..267b7c5 100644
69383 --- a/kernel/time.c
69384 +++ b/kernel/time.c
69385 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69386 return error;
69387
69388 if (tz) {
69389 + /* we log in do_settimeofday called below, so don't log twice
69390 + */
69391 + if (!tv)
69392 + gr_log_timechange();
69393 +
69394 sys_tz = *tz;
69395 update_vsyscall_tz();
69396 if (firsttime) {
69397 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69398 index 8a538c5..def79d4 100644
69399 --- a/kernel/time/alarmtimer.c
69400 +++ b/kernel/time/alarmtimer.c
69401 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
69402 struct platform_device *pdev;
69403 int error = 0;
69404 int i;
69405 - struct k_clock alarm_clock = {
69406 + static struct k_clock alarm_clock = {
69407 .clock_getres = alarm_clock_getres,
69408 .clock_get = alarm_clock_get,
69409 .timer_create = alarm_timer_create,
69410 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69411 index f113755..ec24223 100644
69412 --- a/kernel/time/tick-broadcast.c
69413 +++ b/kernel/time/tick-broadcast.c
69414 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69415 * then clear the broadcast bit.
69416 */
69417 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69418 - int cpu = smp_processor_id();
69419 + cpu = smp_processor_id();
69420
69421 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69422 tick_broadcast_clear_oneshot(cpu);
69423 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69424 index 7c50de8..e29a94d 100644
69425 --- a/kernel/time/timekeeping.c
69426 +++ b/kernel/time/timekeeping.c
69427 @@ -14,6 +14,7 @@
69428 #include <linux/init.h>
69429 #include <linux/mm.h>
69430 #include <linux/sched.h>
69431 +#include <linux/grsecurity.h>
69432 #include <linux/syscore_ops.h>
69433 #include <linux/clocksource.h>
69434 #include <linux/jiffies.h>
69435 @@ -388,6 +389,8 @@ int do_settimeofday(const struct timespec *tv)
69436 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69437 return -EINVAL;
69438
69439 + gr_log_timechange();
69440 +
69441 write_seqlock_irqsave(&timekeeper.lock, flags);
69442
69443 timekeeping_forward_now();
69444 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69445 index 3258455..f35227d 100644
69446 --- a/kernel/time/timer_list.c
69447 +++ b/kernel/time/timer_list.c
69448 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69449
69450 static void print_name_offset(struct seq_file *m, void *sym)
69451 {
69452 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69453 + SEQ_printf(m, "<%p>", NULL);
69454 +#else
69455 char symname[KSYM_NAME_LEN];
69456
69457 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69458 SEQ_printf(m, "<%pK>", sym);
69459 else
69460 SEQ_printf(m, "%s", symname);
69461 +#endif
69462 }
69463
69464 static void
69465 @@ -112,7 +116,11 @@ next_one:
69466 static void
69467 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69468 {
69469 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69470 + SEQ_printf(m, " .base: %p\n", NULL);
69471 +#else
69472 SEQ_printf(m, " .base: %pK\n", base);
69473 +#endif
69474 SEQ_printf(m, " .index: %d\n",
69475 base->index);
69476 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69477 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69478 {
69479 struct proc_dir_entry *pe;
69480
69481 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69482 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69483 +#else
69484 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69485 +#endif
69486 if (!pe)
69487 return -ENOMEM;
69488 return 0;
69489 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69490 index 0b537f2..9e71eca 100644
69491 --- a/kernel/time/timer_stats.c
69492 +++ b/kernel/time/timer_stats.c
69493 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69494 static unsigned long nr_entries;
69495 static struct entry entries[MAX_ENTRIES];
69496
69497 -static atomic_t overflow_count;
69498 +static atomic_unchecked_t overflow_count;
69499
69500 /*
69501 * The entries are in a hash-table, for fast lookup:
69502 @@ -140,7 +140,7 @@ static void reset_entries(void)
69503 nr_entries = 0;
69504 memset(entries, 0, sizeof(entries));
69505 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69506 - atomic_set(&overflow_count, 0);
69507 + atomic_set_unchecked(&overflow_count, 0);
69508 }
69509
69510 static struct entry *alloc_entry(void)
69511 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69512 if (likely(entry))
69513 entry->count++;
69514 else
69515 - atomic_inc(&overflow_count);
69516 + atomic_inc_unchecked(&overflow_count);
69517
69518 out_unlock:
69519 raw_spin_unlock_irqrestore(lock, flags);
69520 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69521
69522 static void print_name_offset(struct seq_file *m, unsigned long addr)
69523 {
69524 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69525 + seq_printf(m, "<%p>", NULL);
69526 +#else
69527 char symname[KSYM_NAME_LEN];
69528
69529 if (lookup_symbol_name(addr, symname) < 0)
69530 seq_printf(m, "<%p>", (void *)addr);
69531 else
69532 seq_printf(m, "%s", symname);
69533 +#endif
69534 }
69535
69536 static int tstats_show(struct seq_file *m, void *v)
69537 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69538
69539 seq_puts(m, "Timer Stats Version: v0.2\n");
69540 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69541 - if (atomic_read(&overflow_count))
69542 + if (atomic_read_unchecked(&overflow_count))
69543 seq_printf(m, "Overflow: %d entries\n",
69544 - atomic_read(&overflow_count));
69545 + atomic_read_unchecked(&overflow_count));
69546
69547 for (i = 0; i < nr_entries; i++) {
69548 entry = entries + i;
69549 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69550 {
69551 struct proc_dir_entry *pe;
69552
69553 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69554 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69555 +#else
69556 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69557 +#endif
69558 if (!pe)
69559 return -ENOMEM;
69560 return 0;
69561 diff --git a/kernel/timer.c b/kernel/timer.c
69562 index a297ffc..5e16b0b 100644
69563 --- a/kernel/timer.c
69564 +++ b/kernel/timer.c
69565 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69566 /*
69567 * This function runs timers and the timer-tq in bottom half context.
69568 */
69569 -static void run_timer_softirq(struct softirq_action *h)
69570 +static void run_timer_softirq(void)
69571 {
69572 struct tvec_base *base = __this_cpu_read(tvec_bases);
69573
69574 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69575 index c0bd030..62a1927 100644
69576 --- a/kernel/trace/blktrace.c
69577 +++ b/kernel/trace/blktrace.c
69578 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69579 struct blk_trace *bt = filp->private_data;
69580 char buf[16];
69581
69582 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69583 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69584
69585 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69586 }
69587 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69588 return 1;
69589
69590 bt = buf->chan->private_data;
69591 - atomic_inc(&bt->dropped);
69592 + atomic_inc_unchecked(&bt->dropped);
69593 return 0;
69594 }
69595
69596 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69597
69598 bt->dir = dir;
69599 bt->dev = dev;
69600 - atomic_set(&bt->dropped, 0);
69601 + atomic_set_unchecked(&bt->dropped, 0);
69602
69603 ret = -EIO;
69604 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69605 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69606 index 0fa92f6..89950b2 100644
69607 --- a/kernel/trace/ftrace.c
69608 +++ b/kernel/trace/ftrace.c
69609 @@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69610 if (unlikely(ftrace_disabled))
69611 return 0;
69612
69613 + ret = ftrace_arch_code_modify_prepare();
69614 + FTRACE_WARN_ON(ret);
69615 + if (ret)
69616 + return 0;
69617 +
69618 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69619 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69620 if (ret) {
69621 ftrace_bug(ret, ip);
69622 - return 0;
69623 }
69624 - return 1;
69625 + return ret ? 0 : 1;
69626 }
69627
69628 /*
69629 @@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69630
69631 int
69632 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69633 - void *data)
69634 + void *data)
69635 {
69636 struct ftrace_func_probe *entry;
69637 struct ftrace_page *pg;
69638 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69639 index 55e4d4c..8c915ec 100644
69640 --- a/kernel/trace/trace.c
69641 +++ b/kernel/trace/trace.c
69642 @@ -4316,10 +4316,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69643 };
69644 #endif
69645
69646 -static struct dentry *d_tracer;
69647 -
69648 struct dentry *tracing_init_dentry(void)
69649 {
69650 + static struct dentry *d_tracer;
69651 static int once;
69652
69653 if (d_tracer)
69654 @@ -4339,10 +4338,9 @@ struct dentry *tracing_init_dentry(void)
69655 return d_tracer;
69656 }
69657
69658 -static struct dentry *d_percpu;
69659 -
69660 struct dentry *tracing_dentry_percpu(void)
69661 {
69662 + static struct dentry *d_percpu;
69663 static int once;
69664 struct dentry *d_tracer;
69665
69666 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69667 index 29111da..d190fe2 100644
69668 --- a/kernel/trace/trace_events.c
69669 +++ b/kernel/trace/trace_events.c
69670 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
69671 struct ftrace_module_file_ops {
69672 struct list_head list;
69673 struct module *mod;
69674 - struct file_operations id;
69675 - struct file_operations enable;
69676 - struct file_operations format;
69677 - struct file_operations filter;
69678 };
69679
69680 static struct ftrace_module_file_ops *
69681 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
69682
69683 file_ops->mod = mod;
69684
69685 - file_ops->id = ftrace_event_id_fops;
69686 - file_ops->id.owner = mod;
69687 -
69688 - file_ops->enable = ftrace_enable_fops;
69689 - file_ops->enable.owner = mod;
69690 -
69691 - file_ops->filter = ftrace_event_filter_fops;
69692 - file_ops->filter.owner = mod;
69693 -
69694 - file_ops->format = ftrace_event_format_fops;
69695 - file_ops->format.owner = mod;
69696 + pax_open_kernel();
69697 + *(void **)&mod->trace_id.owner = mod;
69698 + *(void **)&mod->trace_enable.owner = mod;
69699 + *(void **)&mod->trace_filter.owner = mod;
69700 + *(void **)&mod->trace_format.owner = mod;
69701 + pax_close_kernel();
69702
69703 list_add(&file_ops->list, &ftrace_module_file_list);
69704
69705 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
69706
69707 for_each_event(call, start, end) {
69708 __trace_add_event_call(*call, mod,
69709 - &file_ops->id, &file_ops->enable,
69710 - &file_ops->filter, &file_ops->format);
69711 + &mod->trace_id, &mod->trace_enable,
69712 + &mod->trace_filter, &mod->trace_format);
69713 }
69714 }
69715
69716 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69717 index 580a05e..9b31acb 100644
69718 --- a/kernel/trace/trace_kprobe.c
69719 +++ b/kernel/trace/trace_kprobe.c
69720 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69721 long ret;
69722 int maxlen = get_rloc_len(*(u32 *)dest);
69723 u8 *dst = get_rloc_data(dest);
69724 - u8 *src = addr;
69725 + const u8 __user *src = (const u8 __force_user *)addr;
69726 mm_segment_t old_fs = get_fs();
69727 if (!maxlen)
69728 return;
69729 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69730 pagefault_disable();
69731 do
69732 ret = __copy_from_user_inatomic(dst++, src++, 1);
69733 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69734 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69735 dst[-1] = '\0';
69736 pagefault_enable();
69737 set_fs(old_fs);
69738 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69739 ((u8 *)get_rloc_data(dest))[0] = '\0';
69740 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69741 } else
69742 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69743 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69744 get_rloc_offs(*(u32 *)dest));
69745 }
69746 /* Return the length of string -- including null terminal byte */
69747 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69748 set_fs(KERNEL_DS);
69749 pagefault_disable();
69750 do {
69751 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69752 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69753 len++;
69754 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69755 pagefault_enable();
69756 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69757 index fd3c8aa..5f324a6 100644
69758 --- a/kernel/trace/trace_mmiotrace.c
69759 +++ b/kernel/trace/trace_mmiotrace.c
69760 @@ -24,7 +24,7 @@ struct header_iter {
69761 static struct trace_array *mmio_trace_array;
69762 static bool overrun_detected;
69763 static unsigned long prev_overruns;
69764 -static atomic_t dropped_count;
69765 +static atomic_unchecked_t dropped_count;
69766
69767 static void mmio_reset_data(struct trace_array *tr)
69768 {
69769 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69770
69771 static unsigned long count_overruns(struct trace_iterator *iter)
69772 {
69773 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69774 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69775 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69776
69777 if (over > prev_overruns)
69778 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69779 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69780 sizeof(*entry), 0, pc);
69781 if (!event) {
69782 - atomic_inc(&dropped_count);
69783 + atomic_inc_unchecked(&dropped_count);
69784 return;
69785 }
69786 entry = ring_buffer_event_data(event);
69787 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69788 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69789 sizeof(*entry), 0, pc);
69790 if (!event) {
69791 - atomic_inc(&dropped_count);
69792 + atomic_inc_unchecked(&dropped_count);
69793 return;
69794 }
69795 entry = ring_buffer_event_data(event);
69796 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69797 index df611a0..10d8b32 100644
69798 --- a/kernel/trace/trace_output.c
69799 +++ b/kernel/trace/trace_output.c
69800 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69801
69802 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69803 if (!IS_ERR(p)) {
69804 - p = mangle_path(s->buffer + s->len, p, "\n");
69805 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69806 if (p) {
69807 s->len = p - s->buffer;
69808 return 1;
69809 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69810 index d4545f4..a9010a1 100644
69811 --- a/kernel/trace/trace_stack.c
69812 +++ b/kernel/trace/trace_stack.c
69813 @@ -53,7 +53,7 @@ static inline void check_stack(void)
69814 return;
69815
69816 /* we do not handle interrupt stacks yet */
69817 - if (!object_is_on_stack(&this_size))
69818 + if (!object_starts_on_stack(&this_size))
69819 return;
69820
69821 local_irq_save(flags);
69822 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69823 index 209b379..7f76423 100644
69824 --- a/kernel/trace/trace_workqueue.c
69825 +++ b/kernel/trace/trace_workqueue.c
69826 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69827 int cpu;
69828 pid_t pid;
69829 /* Can be inserted from interrupt or user context, need to be atomic */
69830 - atomic_t inserted;
69831 + atomic_unchecked_t inserted;
69832 /*
69833 * Don't need to be atomic, works are serialized in a single workqueue thread
69834 * on a single CPU.
69835 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69836 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69837 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69838 if (node->pid == wq_thread->pid) {
69839 - atomic_inc(&node->inserted);
69840 + atomic_inc_unchecked(&node->inserted);
69841 goto found;
69842 }
69843 }
69844 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69845 tsk = get_pid_task(pid, PIDTYPE_PID);
69846 if (tsk) {
69847 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69848 - atomic_read(&cws->inserted), cws->executed,
69849 + atomic_read_unchecked(&cws->inserted), cws->executed,
69850 tsk->comm);
69851 put_task_struct(tsk);
69852 }
69853 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69854 index 6777153..8519f60 100644
69855 --- a/lib/Kconfig.debug
69856 +++ b/lib/Kconfig.debug
69857 @@ -1132,6 +1132,7 @@ config LATENCYTOP
69858 depends on DEBUG_KERNEL
69859 depends on STACKTRACE_SUPPORT
69860 depends on PROC_FS
69861 + depends on !GRKERNSEC_HIDESYM
69862 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69863 select KALLSYMS
69864 select KALLSYMS_ALL
69865 diff --git a/lib/bitmap.c b/lib/bitmap.c
69866 index b5a8b6a..a69623c 100644
69867 --- a/lib/bitmap.c
69868 +++ b/lib/bitmap.c
69869 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69870 {
69871 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69872 u32 chunk;
69873 - const char __user __force *ubuf = (const char __user __force *)buf;
69874 + const char __user *ubuf = (const char __force_user *)buf;
69875
69876 bitmap_zero(maskp, nmaskbits);
69877
69878 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69879 {
69880 if (!access_ok(VERIFY_READ, ubuf, ulen))
69881 return -EFAULT;
69882 - return __bitmap_parse((const char __force *)ubuf,
69883 + return __bitmap_parse((const char __force_kernel *)ubuf,
69884 ulen, 1, maskp, nmaskbits);
69885
69886 }
69887 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69888 {
69889 unsigned a, b;
69890 int c, old_c, totaldigits;
69891 - const char __user __force *ubuf = (const char __user __force *)buf;
69892 + const char __user *ubuf = (const char __force_user *)buf;
69893 int exp_digit, in_range;
69894
69895 totaldigits = c = 0;
69896 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69897 {
69898 if (!access_ok(VERIFY_READ, ubuf, ulen))
69899 return -EFAULT;
69900 - return __bitmap_parselist((const char __force *)ubuf,
69901 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69902 ulen, 1, maskp, nmaskbits);
69903 }
69904 EXPORT_SYMBOL(bitmap_parselist_user);
69905 diff --git a/lib/bug.c b/lib/bug.c
69906 index a28c141..2bd3d95 100644
69907 --- a/lib/bug.c
69908 +++ b/lib/bug.c
69909 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69910 return BUG_TRAP_TYPE_NONE;
69911
69912 bug = find_bug(bugaddr);
69913 + if (!bug)
69914 + return BUG_TRAP_TYPE_NONE;
69915
69916 file = NULL;
69917 line = 0;
69918 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69919 index 0ab9ae8..f01ceca 100644
69920 --- a/lib/debugobjects.c
69921 +++ b/lib/debugobjects.c
69922 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69923 if (limit > 4)
69924 return;
69925
69926 - is_on_stack = object_is_on_stack(addr);
69927 + is_on_stack = object_starts_on_stack(addr);
69928 if (is_on_stack == onstack)
69929 return;
69930
69931 diff --git a/lib/devres.c b/lib/devres.c
69932 index 80b9c76..9e32279 100644
69933 --- a/lib/devres.c
69934 +++ b/lib/devres.c
69935 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69936 void devm_iounmap(struct device *dev, void __iomem *addr)
69937 {
69938 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69939 - (void *)addr));
69940 + (void __force *)addr));
69941 iounmap(addr);
69942 }
69943 EXPORT_SYMBOL(devm_iounmap);
69944 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69945 {
69946 ioport_unmap(addr);
69947 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69948 - devm_ioport_map_match, (void *)addr));
69949 + devm_ioport_map_match, (void __force *)addr));
69950 }
69951 EXPORT_SYMBOL(devm_ioport_unmap);
69952
69953 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69954 index 13ef233..5241683 100644
69955 --- a/lib/dma-debug.c
69956 +++ b/lib/dma-debug.c
69957 @@ -924,7 +924,7 @@ out:
69958
69959 static void check_for_stack(struct device *dev, void *addr)
69960 {
69961 - if (object_is_on_stack(addr))
69962 + if (object_starts_on_stack(addr))
69963 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69964 "stack [addr=%p]\n", addr);
69965 }
69966 diff --git a/lib/extable.c b/lib/extable.c
69967 index 4cac81e..63e9b8f 100644
69968 --- a/lib/extable.c
69969 +++ b/lib/extable.c
69970 @@ -13,6 +13,7 @@
69971 #include <linux/init.h>
69972 #include <linux/sort.h>
69973 #include <asm/uaccess.h>
69974 +#include <asm/pgtable.h>
69975
69976 #ifndef ARCH_HAS_SORT_EXTABLE
69977 /*
69978 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69979 void sort_extable(struct exception_table_entry *start,
69980 struct exception_table_entry *finish)
69981 {
69982 + pax_open_kernel();
69983 sort(start, finish - start, sizeof(struct exception_table_entry),
69984 cmp_ex, NULL);
69985 + pax_close_kernel();
69986 }
69987
69988 #ifdef CONFIG_MODULES
69989 diff --git a/lib/inflate.c b/lib/inflate.c
69990 index 013a761..c28f3fc 100644
69991 --- a/lib/inflate.c
69992 +++ b/lib/inflate.c
69993 @@ -269,7 +269,7 @@ static void free(void *where)
69994 malloc_ptr = free_mem_ptr;
69995 }
69996 #else
69997 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69998 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69999 #define free(a) kfree(a)
70000 #endif
70001
70002 diff --git a/lib/ioremap.c b/lib/ioremap.c
70003 index 0c9216c..863bd89 100644
70004 --- a/lib/ioremap.c
70005 +++ b/lib/ioremap.c
70006 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
70007 unsigned long next;
70008
70009 phys_addr -= addr;
70010 - pmd = pmd_alloc(&init_mm, pud, addr);
70011 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
70012 if (!pmd)
70013 return -ENOMEM;
70014 do {
70015 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
70016 unsigned long next;
70017
70018 phys_addr -= addr;
70019 - pud = pud_alloc(&init_mm, pgd, addr);
70020 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
70021 if (!pud)
70022 return -ENOMEM;
70023 do {
70024 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70025 index bd2bea9..6b3c95e 100644
70026 --- a/lib/is_single_threaded.c
70027 +++ b/lib/is_single_threaded.c
70028 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70029 struct task_struct *p, *t;
70030 bool ret;
70031
70032 + if (!mm)
70033 + return true;
70034 +
70035 if (atomic_read(&task->signal->live) != 1)
70036 return false;
70037
70038 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70039 index 3ac50dc..240bb7e 100644
70040 --- a/lib/radix-tree.c
70041 +++ b/lib/radix-tree.c
70042 @@ -79,7 +79,7 @@ struct radix_tree_preload {
70043 int nr;
70044 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70045 };
70046 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70047 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70048
70049 static inline void *ptr_to_indirect(void *ptr)
70050 {
70051 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70052 index abbabec..d5eba6c 100644
70053 --- a/lib/vsprintf.c
70054 +++ b/lib/vsprintf.c
70055 @@ -16,6 +16,9 @@
70056 * - scnprintf and vscnprintf
70057 */
70058
70059 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70060 +#define __INCLUDED_BY_HIDESYM 1
70061 +#endif
70062 #include <stdarg.h>
70063 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
70064 #include <linux/types.h>
70065 @@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70066 char sym[KSYM_SYMBOL_LEN];
70067 if (ext == 'B')
70068 sprint_backtrace(sym, value);
70069 - else if (ext != 'f' && ext != 's')
70070 + else if (ext != 'f' && ext != 's' && ext != 'a')
70071 sprint_symbol(sym, value);
70072 else
70073 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70074 @@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70075 return number(buf, end, *(const netdev_features_t *)addr, spec);
70076 }
70077
70078 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70079 +int kptr_restrict __read_mostly = 2;
70080 +#else
70081 int kptr_restrict __read_mostly;
70082 +#endif
70083
70084 /*
70085 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70086 @@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
70087 * - 'S' For symbolic direct pointers with offset
70088 * - 's' For symbolic direct pointers without offset
70089 * - 'B' For backtraced symbolic direct pointers with offset
70090 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70091 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70092 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70093 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70094 * - 'M' For a 6-byte MAC address, it prints the address in the
70095 @@ -866,14 +875,25 @@ static noinline_for_stack
70096 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70097 struct printf_spec spec)
70098 {
70099 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70100 + /* 'P' = approved pointers to copy to userland,
70101 + as in the /proc/kallsyms case, as we make it display nothing
70102 + for non-root users, and the real contents for root users
70103 + */
70104 + if (ptr > TASK_SIZE && *fmt != 'P' && is_usercopy_object(buf)) {
70105 + ptr = NULL;
70106 + goto simple;
70107 + }
70108 +#endif
70109 +
70110 if (!ptr && *fmt != 'K') {
70111 /*
70112 - * Print (null) with the same width as a pointer so it makes
70113 + * Print (nil) with the same width as a pointer so it makes
70114 * tabular output look nice.
70115 */
70116 if (spec.field_width == -1)
70117 spec.field_width = 2 * sizeof(void *);
70118 - return string(buf, end, "(null)", spec);
70119 + return string(buf, end, "(nil)", spec);
70120 }
70121
70122 switch (*fmt) {
70123 @@ -883,6 +903,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70124 /* Fallthrough */
70125 case 'S':
70126 case 's':
70127 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70128 + break;
70129 +#else
70130 + return symbol_string(buf, end, ptr, spec, *fmt);
70131 +#endif
70132 + case 'A':
70133 + case 'a':
70134 case 'B':
70135 return symbol_string(buf, end, ptr, spec, *fmt);
70136 case 'R':
70137 @@ -920,6 +947,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70138 va_end(va);
70139 return buf;
70140 }
70141 + case 'P':
70142 + break;
70143 case 'K':
70144 /*
70145 * %pK cannot be used in IRQ context because its test
70146 @@ -942,6 +971,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70147 }
70148 break;
70149 }
70150 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70151 +simple:
70152 +#endif
70153 spec.flags |= SMALL;
70154 if (spec.field_width == -1) {
70155 spec.field_width = 2 * sizeof(void *);
70156 @@ -1653,11 +1685,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70157 typeof(type) value; \
70158 if (sizeof(type) == 8) { \
70159 args = PTR_ALIGN(args, sizeof(u32)); \
70160 - *(u32 *)&value = *(u32 *)args; \
70161 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70162 + *(u32 *)&value = *(const u32 *)args; \
70163 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70164 } else { \
70165 args = PTR_ALIGN(args, sizeof(type)); \
70166 - value = *(typeof(type) *)args; \
70167 + value = *(const typeof(type) *)args; \
70168 } \
70169 args += sizeof(type); \
70170 value; \
70171 @@ -1720,7 +1752,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70172 case FORMAT_TYPE_STR: {
70173 const char *str_arg = args;
70174 args += strlen(str_arg) + 1;
70175 - str = string(str, end, (char *)str_arg, spec);
70176 + str = string(str, end, str_arg, spec);
70177 break;
70178 }
70179
70180 diff --git a/localversion-grsec b/localversion-grsec
70181 new file mode 100644
70182 index 0000000..7cd6065
70183 --- /dev/null
70184 +++ b/localversion-grsec
70185 @@ -0,0 +1 @@
70186 +-grsec
70187 diff --git a/mm/Kconfig b/mm/Kconfig
70188 index e338407..4210331 100644
70189 --- a/mm/Kconfig
70190 +++ b/mm/Kconfig
70191 @@ -247,10 +247,10 @@ config KSM
70192 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70193
70194 config DEFAULT_MMAP_MIN_ADDR
70195 - int "Low address space to protect from user allocation"
70196 + int "Low address space to protect from user allocation"
70197 depends on MMU
70198 - default 4096
70199 - help
70200 + default 65536
70201 + help
70202 This is the portion of low virtual memory which should be protected
70203 from userspace allocation. Keeping a user from writing to low pages
70204 can help reduce the impact of kernel NULL pointer bugs.
70205 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
70206
70207 config HWPOISON_INJECT
70208 tristate "HWPoison pages injector"
70209 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
70210 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
70211 select PROC_PAGE_MONITOR
70212
70213 config NOMMU_INITIAL_TRIM_EXCESS
70214 diff --git a/mm/filemap.c b/mm/filemap.c
70215 index 79c4b2b..596b417 100644
70216 --- a/mm/filemap.c
70217 +++ b/mm/filemap.c
70218 @@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70219 struct address_space *mapping = file->f_mapping;
70220
70221 if (!mapping->a_ops->readpage)
70222 - return -ENOEXEC;
70223 + return -ENODEV;
70224 file_accessed(file);
70225 vma->vm_ops = &generic_file_vm_ops;
70226 vma->vm_flags |= VM_CAN_NONLINEAR;
70227 @@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70228 *pos = i_size_read(inode);
70229
70230 if (limit != RLIM_INFINITY) {
70231 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70232 if (*pos >= limit) {
70233 send_sig(SIGXFSZ, current, 0);
70234 return -EFBIG;
70235 diff --git a/mm/fremap.c b/mm/fremap.c
70236 index 9ed4fd4..c42648d 100644
70237 --- a/mm/fremap.c
70238 +++ b/mm/fremap.c
70239 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70240 retry:
70241 vma = find_vma(mm, start);
70242
70243 +#ifdef CONFIG_PAX_SEGMEXEC
70244 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70245 + goto out;
70246 +#endif
70247 +
70248 /*
70249 * Make sure the vma is shared, that it supports prefaulting,
70250 * and that the remapped range is valid and fully within
70251 diff --git a/mm/highmem.c b/mm/highmem.c
70252 index 57d82c6..e9e0552 100644
70253 --- a/mm/highmem.c
70254 +++ b/mm/highmem.c
70255 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70256 * So no dangers, even with speculative execution.
70257 */
70258 page = pte_page(pkmap_page_table[i]);
70259 + pax_open_kernel();
70260 pte_clear(&init_mm, (unsigned long)page_address(page),
70261 &pkmap_page_table[i]);
70262 -
70263 + pax_close_kernel();
70264 set_page_address(page, NULL);
70265 need_flush = 1;
70266 }
70267 @@ -186,9 +187,11 @@ start:
70268 }
70269 }
70270 vaddr = PKMAP_ADDR(last_pkmap_nr);
70271 +
70272 + pax_open_kernel();
70273 set_pte_at(&init_mm, vaddr,
70274 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70275 -
70276 + pax_close_kernel();
70277 pkmap_count[last_pkmap_nr] = 1;
70278 set_page_address(page, (void *)vaddr);
70279
70280 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70281 index f0e5306..cb9398e 100644
70282 --- a/mm/huge_memory.c
70283 +++ b/mm/huge_memory.c
70284 @@ -733,7 +733,7 @@ out:
70285 * run pte_offset_map on the pmd, if an huge pmd could
70286 * materialize from under us from a different thread.
70287 */
70288 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70289 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70290 return VM_FAULT_OOM;
70291 /* if an huge pmd materialized from under us just retry later */
70292 if (unlikely(pmd_trans_huge(*pmd)))
70293 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70294 index 263e177..3f36aec 100644
70295 --- a/mm/hugetlb.c
70296 +++ b/mm/hugetlb.c
70297 @@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70298 return 1;
70299 }
70300
70301 +#ifdef CONFIG_PAX_SEGMEXEC
70302 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70303 +{
70304 + struct mm_struct *mm = vma->vm_mm;
70305 + struct vm_area_struct *vma_m;
70306 + unsigned long address_m;
70307 + pte_t *ptep_m;
70308 +
70309 + vma_m = pax_find_mirror_vma(vma);
70310 + if (!vma_m)
70311 + return;
70312 +
70313 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70314 + address_m = address + SEGMEXEC_TASK_SIZE;
70315 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70316 + get_page(page_m);
70317 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70318 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70319 +}
70320 +#endif
70321 +
70322 /*
70323 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70324 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70325 @@ -2558,6 +2579,11 @@ retry_avoidcopy:
70326 make_huge_pte(vma, new_page, 1));
70327 page_remove_rmap(old_page);
70328 hugepage_add_new_anon_rmap(new_page, vma, address);
70329 +
70330 +#ifdef CONFIG_PAX_SEGMEXEC
70331 + pax_mirror_huge_pte(vma, address, new_page);
70332 +#endif
70333 +
70334 /* Make the old page be freed below */
70335 new_page = old_page;
70336 mmu_notifier_invalidate_range_end(mm,
70337 @@ -2712,6 +2738,10 @@ retry:
70338 && (vma->vm_flags & VM_SHARED)));
70339 set_huge_pte_at(mm, address, ptep, new_pte);
70340
70341 +#ifdef CONFIG_PAX_SEGMEXEC
70342 + pax_mirror_huge_pte(vma, address, page);
70343 +#endif
70344 +
70345 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70346 /* Optimization, do the COW without a second fault */
70347 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70348 @@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70349 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70350 struct hstate *h = hstate_vma(vma);
70351
70352 +#ifdef CONFIG_PAX_SEGMEXEC
70353 + struct vm_area_struct *vma_m;
70354 +#endif
70355 +
70356 address &= huge_page_mask(h);
70357
70358 ptep = huge_pte_offset(mm, address);
70359 @@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70360 VM_FAULT_SET_HINDEX(h - hstates);
70361 }
70362
70363 +#ifdef CONFIG_PAX_SEGMEXEC
70364 + vma_m = pax_find_mirror_vma(vma);
70365 + if (vma_m) {
70366 + unsigned long address_m;
70367 +
70368 + if (vma->vm_start > vma_m->vm_start) {
70369 + address_m = address;
70370 + address -= SEGMEXEC_TASK_SIZE;
70371 + vma = vma_m;
70372 + h = hstate_vma(vma);
70373 + } else
70374 + address_m = address + SEGMEXEC_TASK_SIZE;
70375 +
70376 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70377 + return VM_FAULT_OOM;
70378 + address_m &= HPAGE_MASK;
70379 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70380 + }
70381 +#endif
70382 +
70383 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70384 if (!ptep)
70385 return VM_FAULT_OOM;
70386 diff --git a/mm/internal.h b/mm/internal.h
70387 index 2189af4..f2ca332 100644
70388 --- a/mm/internal.h
70389 +++ b/mm/internal.h
70390 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70391 * in mm/page_alloc.c
70392 */
70393 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70394 +extern void free_compound_page(struct page *page);
70395 extern void prep_compound_page(struct page *page, unsigned long order);
70396 #ifdef CONFIG_MEMORY_FAILURE
70397 extern bool is_free_buddy_page(struct page *page);
70398 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70399 index 45eb621..6ccd8ea 100644
70400 --- a/mm/kmemleak.c
70401 +++ b/mm/kmemleak.c
70402 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70403
70404 for (i = 0; i < object->trace_len; i++) {
70405 void *ptr = (void *)object->trace[i];
70406 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70407 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70408 }
70409 }
70410
70411 diff --git a/mm/maccess.c b/mm/maccess.c
70412 index d53adf9..03a24bf 100644
70413 --- a/mm/maccess.c
70414 +++ b/mm/maccess.c
70415 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70416 set_fs(KERNEL_DS);
70417 pagefault_disable();
70418 ret = __copy_from_user_inatomic(dst,
70419 - (__force const void __user *)src, size);
70420 + (const void __force_user *)src, size);
70421 pagefault_enable();
70422 set_fs(old_fs);
70423
70424 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70425
70426 set_fs(KERNEL_DS);
70427 pagefault_disable();
70428 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70429 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70430 pagefault_enable();
70431 set_fs(old_fs);
70432
70433 diff --git a/mm/madvise.c b/mm/madvise.c
70434 index 55f645c..cde5320 100644
70435 --- a/mm/madvise.c
70436 +++ b/mm/madvise.c
70437 @@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70438 pgoff_t pgoff;
70439 unsigned long new_flags = vma->vm_flags;
70440
70441 +#ifdef CONFIG_PAX_SEGMEXEC
70442 + struct vm_area_struct *vma_m;
70443 +#endif
70444 +
70445 switch (behavior) {
70446 case MADV_NORMAL:
70447 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70448 @@ -117,6 +121,13 @@ success:
70449 /*
70450 * vm_flags is protected by the mmap_sem held in write mode.
70451 */
70452 +
70453 +#ifdef CONFIG_PAX_SEGMEXEC
70454 + vma_m = pax_find_mirror_vma(vma);
70455 + if (vma_m)
70456 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70457 +#endif
70458 +
70459 vma->vm_flags = new_flags;
70460
70461 out:
70462 @@ -175,6 +186,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70463 struct vm_area_struct ** prev,
70464 unsigned long start, unsigned long end)
70465 {
70466 +
70467 +#ifdef CONFIG_PAX_SEGMEXEC
70468 + struct vm_area_struct *vma_m;
70469 +#endif
70470 +
70471 *prev = vma;
70472 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70473 return -EINVAL;
70474 @@ -187,6 +203,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70475 zap_page_range(vma, start, end - start, &details);
70476 } else
70477 zap_page_range(vma, start, end - start, NULL);
70478 +
70479 +#ifdef CONFIG_PAX_SEGMEXEC
70480 + vma_m = pax_find_mirror_vma(vma);
70481 + if (vma_m) {
70482 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70483 + struct zap_details details = {
70484 + .nonlinear_vma = vma_m,
70485 + .last_index = ULONG_MAX,
70486 + };
70487 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70488 + } else
70489 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70490 + }
70491 +#endif
70492 +
70493 return 0;
70494 }
70495
70496 @@ -394,6 +425,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70497 if (end < start)
70498 goto out;
70499
70500 +#ifdef CONFIG_PAX_SEGMEXEC
70501 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70502 + if (end > SEGMEXEC_TASK_SIZE)
70503 + goto out;
70504 + } else
70505 +#endif
70506 +
70507 + if (end > TASK_SIZE)
70508 + goto out;
70509 +
70510 error = 0;
70511 if (end == start)
70512 goto out;
70513 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70514 index 97cc273..6ed703f 100644
70515 --- a/mm/memory-failure.c
70516 +++ b/mm/memory-failure.c
70517 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70518
70519 int sysctl_memory_failure_recovery __read_mostly = 1;
70520
70521 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70522 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70523
70524 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70525
70526 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
70527 pfn, t->comm, t->pid);
70528 si.si_signo = SIGBUS;
70529 si.si_errno = 0;
70530 - si.si_addr = (void *)addr;
70531 + si.si_addr = (void __user *)addr;
70532 #ifdef __ARCH_SI_TRAPNO
70533 si.si_trapno = trapno;
70534 #endif
70535 @@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70536 }
70537
70538 nr_pages = 1 << compound_trans_order(hpage);
70539 - atomic_long_add(nr_pages, &mce_bad_pages);
70540 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70541
70542 /*
70543 * We need/can do nothing about count=0 pages.
70544 @@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70545 if (!PageHWPoison(hpage)
70546 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70547 || (p != hpage && TestSetPageHWPoison(hpage))) {
70548 - atomic_long_sub(nr_pages, &mce_bad_pages);
70549 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70550 return 0;
70551 }
70552 set_page_hwpoison_huge_page(hpage);
70553 @@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70554 }
70555 if (hwpoison_filter(p)) {
70556 if (TestClearPageHWPoison(p))
70557 - atomic_long_sub(nr_pages, &mce_bad_pages);
70558 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70559 unlock_page(hpage);
70560 put_page(hpage);
70561 return 0;
70562 @@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
70563 return 0;
70564 }
70565 if (TestClearPageHWPoison(p))
70566 - atomic_long_sub(nr_pages, &mce_bad_pages);
70567 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70568 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70569 return 0;
70570 }
70571 @@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
70572 */
70573 if (TestClearPageHWPoison(page)) {
70574 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70575 - atomic_long_sub(nr_pages, &mce_bad_pages);
70576 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70577 freeit = 1;
70578 if (PageHuge(page))
70579 clear_page_hwpoison_huge_page(page);
70580 @@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70581 }
70582 done:
70583 if (!PageHWPoison(hpage))
70584 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70585 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70586 set_page_hwpoison_huge_page(hpage);
70587 dequeue_hwpoisoned_huge_page(hpage);
70588 /* keep elevated page count for bad page */
70589 @@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
70590 return ret;
70591
70592 done:
70593 - atomic_long_add(1, &mce_bad_pages);
70594 + atomic_long_add_unchecked(1, &mce_bad_pages);
70595 SetPageHWPoison(page);
70596 /* keep elevated page count for bad page */
70597 return ret;
70598 diff --git a/mm/memory.c b/mm/memory.c
70599 index 6105f47..3363489 100644
70600 --- a/mm/memory.c
70601 +++ b/mm/memory.c
70602 @@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70603 return;
70604
70605 pmd = pmd_offset(pud, start);
70606 +
70607 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70608 pud_clear(pud);
70609 pmd_free_tlb(tlb, pmd, start);
70610 +#endif
70611 +
70612 }
70613
70614 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70615 @@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70616 if (end - 1 > ceiling - 1)
70617 return;
70618
70619 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70620 pud = pud_offset(pgd, start);
70621 pgd_clear(pgd);
70622 pud_free_tlb(tlb, pud, start);
70623 +#endif
70624 +
70625 }
70626
70627 /*
70628 @@ -1597,12 +1604,6 @@ no_page_table:
70629 return page;
70630 }
70631
70632 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70633 -{
70634 - return stack_guard_page_start(vma, addr) ||
70635 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70636 -}
70637 -
70638 /**
70639 * __get_user_pages() - pin user pages in memory
70640 * @tsk: task_struct of target task
70641 @@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70642 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70643 i = 0;
70644
70645 - do {
70646 + while (nr_pages) {
70647 struct vm_area_struct *vma;
70648
70649 - vma = find_extend_vma(mm, start);
70650 + vma = find_vma(mm, start);
70651 if (!vma && in_gate_area(mm, start)) {
70652 unsigned long pg = start & PAGE_MASK;
70653 pgd_t *pgd;
70654 @@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70655 goto next_page;
70656 }
70657
70658 - if (!vma ||
70659 + if (!vma || start < vma->vm_start ||
70660 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70661 !(vm_flags & vma->vm_flags))
70662 return i ? : -EFAULT;
70663 @@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70664 int ret;
70665 unsigned int fault_flags = 0;
70666
70667 - /* For mlock, just skip the stack guard page. */
70668 - if (foll_flags & FOLL_MLOCK) {
70669 - if (stack_guard_page(vma, start))
70670 - goto next_page;
70671 - }
70672 if (foll_flags & FOLL_WRITE)
70673 fault_flags |= FAULT_FLAG_WRITE;
70674 if (nonblocking)
70675 @@ -1831,7 +1827,7 @@ next_page:
70676 start += PAGE_SIZE;
70677 nr_pages--;
70678 } while (nr_pages && start < vma->vm_end);
70679 - } while (nr_pages);
70680 + }
70681 return i;
70682 }
70683 EXPORT_SYMBOL(__get_user_pages);
70684 @@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70685 page_add_file_rmap(page);
70686 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70687
70688 +#ifdef CONFIG_PAX_SEGMEXEC
70689 + pax_mirror_file_pte(vma, addr, page, ptl);
70690 +#endif
70691 +
70692 retval = 0;
70693 pte_unmap_unlock(pte, ptl);
70694 return retval;
70695 @@ -2072,10 +2072,22 @@ out:
70696 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70697 struct page *page)
70698 {
70699 +
70700 +#ifdef CONFIG_PAX_SEGMEXEC
70701 + struct vm_area_struct *vma_m;
70702 +#endif
70703 +
70704 if (addr < vma->vm_start || addr >= vma->vm_end)
70705 return -EFAULT;
70706 if (!page_count(page))
70707 return -EINVAL;
70708 +
70709 +#ifdef CONFIG_PAX_SEGMEXEC
70710 + vma_m = pax_find_mirror_vma(vma);
70711 + if (vma_m)
70712 + vma_m->vm_flags |= VM_INSERTPAGE;
70713 +#endif
70714 +
70715 vma->vm_flags |= VM_INSERTPAGE;
70716 return insert_page(vma, addr, page, vma->vm_page_prot);
70717 }
70718 @@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70719 unsigned long pfn)
70720 {
70721 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70722 + BUG_ON(vma->vm_mirror);
70723
70724 if (addr < vma->vm_start || addr >= vma->vm_end)
70725 return -EFAULT;
70726 @@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
70727
70728 BUG_ON(pud_huge(*pud));
70729
70730 - pmd = pmd_alloc(mm, pud, addr);
70731 + pmd = (mm == &init_mm) ?
70732 + pmd_alloc_kernel(mm, pud, addr) :
70733 + pmd_alloc(mm, pud, addr);
70734 if (!pmd)
70735 return -ENOMEM;
70736 do {
70737 @@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
70738 unsigned long next;
70739 int err;
70740
70741 - pud = pud_alloc(mm, pgd, addr);
70742 + pud = (mm == &init_mm) ?
70743 + pud_alloc_kernel(mm, pgd, addr) :
70744 + pud_alloc(mm, pgd, addr);
70745 if (!pud)
70746 return -ENOMEM;
70747 do {
70748 @@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70749 copy_user_highpage(dst, src, va, vma);
70750 }
70751
70752 +#ifdef CONFIG_PAX_SEGMEXEC
70753 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70754 +{
70755 + struct mm_struct *mm = vma->vm_mm;
70756 + spinlock_t *ptl;
70757 + pte_t *pte, entry;
70758 +
70759 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70760 + entry = *pte;
70761 + if (!pte_present(entry)) {
70762 + if (!pte_none(entry)) {
70763 + BUG_ON(pte_file(entry));
70764 + free_swap_and_cache(pte_to_swp_entry(entry));
70765 + pte_clear_not_present_full(mm, address, pte, 0);
70766 + }
70767 + } else {
70768 + struct page *page;
70769 +
70770 + flush_cache_page(vma, address, pte_pfn(entry));
70771 + entry = ptep_clear_flush(vma, address, pte);
70772 + BUG_ON(pte_dirty(entry));
70773 + page = vm_normal_page(vma, address, entry);
70774 + if (page) {
70775 + update_hiwater_rss(mm);
70776 + if (PageAnon(page))
70777 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70778 + else
70779 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70780 + page_remove_rmap(page);
70781 + page_cache_release(page);
70782 + }
70783 + }
70784 + pte_unmap_unlock(pte, ptl);
70785 +}
70786 +
70787 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70788 + *
70789 + * the ptl of the lower mapped page is held on entry and is not released on exit
70790 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70791 + */
70792 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70793 +{
70794 + struct mm_struct *mm = vma->vm_mm;
70795 + unsigned long address_m;
70796 + spinlock_t *ptl_m;
70797 + struct vm_area_struct *vma_m;
70798 + pmd_t *pmd_m;
70799 + pte_t *pte_m, entry_m;
70800 +
70801 + BUG_ON(!page_m || !PageAnon(page_m));
70802 +
70803 + vma_m = pax_find_mirror_vma(vma);
70804 + if (!vma_m)
70805 + return;
70806 +
70807 + BUG_ON(!PageLocked(page_m));
70808 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70809 + address_m = address + SEGMEXEC_TASK_SIZE;
70810 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70811 + pte_m = pte_offset_map(pmd_m, address_m);
70812 + ptl_m = pte_lockptr(mm, pmd_m);
70813 + if (ptl != ptl_m) {
70814 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70815 + if (!pte_none(*pte_m))
70816 + goto out;
70817 + }
70818 +
70819 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70820 + page_cache_get(page_m);
70821 + page_add_anon_rmap(page_m, vma_m, address_m);
70822 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70823 + set_pte_at(mm, address_m, pte_m, entry_m);
70824 + update_mmu_cache(vma_m, address_m, entry_m);
70825 +out:
70826 + if (ptl != ptl_m)
70827 + spin_unlock(ptl_m);
70828 + pte_unmap(pte_m);
70829 + unlock_page(page_m);
70830 +}
70831 +
70832 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70833 +{
70834 + struct mm_struct *mm = vma->vm_mm;
70835 + unsigned long address_m;
70836 + spinlock_t *ptl_m;
70837 + struct vm_area_struct *vma_m;
70838 + pmd_t *pmd_m;
70839 + pte_t *pte_m, entry_m;
70840 +
70841 + BUG_ON(!page_m || PageAnon(page_m));
70842 +
70843 + vma_m = pax_find_mirror_vma(vma);
70844 + if (!vma_m)
70845 + return;
70846 +
70847 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70848 + address_m = address + SEGMEXEC_TASK_SIZE;
70849 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70850 + pte_m = pte_offset_map(pmd_m, address_m);
70851 + ptl_m = pte_lockptr(mm, pmd_m);
70852 + if (ptl != ptl_m) {
70853 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70854 + if (!pte_none(*pte_m))
70855 + goto out;
70856 + }
70857 +
70858 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70859 + page_cache_get(page_m);
70860 + page_add_file_rmap(page_m);
70861 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70862 + set_pte_at(mm, address_m, pte_m, entry_m);
70863 + update_mmu_cache(vma_m, address_m, entry_m);
70864 +out:
70865 + if (ptl != ptl_m)
70866 + spin_unlock(ptl_m);
70867 + pte_unmap(pte_m);
70868 +}
70869 +
70870 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70871 +{
70872 + struct mm_struct *mm = vma->vm_mm;
70873 + unsigned long address_m;
70874 + spinlock_t *ptl_m;
70875 + struct vm_area_struct *vma_m;
70876 + pmd_t *pmd_m;
70877 + pte_t *pte_m, entry_m;
70878 +
70879 + vma_m = pax_find_mirror_vma(vma);
70880 + if (!vma_m)
70881 + return;
70882 +
70883 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70884 + address_m = address + SEGMEXEC_TASK_SIZE;
70885 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70886 + pte_m = pte_offset_map(pmd_m, address_m);
70887 + ptl_m = pte_lockptr(mm, pmd_m);
70888 + if (ptl != ptl_m) {
70889 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70890 + if (!pte_none(*pte_m))
70891 + goto out;
70892 + }
70893 +
70894 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70895 + set_pte_at(mm, address_m, pte_m, entry_m);
70896 +out:
70897 + if (ptl != ptl_m)
70898 + spin_unlock(ptl_m);
70899 + pte_unmap(pte_m);
70900 +}
70901 +
70902 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70903 +{
70904 + struct page *page_m;
70905 + pte_t entry;
70906 +
70907 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70908 + goto out;
70909 +
70910 + entry = *pte;
70911 + page_m = vm_normal_page(vma, address, entry);
70912 + if (!page_m)
70913 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70914 + else if (PageAnon(page_m)) {
70915 + if (pax_find_mirror_vma(vma)) {
70916 + pte_unmap_unlock(pte, ptl);
70917 + lock_page(page_m);
70918 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70919 + if (pte_same(entry, *pte))
70920 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70921 + else
70922 + unlock_page(page_m);
70923 + }
70924 + } else
70925 + pax_mirror_file_pte(vma, address, page_m, ptl);
70926 +
70927 +out:
70928 + pte_unmap_unlock(pte, ptl);
70929 +}
70930 +#endif
70931 +
70932 /*
70933 * This routine handles present pages, when users try to write
70934 * to a shared page. It is done by copying the page to a new address
70935 @@ -2687,6 +2884,12 @@ gotten:
70936 */
70937 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70938 if (likely(pte_same(*page_table, orig_pte))) {
70939 +
70940 +#ifdef CONFIG_PAX_SEGMEXEC
70941 + if (pax_find_mirror_vma(vma))
70942 + BUG_ON(!trylock_page(new_page));
70943 +#endif
70944 +
70945 if (old_page) {
70946 if (!PageAnon(old_page)) {
70947 dec_mm_counter_fast(mm, MM_FILEPAGES);
70948 @@ -2738,6 +2941,10 @@ gotten:
70949 page_remove_rmap(old_page);
70950 }
70951
70952 +#ifdef CONFIG_PAX_SEGMEXEC
70953 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70954 +#endif
70955 +
70956 /* Free the old page.. */
70957 new_page = old_page;
70958 ret |= VM_FAULT_WRITE;
70959 @@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70960 swap_free(entry);
70961 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70962 try_to_free_swap(page);
70963 +
70964 +#ifdef CONFIG_PAX_SEGMEXEC
70965 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70966 +#endif
70967 +
70968 unlock_page(page);
70969 if (swapcache) {
70970 /*
70971 @@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70972
70973 /* No need to invalidate - it was non-present before */
70974 update_mmu_cache(vma, address, page_table);
70975 +
70976 +#ifdef CONFIG_PAX_SEGMEXEC
70977 + pax_mirror_anon_pte(vma, address, page, ptl);
70978 +#endif
70979 +
70980 unlock:
70981 pte_unmap_unlock(page_table, ptl);
70982 out:
70983 @@ -3059,40 +3276,6 @@ out_release:
70984 }
70985
70986 /*
70987 - * This is like a special single-page "expand_{down|up}wards()",
70988 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70989 - * doesn't hit another vma.
70990 - */
70991 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70992 -{
70993 - address &= PAGE_MASK;
70994 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70995 - struct vm_area_struct *prev = vma->vm_prev;
70996 -
70997 - /*
70998 - * Is there a mapping abutting this one below?
70999 - *
71000 - * That's only ok if it's the same stack mapping
71001 - * that has gotten split..
71002 - */
71003 - if (prev && prev->vm_end == address)
71004 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71005 -
71006 - expand_downwards(vma, address - PAGE_SIZE);
71007 - }
71008 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71009 - struct vm_area_struct *next = vma->vm_next;
71010 -
71011 - /* As VM_GROWSDOWN but s/below/above/ */
71012 - if (next && next->vm_start == address + PAGE_SIZE)
71013 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71014 -
71015 - expand_upwards(vma, address + PAGE_SIZE);
71016 - }
71017 - return 0;
71018 -}
71019 -
71020 -/*
71021 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71022 * but allow concurrent faults), and pte mapped but not yet locked.
71023 * We return with mmap_sem still held, but pte unmapped and unlocked.
71024 @@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71025 unsigned long address, pte_t *page_table, pmd_t *pmd,
71026 unsigned int flags)
71027 {
71028 - struct page *page;
71029 + struct page *page = NULL;
71030 spinlock_t *ptl;
71031 pte_t entry;
71032
71033 - pte_unmap(page_table);
71034 -
71035 - /* Check if we need to add a guard page to the stack */
71036 - if (check_stack_guard_page(vma, address) < 0)
71037 - return VM_FAULT_SIGBUS;
71038 -
71039 - /* Use the zero-page for reads */
71040 if (!(flags & FAULT_FLAG_WRITE)) {
71041 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71042 vma->vm_page_prot));
71043 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71044 + ptl = pte_lockptr(mm, pmd);
71045 + spin_lock(ptl);
71046 if (!pte_none(*page_table))
71047 goto unlock;
71048 goto setpte;
71049 }
71050
71051 /* Allocate our own private page. */
71052 + pte_unmap(page_table);
71053 +
71054 if (unlikely(anon_vma_prepare(vma)))
71055 goto oom;
71056 page = alloc_zeroed_user_highpage_movable(vma, address);
71057 @@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71058 if (!pte_none(*page_table))
71059 goto release;
71060
71061 +#ifdef CONFIG_PAX_SEGMEXEC
71062 + if (pax_find_mirror_vma(vma))
71063 + BUG_ON(!trylock_page(page));
71064 +#endif
71065 +
71066 inc_mm_counter_fast(mm, MM_ANONPAGES);
71067 page_add_new_anon_rmap(page, vma, address);
71068 setpte:
71069 @@ -3147,6 +3331,12 @@ setpte:
71070
71071 /* No need to invalidate - it was non-present before */
71072 update_mmu_cache(vma, address, page_table);
71073 +
71074 +#ifdef CONFIG_PAX_SEGMEXEC
71075 + if (page)
71076 + pax_mirror_anon_pte(vma, address, page, ptl);
71077 +#endif
71078 +
71079 unlock:
71080 pte_unmap_unlock(page_table, ptl);
71081 return 0;
71082 @@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71083 */
71084 /* Only go through if we didn't race with anybody else... */
71085 if (likely(pte_same(*page_table, orig_pte))) {
71086 +
71087 +#ifdef CONFIG_PAX_SEGMEXEC
71088 + if (anon && pax_find_mirror_vma(vma))
71089 + BUG_ON(!trylock_page(page));
71090 +#endif
71091 +
71092 flush_icache_page(vma, page);
71093 entry = mk_pte(page, vma->vm_page_prot);
71094 if (flags & FAULT_FLAG_WRITE)
71095 @@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71096
71097 /* no need to invalidate: a not-present page won't be cached */
71098 update_mmu_cache(vma, address, page_table);
71099 +
71100 +#ifdef CONFIG_PAX_SEGMEXEC
71101 + if (anon)
71102 + pax_mirror_anon_pte(vma, address, page, ptl);
71103 + else
71104 + pax_mirror_file_pte(vma, address, page, ptl);
71105 +#endif
71106 +
71107 } else {
71108 if (cow_page)
71109 mem_cgroup_uncharge_page(cow_page);
71110 @@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
71111 if (flags & FAULT_FLAG_WRITE)
71112 flush_tlb_fix_spurious_fault(vma, address);
71113 }
71114 +
71115 +#ifdef CONFIG_PAX_SEGMEXEC
71116 + pax_mirror_pte(vma, address, pte, pmd, ptl);
71117 + return 0;
71118 +#endif
71119 +
71120 unlock:
71121 pte_unmap_unlock(pte, ptl);
71122 return 0;
71123 @@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71124 pmd_t *pmd;
71125 pte_t *pte;
71126
71127 +#ifdef CONFIG_PAX_SEGMEXEC
71128 + struct vm_area_struct *vma_m;
71129 +#endif
71130 +
71131 __set_current_state(TASK_RUNNING);
71132
71133 count_vm_event(PGFAULT);
71134 @@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71135 if (unlikely(is_vm_hugetlb_page(vma)))
71136 return hugetlb_fault(mm, vma, address, flags);
71137
71138 +#ifdef CONFIG_PAX_SEGMEXEC
71139 + vma_m = pax_find_mirror_vma(vma);
71140 + if (vma_m) {
71141 + unsigned long address_m;
71142 + pgd_t *pgd_m;
71143 + pud_t *pud_m;
71144 + pmd_t *pmd_m;
71145 +
71146 + if (vma->vm_start > vma_m->vm_start) {
71147 + address_m = address;
71148 + address -= SEGMEXEC_TASK_SIZE;
71149 + vma = vma_m;
71150 + } else
71151 + address_m = address + SEGMEXEC_TASK_SIZE;
71152 +
71153 + pgd_m = pgd_offset(mm, address_m);
71154 + pud_m = pud_alloc(mm, pgd_m, address_m);
71155 + if (!pud_m)
71156 + return VM_FAULT_OOM;
71157 + pmd_m = pmd_alloc(mm, pud_m, address_m);
71158 + if (!pmd_m)
71159 + return VM_FAULT_OOM;
71160 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71161 + return VM_FAULT_OOM;
71162 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71163 + }
71164 +#endif
71165 +
71166 pgd = pgd_offset(mm, address);
71167 pud = pud_alloc(mm, pgd, address);
71168 if (!pud)
71169 @@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71170 * run pte_offset_map on the pmd, if an huge pmd could
71171 * materialize from under us from a different thread.
71172 */
71173 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71174 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71175 return VM_FAULT_OOM;
71176 /* if an huge pmd materialized from under us just retry later */
71177 if (unlikely(pmd_trans_huge(*pmd)))
71178 @@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71179 spin_unlock(&mm->page_table_lock);
71180 return 0;
71181 }
71182 +
71183 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71184 +{
71185 + pud_t *new = pud_alloc_one(mm, address);
71186 + if (!new)
71187 + return -ENOMEM;
71188 +
71189 + smp_wmb(); /* See comment in __pte_alloc */
71190 +
71191 + spin_lock(&mm->page_table_lock);
71192 + if (pgd_present(*pgd)) /* Another has populated it */
71193 + pud_free(mm, new);
71194 + else
71195 + pgd_populate_kernel(mm, pgd, new);
71196 + spin_unlock(&mm->page_table_lock);
71197 + return 0;
71198 +}
71199 #endif /* __PAGETABLE_PUD_FOLDED */
71200
71201 #ifndef __PAGETABLE_PMD_FOLDED
71202 @@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
71203 spin_unlock(&mm->page_table_lock);
71204 return 0;
71205 }
71206 +
71207 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
71208 +{
71209 + pmd_t *new = pmd_alloc_one(mm, address);
71210 + if (!new)
71211 + return -ENOMEM;
71212 +
71213 + smp_wmb(); /* See comment in __pte_alloc */
71214 +
71215 + spin_lock(&mm->page_table_lock);
71216 +#ifndef __ARCH_HAS_4LEVEL_HACK
71217 + if (pud_present(*pud)) /* Another has populated it */
71218 + pmd_free(mm, new);
71219 + else
71220 + pud_populate_kernel(mm, pud, new);
71221 +#else
71222 + if (pgd_present(*pud)) /* Another has populated it */
71223 + pmd_free(mm, new);
71224 + else
71225 + pgd_populate_kernel(mm, pud, new);
71226 +#endif /* __ARCH_HAS_4LEVEL_HACK */
71227 + spin_unlock(&mm->page_table_lock);
71228 + return 0;
71229 +}
71230 #endif /* __PAGETABLE_PMD_FOLDED */
71231
71232 int make_pages_present(unsigned long addr, unsigned long end)
71233 @@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
71234 gate_vma.vm_start = FIXADDR_USER_START;
71235 gate_vma.vm_end = FIXADDR_USER_END;
71236 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71237 - gate_vma.vm_page_prot = __P101;
71238 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71239
71240 return 0;
71241 }
71242 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71243 index bf5b485..e44c2cb 100644
71244 --- a/mm/mempolicy.c
71245 +++ b/mm/mempolicy.c
71246 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71247 unsigned long vmstart;
71248 unsigned long vmend;
71249
71250 +#ifdef CONFIG_PAX_SEGMEXEC
71251 + struct vm_area_struct *vma_m;
71252 +#endif
71253 +
71254 vma = find_vma(mm, start);
71255 if (!vma || vma->vm_start > start)
71256 return -EFAULT;
71257 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71258 if (err)
71259 goto out;
71260 }
71261 +
71262 +#ifdef CONFIG_PAX_SEGMEXEC
71263 + vma_m = pax_find_mirror_vma(vma);
71264 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
71265 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
71266 + if (err)
71267 + goto out;
71268 + }
71269 +#endif
71270 +
71271 }
71272
71273 out:
71274 @@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71275
71276 if (end < start)
71277 return -EINVAL;
71278 +
71279 +#ifdef CONFIG_PAX_SEGMEXEC
71280 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71281 + if (end > SEGMEXEC_TASK_SIZE)
71282 + return -EINVAL;
71283 + } else
71284 +#endif
71285 +
71286 + if (end > TASK_SIZE)
71287 + return -EINVAL;
71288 +
71289 if (end == start)
71290 return 0;
71291
71292 @@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71293 */
71294 tcred = __task_cred(task);
71295 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71296 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71297 - !capable(CAP_SYS_NICE)) {
71298 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71299 rcu_read_unlock();
71300 err = -EPERM;
71301 goto out_put;
71302 @@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71303 goto out;
71304 }
71305
71306 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71307 + if (mm != current->mm &&
71308 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71309 + mmput(mm);
71310 + err = -EPERM;
71311 + goto out;
71312 + }
71313 +#endif
71314 +
71315 err = do_migrate_pages(mm, old, new,
71316 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
71317
71318 diff --git a/mm/mlock.c b/mm/mlock.c
71319 index ef726e8..13e0901 100644
71320 --- a/mm/mlock.c
71321 +++ b/mm/mlock.c
71322 @@ -13,6 +13,7 @@
71323 #include <linux/pagemap.h>
71324 #include <linux/mempolicy.h>
71325 #include <linux/syscalls.h>
71326 +#include <linux/security.h>
71327 #include <linux/sched.h>
71328 #include <linux/export.h>
71329 #include <linux/rmap.h>
71330 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71331 return -EINVAL;
71332 if (end == start)
71333 return 0;
71334 + if (end > TASK_SIZE)
71335 + return -EINVAL;
71336 +
71337 vma = find_vma(current->mm, start);
71338 if (!vma || vma->vm_start > start)
71339 return -ENOMEM;
71340 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71341 for (nstart = start ; ; ) {
71342 vm_flags_t newflags;
71343
71344 +#ifdef CONFIG_PAX_SEGMEXEC
71345 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71346 + break;
71347 +#endif
71348 +
71349 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71350
71351 newflags = vma->vm_flags | VM_LOCKED;
71352 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71353 lock_limit >>= PAGE_SHIFT;
71354
71355 /* check against resource limits */
71356 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71357 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71358 error = do_mlock(start, len, 1);
71359 up_write(&current->mm->mmap_sem);
71360 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71361 static int do_mlockall(int flags)
71362 {
71363 struct vm_area_struct * vma, * prev = NULL;
71364 - unsigned int def_flags = 0;
71365
71366 if (flags & MCL_FUTURE)
71367 - def_flags = VM_LOCKED;
71368 - current->mm->def_flags = def_flags;
71369 + current->mm->def_flags |= VM_LOCKED;
71370 + else
71371 + current->mm->def_flags &= ~VM_LOCKED;
71372 if (flags == MCL_FUTURE)
71373 goto out;
71374
71375 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71376 vm_flags_t newflags;
71377
71378 +#ifdef CONFIG_PAX_SEGMEXEC
71379 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71380 + break;
71381 +#endif
71382 +
71383 + BUG_ON(vma->vm_end > TASK_SIZE);
71384 newflags = vma->vm_flags | VM_LOCKED;
71385 if (!(flags & MCL_CURRENT))
71386 newflags &= ~VM_LOCKED;
71387 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71388 lock_limit >>= PAGE_SHIFT;
71389
71390 ret = -ENOMEM;
71391 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71392 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71393 capable(CAP_IPC_LOCK))
71394 ret = do_mlockall(flags);
71395 diff --git a/mm/mmap.c b/mm/mmap.c
71396 index 848ef52..d2b586c 100644
71397 --- a/mm/mmap.c
71398 +++ b/mm/mmap.c
71399 @@ -46,6 +46,16 @@
71400 #define arch_rebalance_pgtables(addr, len) (addr)
71401 #endif
71402
71403 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71404 +{
71405 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71406 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71407 + up_read(&mm->mmap_sem);
71408 + BUG();
71409 + }
71410 +#endif
71411 +}
71412 +
71413 static void unmap_region(struct mm_struct *mm,
71414 struct vm_area_struct *vma, struct vm_area_struct *prev,
71415 unsigned long start, unsigned long end);
71416 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71417 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71418 *
71419 */
71420 -pgprot_t protection_map[16] = {
71421 +pgprot_t protection_map[16] __read_only = {
71422 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71423 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71424 };
71425
71426 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71427 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71428 {
71429 - return __pgprot(pgprot_val(protection_map[vm_flags &
71430 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71431 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71432 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71433 +
71434 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71435 + if (!(__supported_pte_mask & _PAGE_NX) &&
71436 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71437 + (vm_flags & (VM_READ | VM_WRITE)))
71438 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71439 +#endif
71440 +
71441 + return prot;
71442 }
71443 EXPORT_SYMBOL(vm_get_page_prot);
71444
71445 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71446 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71447 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71448 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71449 /*
71450 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71451 * other variables. It can be updated by several CPUs frequently.
71452 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71453 struct vm_area_struct *next = vma->vm_next;
71454
71455 might_sleep();
71456 + BUG_ON(vma->vm_mirror);
71457 if (vma->vm_ops && vma->vm_ops->close)
71458 vma->vm_ops->close(vma);
71459 if (vma->vm_file) {
71460 @@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71461 * not page aligned -Ram Gupta
71462 */
71463 rlim = rlimit(RLIMIT_DATA);
71464 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71465 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71466 (mm->end_data - mm->start_data) > rlim)
71467 goto out;
71468 @@ -690,6 +712,12 @@ static int
71469 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71470 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71471 {
71472 +
71473 +#ifdef CONFIG_PAX_SEGMEXEC
71474 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71475 + return 0;
71476 +#endif
71477 +
71478 if (is_mergeable_vma(vma, file, vm_flags) &&
71479 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71480 if (vma->vm_pgoff == vm_pgoff)
71481 @@ -709,6 +737,12 @@ static int
71482 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71483 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71484 {
71485 +
71486 +#ifdef CONFIG_PAX_SEGMEXEC
71487 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71488 + return 0;
71489 +#endif
71490 +
71491 if (is_mergeable_vma(vma, file, vm_flags) &&
71492 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71493 pgoff_t vm_pglen;
71494 @@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71495 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71496 struct vm_area_struct *prev, unsigned long addr,
71497 unsigned long end, unsigned long vm_flags,
71498 - struct anon_vma *anon_vma, struct file *file,
71499 + struct anon_vma *anon_vma, struct file *file,
71500 pgoff_t pgoff, struct mempolicy *policy)
71501 {
71502 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71503 struct vm_area_struct *area, *next;
71504 int err;
71505
71506 +#ifdef CONFIG_PAX_SEGMEXEC
71507 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71508 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71509 +
71510 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71511 +#endif
71512 +
71513 /*
71514 * We later require that vma->vm_flags == vm_flags,
71515 * so this tests vma->vm_flags & VM_SPECIAL, too.
71516 @@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71517 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71518 next = next->vm_next;
71519
71520 +#ifdef CONFIG_PAX_SEGMEXEC
71521 + if (prev)
71522 + prev_m = pax_find_mirror_vma(prev);
71523 + if (area)
71524 + area_m = pax_find_mirror_vma(area);
71525 + if (next)
71526 + next_m = pax_find_mirror_vma(next);
71527 +#endif
71528 +
71529 /*
71530 * Can it merge with the predecessor?
71531 */
71532 @@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71533 /* cases 1, 6 */
71534 err = vma_adjust(prev, prev->vm_start,
71535 next->vm_end, prev->vm_pgoff, NULL);
71536 - } else /* cases 2, 5, 7 */
71537 +
71538 +#ifdef CONFIG_PAX_SEGMEXEC
71539 + if (!err && prev_m)
71540 + err = vma_adjust(prev_m, prev_m->vm_start,
71541 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71542 +#endif
71543 +
71544 + } else { /* cases 2, 5, 7 */
71545 err = vma_adjust(prev, prev->vm_start,
71546 end, prev->vm_pgoff, NULL);
71547 +
71548 +#ifdef CONFIG_PAX_SEGMEXEC
71549 + if (!err && prev_m)
71550 + err = vma_adjust(prev_m, prev_m->vm_start,
71551 + end_m, prev_m->vm_pgoff, NULL);
71552 +#endif
71553 +
71554 + }
71555 if (err)
71556 return NULL;
71557 khugepaged_enter_vma_merge(prev);
71558 @@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71559 mpol_equal(policy, vma_policy(next)) &&
71560 can_vma_merge_before(next, vm_flags,
71561 anon_vma, file, pgoff+pglen)) {
71562 - if (prev && addr < prev->vm_end) /* case 4 */
71563 + if (prev && addr < prev->vm_end) { /* case 4 */
71564 err = vma_adjust(prev, prev->vm_start,
71565 addr, prev->vm_pgoff, NULL);
71566 - else /* cases 3, 8 */
71567 +
71568 +#ifdef CONFIG_PAX_SEGMEXEC
71569 + if (!err && prev_m)
71570 + err = vma_adjust(prev_m, prev_m->vm_start,
71571 + addr_m, prev_m->vm_pgoff, NULL);
71572 +#endif
71573 +
71574 + } else { /* cases 3, 8 */
71575 err = vma_adjust(area, addr, next->vm_end,
71576 next->vm_pgoff - pglen, NULL);
71577 +
71578 +#ifdef CONFIG_PAX_SEGMEXEC
71579 + if (!err && area_m)
71580 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71581 + next_m->vm_pgoff - pglen, NULL);
71582 +#endif
71583 +
71584 + }
71585 if (err)
71586 return NULL;
71587 khugepaged_enter_vma_merge(area);
71588 @@ -922,14 +1002,11 @@ none:
71589 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71590 struct file *file, long pages)
71591 {
71592 - const unsigned long stack_flags
71593 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71594 -
71595 if (file) {
71596 mm->shared_vm += pages;
71597 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71598 mm->exec_vm += pages;
71599 - } else if (flags & stack_flags)
71600 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71601 mm->stack_vm += pages;
71602 if (flags & (VM_RESERVED|VM_IO))
71603 mm->reserved_vm += pages;
71604 @@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71605 * (the exception is when the underlying filesystem is noexec
71606 * mounted, in which case we dont add PROT_EXEC.)
71607 */
71608 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71609 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71610 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71611 prot |= PROT_EXEC;
71612
71613 @@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71614 /* Obtain the address to map to. we verify (or select) it and ensure
71615 * that it represents a valid section of the address space.
71616 */
71617 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71618 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71619 if (addr & ~PAGE_MASK)
71620 return addr;
71621
71622 @@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71623 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71624 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71625
71626 +#ifdef CONFIG_PAX_MPROTECT
71627 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71628 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71629 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71630 + gr_log_rwxmmap(file);
71631 +
71632 +#ifdef CONFIG_PAX_EMUPLT
71633 + vm_flags &= ~VM_EXEC;
71634 +#else
71635 + return -EPERM;
71636 +#endif
71637 +
71638 + }
71639 +
71640 + if (!(vm_flags & VM_EXEC))
71641 + vm_flags &= ~VM_MAYEXEC;
71642 +#else
71643 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71644 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71645 +#endif
71646 + else
71647 + vm_flags &= ~VM_MAYWRITE;
71648 + }
71649 +#endif
71650 +
71651 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71652 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71653 + vm_flags &= ~VM_PAGEEXEC;
71654 +#endif
71655 +
71656 if (flags & MAP_LOCKED)
71657 if (!can_do_mlock())
71658 return -EPERM;
71659 @@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71660 locked += mm->locked_vm;
71661 lock_limit = rlimit(RLIMIT_MEMLOCK);
71662 lock_limit >>= PAGE_SHIFT;
71663 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71664 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71665 return -EAGAIN;
71666 }
71667 @@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71668 if (error)
71669 return error;
71670
71671 + if (!gr_acl_handle_mmap(file, prot))
71672 + return -EACCES;
71673 +
71674 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71675 }
71676
71677 @@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71678 vm_flags_t vm_flags = vma->vm_flags;
71679
71680 /* If it was private or non-writable, the write bit is already clear */
71681 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71682 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71683 return 0;
71684
71685 /* The backer wishes to know when pages are first written to? */
71686 @@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71687 unsigned long charged = 0;
71688 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71689
71690 +#ifdef CONFIG_PAX_SEGMEXEC
71691 + struct vm_area_struct *vma_m = NULL;
71692 +#endif
71693 +
71694 + /*
71695 + * mm->mmap_sem is required to protect against another thread
71696 + * changing the mappings in case we sleep.
71697 + */
71698 + verify_mm_writelocked(mm);
71699 +
71700 /* Clear old maps */
71701 error = -ENOMEM;
71702 -munmap_back:
71703 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71704 if (vma && vma->vm_start < addr + len) {
71705 if (do_munmap(mm, addr, len))
71706 return -ENOMEM;
71707 - goto munmap_back;
71708 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71709 + BUG_ON(vma && vma->vm_start < addr + len);
71710 }
71711
71712 /* Check against address space limit. */
71713 @@ -1297,6 +1418,16 @@ munmap_back:
71714 goto unacct_error;
71715 }
71716
71717 +#ifdef CONFIG_PAX_SEGMEXEC
71718 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71719 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71720 + if (!vma_m) {
71721 + error = -ENOMEM;
71722 + goto free_vma;
71723 + }
71724 + }
71725 +#endif
71726 +
71727 vma->vm_mm = mm;
71728 vma->vm_start = addr;
71729 vma->vm_end = addr + len;
71730 @@ -1321,6 +1452,19 @@ munmap_back:
71731 error = file->f_op->mmap(file, vma);
71732 if (error)
71733 goto unmap_and_free_vma;
71734 +
71735 +#ifdef CONFIG_PAX_SEGMEXEC
71736 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71737 + added_exe_file_vma(mm);
71738 +#endif
71739 +
71740 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71741 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71742 + vma->vm_flags |= VM_PAGEEXEC;
71743 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71744 + }
71745 +#endif
71746 +
71747 if (vm_flags & VM_EXECUTABLE)
71748 added_exe_file_vma(mm);
71749
71750 @@ -1358,6 +1502,11 @@ munmap_back:
71751 vma_link(mm, vma, prev, rb_link, rb_parent);
71752 file = vma->vm_file;
71753
71754 +#ifdef CONFIG_PAX_SEGMEXEC
71755 + if (vma_m)
71756 + BUG_ON(pax_mirror_vma(vma_m, vma));
71757 +#endif
71758 +
71759 /* Once vma denies write, undo our temporary denial count */
71760 if (correct_wcount)
71761 atomic_inc(&inode->i_writecount);
71762 @@ -1366,6 +1515,7 @@ out:
71763
71764 mm->total_vm += len >> PAGE_SHIFT;
71765 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71766 + track_exec_limit(mm, addr, addr + len, vm_flags);
71767 if (vm_flags & VM_LOCKED) {
71768 if (!mlock_vma_pages_range(vma, addr, addr + len))
71769 mm->locked_vm += (len >> PAGE_SHIFT);
71770 @@ -1383,6 +1533,12 @@ unmap_and_free_vma:
71771 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71772 charged = 0;
71773 free_vma:
71774 +
71775 +#ifdef CONFIG_PAX_SEGMEXEC
71776 + if (vma_m)
71777 + kmem_cache_free(vm_area_cachep, vma_m);
71778 +#endif
71779 +
71780 kmem_cache_free(vm_area_cachep, vma);
71781 unacct_error:
71782 if (charged)
71783 @@ -1390,6 +1546,44 @@ unacct_error:
71784 return error;
71785 }
71786
71787 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71788 +{
71789 + if (!vma) {
71790 +#ifdef CONFIG_STACK_GROWSUP
71791 + if (addr > sysctl_heap_stack_gap)
71792 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71793 + else
71794 + vma = find_vma(current->mm, 0);
71795 + if (vma && (vma->vm_flags & VM_GROWSUP))
71796 + return false;
71797 +#endif
71798 + return true;
71799 + }
71800 +
71801 + if (addr + len > vma->vm_start)
71802 + return false;
71803 +
71804 + if (vma->vm_flags & VM_GROWSDOWN)
71805 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71806 +#ifdef CONFIG_STACK_GROWSUP
71807 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71808 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71809 +#endif
71810 +
71811 + return true;
71812 +}
71813 +
71814 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71815 +{
71816 + if (vma->vm_start < len)
71817 + return -ENOMEM;
71818 + if (!(vma->vm_flags & VM_GROWSDOWN))
71819 + return vma->vm_start - len;
71820 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71821 + return vma->vm_start - len - sysctl_heap_stack_gap;
71822 + return -ENOMEM;
71823 +}
71824 +
71825 /* Get an address range which is currently unmapped.
71826 * For shmat() with addr=0.
71827 *
71828 @@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71829 if (flags & MAP_FIXED)
71830 return addr;
71831
71832 +#ifdef CONFIG_PAX_RANDMMAP
71833 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71834 +#endif
71835 +
71836 if (addr) {
71837 addr = PAGE_ALIGN(addr);
71838 - vma = find_vma(mm, addr);
71839 - if (TASK_SIZE - len >= addr &&
71840 - (!vma || addr + len <= vma->vm_start))
71841 - return addr;
71842 + if (TASK_SIZE - len >= addr) {
71843 + vma = find_vma(mm, addr);
71844 + if (check_heap_stack_gap(vma, addr, len))
71845 + return addr;
71846 + }
71847 }
71848 if (len > mm->cached_hole_size) {
71849 - start_addr = addr = mm->free_area_cache;
71850 + start_addr = addr = mm->free_area_cache;
71851 } else {
71852 - start_addr = addr = TASK_UNMAPPED_BASE;
71853 - mm->cached_hole_size = 0;
71854 + start_addr = addr = mm->mmap_base;
71855 + mm->cached_hole_size = 0;
71856 }
71857
71858 full_search:
71859 @@ -1438,34 +1637,40 @@ full_search:
71860 * Start a new search - just in case we missed
71861 * some holes.
71862 */
71863 - if (start_addr != TASK_UNMAPPED_BASE) {
71864 - addr = TASK_UNMAPPED_BASE;
71865 - start_addr = addr;
71866 + if (start_addr != mm->mmap_base) {
71867 + start_addr = addr = mm->mmap_base;
71868 mm->cached_hole_size = 0;
71869 goto full_search;
71870 }
71871 return -ENOMEM;
71872 }
71873 - if (!vma || addr + len <= vma->vm_start) {
71874 - /*
71875 - * Remember the place where we stopped the search:
71876 - */
71877 - mm->free_area_cache = addr + len;
71878 - return addr;
71879 - }
71880 + if (check_heap_stack_gap(vma, addr, len))
71881 + break;
71882 if (addr + mm->cached_hole_size < vma->vm_start)
71883 mm->cached_hole_size = vma->vm_start - addr;
71884 addr = vma->vm_end;
71885 }
71886 +
71887 + /*
71888 + * Remember the place where we stopped the search:
71889 + */
71890 + mm->free_area_cache = addr + len;
71891 + return addr;
71892 }
71893 #endif
71894
71895 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71896 {
71897 +
71898 +#ifdef CONFIG_PAX_SEGMEXEC
71899 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71900 + return;
71901 +#endif
71902 +
71903 /*
71904 * Is this a new hole at the lowest possible address?
71905 */
71906 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71907 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71908 mm->free_area_cache = addr;
71909 }
71910
71911 @@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71912 {
71913 struct vm_area_struct *vma;
71914 struct mm_struct *mm = current->mm;
71915 - unsigned long addr = addr0, start_addr;
71916 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71917
71918 /* requested length too big for entire address space */
71919 if (len > TASK_SIZE)
71920 @@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71921 if (flags & MAP_FIXED)
71922 return addr;
71923
71924 +#ifdef CONFIG_PAX_RANDMMAP
71925 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71926 +#endif
71927 +
71928 /* requesting a specific address */
71929 if (addr) {
71930 addr = PAGE_ALIGN(addr);
71931 - vma = find_vma(mm, addr);
71932 - if (TASK_SIZE - len >= addr &&
71933 - (!vma || addr + len <= vma->vm_start))
71934 - return addr;
71935 + if (TASK_SIZE - len >= addr) {
71936 + vma = find_vma(mm, addr);
71937 + if (check_heap_stack_gap(vma, addr, len))
71938 + return addr;
71939 + }
71940 }
71941
71942 /* check if free_area_cache is useful for us */
71943 @@ -1520,7 +1730,7 @@ try_again:
71944 * return with success:
71945 */
71946 vma = find_vma(mm, addr);
71947 - if (!vma || addr+len <= vma->vm_start)
71948 + if (check_heap_stack_gap(vma, addr, len))
71949 /* remember the address as a hint for next time */
71950 return (mm->free_area_cache = addr);
71951
71952 @@ -1529,8 +1739,8 @@ try_again:
71953 mm->cached_hole_size = vma->vm_start - addr;
71954
71955 /* try just below the current vma->vm_start */
71956 - addr = vma->vm_start-len;
71957 - } while (len < vma->vm_start);
71958 + addr = skip_heap_stack_gap(vma, len);
71959 + } while (!IS_ERR_VALUE(addr));
71960
71961 fail:
71962 /*
71963 @@ -1553,13 +1763,21 @@ fail:
71964 * can happen with large stack limits and large mmap()
71965 * allocations.
71966 */
71967 + mm->mmap_base = TASK_UNMAPPED_BASE;
71968 +
71969 +#ifdef CONFIG_PAX_RANDMMAP
71970 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71971 + mm->mmap_base += mm->delta_mmap;
71972 +#endif
71973 +
71974 + mm->free_area_cache = mm->mmap_base;
71975 mm->cached_hole_size = ~0UL;
71976 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71977 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71978 /*
71979 * Restore the topdown base:
71980 */
71981 - mm->free_area_cache = mm->mmap_base;
71982 + mm->mmap_base = base;
71983 + mm->free_area_cache = base;
71984 mm->cached_hole_size = ~0UL;
71985
71986 return addr;
71987 @@ -1568,6 +1786,12 @@ fail:
71988
71989 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71990 {
71991 +
71992 +#ifdef CONFIG_PAX_SEGMEXEC
71993 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71994 + return;
71995 +#endif
71996 +
71997 /*
71998 * Is this a new hole at the highest possible address?
71999 */
72000 @@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72001 mm->free_area_cache = addr;
72002
72003 /* dont allow allocations above current base */
72004 - if (mm->free_area_cache > mm->mmap_base)
72005 + if (mm->free_area_cache > mm->mmap_base) {
72006 mm->free_area_cache = mm->mmap_base;
72007 + mm->cached_hole_size = ~0UL;
72008 + }
72009 }
72010
72011 unsigned long
72012 @@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
72013 return vma;
72014 }
72015
72016 +#ifdef CONFIG_PAX_SEGMEXEC
72017 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72018 +{
72019 + struct vm_area_struct *vma_m;
72020 +
72021 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72022 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72023 + BUG_ON(vma->vm_mirror);
72024 + return NULL;
72025 + }
72026 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72027 + vma_m = vma->vm_mirror;
72028 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72029 + BUG_ON(vma->vm_file != vma_m->vm_file);
72030 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
72031 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72032 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72033 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72034 + return vma_m;
72035 +}
72036 +#endif
72037 +
72038 /*
72039 * Verify that the stack growth is acceptable and
72040 * update accounting. This is shared with both the
72041 @@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72042 return -ENOMEM;
72043
72044 /* Stack limit test */
72045 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
72046 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72047 return -ENOMEM;
72048
72049 @@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72050 locked = mm->locked_vm + grow;
72051 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72052 limit >>= PAGE_SHIFT;
72053 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72054 if (locked > limit && !capable(CAP_IPC_LOCK))
72055 return -ENOMEM;
72056 }
72057 @@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72058 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72059 * vma is the last one with address > vma->vm_end. Have to extend vma.
72060 */
72061 +#ifndef CONFIG_IA64
72062 +static
72063 +#endif
72064 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72065 {
72066 int error;
72067 + bool locknext;
72068
72069 if (!(vma->vm_flags & VM_GROWSUP))
72070 return -EFAULT;
72071
72072 + /* Also guard against wrapping around to address 0. */
72073 + if (address < PAGE_ALIGN(address+1))
72074 + address = PAGE_ALIGN(address+1);
72075 + else
72076 + return -ENOMEM;
72077 +
72078 /*
72079 * We must make sure the anon_vma is allocated
72080 * so that the anon_vma locking is not a noop.
72081 */
72082 if (unlikely(anon_vma_prepare(vma)))
72083 return -ENOMEM;
72084 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72085 + if (locknext && anon_vma_prepare(vma->vm_next))
72086 + return -ENOMEM;
72087 vma_lock_anon_vma(vma);
72088 + if (locknext)
72089 + vma_lock_anon_vma(vma->vm_next);
72090
72091 /*
72092 * vma->vm_start/vm_end cannot change under us because the caller
72093 * is required to hold the mmap_sem in read mode. We need the
72094 - * anon_vma lock to serialize against concurrent expand_stacks.
72095 - * Also guard against wrapping around to address 0.
72096 + * anon_vma locks to serialize against concurrent expand_stacks
72097 + * and expand_upwards.
72098 */
72099 - if (address < PAGE_ALIGN(address+4))
72100 - address = PAGE_ALIGN(address+4);
72101 - else {
72102 - vma_unlock_anon_vma(vma);
72103 - return -ENOMEM;
72104 - }
72105 error = 0;
72106
72107 /* Somebody else might have raced and expanded it already */
72108 - if (address > vma->vm_end) {
72109 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72110 + error = -ENOMEM;
72111 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72112 unsigned long size, grow;
72113
72114 size = address - vma->vm_start;
72115 @@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72116 }
72117 }
72118 }
72119 + if (locknext)
72120 + vma_unlock_anon_vma(vma->vm_next);
72121 vma_unlock_anon_vma(vma);
72122 khugepaged_enter_vma_merge(vma);
72123 return error;
72124 @@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
72125 unsigned long address)
72126 {
72127 int error;
72128 + bool lockprev = false;
72129 + struct vm_area_struct *prev;
72130
72131 /*
72132 * We must make sure the anon_vma is allocated
72133 @@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
72134 if (error)
72135 return error;
72136
72137 + prev = vma->vm_prev;
72138 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72139 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72140 +#endif
72141 + if (lockprev && anon_vma_prepare(prev))
72142 + return -ENOMEM;
72143 + if (lockprev)
72144 + vma_lock_anon_vma(prev);
72145 +
72146 vma_lock_anon_vma(vma);
72147
72148 /*
72149 @@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
72150 */
72151
72152 /* Somebody else might have raced and expanded it already */
72153 - if (address < vma->vm_start) {
72154 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72155 + error = -ENOMEM;
72156 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72157 unsigned long size, grow;
72158
72159 +#ifdef CONFIG_PAX_SEGMEXEC
72160 + struct vm_area_struct *vma_m;
72161 +
72162 + vma_m = pax_find_mirror_vma(vma);
72163 +#endif
72164 +
72165 size = vma->vm_end - address;
72166 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72167
72168 @@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
72169 if (!error) {
72170 vma->vm_start = address;
72171 vma->vm_pgoff -= grow;
72172 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72173 +
72174 +#ifdef CONFIG_PAX_SEGMEXEC
72175 + if (vma_m) {
72176 + vma_m->vm_start -= grow << PAGE_SHIFT;
72177 + vma_m->vm_pgoff -= grow;
72178 + }
72179 +#endif
72180 +
72181 perf_event_mmap(vma);
72182 }
72183 }
72184 }
72185 vma_unlock_anon_vma(vma);
72186 + if (lockprev)
72187 + vma_unlock_anon_vma(prev);
72188 khugepaged_enter_vma_merge(vma);
72189 return error;
72190 }
72191 @@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72192 do {
72193 long nrpages = vma_pages(vma);
72194
72195 +#ifdef CONFIG_PAX_SEGMEXEC
72196 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72197 + vma = remove_vma(vma);
72198 + continue;
72199 + }
72200 +#endif
72201 +
72202 mm->total_vm -= nrpages;
72203 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72204 vma = remove_vma(vma);
72205 @@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72206 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72207 vma->vm_prev = NULL;
72208 do {
72209 +
72210 +#ifdef CONFIG_PAX_SEGMEXEC
72211 + if (vma->vm_mirror) {
72212 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72213 + vma->vm_mirror->vm_mirror = NULL;
72214 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
72215 + vma->vm_mirror = NULL;
72216 + }
72217 +#endif
72218 +
72219 rb_erase(&vma->vm_rb, &mm->mm_rb);
72220 mm->map_count--;
72221 tail_vma = vma;
72222 @@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72223 struct vm_area_struct *new;
72224 int err = -ENOMEM;
72225
72226 +#ifdef CONFIG_PAX_SEGMEXEC
72227 + struct vm_area_struct *vma_m, *new_m = NULL;
72228 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72229 +#endif
72230 +
72231 if (is_vm_hugetlb_page(vma) && (addr &
72232 ~(huge_page_mask(hstate_vma(vma)))))
72233 return -EINVAL;
72234
72235 +#ifdef CONFIG_PAX_SEGMEXEC
72236 + vma_m = pax_find_mirror_vma(vma);
72237 +#endif
72238 +
72239 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72240 if (!new)
72241 goto out_err;
72242
72243 +#ifdef CONFIG_PAX_SEGMEXEC
72244 + if (vma_m) {
72245 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72246 + if (!new_m) {
72247 + kmem_cache_free(vm_area_cachep, new);
72248 + goto out_err;
72249 + }
72250 + }
72251 +#endif
72252 +
72253 /* most fields are the same, copy all, and then fixup */
72254 *new = *vma;
72255
72256 @@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72257 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72258 }
72259
72260 +#ifdef CONFIG_PAX_SEGMEXEC
72261 + if (vma_m) {
72262 + *new_m = *vma_m;
72263 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
72264 + new_m->vm_mirror = new;
72265 + new->vm_mirror = new_m;
72266 +
72267 + if (new_below)
72268 + new_m->vm_end = addr_m;
72269 + else {
72270 + new_m->vm_start = addr_m;
72271 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72272 + }
72273 + }
72274 +#endif
72275 +
72276 pol = mpol_dup(vma_policy(vma));
72277 if (IS_ERR(pol)) {
72278 err = PTR_ERR(pol);
72279 @@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72280 else
72281 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72282
72283 +#ifdef CONFIG_PAX_SEGMEXEC
72284 + if (!err && vma_m) {
72285 + if (anon_vma_clone(new_m, vma_m))
72286 + goto out_free_mpol;
72287 +
72288 + mpol_get(pol);
72289 + vma_set_policy(new_m, pol);
72290 +
72291 + if (new_m->vm_file) {
72292 + get_file(new_m->vm_file);
72293 + if (vma_m->vm_flags & VM_EXECUTABLE)
72294 + added_exe_file_vma(mm);
72295 + }
72296 +
72297 + if (new_m->vm_ops && new_m->vm_ops->open)
72298 + new_m->vm_ops->open(new_m);
72299 +
72300 + if (new_below)
72301 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72302 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72303 + else
72304 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72305 +
72306 + if (err) {
72307 + if (new_m->vm_ops && new_m->vm_ops->close)
72308 + new_m->vm_ops->close(new_m);
72309 + if (new_m->vm_file) {
72310 + if (vma_m->vm_flags & VM_EXECUTABLE)
72311 + removed_exe_file_vma(mm);
72312 + fput(new_m->vm_file);
72313 + }
72314 + mpol_put(pol);
72315 + }
72316 + }
72317 +#endif
72318 +
72319 /* Success. */
72320 if (!err)
72321 return 0;
72322 @@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72323 removed_exe_file_vma(mm);
72324 fput(new->vm_file);
72325 }
72326 - unlink_anon_vmas(new);
72327 out_free_mpol:
72328 mpol_put(pol);
72329 out_free_vma:
72330 +
72331 +#ifdef CONFIG_PAX_SEGMEXEC
72332 + if (new_m) {
72333 + unlink_anon_vmas(new_m);
72334 + kmem_cache_free(vm_area_cachep, new_m);
72335 + }
72336 +#endif
72337 +
72338 + unlink_anon_vmas(new);
72339 kmem_cache_free(vm_area_cachep, new);
72340 out_err:
72341 return err;
72342 @@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72343 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72344 unsigned long addr, int new_below)
72345 {
72346 +
72347 +#ifdef CONFIG_PAX_SEGMEXEC
72348 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72349 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72350 + if (mm->map_count >= sysctl_max_map_count-1)
72351 + return -ENOMEM;
72352 + } else
72353 +#endif
72354 +
72355 if (mm->map_count >= sysctl_max_map_count)
72356 return -ENOMEM;
72357
72358 @@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72359 * work. This now handles partial unmappings.
72360 * Jeremy Fitzhardinge <jeremy@goop.org>
72361 */
72362 +#ifdef CONFIG_PAX_SEGMEXEC
72363 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72364 {
72365 + int ret = __do_munmap(mm, start, len);
72366 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72367 + return ret;
72368 +
72369 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72370 +}
72371 +
72372 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72373 +#else
72374 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72375 +#endif
72376 +{
72377 unsigned long end;
72378 struct vm_area_struct *vma, *prev, *last;
72379
72380 + /*
72381 + * mm->mmap_sem is required to protect against another thread
72382 + * changing the mappings in case we sleep.
72383 + */
72384 + verify_mm_writelocked(mm);
72385 +
72386 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72387 return -EINVAL;
72388
72389 @@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72390 /* Fix up all other VM information */
72391 remove_vma_list(mm, vma);
72392
72393 + track_exec_limit(mm, start, end, 0UL);
72394 +
72395 return 0;
72396 }
72397 EXPORT_SYMBOL(do_munmap);
72398 @@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
72399 int ret;
72400 struct mm_struct *mm = current->mm;
72401
72402 +
72403 +#ifdef CONFIG_PAX_SEGMEXEC
72404 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72405 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
72406 + return -EINVAL;
72407 +#endif
72408 +
72409 down_write(&mm->mmap_sem);
72410 ret = do_munmap(mm, start, len);
72411 up_write(&mm->mmap_sem);
72412 @@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72413 return vm_munmap(addr, len);
72414 }
72415
72416 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72417 -{
72418 -#ifdef CONFIG_DEBUG_VM
72419 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72420 - WARN_ON(1);
72421 - up_read(&mm->mmap_sem);
72422 - }
72423 -#endif
72424 -}
72425 -
72426 /*
72427 * this is really a simplified "do_mmap". it only handles
72428 * anonymous maps. eventually we may be able to do some
72429 @@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72430 struct rb_node ** rb_link, * rb_parent;
72431 pgoff_t pgoff = addr >> PAGE_SHIFT;
72432 int error;
72433 + unsigned long charged;
72434
72435 len = PAGE_ALIGN(len);
72436 if (!len)
72437 @@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72438
72439 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72440
72441 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72442 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72443 + flags &= ~VM_EXEC;
72444 +
72445 +#ifdef CONFIG_PAX_MPROTECT
72446 + if (mm->pax_flags & MF_PAX_MPROTECT)
72447 + flags &= ~VM_MAYEXEC;
72448 +#endif
72449 +
72450 + }
72451 +#endif
72452 +
72453 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72454 if (error & ~PAGE_MASK)
72455 return error;
72456
72457 + charged = len >> PAGE_SHIFT;
72458 +
72459 /*
72460 * mlock MCL_FUTURE?
72461 */
72462 if (mm->def_flags & VM_LOCKED) {
72463 unsigned long locked, lock_limit;
72464 - locked = len >> PAGE_SHIFT;
72465 + locked = charged;
72466 locked += mm->locked_vm;
72467 lock_limit = rlimit(RLIMIT_MEMLOCK);
72468 lock_limit >>= PAGE_SHIFT;
72469 @@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72470 /*
72471 * Clear old maps. this also does some error checking for us
72472 */
72473 - munmap_back:
72474 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72475 if (vma && vma->vm_start < addr + len) {
72476 if (do_munmap(mm, addr, len))
72477 return -ENOMEM;
72478 - goto munmap_back;
72479 - }
72480 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72481 + BUG_ON(vma && vma->vm_start < addr + len);
72482 + }
72483
72484 /* Check against address space limits *after* clearing old maps... */
72485 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72486 + if (!may_expand_vm(mm, charged))
72487 return -ENOMEM;
72488
72489 if (mm->map_count > sysctl_max_map_count)
72490 return -ENOMEM;
72491
72492 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
72493 + if (security_vm_enough_memory_mm(mm, charged))
72494 return -ENOMEM;
72495
72496 /* Can we just expand an old private anonymous mapping? */
72497 @@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72498 */
72499 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72500 if (!vma) {
72501 - vm_unacct_memory(len >> PAGE_SHIFT);
72502 + vm_unacct_memory(charged);
72503 return -ENOMEM;
72504 }
72505
72506 @@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72507 vma_link(mm, vma, prev, rb_link, rb_parent);
72508 out:
72509 perf_event_mmap(vma);
72510 - mm->total_vm += len >> PAGE_SHIFT;
72511 + mm->total_vm += charged;
72512 if (flags & VM_LOCKED) {
72513 if (!mlock_vma_pages_range(vma, addr, addr + len))
72514 - mm->locked_vm += (len >> PAGE_SHIFT);
72515 + mm->locked_vm += charged;
72516 }
72517 + track_exec_limit(mm, addr, addr + len, flags);
72518 return addr;
72519 }
72520
72521 @@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
72522 * Walk the list again, actually closing and freeing it,
72523 * with preemption enabled, without holding any MM locks.
72524 */
72525 - while (vma)
72526 + while (vma) {
72527 + vma->vm_mirror = NULL;
72528 vma = remove_vma(vma);
72529 + }
72530
72531 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72532 }
72533 @@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72534 struct vm_area_struct * __vma, * prev;
72535 struct rb_node ** rb_link, * rb_parent;
72536
72537 +#ifdef CONFIG_PAX_SEGMEXEC
72538 + struct vm_area_struct *vma_m = NULL;
72539 +#endif
72540 +
72541 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72542 + return -EPERM;
72543 +
72544 /*
72545 * The vm_pgoff of a purely anonymous vma should be irrelevant
72546 * until its first write fault, when page's anon_vma and index
72547 @@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72548 if ((vma->vm_flags & VM_ACCOUNT) &&
72549 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72550 return -ENOMEM;
72551 +
72552 +#ifdef CONFIG_PAX_SEGMEXEC
72553 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72554 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72555 + if (!vma_m)
72556 + return -ENOMEM;
72557 + }
72558 +#endif
72559 +
72560 vma_link(mm, vma, prev, rb_link, rb_parent);
72561 +
72562 +#ifdef CONFIG_PAX_SEGMEXEC
72563 + if (vma_m)
72564 + BUG_ON(pax_mirror_vma(vma_m, vma));
72565 +#endif
72566 +
72567 return 0;
72568 }
72569
72570 @@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72571 struct mempolicy *pol;
72572 bool faulted_in_anon_vma = true;
72573
72574 + BUG_ON(vma->vm_mirror);
72575 +
72576 /*
72577 * If anonymous vma has not yet been faulted, update new pgoff
72578 * to match new location, to increase its chance of merging.
72579 @@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72580 return NULL;
72581 }
72582
72583 +#ifdef CONFIG_PAX_SEGMEXEC
72584 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72585 +{
72586 + struct vm_area_struct *prev_m;
72587 + struct rb_node **rb_link_m, *rb_parent_m;
72588 + struct mempolicy *pol_m;
72589 +
72590 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72591 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72592 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72593 + *vma_m = *vma;
72594 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72595 + if (anon_vma_clone(vma_m, vma))
72596 + return -ENOMEM;
72597 + pol_m = vma_policy(vma_m);
72598 + mpol_get(pol_m);
72599 + vma_set_policy(vma_m, pol_m);
72600 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72601 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72602 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72603 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72604 + if (vma_m->vm_file)
72605 + get_file(vma_m->vm_file);
72606 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72607 + vma_m->vm_ops->open(vma_m);
72608 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72609 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72610 + vma_m->vm_mirror = vma;
72611 + vma->vm_mirror = vma_m;
72612 + return 0;
72613 +}
72614 +#endif
72615 +
72616 /*
72617 * Return true if the calling process may expand its vm space by the passed
72618 * number of pages
72619 @@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72620
72621 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72622
72623 +#ifdef CONFIG_PAX_RANDMMAP
72624 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72625 + cur -= mm->brk_gap;
72626 +#endif
72627 +
72628 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72629 if (cur + npages > lim)
72630 return 0;
72631 return 1;
72632 @@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
72633 vma->vm_start = addr;
72634 vma->vm_end = addr + len;
72635
72636 +#ifdef CONFIG_PAX_MPROTECT
72637 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72638 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72639 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72640 + return -EPERM;
72641 + if (!(vm_flags & VM_EXEC))
72642 + vm_flags &= ~VM_MAYEXEC;
72643 +#else
72644 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72645 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72646 +#endif
72647 + else
72648 + vm_flags &= ~VM_MAYWRITE;
72649 + }
72650 +#endif
72651 +
72652 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72653 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72654
72655 diff --git a/mm/mprotect.c b/mm/mprotect.c
72656 index a409926..8b32e6d 100644
72657 --- a/mm/mprotect.c
72658 +++ b/mm/mprotect.c
72659 @@ -23,10 +23,17 @@
72660 #include <linux/mmu_notifier.h>
72661 #include <linux/migrate.h>
72662 #include <linux/perf_event.h>
72663 +
72664 +#ifdef CONFIG_PAX_MPROTECT
72665 +#include <linux/elf.h>
72666 +#include <linux/binfmts.h>
72667 +#endif
72668 +
72669 #include <asm/uaccess.h>
72670 #include <asm/pgtable.h>
72671 #include <asm/cacheflush.h>
72672 #include <asm/tlbflush.h>
72673 +#include <asm/mmu_context.h>
72674
72675 #ifndef pgprot_modify
72676 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72677 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
72678 flush_tlb_range(vma, start, end);
72679 }
72680
72681 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72682 +/* called while holding the mmap semaphor for writing except stack expansion */
72683 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72684 +{
72685 + unsigned long oldlimit, newlimit = 0UL;
72686 +
72687 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72688 + return;
72689 +
72690 + spin_lock(&mm->page_table_lock);
72691 + oldlimit = mm->context.user_cs_limit;
72692 + if ((prot & VM_EXEC) && oldlimit < end)
72693 + /* USER_CS limit moved up */
72694 + newlimit = end;
72695 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72696 + /* USER_CS limit moved down */
72697 + newlimit = start;
72698 +
72699 + if (newlimit) {
72700 + mm->context.user_cs_limit = newlimit;
72701 +
72702 +#ifdef CONFIG_SMP
72703 + wmb();
72704 + cpus_clear(mm->context.cpu_user_cs_mask);
72705 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72706 +#endif
72707 +
72708 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72709 + }
72710 + spin_unlock(&mm->page_table_lock);
72711 + if (newlimit == end) {
72712 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72713 +
72714 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72715 + if (is_vm_hugetlb_page(vma))
72716 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72717 + else
72718 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72719 + }
72720 +}
72721 +#endif
72722 +
72723 int
72724 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72725 unsigned long start, unsigned long end, unsigned long newflags)
72726 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72727 int error;
72728 int dirty_accountable = 0;
72729
72730 +#ifdef CONFIG_PAX_SEGMEXEC
72731 + struct vm_area_struct *vma_m = NULL;
72732 + unsigned long start_m, end_m;
72733 +
72734 + start_m = start + SEGMEXEC_TASK_SIZE;
72735 + end_m = end + SEGMEXEC_TASK_SIZE;
72736 +#endif
72737 +
72738 if (newflags == oldflags) {
72739 *pprev = vma;
72740 return 0;
72741 }
72742
72743 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72744 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72745 +
72746 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72747 + return -ENOMEM;
72748 +
72749 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72750 + return -ENOMEM;
72751 + }
72752 +
72753 /*
72754 * If we make a private mapping writable we increase our commit;
72755 * but (without finer accounting) cannot reduce our commit if we
72756 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72757 }
72758 }
72759
72760 +#ifdef CONFIG_PAX_SEGMEXEC
72761 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72762 + if (start != vma->vm_start) {
72763 + error = split_vma(mm, vma, start, 1);
72764 + if (error)
72765 + goto fail;
72766 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72767 + *pprev = (*pprev)->vm_next;
72768 + }
72769 +
72770 + if (end != vma->vm_end) {
72771 + error = split_vma(mm, vma, end, 0);
72772 + if (error)
72773 + goto fail;
72774 + }
72775 +
72776 + if (pax_find_mirror_vma(vma)) {
72777 + error = __do_munmap(mm, start_m, end_m - start_m);
72778 + if (error)
72779 + goto fail;
72780 + } else {
72781 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72782 + if (!vma_m) {
72783 + error = -ENOMEM;
72784 + goto fail;
72785 + }
72786 + vma->vm_flags = newflags;
72787 + error = pax_mirror_vma(vma_m, vma);
72788 + if (error) {
72789 + vma->vm_flags = oldflags;
72790 + goto fail;
72791 + }
72792 + }
72793 + }
72794 +#endif
72795 +
72796 /*
72797 * First try to merge with previous and/or next vma.
72798 */
72799 @@ -204,9 +307,21 @@ success:
72800 * vm_flags and vm_page_prot are protected by the mmap_sem
72801 * held in write mode.
72802 */
72803 +
72804 +#ifdef CONFIG_PAX_SEGMEXEC
72805 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72806 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72807 +#endif
72808 +
72809 vma->vm_flags = newflags;
72810 +
72811 +#ifdef CONFIG_PAX_MPROTECT
72812 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72813 + mm->binfmt->handle_mprotect(vma, newflags);
72814 +#endif
72815 +
72816 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72817 - vm_get_page_prot(newflags));
72818 + vm_get_page_prot(vma->vm_flags));
72819
72820 if (vma_wants_writenotify(vma)) {
72821 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72822 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72823 end = start + len;
72824 if (end <= start)
72825 return -ENOMEM;
72826 +
72827 +#ifdef CONFIG_PAX_SEGMEXEC
72828 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72829 + if (end > SEGMEXEC_TASK_SIZE)
72830 + return -EINVAL;
72831 + } else
72832 +#endif
72833 +
72834 + if (end > TASK_SIZE)
72835 + return -EINVAL;
72836 +
72837 if (!arch_validate_prot(prot))
72838 return -EINVAL;
72839
72840 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72841 /*
72842 * Does the application expect PROT_READ to imply PROT_EXEC:
72843 */
72844 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72845 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72846 prot |= PROT_EXEC;
72847
72848 vm_flags = calc_vm_prot_bits(prot);
72849 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72850 if (start > vma->vm_start)
72851 prev = vma;
72852
72853 +#ifdef CONFIG_PAX_MPROTECT
72854 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72855 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72856 +#endif
72857 +
72858 for (nstart = start ; ; ) {
72859 unsigned long newflags;
72860
72861 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72862
72863 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72864 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72865 + if (prot & (PROT_WRITE | PROT_EXEC))
72866 + gr_log_rwxmprotect(vma->vm_file);
72867 +
72868 + error = -EACCES;
72869 + goto out;
72870 + }
72871 +
72872 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72873 error = -EACCES;
72874 goto out;
72875 }
72876 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72877 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72878 if (error)
72879 goto out;
72880 +
72881 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72882 +
72883 nstart = tmp;
72884
72885 if (nstart < prev->vm_end)
72886 diff --git a/mm/mremap.c b/mm/mremap.c
72887 index db8d983..76506cb 100644
72888 --- a/mm/mremap.c
72889 +++ b/mm/mremap.c
72890 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72891 continue;
72892 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72893 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72894 +
72895 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72896 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72897 + pte = pte_exprotect(pte);
72898 +#endif
72899 +
72900 set_pte_at(mm, new_addr, new_pte, pte);
72901 }
72902
72903 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72904 if (is_vm_hugetlb_page(vma))
72905 goto Einval;
72906
72907 +#ifdef CONFIG_PAX_SEGMEXEC
72908 + if (pax_find_mirror_vma(vma))
72909 + goto Einval;
72910 +#endif
72911 +
72912 /* We can't remap across vm area boundaries */
72913 if (old_len > vma->vm_end - addr)
72914 goto Efault;
72915 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72916 unsigned long ret = -EINVAL;
72917 unsigned long charged = 0;
72918 unsigned long map_flags;
72919 + unsigned long pax_task_size = TASK_SIZE;
72920
72921 if (new_addr & ~PAGE_MASK)
72922 goto out;
72923
72924 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72925 +#ifdef CONFIG_PAX_SEGMEXEC
72926 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72927 + pax_task_size = SEGMEXEC_TASK_SIZE;
72928 +#endif
72929 +
72930 + pax_task_size -= PAGE_SIZE;
72931 +
72932 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72933 goto out;
72934
72935 /* Check if the location we're moving into overlaps the
72936 * old location at all, and fail if it does.
72937 */
72938 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72939 - goto out;
72940 -
72941 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72942 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72943 goto out;
72944
72945 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72946 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72947 struct vm_area_struct *vma;
72948 unsigned long ret = -EINVAL;
72949 unsigned long charged = 0;
72950 + unsigned long pax_task_size = TASK_SIZE;
72951
72952 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72953 goto out;
72954 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72955 if (!new_len)
72956 goto out;
72957
72958 +#ifdef CONFIG_PAX_SEGMEXEC
72959 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72960 + pax_task_size = SEGMEXEC_TASK_SIZE;
72961 +#endif
72962 +
72963 + pax_task_size -= PAGE_SIZE;
72964 +
72965 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72966 + old_len > pax_task_size || addr > pax_task_size-old_len)
72967 + goto out;
72968 +
72969 if (flags & MREMAP_FIXED) {
72970 if (flags & MREMAP_MAYMOVE)
72971 ret = mremap_to(addr, old_len, new_addr, new_len);
72972 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72973 addr + new_len);
72974 }
72975 ret = addr;
72976 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72977 goto out;
72978 }
72979 }
72980 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72981 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72982 if (ret)
72983 goto out;
72984 +
72985 + map_flags = vma->vm_flags;
72986 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72987 + if (!(ret & ~PAGE_MASK)) {
72988 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72989 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72990 + }
72991 }
72992 out:
72993 if (ret & ~PAGE_MASK)
72994 diff --git a/mm/nommu.c b/mm/nommu.c
72995 index bb8f4f0..40d3e02 100644
72996 --- a/mm/nommu.c
72997 +++ b/mm/nommu.c
72998 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72999 int sysctl_overcommit_ratio = 50; /* default is 50% */
73000 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73001 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73002 -int heap_stack_gap = 0;
73003
73004 atomic_long_t mmap_pages_allocated;
73005
73006 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
73007 EXPORT_SYMBOL(find_vma);
73008
73009 /*
73010 - * find a VMA
73011 - * - we don't extend stack VMAs under NOMMU conditions
73012 - */
73013 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73014 -{
73015 - return find_vma(mm, addr);
73016 -}
73017 -
73018 -/*
73019 * expand a stack to a given address
73020 * - not supported under NOMMU conditions
73021 */
73022 @@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73023
73024 /* most fields are the same, copy all, and then fixup */
73025 *new = *vma;
73026 + INIT_LIST_HEAD(&new->anon_vma_chain);
73027 *region = *vma->vm_region;
73028 new->vm_region = region;
73029
73030 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
73031 index 918330f..ae99ae1 100644
73032 --- a/mm/page_alloc.c
73033 +++ b/mm/page_alloc.c
73034 @@ -335,7 +335,7 @@ out:
73035 * This usage means that zero-order pages may not be compound.
73036 */
73037
73038 -static void free_compound_page(struct page *page)
73039 +void free_compound_page(struct page *page)
73040 {
73041 __free_pages_ok(page, compound_order(page));
73042 }
73043 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73044 int i;
73045 int bad = 0;
73046
73047 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73048 + unsigned long index = 1UL << order;
73049 +#endif
73050 +
73051 trace_mm_page_free(page, order);
73052 kmemcheck_free_shadow(page, order);
73053
73054 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73055 debug_check_no_obj_freed(page_address(page),
73056 PAGE_SIZE << order);
73057 }
73058 +
73059 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73060 + for (; index; --index)
73061 + sanitize_highpage(page + index - 1);
73062 +#endif
73063 +
73064 arch_free_page(page, order);
73065 kernel_map_pages(page, 1 << order, 0);
73066
73067 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73068 arch_alloc_page(page, order);
73069 kernel_map_pages(page, 1 << order, 1);
73070
73071 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
73072 if (gfp_flags & __GFP_ZERO)
73073 prep_zero_page(page, order, gfp_flags);
73074 +#endif
73075
73076 if (order && (gfp_flags & __GFP_COMP))
73077 prep_compound_page(page, order);
73078 @@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73079 unsigned long pfn;
73080
73081 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73082 +#ifdef CONFIG_X86_32
73083 + /* boot failures in VMware 8 on 32bit vanilla since
73084 + this change */
73085 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73086 +#else
73087 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73088 +#endif
73089 return 1;
73090 }
73091 return 0;
73092 diff --git a/mm/percpu.c b/mm/percpu.c
73093 index bb4be74..a43ea85 100644
73094 --- a/mm/percpu.c
73095 +++ b/mm/percpu.c
73096 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73097 static unsigned int pcpu_high_unit_cpu __read_mostly;
73098
73099 /* the address of the first chunk which starts with the kernel static area */
73100 -void *pcpu_base_addr __read_mostly;
73101 +void *pcpu_base_addr __read_only;
73102 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73103
73104 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73105 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73106 index c20ff48..137702a 100644
73107 --- a/mm/process_vm_access.c
73108 +++ b/mm/process_vm_access.c
73109 @@ -13,6 +13,7 @@
73110 #include <linux/uio.h>
73111 #include <linux/sched.h>
73112 #include <linux/highmem.h>
73113 +#include <linux/security.h>
73114 #include <linux/ptrace.h>
73115 #include <linux/slab.h>
73116 #include <linux/syscalls.h>
73117 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73118 size_t iov_l_curr_offset = 0;
73119 ssize_t iov_len;
73120
73121 + return -ENOSYS; // PaX: until properly audited
73122 +
73123 /*
73124 * Work out how many pages of struct pages we're going to need
73125 * when eventually calling get_user_pages
73126 */
73127 for (i = 0; i < riovcnt; i++) {
73128 iov_len = rvec[i].iov_len;
73129 - if (iov_len > 0) {
73130 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
73131 - + iov_len)
73132 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73133 - / PAGE_SIZE + 1;
73134 - nr_pages = max(nr_pages, nr_pages_iov);
73135 - }
73136 + if (iov_len <= 0)
73137 + continue;
73138 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73139 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73140 + nr_pages = max(nr_pages, nr_pages_iov);
73141 }
73142
73143 if (nr_pages == 0)
73144 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73145 goto free_proc_pages;
73146 }
73147
73148 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73149 + rc = -EPERM;
73150 + goto put_task_struct;
73151 + }
73152 +
73153 mm = mm_access(task, PTRACE_MODE_ATTACH);
73154 if (!mm || IS_ERR(mm)) {
73155 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73156 diff --git a/mm/rmap.c b/mm/rmap.c
73157 index 5b5ad58..0f77903 100644
73158 --- a/mm/rmap.c
73159 +++ b/mm/rmap.c
73160 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73161 struct anon_vma *anon_vma = vma->anon_vma;
73162 struct anon_vma_chain *avc;
73163
73164 +#ifdef CONFIG_PAX_SEGMEXEC
73165 + struct anon_vma_chain *avc_m = NULL;
73166 +#endif
73167 +
73168 might_sleep();
73169 if (unlikely(!anon_vma)) {
73170 struct mm_struct *mm = vma->vm_mm;
73171 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73172 if (!avc)
73173 goto out_enomem;
73174
73175 +#ifdef CONFIG_PAX_SEGMEXEC
73176 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73177 + if (!avc_m)
73178 + goto out_enomem_free_avc;
73179 +#endif
73180 +
73181 anon_vma = find_mergeable_anon_vma(vma);
73182 allocated = NULL;
73183 if (!anon_vma) {
73184 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73185 /* page_table_lock to protect against threads */
73186 spin_lock(&mm->page_table_lock);
73187 if (likely(!vma->anon_vma)) {
73188 +
73189 +#ifdef CONFIG_PAX_SEGMEXEC
73190 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73191 +
73192 + if (vma_m) {
73193 + BUG_ON(vma_m->anon_vma);
73194 + vma_m->anon_vma = anon_vma;
73195 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
73196 + avc_m = NULL;
73197 + }
73198 +#endif
73199 +
73200 vma->anon_vma = anon_vma;
73201 anon_vma_chain_link(vma, avc, anon_vma);
73202 allocated = NULL;
73203 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73204
73205 if (unlikely(allocated))
73206 put_anon_vma(allocated);
73207 +
73208 +#ifdef CONFIG_PAX_SEGMEXEC
73209 + if (unlikely(avc_m))
73210 + anon_vma_chain_free(avc_m);
73211 +#endif
73212 +
73213 if (unlikely(avc))
73214 anon_vma_chain_free(avc);
73215 }
73216 return 0;
73217
73218 out_enomem_free_avc:
73219 +
73220 +#ifdef CONFIG_PAX_SEGMEXEC
73221 + if (avc_m)
73222 + anon_vma_chain_free(avc_m);
73223 +#endif
73224 +
73225 anon_vma_chain_free(avc);
73226 out_enomem:
73227 return -ENOMEM;
73228 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
73229 * Attach the anon_vmas from src to dst.
73230 * Returns 0 on success, -ENOMEM on failure.
73231 */
73232 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73233 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73234 {
73235 struct anon_vma_chain *avc, *pavc;
73236 struct anon_vma *root = NULL;
73237 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73238 * the corresponding VMA in the parent process is attached to.
73239 * Returns 0 on success, non-zero on failure.
73240 */
73241 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73242 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73243 {
73244 struct anon_vma_chain *avc;
73245 struct anon_vma *anon_vma;
73246 diff --git a/mm/shmem.c b/mm/shmem.c
73247 index 9d65a02..7c877e7 100644
73248 --- a/mm/shmem.c
73249 +++ b/mm/shmem.c
73250 @@ -31,7 +31,7 @@
73251 #include <linux/export.h>
73252 #include <linux/swap.h>
73253
73254 -static struct vfsmount *shm_mnt;
73255 +struct vfsmount *shm_mnt;
73256
73257 #ifdef CONFIG_SHMEM
73258 /*
73259 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73260 #define BOGO_DIRENT_SIZE 20
73261
73262 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73263 -#define SHORT_SYMLINK_LEN 128
73264 +#define SHORT_SYMLINK_LEN 64
73265
73266 struct shmem_xattr {
73267 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73268 @@ -2236,8 +2236,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73269 int err = -ENOMEM;
73270
73271 /* Round up to L1_CACHE_BYTES to resist false sharing */
73272 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73273 - L1_CACHE_BYTES), GFP_KERNEL);
73274 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73275 if (!sbinfo)
73276 return -ENOMEM;
73277
73278 diff --git a/mm/slab.c b/mm/slab.c
73279 index e901a36..ca479fc 100644
73280 --- a/mm/slab.c
73281 +++ b/mm/slab.c
73282 @@ -153,7 +153,7 @@
73283
73284 /* Legal flag mask for kmem_cache_create(). */
73285 #if DEBUG
73286 -# define CREATE_MASK (SLAB_RED_ZONE | \
73287 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73288 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73289 SLAB_CACHE_DMA | \
73290 SLAB_STORE_USER | \
73291 @@ -161,7 +161,7 @@
73292 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73293 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73294 #else
73295 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73296 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73297 SLAB_CACHE_DMA | \
73298 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73299 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73300 @@ -290,7 +290,7 @@ struct kmem_list3 {
73301 * Need this for bootstrapping a per node allocator.
73302 */
73303 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73304 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73305 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73306 #define CACHE_CACHE 0
73307 #define SIZE_AC MAX_NUMNODES
73308 #define SIZE_L3 (2 * MAX_NUMNODES)
73309 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73310 if ((x)->max_freeable < i) \
73311 (x)->max_freeable = i; \
73312 } while (0)
73313 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73314 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73315 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73316 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73317 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73318 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73319 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73320 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73321 #else
73322 #define STATS_INC_ACTIVE(x) do { } while (0)
73323 #define STATS_DEC_ACTIVE(x) do { } while (0)
73324 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73325 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73326 */
73327 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73328 - const struct slab *slab, void *obj)
73329 + const struct slab *slab, const void *obj)
73330 {
73331 u32 offset = (obj - slab->s_mem);
73332 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73333 @@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
73334 struct cache_names {
73335 char *name;
73336 char *name_dma;
73337 + char *name_usercopy;
73338 };
73339
73340 static struct cache_names __initdata cache_names[] = {
73341 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73342 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
73343 #include <linux/kmalloc_sizes.h>
73344 - {NULL,}
73345 + {NULL}
73346 #undef CACHE
73347 };
73348
73349 @@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
73350 if (unlikely(gfpflags & GFP_DMA))
73351 return csizep->cs_dmacachep;
73352 #endif
73353 +
73354 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73355 + if (unlikely(gfpflags & GFP_USERCOPY))
73356 + return csizep->cs_usercopycachep;
73357 +#endif
73358 +
73359 return csizep->cs_cachep;
73360 }
73361
73362 @@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
73363 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73364 sizes[INDEX_AC].cs_size,
73365 ARCH_KMALLOC_MINALIGN,
73366 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73367 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73368 NULL);
73369
73370 if (INDEX_AC != INDEX_L3) {
73371 @@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
73372 kmem_cache_create(names[INDEX_L3].name,
73373 sizes[INDEX_L3].cs_size,
73374 ARCH_KMALLOC_MINALIGN,
73375 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73376 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73377 NULL);
73378 }
73379
73380 @@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
73381 sizes->cs_cachep = kmem_cache_create(names->name,
73382 sizes->cs_size,
73383 ARCH_KMALLOC_MINALIGN,
73384 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73385 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73386 NULL);
73387 }
73388 #ifdef CONFIG_ZONE_DMA
73389 @@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
73390 SLAB_PANIC,
73391 NULL);
73392 #endif
73393 +
73394 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73395 + sizes->cs_usercopycachep = kmem_cache_create(
73396 + names->name_usercopy,
73397 + sizes->cs_size,
73398 + ARCH_KMALLOC_MINALIGN,
73399 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73400 + NULL);
73401 +#endif
73402 +
73403 sizes++;
73404 names++;
73405 }
73406 @@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
73407 }
73408 /* cpu stats */
73409 {
73410 - unsigned long allochit = atomic_read(&cachep->allochit);
73411 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73412 - unsigned long freehit = atomic_read(&cachep->freehit);
73413 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73414 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73415 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73416 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73417 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73418
73419 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73420 allochit, allocmiss, freehit, freemiss);
73421 @@ -4652,13 +4669,68 @@ static int __init slab_proc_init(void)
73422 {
73423 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73424 #ifdef CONFIG_DEBUG_SLAB_LEAK
73425 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73426 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73427 #endif
73428 return 0;
73429 }
73430 module_init(slab_proc_init);
73431 #endif
73432
73433 +bool is_usercopy_object(const void *ptr)
73434 +{
73435 + struct page *page;
73436 + struct kmem_cache *cachep;
73437 +
73438 + if (ZERO_OR_NULL_PTR(ptr))
73439 + return false;
73440 +
73441 + if (!virt_addr_valid(ptr))
73442 + return false;
73443 +
73444 + page = virt_to_head_page(ptr);
73445 +
73446 + if (!PageSlab(page))
73447 + return false;
73448 +
73449 + cachep = page_get_cache(page);
73450 + return cachep->flags & SLAB_USERCOPY;
73451 +}
73452 +
73453 +#ifdef CONFIG_PAX_USERCOPY
73454 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73455 +{
73456 + struct page *page;
73457 + struct kmem_cache *cachep;
73458 + struct slab *slabp;
73459 + unsigned int objnr;
73460 + unsigned long offset;
73461 +
73462 + if (ZERO_OR_NULL_PTR(ptr))
73463 + return "<null>";
73464 +
73465 + if (!virt_addr_valid(ptr))
73466 + return NULL;
73467 +
73468 + page = virt_to_head_page(ptr);
73469 +
73470 + if (!PageSlab(page))
73471 + return NULL;
73472 +
73473 + cachep = page_get_cache(page);
73474 + if (!(cachep->flags & SLAB_USERCOPY))
73475 + return cachep->name;
73476 +
73477 + slabp = page_get_slab(page);
73478 + objnr = obj_to_index(cachep, slabp, ptr);
73479 + BUG_ON(objnr >= cachep->num);
73480 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73481 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73482 + return NULL;
73483 +
73484 + return cachep->name;
73485 +}
73486 +#endif
73487 +
73488 /**
73489 * ksize - get the actual amount of memory allocated for a given object
73490 * @objp: Pointer to the object
73491 diff --git a/mm/slob.c b/mm/slob.c
73492 index 8105be4..3c15e57 100644
73493 --- a/mm/slob.c
73494 +++ b/mm/slob.c
73495 @@ -29,7 +29,7 @@
73496 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73497 * alloc_pages() directly, allocating compound pages so the page order
73498 * does not have to be separately tracked, and also stores the exact
73499 - * allocation size in page->private so that it can be used to accurately
73500 + * allocation size in slob_page->size so that it can be used to accurately
73501 * provide ksize(). These objects are detected in kfree() because slob_page()
73502 * is false for them.
73503 *
73504 @@ -58,6 +58,7 @@
73505 */
73506
73507 #include <linux/kernel.h>
73508 +#include <linux/sched.h>
73509 #include <linux/slab.h>
73510 #include <linux/mm.h>
73511 #include <linux/swap.h> /* struct reclaim_state */
73512 @@ -102,7 +103,8 @@ struct slob_page {
73513 unsigned long flags; /* mandatory */
73514 atomic_t _count; /* mandatory */
73515 slobidx_t units; /* free units left in page */
73516 - unsigned long pad[2];
73517 + unsigned long pad[1];
73518 + unsigned long size; /* size when >=PAGE_SIZE */
73519 slob_t *free; /* first free slob_t in page */
73520 struct list_head list; /* linked list of free pages */
73521 };
73522 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73523 */
73524 static inline int is_slob_page(struct slob_page *sp)
73525 {
73526 - return PageSlab((struct page *)sp);
73527 + return PageSlab((struct page *)sp) && !sp->size;
73528 }
73529
73530 static inline void set_slob_page(struct slob_page *sp)
73531 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73532
73533 static inline struct slob_page *slob_page(const void *addr)
73534 {
73535 - return (struct slob_page *)virt_to_page(addr);
73536 + return (struct slob_page *)virt_to_head_page(addr);
73537 }
73538
73539 /*
73540 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73541 /*
73542 * Return the size of a slob block.
73543 */
73544 -static slobidx_t slob_units(slob_t *s)
73545 +static slobidx_t slob_units(const slob_t *s)
73546 {
73547 if (s->units > 0)
73548 return s->units;
73549 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73550 /*
73551 * Return the next free slob block pointer after this one.
73552 */
73553 -static slob_t *slob_next(slob_t *s)
73554 +static slob_t *slob_next(const slob_t *s)
73555 {
73556 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73557 slobidx_t next;
73558 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73559 /*
73560 * Returns true if s is the last free block in its page.
73561 */
73562 -static int slob_last(slob_t *s)
73563 +static int slob_last(const slob_t *s)
73564 {
73565 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73566 }
73567 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73568 if (!page)
73569 return NULL;
73570
73571 + set_slob_page(page);
73572 return page_address(page);
73573 }
73574
73575 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73576 if (!b)
73577 return NULL;
73578 sp = slob_page(b);
73579 - set_slob_page(sp);
73580
73581 spin_lock_irqsave(&slob_lock, flags);
73582 sp->units = SLOB_UNITS(PAGE_SIZE);
73583 sp->free = b;
73584 + sp->size = 0;
73585 INIT_LIST_HEAD(&sp->list);
73586 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73587 set_slob_page_free(sp, slob_list);
73588 @@ -476,10 +479,9 @@ out:
73589 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73590 */
73591
73592 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73593 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73594 {
73595 - unsigned int *m;
73596 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73597 + slob_t *m;
73598 void *ret;
73599
73600 gfp &= gfp_allowed_mask;
73601 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73602
73603 if (!m)
73604 return NULL;
73605 - *m = size;
73606 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73607 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73608 + m[0].units = size;
73609 + m[1].units = align;
73610 ret = (void *)m + align;
73611
73612 trace_kmalloc_node(_RET_IP_, ret,
73613 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73614 gfp |= __GFP_COMP;
73615 ret = slob_new_pages(gfp, order, node);
73616 if (ret) {
73617 - struct page *page;
73618 - page = virt_to_page(ret);
73619 - page->private = size;
73620 + struct slob_page *sp;
73621 + sp = slob_page(ret);
73622 + sp->size = size;
73623 }
73624
73625 trace_kmalloc_node(_RET_IP_, ret,
73626 size, PAGE_SIZE << order, gfp, node);
73627 }
73628
73629 - kmemleak_alloc(ret, size, 1, gfp);
73630 + return ret;
73631 +}
73632 +
73633 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73634 +{
73635 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73636 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73637 +
73638 + if (!ZERO_OR_NULL_PTR(ret))
73639 + kmemleak_alloc(ret, size, 1, gfp);
73640 return ret;
73641 }
73642 EXPORT_SYMBOL(__kmalloc_node);
73643 @@ -533,13 +547,83 @@ void kfree(const void *block)
73644 sp = slob_page(block);
73645 if (is_slob_page(sp)) {
73646 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73647 - unsigned int *m = (unsigned int *)(block - align);
73648 - slob_free(m, *m + align);
73649 - } else
73650 + slob_t *m = (slob_t *)(block - align);
73651 + slob_free(m, m[0].units + align);
73652 + } else {
73653 + clear_slob_page(sp);
73654 + free_slob_page(sp);
73655 + sp->size = 0;
73656 put_page(&sp->page);
73657 + }
73658 }
73659 EXPORT_SYMBOL(kfree);
73660
73661 +bool is_usercopy_object(const void *ptr)
73662 +{
73663 + return false;
73664 +}
73665 +
73666 +#ifdef CONFIG_PAX_USERCOPY
73667 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73668 +{
73669 + struct slob_page *sp;
73670 + const slob_t *free;
73671 + const void *base;
73672 + unsigned long flags;
73673 +
73674 + if (ZERO_OR_NULL_PTR(ptr))
73675 + return "<null>";
73676 +
73677 + if (!virt_addr_valid(ptr))
73678 + return NULL;
73679 +
73680 + sp = slob_page(ptr);
73681 + if (!PageSlab((struct page *)sp))
73682 + return NULL;
73683 +
73684 + if (sp->size) {
73685 + base = page_address(&sp->page);
73686 + if (base <= ptr && n <= sp->size - (ptr - base))
73687 + return NULL;
73688 + return "<slob>";
73689 + }
73690 +
73691 + /* some tricky double walking to find the chunk */
73692 + spin_lock_irqsave(&slob_lock, flags);
73693 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73694 + free = sp->free;
73695 +
73696 + while (!slob_last(free) && (void *)free <= ptr) {
73697 + base = free + slob_units(free);
73698 + free = slob_next(free);
73699 + }
73700 +
73701 + while (base < (void *)free) {
73702 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73703 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73704 + int offset;
73705 +
73706 + if (ptr < base + align)
73707 + break;
73708 +
73709 + offset = ptr - base - align;
73710 + if (offset >= m) {
73711 + base += size;
73712 + continue;
73713 + }
73714 +
73715 + if (n > m - offset)
73716 + break;
73717 +
73718 + spin_unlock_irqrestore(&slob_lock, flags);
73719 + return NULL;
73720 + }
73721 +
73722 + spin_unlock_irqrestore(&slob_lock, flags);
73723 + return "<slob>";
73724 +}
73725 +#endif
73726 +
73727 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73728 size_t ksize(const void *block)
73729 {
73730 @@ -552,10 +636,10 @@ size_t ksize(const void *block)
73731 sp = slob_page(block);
73732 if (is_slob_page(sp)) {
73733 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73734 - unsigned int *m = (unsigned int *)(block - align);
73735 - return SLOB_UNITS(*m) * SLOB_UNIT;
73736 + slob_t *m = (slob_t *)(block - align);
73737 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73738 } else
73739 - return sp->page.private;
73740 + return sp->size;
73741 }
73742 EXPORT_SYMBOL(ksize);
73743
73744 @@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73745 {
73746 struct kmem_cache *c;
73747
73748 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73749 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73750 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73751 +#else
73752 c = slob_alloc(sizeof(struct kmem_cache),
73753 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73754 +#endif
73755
73756 if (c) {
73757 c->name = name;
73758 @@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73759
73760 lockdep_trace_alloc(flags);
73761
73762 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73763 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73764 +#else
73765 if (c->size < PAGE_SIZE) {
73766 b = slob_alloc(c->size, flags, c->align, node);
73767 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73768 SLOB_UNITS(c->size) * SLOB_UNIT,
73769 flags, node);
73770 } else {
73771 + struct slob_page *sp;
73772 +
73773 b = slob_new_pages(flags, get_order(c->size), node);
73774 + sp = slob_page(b);
73775 + sp->size = c->size;
73776 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73777 PAGE_SIZE << get_order(c->size),
73778 flags, node);
73779 }
73780 +#endif
73781
73782 if (c->ctor)
73783 c->ctor(b);
73784 @@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73785
73786 static void __kmem_cache_free(void *b, int size)
73787 {
73788 - if (size < PAGE_SIZE)
73789 + struct slob_page *sp = slob_page(b);
73790 +
73791 + if (is_slob_page(sp))
73792 slob_free(b, size);
73793 - else
73794 + else {
73795 + clear_slob_page(sp);
73796 + free_slob_page(sp);
73797 + sp->size = 0;
73798 slob_free_pages(b, get_order(size));
73799 + }
73800 }
73801
73802 static void kmem_rcu_free(struct rcu_head *head)
73803 @@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73804
73805 void kmem_cache_free(struct kmem_cache *c, void *b)
73806 {
73807 + int size = c->size;
73808 +
73809 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73810 + if (size + c->align < PAGE_SIZE) {
73811 + size += c->align;
73812 + b -= c->align;
73813 + }
73814 +#endif
73815 +
73816 kmemleak_free_recursive(b, c->flags);
73817 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73818 struct slob_rcu *slob_rcu;
73819 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73820 - slob_rcu->size = c->size;
73821 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73822 + slob_rcu->size = size;
73823 call_rcu(&slob_rcu->head, kmem_rcu_free);
73824 } else {
73825 - __kmem_cache_free(b, c->size);
73826 + __kmem_cache_free(b, size);
73827 }
73828
73829 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73830 + trace_kfree(_RET_IP_, b);
73831 +#else
73832 trace_kmem_cache_free(_RET_IP_, b);
73833 +#endif
73834 +
73835 }
73836 EXPORT_SYMBOL(kmem_cache_free);
73837
73838 diff --git a/mm/slub.c b/mm/slub.c
73839 index 71de9b5..a93d4a4 100644
73840 --- a/mm/slub.c
73841 +++ b/mm/slub.c
73842 @@ -209,7 +209,7 @@ struct track {
73843
73844 enum track_item { TRACK_ALLOC, TRACK_FREE };
73845
73846 -#ifdef CONFIG_SYSFS
73847 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73848 static int sysfs_slab_add(struct kmem_cache *);
73849 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73850 static void sysfs_slab_remove(struct kmem_cache *);
73851 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
73852 if (!t->addr)
73853 return;
73854
73855 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73856 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73857 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73858 #ifdef CONFIG_STACKTRACE
73859 {
73860 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73861
73862 page = virt_to_head_page(x);
73863
73864 + BUG_ON(!PageSlab(page));
73865 +
73866 slab_free(s, page, x, _RET_IP_);
73867
73868 trace_kmem_cache_free(_RET_IP_, x);
73869 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
73870 * Merge control. If this is set then no merging of slab caches will occur.
73871 * (Could be removed. This was introduced to pacify the merge skeptics.)
73872 */
73873 -static int slub_nomerge;
73874 +static int slub_nomerge = 1;
73875
73876 /*
73877 * Calculate the order of allocation given an slab object size.
73878 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73879 else
73880 s->cpu_partial = 30;
73881
73882 - s->refcount = 1;
73883 + atomic_set(&s->refcount, 1);
73884 #ifdef CONFIG_NUMA
73885 s->remote_node_defrag_ratio = 1000;
73886 #endif
73887 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73888 void kmem_cache_destroy(struct kmem_cache *s)
73889 {
73890 down_write(&slub_lock);
73891 - s->refcount--;
73892 - if (!s->refcount) {
73893 + if (atomic_dec_and_test(&s->refcount)) {
73894 list_del(&s->list);
73895 up_write(&slub_lock);
73896 if (kmem_cache_close(s)) {
73897 @@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
73898 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
73899 #endif
73900
73901 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73902 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
73903 +#endif
73904 +
73905 static int __init setup_slub_min_order(char *str)
73906 {
73907 get_option(&str, &slub_min_order);
73908 @@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
73909 return kmalloc_dma_caches[index];
73910
73911 #endif
73912 +
73913 +#ifdef CONFIG_PAX_USERCOPY_SLABS
73914 + if (flags & SLAB_USERCOPY)
73915 + return kmalloc_usercopy_caches[index];
73916 +
73917 +#endif
73918 +
73919 return kmalloc_caches[index];
73920 }
73921
73922 @@ -3405,6 +3417,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73923 EXPORT_SYMBOL(__kmalloc_node);
73924 #endif
73925
73926 +bool is_usercopy_object(const void *ptr)
73927 +{
73928 + struct page *page;
73929 + struct kmem_cache *s;
73930 +
73931 + if (ZERO_OR_NULL_PTR(ptr))
73932 + return false;
73933 +
73934 + if (!virt_addr_valid(ptr))
73935 + return false;
73936 +
73937 + page = virt_to_head_page(ptr);
73938 +
73939 + if (!PageSlab(page))
73940 + return false;
73941 +
73942 + s = page->slab;
73943 + return s->flags & SLAB_USERCOPY;
73944 +}
73945 +
73946 +#ifdef CONFIG_PAX_USERCOPY
73947 +const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73948 +{
73949 + struct page *page;
73950 + struct kmem_cache *s;
73951 + unsigned long offset;
73952 +
73953 + if (ZERO_OR_NULL_PTR(ptr))
73954 + return "<null>";
73955 +
73956 + if (!virt_addr_valid(ptr))
73957 + return NULL;
73958 +
73959 + page = virt_to_head_page(ptr);
73960 +
73961 + if (!PageSlab(page))
73962 + return NULL;
73963 +
73964 + s = page->slab;
73965 + if (!(s->flags & SLAB_USERCOPY))
73966 + return s->name;
73967 +
73968 + offset = (ptr - page_address(page)) % s->size;
73969 + if (offset <= s->objsize && n <= s->objsize - offset)
73970 + return NULL;
73971 +
73972 + return s->name;
73973 +}
73974 +#endif
73975 +
73976 size_t ksize(const void *object)
73977 {
73978 struct page *page;
73979 @@ -3679,7 +3741,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73980 int node;
73981
73982 list_add(&s->list, &slab_caches);
73983 - s->refcount = -1;
73984 + atomic_set(&s->refcount, -1);
73985
73986 for_each_node_state(node, N_NORMAL_MEMORY) {
73987 struct kmem_cache_node *n = get_node(s, node);
73988 @@ -3799,17 +3861,17 @@ void __init kmem_cache_init(void)
73989
73990 /* Caches that are not of the two-to-the-power-of size */
73991 if (KMALLOC_MIN_SIZE <= 32) {
73992 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73993 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73994 caches++;
73995 }
73996
73997 if (KMALLOC_MIN_SIZE <= 64) {
73998 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73999 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74000 caches++;
74001 }
74002
74003 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74004 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74005 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74006 caches++;
74007 }
74008
74009 @@ -3851,6 +3913,22 @@ void __init kmem_cache_init(void)
74010 }
74011 }
74012 #endif
74013 +
74014 +#ifdef CONFIG_PAX_USERCOPY_SLABS
74015 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
74016 + struct kmem_cache *s = kmalloc_caches[i];
74017 +
74018 + if (s && s->size) {
74019 + char *name = kasprintf(GFP_NOWAIT,
74020 + "usercopy-kmalloc-%d", s->objsize);
74021 +
74022 + BUG_ON(!name);
74023 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
74024 + s->objsize, SLAB_USERCOPY);
74025 + }
74026 + }
74027 +#endif
74028 +
74029 printk(KERN_INFO
74030 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
74031 " CPUs=%d, Nodes=%d\n",
74032 @@ -3877,7 +3955,7 @@ static int slab_unmergeable(struct kmem_cache *s)
74033 /*
74034 * We may have set a slab to be unmergeable during bootstrap.
74035 */
74036 - if (s->refcount < 0)
74037 + if (atomic_read(&s->refcount) < 0)
74038 return 1;
74039
74040 return 0;
74041 @@ -3936,7 +4014,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74042 down_write(&slub_lock);
74043 s = find_mergeable(size, align, flags, name, ctor);
74044 if (s) {
74045 - s->refcount++;
74046 + atomic_inc(&s->refcount);
74047 /*
74048 * Adjust the object sizes so that we clear
74049 * the complete object on kzalloc.
74050 @@ -3945,7 +4023,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74051 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
74052
74053 if (sysfs_slab_alias(s, name)) {
74054 - s->refcount--;
74055 + atomic_dec(&s->refcount);
74056 goto err;
74057 }
74058 up_write(&slub_lock);
74059 @@ -4074,7 +4152,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
74060 }
74061 #endif
74062
74063 -#ifdef CONFIG_SYSFS
74064 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74065 static int count_inuse(struct page *page)
74066 {
74067 return page->inuse;
74068 @@ -4461,12 +4539,12 @@ static void resiliency_test(void)
74069 validate_slab_cache(kmalloc_caches[9]);
74070 }
74071 #else
74072 -#ifdef CONFIG_SYSFS
74073 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74074 static void resiliency_test(void) {};
74075 #endif
74076 #endif
74077
74078 -#ifdef CONFIG_SYSFS
74079 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74080 enum slab_stat_type {
74081 SL_ALL, /* All slabs */
74082 SL_PARTIAL, /* Only partially allocated slabs */
74083 @@ -4709,7 +4787,7 @@ SLAB_ATTR_RO(ctor);
74084
74085 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74086 {
74087 - return sprintf(buf, "%d\n", s->refcount - 1);
74088 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74089 }
74090 SLAB_ATTR_RO(aliases);
74091
74092 @@ -5280,6 +5358,7 @@ static char *create_unique_id(struct kmem_cache *s)
74093 return name;
74094 }
74095
74096 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74097 static int sysfs_slab_add(struct kmem_cache *s)
74098 {
74099 int err;
74100 @@ -5342,6 +5421,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
74101 kobject_del(&s->kobj);
74102 kobject_put(&s->kobj);
74103 }
74104 +#endif
74105
74106 /*
74107 * Need to buffer aliases during bootup until sysfs becomes
74108 @@ -5355,6 +5435,7 @@ struct saved_alias {
74109
74110 static struct saved_alias *alias_list;
74111
74112 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74113 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74114 {
74115 struct saved_alias *al;
74116 @@ -5377,6 +5458,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74117 alias_list = al;
74118 return 0;
74119 }
74120 +#endif
74121
74122 static int __init slab_sysfs_init(void)
74123 {
74124 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74125 index 1b7e22a..3fcd4f3 100644
74126 --- a/mm/sparse-vmemmap.c
74127 +++ b/mm/sparse-vmemmap.c
74128 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74129 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74130 if (!p)
74131 return NULL;
74132 - pud_populate(&init_mm, pud, p);
74133 + pud_populate_kernel(&init_mm, pud, p);
74134 }
74135 return pud;
74136 }
74137 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74138 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74139 if (!p)
74140 return NULL;
74141 - pgd_populate(&init_mm, pgd, p);
74142 + pgd_populate_kernel(&init_mm, pgd, p);
74143 }
74144 return pgd;
74145 }
74146 diff --git a/mm/swap.c b/mm/swap.c
74147 index 5c13f13..f1cfc13 100644
74148 --- a/mm/swap.c
74149 +++ b/mm/swap.c
74150 @@ -30,6 +30,7 @@
74151 #include <linux/backing-dev.h>
74152 #include <linux/memcontrol.h>
74153 #include <linux/gfp.h>
74154 +#include <linux/hugetlb.h>
74155
74156 #include "internal.h"
74157
74158 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
74159
74160 __page_cache_release(page);
74161 dtor = get_compound_page_dtor(page);
74162 + if (!PageHuge(page))
74163 + BUG_ON(dtor != free_compound_page);
74164 (*dtor)(page);
74165 }
74166
74167 diff --git a/mm/swapfile.c b/mm/swapfile.c
74168 index 38186d9..bfba6d3 100644
74169 --- a/mm/swapfile.c
74170 +++ b/mm/swapfile.c
74171 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
74172
74173 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74174 /* Activity counter to indicate that a swapon or swapoff has occurred */
74175 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
74176 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74177
74178 static inline unsigned char swap_count(unsigned char ent)
74179 {
74180 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74181 }
74182 filp_close(swap_file, NULL);
74183 err = 0;
74184 - atomic_inc(&proc_poll_event);
74185 + atomic_inc_unchecked(&proc_poll_event);
74186 wake_up_interruptible(&proc_poll_wait);
74187
74188 out_dput:
74189 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74190
74191 poll_wait(file, &proc_poll_wait, wait);
74192
74193 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
74194 - seq->poll_event = atomic_read(&proc_poll_event);
74195 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74196 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74197 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74198 }
74199
74200 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74201 return ret;
74202
74203 seq = file->private_data;
74204 - seq->poll_event = atomic_read(&proc_poll_event);
74205 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74206 return 0;
74207 }
74208
74209 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74210 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74211
74212 mutex_unlock(&swapon_mutex);
74213 - atomic_inc(&proc_poll_event);
74214 + atomic_inc_unchecked(&proc_poll_event);
74215 wake_up_interruptible(&proc_poll_wait);
74216
74217 if (S_ISREG(inode->i_mode))
74218 diff --git a/mm/util.c b/mm/util.c
74219 index ae962b3..0bba886 100644
74220 --- a/mm/util.c
74221 +++ b/mm/util.c
74222 @@ -284,6 +284,12 @@ done:
74223 void arch_pick_mmap_layout(struct mm_struct *mm)
74224 {
74225 mm->mmap_base = TASK_UNMAPPED_BASE;
74226 +
74227 +#ifdef CONFIG_PAX_RANDMMAP
74228 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74229 + mm->mmap_base += mm->delta_mmap;
74230 +#endif
74231 +
74232 mm->get_unmapped_area = arch_get_unmapped_area;
74233 mm->unmap_area = arch_unmap_area;
74234 }
74235 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74236 index 1196c77..2e608e8 100644
74237 --- a/mm/vmalloc.c
74238 +++ b/mm/vmalloc.c
74239 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74240
74241 pte = pte_offset_kernel(pmd, addr);
74242 do {
74243 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74244 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74245 +
74246 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74247 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74248 + BUG_ON(!pte_exec(*pte));
74249 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74250 + continue;
74251 + }
74252 +#endif
74253 +
74254 + {
74255 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74256 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74257 + }
74258 } while (pte++, addr += PAGE_SIZE, addr != end);
74259 }
74260
74261 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74262 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74263 {
74264 pte_t *pte;
74265 + int ret = -ENOMEM;
74266
74267 /*
74268 * nr is a running index into the array which helps higher level
74269 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74270 pte = pte_alloc_kernel(pmd, addr);
74271 if (!pte)
74272 return -ENOMEM;
74273 +
74274 + pax_open_kernel();
74275 do {
74276 struct page *page = pages[*nr];
74277
74278 - if (WARN_ON(!pte_none(*pte)))
74279 - return -EBUSY;
74280 - if (WARN_ON(!page))
74281 - return -ENOMEM;
74282 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74283 + if (pgprot_val(prot) & _PAGE_NX)
74284 +#endif
74285 +
74286 + if (WARN_ON(!pte_none(*pte))) {
74287 + ret = -EBUSY;
74288 + goto out;
74289 + }
74290 + if (WARN_ON(!page)) {
74291 + ret = -ENOMEM;
74292 + goto out;
74293 + }
74294 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74295 (*nr)++;
74296 } while (pte++, addr += PAGE_SIZE, addr != end);
74297 - return 0;
74298 + ret = 0;
74299 +out:
74300 + pax_close_kernel();
74301 + return ret;
74302 }
74303
74304 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74305 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74306 pmd_t *pmd;
74307 unsigned long next;
74308
74309 - pmd = pmd_alloc(&init_mm, pud, addr);
74310 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74311 if (!pmd)
74312 return -ENOMEM;
74313 do {
74314 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74315 pud_t *pud;
74316 unsigned long next;
74317
74318 - pud = pud_alloc(&init_mm, pgd, addr);
74319 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
74320 if (!pud)
74321 return -ENOMEM;
74322 do {
74323 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74324 * and fall back on vmalloc() if that fails. Others
74325 * just put it in the vmalloc space.
74326 */
74327 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74328 +#ifdef CONFIG_MODULES
74329 +#ifdef MODULES_VADDR
74330 unsigned long addr = (unsigned long)x;
74331 if (addr >= MODULES_VADDR && addr < MODULES_END)
74332 return 1;
74333 #endif
74334 +
74335 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74336 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74337 + return 1;
74338 +#endif
74339 +
74340 +#endif
74341 +
74342 return is_vmalloc_addr(x);
74343 }
74344
74345 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74346
74347 if (!pgd_none(*pgd)) {
74348 pud_t *pud = pud_offset(pgd, addr);
74349 +#ifdef CONFIG_X86
74350 + if (!pud_large(*pud))
74351 +#endif
74352 if (!pud_none(*pud)) {
74353 pmd_t *pmd = pmd_offset(pud, addr);
74354 +#ifdef CONFIG_X86
74355 + if (!pmd_large(*pmd))
74356 +#endif
74357 if (!pmd_none(*pmd)) {
74358 pte_t *ptep, pte;
74359
74360 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
74361 static struct vmap_area *alloc_vmap_area(unsigned long size,
74362 unsigned long align,
74363 unsigned long vstart, unsigned long vend,
74364 + int node, gfp_t gfp_mask) __size_overflow(1);
74365 +static struct vmap_area *alloc_vmap_area(unsigned long size,
74366 + unsigned long align,
74367 + unsigned long vstart, unsigned long vend,
74368 int node, gfp_t gfp_mask)
74369 {
74370 struct vmap_area *va;
74371 @@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74372 struct vm_struct *area;
74373
74374 BUG_ON(in_interrupt());
74375 +
74376 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74377 + if (flags & VM_KERNEXEC) {
74378 + if (start != VMALLOC_START || end != VMALLOC_END)
74379 + return NULL;
74380 + start = (unsigned long)MODULES_EXEC_VADDR;
74381 + end = (unsigned long)MODULES_EXEC_END;
74382 + }
74383 +#endif
74384 +
74385 if (flags & VM_IOREMAP) {
74386 int bit = fls(size);
74387
74388 @@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
74389 if (count > totalram_pages)
74390 return NULL;
74391
74392 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74393 + if (!(pgprot_val(prot) & _PAGE_NX))
74394 + flags |= VM_KERNEXEC;
74395 +#endif
74396 +
74397 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74398 __builtin_return_address(0));
74399 if (!area)
74400 @@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74401 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74402 goto fail;
74403
74404 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74405 + if (!(pgprot_val(prot) & _PAGE_NX))
74406 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74407 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74408 + else
74409 +#endif
74410 +
74411 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74412 start, end, node, gfp_mask, caller);
74413 if (!area)
74414 @@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
74415 * For tight control over page level allocator and protection flags
74416 * use __vmalloc() instead.
74417 */
74418 -
74419 void *vmalloc_exec(unsigned long size)
74420 {
74421 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74422 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74423 -1, __builtin_return_address(0));
74424 }
74425
74426 @@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74427 unsigned long uaddr = vma->vm_start;
74428 unsigned long usize = vma->vm_end - vma->vm_start;
74429
74430 + BUG_ON(vma->vm_mirror);
74431 +
74432 if ((PAGE_SIZE-1) & (unsigned long)addr)
74433 return -EINVAL;
74434
74435 @@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
74436 return NULL;
74437 }
74438
74439 - vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
74440 - vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
74441 + vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
74442 + vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
74443 if (!vas || !vms)
74444 goto err_free2;
74445
74446 diff --git a/mm/vmscan.c b/mm/vmscan.c
74447 index 4607cc6..be5bc0a 100644
74448 --- a/mm/vmscan.c
74449 +++ b/mm/vmscan.c
74450 @@ -3013,7 +3013,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
74451 * them before going back to sleep.
74452 */
74453 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
74454 - schedule();
74455 +
74456 + if (!kthread_should_stop())
74457 + schedule();
74458 +
74459 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
74460 } else {
74461 if (remaining)
74462 diff --git a/mm/vmstat.c b/mm/vmstat.c
74463 index 7db1b9b..e9f6b07 100644
74464 --- a/mm/vmstat.c
74465 +++ b/mm/vmstat.c
74466 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74467 *
74468 * vm_stat contains the global counters
74469 */
74470 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74471 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74472 EXPORT_SYMBOL(vm_stat);
74473
74474 #ifdef CONFIG_SMP
74475 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74476 v = p->vm_stat_diff[i];
74477 p->vm_stat_diff[i] = 0;
74478 local_irq_restore(flags);
74479 - atomic_long_add(v, &zone->vm_stat[i]);
74480 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74481 global_diff[i] += v;
74482 #ifdef CONFIG_NUMA
74483 /* 3 seconds idle till flush */
74484 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74485
74486 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74487 if (global_diff[i])
74488 - atomic_long_add(global_diff[i], &vm_stat[i]);
74489 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74490 }
74491
74492 #endif
74493 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74494 start_cpu_timer(cpu);
74495 #endif
74496 #ifdef CONFIG_PROC_FS
74497 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74498 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74499 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74500 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74501 + {
74502 + mode_t gr_mode = S_IRUGO;
74503 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74504 + gr_mode = S_IRUSR;
74505 +#endif
74506 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74507 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74508 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74509 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74510 +#else
74511 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74512 +#endif
74513 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74514 + }
74515 #endif
74516 return 0;
74517 }
74518 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74519 index efea35b..9c8dd0b 100644
74520 --- a/net/8021q/vlan.c
74521 +++ b/net/8021q/vlan.c
74522 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74523 err = -EPERM;
74524 if (!capable(CAP_NET_ADMIN))
74525 break;
74526 - if ((args.u.name_type >= 0) &&
74527 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74528 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74529 struct vlan_net *vn;
74530
74531 vn = net_generic(net, vlan_net_id);
74532 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74533 index fccae26..e7ece2f 100644
74534 --- a/net/9p/trans_fd.c
74535 +++ b/net/9p/trans_fd.c
74536 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74537 oldfs = get_fs();
74538 set_fs(get_ds());
74539 /* The cast to a user pointer is valid due to the set_fs() */
74540 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74541 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74542 set_fs(oldfs);
74543
74544 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74545 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74546 index 876fbe8..8bbea9f 100644
74547 --- a/net/atm/atm_misc.c
74548 +++ b/net/atm/atm_misc.c
74549 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74550 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74551 return 1;
74552 atm_return(vcc, truesize);
74553 - atomic_inc(&vcc->stats->rx_drop);
74554 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74555 return 0;
74556 }
74557 EXPORT_SYMBOL(atm_charge);
74558 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74559 }
74560 }
74561 atm_return(vcc, guess);
74562 - atomic_inc(&vcc->stats->rx_drop);
74563 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74564 return NULL;
74565 }
74566 EXPORT_SYMBOL(atm_alloc_charge);
74567 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74568
74569 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74570 {
74571 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74572 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74573 __SONET_ITEMS
74574 #undef __HANDLE_ITEM
74575 }
74576 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74577
74578 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74579 {
74580 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74581 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74582 __SONET_ITEMS
74583 #undef __HANDLE_ITEM
74584 }
74585 diff --git a/net/atm/lec.h b/net/atm/lec.h
74586 index dfc0719..47c5322 100644
74587 --- a/net/atm/lec.h
74588 +++ b/net/atm/lec.h
74589 @@ -48,7 +48,7 @@ struct lane2_ops {
74590 const u8 *tlvs, u32 sizeoftlvs);
74591 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74592 const u8 *tlvs, u32 sizeoftlvs);
74593 -};
74594 +} __no_const;
74595
74596 /*
74597 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74598 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74599 index 0919a88..a23d54e 100644
74600 --- a/net/atm/mpc.h
74601 +++ b/net/atm/mpc.h
74602 @@ -33,7 +33,7 @@ struct mpoa_client {
74603 struct mpc_parameters parameters; /* parameters for this client */
74604
74605 const struct net_device_ops *old_ops;
74606 - struct net_device_ops new_ops;
74607 + net_device_ops_no_const new_ops;
74608 };
74609
74610
74611 diff --git a/net/atm/proc.c b/net/atm/proc.c
74612 index 0d020de..011c7bb 100644
74613 --- a/net/atm/proc.c
74614 +++ b/net/atm/proc.c
74615 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74616 const struct k_atm_aal_stats *stats)
74617 {
74618 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74619 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74620 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74621 - atomic_read(&stats->rx_drop));
74622 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74623 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74624 + atomic_read_unchecked(&stats->rx_drop));
74625 }
74626
74627 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74628 diff --git a/net/atm/resources.c b/net/atm/resources.c
74629 index 23f45ce..c748f1a 100644
74630 --- a/net/atm/resources.c
74631 +++ b/net/atm/resources.c
74632 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74633 static void copy_aal_stats(struct k_atm_aal_stats *from,
74634 struct atm_aal_stats *to)
74635 {
74636 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74637 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74638 __AAL_STAT_ITEMS
74639 #undef __HANDLE_ITEM
74640 }
74641 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74642 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74643 struct atm_aal_stats *to)
74644 {
74645 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74646 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74647 __AAL_STAT_ITEMS
74648 #undef __HANDLE_ITEM
74649 }
74650 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74651 index a6d5d63..1cc6c2b 100644
74652 --- a/net/batman-adv/bat_iv_ogm.c
74653 +++ b/net/batman-adv/bat_iv_ogm.c
74654 @@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74655
74656 /* change sequence number to network order */
74657 batman_ogm_packet->seqno =
74658 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74659 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74660
74661 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74662 batman_ogm_packet->tt_crc = htons((uint16_t)
74663 @@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74664 else
74665 batman_ogm_packet->gw_flags = NO_FLAGS;
74666
74667 - atomic_inc(&hard_iface->seqno);
74668 + atomic_inc_unchecked(&hard_iface->seqno);
74669
74670 slide_own_bcast_window(hard_iface);
74671 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74672 @@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
74673 return;
74674
74675 /* could be changed by schedule_own_packet() */
74676 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74677 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74678
74679 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74680
74681 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74682 index 3778977..f6a9450 100644
74683 --- a/net/batman-adv/hard-interface.c
74684 +++ b/net/batman-adv/hard-interface.c
74685 @@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74686 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74687 dev_add_pack(&hard_iface->batman_adv_ptype);
74688
74689 - atomic_set(&hard_iface->seqno, 1);
74690 - atomic_set(&hard_iface->frag_seqno, 1);
74691 + atomic_set_unchecked(&hard_iface->seqno, 1);
74692 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74693 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74694 hard_iface->net_dev->name);
74695
74696 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74697 index a5590f4..8d31969 100644
74698 --- a/net/batman-adv/soft-interface.c
74699 +++ b/net/batman-adv/soft-interface.c
74700 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74701
74702 /* set broadcast sequence number */
74703 bcast_packet->seqno =
74704 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74705 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74706
74707 add_bcast_packet_to_list(bat_priv, skb, 1);
74708
74709 @@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
74710 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74711
74712 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74713 - atomic_set(&bat_priv->bcast_seqno, 1);
74714 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74715 atomic_set(&bat_priv->ttvn, 0);
74716 atomic_set(&bat_priv->tt_local_changes, 0);
74717 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74718 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74719 index 302efb5..1590365 100644
74720 --- a/net/batman-adv/types.h
74721 +++ b/net/batman-adv/types.h
74722 @@ -38,8 +38,8 @@ struct hard_iface {
74723 int16_t if_num;
74724 char if_status;
74725 struct net_device *net_dev;
74726 - atomic_t seqno;
74727 - atomic_t frag_seqno;
74728 + atomic_unchecked_t seqno;
74729 + atomic_unchecked_t frag_seqno;
74730 unsigned char *packet_buff;
74731 int packet_len;
74732 struct kobject *hardif_obj;
74733 @@ -155,7 +155,7 @@ struct bat_priv {
74734 atomic_t orig_interval; /* uint */
74735 atomic_t hop_penalty; /* uint */
74736 atomic_t log_level; /* uint */
74737 - atomic_t bcast_seqno;
74738 + atomic_unchecked_t bcast_seqno;
74739 atomic_t bcast_queue_left;
74740 atomic_t batman_queue_left;
74741 atomic_t ttvn; /* translation table version number */
74742 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74743 index 676f6a6..3b4e668 100644
74744 --- a/net/batman-adv/unicast.c
74745 +++ b/net/batman-adv/unicast.c
74746 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74747 frag1->flags = UNI_FRAG_HEAD | large_tail;
74748 frag2->flags = large_tail;
74749
74750 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74751 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74752 frag1->seqno = htons(seqno - 1);
74753 frag2->seqno = htons(seqno);
74754
74755 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74756 index 5238b6b..c9798ce 100644
74757 --- a/net/bluetooth/hci_conn.c
74758 +++ b/net/bluetooth/hci_conn.c
74759 @@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74760 memset(&cp, 0, sizeof(cp));
74761
74762 cp.handle = cpu_to_le16(conn->handle);
74763 - memcpy(cp.ltk, ltk, sizeof(ltk));
74764 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74765
74766 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74767 }
74768 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74769 index 6f9c25b..d19fd66 100644
74770 --- a/net/bluetooth/l2cap_core.c
74771 +++ b/net/bluetooth/l2cap_core.c
74772 @@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74773 break;
74774
74775 case L2CAP_CONF_RFC:
74776 - if (olen == sizeof(rfc))
74777 - memcpy(&rfc, (void *)val, olen);
74778 + if (olen != sizeof(rfc))
74779 + break;
74780 +
74781 + memcpy(&rfc, (void *)val, olen);
74782
74783 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74784 rfc.mode != chan->mode)
74785 @@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74786
74787 switch (type) {
74788 case L2CAP_CONF_RFC:
74789 - if (olen == sizeof(rfc))
74790 - memcpy(&rfc, (void *)val, olen);
74791 + if (olen != sizeof(rfc))
74792 + break;
74793 +
74794 + memcpy(&rfc, (void *)val, olen);
74795 goto done;
74796 }
74797 }
74798 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74799 index 5fe2ff3..10968b5 100644
74800 --- a/net/bridge/netfilter/ebtables.c
74801 +++ b/net/bridge/netfilter/ebtables.c
74802 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74803 tmp.valid_hooks = t->table->valid_hooks;
74804 }
74805 mutex_unlock(&ebt_mutex);
74806 - if (copy_to_user(user, &tmp, *len) != 0){
74807 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74808 BUGPRINT("c2u Didn't work\n");
74809 ret = -EFAULT;
74810 break;
74811 diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
74812 index aa6f716..7bf4c21 100644
74813 --- a/net/caif/caif_dev.c
74814 +++ b/net/caif/caif_dev.c
74815 @@ -562,9 +562,9 @@ static int __init caif_device_init(void)
74816
74817 static void __exit caif_device_exit(void)
74818 {
74819 - unregister_pernet_subsys(&caif_net_ops);
74820 unregister_netdevice_notifier(&caif_device_notifier);
74821 dev_remove_pack(&caif_packet_type);
74822 + unregister_pernet_subsys(&caif_net_ops);
74823 }
74824
74825 module_init(caif_device_init);
74826 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74827 index 5cf5222..6f704ad 100644
74828 --- a/net/caif/cfctrl.c
74829 +++ b/net/caif/cfctrl.c
74830 @@ -9,6 +9,7 @@
74831 #include <linux/stddef.h>
74832 #include <linux/spinlock.h>
74833 #include <linux/slab.h>
74834 +#include <linux/sched.h>
74835 #include <net/caif/caif_layer.h>
74836 #include <net/caif/cfpkt.h>
74837 #include <net/caif/cfctrl.h>
74838 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74839 memset(&dev_info, 0, sizeof(dev_info));
74840 dev_info.id = 0xff;
74841 cfsrvl_init(&this->serv, 0, &dev_info, false);
74842 - atomic_set(&this->req_seq_no, 1);
74843 - atomic_set(&this->rsp_seq_no, 1);
74844 + atomic_set_unchecked(&this->req_seq_no, 1);
74845 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74846 this->serv.layer.receive = cfctrl_recv;
74847 sprintf(this->serv.layer.name, "ctrl");
74848 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74849 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74850 struct cfctrl_request_info *req)
74851 {
74852 spin_lock_bh(&ctrl->info_list_lock);
74853 - atomic_inc(&ctrl->req_seq_no);
74854 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74855 + atomic_inc_unchecked(&ctrl->req_seq_no);
74856 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74857 list_add_tail(&req->list, &ctrl->list);
74858 spin_unlock_bh(&ctrl->info_list_lock);
74859 }
74860 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74861 if (p != first)
74862 pr_warn("Requests are not received in order\n");
74863
74864 - atomic_set(&ctrl->rsp_seq_no,
74865 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74866 p->sequence_no);
74867 list_del(&p->list);
74868 goto out;
74869 diff --git a/net/can/gw.c b/net/can/gw.c
74870 index 3d79b12..8de85fa 100644
74871 --- a/net/can/gw.c
74872 +++ b/net/can/gw.c
74873 @@ -96,7 +96,7 @@ struct cf_mod {
74874 struct {
74875 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74876 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74877 - } csumfunc;
74878 + } __no_const csumfunc;
74879 };
74880
74881
74882 diff --git a/net/compat.c b/net/compat.c
74883 index e055708..3f80795 100644
74884 --- a/net/compat.c
74885 +++ b/net/compat.c
74886 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74887 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74888 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74889 return -EFAULT;
74890 - kmsg->msg_name = compat_ptr(tmp1);
74891 - kmsg->msg_iov = compat_ptr(tmp2);
74892 - kmsg->msg_control = compat_ptr(tmp3);
74893 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74894 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74895 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74896 return 0;
74897 }
74898
74899 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74900
74901 if (kern_msg->msg_namelen) {
74902 if (mode == VERIFY_READ) {
74903 - int err = move_addr_to_kernel(kern_msg->msg_name,
74904 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74905 kern_msg->msg_namelen,
74906 kern_address);
74907 if (err < 0)
74908 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74909 kern_msg->msg_name = NULL;
74910
74911 tot_len = iov_from_user_compat_to_kern(kern_iov,
74912 - (struct compat_iovec __user *)kern_msg->msg_iov,
74913 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74914 kern_msg->msg_iovlen);
74915 if (tot_len >= 0)
74916 kern_msg->msg_iov = kern_iov;
74917 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74918
74919 #define CMSG_COMPAT_FIRSTHDR(msg) \
74920 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74921 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74922 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74923 (struct compat_cmsghdr __user *)NULL)
74924
74925 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74926 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74927 (ucmlen) <= (unsigned long) \
74928 ((mhdr)->msg_controllen - \
74929 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74930 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74931
74932 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74933 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74934 {
74935 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74936 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74937 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74938 msg->msg_controllen)
74939 return NULL;
74940 return (struct compat_cmsghdr __user *)ptr;
74941 @@ -219,7 +219,7 @@ Efault:
74942
74943 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
74944 {
74945 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74946 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74947 struct compat_cmsghdr cmhdr;
74948 int cmlen;
74949
74950 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74951
74952 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74953 {
74954 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74955 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74956 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74957 int fdnum = scm->fp->count;
74958 struct file **fp = scm->fp->fp;
74959 @@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74960 return -EFAULT;
74961 old_fs = get_fs();
74962 set_fs(KERNEL_DS);
74963 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74964 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74965 set_fs(old_fs);
74966
74967 return err;
74968 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74969 len = sizeof(ktime);
74970 old_fs = get_fs();
74971 set_fs(KERNEL_DS);
74972 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74973 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74974 set_fs(old_fs);
74975
74976 if (!err) {
74977 @@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74978 case MCAST_JOIN_GROUP:
74979 case MCAST_LEAVE_GROUP:
74980 {
74981 - struct compat_group_req __user *gr32 = (void *)optval;
74982 + struct compat_group_req __user *gr32 = (void __user *)optval;
74983 struct group_req __user *kgr =
74984 compat_alloc_user_space(sizeof(struct group_req));
74985 u32 interface;
74986 @@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74987 case MCAST_BLOCK_SOURCE:
74988 case MCAST_UNBLOCK_SOURCE:
74989 {
74990 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74991 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74992 struct group_source_req __user *kgsr = compat_alloc_user_space(
74993 sizeof(struct group_source_req));
74994 u32 interface;
74995 @@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74996 }
74997 case MCAST_MSFILTER:
74998 {
74999 - struct compat_group_filter __user *gf32 = (void *)optval;
75000 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75001 struct group_filter __user *kgf;
75002 u32 interface, fmode, numsrc;
75003
75004 @@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
75005 char __user *optval, int __user *optlen,
75006 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75007 {
75008 - struct compat_group_filter __user *gf32 = (void *)optval;
75009 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75010 struct group_filter __user *kgf;
75011 int __user *koptlen;
75012 u32 interface, fmode, numsrc;
75013 diff --git a/net/core/datagram.c b/net/core/datagram.c
75014 index e4fbfd6..6a6ac94 100644
75015 --- a/net/core/datagram.c
75016 +++ b/net/core/datagram.c
75017 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
75018 }
75019
75020 kfree_skb(skb);
75021 - atomic_inc(&sk->sk_drops);
75022 + atomic_inc_unchecked(&sk->sk_drops);
75023 sk_mem_reclaim_partial(sk);
75024
75025 return err;
75026 diff --git a/net/core/dev.c b/net/core/dev.c
75027 index 533c586..f78a55f 100644
75028 --- a/net/core/dev.c
75029 +++ b/net/core/dev.c
75030 @@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
75031 if (no_module && capable(CAP_NET_ADMIN))
75032 no_module = request_module("netdev-%s", name);
75033 if (no_module && capable(CAP_SYS_MODULE)) {
75034 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75035 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
75036 +#else
75037 if (!request_module("%s", name))
75038 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
75039 name);
75040 +#endif
75041 }
75042 }
75043 EXPORT_SYMBOL(dev_load);
75044 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75045 {
75046 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
75047 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
75048 - atomic_long_inc(&dev->rx_dropped);
75049 + atomic_long_inc_unchecked(&dev->rx_dropped);
75050 kfree_skb(skb);
75051 return NET_RX_DROP;
75052 }
75053 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75054 nf_reset(skb);
75055
75056 if (unlikely(!is_skb_forwardable(dev, skb))) {
75057 - atomic_long_inc(&dev->rx_dropped);
75058 + atomic_long_inc_unchecked(&dev->rx_dropped);
75059 kfree_skb(skb);
75060 return NET_RX_DROP;
75061 }
75062 @@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75063
75064 struct dev_gso_cb {
75065 void (*destructor)(struct sk_buff *skb);
75066 -};
75067 +} __no_const;
75068
75069 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75070
75071 @@ -2877,7 +2881,7 @@ enqueue:
75072
75073 local_irq_restore(flags);
75074
75075 - atomic_long_inc(&skb->dev->rx_dropped);
75076 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75077 kfree_skb(skb);
75078 return NET_RX_DROP;
75079 }
75080 @@ -2949,7 +2953,7 @@ int netif_rx_ni(struct sk_buff *skb)
75081 }
75082 EXPORT_SYMBOL(netif_rx_ni);
75083
75084 -static void net_tx_action(struct softirq_action *h)
75085 +static void net_tx_action(void)
75086 {
75087 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75088
75089 @@ -3237,7 +3241,7 @@ ncls:
75090 if (pt_prev) {
75091 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75092 } else {
75093 - atomic_long_inc(&skb->dev->rx_dropped);
75094 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75095 kfree_skb(skb);
75096 /* Jamal, now you will not able to escape explaining
75097 * me how you were going to use this. :-)
75098 @@ -3797,7 +3801,7 @@ void netif_napi_del(struct napi_struct *napi)
75099 }
75100 EXPORT_SYMBOL(netif_napi_del);
75101
75102 -static void net_rx_action(struct softirq_action *h)
75103 +static void net_rx_action(void)
75104 {
75105 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75106 unsigned long time_limit = jiffies + 2;
75107 @@ -4267,8 +4271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
75108 else
75109 seq_printf(seq, "%04x", ntohs(pt->type));
75110
75111 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75112 + seq_printf(seq, " %-8s %p\n",
75113 + pt->dev ? pt->dev->name : "", NULL);
75114 +#else
75115 seq_printf(seq, " %-8s %pF\n",
75116 pt->dev ? pt->dev->name : "", pt->func);
75117 +#endif
75118 }
75119
75120 return 0;
75121 @@ -5818,7 +5827,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
75122 } else {
75123 netdev_stats_to_stats64(storage, &dev->stats);
75124 }
75125 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75126 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75127 return storage;
75128 }
75129 EXPORT_SYMBOL(dev_get_stats);
75130 diff --git a/net/core/flow.c b/net/core/flow.c
75131 index e318c7e..168b1d0 100644
75132 --- a/net/core/flow.c
75133 +++ b/net/core/flow.c
75134 @@ -61,7 +61,7 @@ struct flow_cache {
75135 struct timer_list rnd_timer;
75136 };
75137
75138 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
75139 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75140 EXPORT_SYMBOL(flow_cache_genid);
75141 static struct flow_cache flow_cache_global;
75142 static struct kmem_cache *flow_cachep __read_mostly;
75143 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75144
75145 static int flow_entry_valid(struct flow_cache_entry *fle)
75146 {
75147 - if (atomic_read(&flow_cache_genid) != fle->genid)
75148 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75149 return 0;
75150 if (fle->object && !fle->object->ops->check(fle->object))
75151 return 0;
75152 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75153 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75154 fcp->hash_count++;
75155 }
75156 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75157 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75158 flo = fle->object;
75159 if (!flo)
75160 goto ret_object;
75161 @@ -280,7 +280,7 @@ nocache:
75162 }
75163 flo = resolver(net, key, family, dir, flo, ctx);
75164 if (fle) {
75165 - fle->genid = atomic_read(&flow_cache_genid);
75166 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
75167 if (!IS_ERR(flo))
75168 fle->object = flo;
75169 else
75170 diff --git a/net/core/iovec.c b/net/core/iovec.c
75171 index 7e7aeb0..2a998cb 100644
75172 --- a/net/core/iovec.c
75173 +++ b/net/core/iovec.c
75174 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75175 if (m->msg_namelen) {
75176 if (mode == VERIFY_READ) {
75177 void __user *namep;
75178 - namep = (void __user __force *) m->msg_name;
75179 + namep = (void __force_user *) m->msg_name;
75180 err = move_addr_to_kernel(namep, m->msg_namelen,
75181 address);
75182 if (err < 0)
75183 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75184 }
75185
75186 size = m->msg_iovlen * sizeof(struct iovec);
75187 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75188 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75189 return -EFAULT;
75190
75191 m->msg_iov = iov;
75192 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75193 index 90430b7..0032ec0 100644
75194 --- a/net/core/rtnetlink.c
75195 +++ b/net/core/rtnetlink.c
75196 @@ -56,7 +56,7 @@ struct rtnl_link {
75197 rtnl_doit_func doit;
75198 rtnl_dumpit_func dumpit;
75199 rtnl_calcit_func calcit;
75200 -};
75201 +} __no_const;
75202
75203 static DEFINE_MUTEX(rtnl_mutex);
75204
75205 diff --git a/net/core/scm.c b/net/core/scm.c
75206 index 611c5ef..88f6d6d 100644
75207 --- a/net/core/scm.c
75208 +++ b/net/core/scm.c
75209 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
75210 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75211 {
75212 struct cmsghdr __user *cm
75213 - = (__force struct cmsghdr __user *)msg->msg_control;
75214 + = (struct cmsghdr __force_user *)msg->msg_control;
75215 struct cmsghdr cmhdr;
75216 int cmlen = CMSG_LEN(len);
75217 int err;
75218 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75219 err = -EFAULT;
75220 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75221 goto out;
75222 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75223 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75224 goto out;
75225 cmlen = CMSG_SPACE(len);
75226 if (msg->msg_controllen < cmlen)
75227 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
75228 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75229 {
75230 struct cmsghdr __user *cm
75231 - = (__force struct cmsghdr __user*)msg->msg_control;
75232 + = (struct cmsghdr __force_user *)msg->msg_control;
75233
75234 int fdmax = 0;
75235 int fdnum = scm->fp->count;
75236 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75237 if (fdnum < fdmax)
75238 fdmax = fdnum;
75239
75240 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75241 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75242 i++, cmfptr++)
75243 {
75244 int new_fd;
75245 diff --git a/net/core/sock.c b/net/core/sock.c
75246 index 0f8402e..f0b6338 100644
75247 --- a/net/core/sock.c
75248 +++ b/net/core/sock.c
75249 @@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75250 struct sk_buff_head *list = &sk->sk_receive_queue;
75251
75252 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75253 - atomic_inc(&sk->sk_drops);
75254 + atomic_inc_unchecked(&sk->sk_drops);
75255 trace_sock_rcvqueue_full(sk, skb);
75256 return -ENOMEM;
75257 }
75258 @@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75259 return err;
75260
75261 if (!sk_rmem_schedule(sk, skb->truesize)) {
75262 - atomic_inc(&sk->sk_drops);
75263 + atomic_inc_unchecked(&sk->sk_drops);
75264 return -ENOBUFS;
75265 }
75266
75267 @@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75268 skb_dst_force(skb);
75269
75270 spin_lock_irqsave(&list->lock, flags);
75271 - skb->dropcount = atomic_read(&sk->sk_drops);
75272 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75273 __skb_queue_tail(list, skb);
75274 spin_unlock_irqrestore(&list->lock, flags);
75275
75276 @@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75277 skb->dev = NULL;
75278
75279 if (sk_rcvqueues_full(sk, skb)) {
75280 - atomic_inc(&sk->sk_drops);
75281 + atomic_inc_unchecked(&sk->sk_drops);
75282 goto discard_and_relse;
75283 }
75284 if (nested)
75285 @@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75286 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75287 } else if (sk_add_backlog(sk, skb)) {
75288 bh_unlock_sock(sk);
75289 - atomic_inc(&sk->sk_drops);
75290 + atomic_inc_unchecked(&sk->sk_drops);
75291 goto discard_and_relse;
75292 }
75293
75294 @@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75295 if (len > sizeof(peercred))
75296 len = sizeof(peercred);
75297 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75298 - if (copy_to_user(optval, &peercred, len))
75299 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75300 return -EFAULT;
75301 goto lenout;
75302 }
75303 @@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75304 return -ENOTCONN;
75305 if (lv < len)
75306 return -EINVAL;
75307 - if (copy_to_user(optval, address, len))
75308 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75309 return -EFAULT;
75310 goto lenout;
75311 }
75312 @@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75313
75314 if (len > lv)
75315 len = lv;
75316 - if (copy_to_user(optval, &v, len))
75317 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75318 return -EFAULT;
75319 lenout:
75320 if (put_user(len, optlen))
75321 @@ -2131,7 +2131,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75322 */
75323 smp_wmb();
75324 atomic_set(&sk->sk_refcnt, 1);
75325 - atomic_set(&sk->sk_drops, 0);
75326 + atomic_set_unchecked(&sk->sk_drops, 0);
75327 }
75328 EXPORT_SYMBOL(sock_init_data);
75329
75330 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75331 index b9868e1..849f809 100644
75332 --- a/net/core/sock_diag.c
75333 +++ b/net/core/sock_diag.c
75334 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75335
75336 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75337 {
75338 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75339 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75340 cookie[1] != INET_DIAG_NOCOOKIE) &&
75341 ((u32)(unsigned long)sk != cookie[0] ||
75342 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75343 return -ESTALE;
75344 else
75345 +#endif
75346 return 0;
75347 }
75348 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75349
75350 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75351 {
75352 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75353 + cookie[0] = 0;
75354 + cookie[1] = 0;
75355 +#else
75356 cookie[0] = (u32)(unsigned long)sk;
75357 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75358 +#endif
75359 }
75360 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75361
75362 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75363 index 02e75d1..9a57a7c 100644
75364 --- a/net/decnet/sysctl_net_decnet.c
75365 +++ b/net/decnet/sysctl_net_decnet.c
75366 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75367
75368 if (len > *lenp) len = *lenp;
75369
75370 - if (copy_to_user(buffer, addr, len))
75371 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75372 return -EFAULT;
75373
75374 *lenp = len;
75375 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75376
75377 if (len > *lenp) len = *lenp;
75378
75379 - if (copy_to_user(buffer, devname, len))
75380 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75381 return -EFAULT;
75382
75383 *lenp = len;
75384 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75385 index 39a2d29..f39c0fe 100644
75386 --- a/net/econet/Kconfig
75387 +++ b/net/econet/Kconfig
75388 @@ -4,7 +4,7 @@
75389
75390 config ECONET
75391 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75392 - depends on EXPERIMENTAL && INET
75393 + depends on EXPERIMENTAL && INET && BROKEN
75394 ---help---
75395 Econet is a fairly old and slow networking protocol mainly used by
75396 Acorn computers to access file and print servers. It uses native
75397 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
75398 index c48adc5..667c1d4 100644
75399 --- a/net/ipv4/cipso_ipv4.c
75400 +++ b/net/ipv4/cipso_ipv4.c
75401 @@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
75402 case CIPSO_V4_TAG_LOCAL:
75403 /* This is a non-standard tag that we only allow for
75404 * local connections, so if the incoming interface is
75405 - * not the loopback device drop the packet. */
75406 - if (!(skb->dev->flags & IFF_LOOPBACK)) {
75407 + * not the loopback device drop the packet. Further,
75408 + * there is no legitimate reason for setting this from
75409 + * userspace so reject it if skb is NULL. */
75410 + if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
75411 err_offset = opt_iter;
75412 goto validate_return_locked;
75413 }
75414 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75415 index cbe3a68..a879b75 100644
75416 --- a/net/ipv4/fib_frontend.c
75417 +++ b/net/ipv4/fib_frontend.c
75418 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75419 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75420 fib_sync_up(dev);
75421 #endif
75422 - atomic_inc(&net->ipv4.dev_addr_genid);
75423 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75424 rt_cache_flush(dev_net(dev), -1);
75425 break;
75426 case NETDEV_DOWN:
75427 fib_del_ifaddr(ifa, NULL);
75428 - atomic_inc(&net->ipv4.dev_addr_genid);
75429 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75430 if (ifa->ifa_dev->ifa_list == NULL) {
75431 /* Last address was deleted from this interface.
75432 * Disable IP.
75433 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75434 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75435 fib_sync_up(dev);
75436 #endif
75437 - atomic_inc(&net->ipv4.dev_addr_genid);
75438 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75439 rt_cache_flush(dev_net(dev), -1);
75440 break;
75441 case NETDEV_DOWN:
75442 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75443 index 8861f91..ab1e3c1 100644
75444 --- a/net/ipv4/fib_semantics.c
75445 +++ b/net/ipv4/fib_semantics.c
75446 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75447 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75448 nh->nh_gw,
75449 nh->nh_parent->fib_scope);
75450 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75451 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75452
75453 return nh->nh_saddr;
75454 }
75455 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75456 index 984ec65..97ac518 100644
75457 --- a/net/ipv4/inet_hashtables.c
75458 +++ b/net/ipv4/inet_hashtables.c
75459 @@ -18,12 +18,15 @@
75460 #include <linux/sched.h>
75461 #include <linux/slab.h>
75462 #include <linux/wait.h>
75463 +#include <linux/security.h>
75464
75465 #include <net/inet_connection_sock.h>
75466 #include <net/inet_hashtables.h>
75467 #include <net/secure_seq.h>
75468 #include <net/ip.h>
75469
75470 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75471 +
75472 /*
75473 * Allocate and initialize a new local port bind bucket.
75474 * The bindhash mutex for snum's hash chain must be held here.
75475 @@ -530,6 +533,8 @@ ok:
75476 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75477 spin_unlock(&head->lock);
75478
75479 + gr_update_task_in_ip_table(current, inet_sk(sk));
75480 +
75481 if (tw) {
75482 inet_twsk_deschedule(tw, death_row);
75483 while (twrefcnt) {
75484 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75485 index dfba343..c827d50 100644
75486 --- a/net/ipv4/inetpeer.c
75487 +++ b/net/ipv4/inetpeer.c
75488 @@ -487,8 +487,8 @@ relookup:
75489 if (p) {
75490 p->daddr = *daddr;
75491 atomic_set(&p->refcnt, 1);
75492 - atomic_set(&p->rid, 0);
75493 - atomic_set(&p->ip_id_count,
75494 + atomic_set_unchecked(&p->rid, 0);
75495 + atomic_set_unchecked(&p->ip_id_count,
75496 (daddr->family == AF_INET) ?
75497 secure_ip_id(daddr->addr.a4) :
75498 secure_ipv6_id(daddr->addr.a6));
75499 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75500 index 3727e23..517f5df 100644
75501 --- a/net/ipv4/ip_fragment.c
75502 +++ b/net/ipv4/ip_fragment.c
75503 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75504 return 0;
75505
75506 start = qp->rid;
75507 - end = atomic_inc_return(&peer->rid);
75508 + end = atomic_inc_return_unchecked(&peer->rid);
75509 qp->rid = end;
75510
75511 rc = qp->q.fragments && (end - start) > max;
75512 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75513 index 2fd0fba..83fac99 100644
75514 --- a/net/ipv4/ip_sockglue.c
75515 +++ b/net/ipv4/ip_sockglue.c
75516 @@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75517 len = min_t(unsigned int, len, opt->optlen);
75518 if (put_user(len, optlen))
75519 return -EFAULT;
75520 - if (copy_to_user(optval, opt->__data, len))
75521 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75522 + copy_to_user(optval, opt->__data, len))
75523 return -EFAULT;
75524 return 0;
75525 }
75526 @@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75527 if (sk->sk_type != SOCK_STREAM)
75528 return -ENOPROTOOPT;
75529
75530 - msg.msg_control = optval;
75531 + msg.msg_control = (void __force_kernel *)optval;
75532 msg.msg_controllen = len;
75533 msg.msg_flags = flags;
75534
75535 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75536 index 92ac7e7..13f93d9 100644
75537 --- a/net/ipv4/ipconfig.c
75538 +++ b/net/ipv4/ipconfig.c
75539 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75540
75541 mm_segment_t oldfs = get_fs();
75542 set_fs(get_ds());
75543 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75544 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75545 set_fs(oldfs);
75546 return res;
75547 }
75548 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75549
75550 mm_segment_t oldfs = get_fs();
75551 set_fs(get_ds());
75552 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75553 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75554 set_fs(oldfs);
75555 return res;
75556 }
75557 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75558
75559 mm_segment_t oldfs = get_fs();
75560 set_fs(get_ds());
75561 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75562 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75563 set_fs(oldfs);
75564 return res;
75565 }
75566 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75567 index 50009c7..5996a9f 100644
75568 --- a/net/ipv4/ping.c
75569 +++ b/net/ipv4/ping.c
75570 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75571 sk_rmem_alloc_get(sp),
75572 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75573 atomic_read(&sp->sk_refcnt), sp,
75574 - atomic_read(&sp->sk_drops), len);
75575 + atomic_read_unchecked(&sp->sk_drops), len);
75576 }
75577
75578 static int ping_seq_show(struct seq_file *seq, void *v)
75579 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75580 index bbd604c..4d5469c 100644
75581 --- a/net/ipv4/raw.c
75582 +++ b/net/ipv4/raw.c
75583 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75584 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75585 {
75586 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75587 - atomic_inc(&sk->sk_drops);
75588 + atomic_inc_unchecked(&sk->sk_drops);
75589 kfree_skb(skb);
75590 return NET_RX_DROP;
75591 }
75592 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
75593
75594 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75595 {
75596 + struct icmp_filter filter;
75597 +
75598 if (optlen > sizeof(struct icmp_filter))
75599 optlen = sizeof(struct icmp_filter);
75600 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75601 + if (copy_from_user(&filter, optval, optlen))
75602 return -EFAULT;
75603 + raw_sk(sk)->filter = filter;
75604 return 0;
75605 }
75606
75607 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75608 {
75609 int len, ret = -EFAULT;
75610 + struct icmp_filter filter;
75611
75612 if (get_user(len, optlen))
75613 goto out;
75614 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75615 if (len > sizeof(struct icmp_filter))
75616 len = sizeof(struct icmp_filter);
75617 ret = -EFAULT;
75618 - if (put_user(len, optlen) ||
75619 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75620 + filter = raw_sk(sk)->filter;
75621 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75622 goto out;
75623 ret = 0;
75624 out: return ret;
75625 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75626 sk_wmem_alloc_get(sp),
75627 sk_rmem_alloc_get(sp),
75628 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75629 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75630 + atomic_read(&sp->sk_refcnt),
75631 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75632 + NULL,
75633 +#else
75634 + sp,
75635 +#endif
75636 + atomic_read_unchecked(&sp->sk_drops));
75637 }
75638
75639 static int raw_seq_show(struct seq_file *seq, void *v)
75640 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75641 index 167ea10..4b15883 100644
75642 --- a/net/ipv4/route.c
75643 +++ b/net/ipv4/route.c
75644 @@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75645
75646 static inline int rt_genid(struct net *net)
75647 {
75648 - return atomic_read(&net->ipv4.rt_genid);
75649 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75650 }
75651
75652 #ifdef CONFIG_PROC_FS
75653 @@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
75654 unsigned char shuffle;
75655
75656 get_random_bytes(&shuffle, sizeof(shuffle));
75657 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75658 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75659 inetpeer_invalidate_tree(AF_INET);
75660 }
75661
75662 @@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
75663 error = rt->dst.error;
75664 if (peer) {
75665 inet_peer_refcheck(rt->peer);
75666 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75667 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75668 if (peer->tcp_ts_stamp) {
75669 ts = peer->tcp_ts;
75670 tsage = get_seconds() - peer->tcp_ts_stamp;
75671 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75672 index 0cb86ce..8e7fda8 100644
75673 --- a/net/ipv4/tcp_ipv4.c
75674 +++ b/net/ipv4/tcp_ipv4.c
75675 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
75676 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75677
75678
75679 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75680 +extern int grsec_enable_blackhole;
75681 +#endif
75682 +
75683 #ifdef CONFIG_TCP_MD5SIG
75684 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
75685 __be32 daddr, __be32 saddr, const struct tcphdr *th);
75686 @@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75687 return 0;
75688
75689 reset:
75690 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75691 + if (!grsec_enable_blackhole)
75692 +#endif
75693 tcp_v4_send_reset(rsk, skb);
75694 discard:
75695 kfree_skb(skb);
75696 @@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75697 TCP_SKB_CB(skb)->sacked = 0;
75698
75699 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75700 - if (!sk)
75701 + if (!sk) {
75702 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75703 + ret = 1;
75704 +#endif
75705 goto no_tcp_socket;
75706 -
75707 + }
75708 process:
75709 - if (sk->sk_state == TCP_TIME_WAIT)
75710 + if (sk->sk_state == TCP_TIME_WAIT) {
75711 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75712 + ret = 2;
75713 +#endif
75714 goto do_time_wait;
75715 + }
75716
75717 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75718 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75719 @@ -1758,6 +1772,10 @@ no_tcp_socket:
75720 bad_packet:
75721 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75722 } else {
75723 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75724 + if (!grsec_enable_blackhole || (ret == 1 &&
75725 + (skb->dev->flags & IFF_LOOPBACK)))
75726 +#endif
75727 tcp_v4_send_reset(NULL, skb);
75728 }
75729
75730 @@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75731 0, /* non standard timer */
75732 0, /* open_requests have no inode */
75733 atomic_read(&sk->sk_refcnt),
75734 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75735 + NULL,
75736 +#else
75737 req,
75738 +#endif
75739 len);
75740 }
75741
75742 @@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75743 sock_i_uid(sk),
75744 icsk->icsk_probes_out,
75745 sock_i_ino(sk),
75746 - atomic_read(&sk->sk_refcnt), sk,
75747 + atomic_read(&sk->sk_refcnt),
75748 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75749 + NULL,
75750 +#else
75751 + sk,
75752 +#endif
75753 jiffies_to_clock_t(icsk->icsk_rto),
75754 jiffies_to_clock_t(icsk->icsk_ack.ato),
75755 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75756 @@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75757 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75758 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75759 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75760 - atomic_read(&tw->tw_refcnt), tw, len);
75761 + atomic_read(&tw->tw_refcnt),
75762 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75763 + NULL,
75764 +#else
75765 + tw,
75766 +#endif
75767 + len);
75768 }
75769
75770 #define TMPSZ 150
75771 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75772 index 3cabafb..640525b 100644
75773 --- a/net/ipv4/tcp_minisocks.c
75774 +++ b/net/ipv4/tcp_minisocks.c
75775 @@ -27,6 +27,10 @@
75776 #include <net/inet_common.h>
75777 #include <net/xfrm.h>
75778
75779 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75780 +extern int grsec_enable_blackhole;
75781 +#endif
75782 +
75783 int sysctl_tcp_syncookies __read_mostly = 1;
75784 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75785
75786 @@ -753,6 +757,10 @@ listen_overflow:
75787
75788 embryonic_reset:
75789 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75790 +
75791 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75792 + if (!grsec_enable_blackhole)
75793 +#endif
75794 if (!(flg & TCP_FLAG_RST))
75795 req->rsk_ops->send_reset(sk, skb);
75796
75797 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75798 index a981cdc..48f4c3a 100644
75799 --- a/net/ipv4/tcp_probe.c
75800 +++ b/net/ipv4/tcp_probe.c
75801 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75802 if (cnt + width >= len)
75803 break;
75804
75805 - if (copy_to_user(buf + cnt, tbuf, width))
75806 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75807 return -EFAULT;
75808 cnt += width;
75809 }
75810 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75811 index 34d4a02..3b57f86 100644
75812 --- a/net/ipv4/tcp_timer.c
75813 +++ b/net/ipv4/tcp_timer.c
75814 @@ -22,6 +22,10 @@
75815 #include <linux/gfp.h>
75816 #include <net/tcp.h>
75817
75818 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75819 +extern int grsec_lastack_retries;
75820 +#endif
75821 +
75822 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75823 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75824 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75825 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75826 }
75827 }
75828
75829 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75830 + if ((sk->sk_state == TCP_LAST_ACK) &&
75831 + (grsec_lastack_retries > 0) &&
75832 + (grsec_lastack_retries < retry_until))
75833 + retry_until = grsec_lastack_retries;
75834 +#endif
75835 +
75836 if (retransmits_timed_out(sk, retry_until,
75837 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75838 /* Has it gone just too far? */
75839 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75840 index fe14105..0618260 100644
75841 --- a/net/ipv4/udp.c
75842 +++ b/net/ipv4/udp.c
75843 @@ -87,6 +87,7 @@
75844 #include <linux/types.h>
75845 #include <linux/fcntl.h>
75846 #include <linux/module.h>
75847 +#include <linux/security.h>
75848 #include <linux/socket.h>
75849 #include <linux/sockios.h>
75850 #include <linux/igmp.h>
75851 @@ -109,6 +110,10 @@
75852 #include <trace/events/udp.h>
75853 #include "udp_impl.h"
75854
75855 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75856 +extern int grsec_enable_blackhole;
75857 +#endif
75858 +
75859 struct udp_table udp_table __read_mostly;
75860 EXPORT_SYMBOL(udp_table);
75861
75862 @@ -567,6 +572,9 @@ found:
75863 return s;
75864 }
75865
75866 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75867 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75868 +
75869 /*
75870 * This routine is called by the ICMP module when it gets some
75871 * sort of error condition. If err < 0 then the socket should
75872 @@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75873 dport = usin->sin_port;
75874 if (dport == 0)
75875 return -EINVAL;
75876 +
75877 + err = gr_search_udp_sendmsg(sk, usin);
75878 + if (err)
75879 + return err;
75880 } else {
75881 if (sk->sk_state != TCP_ESTABLISHED)
75882 return -EDESTADDRREQ;
75883 +
75884 + err = gr_search_udp_sendmsg(sk, NULL);
75885 + if (err)
75886 + return err;
75887 +
75888 daddr = inet->inet_daddr;
75889 dport = inet->inet_dport;
75890 /* Open fast path for connected socket.
75891 @@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
75892 udp_lib_checksum_complete(skb)) {
75893 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75894 IS_UDPLITE(sk));
75895 - atomic_inc(&sk->sk_drops);
75896 + atomic_inc_unchecked(&sk->sk_drops);
75897 __skb_unlink(skb, rcvq);
75898 __skb_queue_tail(&list_kill, skb);
75899 }
75900 @@ -1188,6 +1205,10 @@ try_again:
75901 if (!skb)
75902 goto out;
75903
75904 + err = gr_search_udp_recvmsg(sk, skb);
75905 + if (err)
75906 + goto out_free;
75907 +
75908 ulen = skb->len - sizeof(struct udphdr);
75909 copied = len;
75910 if (copied > ulen)
75911 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75912
75913 drop:
75914 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75915 - atomic_inc(&sk->sk_drops);
75916 + atomic_inc_unchecked(&sk->sk_drops);
75917 kfree_skb(skb);
75918 return -1;
75919 }
75920 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75921 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75922
75923 if (!skb1) {
75924 - atomic_inc(&sk->sk_drops);
75925 + atomic_inc_unchecked(&sk->sk_drops);
75926 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75927 IS_UDPLITE(sk));
75928 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75929 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75930 goto csum_error;
75931
75932 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75933 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75934 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75935 +#endif
75936 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75937
75938 /*
75939 @@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75940 sk_wmem_alloc_get(sp),
75941 sk_rmem_alloc_get(sp),
75942 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75943 - atomic_read(&sp->sk_refcnt), sp,
75944 - atomic_read(&sp->sk_drops), len);
75945 + atomic_read(&sp->sk_refcnt),
75946 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75947 + NULL,
75948 +#else
75949 + sp,
75950 +#endif
75951 + atomic_read_unchecked(&sp->sk_drops), len);
75952 }
75953
75954 int udp4_seq_show(struct seq_file *seq, void *v)
75955 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75956 index 7d5cb97..c56564f 100644
75957 --- a/net/ipv6/addrconf.c
75958 +++ b/net/ipv6/addrconf.c
75959 @@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75960 p.iph.ihl = 5;
75961 p.iph.protocol = IPPROTO_IPV6;
75962 p.iph.ttl = 64;
75963 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75964 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75965
75966 if (ops->ndo_do_ioctl) {
75967 mm_segment_t oldfs = get_fs();
75968 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75969 index 02dd203..e03fcc9 100644
75970 --- a/net/ipv6/inet6_connection_sock.c
75971 +++ b/net/ipv6/inet6_connection_sock.c
75972 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75973 #ifdef CONFIG_XFRM
75974 {
75975 struct rt6_info *rt = (struct rt6_info *)dst;
75976 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75977 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75978 }
75979 #endif
75980 }
75981 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75982 #ifdef CONFIG_XFRM
75983 if (dst) {
75984 struct rt6_info *rt = (struct rt6_info *)dst;
75985 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75986 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75987 __sk_dst_reset(sk);
75988 dst = NULL;
75989 }
75990 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75991 index 63dd1f8..e7f53ca 100644
75992 --- a/net/ipv6/ipv6_sockglue.c
75993 +++ b/net/ipv6/ipv6_sockglue.c
75994 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75995 if (sk->sk_type != SOCK_STREAM)
75996 return -ENOPROTOOPT;
75997
75998 - msg.msg_control = optval;
75999 + msg.msg_control = (void __force_kernel *)optval;
76000 msg.msg_controllen = len;
76001 msg.msg_flags = flags;
76002
76003 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76004 index 5bddea7..82d9d67 100644
76005 --- a/net/ipv6/raw.c
76006 +++ b/net/ipv6/raw.c
76007 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
76008 {
76009 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
76010 skb_checksum_complete(skb)) {
76011 - atomic_inc(&sk->sk_drops);
76012 + atomic_inc_unchecked(&sk->sk_drops);
76013 kfree_skb(skb);
76014 return NET_RX_DROP;
76015 }
76016 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76017 struct raw6_sock *rp = raw6_sk(sk);
76018
76019 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76020 - atomic_inc(&sk->sk_drops);
76021 + atomic_inc_unchecked(&sk->sk_drops);
76022 kfree_skb(skb);
76023 return NET_RX_DROP;
76024 }
76025 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76026
76027 if (inet->hdrincl) {
76028 if (skb_checksum_complete(skb)) {
76029 - atomic_inc(&sk->sk_drops);
76030 + atomic_inc_unchecked(&sk->sk_drops);
76031 kfree_skb(skb);
76032 return NET_RX_DROP;
76033 }
76034 @@ -602,7 +602,7 @@ out:
76035 return err;
76036 }
76037
76038 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76039 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76040 struct flowi6 *fl6, struct dst_entry **dstp,
76041 unsigned int flags)
76042 {
76043 @@ -914,12 +914,15 @@ do_confirm:
76044 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76045 char __user *optval, int optlen)
76046 {
76047 + struct icmp6_filter filter;
76048 +
76049 switch (optname) {
76050 case ICMPV6_FILTER:
76051 if (optlen > sizeof(struct icmp6_filter))
76052 optlen = sizeof(struct icmp6_filter);
76053 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76054 + if (copy_from_user(&filter, optval, optlen))
76055 return -EFAULT;
76056 + raw6_sk(sk)->filter = filter;
76057 return 0;
76058 default:
76059 return -ENOPROTOOPT;
76060 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76061 char __user *optval, int __user *optlen)
76062 {
76063 int len;
76064 + struct icmp6_filter filter;
76065
76066 switch (optname) {
76067 case ICMPV6_FILTER:
76068 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76069 len = sizeof(struct icmp6_filter);
76070 if (put_user(len, optlen))
76071 return -EFAULT;
76072 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76073 + filter = raw6_sk(sk)->filter;
76074 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
76075 return -EFAULT;
76076 return 0;
76077 default:
76078 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76079 0, 0L, 0,
76080 sock_i_uid(sp), 0,
76081 sock_i_ino(sp),
76082 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76083 + atomic_read(&sp->sk_refcnt),
76084 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76085 + NULL,
76086 +#else
76087 + sp,
76088 +#endif
76089 + atomic_read_unchecked(&sp->sk_drops));
76090 }
76091
76092 static int raw6_seq_show(struct seq_file *seq, void *v)
76093 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76094 index 98256cf..7f16dbd 100644
76095 --- a/net/ipv6/tcp_ipv6.c
76096 +++ b/net/ipv6/tcp_ipv6.c
76097 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76098 }
76099 #endif
76100
76101 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76102 +extern int grsec_enable_blackhole;
76103 +#endif
76104 +
76105 static void tcp_v6_hash(struct sock *sk)
76106 {
76107 if (sk->sk_state != TCP_CLOSE) {
76108 @@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76109 return 0;
76110
76111 reset:
76112 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76113 + if (!grsec_enable_blackhole)
76114 +#endif
76115 tcp_v6_send_reset(sk, skb);
76116 discard:
76117 if (opt_skb)
76118 @@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76119 TCP_SKB_CB(skb)->sacked = 0;
76120
76121 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76122 - if (!sk)
76123 + if (!sk) {
76124 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76125 + ret = 1;
76126 +#endif
76127 goto no_tcp_socket;
76128 + }
76129
76130 process:
76131 - if (sk->sk_state == TCP_TIME_WAIT)
76132 + if (sk->sk_state == TCP_TIME_WAIT) {
76133 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76134 + ret = 2;
76135 +#endif
76136 goto do_time_wait;
76137 + }
76138
76139 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76140 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76141 @@ -1676,6 +1691,10 @@ no_tcp_socket:
76142 bad_packet:
76143 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76144 } else {
76145 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76146 + if (!grsec_enable_blackhole || (ret == 1 &&
76147 + (skb->dev->flags & IFF_LOOPBACK)))
76148 +#endif
76149 tcp_v6_send_reset(NULL, skb);
76150 }
76151
76152 @@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
76153 uid,
76154 0, /* non standard timer */
76155 0, /* open_requests have no inode */
76156 - 0, req);
76157 + 0,
76158 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76159 + NULL
76160 +#else
76161 + req
76162 +#endif
76163 + );
76164 }
76165
76166 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76167 @@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76168 sock_i_uid(sp),
76169 icsk->icsk_probes_out,
76170 sock_i_ino(sp),
76171 - atomic_read(&sp->sk_refcnt), sp,
76172 + atomic_read(&sp->sk_refcnt),
76173 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76174 + NULL,
76175 +#else
76176 + sp,
76177 +#endif
76178 jiffies_to_clock_t(icsk->icsk_rto),
76179 jiffies_to_clock_t(icsk->icsk_ack.ato),
76180 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76181 @@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76182 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76183 tw->tw_substate, 0, 0,
76184 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76185 - atomic_read(&tw->tw_refcnt), tw);
76186 + atomic_read(&tw->tw_refcnt),
76187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76188 + NULL
76189 +#else
76190 + tw
76191 +#endif
76192 + );
76193 }
76194
76195 static int tcp6_seq_show(struct seq_file *seq, void *v)
76196 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76197 index 37b0699..d323408 100644
76198 --- a/net/ipv6/udp.c
76199 +++ b/net/ipv6/udp.c
76200 @@ -50,6 +50,10 @@
76201 #include <linux/seq_file.h>
76202 #include "udp_impl.h"
76203
76204 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76205 +extern int grsec_enable_blackhole;
76206 +#endif
76207 +
76208 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76209 {
76210 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76211 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76212
76213 return 0;
76214 drop:
76215 - atomic_inc(&sk->sk_drops);
76216 + atomic_inc_unchecked(&sk->sk_drops);
76217 drop_no_sk_drops_inc:
76218 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76219 kfree_skb(skb);
76220 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76221 continue;
76222 }
76223 drop:
76224 - atomic_inc(&sk->sk_drops);
76225 + atomic_inc_unchecked(&sk->sk_drops);
76226 UDP6_INC_STATS_BH(sock_net(sk),
76227 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76228 UDP6_INC_STATS_BH(sock_net(sk),
76229 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76230 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76231 proto == IPPROTO_UDPLITE);
76232
76233 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76234 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76235 +#endif
76236 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76237
76238 kfree_skb(skb);
76239 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76240 if (!sock_owned_by_user(sk))
76241 udpv6_queue_rcv_skb(sk, skb);
76242 else if (sk_add_backlog(sk, skb)) {
76243 - atomic_inc(&sk->sk_drops);
76244 + atomic_inc_unchecked(&sk->sk_drops);
76245 bh_unlock_sock(sk);
76246 sock_put(sk);
76247 goto discard;
76248 @@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76249 0, 0L, 0,
76250 sock_i_uid(sp), 0,
76251 sock_i_ino(sp),
76252 - atomic_read(&sp->sk_refcnt), sp,
76253 - atomic_read(&sp->sk_drops));
76254 + atomic_read(&sp->sk_refcnt),
76255 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76256 + NULL,
76257 +#else
76258 + sp,
76259 +#endif
76260 + atomic_read_unchecked(&sp->sk_drops));
76261 }
76262
76263 int udp6_seq_show(struct seq_file *seq, void *v)
76264 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76265 index 6b9d5a0..4dffaf1 100644
76266 --- a/net/irda/ircomm/ircomm_tty.c
76267 +++ b/net/irda/ircomm/ircomm_tty.c
76268 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76269 add_wait_queue(&self->open_wait, &wait);
76270
76271 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76272 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76273 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76274
76275 /* As far as I can see, we protect open_count - Jean II */
76276 spin_lock_irqsave(&self->spinlock, flags);
76277 if (!tty_hung_up_p(filp)) {
76278 extra_count = 1;
76279 - self->open_count--;
76280 + local_dec(&self->open_count);
76281 }
76282 spin_unlock_irqrestore(&self->spinlock, flags);
76283 - self->blocked_open++;
76284 + local_inc(&self->blocked_open);
76285
76286 while (1) {
76287 if (tty->termios->c_cflag & CBAUD) {
76288 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76289 }
76290
76291 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76292 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76293 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76294
76295 schedule();
76296 }
76297 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76298 if (extra_count) {
76299 /* ++ is not atomic, so this should be protected - Jean II */
76300 spin_lock_irqsave(&self->spinlock, flags);
76301 - self->open_count++;
76302 + local_inc(&self->open_count);
76303 spin_unlock_irqrestore(&self->spinlock, flags);
76304 }
76305 - self->blocked_open--;
76306 + local_dec(&self->blocked_open);
76307
76308 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76309 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76310 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76311
76312 if (!retval)
76313 self->flags |= ASYNC_NORMAL_ACTIVE;
76314 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76315 }
76316 /* ++ is not atomic, so this should be protected - Jean II */
76317 spin_lock_irqsave(&self->spinlock, flags);
76318 - self->open_count++;
76319 + local_inc(&self->open_count);
76320
76321 tty->driver_data = self;
76322 self->tty = tty;
76323 spin_unlock_irqrestore(&self->spinlock, flags);
76324
76325 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76326 - self->line, self->open_count);
76327 + self->line, local_read(&self->open_count));
76328
76329 /* Not really used by us, but lets do it anyway */
76330 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76331 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76332 return;
76333 }
76334
76335 - if ((tty->count == 1) && (self->open_count != 1)) {
76336 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76337 /*
76338 * Uh, oh. tty->count is 1, which means that the tty
76339 * structure will be freed. state->count should always
76340 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76341 */
76342 IRDA_DEBUG(0, "%s(), bad serial port count; "
76343 "tty->count is 1, state->count is %d\n", __func__ ,
76344 - self->open_count);
76345 - self->open_count = 1;
76346 + local_read(&self->open_count));
76347 + local_set(&self->open_count, 1);
76348 }
76349
76350 - if (--self->open_count < 0) {
76351 + if (local_dec_return(&self->open_count) < 0) {
76352 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76353 - __func__, self->line, self->open_count);
76354 - self->open_count = 0;
76355 + __func__, self->line, local_read(&self->open_count));
76356 + local_set(&self->open_count, 0);
76357 }
76358 - if (self->open_count) {
76359 + if (local_read(&self->open_count)) {
76360 spin_unlock_irqrestore(&self->spinlock, flags);
76361
76362 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76363 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76364 tty->closing = 0;
76365 self->tty = NULL;
76366
76367 - if (self->blocked_open) {
76368 + if (local_read(&self->blocked_open)) {
76369 if (self->close_delay)
76370 schedule_timeout_interruptible(self->close_delay);
76371 wake_up_interruptible(&self->open_wait);
76372 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76373 spin_lock_irqsave(&self->spinlock, flags);
76374 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76375 self->tty = NULL;
76376 - self->open_count = 0;
76377 + local_set(&self->open_count, 0);
76378 spin_unlock_irqrestore(&self->spinlock, flags);
76379
76380 wake_up_interruptible(&self->open_wait);
76381 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76382 seq_putc(m, '\n');
76383
76384 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76385 - seq_printf(m, "Open count: %d\n", self->open_count);
76386 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76387 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76388 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76389
76390 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76391 index cd6f7a9..e63fe89 100644
76392 --- a/net/iucv/af_iucv.c
76393 +++ b/net/iucv/af_iucv.c
76394 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
76395
76396 write_lock_bh(&iucv_sk_list.lock);
76397
76398 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76399 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76400 while (__iucv_get_sock_by_name(name)) {
76401 sprintf(name, "%08x",
76402 - atomic_inc_return(&iucv_sk_list.autobind_name));
76403 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76404 }
76405
76406 write_unlock_bh(&iucv_sk_list.lock);
76407 diff --git a/net/key/af_key.c b/net/key/af_key.c
76408 index 7e5d927..cdbb54e 100644
76409 --- a/net/key/af_key.c
76410 +++ b/net/key/af_key.c
76411 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76412 static u32 get_acqseq(void)
76413 {
76414 u32 res;
76415 - static atomic_t acqseq;
76416 + static atomic_unchecked_t acqseq;
76417
76418 do {
76419 - res = atomic_inc_return(&acqseq);
76420 + res = atomic_inc_return_unchecked(&acqseq);
76421 } while (!res);
76422 return res;
76423 }
76424 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76425 index db8fae5..ff070cd 100644
76426 --- a/net/mac80211/ieee80211_i.h
76427 +++ b/net/mac80211/ieee80211_i.h
76428 @@ -28,6 +28,7 @@
76429 #include <net/ieee80211_radiotap.h>
76430 #include <net/cfg80211.h>
76431 #include <net/mac80211.h>
76432 +#include <asm/local.h>
76433 #include "key.h"
76434 #include "sta_info.h"
76435
76436 @@ -842,7 +843,7 @@ struct ieee80211_local {
76437 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76438 spinlock_t queue_stop_reason_lock;
76439
76440 - int open_count;
76441 + local_t open_count;
76442 int monitors, cooked_mntrs;
76443 /* number of interfaces with corresponding FIF_ flags */
76444 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76445 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76446 index 48f937e..4ccd7b8 100644
76447 --- a/net/mac80211/iface.c
76448 +++ b/net/mac80211/iface.c
76449 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76450 break;
76451 }
76452
76453 - if (local->open_count == 0) {
76454 + if (local_read(&local->open_count) == 0) {
76455 res = drv_start(local);
76456 if (res)
76457 goto err_del_bss;
76458 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76459 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76460
76461 if (!is_valid_ether_addr(dev->dev_addr)) {
76462 - if (!local->open_count)
76463 + if (!local_read(&local->open_count))
76464 drv_stop(local);
76465 return -EADDRNOTAVAIL;
76466 }
76467 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76468 mutex_unlock(&local->mtx);
76469
76470 if (coming_up)
76471 - local->open_count++;
76472 + local_inc(&local->open_count);
76473
76474 if (hw_reconf_flags)
76475 ieee80211_hw_config(local, hw_reconf_flags);
76476 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76477 err_del_interface:
76478 drv_remove_interface(local, sdata);
76479 err_stop:
76480 - if (!local->open_count)
76481 + if (!local_read(&local->open_count))
76482 drv_stop(local);
76483 err_del_bss:
76484 sdata->bss = NULL;
76485 @@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76486 }
76487
76488 if (going_down)
76489 - local->open_count--;
76490 + local_dec(&local->open_count);
76491
76492 switch (sdata->vif.type) {
76493 case NL80211_IFTYPE_AP_VLAN:
76494 @@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76495
76496 ieee80211_recalc_ps(local, -1);
76497
76498 - if (local->open_count == 0) {
76499 + if (local_read(&local->open_count) == 0) {
76500 if (local->ops->napi_poll)
76501 napi_disable(&local->napi);
76502 ieee80211_clear_tx_pending(local);
76503 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76504 index 1633648..d45ebfa 100644
76505 --- a/net/mac80211/main.c
76506 +++ b/net/mac80211/main.c
76507 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76508 local->hw.conf.power_level = power;
76509 }
76510
76511 - if (changed && local->open_count) {
76512 + if (changed && local_read(&local->open_count)) {
76513 ret = drv_config(local, changed);
76514 /*
76515 * Goal:
76516 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76517 index ef8eba1..5c63952 100644
76518 --- a/net/mac80211/pm.c
76519 +++ b/net/mac80211/pm.c
76520 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76521 struct ieee80211_sub_if_data *sdata;
76522 struct sta_info *sta;
76523
76524 - if (!local->open_count)
76525 + if (!local_read(&local->open_count))
76526 goto suspend;
76527
76528 ieee80211_scan_cancel(local);
76529 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76530 cancel_work_sync(&local->dynamic_ps_enable_work);
76531 del_timer_sync(&local->dynamic_ps_timer);
76532
76533 - local->wowlan = wowlan && local->open_count;
76534 + local->wowlan = wowlan && local_read(&local->open_count);
76535 if (local->wowlan) {
76536 int err = drv_suspend(local, wowlan);
76537 if (err < 0) {
76538 @@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76539 }
76540
76541 /* stop hardware - this must stop RX */
76542 - if (local->open_count)
76543 + if (local_read(&local->open_count))
76544 ieee80211_stop_device(local);
76545
76546 suspend:
76547 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76548 index 3313c11..bec9f17 100644
76549 --- a/net/mac80211/rate.c
76550 +++ b/net/mac80211/rate.c
76551 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76552
76553 ASSERT_RTNL();
76554
76555 - if (local->open_count)
76556 + if (local_read(&local->open_count))
76557 return -EBUSY;
76558
76559 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76560 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76561 index c97a065..ff61928 100644
76562 --- a/net/mac80211/rc80211_pid_debugfs.c
76563 +++ b/net/mac80211/rc80211_pid_debugfs.c
76564 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76565
76566 spin_unlock_irqrestore(&events->lock, status);
76567
76568 - if (copy_to_user(buf, pb, p))
76569 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76570 return -EFAULT;
76571
76572 return p;
76573 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76574 index eb9d7c0..d34b832 100644
76575 --- a/net/mac80211/util.c
76576 +++ b/net/mac80211/util.c
76577 @@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76578 }
76579 #endif
76580 /* everything else happens only if HW was up & running */
76581 - if (!local->open_count)
76582 + if (!local_read(&local->open_count))
76583 goto wake_up;
76584
76585 /*
76586 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76587 index 0c6f67e..d02cdfc 100644
76588 --- a/net/netfilter/Kconfig
76589 +++ b/net/netfilter/Kconfig
76590 @@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
76591
76592 To compile it as a module, choose M here. If unsure, say N.
76593
76594 +config NETFILTER_XT_MATCH_GRADM
76595 + tristate '"gradm" match support'
76596 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76597 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76598 + ---help---
76599 + The gradm match allows to match on grsecurity RBAC being enabled.
76600 + It is useful when iptables rules are applied early on bootup to
76601 + prevent connections to the machine (except from a trusted host)
76602 + while the RBAC system is disabled.
76603 +
76604 config NETFILTER_XT_MATCH_HASHLIMIT
76605 tristate '"hashlimit" match support'
76606 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76607 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76608 index ca36765..0882e7c 100644
76609 --- a/net/netfilter/Makefile
76610 +++ b/net/netfilter/Makefile
76611 @@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76612 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76613 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76614 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76615 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76616 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76617 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76618 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76619 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76620 index 29fa5ba..8debc79 100644
76621 --- a/net/netfilter/ipvs/ip_vs_conn.c
76622 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76623 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76624 /* Increase the refcnt counter of the dest */
76625 atomic_inc(&dest->refcnt);
76626
76627 - conn_flags = atomic_read(&dest->conn_flags);
76628 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76629 if (cp->protocol != IPPROTO_UDP)
76630 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76631 /* Bind with the destination and its corresponding transmitter */
76632 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76633 atomic_set(&cp->refcnt, 1);
76634
76635 atomic_set(&cp->n_control, 0);
76636 - atomic_set(&cp->in_pkts, 0);
76637 + atomic_set_unchecked(&cp->in_pkts, 0);
76638
76639 atomic_inc(&ipvs->conn_count);
76640 if (flags & IP_VS_CONN_F_NO_CPORT)
76641 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76642
76643 /* Don't drop the entry if its number of incoming packets is not
76644 located in [0, 8] */
76645 - i = atomic_read(&cp->in_pkts);
76646 + i = atomic_read_unchecked(&cp->in_pkts);
76647 if (i > 8 || i < 0) return 0;
76648
76649 if (!todrop_rate[i]) return 0;
76650 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76651 index 00bdb1d..6725a48 100644
76652 --- a/net/netfilter/ipvs/ip_vs_core.c
76653 +++ b/net/netfilter/ipvs/ip_vs_core.c
76654 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76655 ret = cp->packet_xmit(skb, cp, pd->pp);
76656 /* do not touch skb anymore */
76657
76658 - atomic_inc(&cp->in_pkts);
76659 + atomic_inc_unchecked(&cp->in_pkts);
76660 ip_vs_conn_put(cp);
76661 return ret;
76662 }
76663 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76664 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76665 pkts = sysctl_sync_threshold(ipvs);
76666 else
76667 - pkts = atomic_add_return(1, &cp->in_pkts);
76668 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76669
76670 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76671 cp->protocol == IPPROTO_SCTP) {
76672 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76673 index f558998..7dfb054 100644
76674 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76675 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76676 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76677 ip_vs_rs_hash(ipvs, dest);
76678 write_unlock_bh(&ipvs->rs_lock);
76679 }
76680 - atomic_set(&dest->conn_flags, conn_flags);
76681 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76682
76683 /* bind the service */
76684 if (!dest->svc) {
76685 @@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76686 {
76687 struct net_device *dev = ptr;
76688 struct net *net = dev_net(dev);
76689 + struct netns_ipvs *ipvs = net_ipvs(net);
76690 struct ip_vs_service *svc;
76691 struct ip_vs_dest *dest;
76692 unsigned int idx;
76693
76694 - if (event != NETDEV_UNREGISTER)
76695 + if (event != NETDEV_UNREGISTER || !ipvs)
76696 return NOTIFY_DONE;
76697 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
76698 EnterFunction(2);
76699 @@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76700 }
76701 }
76702
76703 - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
76704 + list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
76705 __ip_vs_dev_reset(dest, dev);
76706 }
76707 mutex_unlock(&__ip_vs_mutex);
76708 @@ -2028,7 +2029,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76709 " %-7s %-6d %-10d %-10d\n",
76710 &dest->addr.in6,
76711 ntohs(dest->port),
76712 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76713 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76714 atomic_read(&dest->weight),
76715 atomic_read(&dest->activeconns),
76716 atomic_read(&dest->inactconns));
76717 @@ -2039,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76718 "%-7s %-6d %-10d %-10d\n",
76719 ntohl(dest->addr.ip),
76720 ntohs(dest->port),
76721 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76722 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76723 atomic_read(&dest->weight),
76724 atomic_read(&dest->activeconns),
76725 atomic_read(&dest->inactconns));
76726 @@ -2509,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76727
76728 entry.addr = dest->addr.ip;
76729 entry.port = dest->port;
76730 - entry.conn_flags = atomic_read(&dest->conn_flags);
76731 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76732 entry.weight = atomic_read(&dest->weight);
76733 entry.u_threshold = dest->u_threshold;
76734 entry.l_threshold = dest->l_threshold;
76735 @@ -3042,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76736 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76737
76738 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76739 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76740 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76741 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76742 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76743 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76744 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76745 index 8a0d6d6..90ec197 100644
76746 --- a/net/netfilter/ipvs/ip_vs_sync.c
76747 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76748 @@ -649,7 +649,7 @@ control:
76749 * i.e only increment in_pkts for Templates.
76750 */
76751 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76752 - int pkts = atomic_add_return(1, &cp->in_pkts);
76753 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76754
76755 if (pkts % sysctl_sync_period(ipvs) != 1)
76756 return;
76757 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76758
76759 if (opt)
76760 memcpy(&cp->in_seq, opt, sizeof(*opt));
76761 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76762 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76763 cp->state = state;
76764 cp->old_state = cp->state;
76765 /*
76766 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76767 index 7fd66de..e6fb361 100644
76768 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76769 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76770 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76771 else
76772 rc = NF_ACCEPT;
76773 /* do not touch skb anymore */
76774 - atomic_inc(&cp->in_pkts);
76775 + atomic_inc_unchecked(&cp->in_pkts);
76776 goto out;
76777 }
76778
76779 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76780 else
76781 rc = NF_ACCEPT;
76782 /* do not touch skb anymore */
76783 - atomic_inc(&cp->in_pkts);
76784 + atomic_inc_unchecked(&cp->in_pkts);
76785 goto out;
76786 }
76787
76788 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76789 index 66b2c54..c7884e3 100644
76790 --- a/net/netfilter/nfnetlink_log.c
76791 +++ b/net/netfilter/nfnetlink_log.c
76792 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76793 };
76794
76795 static DEFINE_SPINLOCK(instances_lock);
76796 -static atomic_t global_seq;
76797 +static atomic_unchecked_t global_seq;
76798
76799 #define INSTANCE_BUCKETS 16
76800 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76801 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76802 /* global sequence number */
76803 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76804 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76805 - htonl(atomic_inc_return(&global_seq)));
76806 + htonl(atomic_inc_return_unchecked(&global_seq)));
76807
76808 if (data_len) {
76809 struct nlattr *nla;
76810 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76811 new file mode 100644
76812 index 0000000..6905327
76813 --- /dev/null
76814 +++ b/net/netfilter/xt_gradm.c
76815 @@ -0,0 +1,51 @@
76816 +/*
76817 + * gradm match for netfilter
76818 + * Copyright © Zbigniew Krzystolik, 2010
76819 + *
76820 + * This program is free software; you can redistribute it and/or modify
76821 + * it under the terms of the GNU General Public License; either version
76822 + * 2 or 3 as published by the Free Software Foundation.
76823 + */
76824 +#include <linux/module.h>
76825 +#include <linux/moduleparam.h>
76826 +#include <linux/skbuff.h>
76827 +#include <linux/netfilter/x_tables.h>
76828 +#include <linux/grsecurity.h>
76829 +#include <linux/netfilter/xt_gradm.h>
76830 +
76831 +static bool
76832 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76833 +{
76834 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76835 + bool retval = false;
76836 + if (gr_acl_is_enabled())
76837 + retval = true;
76838 + return retval ^ info->invflags;
76839 +}
76840 +
76841 +static struct xt_match gradm_mt_reg __read_mostly = {
76842 + .name = "gradm",
76843 + .revision = 0,
76844 + .family = NFPROTO_UNSPEC,
76845 + .match = gradm_mt,
76846 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76847 + .me = THIS_MODULE,
76848 +};
76849 +
76850 +static int __init gradm_mt_init(void)
76851 +{
76852 + return xt_register_match(&gradm_mt_reg);
76853 +}
76854 +
76855 +static void __exit gradm_mt_exit(void)
76856 +{
76857 + xt_unregister_match(&gradm_mt_reg);
76858 +}
76859 +
76860 +module_init(gradm_mt_init);
76861 +module_exit(gradm_mt_exit);
76862 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76863 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76864 +MODULE_LICENSE("GPL");
76865 +MODULE_ALIAS("ipt_gradm");
76866 +MODULE_ALIAS("ip6t_gradm");
76867 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76868 index 4fe4fb4..87a89e5 100644
76869 --- a/net/netfilter/xt_statistic.c
76870 +++ b/net/netfilter/xt_statistic.c
76871 @@ -19,7 +19,7 @@
76872 #include <linux/module.h>
76873
76874 struct xt_statistic_priv {
76875 - atomic_t count;
76876 + atomic_unchecked_t count;
76877 } ____cacheline_aligned_in_smp;
76878
76879 MODULE_LICENSE("GPL");
76880 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76881 break;
76882 case XT_STATISTIC_MODE_NTH:
76883 do {
76884 - oval = atomic_read(&info->master->count);
76885 + oval = atomic_read_unchecked(&info->master->count);
76886 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76887 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76888 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76889 if (nval == 0)
76890 ret = !ret;
76891 break;
76892 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76893 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76894 if (info->master == NULL)
76895 return -ENOMEM;
76896 - atomic_set(&info->master->count, info->u.nth.count);
76897 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76898
76899 return 0;
76900 }
76901 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76902 index faa48f7..65f7f54 100644
76903 --- a/net/netlink/af_netlink.c
76904 +++ b/net/netlink/af_netlink.c
76905 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
76906 sk->sk_error_report(sk);
76907 }
76908 }
76909 - atomic_inc(&sk->sk_drops);
76910 + atomic_inc_unchecked(&sk->sk_drops);
76911 }
76912
76913 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76914 @@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76915 sk_wmem_alloc_get(s),
76916 nlk->cb,
76917 atomic_read(&s->sk_refcnt),
76918 - atomic_read(&s->sk_drops),
76919 + atomic_read_unchecked(&s->sk_drops),
76920 sock_i_ino(s)
76921 );
76922
76923 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76924 index 06592d8..64860f6 100644
76925 --- a/net/netrom/af_netrom.c
76926 +++ b/net/netrom/af_netrom.c
76927 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76928 struct sock *sk = sock->sk;
76929 struct nr_sock *nr = nr_sk(sk);
76930
76931 + memset(sax, 0, sizeof(*sax));
76932 lock_sock(sk);
76933 if (peer != 0) {
76934 if (sk->sk_state != TCP_ESTABLISHED) {
76935 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76936 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76937 } else {
76938 sax->fsa_ax25.sax25_family = AF_NETROM;
76939 - sax->fsa_ax25.sax25_ndigis = 0;
76940 sax->fsa_ax25.sax25_call = nr->source_addr;
76941 *uaddr_len = sizeof(struct sockaddr_ax25);
76942 }
76943 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76944 index 4f2c0df..f0ff342 100644
76945 --- a/net/packet/af_packet.c
76946 +++ b/net/packet/af_packet.c
76947 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76948
76949 spin_lock(&sk->sk_receive_queue.lock);
76950 po->stats.tp_packets++;
76951 - skb->dropcount = atomic_read(&sk->sk_drops);
76952 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76953 __skb_queue_tail(&sk->sk_receive_queue, skb);
76954 spin_unlock(&sk->sk_receive_queue.lock);
76955 sk->sk_data_ready(sk, skb->len);
76956 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76957 drop_n_acct:
76958 spin_lock(&sk->sk_receive_queue.lock);
76959 po->stats.tp_drops++;
76960 - atomic_inc(&sk->sk_drops);
76961 + atomic_inc_unchecked(&sk->sk_drops);
76962 spin_unlock(&sk->sk_receive_queue.lock);
76963
76964 drop_n_restore:
76965 @@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76966 case PACKET_HDRLEN:
76967 if (len > sizeof(int))
76968 len = sizeof(int);
76969 - if (copy_from_user(&val, optval, len))
76970 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76971 return -EFAULT;
76972 switch (val) {
76973 case TPACKET_V1:
76974 @@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76975
76976 if (put_user(len, optlen))
76977 return -EFAULT;
76978 - if (copy_to_user(optval, data, len))
76979 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76980 return -EFAULT;
76981 return 0;
76982 }
76983 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76984 index d65f699..05aa6ce 100644
76985 --- a/net/phonet/af_phonet.c
76986 +++ b/net/phonet/af_phonet.c
76987 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76988 {
76989 struct phonet_protocol *pp;
76990
76991 - if (protocol >= PHONET_NPROTO)
76992 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76993 return NULL;
76994
76995 rcu_read_lock();
76996 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76997 {
76998 int err = 0;
76999
77000 - if (protocol >= PHONET_NPROTO)
77001 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77002 return -EINVAL;
77003
77004 err = proto_register(pp->prot, 1);
77005 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
77006 index 9726fe6..fc4e3a4 100644
77007 --- a/net/phonet/pep.c
77008 +++ b/net/phonet/pep.c
77009 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77010
77011 case PNS_PEP_CTRL_REQ:
77012 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
77013 - atomic_inc(&sk->sk_drops);
77014 + atomic_inc_unchecked(&sk->sk_drops);
77015 break;
77016 }
77017 __skb_pull(skb, 4);
77018 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77019 }
77020
77021 if (pn->rx_credits == 0) {
77022 - atomic_inc(&sk->sk_drops);
77023 + atomic_inc_unchecked(&sk->sk_drops);
77024 err = -ENOBUFS;
77025 break;
77026 }
77027 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
77028 }
77029
77030 if (pn->rx_credits == 0) {
77031 - atomic_inc(&sk->sk_drops);
77032 + atomic_inc_unchecked(&sk->sk_drops);
77033 err = NET_RX_DROP;
77034 break;
77035 }
77036 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
77037 index 4c7eff3..59c727f 100644
77038 --- a/net/phonet/socket.c
77039 +++ b/net/phonet/socket.c
77040 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
77041 pn->resource, sk->sk_state,
77042 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
77043 sock_i_uid(sk), sock_i_ino(sk),
77044 - atomic_read(&sk->sk_refcnt), sk,
77045 - atomic_read(&sk->sk_drops), &len);
77046 + atomic_read(&sk->sk_refcnt),
77047 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77048 + NULL,
77049 +#else
77050 + sk,
77051 +#endif
77052 + atomic_read_unchecked(&sk->sk_drops), &len);
77053 }
77054 seq_printf(seq, "%*s\n", 127 - len, "");
77055 return 0;
77056 diff --git a/net/rds/cong.c b/net/rds/cong.c
77057 index e5b65ac..f3b6fb7 100644
77058 --- a/net/rds/cong.c
77059 +++ b/net/rds/cong.c
77060 @@ -78,7 +78,7 @@
77061 * finds that the saved generation number is smaller than the global generation
77062 * number, it wakes up the process.
77063 */
77064 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
77065 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
77066
77067 /*
77068 * Congestion monitoring
77069 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
77070 rdsdebug("waking map %p for %pI4\n",
77071 map, &map->m_addr);
77072 rds_stats_inc(s_cong_update_received);
77073 - atomic_inc(&rds_cong_generation);
77074 + atomic_inc_unchecked(&rds_cong_generation);
77075 if (waitqueue_active(&map->m_waitq))
77076 wake_up(&map->m_waitq);
77077 if (waitqueue_active(&rds_poll_waitq))
77078 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
77079
77080 int rds_cong_updated_since(unsigned long *recent)
77081 {
77082 - unsigned long gen = atomic_read(&rds_cong_generation);
77083 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
77084
77085 if (likely(*recent == gen))
77086 return 0;
77087 diff --git a/net/rds/ib.h b/net/rds/ib.h
77088 index edfaaaf..8c89879 100644
77089 --- a/net/rds/ib.h
77090 +++ b/net/rds/ib.h
77091 @@ -128,7 +128,7 @@ struct rds_ib_connection {
77092 /* sending acks */
77093 unsigned long i_ack_flags;
77094 #ifdef KERNEL_HAS_ATOMIC64
77095 - atomic64_t i_ack_next; /* next ACK to send */
77096 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77097 #else
77098 spinlock_t i_ack_lock; /* protect i_ack_next */
77099 u64 i_ack_next; /* next ACK to send */
77100 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
77101 index a1e1162..265e129 100644
77102 --- a/net/rds/ib_cm.c
77103 +++ b/net/rds/ib_cm.c
77104 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
77105 /* Clear the ACK state */
77106 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77107 #ifdef KERNEL_HAS_ATOMIC64
77108 - atomic64_set(&ic->i_ack_next, 0);
77109 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77110 #else
77111 ic->i_ack_next = 0;
77112 #endif
77113 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
77114 index 8d19491..05a3e65 100644
77115 --- a/net/rds/ib_recv.c
77116 +++ b/net/rds/ib_recv.c
77117 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77118 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
77119 int ack_required)
77120 {
77121 - atomic64_set(&ic->i_ack_next, seq);
77122 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77123 if (ack_required) {
77124 smp_mb__before_clear_bit();
77125 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77126 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77127 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77128 smp_mb__after_clear_bit();
77129
77130 - return atomic64_read(&ic->i_ack_next);
77131 + return atomic64_read_unchecked(&ic->i_ack_next);
77132 }
77133 #endif
77134
77135 diff --git a/net/rds/iw.h b/net/rds/iw.h
77136 index 04ce3b1..48119a6 100644
77137 --- a/net/rds/iw.h
77138 +++ b/net/rds/iw.h
77139 @@ -134,7 +134,7 @@ struct rds_iw_connection {
77140 /* sending acks */
77141 unsigned long i_ack_flags;
77142 #ifdef KERNEL_HAS_ATOMIC64
77143 - atomic64_t i_ack_next; /* next ACK to send */
77144 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77145 #else
77146 spinlock_t i_ack_lock; /* protect i_ack_next */
77147 u64 i_ack_next; /* next ACK to send */
77148 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
77149 index a91e1db..cf3053f 100644
77150 --- a/net/rds/iw_cm.c
77151 +++ b/net/rds/iw_cm.c
77152 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
77153 /* Clear the ACK state */
77154 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77155 #ifdef KERNEL_HAS_ATOMIC64
77156 - atomic64_set(&ic->i_ack_next, 0);
77157 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77158 #else
77159 ic->i_ack_next = 0;
77160 #endif
77161 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
77162 index 4503335..db566b4 100644
77163 --- a/net/rds/iw_recv.c
77164 +++ b/net/rds/iw_recv.c
77165 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77166 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77167 int ack_required)
77168 {
77169 - atomic64_set(&ic->i_ack_next, seq);
77170 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77171 if (ack_required) {
77172 smp_mb__before_clear_bit();
77173 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77174 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77175 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77176 smp_mb__after_clear_bit();
77177
77178 - return atomic64_read(&ic->i_ack_next);
77179 + return atomic64_read_unchecked(&ic->i_ack_next);
77180 }
77181 #endif
77182
77183 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77184 index edac9ef..16bcb98 100644
77185 --- a/net/rds/tcp.c
77186 +++ b/net/rds/tcp.c
77187 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
77188 int val = 1;
77189
77190 set_fs(KERNEL_DS);
77191 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77192 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77193 sizeof(val));
77194 set_fs(oldfs);
77195 }
77196 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77197 index 1b4fd68..2234175 100644
77198 --- a/net/rds/tcp_send.c
77199 +++ b/net/rds/tcp_send.c
77200 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77201
77202 oldfs = get_fs();
77203 set_fs(KERNEL_DS);
77204 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77205 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77206 sizeof(val));
77207 set_fs(oldfs);
77208 }
77209 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77210 index 74c064c..fdec26f 100644
77211 --- a/net/rxrpc/af_rxrpc.c
77212 +++ b/net/rxrpc/af_rxrpc.c
77213 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77214 __be32 rxrpc_epoch;
77215
77216 /* current debugging ID */
77217 -atomic_t rxrpc_debug_id;
77218 +atomic_unchecked_t rxrpc_debug_id;
77219
77220 /* count of skbs currently in use */
77221 atomic_t rxrpc_n_skbs;
77222 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77223 index c3126e8..21facc7 100644
77224 --- a/net/rxrpc/ar-ack.c
77225 +++ b/net/rxrpc/ar-ack.c
77226 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77227
77228 _enter("{%d,%d,%d,%d},",
77229 call->acks_hard, call->acks_unacked,
77230 - atomic_read(&call->sequence),
77231 + atomic_read_unchecked(&call->sequence),
77232 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77233
77234 stop = 0;
77235 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77236
77237 /* each Tx packet has a new serial number */
77238 sp->hdr.serial =
77239 - htonl(atomic_inc_return(&call->conn->serial));
77240 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77241
77242 hdr = (struct rxrpc_header *) txb->head;
77243 hdr->serial = sp->hdr.serial;
77244 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77245 */
77246 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77247 {
77248 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77249 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77250 }
77251
77252 /*
77253 @@ -629,7 +629,7 @@ process_further:
77254
77255 latest = ntohl(sp->hdr.serial);
77256 hard = ntohl(ack.firstPacket);
77257 - tx = atomic_read(&call->sequence);
77258 + tx = atomic_read_unchecked(&call->sequence);
77259
77260 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77261 latest,
77262 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
77263 goto maybe_reschedule;
77264
77265 send_ACK_with_skew:
77266 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77267 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77268 ntohl(ack.serial));
77269 send_ACK:
77270 mtu = call->conn->trans->peer->if_mtu;
77271 @@ -1173,7 +1173,7 @@ send_ACK:
77272 ackinfo.rxMTU = htonl(5692);
77273 ackinfo.jumbo_max = htonl(4);
77274
77275 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77276 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77277 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77278 ntohl(hdr.serial),
77279 ntohs(ack.maxSkew),
77280 @@ -1191,7 +1191,7 @@ send_ACK:
77281 send_message:
77282 _debug("send message");
77283
77284 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77285 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77286 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77287 send_message_2:
77288
77289 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77290 index bf656c2..48f9d27 100644
77291 --- a/net/rxrpc/ar-call.c
77292 +++ b/net/rxrpc/ar-call.c
77293 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77294 spin_lock_init(&call->lock);
77295 rwlock_init(&call->state_lock);
77296 atomic_set(&call->usage, 1);
77297 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77298 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77299 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77300
77301 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77302 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77303 index 4106ca9..a338d7a 100644
77304 --- a/net/rxrpc/ar-connection.c
77305 +++ b/net/rxrpc/ar-connection.c
77306 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77307 rwlock_init(&conn->lock);
77308 spin_lock_init(&conn->state_lock);
77309 atomic_set(&conn->usage, 1);
77310 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77311 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77312 conn->avail_calls = RXRPC_MAXCALLS;
77313 conn->size_align = 4;
77314 conn->header_size = sizeof(struct rxrpc_header);
77315 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77316 index e7ed43a..6afa140 100644
77317 --- a/net/rxrpc/ar-connevent.c
77318 +++ b/net/rxrpc/ar-connevent.c
77319 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77320
77321 len = iov[0].iov_len + iov[1].iov_len;
77322
77323 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77324 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77325 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77326
77327 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77328 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77329 index 1a2b0633..e8d1382 100644
77330 --- a/net/rxrpc/ar-input.c
77331 +++ b/net/rxrpc/ar-input.c
77332 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77333 /* track the latest serial number on this connection for ACK packet
77334 * information */
77335 serial = ntohl(sp->hdr.serial);
77336 - hi_serial = atomic_read(&call->conn->hi_serial);
77337 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77338 while (serial > hi_serial)
77339 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77340 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77341 serial);
77342
77343 /* request ACK generation for any ACK or DATA packet that requests
77344 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77345 index 8e22bd3..f66d1c0 100644
77346 --- a/net/rxrpc/ar-internal.h
77347 +++ b/net/rxrpc/ar-internal.h
77348 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77349 int error; /* error code for local abort */
77350 int debug_id; /* debug ID for printks */
77351 unsigned call_counter; /* call ID counter */
77352 - atomic_t serial; /* packet serial number counter */
77353 - atomic_t hi_serial; /* highest serial number received */
77354 + atomic_unchecked_t serial; /* packet serial number counter */
77355 + atomic_unchecked_t hi_serial; /* highest serial number received */
77356 u8 avail_calls; /* number of calls available */
77357 u8 size_align; /* data size alignment (for security) */
77358 u8 header_size; /* rxrpc + security header size */
77359 @@ -346,7 +346,7 @@ struct rxrpc_call {
77360 spinlock_t lock;
77361 rwlock_t state_lock; /* lock for state transition */
77362 atomic_t usage;
77363 - atomic_t sequence; /* Tx data packet sequence counter */
77364 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77365 u32 abort_code; /* local/remote abort code */
77366 enum { /* current state of call */
77367 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77368 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77369 */
77370 extern atomic_t rxrpc_n_skbs;
77371 extern __be32 rxrpc_epoch;
77372 -extern atomic_t rxrpc_debug_id;
77373 +extern atomic_unchecked_t rxrpc_debug_id;
77374 extern struct workqueue_struct *rxrpc_workqueue;
77375
77376 /*
77377 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77378 index 87f7135..74d3703 100644
77379 --- a/net/rxrpc/ar-local.c
77380 +++ b/net/rxrpc/ar-local.c
77381 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77382 spin_lock_init(&local->lock);
77383 rwlock_init(&local->services_lock);
77384 atomic_set(&local->usage, 1);
77385 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77386 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77387 memcpy(&local->srx, srx, sizeof(*srx));
77388 }
77389
77390 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77391 index 16ae887..d24f12b 100644
77392 --- a/net/rxrpc/ar-output.c
77393 +++ b/net/rxrpc/ar-output.c
77394 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77395 sp->hdr.cid = call->cid;
77396 sp->hdr.callNumber = call->call_id;
77397 sp->hdr.seq =
77398 - htonl(atomic_inc_return(&call->sequence));
77399 + htonl(atomic_inc_return_unchecked(&call->sequence));
77400 sp->hdr.serial =
77401 - htonl(atomic_inc_return(&conn->serial));
77402 + htonl(atomic_inc_return_unchecked(&conn->serial));
77403 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77404 sp->hdr.userStatus = 0;
77405 sp->hdr.securityIndex = conn->security_ix;
77406 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77407 index 2754f09..b20e38f 100644
77408 --- a/net/rxrpc/ar-peer.c
77409 +++ b/net/rxrpc/ar-peer.c
77410 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77411 INIT_LIST_HEAD(&peer->error_targets);
77412 spin_lock_init(&peer->lock);
77413 atomic_set(&peer->usage, 1);
77414 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77415 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77416 memcpy(&peer->srx, srx, sizeof(*srx));
77417
77418 rxrpc_assess_MTU_size(peer);
77419 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77420 index 38047f7..9f48511 100644
77421 --- a/net/rxrpc/ar-proc.c
77422 +++ b/net/rxrpc/ar-proc.c
77423 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77424 atomic_read(&conn->usage),
77425 rxrpc_conn_states[conn->state],
77426 key_serial(conn->key),
77427 - atomic_read(&conn->serial),
77428 - atomic_read(&conn->hi_serial));
77429 + atomic_read_unchecked(&conn->serial),
77430 + atomic_read_unchecked(&conn->hi_serial));
77431
77432 return 0;
77433 }
77434 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77435 index 92df566..87ec1bf 100644
77436 --- a/net/rxrpc/ar-transport.c
77437 +++ b/net/rxrpc/ar-transport.c
77438 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77439 spin_lock_init(&trans->client_lock);
77440 rwlock_init(&trans->conn_lock);
77441 atomic_set(&trans->usage, 1);
77442 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77443 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77444
77445 if (peer->srx.transport.family == AF_INET) {
77446 switch (peer->srx.transport_type) {
77447 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77448 index 7635107..4670276 100644
77449 --- a/net/rxrpc/rxkad.c
77450 +++ b/net/rxrpc/rxkad.c
77451 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77452
77453 len = iov[0].iov_len + iov[1].iov_len;
77454
77455 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77456 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77457 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77458
77459 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77460 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77461
77462 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77463
77464 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77465 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77466 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77467
77468 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77469 diff --git a/net/sctp/input.c b/net/sctp/input.c
77470 index 80f71af..be772c0 100644
77471 --- a/net/sctp/input.c
77472 +++ b/net/sctp/input.c
77473 @@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
77474
77475 epb = &ep->base;
77476
77477 - if (hlist_unhashed(&epb->node))
77478 - return;
77479 -
77480 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
77481
77482 head = &sctp_ep_hashtable[epb->hashent];
77483
77484 sctp_write_lock(&head->lock);
77485 - __hlist_del(&epb->node);
77486 + hlist_del_init(&epb->node);
77487 sctp_write_unlock(&head->lock);
77488 }
77489
77490 @@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
77491 head = &sctp_assoc_hashtable[epb->hashent];
77492
77493 sctp_write_lock(&head->lock);
77494 - __hlist_del(&epb->node);
77495 + hlist_del_init(&epb->node);
77496 sctp_write_unlock(&head->lock);
77497 }
77498
77499 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77500 index 1e2eee8..ce3967e 100644
77501 --- a/net/sctp/proc.c
77502 +++ b/net/sctp/proc.c
77503 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77504 seq_printf(seq,
77505 "%8pK %8pK %-3d %-3d %-2d %-4d "
77506 "%4d %8d %8d %7d %5lu %-5d %5d ",
77507 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77508 + assoc, sk,
77509 + sctp_sk(sk)->type, sk->sk_state,
77510 assoc->state, hash,
77511 assoc->assoc_id,
77512 assoc->sndbuf_used,
77513 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77514 index 92ba71d..9352c05 100644
77515 --- a/net/sctp/socket.c
77516 +++ b/net/sctp/socket.c
77517 @@ -1231,8 +1231,14 @@ out_free:
77518 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
77519 " kaddrs: %p err: %d\n",
77520 asoc, kaddrs, err);
77521 - if (asoc)
77522 + if (asoc) {
77523 + /* sctp_primitive_ASSOCIATE may have added this association
77524 + * To the hash table, try to unhash it, just in case, its a noop
77525 + * if it wasn't hashed so we're safe
77526 + */
77527 + sctp_unhash_established(asoc);
77528 sctp_association_free(asoc);
77529 + }
77530 return err;
77531 }
77532
77533 @@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
77534 goto out_unlock;
77535
77536 out_free:
77537 - if (new_asoc)
77538 + if (new_asoc) {
77539 + sctp_unhash_established(asoc);
77540 sctp_association_free(asoc);
77541 + }
77542 out_unlock:
77543 sctp_release_sock(sk);
77544
77545 @@ -4569,7 +4577,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77546 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77547 if (space_left < addrlen)
77548 return -ENOMEM;
77549 - if (copy_to_user(to, &temp, addrlen))
77550 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77551 return -EFAULT;
77552 to += addrlen;
77553 cnt++;
77554 diff --git a/net/socket.c b/net/socket.c
77555 index 851edcd..b786851 100644
77556 --- a/net/socket.c
77557 +++ b/net/socket.c
77558 @@ -88,6 +88,7 @@
77559 #include <linux/nsproxy.h>
77560 #include <linux/magic.h>
77561 #include <linux/slab.h>
77562 +#include <linux/in.h>
77563
77564 #include <asm/uaccess.h>
77565 #include <asm/unistd.h>
77566 @@ -105,6 +106,8 @@
77567 #include <linux/sockios.h>
77568 #include <linux/atalk.h>
77569
77570 +#include <linux/grsock.h>
77571 +
77572 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77573 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77574 unsigned long nr_segs, loff_t pos);
77575 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77576 &sockfs_dentry_operations, SOCKFS_MAGIC);
77577 }
77578
77579 -static struct vfsmount *sock_mnt __read_mostly;
77580 +struct vfsmount *sock_mnt __read_mostly;
77581
77582 static struct file_system_type sock_fs_type = {
77583 .name = "sockfs",
77584 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77585 return -EAFNOSUPPORT;
77586 if (type < 0 || type >= SOCK_MAX)
77587 return -EINVAL;
77588 + if (protocol < 0)
77589 + return -EINVAL;
77590
77591 /* Compatibility.
77592
77593 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77594 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77595 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77596
77597 + if(!gr_search_socket(family, type, protocol)) {
77598 + retval = -EACCES;
77599 + goto out;
77600 + }
77601 +
77602 + if (gr_handle_sock_all(family, type, protocol)) {
77603 + retval = -EACCES;
77604 + goto out;
77605 + }
77606 +
77607 retval = sock_create(family, type, protocol, &sock);
77608 if (retval < 0)
77609 goto out;
77610 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77611 if (sock) {
77612 err = move_addr_to_kernel(umyaddr, addrlen, &address);
77613 if (err >= 0) {
77614 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77615 + err = -EACCES;
77616 + goto error;
77617 + }
77618 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77619 + if (err)
77620 + goto error;
77621 +
77622 err = security_socket_bind(sock,
77623 (struct sockaddr *)&address,
77624 addrlen);
77625 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77626 (struct sockaddr *)
77627 &address, addrlen);
77628 }
77629 +error:
77630 fput_light(sock->file, fput_needed);
77631 }
77632 return err;
77633 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77634 if ((unsigned)backlog > somaxconn)
77635 backlog = somaxconn;
77636
77637 + if (gr_handle_sock_server_other(sock->sk)) {
77638 + err = -EPERM;
77639 + goto error;
77640 + }
77641 +
77642 + err = gr_search_listen(sock);
77643 + if (err)
77644 + goto error;
77645 +
77646 err = security_socket_listen(sock, backlog);
77647 if (!err)
77648 err = sock->ops->listen(sock, backlog);
77649
77650 +error:
77651 fput_light(sock->file, fput_needed);
77652 }
77653 return err;
77654 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77655 newsock->type = sock->type;
77656 newsock->ops = sock->ops;
77657
77658 + if (gr_handle_sock_server_other(sock->sk)) {
77659 + err = -EPERM;
77660 + sock_release(newsock);
77661 + goto out_put;
77662 + }
77663 +
77664 + err = gr_search_accept(sock);
77665 + if (err) {
77666 + sock_release(newsock);
77667 + goto out_put;
77668 + }
77669 +
77670 /*
77671 * We don't need try_module_get here, as the listening socket (sock)
77672 * has the protocol module (sock->ops->owner) held.
77673 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77674 fd_install(newfd, newfile);
77675 err = newfd;
77676
77677 + gr_attach_curr_ip(newsock->sk);
77678 +
77679 out_put:
77680 fput_light(sock->file, fput_needed);
77681 out:
77682 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77683 int, addrlen)
77684 {
77685 struct socket *sock;
77686 + struct sockaddr *sck;
77687 struct sockaddr_storage address;
77688 int err, fput_needed;
77689
77690 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77691 if (err < 0)
77692 goto out_put;
77693
77694 + sck = (struct sockaddr *)&address;
77695 +
77696 + if (gr_handle_sock_client(sck)) {
77697 + err = -EACCES;
77698 + goto out_put;
77699 + }
77700 +
77701 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77702 + if (err)
77703 + goto out_put;
77704 +
77705 err =
77706 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77707 if (err)
77708 @@ -1966,7 +2026,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77709 * checking falls down on this.
77710 */
77711 if (copy_from_user(ctl_buf,
77712 - (void __user __force *)msg_sys->msg_control,
77713 + (void __force_user *)msg_sys->msg_control,
77714 ctl_len))
77715 goto out_freectl;
77716 msg_sys->msg_control = ctl_buf;
77717 @@ -2136,7 +2196,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77718 * kernel msghdr to use the kernel address space)
77719 */
77720
77721 - uaddr = (__force void __user *)msg_sys->msg_name;
77722 + uaddr = (void __force_user *)msg_sys->msg_name;
77723 uaddr_len = COMPAT_NAMELEN(msg);
77724 if (MSG_CMSG_COMPAT & flags) {
77725 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
77726 @@ -2758,7 +2818,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77727 }
77728
77729 ifr = compat_alloc_user_space(buf_size);
77730 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77731 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77732
77733 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77734 return -EFAULT;
77735 @@ -2782,12 +2842,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77736 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77737
77738 if (copy_in_user(rxnfc, compat_rxnfc,
77739 - (void *)(&rxnfc->fs.m_ext + 1) -
77740 - (void *)rxnfc) ||
77741 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77742 + (void __user *)rxnfc) ||
77743 copy_in_user(&rxnfc->fs.ring_cookie,
77744 &compat_rxnfc->fs.ring_cookie,
77745 - (void *)(&rxnfc->fs.location + 1) -
77746 - (void *)&rxnfc->fs.ring_cookie) ||
77747 + (void __user *)(&rxnfc->fs.location + 1) -
77748 + (void __user *)&rxnfc->fs.ring_cookie) ||
77749 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77750 sizeof(rxnfc->rule_cnt)))
77751 return -EFAULT;
77752 @@ -2799,12 +2859,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77753
77754 if (convert_out) {
77755 if (copy_in_user(compat_rxnfc, rxnfc,
77756 - (const void *)(&rxnfc->fs.m_ext + 1) -
77757 - (const void *)rxnfc) ||
77758 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77759 + (const void __user *)rxnfc) ||
77760 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77761 &rxnfc->fs.ring_cookie,
77762 - (const void *)(&rxnfc->fs.location + 1) -
77763 - (const void *)&rxnfc->fs.ring_cookie) ||
77764 + (const void __user *)(&rxnfc->fs.location + 1) -
77765 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77766 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77767 sizeof(rxnfc->rule_cnt)))
77768 return -EFAULT;
77769 @@ -2874,7 +2934,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77770 old_fs = get_fs();
77771 set_fs(KERNEL_DS);
77772 err = dev_ioctl(net, cmd,
77773 - (struct ifreq __user __force *) &kifr);
77774 + (struct ifreq __force_user *) &kifr);
77775 set_fs(old_fs);
77776
77777 return err;
77778 @@ -2983,7 +3043,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77779
77780 old_fs = get_fs();
77781 set_fs(KERNEL_DS);
77782 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77783 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77784 set_fs(old_fs);
77785
77786 if (cmd == SIOCGIFMAP && !err) {
77787 @@ -3088,7 +3148,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77788 ret |= __get_user(rtdev, &(ur4->rt_dev));
77789 if (rtdev) {
77790 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77791 - r4.rt_dev = (char __user __force *)devname;
77792 + r4.rt_dev = (char __force_user *)devname;
77793 devname[15] = 0;
77794 } else
77795 r4.rt_dev = NULL;
77796 @@ -3314,8 +3374,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77797 int __user *uoptlen;
77798 int err;
77799
77800 - uoptval = (char __user __force *) optval;
77801 - uoptlen = (int __user __force *) optlen;
77802 + uoptval = (char __force_user *) optval;
77803 + uoptlen = (int __force_user *) optlen;
77804
77805 set_fs(KERNEL_DS);
77806 if (level == SOL_SOCKET)
77807 @@ -3335,7 +3395,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77808 char __user *uoptval;
77809 int err;
77810
77811 - uoptval = (char __user __force *) optval;
77812 + uoptval = (char __force_user *) optval;
77813
77814 set_fs(KERNEL_DS);
77815 if (level == SOL_SOCKET)
77816 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77817 index 994cfea..5343b6b 100644
77818 --- a/net/sunrpc/sched.c
77819 +++ b/net/sunrpc/sched.c
77820 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
77821 #ifdef RPC_DEBUG
77822 static void rpc_task_set_debuginfo(struct rpc_task *task)
77823 {
77824 - static atomic_t rpc_pid;
77825 + static atomic_unchecked_t rpc_pid;
77826
77827 - task->tk_pid = atomic_inc_return(&rpc_pid);
77828 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77829 }
77830 #else
77831 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77832 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77833 index 8343737..677025e 100644
77834 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77835 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77836 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77837 static unsigned int min_max_inline = 4096;
77838 static unsigned int max_max_inline = 65536;
77839
77840 -atomic_t rdma_stat_recv;
77841 -atomic_t rdma_stat_read;
77842 -atomic_t rdma_stat_write;
77843 -atomic_t rdma_stat_sq_starve;
77844 -atomic_t rdma_stat_rq_starve;
77845 -atomic_t rdma_stat_rq_poll;
77846 -atomic_t rdma_stat_rq_prod;
77847 -atomic_t rdma_stat_sq_poll;
77848 -atomic_t rdma_stat_sq_prod;
77849 +atomic_unchecked_t rdma_stat_recv;
77850 +atomic_unchecked_t rdma_stat_read;
77851 +atomic_unchecked_t rdma_stat_write;
77852 +atomic_unchecked_t rdma_stat_sq_starve;
77853 +atomic_unchecked_t rdma_stat_rq_starve;
77854 +atomic_unchecked_t rdma_stat_rq_poll;
77855 +atomic_unchecked_t rdma_stat_rq_prod;
77856 +atomic_unchecked_t rdma_stat_sq_poll;
77857 +atomic_unchecked_t rdma_stat_sq_prod;
77858
77859 /* Temporary NFS request map and context caches */
77860 struct kmem_cache *svc_rdma_map_cachep;
77861 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
77862 len -= *ppos;
77863 if (len > *lenp)
77864 len = *lenp;
77865 - if (len && copy_to_user(buffer, str_buf, len))
77866 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77867 return -EFAULT;
77868 *lenp = len;
77869 *ppos += len;
77870 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
77871 {
77872 .procname = "rdma_stat_read",
77873 .data = &rdma_stat_read,
77874 - .maxlen = sizeof(atomic_t),
77875 + .maxlen = sizeof(atomic_unchecked_t),
77876 .mode = 0644,
77877 .proc_handler = read_reset_stat,
77878 },
77879 {
77880 .procname = "rdma_stat_recv",
77881 .data = &rdma_stat_recv,
77882 - .maxlen = sizeof(atomic_t),
77883 + .maxlen = sizeof(atomic_unchecked_t),
77884 .mode = 0644,
77885 .proc_handler = read_reset_stat,
77886 },
77887 {
77888 .procname = "rdma_stat_write",
77889 .data = &rdma_stat_write,
77890 - .maxlen = sizeof(atomic_t),
77891 + .maxlen = sizeof(atomic_unchecked_t),
77892 .mode = 0644,
77893 .proc_handler = read_reset_stat,
77894 },
77895 {
77896 .procname = "rdma_stat_sq_starve",
77897 .data = &rdma_stat_sq_starve,
77898 - .maxlen = sizeof(atomic_t),
77899 + .maxlen = sizeof(atomic_unchecked_t),
77900 .mode = 0644,
77901 .proc_handler = read_reset_stat,
77902 },
77903 {
77904 .procname = "rdma_stat_rq_starve",
77905 .data = &rdma_stat_rq_starve,
77906 - .maxlen = sizeof(atomic_t),
77907 + .maxlen = sizeof(atomic_unchecked_t),
77908 .mode = 0644,
77909 .proc_handler = read_reset_stat,
77910 },
77911 {
77912 .procname = "rdma_stat_rq_poll",
77913 .data = &rdma_stat_rq_poll,
77914 - .maxlen = sizeof(atomic_t),
77915 + .maxlen = sizeof(atomic_unchecked_t),
77916 .mode = 0644,
77917 .proc_handler = read_reset_stat,
77918 },
77919 {
77920 .procname = "rdma_stat_rq_prod",
77921 .data = &rdma_stat_rq_prod,
77922 - .maxlen = sizeof(atomic_t),
77923 + .maxlen = sizeof(atomic_unchecked_t),
77924 .mode = 0644,
77925 .proc_handler = read_reset_stat,
77926 },
77927 {
77928 .procname = "rdma_stat_sq_poll",
77929 .data = &rdma_stat_sq_poll,
77930 - .maxlen = sizeof(atomic_t),
77931 + .maxlen = sizeof(atomic_unchecked_t),
77932 .mode = 0644,
77933 .proc_handler = read_reset_stat,
77934 },
77935 {
77936 .procname = "rdma_stat_sq_prod",
77937 .data = &rdma_stat_sq_prod,
77938 - .maxlen = sizeof(atomic_t),
77939 + .maxlen = sizeof(atomic_unchecked_t),
77940 .mode = 0644,
77941 .proc_handler = read_reset_stat,
77942 },
77943 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77944 index 41cb63b..c4a1489 100644
77945 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77946 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77947 @@ -501,7 +501,7 @@ next_sge:
77948 svc_rdma_put_context(ctxt, 0);
77949 goto out;
77950 }
77951 - atomic_inc(&rdma_stat_read);
77952 + atomic_inc_unchecked(&rdma_stat_read);
77953
77954 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77955 chl_map->ch[ch_no].count -= read_wr.num_sge;
77956 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77957 dto_q);
77958 list_del_init(&ctxt->dto_q);
77959 } else {
77960 - atomic_inc(&rdma_stat_rq_starve);
77961 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77962 clear_bit(XPT_DATA, &xprt->xpt_flags);
77963 ctxt = NULL;
77964 }
77965 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77966 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77967 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77968 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77969 - atomic_inc(&rdma_stat_recv);
77970 + atomic_inc_unchecked(&rdma_stat_recv);
77971
77972 /* Build up the XDR from the receive buffers. */
77973 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77974 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77975 index 42eb7ba..c887c45 100644
77976 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77977 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77978 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77979 write_wr.wr.rdma.remote_addr = to;
77980
77981 /* Post It */
77982 - atomic_inc(&rdma_stat_write);
77983 + atomic_inc_unchecked(&rdma_stat_write);
77984 if (svc_rdma_send(xprt, &write_wr))
77985 goto err;
77986 return 0;
77987 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77988 index 73b428b..5f3f8f3 100644
77989 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77990 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77991 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77992 return;
77993
77994 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77995 - atomic_inc(&rdma_stat_rq_poll);
77996 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77997
77998 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77999 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
78000 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78001 }
78002
78003 if (ctxt)
78004 - atomic_inc(&rdma_stat_rq_prod);
78005 + atomic_inc_unchecked(&rdma_stat_rq_prod);
78006
78007 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
78008 /*
78009 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78010 return;
78011
78012 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
78013 - atomic_inc(&rdma_stat_sq_poll);
78014 + atomic_inc_unchecked(&rdma_stat_sq_poll);
78015 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
78016 if (wc.status != IB_WC_SUCCESS)
78017 /* Close the transport */
78018 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78019 }
78020
78021 if (ctxt)
78022 - atomic_inc(&rdma_stat_sq_prod);
78023 + atomic_inc_unchecked(&rdma_stat_sq_prod);
78024 }
78025
78026 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
78027 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
78028 spin_lock_bh(&xprt->sc_lock);
78029 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
78030 spin_unlock_bh(&xprt->sc_lock);
78031 - atomic_inc(&rdma_stat_sq_starve);
78032 + atomic_inc_unchecked(&rdma_stat_sq_starve);
78033
78034 /* See if we can opportunistically reap SQ WR to make room */
78035 sq_cq_reap(xprt);
78036 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
78037 index c3e65ae..f512a2b 100644
78038 --- a/net/sysctl_net.c
78039 +++ b/net/sysctl_net.c
78040 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
78041 struct ctl_table *table)
78042 {
78043 /* Allow network administrator to have same access as root. */
78044 - if (capable(CAP_NET_ADMIN)) {
78045 + if (capable_nolog(CAP_NET_ADMIN)) {
78046 int mode = (table->mode >> 6) & 7;
78047 return (mode << 6) | (mode << 3) | mode;
78048 }
78049 diff --git a/net/tipc/link.c b/net/tipc/link.c
78050 index b4b9b30..5b62131 100644
78051 --- a/net/tipc/link.c
78052 +++ b/net/tipc/link.c
78053 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
78054 struct tipc_msg fragm_hdr;
78055 struct sk_buff *buf, *buf_chain, *prev;
78056 u32 fragm_crs, fragm_rest, hsz, sect_rest;
78057 - const unchar *sect_crs;
78058 + const unchar __user *sect_crs;
78059 int curr_sect;
78060 u32 fragm_no;
78061
78062 @@ -1247,7 +1247,7 @@ again:
78063
78064 if (!sect_rest) {
78065 sect_rest = msg_sect[++curr_sect].iov_len;
78066 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
78067 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
78068 }
78069
78070 if (sect_rest < fragm_rest)
78071 @@ -1266,7 +1266,7 @@ error:
78072 }
78073 } else
78074 skb_copy_to_linear_data_offset(buf, fragm_crs,
78075 - sect_crs, sz);
78076 + (const void __force_kernel *)sect_crs, sz);
78077 sect_crs += sz;
78078 sect_rest -= sz;
78079 fragm_crs += sz;
78080 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
78081 index e3afe16..333ea83 100644
78082 --- a/net/tipc/msg.c
78083 +++ b/net/tipc/msg.c
78084 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
78085 msg_sect[cnt].iov_len);
78086 else
78087 skb_copy_to_linear_data_offset(*buf, pos,
78088 - msg_sect[cnt].iov_base,
78089 + (const void __force_kernel *)msg_sect[cnt].iov_base,
78090 msg_sect[cnt].iov_len);
78091 pos += msg_sect[cnt].iov_len;
78092 }
78093 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
78094 index b2964e9..fdf2e27 100644
78095 --- a/net/tipc/subscr.c
78096 +++ b/net/tipc/subscr.c
78097 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
78098 {
78099 struct iovec msg_sect;
78100
78101 - msg_sect.iov_base = (void *)&sub->evt;
78102 + msg_sect.iov_base = (void __force_user *)&sub->evt;
78103 msg_sect.iov_len = sizeof(struct tipc_event);
78104
78105 sub->evt.event = htohl(event, sub->swap);
78106 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
78107 index d510353..26c8a32 100644
78108 --- a/net/unix/af_unix.c
78109 +++ b/net/unix/af_unix.c
78110 @@ -779,6 +779,12 @@ static struct sock *unix_find_other(struct net *net,
78111 err = -ECONNREFUSED;
78112 if (!S_ISSOCK(inode->i_mode))
78113 goto put_fail;
78114 +
78115 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
78116 + err = -EACCES;
78117 + goto put_fail;
78118 + }
78119 +
78120 u = unix_find_socket_byinode(inode);
78121 if (!u)
78122 goto put_fail;
78123 @@ -799,6 +805,13 @@ static struct sock *unix_find_other(struct net *net,
78124 if (u) {
78125 struct dentry *dentry;
78126 dentry = unix_sk(u)->path.dentry;
78127 +
78128 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
78129 + err = -EPERM;
78130 + sock_put(u);
78131 + goto fail;
78132 + }
78133 +
78134 if (dentry)
78135 touch_atime(&unix_sk(u)->path);
78136 } else
78137 @@ -881,11 +894,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
78138 err = security_path_mknod(&path, dentry, mode, 0);
78139 if (err)
78140 goto out_mknod_drop_write;
78141 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
78142 + err = -EACCES;
78143 + goto out_mknod_drop_write;
78144 + }
78145 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
78146 out_mknod_drop_write:
78147 mnt_drop_write(path.mnt);
78148 if (err)
78149 goto out_mknod_dput;
78150 +
78151 + gr_handle_create(dentry, path.mnt);
78152 +
78153 mutex_unlock(&path.dentry->d_inode->i_mutex);
78154 dput(path.dentry);
78155 path.dentry = dentry;
78156 diff --git a/net/wireless/core.h b/net/wireless/core.h
78157 index 3ac2dd0..fbe533e 100644
78158 --- a/net/wireless/core.h
78159 +++ b/net/wireless/core.h
78160 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
78161 struct mutex mtx;
78162
78163 /* rfkill support */
78164 - struct rfkill_ops rfkill_ops;
78165 + rfkill_ops_no_const rfkill_ops;
78166 struct rfkill *rfkill;
78167 struct work_struct rfkill_sync;
78168
78169 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
78170 index af648e0..6185d3a 100644
78171 --- a/net/wireless/wext-core.c
78172 +++ b/net/wireless/wext-core.c
78173 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78174 */
78175
78176 /* Support for very large requests */
78177 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
78178 - (user_length > descr->max_tokens)) {
78179 + if (user_length > descr->max_tokens) {
78180 /* Allow userspace to GET more than max so
78181 * we can support any size GET requests.
78182 * There is still a limit : -ENOMEM.
78183 @@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78184 }
78185 }
78186
78187 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
78188 - /*
78189 - * If this is a GET, but not NOMAX, it means that the extra
78190 - * data is not bounded by userspace, but by max_tokens. Thus
78191 - * set the length to max_tokens. This matches the extra data
78192 - * allocation.
78193 - * The driver should fill it with the number of tokens it
78194 - * provided, and it may check iwp->length rather than having
78195 - * knowledge of max_tokens. If the driver doesn't change the
78196 - * iwp->length, this ioctl just copies back max_token tokens
78197 - * filled with zeroes. Hopefully the driver isn't claiming
78198 - * them to be valid data.
78199 - */
78200 - iwp->length = descr->max_tokens;
78201 - }
78202 -
78203 err = handler(dev, info, (union iwreq_data *) iwp, extra);
78204
78205 iwp->length += essid_compat;
78206 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
78207 index a15d2a0..12142af 100644
78208 --- a/net/xfrm/xfrm_policy.c
78209 +++ b/net/xfrm/xfrm_policy.c
78210 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
78211 {
78212 policy->walk.dead = 1;
78213
78214 - atomic_inc(&policy->genid);
78215 + atomic_inc_unchecked(&policy->genid);
78216
78217 if (del_timer(&policy->timer))
78218 xfrm_pol_put(policy);
78219 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78220 hlist_add_head(&policy->bydst, chain);
78221 xfrm_pol_hold(policy);
78222 net->xfrm.policy_count[dir]++;
78223 - atomic_inc(&flow_cache_genid);
78224 + atomic_inc_unchecked(&flow_cache_genid);
78225 if (delpol)
78226 __xfrm_policy_unlink(delpol, dir);
78227 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78228 @@ -1530,7 +1530,7 @@ free_dst:
78229 goto out;
78230 }
78231
78232 -static int inline
78233 +static inline int
78234 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78235 {
78236 if (!*target) {
78237 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78238 return 0;
78239 }
78240
78241 -static int inline
78242 +static inline int
78243 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78244 {
78245 #ifdef CONFIG_XFRM_SUB_POLICY
78246 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78247 #endif
78248 }
78249
78250 -static int inline
78251 +static inline int
78252 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78253 {
78254 #ifdef CONFIG_XFRM_SUB_POLICY
78255 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78256
78257 xdst->num_pols = num_pols;
78258 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78259 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78260 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78261
78262 return xdst;
78263 }
78264 @@ -2348,7 +2348,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78265 if (xdst->xfrm_genid != dst->xfrm->genid)
78266 return 0;
78267 if (xdst->num_pols > 0 &&
78268 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78269 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78270 return 0;
78271
78272 mtu = dst_mtu(dst->child);
78273 @@ -2885,7 +2885,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78274 sizeof(pol->xfrm_vec[i].saddr));
78275 pol->xfrm_vec[i].encap_family = mp->new_family;
78276 /* flush bundles */
78277 - atomic_inc(&pol->genid);
78278 + atomic_inc_unchecked(&pol->genid);
78279 }
78280 }
78281
78282 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78283 index ff1720d..ed8475e 100644
78284 --- a/scripts/Makefile.build
78285 +++ b/scripts/Makefile.build
78286 @@ -111,7 +111,7 @@ endif
78287 endif
78288
78289 # Do not include host rules unless needed
78290 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78291 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
78292 include scripts/Makefile.host
78293 endif
78294
78295 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78296 index 686cb0d..9d653bf 100644
78297 --- a/scripts/Makefile.clean
78298 +++ b/scripts/Makefile.clean
78299 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78300 __clean-files := $(extra-y) $(always) \
78301 $(targets) $(clean-files) \
78302 $(host-progs) \
78303 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78304 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78305 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78306
78307 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78308
78309 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78310 index 1ac414f..38575f7 100644
78311 --- a/scripts/Makefile.host
78312 +++ b/scripts/Makefile.host
78313 @@ -31,6 +31,8 @@
78314 # Note: Shared libraries consisting of C++ files are not supported
78315
78316 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78317 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78318 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
78319
78320 # C code
78321 # Executables compiled from a single .c file
78322 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78323 # Shared libaries (only .c supported)
78324 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78325 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78326 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78327 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
78328 # Remove .so files from "xxx-objs"
78329 host-cobjs := $(filter-out %.so,$(host-cobjs))
78330 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
78331
78332 -#Object (.o) files used by the shared libaries
78333 +# Object (.o) files used by the shared libaries
78334 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
78335 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
78336
78337 # output directory for programs/.o files
78338 # hostprogs-y := tools/build may have been specified. Retrieve directory
78339 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
78340 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
78341 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
78342 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
78343 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
78344 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
78345 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
78346 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
78347
78348 obj-dirs += $(host-objdirs)
78349 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
78350 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
78351 $(call if_changed_dep,host-cshobjs)
78352
78353 +# Compile .c file, create position independent .o file
78354 +# host-cxxshobjs -> .o
78355 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
78356 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
78357 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
78358 + $(call if_changed_dep,host-cxxshobjs)
78359 +
78360 # Link a shared library, based on position independent .o files
78361 # *.o -> .so shared library (host-cshlib)
78362 quiet_cmd_host-cshlib = HOSTLLD -shared $@
78363 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
78364 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
78365 $(call if_changed,host-cshlib)
78366
78367 +# Link a shared library, based on position independent .o files
78368 +# *.o -> .so shared library (host-cxxshlib)
78369 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
78370 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
78371 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
78372 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
78373 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
78374 + $(call if_changed,host-cxxshlib)
78375 +
78376 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
78377 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
78378 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
78379
78380 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78381 index cb1f50c..cef2a7c 100644
78382 --- a/scripts/basic/fixdep.c
78383 +++ b/scripts/basic/fixdep.c
78384 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78385 /*
78386 * Lookup a value in the configuration string.
78387 */
78388 -static int is_defined_config(const char *name, int len, unsigned int hash)
78389 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78390 {
78391 struct item *aux;
78392
78393 @@ -211,10 +211,10 @@ static void clear_config(void)
78394 /*
78395 * Record the use of a CONFIG_* word.
78396 */
78397 -static void use_config(const char *m, int slen)
78398 +static void use_config(const char *m, unsigned int slen)
78399 {
78400 unsigned int hash = strhash(m, slen);
78401 - int c, i;
78402 + unsigned int c, i;
78403
78404 if (is_defined_config(m, slen, hash))
78405 return;
78406 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78407
78408 static void parse_config_file(const char *map, size_t len)
78409 {
78410 - const int *end = (const int *) (map + len);
78411 + const unsigned int *end = (const unsigned int *) (map + len);
78412 /* start at +1, so that p can never be < map */
78413 - const int *m = (const int *) map + 1;
78414 + const unsigned int *m = (const unsigned int *) map + 1;
78415 const char *p, *q;
78416
78417 for (; m < end; m++) {
78418 @@ -406,7 +406,7 @@ static void print_deps(void)
78419 static void traps(void)
78420 {
78421 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78422 - int *p = (int *)test;
78423 + unsigned int *p = (unsigned int *)test;
78424
78425 if (*p != INT_CONF) {
78426 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78427 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78428 new file mode 100644
78429 index 0000000..008ac1a
78430 --- /dev/null
78431 +++ b/scripts/gcc-plugin.sh
78432 @@ -0,0 +1,17 @@
78433 +#!/bin/bash
78434 +plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
78435 +#include "gcc-plugin.h"
78436 +#include "tree.h"
78437 +#include "tm.h"
78438 +#include "rtl.h"
78439 +#ifdef ENABLE_BUILD_WITH_CXX
78440 +#warning $2
78441 +#else
78442 +#warning $1
78443 +#endif
78444 +EOF`
78445 +if [ $? -eq 0 ]
78446 +then
78447 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
78448 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
78449 +fi
78450 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78451 index 44ddaa5..a3119bd 100644
78452 --- a/scripts/mod/file2alias.c
78453 +++ b/scripts/mod/file2alias.c
78454 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
78455 unsigned long size, unsigned long id_size,
78456 void *symval)
78457 {
78458 - int i;
78459 + unsigned int i;
78460
78461 if (size % id_size || size < id_size) {
78462 if (cross_build != 0)
78463 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
78464 /* USB is special because the bcdDevice can be matched against a numeric range */
78465 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78466 static void do_usb_entry(struct usb_device_id *id,
78467 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78468 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78469 unsigned char range_lo, unsigned char range_hi,
78470 unsigned char max, struct module *mod)
78471 {
78472 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78473 {
78474 unsigned int devlo, devhi;
78475 unsigned char chi, clo, max;
78476 - int ndigits;
78477 + unsigned int ndigits;
78478
78479 id->match_flags = TO_NATIVE(id->match_flags);
78480 id->idVendor = TO_NATIVE(id->idVendor);
78481 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78482 for (i = 0; i < count; i++) {
78483 const char *id = (char *)devs[i].id;
78484 char acpi_id[sizeof(devs[0].id)];
78485 - int j;
78486 + unsigned int j;
78487
78488 buf_printf(&mod->dev_table_buf,
78489 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78490 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78491
78492 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78493 const char *id = (char *)card->devs[j].id;
78494 - int i2, j2;
78495 + unsigned int i2, j2;
78496 int dup = 0;
78497
78498 if (!id[0])
78499 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78500 /* add an individual alias for every device entry */
78501 if (!dup) {
78502 char acpi_id[sizeof(card->devs[0].id)];
78503 - int k;
78504 + unsigned int k;
78505
78506 buf_printf(&mod->dev_table_buf,
78507 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78508 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78509 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78510 char *alias)
78511 {
78512 - int i, j;
78513 + unsigned int i, j;
78514
78515 sprintf(alias, "dmi*");
78516
78517 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78518 index c4e7d15..dad16c1 100644
78519 --- a/scripts/mod/modpost.c
78520 +++ b/scripts/mod/modpost.c
78521 @@ -922,6 +922,7 @@ enum mismatch {
78522 ANY_INIT_TO_ANY_EXIT,
78523 ANY_EXIT_TO_ANY_INIT,
78524 EXPORT_TO_INIT_EXIT,
78525 + DATA_TO_TEXT
78526 };
78527
78528 struct sectioncheck {
78529 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
78530 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78531 .mismatch = EXPORT_TO_INIT_EXIT,
78532 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78533 +},
78534 +/* Do not reference code from writable data */
78535 +{
78536 + .fromsec = { DATA_SECTIONS, NULL },
78537 + .tosec = { TEXT_SECTIONS, NULL },
78538 + .mismatch = DATA_TO_TEXT
78539 }
78540 };
78541
78542 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78543 continue;
78544 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78545 continue;
78546 - if (sym->st_value == addr)
78547 - return sym;
78548 /* Find a symbol nearby - addr are maybe negative */
78549 d = sym->st_value - addr;
78550 + if (d == 0)
78551 + return sym;
78552 if (d < 0)
78553 d = addr - sym->st_value;
78554 if (d < distance) {
78555 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
78556 tosym, prl_to, prl_to, tosym);
78557 free(prl_to);
78558 break;
78559 + case DATA_TO_TEXT:
78560 +#if 0
78561 + fprintf(stderr,
78562 + "The %s %s:%s references\n"
78563 + "the %s %s:%s%s\n",
78564 + from, fromsec, fromsym, to, tosec, tosym, to_p);
78565 +#endif
78566 + break;
78567 }
78568 fprintf(stderr, "\n");
78569 }
78570 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78571 static void check_sec_ref(struct module *mod, const char *modname,
78572 struct elf_info *elf)
78573 {
78574 - int i;
78575 + unsigned int i;
78576 Elf_Shdr *sechdrs = elf->sechdrs;
78577
78578 /* Walk through all sections */
78579 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78580 va_end(ap);
78581 }
78582
78583 -void buf_write(struct buffer *buf, const char *s, int len)
78584 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78585 {
78586 if (buf->size - buf->pos < len) {
78587 buf->size += len + SZ;
78588 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78589 if (fstat(fileno(file), &st) < 0)
78590 goto close_write;
78591
78592 - if (st.st_size != b->pos)
78593 + if (st.st_size != (off_t)b->pos)
78594 goto close_write;
78595
78596 tmp = NOFAIL(malloc(b->pos));
78597 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78598 index 51207e4..f7d603d 100644
78599 --- a/scripts/mod/modpost.h
78600 +++ b/scripts/mod/modpost.h
78601 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78602
78603 struct buffer {
78604 char *p;
78605 - int pos;
78606 - int size;
78607 + unsigned int pos;
78608 + unsigned int size;
78609 };
78610
78611 void __attribute__((format(printf, 2, 3)))
78612 buf_printf(struct buffer *buf, const char *fmt, ...);
78613
78614 void
78615 -buf_write(struct buffer *buf, const char *s, int len);
78616 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78617
78618 struct module {
78619 struct module *next;
78620 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78621 index 9dfcd6d..099068e 100644
78622 --- a/scripts/mod/sumversion.c
78623 +++ b/scripts/mod/sumversion.c
78624 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78625 goto out;
78626 }
78627
78628 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78629 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78630 warn("writing sum in %s failed: %s\n",
78631 filename, strerror(errno));
78632 goto out;
78633 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78634 index 5c11312..72742b5 100644
78635 --- a/scripts/pnmtologo.c
78636 +++ b/scripts/pnmtologo.c
78637 @@ -237,14 +237,14 @@ static void write_header(void)
78638 fprintf(out, " * Linux logo %s\n", logoname);
78639 fputs(" */\n\n", out);
78640 fputs("#include <linux/linux_logo.h>\n\n", out);
78641 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78642 + fprintf(out, "static unsigned char %s_data[] = {\n",
78643 logoname);
78644 }
78645
78646 static void write_footer(void)
78647 {
78648 fputs("\n};\n\n", out);
78649 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78650 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78651 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78652 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78653 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78654 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78655 fputs("\n};\n\n", out);
78656
78657 /* write logo clut */
78658 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78659 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78660 logoname);
78661 write_hex_cnt = 0;
78662 for (i = 0; i < logo_clutsize; i++) {
78663 diff --git a/security/Kconfig b/security/Kconfig
78664 index ccc61f8..5e68d73 100644
78665 --- a/security/Kconfig
78666 +++ b/security/Kconfig
78667 @@ -4,6 +4,875 @@
78668
78669 menu "Security options"
78670
78671 +menu "Grsecurity"
78672 +
78673 + config ARCH_TRACK_EXEC_LIMIT
78674 + bool
78675 +
78676 + config PAX_KERNEXEC_PLUGIN
78677 + bool
78678 +
78679 + config PAX_PER_CPU_PGD
78680 + bool
78681 +
78682 + config TASK_SIZE_MAX_SHIFT
78683 + int
78684 + depends on X86_64
78685 + default 47 if !PAX_PER_CPU_PGD
78686 + default 42 if PAX_PER_CPU_PGD
78687 +
78688 + config PAX_ENABLE_PAE
78689 + bool
78690 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78691 +
78692 + config PAX_USERCOPY_SLABS
78693 + bool
78694 +
78695 +config GRKERNSEC
78696 + bool "Grsecurity"
78697 + select CRYPTO
78698 + select CRYPTO_SHA256
78699 + select STOP_MACHINE
78700 + help
78701 + If you say Y here, you will be able to configure many features
78702 + that will enhance the security of your system. It is highly
78703 + recommended that you say Y here and read through the help
78704 + for each option so that you fully understand the features and
78705 + can evaluate their usefulness for your machine.
78706 +
78707 +choice
78708 + prompt "Configuration Method"
78709 + depends on GRKERNSEC
78710 + default GRKERNSEC_CONFIG_CUSTOM
78711 + help
78712 +
78713 +config GRKERNSEC_CONFIG_AUTO
78714 + bool "Automatic"
78715 + help
78716 + If you choose this configuration method, you'll be able to answer a small
78717 + number of simple questions about how you plan to use this kernel.
78718 + The settings of grsecurity and PaX will be automatically configured for
78719 + the highest commonly-used settings within the provided constraints.
78720 +
78721 + If you require additional configuration, custom changes can still be made
78722 + from the "custom configuration" menu.
78723 +
78724 +config GRKERNSEC_CONFIG_CUSTOM
78725 + bool "Custom"
78726 + help
78727 + If you choose this configuration method, you'll be able to configure all
78728 + grsecurity and PaX settings manually. Via this method, no options are
78729 + automatically enabled.
78730 +
78731 +endchoice
78732 +
78733 +choice
78734 + prompt "Usage Type"
78735 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78736 + default GRKERNSEC_CONFIG_SERVER
78737 + help
78738 +
78739 +config GRKERNSEC_CONFIG_SERVER
78740 + bool "Server"
78741 + help
78742 + Choose this option if you plan to use this kernel on a server.
78743 +
78744 +config GRKERNSEC_CONFIG_DESKTOP
78745 + bool "Desktop"
78746 + help
78747 + Choose this option if you plan to use this kernel on a desktop.
78748 +
78749 +endchoice
78750 +
78751 +choice
78752 + prompt "Virtualization Type"
78753 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
78754 + default GRKERNSEC_CONFIG_VIRT_NONE
78755 + help
78756 +
78757 +config GRKERNSEC_CONFIG_VIRT_NONE
78758 + bool "None"
78759 + help
78760 + Choose this option if this kernel will be run on bare metal.
78761 +
78762 +config GRKERNSEC_CONFIG_VIRT_GUEST
78763 + bool "Guest"
78764 + help
78765 + Choose this option if this kernel will be run as a VM guest.
78766 +
78767 +config GRKERNSEC_CONFIG_VIRT_HOST
78768 + bool "Host"
78769 + help
78770 + Choose this option if this kernel will be run as a VM host.
78771 +
78772 +endchoice
78773 +
78774 +choice
78775 + prompt "Virtualization Hardware"
78776 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
78777 + help
78778 +
78779 +config GRKERNSEC_CONFIG_VIRT_EPT
78780 + bool "EPT/RVI Processor Support"
78781 + depends on X86
78782 + help
78783 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
78784 + hardware virtualization. This allows for additional kernel hardening protections
78785 + to operate without additional performance impact.
78786 +
78787 + To see if your Intel processor supports EPT, see:
78788 + http://ark.intel.com/Products/VirtualizationTechnology
78789 + (Most Core i3/5/7 support EPT)
78790 +
78791 + To see if your AMD processor supports RVI, see:
78792 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
78793 +
78794 +config GRKERNSEC_CONFIG_VIRT_SOFT
78795 + bool "First-gen/No Hardware Virtualization"
78796 + help
78797 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
78798 + support hardware virtualization or doesn't support the EPT/RVI extensions.
78799 +
78800 +endchoice
78801 +
78802 +choice
78803 + prompt "Virtualization Software"
78804 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
78805 + help
78806 +
78807 +config GRKERNSEC_CONFIG_VIRT_XEN
78808 + bool "Xen"
78809 + help
78810 + Choose this option if this kernel is running as a Xen guest or host.
78811 +
78812 +config GRKERNSEC_CONFIG_VIRT_VMWARE
78813 + bool "VMWare"
78814 + help
78815 + Choose this option if this kernel is running as a VMWare guest or host.
78816 +
78817 +config GRKERNSEC_CONFIG_VIRT_KVM
78818 + bool "KVM"
78819 + help
78820 + Choose this option if this kernel is running as a KVM guest or host.
78821 +
78822 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
78823 + bool "VirtualBox"
78824 + help
78825 + Choose this option if this kernel is running as a VirtualBox guest or host.
78826 +
78827 +endchoice
78828 +
78829 +choice
78830 + prompt "Required Priorities"
78831 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78832 + default GRKERNSEC_CONFIG_PRIORITY_PERF
78833 + help
78834 +
78835 +config GRKERNSEC_CONFIG_PRIORITY_PERF
78836 + bool "Performance"
78837 + help
78838 + Choose this option if performance is of highest priority for this deployment
78839 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
78840 + and freed memory sanitizing will be disabled.
78841 +
78842 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
78843 + bool "Security"
78844 + help
78845 + Choose this option if security is of highest priority for this deployment of
78846 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
78847 + be enabled for this kernel. In a worst-case scenario, these features can
78848 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
78849 +
78850 +endchoice
78851 +
78852 +menu "Default Special Groups"
78853 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78854 +
78855 +config GRKERNSEC_PROC_GID
78856 + int "GID exempted from /proc restrictions"
78857 + default 1001
78858 + help
78859 + Setting this GID determines which group will be exempted from
78860 + grsecurity's /proc restrictions, allowing users of the specified
78861 + group to view network statistics and the existence of other users'
78862 + processes on the system.
78863 +
78864 +config GRKERNSEC_TPE_GID
78865 + int "GID for untrusted users"
78866 + depends on GRKERNSEC_CONFIG_SERVER
78867 + default 1005
78868 + help
78869 + Setting this GID determines which group untrusted users should
78870 + be added to. These users will be placed under grsecurity's Trusted Path
78871 + Execution mechanism, preventing them from executing their own binaries.
78872 + The users will only be able to execute binaries in directories owned and
78873 + writable only by the root user.
78874 +
78875 +config GRKERNSEC_SYMLINKOWN_GID
78876 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
78877 + depends on GRKERNSEC_CONFIG_SERVER
78878 + default 1006
78879 + help
78880 + Setting this GID determines what group kernel-enforced
78881 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
78882 + is enabled, a sysctl option with name "symlinkown_gid" is created.
78883 +
78884 +
78885 +endmenu
78886 +
78887 +menu "Customize Configuration"
78888 +depends on GRKERNSEC
78889 +
78890 +menu "PaX"
78891 +
78892 +config PAX
78893 + bool "Enable various PaX features"
78894 + default y if GRKERNSEC_CONFIG_AUTO
78895 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78896 + help
78897 + This allows you to enable various PaX features. PaX adds
78898 + intrusion prevention mechanisms to the kernel that reduce
78899 + the risks posed by exploitable memory corruption bugs.
78900 +
78901 +menu "PaX Control"
78902 + depends on PAX
78903 +
78904 +config PAX_SOFTMODE
78905 + bool 'Support soft mode'
78906 + help
78907 + Enabling this option will allow you to run PaX in soft mode, that
78908 + is, PaX features will not be enforced by default, only on executables
78909 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
78910 + support as they are the only way to mark executables for soft mode use.
78911 +
78912 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78913 + line option on boot. Furthermore you can control various PaX features
78914 + at runtime via the entries in /proc/sys/kernel/pax.
78915 +
78916 +config PAX_EI_PAX
78917 + bool 'Use legacy ELF header marking'
78918 + default y if GRKERNSEC_CONFIG_AUTO
78919 + help
78920 + Enabling this option will allow you to control PaX features on
78921 + a per executable basis via the 'chpax' utility available at
78922 + http://pax.grsecurity.net/. The control flags will be read from
78923 + an otherwise reserved part of the ELF header. This marking has
78924 + numerous drawbacks (no support for soft-mode, toolchain does not
78925 + know about the non-standard use of the ELF header) therefore it
78926 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
78927 + support.
78928 +
78929 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
78930 + support as well, they will override the legacy EI_PAX marks.
78931 +
78932 + If you enable none of the marking options then all applications
78933 + will run with PaX enabled on them by default.
78934 +
78935 +config PAX_PT_PAX_FLAGS
78936 + bool 'Use ELF program header marking'
78937 + default y if GRKERNSEC_CONFIG_AUTO
78938 + help
78939 + Enabling this option will allow you to control PaX features on
78940 + a per executable basis via the 'paxctl' utility available at
78941 + http://pax.grsecurity.net/. The control flags will be read from
78942 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78943 + has the benefits of supporting both soft mode and being fully
78944 + integrated into the toolchain (the binutils patch is available
78945 + from http://pax.grsecurity.net).
78946 +
78947 + Note that if you enable the legacy EI_PAX marking support as well,
78948 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78949 +
78950 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78951 + must make sure that the marks are the same if a binary has both marks.
78952 +
78953 + If you enable none of the marking options then all applications
78954 + will run with PaX enabled on them by default.
78955 +
78956 +config PAX_XATTR_PAX_FLAGS
78957 + bool 'Use filesystem extended attributes marking'
78958 + default y if GRKERNSEC_CONFIG_AUTO
78959 + select CIFS_XATTR if CIFS
78960 + select EXT2_FS_XATTR if EXT2_FS
78961 + select EXT3_FS_XATTR if EXT3_FS
78962 + select EXT4_FS_XATTR if EXT4_FS
78963 + select JFFS2_FS_XATTR if JFFS2_FS
78964 + select REISERFS_FS_XATTR if REISERFS_FS
78965 + select SQUASHFS_XATTR if SQUASHFS
78966 + select TMPFS_XATTR if TMPFS
78967 + select UBIFS_FS_XATTR if UBIFS_FS
78968 + help
78969 + Enabling this option will allow you to control PaX features on
78970 + a per executable basis via the 'setfattr' utility. The control
78971 + flags will be read from the user.pax.flags extended attribute of
78972 + the file. This marking has the benefit of supporting binary-only
78973 + applications that self-check themselves (e.g., skype) and would
78974 + not tolerate chpax/paxctl changes. The main drawback is that
78975 + extended attributes are not supported by some filesystems (e.g.,
78976 + isofs, udf, vfat) so copying files through such filesystems will
78977 + lose the extended attributes and these PaX markings.
78978 +
78979 + Note that if you enable the legacy EI_PAX marking support as well,
78980 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
78981 +
78982 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78983 + must make sure that the marks are the same if a binary has both marks.
78984 +
78985 + If you enable none of the marking options then all applications
78986 + will run with PaX enabled on them by default.
78987 +
78988 +choice
78989 + prompt 'MAC system integration'
78990 + default PAX_HAVE_ACL_FLAGS
78991 + help
78992 + Mandatory Access Control systems have the option of controlling
78993 + PaX flags on a per executable basis, choose the method supported
78994 + by your particular system.
78995 +
78996 + - "none": if your MAC system does not interact with PaX,
78997 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78998 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78999 +
79000 + NOTE: this option is for developers/integrators only.
79001 +
79002 + config PAX_NO_ACL_FLAGS
79003 + bool 'none'
79004 +
79005 + config PAX_HAVE_ACL_FLAGS
79006 + bool 'direct'
79007 +
79008 + config PAX_HOOK_ACL_FLAGS
79009 + bool 'hook'
79010 +endchoice
79011 +
79012 +endmenu
79013 +
79014 +menu "Non-executable pages"
79015 + depends on PAX
79016 +
79017 +config PAX_NOEXEC
79018 + bool "Enforce non-executable pages"
79019 + default y if GRKERNSEC_CONFIG_AUTO
79020 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
79021 + help
79022 + By design some architectures do not allow for protecting memory
79023 + pages against execution or even if they do, Linux does not make
79024 + use of this feature. In practice this means that if a page is
79025 + readable (such as the stack or heap) it is also executable.
79026 +
79027 + There is a well known exploit technique that makes use of this
79028 + fact and a common programming mistake where an attacker can
79029 + introduce code of his choice somewhere in the attacked program's
79030 + memory (typically the stack or the heap) and then execute it.
79031 +
79032 + If the attacked program was running with different (typically
79033 + higher) privileges than that of the attacker, then he can elevate
79034 + his own privilege level (e.g. get a root shell, write to files for
79035 + which he does not have write access to, etc).
79036 +
79037 + Enabling this option will let you choose from various features
79038 + that prevent the injection and execution of 'foreign' code in
79039 + a program.
79040 +
79041 + This will also break programs that rely on the old behaviour and
79042 + expect that dynamically allocated memory via the malloc() family
79043 + of functions is executable (which it is not). Notable examples
79044 + are the XFree86 4.x server, the java runtime and wine.
79045 +
79046 +config PAX_PAGEEXEC
79047 + bool "Paging based non-executable pages"
79048 + default y if GRKERNSEC_CONFIG_AUTO
79049 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
79050 + select S390_SWITCH_AMODE if S390
79051 + select S390_EXEC_PROTECT if S390
79052 + select ARCH_TRACK_EXEC_LIMIT if X86_32
79053 + help
79054 + This implementation is based on the paging feature of the CPU.
79055 + On i386 without hardware non-executable bit support there is a
79056 + variable but usually low performance impact, however on Intel's
79057 + P4 core based CPUs it is very high so you should not enable this
79058 + for kernels meant to be used on such CPUs.
79059 +
79060 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
79061 + with hardware non-executable bit support there is no performance
79062 + impact, on ppc the impact is negligible.
79063 +
79064 + Note that several architectures require various emulations due to
79065 + badly designed userland ABIs, this will cause a performance impact
79066 + but will disappear as soon as userland is fixed. For example, ppc
79067 + userland MUST have been built with secure-plt by a recent toolchain.
79068 +
79069 +config PAX_SEGMEXEC
79070 + bool "Segmentation based non-executable pages"
79071 + default y if GRKERNSEC_CONFIG_AUTO
79072 + depends on PAX_NOEXEC && X86_32
79073 + help
79074 + This implementation is based on the segmentation feature of the
79075 + CPU and has a very small performance impact, however applications
79076 + will be limited to a 1.5 GB address space instead of the normal
79077 + 3 GB.
79078 +
79079 +config PAX_EMUTRAMP
79080 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
79081 + default y if PARISC
79082 + help
79083 + There are some programs and libraries that for one reason or
79084 + another attempt to execute special small code snippets from
79085 + non-executable memory pages. Most notable examples are the
79086 + signal handler return code generated by the kernel itself and
79087 + the GCC trampolines.
79088 +
79089 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
79090 + such programs will no longer work under your kernel.
79091 +
79092 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
79093 + utilities to enable trampoline emulation for the affected programs
79094 + yet still have the protection provided by the non-executable pages.
79095 +
79096 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
79097 + your system will not even boot.
79098 +
79099 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
79100 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
79101 + for the affected files.
79102 +
79103 + NOTE: enabling this feature *may* open up a loophole in the
79104 + protection provided by non-executable pages that an attacker
79105 + could abuse. Therefore the best solution is to not have any
79106 + files on your system that would require this option. This can
79107 + be achieved by not using libc5 (which relies on the kernel
79108 + signal handler return code) and not using or rewriting programs
79109 + that make use of the nested function implementation of GCC.
79110 + Skilled users can just fix GCC itself so that it implements
79111 + nested function calls in a way that does not interfere with PaX.
79112 +
79113 +config PAX_EMUSIGRT
79114 + bool "Automatically emulate sigreturn trampolines"
79115 + depends on PAX_EMUTRAMP && PARISC
79116 + default y
79117 + help
79118 + Enabling this option will have the kernel automatically detect
79119 + and emulate signal return trampolines executing on the stack
79120 + that would otherwise lead to task termination.
79121 +
79122 + This solution is intended as a temporary one for users with
79123 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
79124 + Modula-3 runtime, etc) or executables linked to such, basically
79125 + everything that does not specify its own SA_RESTORER function in
79126 + normal executable memory like glibc 2.1+ does.
79127 +
79128 + On parisc you MUST enable this option, otherwise your system will
79129 + not even boot.
79130 +
79131 + NOTE: this feature cannot be disabled on a per executable basis
79132 + and since it *does* open up a loophole in the protection provided
79133 + by non-executable pages, the best solution is to not have any
79134 + files on your system that would require this option.
79135 +
79136 +config PAX_MPROTECT
79137 + bool "Restrict mprotect()"
79138 + default y if GRKERNSEC_CONFIG_AUTO
79139 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
79140 + help
79141 + Enabling this option will prevent programs from
79142 + - changing the executable status of memory pages that were
79143 + not originally created as executable,
79144 + - making read-only executable pages writable again,
79145 + - creating executable pages from anonymous memory,
79146 + - making read-only-after-relocations (RELRO) data pages writable again.
79147 +
79148 + You should say Y here to complete the protection provided by
79149 + the enforcement of non-executable pages.
79150 +
79151 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79152 + this feature on a per file basis.
79153 +
79154 +config PAX_MPROTECT_COMPAT
79155 + bool "Use legacy/compat protection demoting (read help)"
79156 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
79157 + depends on PAX_MPROTECT
79158 + help
79159 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
79160 + by sending the proper error code to the application. For some broken
79161 + userland, this can cause problems with Python or other applications. The
79162 + current implementation however allows for applications like clamav to
79163 + detect if JIT compilation/execution is allowed and to fall back gracefully
79164 + to an interpreter-based mode if it does not. While we encourage everyone
79165 + to use the current implementation as-is and push upstream to fix broken
79166 + userland (note that the RWX logging option can assist with this), in some
79167 + environments this may not be possible. Having to disable MPROTECT
79168 + completely on certain binaries reduces the security benefit of PaX,
79169 + so this option is provided for those environments to revert to the old
79170 + behavior.
79171 +
79172 +config PAX_ELFRELOCS
79173 + bool "Allow ELF text relocations (read help)"
79174 + depends on PAX_MPROTECT
79175 + default n
79176 + help
79177 + Non-executable pages and mprotect() restrictions are effective
79178 + in preventing the introduction of new executable code into an
79179 + attacked task's address space. There remain only two venues
79180 + for this kind of attack: if the attacker can execute already
79181 + existing code in the attacked task then he can either have it
79182 + create and mmap() a file containing his code or have it mmap()
79183 + an already existing ELF library that does not have position
79184 + independent code in it and use mprotect() on it to make it
79185 + writable and copy his code there. While protecting against
79186 + the former approach is beyond PaX, the latter can be prevented
79187 + by having only PIC ELF libraries on one's system (which do not
79188 + need to relocate their code). If you are sure this is your case,
79189 + as is the case with all modern Linux distributions, then leave
79190 + this option disabled. You should say 'n' here.
79191 +
79192 +config PAX_ETEXECRELOCS
79193 + bool "Allow ELF ET_EXEC text relocations"
79194 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
79195 + select PAX_ELFRELOCS
79196 + default y
79197 + help
79198 + On some architectures there are incorrectly created applications
79199 + that require text relocations and would not work without enabling
79200 + this option. If you are an alpha, ia64 or parisc user, you should
79201 + enable this option and disable it once you have made sure that
79202 + none of your applications need it.
79203 +
79204 +config PAX_EMUPLT
79205 + bool "Automatically emulate ELF PLT"
79206 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
79207 + default y
79208 + help
79209 + Enabling this option will have the kernel automatically detect
79210 + and emulate the Procedure Linkage Table entries in ELF files.
79211 + On some architectures such entries are in writable memory, and
79212 + become non-executable leading to task termination. Therefore
79213 + it is mandatory that you enable this option on alpha, parisc,
79214 + sparc and sparc64, otherwise your system would not even boot.
79215 +
79216 + NOTE: this feature *does* open up a loophole in the protection
79217 + provided by the non-executable pages, therefore the proper
79218 + solution is to modify the toolchain to produce a PLT that does
79219 + not need to be writable.
79220 +
79221 +config PAX_DLRESOLVE
79222 + bool 'Emulate old glibc resolver stub'
79223 + depends on PAX_EMUPLT && SPARC
79224 + default n
79225 + help
79226 + This option is needed if userland has an old glibc (before 2.4)
79227 + that puts a 'save' instruction into the runtime generated resolver
79228 + stub that needs special emulation.
79229 +
79230 +config PAX_KERNEXEC
79231 + bool "Enforce non-executable kernel pages"
79232 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
79233 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79234 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79235 + select PAX_KERNEXEC_PLUGIN if X86_64
79236 + help
79237 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79238 + that is, enabling this option will make it harder to inject
79239 + and execute 'foreign' code in kernel memory itself.
79240 +
79241 + Note that on x86_64 kernels there is a known regression when
79242 + this feature and KVM/VMX are both enabled in the host kernel.
79243 +
79244 +choice
79245 + prompt "Return Address Instrumentation Method"
79246 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
79247 + depends on PAX_KERNEXEC_PLUGIN
79248 + help
79249 + Select the method used to instrument function pointer dereferences.
79250 + Note that binary modules cannot be instrumented by this approach.
79251 +
79252 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
79253 + bool "bts"
79254 + help
79255 + This method is compatible with binary only modules but has
79256 + a higher runtime overhead.
79257 +
79258 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
79259 + bool "or"
79260 + depends on !PARAVIRT
79261 + help
79262 + This method is incompatible with binary only modules but has
79263 + a lower runtime overhead.
79264 +endchoice
79265 +
79266 +config PAX_KERNEXEC_PLUGIN_METHOD
79267 + string
79268 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
79269 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
79270 + default ""
79271 +
79272 +config PAX_KERNEXEC_MODULE_TEXT
79273 + int "Minimum amount of memory reserved for module code"
79274 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
79275 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
79276 + depends on PAX_KERNEXEC && X86_32 && MODULES
79277 + help
79278 + Due to implementation details the kernel must reserve a fixed
79279 + amount of memory for module code at compile time that cannot be
79280 + changed at runtime. Here you can specify the minimum amount
79281 + in MB that will be reserved. Due to the same implementation
79282 + details this size will always be rounded up to the next 2/4 MB
79283 + boundary (depends on PAE) so the actually available memory for
79284 + module code will usually be more than this minimum.
79285 +
79286 + The default 4 MB should be enough for most users but if you have
79287 + an excessive number of modules (e.g., most distribution configs
79288 + compile many drivers as modules) or use huge modules such as
79289 + nvidia's kernel driver, you will need to adjust this amount.
79290 + A good rule of thumb is to look at your currently loaded kernel
79291 + modules and add up their sizes.
79292 +
79293 +endmenu
79294 +
79295 +menu "Address Space Layout Randomization"
79296 + depends on PAX
79297 +
79298 +config PAX_ASLR
79299 + bool "Address Space Layout Randomization"
79300 + default y if GRKERNSEC_CONFIG_AUTO
79301 + help
79302 + Many if not most exploit techniques rely on the knowledge of
79303 + certain addresses in the attacked program. The following options
79304 + will allow the kernel to apply a certain amount of randomization
79305 + to specific parts of the program thereby forcing an attacker to
79306 + guess them in most cases. Any failed guess will most likely crash
79307 + the attacked program which allows the kernel to detect such attempts
79308 + and react on them. PaX itself provides no reaction mechanisms,
79309 + instead it is strongly encouraged that you make use of Nergal's
79310 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
79311 + (http://www.grsecurity.net/) built-in crash detection features or
79312 + develop one yourself.
79313 +
79314 + By saying Y here you can choose to randomize the following areas:
79315 + - top of the task's kernel stack
79316 + - top of the task's userland stack
79317 + - base address for mmap() requests that do not specify one
79318 + (this includes all libraries)
79319 + - base address of the main executable
79320 +
79321 + It is strongly recommended to say Y here as address space layout
79322 + randomization has negligible impact on performance yet it provides
79323 + a very effective protection.
79324 +
79325 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79326 + this feature on a per file basis.
79327 +
79328 +config PAX_RANDKSTACK
79329 + bool "Randomize kernel stack base"
79330 + default y if GRKERNSEC_CONFIG_AUTO
79331 + depends on X86_TSC && X86
79332 + help
79333 + By saying Y here the kernel will randomize every task's kernel
79334 + stack on every system call. This will not only force an attacker
79335 + to guess it but also prevent him from making use of possible
79336 + leaked information about it.
79337 +
79338 + Since the kernel stack is a rather scarce resource, randomization
79339 + may cause unexpected stack overflows, therefore you should very
79340 + carefully test your system. Note that once enabled in the kernel
79341 + configuration, this feature cannot be disabled on a per file basis.
79342 +
79343 +config PAX_RANDUSTACK
79344 + bool "Randomize user stack base"
79345 + default y if GRKERNSEC_CONFIG_AUTO
79346 + depends on PAX_ASLR
79347 + help
79348 + By saying Y here the kernel will randomize every task's userland
79349 + stack. The randomization is done in two steps where the second
79350 + one may apply a big amount of shift to the top of the stack and
79351 + cause problems for programs that want to use lots of memory (more
79352 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
79353 + For this reason the second step can be controlled by 'chpax' or
79354 + 'paxctl' on a per file basis.
79355 +
79356 +config PAX_RANDMMAP
79357 + bool "Randomize mmap() base"
79358 + default y if GRKERNSEC_CONFIG_AUTO
79359 + depends on PAX_ASLR
79360 + help
79361 + By saying Y here the kernel will use a randomized base address for
79362 + mmap() requests that do not specify one themselves. As a result
79363 + all dynamically loaded libraries will appear at random addresses
79364 + and therefore be harder to exploit by a technique where an attacker
79365 + attempts to execute library code for his purposes (e.g. spawn a
79366 + shell from an exploited program that is running at an elevated
79367 + privilege level).
79368 +
79369 + Furthermore, if a program is relinked as a dynamic ELF file, its
79370 + base address will be randomized as well, completing the full
79371 + randomization of the address space layout. Attacking such programs
79372 + becomes a guess game. You can find an example of doing this at
79373 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79374 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79375 +
79376 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79377 + feature on a per file basis.
79378 +
79379 +endmenu
79380 +
79381 +menu "Miscellaneous hardening features"
79382 +
79383 +config PAX_MEMORY_SANITIZE
79384 + bool "Sanitize all freed memory"
79385 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
79386 + depends on !HIBERNATION
79387 + help
79388 + By saying Y here the kernel will erase memory pages as soon as they
79389 + are freed. This in turn reduces the lifetime of data stored in the
79390 + pages, making it less likely that sensitive information such as
79391 + passwords, cryptographic secrets, etc stay in memory for too long.
79392 +
79393 + This is especially useful for programs whose runtime is short, long
79394 + lived processes and the kernel itself benefit from this as long as
79395 + they operate on whole memory pages and ensure timely freeing of pages
79396 + that may hold sensitive information.
79397 +
79398 + The tradeoff is performance impact, on a single CPU system kernel
79399 + compilation sees a 3% slowdown, other systems and workloads may vary
79400 + and you are advised to test this feature on your expected workload
79401 + before deploying it.
79402 +
79403 + Note that this feature does not protect data stored in live pages,
79404 + e.g., process memory swapped to disk may stay there for a long time.
79405 +
79406 +config PAX_MEMORY_STACKLEAK
79407 + bool "Sanitize kernel stack"
79408 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
79409 + depends on X86
79410 + help
79411 + By saying Y here the kernel will erase the kernel stack before it
79412 + returns from a system call. This in turn reduces the information
79413 + that a kernel stack leak bug can reveal.
79414 +
79415 + Note that such a bug can still leak information that was put on
79416 + the stack by the current system call (the one eventually triggering
79417 + the bug) but traces of earlier system calls on the kernel stack
79418 + cannot leak anymore.
79419 +
79420 + The tradeoff is performance impact: on a single CPU system kernel
79421 + compilation sees a 1% slowdown, other systems and workloads may vary
79422 + and you are advised to test this feature on your expected workload
79423 + before deploying it.
79424 +
79425 + Note: full support for this feature requires gcc with plugin support
79426 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
79427 + versions means that functions with large enough stack frames may
79428 + leave uninitialized memory behind that may be exposed to a later
79429 + syscall leaking the stack.
79430 +
79431 +config PAX_MEMORY_UDEREF
79432 + bool "Prevent invalid userland pointer dereference"
79433 + default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
79434 + depends on X86 && !UML_X86 && !XEN
79435 + select PAX_PER_CPU_PGD if X86_64
79436 + help
79437 + By saying Y here the kernel will be prevented from dereferencing
79438 + userland pointers in contexts where the kernel expects only kernel
79439 + pointers. This is both a useful runtime debugging feature and a
79440 + security measure that prevents exploiting a class of kernel bugs.
79441 +
79442 + The tradeoff is that some virtualization solutions may experience
79443 + a huge slowdown and therefore you should not enable this feature
79444 + for kernels meant to run in such environments. Whether a given VM
79445 + solution is affected or not is best determined by simply trying it
79446 + out, the performance impact will be obvious right on boot as this
79447 + mechanism engages from very early on. A good rule of thumb is that
79448 + VMs running on CPUs without hardware virtualization support (i.e.,
79449 + the majority of IA-32 CPUs) will likely experience the slowdown.
79450 +
79451 +config PAX_REFCOUNT
79452 + bool "Prevent various kernel object reference counter overflows"
79453 + default y if GRKERNSEC_CONFIG_AUTO
79454 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
79455 + help
79456 + By saying Y here the kernel will detect and prevent overflowing
79457 + various (but not all) kinds of object reference counters. Such
79458 + overflows can normally occur due to bugs only and are often, if
79459 + not always, exploitable.
79460 +
79461 + The tradeoff is that data structures protected by an overflowed
79462 + refcount will never be freed and therefore will leak memory. Note
79463 + that this leak also happens even without this protection but in
79464 + that case the overflow can eventually trigger the freeing of the
79465 + data structure while it is still being used elsewhere, resulting
79466 + in the exploitable situation that this feature prevents.
79467 +
79468 + Since this has a negligible performance impact, you should enable
79469 + this feature.
79470 +
79471 +config PAX_USERCOPY
79472 + bool "Harden heap object copies between kernel and userland"
79473 + default y if GRKERNSEC_CONFIG_AUTO
79474 + depends on X86 || PPC || SPARC || ARM
79475 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79476 + select PAX_USERCOPY_SLABS
79477 + help
79478 + By saying Y here the kernel will enforce the size of heap objects
79479 + when they are copied in either direction between the kernel and
79480 + userland, even if only a part of the heap object is copied.
79481 +
79482 + Specifically, this checking prevents information leaking from the
79483 + kernel heap during kernel to userland copies (if the kernel heap
79484 + object is otherwise fully initialized) and prevents kernel heap
79485 + overflows during userland to kernel copies.
79486 +
79487 + Note that the current implementation provides the strictest bounds
79488 + checks for the SLUB allocator.
79489 +
79490 + Enabling this option also enables per-slab cache protection against
79491 + data in a given cache being copied into/out of via userland
79492 + accessors. Though the whitelist of regions will be reduced over
79493 + time, it notably protects important data structures like task structs.
79494 +
79495 + If frame pointers are enabled on x86, this option will also restrict
79496 + copies into and out of the kernel stack to local variables within a
79497 + single frame.
79498 +
79499 + Since this has a negligible performance impact, you should enable
79500 + this feature.
79501 +
79502 +config PAX_SIZE_OVERFLOW
79503 + bool "Prevent various integer overflows in function size parameters"
79504 + default y if GRKERNSEC_CONFIG_AUTO
79505 + depends on X86
79506 + help
79507 + By saying Y here the kernel recomputes expressions of function
79508 + arguments marked by a size_overflow attribute with double integer
79509 + precision (DImode/TImode for 32/64 bit integer types).
79510 +
79511 + The recomputed argument is checked against INT_MAX and an event
79512 + is logged on overflow and the triggering process is killed.
79513 +
79514 + Homepage:
79515 + http://www.grsecurity.net/~ephox/overflow_plugin/
79516 +
79517 +config PAX_LATENT_ENTROPY
79518 + bool "Generate some entropy during boot"
79519 + default y if GRKERNSEC_CONFIG_AUTO
79520 + help
79521 + By saying Y here the kernel will instrument early boot code to
79522 + extract some entropy from both original and artificially created
79523 + program state. This will help especially embedded systems where
79524 + there is little 'natural' source of entropy normally. The cost
79525 + is some slowdown of the boot process.
79526 +
79527 + Note that entropy extracted this way is not cryptographically
79528 + secure!
79529 +
79530 +endmenu
79531 +
79532 +endmenu
79533 +
79534 +source grsecurity/Kconfig
79535 +
79536 +endmenu
79537 +
79538 +endmenu
79539 +
79540 config KEYS
79541 bool "Enable access key retention support"
79542 help
79543 @@ -169,7 +1038,7 @@ config INTEL_TXT
79544 config LSM_MMAP_MIN_ADDR
79545 int "Low address space for LSM to protect from user allocation"
79546 depends on SECURITY && SECURITY_SELINUX
79547 - default 32768 if ARM
79548 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79549 default 65536
79550 help
79551 This is the portion of low virtual memory which should be protected
79552 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79553 index ad05d39..afffccb 100644
79554 --- a/security/apparmor/lsm.c
79555 +++ b/security/apparmor/lsm.c
79556 @@ -622,7 +622,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79557 return error;
79558 }
79559
79560 -static struct security_operations apparmor_ops = {
79561 +static struct security_operations apparmor_ops __read_only = {
79562 .name = "apparmor",
79563
79564 .ptrace_access_check = apparmor_ptrace_access_check,
79565 diff --git a/security/commoncap.c b/security/commoncap.c
79566 index 71a166a..851bb3e 100644
79567 --- a/security/commoncap.c
79568 +++ b/security/commoncap.c
79569 @@ -576,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79570 {
79571 const struct cred *cred = current_cred();
79572
79573 + if (gr_acl_enable_at_secure())
79574 + return 1;
79575 +
79576 if (cred->uid != 0) {
79577 if (bprm->cap_effective)
79578 return 1;
79579 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79580 index 3ccf7ac..d73ad64 100644
79581 --- a/security/integrity/ima/ima.h
79582 +++ b/security/integrity/ima/ima.h
79583 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79584 extern spinlock_t ima_queue_lock;
79585
79586 struct ima_h_table {
79587 - atomic_long_t len; /* number of stored measurements in the list */
79588 - atomic_long_t violations;
79589 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79590 + atomic_long_unchecked_t violations;
79591 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79592 };
79593 extern struct ima_h_table ima_htable;
79594 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79595 index 88a2788..581ab92 100644
79596 --- a/security/integrity/ima/ima_api.c
79597 +++ b/security/integrity/ima/ima_api.c
79598 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79599 int result;
79600
79601 /* can overflow, only indicator */
79602 - atomic_long_inc(&ima_htable.violations);
79603 + atomic_long_inc_unchecked(&ima_htable.violations);
79604
79605 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79606 if (!entry) {
79607 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79608 index e1aa2b4..52027bf 100644
79609 --- a/security/integrity/ima/ima_fs.c
79610 +++ b/security/integrity/ima/ima_fs.c
79611 @@ -28,12 +28,12 @@
79612 static int valid_policy = 1;
79613 #define TMPBUFLEN 12
79614 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79615 - loff_t *ppos, atomic_long_t *val)
79616 + loff_t *ppos, atomic_long_unchecked_t *val)
79617 {
79618 char tmpbuf[TMPBUFLEN];
79619 ssize_t len;
79620
79621 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79622 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79623 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79624 }
79625
79626 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79627 index 55a6271..ad829c3 100644
79628 --- a/security/integrity/ima/ima_queue.c
79629 +++ b/security/integrity/ima/ima_queue.c
79630 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79631 INIT_LIST_HEAD(&qe->later);
79632 list_add_tail_rcu(&qe->later, &ima_measurements);
79633
79634 - atomic_long_inc(&ima_htable.len);
79635 + atomic_long_inc_unchecked(&ima_htable.len);
79636 key = ima_hash_key(entry->digest);
79637 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79638 return 0;
79639 diff --git a/security/keys/compat.c b/security/keys/compat.c
79640 index 4c48e13..7abdac9 100644
79641 --- a/security/keys/compat.c
79642 +++ b/security/keys/compat.c
79643 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79644 if (ret == 0)
79645 goto no_payload_free;
79646
79647 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79648 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79649
79650 if (iov != iovstack)
79651 kfree(iov);
79652 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79653 index fb767c6..b9c49c0 100644
79654 --- a/security/keys/keyctl.c
79655 +++ b/security/keys/keyctl.c
79656 @@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79657 /*
79658 * Copy the iovec data from userspace
79659 */
79660 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79661 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79662 unsigned ioc)
79663 {
79664 for (; ioc > 0; ioc--) {
79665 @@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79666 * If successful, 0 will be returned.
79667 */
79668 long keyctl_instantiate_key_common(key_serial_t id,
79669 - const struct iovec *payload_iov,
79670 + const struct iovec __user *payload_iov,
79671 unsigned ioc,
79672 size_t plen,
79673 key_serial_t ringid)
79674 @@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
79675 [0].iov_len = plen
79676 };
79677
79678 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79679 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79680 }
79681
79682 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79683 @@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79684 if (ret == 0)
79685 goto no_payload_free;
79686
79687 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79688 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79689
79690 if (iov != iovstack)
79691 kfree(iov);
79692 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79693 index d605f75..2bc6be9 100644
79694 --- a/security/keys/keyring.c
79695 +++ b/security/keys/keyring.c
79696 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79697 ret = -EFAULT;
79698
79699 for (loop = 0; loop < klist->nkeys; loop++) {
79700 + key_serial_t serial;
79701 key = klist->keys[loop];
79702 + serial = key->serial;
79703
79704 tmp = sizeof(key_serial_t);
79705 if (tmp > buflen)
79706 tmp = buflen;
79707
79708 - if (copy_to_user(buffer,
79709 - &key->serial,
79710 - tmp) != 0)
79711 + if (copy_to_user(buffer, &serial, tmp))
79712 goto error;
79713
79714 buflen -= tmp;
79715 diff --git a/security/min_addr.c b/security/min_addr.c
79716 index f728728..6457a0c 100644
79717 --- a/security/min_addr.c
79718 +++ b/security/min_addr.c
79719 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79720 */
79721 static void update_mmap_min_addr(void)
79722 {
79723 +#ifndef SPARC
79724 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79725 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79726 mmap_min_addr = dac_mmap_min_addr;
79727 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79728 #else
79729 mmap_min_addr = dac_mmap_min_addr;
79730 #endif
79731 +#endif
79732 }
79733
79734 /*
79735 diff --git a/security/security.c b/security/security.c
79736 index bf619ff..8179030 100644
79737 --- a/security/security.c
79738 +++ b/security/security.c
79739 @@ -20,6 +20,7 @@
79740 #include <linux/ima.h>
79741 #include <linux/evm.h>
79742 #include <linux/fsnotify.h>
79743 +#include <linux/mm.h>
79744 #include <net/flow.h>
79745
79746 #define MAX_LSM_EVM_XATTR 2
79747 @@ -28,8 +29,8 @@
79748 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79749 CONFIG_DEFAULT_SECURITY;
79750
79751 -static struct security_operations *security_ops;
79752 -static struct security_operations default_security_ops = {
79753 +static struct security_operations *security_ops __read_only;
79754 +static struct security_operations default_security_ops __read_only = {
79755 .name = "default",
79756 };
79757
79758 @@ -70,7 +71,9 @@ int __init security_init(void)
79759
79760 void reset_security_ops(void)
79761 {
79762 + pax_open_kernel();
79763 security_ops = &default_security_ops;
79764 + pax_close_kernel();
79765 }
79766
79767 /* Save user chosen LSM */
79768 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79769 index d85b793..a164832 100644
79770 --- a/security/selinux/hooks.c
79771 +++ b/security/selinux/hooks.c
79772 @@ -95,8 +95,6 @@
79773
79774 #define NUM_SEL_MNT_OPTS 5
79775
79776 -extern struct security_operations *security_ops;
79777 -
79778 /* SECMARK reference count */
79779 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79780
79781 @@ -5520,7 +5518,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79782
79783 #endif
79784
79785 -static struct security_operations selinux_ops = {
79786 +static struct security_operations selinux_ops __read_only = {
79787 .name = "selinux",
79788
79789 .ptrace_access_check = selinux_ptrace_access_check,
79790 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79791 index c220f31..89fab3f 100644
79792 --- a/security/selinux/include/xfrm.h
79793 +++ b/security/selinux/include/xfrm.h
79794 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79795
79796 static inline void selinux_xfrm_notify_policyload(void)
79797 {
79798 - atomic_inc(&flow_cache_genid);
79799 + atomic_inc_unchecked(&flow_cache_genid);
79800 }
79801 #else
79802 static inline int selinux_xfrm_enabled(void)
79803 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79804 index 45c32f0..0038be2 100644
79805 --- a/security/smack/smack_lsm.c
79806 +++ b/security/smack/smack_lsm.c
79807 @@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79808 return 0;
79809 }
79810
79811 -struct security_operations smack_ops = {
79812 +struct security_operations smack_ops __read_only = {
79813 .name = "smack",
79814
79815 .ptrace_access_check = smack_ptrace_access_check,
79816 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79817 index 620d37c..e2ad89b 100644
79818 --- a/security/tomoyo/tomoyo.c
79819 +++ b/security/tomoyo/tomoyo.c
79820 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
79821 * tomoyo_security_ops is a "struct security_operations" which is used for
79822 * registering TOMOYO.
79823 */
79824 -static struct security_operations tomoyo_security_ops = {
79825 +static struct security_operations tomoyo_security_ops __read_only = {
79826 .name = "tomoyo",
79827 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79828 .cred_prepare = tomoyo_cred_prepare,
79829 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
79830 index 51d6709..1f3dbe2 100644
79831 --- a/security/yama/Kconfig
79832 +++ b/security/yama/Kconfig
79833 @@ -1,6 +1,6 @@
79834 config SECURITY_YAMA
79835 bool "Yama support"
79836 - depends on SECURITY
79837 + depends on SECURITY && !GRKERNSEC
79838 select SECURITYFS
79839 select SECURITY_PATH
79840 default n
79841 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79842 index 270790d..c67dfcb 100644
79843 --- a/sound/aoa/codecs/onyx.c
79844 +++ b/sound/aoa/codecs/onyx.c
79845 @@ -54,7 +54,7 @@ struct onyx {
79846 spdif_locked:1,
79847 analog_locked:1,
79848 original_mute:2;
79849 - int open_count;
79850 + local_t open_count;
79851 struct codec_info *codec_info;
79852
79853 /* mutex serializes concurrent access to the device
79854 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79855 struct onyx *onyx = cii->codec_data;
79856
79857 mutex_lock(&onyx->mutex);
79858 - onyx->open_count++;
79859 + local_inc(&onyx->open_count);
79860 mutex_unlock(&onyx->mutex);
79861
79862 return 0;
79863 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79864 struct onyx *onyx = cii->codec_data;
79865
79866 mutex_lock(&onyx->mutex);
79867 - onyx->open_count--;
79868 - if (!onyx->open_count)
79869 + if (local_dec_and_test(&onyx->open_count))
79870 onyx->spdif_locked = onyx->analog_locked = 0;
79871 mutex_unlock(&onyx->mutex);
79872
79873 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79874 index ffd2025..df062c9 100644
79875 --- a/sound/aoa/codecs/onyx.h
79876 +++ b/sound/aoa/codecs/onyx.h
79877 @@ -11,6 +11,7 @@
79878 #include <linux/i2c.h>
79879 #include <asm/pmac_low_i2c.h>
79880 #include <asm/prom.h>
79881 +#include <asm/local.h>
79882
79883 /* PCM3052 register definitions */
79884
79885 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79886 index 08fde00..0bf641a 100644
79887 --- a/sound/core/oss/pcm_oss.c
79888 +++ b/sound/core/oss/pcm_oss.c
79889 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79890 if (in_kernel) {
79891 mm_segment_t fs;
79892 fs = snd_enter_user();
79893 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79894 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79895 snd_leave_user(fs);
79896 } else {
79897 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79898 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79899 }
79900 if (ret != -EPIPE && ret != -ESTRPIPE)
79901 break;
79902 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79903 if (in_kernel) {
79904 mm_segment_t fs;
79905 fs = snd_enter_user();
79906 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79907 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79908 snd_leave_user(fs);
79909 } else {
79910 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79911 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79912 }
79913 if (ret == -EPIPE) {
79914 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79915 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79916 struct snd_pcm_plugin_channel *channels;
79917 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79918 if (!in_kernel) {
79919 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79920 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79921 return -EFAULT;
79922 buf = runtime->oss.buffer;
79923 }
79924 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79925 }
79926 } else {
79927 tmp = snd_pcm_oss_write2(substream,
79928 - (const char __force *)buf,
79929 + (const char __force_kernel *)buf,
79930 runtime->oss.period_bytes, 0);
79931 if (tmp <= 0)
79932 goto err;
79933 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79934 struct snd_pcm_runtime *runtime = substream->runtime;
79935 snd_pcm_sframes_t frames, frames1;
79936 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79937 - char __user *final_dst = (char __force __user *)buf;
79938 + char __user *final_dst = (char __force_user *)buf;
79939 if (runtime->oss.plugin_first) {
79940 struct snd_pcm_plugin_channel *channels;
79941 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79942 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79943 xfer += tmp;
79944 runtime->oss.buffer_used -= tmp;
79945 } else {
79946 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79947 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79948 runtime->oss.period_bytes, 0);
79949 if (tmp <= 0)
79950 goto err;
79951 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79952 size1);
79953 size1 /= runtime->channels; /* frames */
79954 fs = snd_enter_user();
79955 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79956 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79957 snd_leave_user(fs);
79958 }
79959 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79960 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79961 index 91cdf94..4085161 100644
79962 --- a/sound/core/pcm_compat.c
79963 +++ b/sound/core/pcm_compat.c
79964 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79965 int err;
79966
79967 fs = snd_enter_user();
79968 - err = snd_pcm_delay(substream, &delay);
79969 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79970 snd_leave_user(fs);
79971 if (err < 0)
79972 return err;
79973 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79974 index 3fe99e6..26952e4 100644
79975 --- a/sound/core/pcm_native.c
79976 +++ b/sound/core/pcm_native.c
79977 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79978 switch (substream->stream) {
79979 case SNDRV_PCM_STREAM_PLAYBACK:
79980 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79981 - (void __user *)arg);
79982 + (void __force_user *)arg);
79983 break;
79984 case SNDRV_PCM_STREAM_CAPTURE:
79985 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79986 - (void __user *)arg);
79987 + (void __force_user *)arg);
79988 break;
79989 default:
79990 result = -EINVAL;
79991 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79992 index 5cf8d65..912a79c 100644
79993 --- a/sound/core/seq/seq_device.c
79994 +++ b/sound/core/seq/seq_device.c
79995 @@ -64,7 +64,7 @@ struct ops_list {
79996 int argsize; /* argument size */
79997
79998 /* operators */
79999 - struct snd_seq_dev_ops ops;
80000 + struct snd_seq_dev_ops *ops;
80001
80002 /* registred devices */
80003 struct list_head dev_list; /* list of devices */
80004 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
80005
80006 mutex_lock(&ops->reg_mutex);
80007 /* copy driver operators */
80008 - ops->ops = *entry;
80009 + ops->ops = entry;
80010 ops->driver |= DRIVER_LOADED;
80011 ops->argsize = argsize;
80012
80013 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
80014 dev->name, ops->id, ops->argsize, dev->argsize);
80015 return -EINVAL;
80016 }
80017 - if (ops->ops.init_device(dev) >= 0) {
80018 + if (ops->ops->init_device(dev) >= 0) {
80019 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
80020 ops->num_init_devices++;
80021 } else {
80022 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
80023 dev->name, ops->id, ops->argsize, dev->argsize);
80024 return -EINVAL;
80025 }
80026 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
80027 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
80028 dev->status = SNDRV_SEQ_DEVICE_FREE;
80029 dev->driver_data = NULL;
80030 ops->num_init_devices--;
80031 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
80032 index 621e60e..f4543f5 100644
80033 --- a/sound/drivers/mts64.c
80034 +++ b/sound/drivers/mts64.c
80035 @@ -29,6 +29,7 @@
80036 #include <sound/initval.h>
80037 #include <sound/rawmidi.h>
80038 #include <sound/control.h>
80039 +#include <asm/local.h>
80040
80041 #define CARD_NAME "Miditerminal 4140"
80042 #define DRIVER_NAME "MTS64"
80043 @@ -67,7 +68,7 @@ struct mts64 {
80044 struct pardevice *pardev;
80045 int pardev_claimed;
80046
80047 - int open_count;
80048 + local_t open_count;
80049 int current_midi_output_port;
80050 int current_midi_input_port;
80051 u8 mode[MTS64_NUM_INPUT_PORTS];
80052 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80053 {
80054 struct mts64 *mts = substream->rmidi->private_data;
80055
80056 - if (mts->open_count == 0) {
80057 + if (local_read(&mts->open_count) == 0) {
80058 /* We don't need a spinlock here, because this is just called
80059 if the device has not been opened before.
80060 So there aren't any IRQs from the device */
80061 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80062
80063 msleep(50);
80064 }
80065 - ++(mts->open_count);
80066 + local_inc(&mts->open_count);
80067
80068 return 0;
80069 }
80070 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80071 struct mts64 *mts = substream->rmidi->private_data;
80072 unsigned long flags;
80073
80074 - --(mts->open_count);
80075 - if (mts->open_count == 0) {
80076 + if (local_dec_return(&mts->open_count) == 0) {
80077 /* We need the spinlock_irqsave here because we can still
80078 have IRQs at this point */
80079 spin_lock_irqsave(&mts->lock, flags);
80080 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80081
80082 msleep(500);
80083
80084 - } else if (mts->open_count < 0)
80085 - mts->open_count = 0;
80086 + } else if (local_read(&mts->open_count) < 0)
80087 + local_set(&mts->open_count, 0);
80088
80089 return 0;
80090 }
80091 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
80092 index b953fb4..1999c01 100644
80093 --- a/sound/drivers/opl4/opl4_lib.c
80094 +++ b/sound/drivers/opl4/opl4_lib.c
80095 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
80096 MODULE_DESCRIPTION("OPL4 driver");
80097 MODULE_LICENSE("GPL");
80098
80099 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
80100 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
80101 {
80102 int timeout = 10;
80103 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
80104 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
80105 index 3e32bd3..46fc152 100644
80106 --- a/sound/drivers/portman2x4.c
80107 +++ b/sound/drivers/portman2x4.c
80108 @@ -48,6 +48,7 @@
80109 #include <sound/initval.h>
80110 #include <sound/rawmidi.h>
80111 #include <sound/control.h>
80112 +#include <asm/local.h>
80113
80114 #define CARD_NAME "Portman 2x4"
80115 #define DRIVER_NAME "portman"
80116 @@ -85,7 +86,7 @@ struct portman {
80117 struct pardevice *pardev;
80118 int pardev_claimed;
80119
80120 - int open_count;
80121 + local_t open_count;
80122 int mode[PORTMAN_NUM_INPUT_PORTS];
80123 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
80124 };
80125 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
80126 index 87657dd..a8268d4 100644
80127 --- a/sound/firewire/amdtp.c
80128 +++ b/sound/firewire/amdtp.c
80129 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
80130 ptr = s->pcm_buffer_pointer + data_blocks;
80131 if (ptr >= pcm->runtime->buffer_size)
80132 ptr -= pcm->runtime->buffer_size;
80133 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
80134 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
80135
80136 s->pcm_period_pointer += data_blocks;
80137 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
80138 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
80139 */
80140 void amdtp_out_stream_update(struct amdtp_out_stream *s)
80141 {
80142 - ACCESS_ONCE(s->source_node_id_field) =
80143 + ACCESS_ONCE_RW(s->source_node_id_field) =
80144 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
80145 }
80146 EXPORT_SYMBOL(amdtp_out_stream_update);
80147 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
80148 index 537a9cb..8e8c8e9 100644
80149 --- a/sound/firewire/amdtp.h
80150 +++ b/sound/firewire/amdtp.h
80151 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
80152 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
80153 struct snd_pcm_substream *pcm)
80154 {
80155 - ACCESS_ONCE(s->pcm) = pcm;
80156 + ACCESS_ONCE_RW(s->pcm) = pcm;
80157 }
80158
80159 /**
80160 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
80161 index d428ffe..751ef78 100644
80162 --- a/sound/firewire/isight.c
80163 +++ b/sound/firewire/isight.c
80164 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
80165 ptr += count;
80166 if (ptr >= runtime->buffer_size)
80167 ptr -= runtime->buffer_size;
80168 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
80169 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
80170
80171 isight->period_counter += count;
80172 if (isight->period_counter >= runtime->period_size) {
80173 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
80174 if (err < 0)
80175 return err;
80176
80177 - ACCESS_ONCE(isight->pcm_active) = true;
80178 + ACCESS_ONCE_RW(isight->pcm_active) = true;
80179
80180 return 0;
80181 }
80182 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
80183 {
80184 struct isight *isight = substream->private_data;
80185
80186 - ACCESS_ONCE(isight->pcm_active) = false;
80187 + ACCESS_ONCE_RW(isight->pcm_active) = false;
80188
80189 mutex_lock(&isight->mutex);
80190 isight_stop_streaming(isight);
80191 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
80192
80193 switch (cmd) {
80194 case SNDRV_PCM_TRIGGER_START:
80195 - ACCESS_ONCE(isight->pcm_running) = true;
80196 + ACCESS_ONCE_RW(isight->pcm_running) = true;
80197 break;
80198 case SNDRV_PCM_TRIGGER_STOP:
80199 - ACCESS_ONCE(isight->pcm_running) = false;
80200 + ACCESS_ONCE_RW(isight->pcm_running) = false;
80201 break;
80202 default:
80203 return -EINVAL;
80204 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
80205 index 7bd5e33..1fcab12 100644
80206 --- a/sound/isa/cmi8330.c
80207 +++ b/sound/isa/cmi8330.c
80208 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
80209
80210 struct snd_pcm *pcm;
80211 struct snd_cmi8330_stream {
80212 - struct snd_pcm_ops ops;
80213 + snd_pcm_ops_no_const ops;
80214 snd_pcm_open_callback_t open;
80215 void *private_data; /* sb or wss */
80216 } streams[2];
80217 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
80218 index 733b014..56ce96f 100644
80219 --- a/sound/oss/sb_audio.c
80220 +++ b/sound/oss/sb_audio.c
80221 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
80222 buf16 = (signed short *)(localbuf + localoffs);
80223 while (c)
80224 {
80225 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80226 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80227 if (copy_from_user(lbuf8,
80228 userbuf+useroffs + p,
80229 locallen))
80230 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
80231 index 09d4648..cf234c7 100644
80232 --- a/sound/oss/swarm_cs4297a.c
80233 +++ b/sound/oss/swarm_cs4297a.c
80234 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
80235 {
80236 struct cs4297a_state *s;
80237 u32 pwr, id;
80238 - mm_segment_t fs;
80239 int rval;
80240 #ifndef CONFIG_BCM_CS4297A_CSWARM
80241 u64 cfg;
80242 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
80243 if (!rval) {
80244 char *sb1250_duart_present;
80245
80246 +#if 0
80247 + mm_segment_t fs;
80248 fs = get_fs();
80249 set_fs(KERNEL_DS);
80250 -#if 0
80251 val = SOUND_MASK_LINE;
80252 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
80253 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
80254 val = initvol[i].vol;
80255 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
80256 }
80257 + set_fs(fs);
80258 // cs4297a_write_ac97(s, 0x18, 0x0808);
80259 #else
80260 // cs4297a_write_ac97(s, 0x5e, 0x180);
80261 cs4297a_write_ac97(s, 0x02, 0x0808);
80262 cs4297a_write_ac97(s, 0x18, 0x0808);
80263 #endif
80264 - set_fs(fs);
80265
80266 list_add(&s->list, &cs4297a_devs);
80267
80268 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
80269 index 56b4f74..7cfd41a 100644
80270 --- a/sound/pci/hda/hda_codec.h
80271 +++ b/sound/pci/hda/hda_codec.h
80272 @@ -611,7 +611,7 @@ struct hda_bus_ops {
80273 /* notify power-up/down from codec to controller */
80274 void (*pm_notify)(struct hda_bus *bus);
80275 #endif
80276 -};
80277 +} __no_const;
80278
80279 /* template to pass to the bus constructor */
80280 struct hda_bus_template {
80281 @@ -713,6 +713,7 @@ struct hda_codec_ops {
80282 #endif
80283 void (*reboot_notify)(struct hda_codec *codec);
80284 };
80285 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
80286
80287 /* record for amp information cache */
80288 struct hda_cache_head {
80289 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
80290 struct snd_pcm_substream *substream);
80291 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
80292 struct snd_pcm_substream *substream);
80293 -};
80294 +} __no_const;
80295
80296 /* PCM information for each substream */
80297 struct hda_pcm_stream {
80298 @@ -801,7 +802,7 @@ struct hda_codec {
80299 const char *modelname; /* model name for preset */
80300
80301 /* set by patch */
80302 - struct hda_codec_ops patch_ops;
80303 + hda_codec_ops_no_const patch_ops;
80304
80305 /* PCM to create, set by patch_ops.build_pcms callback */
80306 unsigned int num_pcms;
80307 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
80308 index 0da778a..bc38b84 100644
80309 --- a/sound/pci/ice1712/ice1712.h
80310 +++ b/sound/pci/ice1712/ice1712.h
80311 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
80312 unsigned int mask_flags; /* total mask bits */
80313 struct snd_akm4xxx_ops {
80314 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
80315 - } ops;
80316 + } __no_const ops;
80317 };
80318
80319 struct snd_ice1712_spdif {
80320 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
80321 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80322 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80323 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80324 - } ops;
80325 + } __no_const ops;
80326 };
80327
80328
80329 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
80330 index a8159b81..5f006a5 100644
80331 --- a/sound/pci/ymfpci/ymfpci_main.c
80332 +++ b/sound/pci/ymfpci/ymfpci_main.c
80333 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
80334 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
80335 break;
80336 }
80337 - if (atomic_read(&chip->interrupt_sleep_count)) {
80338 - atomic_set(&chip->interrupt_sleep_count, 0);
80339 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80340 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80341 wake_up(&chip->interrupt_sleep);
80342 }
80343 __end:
80344 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
80345 continue;
80346 init_waitqueue_entry(&wait, current);
80347 add_wait_queue(&chip->interrupt_sleep, &wait);
80348 - atomic_inc(&chip->interrupt_sleep_count);
80349 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
80350 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
80351 remove_wait_queue(&chip->interrupt_sleep, &wait);
80352 }
80353 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
80354 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
80355 spin_unlock(&chip->reg_lock);
80356
80357 - if (atomic_read(&chip->interrupt_sleep_count)) {
80358 - atomic_set(&chip->interrupt_sleep_count, 0);
80359 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80360 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80361 wake_up(&chip->interrupt_sleep);
80362 }
80363 }
80364 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
80365 spin_lock_init(&chip->reg_lock);
80366 spin_lock_init(&chip->voice_lock);
80367 init_waitqueue_head(&chip->interrupt_sleep);
80368 - atomic_set(&chip->interrupt_sleep_count, 0);
80369 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80370 chip->card = card;
80371 chip->pci = pci;
80372 chip->irq = -1;
80373 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
80374 index 0ad8dca..7186339 100644
80375 --- a/sound/soc/soc-pcm.c
80376 +++ b/sound/soc/soc-pcm.c
80377 @@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
80378 struct snd_soc_platform *platform = rtd->platform;
80379 struct snd_soc_dai *codec_dai = rtd->codec_dai;
80380 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
80381 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
80382 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
80383 struct snd_pcm *pcm;
80384 char new_name[64];
80385 int ret = 0, playback = 0, capture = 0;
80386 diff --git a/sound/usb/card.h b/sound/usb/card.h
80387 index da5fa1a..113cd02 100644
80388 --- a/sound/usb/card.h
80389 +++ b/sound/usb/card.h
80390 @@ -45,6 +45,7 @@ struct snd_urb_ops {
80391 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80392 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80393 };
80394 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
80395
80396 struct snd_usb_substream {
80397 struct snd_usb_stream *stream;
80398 @@ -94,7 +95,7 @@ struct snd_usb_substream {
80399 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80400 spinlock_t lock;
80401
80402 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80403 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80404 int last_frame_number; /* stored frame number */
80405 int last_delay; /* stored delay */
80406 };
80407 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
80408 new file mode 100644
80409 index 0000000..50f2f2f
80410 --- /dev/null
80411 +++ b/tools/gcc/.gitignore
80412 @@ -0,0 +1 @@
80413 +size_overflow_hash.h
80414 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80415 new file mode 100644
80416 index 0000000..1d09b7e
80417 --- /dev/null
80418 +++ b/tools/gcc/Makefile
80419 @@ -0,0 +1,43 @@
80420 +#CC := gcc
80421 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80422 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80423 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80424 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
80425 +
80426 +ifeq ($(PLUGINCC),$(HOSTCC))
80427 +HOSTLIBS := hostlibs
80428 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
80429 +else
80430 +HOSTLIBS := hostcxxlibs
80431 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
80432 +endif
80433 +
80434 +$(HOSTLIBS)-y := constify_plugin.so
80435 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80436 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80437 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80438 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80439 +$(HOSTLIBS)-y += colorize_plugin.so
80440 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
80441 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
80442 +
80443 +always := $($(HOSTLIBS)-y)
80444 +
80445 +constify_plugin-objs := constify_plugin.o
80446 +stackleak_plugin-objs := stackleak_plugin.o
80447 +kallocstat_plugin-objs := kallocstat_plugin.o
80448 +kernexec_plugin-objs := kernexec_plugin.o
80449 +checker_plugin-objs := checker_plugin.o
80450 +colorize_plugin-objs := colorize_plugin.o
80451 +size_overflow_plugin-objs := size_overflow_plugin.o
80452 +latent_entropy_plugin-objs := latent_entropy_plugin.o
80453 +
80454 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
80455 +
80456 +quiet_cmd_build_size_overflow_hash = GENHASH $@
80457 + cmd_build_size_overflow_hash = \
80458 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
80459 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
80460 + $(call if_changed,build_size_overflow_hash)
80461 +
80462 +targets += size_overflow_hash.h
80463 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80464 new file mode 100644
80465 index 0000000..d41b5af
80466 --- /dev/null
80467 +++ b/tools/gcc/checker_plugin.c
80468 @@ -0,0 +1,171 @@
80469 +/*
80470 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80471 + * Licensed under the GPL v2
80472 + *
80473 + * Note: the choice of the license means that the compilation process is
80474 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80475 + * but for the kernel it doesn't matter since it doesn't link against
80476 + * any of the gcc libraries
80477 + *
80478 + * gcc plugin to implement various sparse (source code checker) features
80479 + *
80480 + * TODO:
80481 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80482 + *
80483 + * BUGS:
80484 + * - none known
80485 + */
80486 +#include "gcc-plugin.h"
80487 +#include "config.h"
80488 +#include "system.h"
80489 +#include "coretypes.h"
80490 +#include "tree.h"
80491 +#include "tree-pass.h"
80492 +#include "flags.h"
80493 +#include "intl.h"
80494 +#include "toplev.h"
80495 +#include "plugin.h"
80496 +//#include "expr.h" where are you...
80497 +#include "diagnostic.h"
80498 +#include "plugin-version.h"
80499 +#include "tm.h"
80500 +#include "function.h"
80501 +#include "basic-block.h"
80502 +#include "gimple.h"
80503 +#include "rtl.h"
80504 +#include "emit-rtl.h"
80505 +#include "tree-flow.h"
80506 +#include "target.h"
80507 +
80508 +extern void c_register_addr_space (const char *str, addr_space_t as);
80509 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80510 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80511 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80512 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80513 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80514 +
80515 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80516 +extern rtx emit_move_insn(rtx x, rtx y);
80517 +
80518 +int plugin_is_GPL_compatible;
80519 +
80520 +static struct plugin_info checker_plugin_info = {
80521 + .version = "201111150100",
80522 +};
80523 +
80524 +#define ADDR_SPACE_KERNEL 0
80525 +#define ADDR_SPACE_FORCE_KERNEL 1
80526 +#define ADDR_SPACE_USER 2
80527 +#define ADDR_SPACE_FORCE_USER 3
80528 +#define ADDR_SPACE_IOMEM 0
80529 +#define ADDR_SPACE_FORCE_IOMEM 0
80530 +#define ADDR_SPACE_PERCPU 0
80531 +#define ADDR_SPACE_FORCE_PERCPU 0
80532 +#define ADDR_SPACE_RCU 0
80533 +#define ADDR_SPACE_FORCE_RCU 0
80534 +
80535 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80536 +{
80537 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80538 +}
80539 +
80540 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80541 +{
80542 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80543 +}
80544 +
80545 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80546 +{
80547 + return default_addr_space_valid_pointer_mode(mode, as);
80548 +}
80549 +
80550 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80551 +{
80552 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80553 +}
80554 +
80555 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80556 +{
80557 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80558 +}
80559 +
80560 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80561 +{
80562 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80563 + return true;
80564 +
80565 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80566 + return true;
80567 +
80568 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80569 + return true;
80570 +
80571 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80572 + return true;
80573 +
80574 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80575 + return true;
80576 +
80577 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80578 + return true;
80579 +
80580 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80581 + return true;
80582 +
80583 + return subset == superset;
80584 +}
80585 +
80586 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80587 +{
80588 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80589 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80590 +
80591 + return op;
80592 +}
80593 +
80594 +static void register_checker_address_spaces(void *event_data, void *data)
80595 +{
80596 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80597 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80598 + c_register_addr_space("__user", ADDR_SPACE_USER);
80599 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80600 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80601 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80602 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80603 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80604 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80605 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80606 +
80607 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80608 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80609 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80610 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80611 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80612 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80613 + targetm.addr_space.convert = checker_addr_space_convert;
80614 +}
80615 +
80616 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80617 +{
80618 + const char * const plugin_name = plugin_info->base_name;
80619 + const int argc = plugin_info->argc;
80620 + const struct plugin_argument * const argv = plugin_info->argv;
80621 + int i;
80622 +
80623 + if (!plugin_default_version_check(version, &gcc_version)) {
80624 + error(G_("incompatible gcc/plugin versions"));
80625 + return 1;
80626 + }
80627 +
80628 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80629 +
80630 + for (i = 0; i < argc; ++i)
80631 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80632 +
80633 + if (TARGET_64BIT == 0)
80634 + return 0;
80635 +
80636 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80637 +
80638 + return 0;
80639 +}
80640 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
80641 new file mode 100644
80642 index 0000000..846aeb0
80643 --- /dev/null
80644 +++ b/tools/gcc/colorize_plugin.c
80645 @@ -0,0 +1,148 @@
80646 +/*
80647 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
80648 + * Licensed under the GPL v2
80649 + *
80650 + * Note: the choice of the license means that the compilation process is
80651 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80652 + * but for the kernel it doesn't matter since it doesn't link against
80653 + * any of the gcc libraries
80654 + *
80655 + * gcc plugin to colorize diagnostic output
80656 + *
80657 + */
80658 +
80659 +#include "gcc-plugin.h"
80660 +#include "config.h"
80661 +#include "system.h"
80662 +#include "coretypes.h"
80663 +#include "tree.h"
80664 +#include "tree-pass.h"
80665 +#include "flags.h"
80666 +#include "intl.h"
80667 +#include "toplev.h"
80668 +#include "plugin.h"
80669 +#include "diagnostic.h"
80670 +#include "plugin-version.h"
80671 +#include "tm.h"
80672 +
80673 +int plugin_is_GPL_compatible;
80674 +
80675 +static struct plugin_info colorize_plugin_info = {
80676 + .version = "201203092200",
80677 + .help = NULL,
80678 +};
80679 +
80680 +#define GREEN "\033[32m\033[2m"
80681 +#define LIGHTGREEN "\033[32m\033[1m"
80682 +#define YELLOW "\033[33m\033[2m"
80683 +#define LIGHTYELLOW "\033[33m\033[1m"
80684 +#define RED "\033[31m\033[2m"
80685 +#define LIGHTRED "\033[31m\033[1m"
80686 +#define BLUE "\033[34m\033[2m"
80687 +#define LIGHTBLUE "\033[34m\033[1m"
80688 +#define BRIGHT "\033[m\033[1m"
80689 +#define NORMAL "\033[m"
80690 +
80691 +static diagnostic_starter_fn old_starter;
80692 +static diagnostic_finalizer_fn old_finalizer;
80693 +
80694 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80695 +{
80696 + const char *color;
80697 + char *newprefix;
80698 +
80699 + switch (diagnostic->kind) {
80700 + case DK_NOTE:
80701 + color = LIGHTBLUE;
80702 + break;
80703 +
80704 + case DK_PEDWARN:
80705 + case DK_WARNING:
80706 + color = LIGHTYELLOW;
80707 + break;
80708 +
80709 + case DK_ERROR:
80710 + case DK_FATAL:
80711 + case DK_ICE:
80712 + case DK_PERMERROR:
80713 + case DK_SORRY:
80714 + color = LIGHTRED;
80715 + break;
80716 +
80717 + default:
80718 + color = NORMAL;
80719 + }
80720 +
80721 + old_starter(context, diagnostic);
80722 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
80723 + return;
80724 + pp_destroy_prefix(context->printer);
80725 + pp_set_prefix(context->printer, newprefix);
80726 +}
80727 +
80728 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80729 +{
80730 + old_finalizer(context, diagnostic);
80731 +}
80732 +
80733 +static void colorize_arm(void)
80734 +{
80735 + old_starter = diagnostic_starter(global_dc);
80736 + old_finalizer = diagnostic_finalizer(global_dc);
80737 +
80738 + diagnostic_starter(global_dc) = start_colorize;
80739 + diagnostic_finalizer(global_dc) = finalize_colorize;
80740 +}
80741 +
80742 +static unsigned int execute_colorize_rearm(void)
80743 +{
80744 + if (diagnostic_starter(global_dc) == start_colorize)
80745 + return 0;
80746 +
80747 + colorize_arm();
80748 + return 0;
80749 +}
80750 +
80751 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
80752 + .pass = {
80753 + .type = SIMPLE_IPA_PASS,
80754 + .name = "colorize_rearm",
80755 + .gate = NULL,
80756 + .execute = execute_colorize_rearm,
80757 + .sub = NULL,
80758 + .next = NULL,
80759 + .static_pass_number = 0,
80760 + .tv_id = TV_NONE,
80761 + .properties_required = 0,
80762 + .properties_provided = 0,
80763 + .properties_destroyed = 0,
80764 + .todo_flags_start = 0,
80765 + .todo_flags_finish = 0
80766 + }
80767 +};
80768 +
80769 +static void colorize_start_unit(void *gcc_data, void *user_data)
80770 +{
80771 + colorize_arm();
80772 +}
80773 +
80774 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80775 +{
80776 + const char * const plugin_name = plugin_info->base_name;
80777 + struct register_pass_info colorize_rearm_pass_info = {
80778 + .pass = &pass_ipa_colorize_rearm.pass,
80779 + .reference_pass_name = "*free_lang_data",
80780 + .ref_pass_instance_number = 1,
80781 + .pos_op = PASS_POS_INSERT_AFTER
80782 + };
80783 +
80784 + if (!plugin_default_version_check(version, &gcc_version)) {
80785 + error(G_("incompatible gcc/plugin versions"));
80786 + return 1;
80787 + }
80788 +
80789 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
80790 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
80791 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
80792 + return 0;
80793 +}
80794 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80795 new file mode 100644
80796 index 0000000..048d4ff
80797 --- /dev/null
80798 +++ b/tools/gcc/constify_plugin.c
80799 @@ -0,0 +1,328 @@
80800 +/*
80801 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80802 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80803 + * Licensed under the GPL v2, or (at your option) v3
80804 + *
80805 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80806 + *
80807 + * Homepage:
80808 + * http://www.grsecurity.net/~ephox/const_plugin/
80809 + *
80810 + * Usage:
80811 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80812 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80813 + */
80814 +
80815 +#include "gcc-plugin.h"
80816 +#include "config.h"
80817 +#include "system.h"
80818 +#include "coretypes.h"
80819 +#include "tree.h"
80820 +#include "tree-pass.h"
80821 +#include "flags.h"
80822 +#include "intl.h"
80823 +#include "toplev.h"
80824 +#include "plugin.h"
80825 +#include "diagnostic.h"
80826 +#include "plugin-version.h"
80827 +#include "tm.h"
80828 +#include "function.h"
80829 +#include "basic-block.h"
80830 +#include "gimple.h"
80831 +#include "rtl.h"
80832 +#include "emit-rtl.h"
80833 +#include "tree-flow.h"
80834 +
80835 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80836 +
80837 +int plugin_is_GPL_compatible;
80838 +
80839 +static struct plugin_info const_plugin_info = {
80840 + .version = "201205300030",
80841 + .help = "no-constify\tturn off constification\n",
80842 +};
80843 +
80844 +static void deconstify_tree(tree node);
80845 +
80846 +static void deconstify_type(tree type)
80847 +{
80848 + tree field;
80849 +
80850 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
80851 + tree type = TREE_TYPE(field);
80852 +
80853 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80854 + continue;
80855 + if (!TYPE_READONLY(type))
80856 + continue;
80857 +
80858 + deconstify_tree(field);
80859 + }
80860 + TYPE_READONLY(type) = 0;
80861 + C_TYPE_FIELDS_READONLY(type) = 0;
80862 +}
80863 +
80864 +static void deconstify_tree(tree node)
80865 +{
80866 + tree old_type, new_type, field;
80867 +
80868 + old_type = TREE_TYPE(node);
80869 +
80870 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
80871 +
80872 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80873 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80874 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80875 + DECL_FIELD_CONTEXT(field) = new_type;
80876 +
80877 + deconstify_type(new_type);
80878 +
80879 + TREE_READONLY(node) = 0;
80880 + TREE_TYPE(node) = new_type;
80881 +}
80882 +
80883 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80884 +{
80885 + tree type;
80886 +
80887 + *no_add_attrs = true;
80888 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80889 + error("%qE attribute does not apply to functions", name);
80890 + return NULL_TREE;
80891 + }
80892 +
80893 + if (TREE_CODE(*node) == VAR_DECL) {
80894 + error("%qE attribute does not apply to variables", name);
80895 + return NULL_TREE;
80896 + }
80897 +
80898 + if (TYPE_P(*node)) {
80899 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80900 + *no_add_attrs = false;
80901 + else
80902 + error("%qE attribute applies to struct and union types only", name);
80903 + return NULL_TREE;
80904 + }
80905 +
80906 + type = TREE_TYPE(*node);
80907 +
80908 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80909 + error("%qE attribute applies to struct and union types only", name);
80910 + return NULL_TREE;
80911 + }
80912 +
80913 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80914 + error("%qE attribute is already applied to the type", name);
80915 + return NULL_TREE;
80916 + }
80917 +
80918 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80919 + error("%qE attribute used on type that is not constified", name);
80920 + return NULL_TREE;
80921 + }
80922 +
80923 + if (TREE_CODE(*node) == TYPE_DECL) {
80924 + deconstify_tree(*node);
80925 + return NULL_TREE;
80926 + }
80927 +
80928 + return NULL_TREE;
80929 +}
80930 +
80931 +static void constify_type(tree type)
80932 +{
80933 + TYPE_READONLY(type) = 1;
80934 + C_TYPE_FIELDS_READONLY(type) = 1;
80935 +}
80936 +
80937 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80938 +{
80939 + *no_add_attrs = true;
80940 + if (!TYPE_P(*node)) {
80941 + error("%qE attribute applies to types only", name);
80942 + return NULL_TREE;
80943 + }
80944 +
80945 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80946 + error("%qE attribute applies to struct and union types only", name);
80947 + return NULL_TREE;
80948 + }
80949 +
80950 + *no_add_attrs = false;
80951 + constify_type(*node);
80952 + return NULL_TREE;
80953 +}
80954 +
80955 +static struct attribute_spec no_const_attr = {
80956 + .name = "no_const",
80957 + .min_length = 0,
80958 + .max_length = 0,
80959 + .decl_required = false,
80960 + .type_required = false,
80961 + .function_type_required = false,
80962 + .handler = handle_no_const_attribute,
80963 +#if BUILDING_GCC_VERSION >= 4007
80964 + .affects_type_identity = true
80965 +#endif
80966 +};
80967 +
80968 +static struct attribute_spec do_const_attr = {
80969 + .name = "do_const",
80970 + .min_length = 0,
80971 + .max_length = 0,
80972 + .decl_required = false,
80973 + .type_required = false,
80974 + .function_type_required = false,
80975 + .handler = handle_do_const_attribute,
80976 +#if BUILDING_GCC_VERSION >= 4007
80977 + .affects_type_identity = true
80978 +#endif
80979 +};
80980 +
80981 +static void register_attributes(void *event_data, void *data)
80982 +{
80983 + register_attribute(&no_const_attr);
80984 + register_attribute(&do_const_attr);
80985 +}
80986 +
80987 +static bool is_fptr(tree field)
80988 +{
80989 + tree ptr = TREE_TYPE(field);
80990 +
80991 + if (TREE_CODE(ptr) != POINTER_TYPE)
80992 + return false;
80993 +
80994 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80995 +}
80996 +
80997 +static bool walk_struct(tree node)
80998 +{
80999 + tree field;
81000 +
81001 + if (TYPE_FIELDS(node) == NULL_TREE)
81002 + return false;
81003 +
81004 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
81005 + gcc_assert(!TYPE_READONLY(node));
81006 + deconstify_type(node);
81007 + return false;
81008 + }
81009 +
81010 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
81011 + tree type = TREE_TYPE(field);
81012 + enum tree_code code = TREE_CODE(type);
81013 + if (code == RECORD_TYPE || code == UNION_TYPE) {
81014 + if (!(walk_struct(type)))
81015 + return false;
81016 + } else if (!is_fptr(field) && !TREE_READONLY(field))
81017 + return false;
81018 + }
81019 + return true;
81020 +}
81021 +
81022 +static void finish_type(void *event_data, void *data)
81023 +{
81024 + tree type = (tree)event_data;
81025 +
81026 + if (type == NULL_TREE)
81027 + return;
81028 +
81029 + if (TYPE_READONLY(type))
81030 + return;
81031 +
81032 + if (walk_struct(type))
81033 + constify_type(type);
81034 +}
81035 +
81036 +static unsigned int check_local_variables(void);
81037 +
81038 +struct gimple_opt_pass pass_local_variable = {
81039 + {
81040 + .type = GIMPLE_PASS,
81041 + .name = "check_local_variables",
81042 + .gate = NULL,
81043 + .execute = check_local_variables,
81044 + .sub = NULL,
81045 + .next = NULL,
81046 + .static_pass_number = 0,
81047 + .tv_id = TV_NONE,
81048 + .properties_required = 0,
81049 + .properties_provided = 0,
81050 + .properties_destroyed = 0,
81051 + .todo_flags_start = 0,
81052 + .todo_flags_finish = 0
81053 + }
81054 +};
81055 +
81056 +static unsigned int check_local_variables(void)
81057 +{
81058 + tree var;
81059 + referenced_var_iterator rvi;
81060 +
81061 +#if BUILDING_GCC_VERSION == 4005
81062 + FOR_EACH_REFERENCED_VAR(var, rvi) {
81063 +#else
81064 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
81065 +#endif
81066 + tree type = TREE_TYPE(var);
81067 +
81068 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
81069 + continue;
81070 +
81071 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
81072 + continue;
81073 +
81074 + if (!TYPE_READONLY(type))
81075 + continue;
81076 +
81077 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
81078 +// continue;
81079 +
81080 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
81081 +// continue;
81082 +
81083 + if (walk_struct(type)) {
81084 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
81085 + return 1;
81086 + }
81087 + }
81088 + return 0;
81089 +}
81090 +
81091 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81092 +{
81093 + const char * const plugin_name = plugin_info->base_name;
81094 + const int argc = plugin_info->argc;
81095 + const struct plugin_argument * const argv = plugin_info->argv;
81096 + int i;
81097 + bool constify = true;
81098 +
81099 + struct register_pass_info local_variable_pass_info = {
81100 + .pass = &pass_local_variable.pass,
81101 + .reference_pass_name = "*referenced_vars",
81102 + .ref_pass_instance_number = 1,
81103 + .pos_op = PASS_POS_INSERT_AFTER
81104 + };
81105 +
81106 + if (!plugin_default_version_check(version, &gcc_version)) {
81107 + error(G_("incompatible gcc/plugin versions"));
81108 + return 1;
81109 + }
81110 +
81111 + for (i = 0; i < argc; ++i) {
81112 + if (!(strcmp(argv[i].key, "no-constify"))) {
81113 + constify = false;
81114 + continue;
81115 + }
81116 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81117 + }
81118 +
81119 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
81120 + if (constify) {
81121 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
81122 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
81123 + }
81124 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
81125 +
81126 + return 0;
81127 +}
81128 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
81129 new file mode 100644
81130 index 0000000..a0fe8b2
81131 --- /dev/null
81132 +++ b/tools/gcc/generate_size_overflow_hash.sh
81133 @@ -0,0 +1,94 @@
81134 +#!/bin/bash
81135 +
81136 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
81137 +
81138 +header1="size_overflow_hash.h"
81139 +database="size_overflow_hash.data"
81140 +n=65536
81141 +
81142 +usage() {
81143 +cat <<EOF
81144 +usage: $0 options
81145 +OPTIONS:
81146 + -h|--help help
81147 + -o header file
81148 + -d database file
81149 + -n hash array size
81150 +EOF
81151 + return 0
81152 +}
81153 +
81154 +while true
81155 +do
81156 + case "$1" in
81157 + -h|--help) usage && exit 0;;
81158 + -n) n=$2; shift 2;;
81159 + -o) header1="$2"; shift 2;;
81160 + -d) database="$2"; shift 2;;
81161 + --) shift 1; break ;;
81162 + *) break ;;
81163 + esac
81164 +done
81165 +
81166 +create_defines() {
81167 + for i in `seq 1 10`
81168 + do
81169 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
81170 + done
81171 + echo >> "$header1"
81172 +}
81173 +
81174 +create_structs () {
81175 + rm -f "$header1"
81176 +
81177 + create_defines
81178 +
81179 + cat "$database" | while read data
81180 + do
81181 + data_array=($data)
81182 + struct_hash_name="${data_array[0]}"
81183 + funcn="${data_array[1]}"
81184 + params="${data_array[2]}"
81185 + next="${data_array[5]}"
81186 +
81187 + echo "struct size_overflow_hash $struct_hash_name = {" >> "$header1"
81188 +
81189 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
81190 + echo -en "\t.param\t= " >> "$header1"
81191 + line=
81192 + for param_num in ${params//-/ };
81193 + do
81194 + line="${line}PARAM"$param_num"|"
81195 + done
81196 +
81197 + echo -e "${line%?},\n};\n" >> "$header1"
81198 + done
81199 +}
81200 +
81201 +create_headers () {
81202 + echo "struct size_overflow_hash *size_overflow_hash[$n] = {" >> "$header1"
81203 +}
81204 +
81205 +create_array_elements () {
81206 + index=0
81207 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
81208 + do
81209 + data_array=($data)
81210 + i="${data_array[3]}"
81211 + hash="${data_array[4]}"
81212 + while [[ $index -lt $i ]]
81213 + do
81214 + echo -e "\t["$index"]\t= NULL," >> "$header1"
81215 + index=$(($index + 1))
81216 + done
81217 + index=$(($index + 1))
81218 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
81219 + done
81220 + echo '};' >> $header1
81221 +}
81222 +
81223 +create_structs
81224 +create_headers
81225 +create_array_elements
81226 +
81227 +exit 0
81228 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
81229 new file mode 100644
81230 index 0000000..a86e422
81231 --- /dev/null
81232 +++ b/tools/gcc/kallocstat_plugin.c
81233 @@ -0,0 +1,167 @@
81234 +/*
81235 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81236 + * Licensed under the GPL v2
81237 + *
81238 + * Note: the choice of the license means that the compilation process is
81239 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81240 + * but for the kernel it doesn't matter since it doesn't link against
81241 + * any of the gcc libraries
81242 + *
81243 + * gcc plugin to find the distribution of k*alloc sizes
81244 + *
81245 + * TODO:
81246 + *
81247 + * BUGS:
81248 + * - none known
81249 + */
81250 +#include "gcc-plugin.h"
81251 +#include "config.h"
81252 +#include "system.h"
81253 +#include "coretypes.h"
81254 +#include "tree.h"
81255 +#include "tree-pass.h"
81256 +#include "flags.h"
81257 +#include "intl.h"
81258 +#include "toplev.h"
81259 +#include "plugin.h"
81260 +//#include "expr.h" where are you...
81261 +#include "diagnostic.h"
81262 +#include "plugin-version.h"
81263 +#include "tm.h"
81264 +#include "function.h"
81265 +#include "basic-block.h"
81266 +#include "gimple.h"
81267 +#include "rtl.h"
81268 +#include "emit-rtl.h"
81269 +
81270 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81271 +
81272 +int plugin_is_GPL_compatible;
81273 +
81274 +static const char * const kalloc_functions[] = {
81275 + "__kmalloc",
81276 + "kmalloc",
81277 + "kmalloc_large",
81278 + "kmalloc_node",
81279 + "kmalloc_order",
81280 + "kmalloc_order_trace",
81281 + "kmalloc_slab",
81282 + "kzalloc",
81283 + "kzalloc_node",
81284 +};
81285 +
81286 +static struct plugin_info kallocstat_plugin_info = {
81287 + .version = "201111150100",
81288 +};
81289 +
81290 +static unsigned int execute_kallocstat(void);
81291 +
81292 +static struct gimple_opt_pass kallocstat_pass = {
81293 + .pass = {
81294 + .type = GIMPLE_PASS,
81295 + .name = "kallocstat",
81296 + .gate = NULL,
81297 + .execute = execute_kallocstat,
81298 + .sub = NULL,
81299 + .next = NULL,
81300 + .static_pass_number = 0,
81301 + .tv_id = TV_NONE,
81302 + .properties_required = 0,
81303 + .properties_provided = 0,
81304 + .properties_destroyed = 0,
81305 + .todo_flags_start = 0,
81306 + .todo_flags_finish = 0
81307 + }
81308 +};
81309 +
81310 +static bool is_kalloc(const char *fnname)
81311 +{
81312 + size_t i;
81313 +
81314 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
81315 + if (!strcmp(fnname, kalloc_functions[i]))
81316 + return true;
81317 + return false;
81318 +}
81319 +
81320 +static unsigned int execute_kallocstat(void)
81321 +{
81322 + basic_block bb;
81323 +
81324 + // 1. loop through BBs and GIMPLE statements
81325 + FOR_EACH_BB(bb) {
81326 + gimple_stmt_iterator gsi;
81327 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81328 + // gimple match:
81329 + tree fndecl, size;
81330 + gimple call_stmt;
81331 + const char *fnname;
81332 +
81333 + // is it a call
81334 + call_stmt = gsi_stmt(gsi);
81335 + if (!is_gimple_call(call_stmt))
81336 + continue;
81337 + fndecl = gimple_call_fndecl(call_stmt);
81338 + if (fndecl == NULL_TREE)
81339 + continue;
81340 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
81341 + continue;
81342 +
81343 + // is it a call to k*alloc
81344 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
81345 + if (!is_kalloc(fnname))
81346 + continue;
81347 +
81348 + // is the size arg the result of a simple const assignment
81349 + size = gimple_call_arg(call_stmt, 0);
81350 + while (true) {
81351 + gimple def_stmt;
81352 + expanded_location xloc;
81353 + size_t size_val;
81354 +
81355 + if (TREE_CODE(size) != SSA_NAME)
81356 + break;
81357 + def_stmt = SSA_NAME_DEF_STMT(size);
81358 + if (!def_stmt || !is_gimple_assign(def_stmt))
81359 + break;
81360 + if (gimple_num_ops(def_stmt) != 2)
81361 + break;
81362 + size = gimple_assign_rhs1(def_stmt);
81363 + if (!TREE_CONSTANT(size))
81364 + continue;
81365 + xloc = expand_location(gimple_location(def_stmt));
81366 + if (!xloc.file)
81367 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
81368 + size_val = TREE_INT_CST_LOW(size);
81369 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
81370 + break;
81371 + }
81372 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81373 +//debug_tree(gimple_call_fn(call_stmt));
81374 +//print_node(stderr, "pax", fndecl, 4);
81375 + }
81376 + }
81377 +
81378 + return 0;
81379 +}
81380 +
81381 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81382 +{
81383 + const char * const plugin_name = plugin_info->base_name;
81384 + struct register_pass_info kallocstat_pass_info = {
81385 + .pass = &kallocstat_pass.pass,
81386 + .reference_pass_name = "ssa",
81387 + .ref_pass_instance_number = 1,
81388 + .pos_op = PASS_POS_INSERT_AFTER
81389 + };
81390 +
81391 + if (!plugin_default_version_check(version, &gcc_version)) {
81392 + error(G_("incompatible gcc/plugin versions"));
81393 + return 1;
81394 + }
81395 +
81396 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
81397 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
81398 +
81399 + return 0;
81400 +}
81401 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
81402 new file mode 100644
81403 index 0000000..98011fa
81404 --- /dev/null
81405 +++ b/tools/gcc/kernexec_plugin.c
81406 @@ -0,0 +1,427 @@
81407 +/*
81408 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81409 + * Licensed under the GPL v2
81410 + *
81411 + * Note: the choice of the license means that the compilation process is
81412 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81413 + * but for the kernel it doesn't matter since it doesn't link against
81414 + * any of the gcc libraries
81415 + *
81416 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
81417 + *
81418 + * TODO:
81419 + *
81420 + * BUGS:
81421 + * - none known
81422 + */
81423 +#include "gcc-plugin.h"
81424 +#include "config.h"
81425 +#include "system.h"
81426 +#include "coretypes.h"
81427 +#include "tree.h"
81428 +#include "tree-pass.h"
81429 +#include "flags.h"
81430 +#include "intl.h"
81431 +#include "toplev.h"
81432 +#include "plugin.h"
81433 +//#include "expr.h" where are you...
81434 +#include "diagnostic.h"
81435 +#include "plugin-version.h"
81436 +#include "tm.h"
81437 +#include "function.h"
81438 +#include "basic-block.h"
81439 +#include "gimple.h"
81440 +#include "rtl.h"
81441 +#include "emit-rtl.h"
81442 +#include "tree-flow.h"
81443 +
81444 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81445 +extern rtx emit_move_insn(rtx x, rtx y);
81446 +
81447 +int plugin_is_GPL_compatible;
81448 +
81449 +static struct plugin_info kernexec_plugin_info = {
81450 + .version = "201111291120",
81451 + .help = "method=[bts|or]\tinstrumentation method\n"
81452 +};
81453 +
81454 +static unsigned int execute_kernexec_reload(void);
81455 +static unsigned int execute_kernexec_fptr(void);
81456 +static unsigned int execute_kernexec_retaddr(void);
81457 +static bool kernexec_cmodel_check(void);
81458 +
81459 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
81460 +static void (*kernexec_instrument_retaddr)(rtx);
81461 +
81462 +static struct gimple_opt_pass kernexec_reload_pass = {
81463 + .pass = {
81464 + .type = GIMPLE_PASS,
81465 + .name = "kernexec_reload",
81466 + .gate = kernexec_cmodel_check,
81467 + .execute = execute_kernexec_reload,
81468 + .sub = NULL,
81469 + .next = NULL,
81470 + .static_pass_number = 0,
81471 + .tv_id = TV_NONE,
81472 + .properties_required = 0,
81473 + .properties_provided = 0,
81474 + .properties_destroyed = 0,
81475 + .todo_flags_start = 0,
81476 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81477 + }
81478 +};
81479 +
81480 +static struct gimple_opt_pass kernexec_fptr_pass = {
81481 + .pass = {
81482 + .type = GIMPLE_PASS,
81483 + .name = "kernexec_fptr",
81484 + .gate = kernexec_cmodel_check,
81485 + .execute = execute_kernexec_fptr,
81486 + .sub = NULL,
81487 + .next = NULL,
81488 + .static_pass_number = 0,
81489 + .tv_id = TV_NONE,
81490 + .properties_required = 0,
81491 + .properties_provided = 0,
81492 + .properties_destroyed = 0,
81493 + .todo_flags_start = 0,
81494 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81495 + }
81496 +};
81497 +
81498 +static struct rtl_opt_pass kernexec_retaddr_pass = {
81499 + .pass = {
81500 + .type = RTL_PASS,
81501 + .name = "kernexec_retaddr",
81502 + .gate = kernexec_cmodel_check,
81503 + .execute = execute_kernexec_retaddr,
81504 + .sub = NULL,
81505 + .next = NULL,
81506 + .static_pass_number = 0,
81507 + .tv_id = TV_NONE,
81508 + .properties_required = 0,
81509 + .properties_provided = 0,
81510 + .properties_destroyed = 0,
81511 + .todo_flags_start = 0,
81512 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
81513 + }
81514 +};
81515 +
81516 +static bool kernexec_cmodel_check(void)
81517 +{
81518 + tree section;
81519 +
81520 + if (ix86_cmodel != CM_KERNEL)
81521 + return false;
81522 +
81523 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81524 + if (!section || !TREE_VALUE(section))
81525 + return true;
81526 +
81527 + section = TREE_VALUE(TREE_VALUE(section));
81528 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
81529 + return true;
81530 +
81531 + return false;
81532 +}
81533 +
81534 +/*
81535 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
81536 + */
81537 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
81538 +{
81539 + gimple asm_movabs_stmt;
81540 +
81541 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
81542 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
81543 + gimple_asm_set_volatile(asm_movabs_stmt, true);
81544 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
81545 + update_stmt(asm_movabs_stmt);
81546 +}
81547 +
81548 +/*
81549 + * find all asm() stmts that clobber r10 and add a reload of r10
81550 + */
81551 +static unsigned int execute_kernexec_reload(void)
81552 +{
81553 + basic_block bb;
81554 +
81555 + // 1. loop through BBs and GIMPLE statements
81556 + FOR_EACH_BB(bb) {
81557 + gimple_stmt_iterator gsi;
81558 +
81559 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81560 + // gimple match: __asm__ ("" : : : "r10");
81561 + gimple asm_stmt;
81562 + size_t nclobbers;
81563 +
81564 + // is it an asm ...
81565 + asm_stmt = gsi_stmt(gsi);
81566 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
81567 + continue;
81568 +
81569 + // ... clobbering r10
81570 + nclobbers = gimple_asm_nclobbers(asm_stmt);
81571 + while (nclobbers--) {
81572 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
81573 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
81574 + continue;
81575 + kernexec_reload_fptr_mask(&gsi);
81576 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
81577 + break;
81578 + }
81579 + }
81580 + }
81581 +
81582 + return 0;
81583 +}
81584 +
81585 +/*
81586 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
81587 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
81588 + */
81589 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
81590 +{
81591 + gimple assign_intptr, assign_new_fptr, call_stmt;
81592 + tree intptr, old_fptr, new_fptr, kernexec_mask;
81593 +
81594 + call_stmt = gsi_stmt(*gsi);
81595 + old_fptr = gimple_call_fn(call_stmt);
81596 +
81597 + // create temporary unsigned long variable used for bitops and cast fptr to it
81598 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
81599 + add_referenced_var(intptr);
81600 + mark_sym_for_renaming(intptr);
81601 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
81602 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81603 + update_stmt(assign_intptr);
81604 +
81605 + // apply logical or to temporary unsigned long and bitmask
81606 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
81607 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
81608 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
81609 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81610 + update_stmt(assign_intptr);
81611 +
81612 + // cast temporary unsigned long back to a temporary fptr variable
81613 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
81614 + add_referenced_var(new_fptr);
81615 + mark_sym_for_renaming(new_fptr);
81616 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
81617 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
81618 + update_stmt(assign_new_fptr);
81619 +
81620 + // replace call stmt fn with the new fptr
81621 + gimple_call_set_fn(call_stmt, new_fptr);
81622 + update_stmt(call_stmt);
81623 +}
81624 +
81625 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
81626 +{
81627 + gimple asm_or_stmt, call_stmt;
81628 + tree old_fptr, new_fptr, input, output;
81629 + VEC(tree, gc) *inputs = NULL;
81630 + VEC(tree, gc) *outputs = NULL;
81631 +
81632 + call_stmt = gsi_stmt(*gsi);
81633 + old_fptr = gimple_call_fn(call_stmt);
81634 +
81635 + // create temporary fptr variable
81636 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
81637 + add_referenced_var(new_fptr);
81638 + mark_sym_for_renaming(new_fptr);
81639 +
81640 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81641 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81642 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81643 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81644 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81645 + VEC_safe_push(tree, gc, inputs, input);
81646 + VEC_safe_push(tree, gc, outputs, output);
81647 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81648 + gimple_asm_set_volatile(asm_or_stmt, true);
81649 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
81650 + update_stmt(asm_or_stmt);
81651 +
81652 + // replace call stmt fn with the new fptr
81653 + gimple_call_set_fn(call_stmt, new_fptr);
81654 + update_stmt(call_stmt);
81655 +}
81656 +
81657 +/*
81658 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81659 + */
81660 +static unsigned int execute_kernexec_fptr(void)
81661 +{
81662 + basic_block bb;
81663 +
81664 + // 1. loop through BBs and GIMPLE statements
81665 + FOR_EACH_BB(bb) {
81666 + gimple_stmt_iterator gsi;
81667 +
81668 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81669 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81670 + tree fn;
81671 + gimple call_stmt;
81672 +
81673 + // is it a call ...
81674 + call_stmt = gsi_stmt(gsi);
81675 + if (!is_gimple_call(call_stmt))
81676 + continue;
81677 + fn = gimple_call_fn(call_stmt);
81678 + if (TREE_CODE(fn) == ADDR_EXPR)
81679 + continue;
81680 + if (TREE_CODE(fn) != SSA_NAME)
81681 + gcc_unreachable();
81682 +
81683 + // ... through a function pointer
81684 + fn = SSA_NAME_VAR(fn);
81685 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81686 + continue;
81687 + fn = TREE_TYPE(fn);
81688 + if (TREE_CODE(fn) != POINTER_TYPE)
81689 + continue;
81690 + fn = TREE_TYPE(fn);
81691 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81692 + continue;
81693 +
81694 + kernexec_instrument_fptr(&gsi);
81695 +
81696 +//debug_tree(gimple_call_fn(call_stmt));
81697 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81698 + }
81699 + }
81700 +
81701 + return 0;
81702 +}
81703 +
81704 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81705 +static void kernexec_instrument_retaddr_bts(rtx insn)
81706 +{
81707 + rtx btsq;
81708 + rtvec argvec, constraintvec, labelvec;
81709 + int line;
81710 +
81711 + // create asm volatile("btsq $63,(%%rsp)":::)
81712 + argvec = rtvec_alloc(0);
81713 + constraintvec = rtvec_alloc(0);
81714 + labelvec = rtvec_alloc(0);
81715 + line = expand_location(RTL_LOCATION(insn)).line;
81716 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81717 + MEM_VOLATILE_P(btsq) = 1;
81718 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81719 + emit_insn_before(btsq, insn);
81720 +}
81721 +
81722 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81723 +static void kernexec_instrument_retaddr_or(rtx insn)
81724 +{
81725 + rtx orq;
81726 + rtvec argvec, constraintvec, labelvec;
81727 + int line;
81728 +
81729 + // create asm volatile("orq %%r10,(%%rsp)":::)
81730 + argvec = rtvec_alloc(0);
81731 + constraintvec = rtvec_alloc(0);
81732 + labelvec = rtvec_alloc(0);
81733 + line = expand_location(RTL_LOCATION(insn)).line;
81734 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81735 + MEM_VOLATILE_P(orq) = 1;
81736 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81737 + emit_insn_before(orq, insn);
81738 +}
81739 +
81740 +/*
81741 + * find all asm level function returns and forcibly set the highest bit of the return address
81742 + */
81743 +static unsigned int execute_kernexec_retaddr(void)
81744 +{
81745 + rtx insn;
81746 +
81747 + // 1. find function returns
81748 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81749 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81750 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81751 + rtx body;
81752 +
81753 + // is it a retn
81754 + if (!JUMP_P(insn))
81755 + continue;
81756 + body = PATTERN(insn);
81757 + if (GET_CODE(body) == PARALLEL)
81758 + body = XVECEXP(body, 0, 0);
81759 + if (GET_CODE(body) != RETURN)
81760 + continue;
81761 + kernexec_instrument_retaddr(insn);
81762 + }
81763 +
81764 +// print_simple_rtl(stderr, get_insns());
81765 +// print_rtl(stderr, get_insns());
81766 +
81767 + return 0;
81768 +}
81769 +
81770 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81771 +{
81772 + const char * const plugin_name = plugin_info->base_name;
81773 + const int argc = plugin_info->argc;
81774 + const struct plugin_argument * const argv = plugin_info->argv;
81775 + int i;
81776 + struct register_pass_info kernexec_reload_pass_info = {
81777 + .pass = &kernexec_reload_pass.pass,
81778 + .reference_pass_name = "ssa",
81779 + .ref_pass_instance_number = 1,
81780 + .pos_op = PASS_POS_INSERT_AFTER
81781 + };
81782 + struct register_pass_info kernexec_fptr_pass_info = {
81783 + .pass = &kernexec_fptr_pass.pass,
81784 + .reference_pass_name = "ssa",
81785 + .ref_pass_instance_number = 1,
81786 + .pos_op = PASS_POS_INSERT_AFTER
81787 + };
81788 + struct register_pass_info kernexec_retaddr_pass_info = {
81789 + .pass = &kernexec_retaddr_pass.pass,
81790 + .reference_pass_name = "pro_and_epilogue",
81791 + .ref_pass_instance_number = 1,
81792 + .pos_op = PASS_POS_INSERT_AFTER
81793 + };
81794 +
81795 + if (!plugin_default_version_check(version, &gcc_version)) {
81796 + error(G_("incompatible gcc/plugin versions"));
81797 + return 1;
81798 + }
81799 +
81800 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81801 +
81802 + if (TARGET_64BIT == 0)
81803 + return 0;
81804 +
81805 + for (i = 0; i < argc; ++i) {
81806 + if (!strcmp(argv[i].key, "method")) {
81807 + if (!argv[i].value) {
81808 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81809 + continue;
81810 + }
81811 + if (!strcmp(argv[i].value, "bts")) {
81812 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81813 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81814 + } else if (!strcmp(argv[i].value, "or")) {
81815 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81816 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81817 + fix_register("r10", 1, 1);
81818 + } else
81819 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81820 + continue;
81821 + }
81822 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81823 + }
81824 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81825 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81826 +
81827 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
81828 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
81829 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81830 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81831 +
81832 + return 0;
81833 +}
81834 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
81835 new file mode 100644
81836 index 0000000..b8008f7
81837 --- /dev/null
81838 +++ b/tools/gcc/latent_entropy_plugin.c
81839 @@ -0,0 +1,295 @@
81840 +/*
81841 + * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
81842 + * Licensed under the GPL v2
81843 + *
81844 + * Note: the choice of the license means that the compilation process is
81845 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81846 + * but for the kernel it doesn't matter since it doesn't link against
81847 + * any of the gcc libraries
81848 + *
81849 + * gcc plugin to help generate a little bit of entropy from program state,
81850 + * used during boot in the kernel
81851 + *
81852 + * TODO:
81853 + * - add ipa pass to identify not explicitly marked candidate functions
81854 + * - mix in more program state (function arguments/return values, loop variables, etc)
81855 + * - more instrumentation control via attribute parameters
81856 + *
81857 + * BUGS:
81858 + * - LTO needs -flto-partition=none for now
81859 + */
81860 +#include "gcc-plugin.h"
81861 +#include "config.h"
81862 +#include "system.h"
81863 +#include "coretypes.h"
81864 +#include "tree.h"
81865 +#include "tree-pass.h"
81866 +#include "flags.h"
81867 +#include "intl.h"
81868 +#include "toplev.h"
81869 +#include "plugin.h"
81870 +//#include "expr.h" where are you...
81871 +#include "diagnostic.h"
81872 +#include "plugin-version.h"
81873 +#include "tm.h"
81874 +#include "function.h"
81875 +#include "basic-block.h"
81876 +#include "gimple.h"
81877 +#include "rtl.h"
81878 +#include "emit-rtl.h"
81879 +#include "tree-flow.h"
81880 +
81881 +int plugin_is_GPL_compatible;
81882 +
81883 +static tree latent_entropy_decl;
81884 +
81885 +static struct plugin_info latent_entropy_plugin_info = {
81886 + .version = "201207271820",
81887 + .help = NULL
81888 +};
81889 +
81890 +static unsigned int execute_latent_entropy(void);
81891 +static bool gate_latent_entropy(void);
81892 +
81893 +static struct gimple_opt_pass latent_entropy_pass = {
81894 + .pass = {
81895 + .type = GIMPLE_PASS,
81896 + .name = "latent_entropy",
81897 + .gate = gate_latent_entropy,
81898 + .execute = execute_latent_entropy,
81899 + .sub = NULL,
81900 + .next = NULL,
81901 + .static_pass_number = 0,
81902 + .tv_id = TV_NONE,
81903 + .properties_required = PROP_gimple_leh | PROP_cfg,
81904 + .properties_provided = 0,
81905 + .properties_destroyed = 0,
81906 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
81907 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
81908 + }
81909 +};
81910 +
81911 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
81912 +{
81913 + if (TREE_CODE(*node) != FUNCTION_DECL) {
81914 + *no_add_attrs = true;
81915 + error("%qE attribute only applies to functions", name);
81916 + }
81917 + return NULL_TREE;
81918 +}
81919 +
81920 +static struct attribute_spec latent_entropy_attr = {
81921 + .name = "latent_entropy",
81922 + .min_length = 0,
81923 + .max_length = 0,
81924 + .decl_required = true,
81925 + .type_required = false,
81926 + .function_type_required = false,
81927 + .handler = handle_latent_entropy_attribute,
81928 +#if BUILDING_GCC_VERSION >= 4007
81929 + .affects_type_identity = false
81930 +#endif
81931 +};
81932 +
81933 +static void register_attributes(void *event_data, void *data)
81934 +{
81935 + register_attribute(&latent_entropy_attr);
81936 +}
81937 +
81938 +static bool gate_latent_entropy(void)
81939 +{
81940 + tree latent_entropy_attr;
81941 +
81942 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
81943 + return latent_entropy_attr != NULL_TREE;
81944 +}
81945 +
81946 +static unsigned HOST_WIDE_INT seed;
81947 +static unsigned HOST_WIDE_INT get_random_const(void)
81948 +{
81949 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
81950 + return seed;
81951 +}
81952 +
81953 +static enum tree_code get_op(tree *rhs)
81954 +{
81955 + static enum tree_code op;
81956 + unsigned HOST_WIDE_INT random_const;
81957 +
81958 + random_const = get_random_const();
81959 +
81960 + switch (op) {
81961 + case BIT_XOR_EXPR:
81962 + op = PLUS_EXPR;
81963 + break;
81964 +
81965 + case PLUS_EXPR:
81966 + if (rhs) {
81967 + op = LROTATE_EXPR;
81968 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
81969 + break;
81970 + }
81971 +
81972 + case LROTATE_EXPR:
81973 + default:
81974 + op = BIT_XOR_EXPR;
81975 + break;
81976 + }
81977 + if (rhs)
81978 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
81979 + return op;
81980 +}
81981 +
81982 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
81983 +{
81984 + gimple_stmt_iterator gsi;
81985 + gimple assign;
81986 + tree addxorrol, rhs;
81987 + enum tree_code op;
81988 +
81989 + op = get_op(&rhs);
81990 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
81991 + assign = gimple_build_assign(local_entropy, addxorrol);
81992 + find_referenced_vars_in(assign);
81993 +//debug_bb(bb);
81994 + gsi = gsi_after_labels(bb);
81995 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
81996 + update_stmt(assign);
81997 +}
81998 +
81999 +static void perturb_latent_entropy(basic_block bb, tree rhs)
82000 +{
82001 + gimple_stmt_iterator gsi;
82002 + gimple assign;
82003 + tree addxorrol, temp;
82004 +
82005 + // 1. create temporary copy of latent_entropy
82006 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
82007 + add_referenced_var(temp);
82008 + mark_sym_for_renaming(temp);
82009 +
82010 + // 2. read...
82011 + assign = gimple_build_assign(temp, latent_entropy_decl);
82012 + find_referenced_vars_in(assign);
82013 + gsi = gsi_after_labels(bb);
82014 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
82015 + update_stmt(assign);
82016 +
82017 + // 3. ...modify...
82018 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
82019 + assign = gimple_build_assign(temp, addxorrol);
82020 + find_referenced_vars_in(assign);
82021 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
82022 + update_stmt(assign);
82023 +
82024 + // 4. ...write latent_entropy
82025 + assign = gimple_build_assign(latent_entropy_decl, temp);
82026 + find_referenced_vars_in(assign);
82027 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
82028 + update_stmt(assign);
82029 +}
82030 +
82031 +static unsigned int execute_latent_entropy(void)
82032 +{
82033 + basic_block bb;
82034 + gimple assign;
82035 + gimple_stmt_iterator gsi;
82036 + tree local_entropy;
82037 +
82038 + if (!latent_entropy_decl) {
82039 + struct varpool_node *node;
82040 +
82041 + for (node = varpool_nodes; node; node = node->next) {
82042 + tree var = node->decl;
82043 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
82044 + continue;
82045 + latent_entropy_decl = var;
82046 +// debug_tree(var);
82047 + break;
82048 + }
82049 + if (!latent_entropy_decl) {
82050 +// debug_tree(current_function_decl);
82051 + return 0;
82052 + }
82053 + }
82054 +
82055 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
82056 +
82057 + // 1. create local entropy variable
82058 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
82059 + add_referenced_var(local_entropy);
82060 + mark_sym_for_renaming(local_entropy);
82061 +
82062 + // 2. initialize local entropy variable
82063 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
82064 + if (dom_info_available_p(CDI_DOMINATORS))
82065 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
82066 + gsi = gsi_start_bb(bb);
82067 +
82068 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
82069 +// gimple_set_location(assign, loc);
82070 + find_referenced_vars_in(assign);
82071 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
82072 + update_stmt(assign);
82073 + bb = bb->next_bb;
82074 +
82075 + // 3. instrument each BB with an operation on the local entropy variable
82076 + while (bb != EXIT_BLOCK_PTR) {
82077 + perturb_local_entropy(bb, local_entropy);
82078 + bb = bb->next_bb;
82079 + };
82080 +
82081 + // 4. mix local entropy into the global entropy variable
82082 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
82083 + return 0;
82084 +}
82085 +
82086 +static void start_unit_callback(void *gcc_data, void *user_data)
82087 +{
82088 +#if BUILDING_GCC_VERSION >= 4007
82089 + seed = get_random_seed(false);
82090 +#else
82091 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
82092 + seed *= seed;
82093 +#endif
82094 +
82095 + if (in_lto_p)
82096 + return;
82097 +
82098 + // extern u64 latent_entropy
82099 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
82100 +
82101 + TREE_STATIC(latent_entropy_decl) = 1;
82102 + TREE_PUBLIC(latent_entropy_decl) = 1;
82103 + TREE_USED(latent_entropy_decl) = 1;
82104 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
82105 + DECL_EXTERNAL(latent_entropy_decl) = 1;
82106 + DECL_ARTIFICIAL(latent_entropy_decl) = 0;
82107 + DECL_INITIAL(latent_entropy_decl) = NULL;
82108 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
82109 +// varpool_finalize_decl(latent_entropy_decl);
82110 +// varpool_mark_needed_node(latent_entropy_decl);
82111 +}
82112 +
82113 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
82114 +{
82115 + const char * const plugin_name = plugin_info->base_name;
82116 + struct register_pass_info latent_entropy_pass_info = {
82117 + .pass = &latent_entropy_pass.pass,
82118 + .reference_pass_name = "optimized",
82119 + .ref_pass_instance_number = 1,
82120 + .pos_op = PASS_POS_INSERT_BEFORE
82121 + };
82122 +
82123 + if (!plugin_default_version_check(version, &gcc_version)) {
82124 + error(G_("incompatible gcc/plugin versions"));
82125 + return 1;
82126 + }
82127 +
82128 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
82129 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
82130 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
82131 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
82132 +
82133 + return 0;
82134 +}
82135 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
82136 new file mode 100644
82137 index 0000000..daaa86c
82138 --- /dev/null
82139 +++ b/tools/gcc/size_overflow_hash.data
82140 @@ -0,0 +1,2486 @@
82141 +_000001_hash alloc_dr 2 65495 _000001_hash NULL
82142 +_000002_hash __copy_from_user 3 10918 _000002_hash NULL
82143 +_000003_hash copy_from_user 3 17559 _000003_hash NULL
82144 +_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL
82145 +_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL
82146 +_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL
82147 +_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL
82148 +_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL
82149 +_000009_hash kmalloc 1 60432 _002597_hash NULL nohasharray
82150 +_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL
82151 +_000012_hash kmalloc_slab 1 11917 _000012_hash NULL
82152 +_000013_hash kmemdup 2 64015 _000013_hash NULL
82153 +_000014_hash __krealloc 2 14857 _000331_hash NULL nohasharray
82154 +_000015_hash memdup_user 2 59590 _000015_hash NULL
82155 +_000016_hash module_alloc 1 63630 _000016_hash NULL
82156 +_000017_hash read_default_ldt 2 14302 _000017_hash NULL
82157 +_000018_hash read_kcore 3 63488 _000018_hash NULL
82158 +_000019_hash read_ldt 2 47570 _000019_hash NULL
82159 +_000020_hash read_zero 3 19366 _000020_hash NULL
82160 +_000021_hash __vmalloc_node 1 39308 _000021_hash NULL
82161 +_000022_hash vm_map_ram 2 23078 _001054_hash NULL nohasharray
82162 +_000023_hash aa_simple_write_to_buffer 4-3 49683 _000023_hash NULL
82163 +_000024_hash ablkcipher_copy_iv 3 64140 _000024_hash NULL
82164 +_000025_hash ablkcipher_next_slow 4 47274 _000025_hash NULL
82165 +_000026_hash acpi_battery_write_alarm 3 1240 _000026_hash NULL
82166 +_000027_hash acpi_os_allocate 1 14892 _000027_hash NULL
82167 +_000028_hash acpi_system_write_wakeup_device 3 34853 _000028_hash NULL
82168 +_000029_hash adu_write 3 30487 _000029_hash NULL
82169 +_000030_hash aer_inject_write 3 52399 _000030_hash NULL
82170 +_000031_hash afs_alloc_flat_call 2-3 36399 _000031_hash NULL
82171 +_000033_hash afs_proc_cells_write 3 61139 _000033_hash NULL
82172 +_000034_hash afs_proc_rootcell_write 3 15822 _000034_hash NULL
82173 +_000035_hash agp_3_5_isochronous_node_enable 3 49465 _000035_hash NULL
82174 +_000036_hash agp_alloc_page_array 1 22554 _000036_hash NULL
82175 +_000037_hash ah_alloc_tmp 2 54378 _000037_hash NULL
82176 +_000038_hash ahash_setkey_unaligned 3 33521 _000038_hash NULL
82177 +_000039_hash alg_setkey 3 31485 _000039_hash NULL
82178 +_000040_hash aligned_kmalloc 1 3628 _000040_hash NULL
82179 +_000041_hash alloc_context 1 3194 _000041_hash NULL
82180 +_000042_hash alloc_ep_req 2 54860 _000042_hash NULL
82181 +_000043_hash alloc_fdmem 1 27083 _000043_hash NULL
82182 +_000044_hash alloc_flex_gd 1 57259 _000044_hash NULL
82183 +_000045_hash alloc_sglist 1-3-2 22960 _000045_hash NULL
82184 +_000046_hash aoedev_flush 2 44398 _000046_hash NULL
82185 +_000047_hash append_to_buffer 3 63550 _000047_hash NULL
82186 +_000048_hash asix_read_cmd 5 13245 _000048_hash NULL
82187 +_000049_hash asix_write_cmd 5 58192 _000049_hash NULL
82188 +_000050_hash asn1_octets_decode 2 9991 _000050_hash NULL
82189 +_000051_hash asn1_oid_decode 2 4999 _000051_hash NULL
82190 +_000052_hash at76_set_card_command 4 4471 _000052_hash NULL
82191 +_000053_hash ath6kl_add_bss_if_needed 6 24317 _000053_hash NULL
82192 +_000054_hash ath6kl_debug_roam_tbl_event 3 5224 _000054_hash NULL
82193 +_000055_hash ath6kl_mgmt_powersave_ap 6 13791 _000055_hash NULL
82194 +_000056_hash ath6kl_send_go_probe_resp 3 21113 _000056_hash NULL
82195 +_000057_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000057_hash NULL
82196 +_000058_hash ath6kl_set_assoc_req_ies 3 43185 _000058_hash NULL
82197 +_000059_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000059_hash NULL
82198 +_000060_hash ath6kl_wmi_send_action_cmd 7 58860 _000060_hash NULL
82199 +_000061_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000061_hash NULL
82200 +_000062_hash attach_hdlc_protocol 3 19986 _000062_hash NULL
82201 +_000063_hash audio_write 4 54261 _001597_hash NULL nohasharray
82202 +_000064_hash audit_unpack_string 3 13748 _000064_hash NULL
82203 +_000065_hash av7110_vbi_write 3 34384 _000065_hash NULL
82204 +_000066_hash ax25_setsockopt 5 42740 _000066_hash NULL
82205 +_000067_hash b43_debugfs_write 3 34838 _000067_hash NULL
82206 +_000068_hash b43legacy_debugfs_write 3 28556 _000068_hash NULL
82207 +_000069_hash bch_alloc 1 4593 _000069_hash NULL
82208 +_000070_hash befs_nls2utf 3 17163 _000070_hash NULL
82209 +_000071_hash befs_utf2nls 3 25628 _000071_hash NULL
82210 +_000072_hash bfad_debugfs_write_regrd 3 15218 _000072_hash NULL
82211 +_000073_hash bfad_debugfs_write_regwr 3 61841 _000073_hash NULL
82212 +_000074_hash bio_alloc_map_data 1-2 50782 _000074_hash NULL
82213 +_000076_hash bio_kmalloc 2 54672 _000076_hash NULL
82214 +_000077_hash blkcipher_copy_iv 3 24075 _000077_hash NULL
82215 +_000078_hash blkcipher_next_slow 4 52733 _000078_hash NULL
82216 +_000079_hash bl_pipe_downcall 3 34264 _000079_hash NULL
82217 +_000080_hash bnad_debugfs_write_regrd 3 6706 _000080_hash NULL
82218 +_000081_hash bnad_debugfs_write_regwr 3 57500 _000081_hash NULL
82219 +_000082_hash bnx2fc_cmd_mgr_alloc 2-3 24873 _000082_hash NULL
82220 +_000084_hash bnx2_nvram_write 4 7790 _000084_hash NULL
82221 +_000085_hash brcmf_sdbrcm_downloadvars 3 42064 _000085_hash NULL
82222 +_000086_hash btmrvl_gpiogap_write 3 35053 _000086_hash NULL
82223 +_000087_hash btmrvl_hscfgcmd_write 3 27143 _000087_hash NULL
82224 +_000088_hash btmrvl_hscmd_write 3 27089 _000088_hash NULL
82225 +_000089_hash btmrvl_hsmode_write 3 42252 _000089_hash NULL
82226 +_000090_hash btmrvl_pscmd_write 3 29504 _000090_hash NULL
82227 +_000091_hash btmrvl_psmode_write 3 3703 _000091_hash NULL
82228 +_000092_hash btrfs_alloc_delayed_item 1 11678 _000092_hash NULL
82229 +_000093_hash cache_do_downcall 3 6926 _000093_hash NULL
82230 +_000094_hash cachefiles_cook_key 2 33274 _000094_hash NULL
82231 +_000095_hash cachefiles_daemon_write 3 43535 _000095_hash NULL
82232 +_000096_hash capi_write 3 35104 _000096_hash NULL
82233 +_000097_hash carl9170_debugfs_write 3 50857 _000097_hash NULL
82234 +_000098_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000098_hash NULL
82235 +_000100_hash cciss_proc_write 3 10259 _000100_hash NULL
82236 +_000101_hash cdrom_read_cdda_old 4 27664 _000101_hash NULL
82237 +_000102_hash ceph_alloc_page_vector 1 18710 _000102_hash NULL
82238 +_000103_hash ceph_buffer_new 1 35974 _000103_hash NULL
82239 +_000104_hash ceph_copy_user_to_page_vector 4 656 _000104_hash NULL
82240 +_000105_hash ceph_get_direct_page_vector 2 41917 _000105_hash NULL
82241 +_000106_hash ceph_msg_new 2 5846 _000106_hash NULL
82242 +_000107_hash ceph_setxattr 4 18913 _000107_hash NULL
82243 +_000108_hash cfi_read_pri 3 24366 _000108_hash NULL
82244 +_000109_hash cgroup_write_string 5 10900 _000109_hash NULL
82245 +_000110_hash cgroup_write_X64 5 54514 _000110_hash NULL
82246 +_000111_hash change_xattr 5 61390 _000111_hash NULL
82247 +_000112_hash check_load_and_stores 2 2143 _000112_hash NULL
82248 +_000113_hash cifs_idmap_key_instantiate 3 54503 _000113_hash NULL
82249 +_000114_hash cifs_security_flags_proc_write 3 5484 _000114_hash NULL
82250 +_000115_hash cifs_setxattr 4 23957 _000115_hash NULL
82251 +_000116_hash cifs_spnego_key_instantiate 3 23588 _000116_hash NULL
82252 +_000117_hash ci_ll_write 4 3740 _000117_hash NULL
82253 +_000118_hash cld_pipe_downcall 3 15058 _000118_hash NULL
82254 +_000119_hash clear_refs_write 3 61904 _000119_hash NULL
82255 +_000120_hash clusterip_proc_write 3 44729 _000120_hash NULL
82256 +_000121_hash cm4040_write 3 58079 _000121_hash NULL
82257 +_000122_hash cm_copy_private_data 2 3649 _000122_hash NULL
82258 +_000123_hash cmm_write 3 2896 _000123_hash NULL
82259 +_000124_hash cm_write 3 36858 _000124_hash NULL
82260 +_000125_hash coda_psdev_write 3 1711 _000125_hash NULL
82261 +_000126_hash codec_reg_read_file 3 36280 _000126_hash NULL
82262 +_000127_hash command_file_write 3 31318 _000127_hash NULL
82263 +_000128_hash command_write 3 58841 _000128_hash NULL
82264 +_000129_hash comm_write 3 44537 _001532_hash NULL nohasharray
82265 +_000130_hash concat_writev 3 21451 _000130_hash NULL
82266 +_000131_hash copy_and_check 3 19089 _000131_hash NULL
82267 +_000132_hash copy_from_user_toio 3 31966 _000132_hash NULL
82268 +_000133_hash copy_items 6 50140 _000133_hash NULL
82269 +_000134_hash copy_macs 4 45534 _000134_hash NULL
82270 +_000135_hash __copy_to_user 3 17551 _000135_hash NULL
82271 +_000136_hash copy_vm86_regs_from_user 3 45340 _000136_hash NULL
82272 +_000137_hash cosa_write 3 1774 _000137_hash NULL
82273 +_000138_hash create_entry 2 33479 _000138_hash NULL
82274 +_000139_hash create_queues 2-3 9088 _000139_hash NULL
82275 +_000141_hash create_xattr 5 54106 _000141_hash NULL
82276 +_000142_hash create_xattr_datum 5 33356 _000142_hash NULL
82277 +_000143_hash csum_partial_copy_fromiovecend 3-4 9957 _000143_hash NULL
82278 +_000145_hash ctrl_out 3-5 8712 _000145_hash NULL
82279 +_000147_hash cx24116_writeregN 4 41975 _000147_hash NULL
82280 +_000148_hash cxacru_cm_get_array 4 4412 _000148_hash NULL
82281 +_000149_hash cxgbi_alloc_big_mem 1 4707 _000149_hash NULL
82282 +_000150_hash dac960_user_command_proc_write 3 3071 _000150_hash NULL
82283 +_000151_hash datablob_format 2 39571 _002156_hash NULL nohasharray
82284 +_000152_hash dccp_feat_clone_sp_val 3 11942 _000152_hash NULL
82285 +_000153_hash dccp_setsockopt_ccid 4 30701 _000153_hash NULL
82286 +_000154_hash dccp_setsockopt_cscov 2 37766 _000154_hash NULL
82287 +_000155_hash dccp_setsockopt_service 4 65336 _000155_hash NULL
82288 +_000156_hash ddb_output_write 3 31902 _000156_hash NULL
82289 +_000157_hash ddebug_proc_write 3 18055 _000157_hash NULL
82290 +_000158_hash dev_config 3 8506 _000158_hash NULL
82291 +_000159_hash device_write 3 45156 _000159_hash NULL
82292 +_000160_hash devm_kzalloc 2 4966 _000160_hash NULL
82293 +_000161_hash devres_alloc 2 551 _000161_hash NULL
82294 +_000162_hash dfs_file_write 3 41196 _000162_hash NULL
82295 +_000163_hash direct_entry 3 38836 _000163_hash NULL
82296 +_000164_hash dispatch_proc_write 3 44320 _000164_hash NULL
82297 +_000165_hash diva_os_copy_from_user 4 7792 _000165_hash NULL
82298 +_000166_hash dlm_alloc_pagevec 1 54296 _000166_hash NULL
82299 +_000167_hash dlmfs_file_read 3 28385 _000167_hash NULL
82300 +_000168_hash dlmfs_file_write 3 6892 _000168_hash NULL
82301 +_000169_hash dm_read 3 15674 _000169_hash NULL
82302 +_000170_hash dm_write 3 2513 _000170_hash NULL
82303 +_000171_hash __dn_setsockopt 5 13060 _000171_hash NULL
82304 +_000172_hash dns_query 3 9676 _000172_hash NULL
82305 +_000173_hash dns_resolver_instantiate 3 63314 _000173_hash NULL
82306 +_000174_hash do_add_counters 3 3992 _000174_hash NULL
82307 +_000175_hash __do_config_autodelink 3 58763 _000175_hash NULL
82308 +_000176_hash do_ip_setsockopt 5 41852 _000176_hash NULL
82309 +_000177_hash do_ipv6_setsockopt 5 18215 _000177_hash NULL
82310 +_000178_hash do_ip_vs_set_ctl 4 48641 _000178_hash NULL
82311 +_000179_hash do_kimage_alloc 3 64827 _000179_hash NULL
82312 +_000180_hash do_register_entry 4 29478 _000180_hash NULL
82313 +_000181_hash do_tty_write 5 44896 _000181_hash NULL
82314 +_000182_hash do_update_counters 4 2259 _000182_hash NULL
82315 +_000183_hash dsp_write 2 46218 _000183_hash NULL
82316 +_000184_hash dup_to_netobj 3 26363 _000184_hash NULL
82317 +_000185_hash dvb_aplay 3 56296 _000185_hash NULL
82318 +_000186_hash dvb_ca_en50221_io_write 3 43533 _000186_hash NULL
82319 +_000187_hash dvbdmx_write 3 19423 _000187_hash NULL
82320 +_000188_hash dvb_play 3 50814 _000188_hash NULL
82321 +_000189_hash dw210x_op_rw 6 39915 _000189_hash NULL
82322 +_000190_hash dwc3_link_state_write 3 12641 _000190_hash NULL
82323 +_000191_hash dwc3_mode_write 3 51997 _000191_hash NULL
82324 +_000192_hash dwc3_testmode_write 3 30516 _000192_hash NULL
82325 +_000193_hash ecryptfs_copy_filename 4 11868 _000193_hash NULL
82326 +_000194_hash ecryptfs_miscdev_write 3 26847 _000194_hash NULL
82327 +_000195_hash ecryptfs_send_miscdev 2 64816 _000195_hash NULL
82328 +_000196_hash efx_tsoh_heap_alloc 2 58545 _000196_hash NULL
82329 +_000197_hash emi26_writememory 4 57908 _000197_hash NULL
82330 +_000198_hash emi62_writememory 4 29731 _000198_hash NULL
82331 +_000199_hash encrypted_instantiate 3 3168 _000199_hash NULL
82332 +_000200_hash encrypted_update 3 13414 _000200_hash NULL
82333 +_000201_hash ep0_write 3 14536 _001328_hash NULL nohasharray
82334 +_000202_hash ep_read 3 58813 _000202_hash NULL
82335 +_000203_hash ep_write 3 59008 _000203_hash NULL
82336 +_000204_hash erst_dbg_write 3 46715 _000204_hash NULL
82337 +_000205_hash esp_alloc_tmp 2 40558 _000205_hash NULL
82338 +_000206_hash exofs_read_lookup_dev_table 3 17733 _000206_hash NULL
82339 +_000207_hash ext4_kvmalloc 1 14796 _000207_hash NULL
82340 +_000208_hash ezusb_writememory 4 45976 _000208_hash NULL
82341 +_000209_hash fanotify_write 3 64623 _000209_hash NULL
82342 +_000210_hash fd_copyin 3 56247 _000210_hash NULL
82343 +_000211_hash ffs_epfile_io 3 64886 _000211_hash NULL
82344 +_000212_hash ffs_prepare_buffer 2 59892 _000212_hash NULL
82345 +_000213_hash f_hidg_write 3 7932 _000213_hash NULL
82346 +_000214_hash file_read_actor 4 1401 _000214_hash NULL
82347 +_000215_hash fill_write_buffer 3 3142 _000215_hash NULL
82348 +_000216_hash fl_create 5 56435 _000216_hash NULL
82349 +_000217_hash ftdi_elan_write 3 57309 _000217_hash NULL
82350 +_000218_hash fuse_conn_limit_write 3 30777 _000218_hash NULL
82351 +_000219_hash fw_iso_buffer_init 3 54582 _000219_hash NULL
82352 +_000220_hash garmin_write_bulk 3 58191 _000220_hash NULL
82353 +_000221_hash garp_attr_create 3 3883 _000221_hash NULL
82354 +_000222_hash get_arg 3 5694 _000222_hash NULL
82355 +_000223_hash getdqbuf 1 62908 _000223_hash NULL
82356 +_000224_hash get_fdb_entries 3 41916 _000224_hash NULL
82357 +_000225_hash get_indirect_ea 4 51869 _000225_hash NULL
82358 +_000226_hash get_registers 3 26187 _000226_hash NULL
82359 +_000227_hash get_scq 2 10897 _000227_hash NULL
82360 +_000228_hash get_server_iovec 2 16804 _000228_hash NULL
82361 +_000229_hash get_ucode_user 3 38202 _000229_hash NULL
82362 +_000230_hash get_user_cpu_mask 2 14861 _000230_hash NULL
82363 +_000231_hash gfs2_alloc_sort_buffer 1 18275 _000231_hash NULL
82364 +_000232_hash gfs2_glock_nq_m 1 20347 _000232_hash NULL
82365 +_000233_hash gigaset_initcs 2 43753 _000233_hash NULL
82366 +_000234_hash gigaset_initdriver 2 1060 _000234_hash NULL
82367 +_000235_hash gs_alloc_req 2 58883 _000235_hash NULL
82368 +_000236_hash gs_buf_alloc 2 25067 _000236_hash NULL
82369 +_000237_hash gsm_data_alloc 3 42437 _000237_hash NULL
82370 +_000238_hash gss_pipe_downcall 3 23182 _000238_hash NULL
82371 +_000239_hash handle_request 9 10024 _000239_hash NULL
82372 +_000240_hash hash_new 1 62224 _000240_hash NULL
82373 +_000241_hash hashtab_create 3 33769 _000241_hash NULL
82374 +_000242_hash hcd_buffer_alloc 2 27495 _000242_hash NULL
82375 +_000243_hash hci_sock_setsockopt 5 28993 _000243_hash NULL
82376 +_000244_hash heap_init 2 49617 _000244_hash NULL
82377 +_000245_hash hest_ghes_dev_register 1 46766 _000245_hash NULL
82378 +_000246_hash hidraw_get_report 3 45609 _000246_hash NULL
82379 +_000247_hash hidraw_report_event 3 49578 _000509_hash NULL nohasharray
82380 +_000248_hash hidraw_send_report 3 23449 _000248_hash NULL
82381 +_000249_hash hpfs_translate_name 3 41497 _000249_hash NULL
82382 +_000250_hash hysdn_conf_write 3 52145 _000250_hash NULL
82383 +_000251_hash hysdn_log_write 3 48694 _000251_hash NULL
82384 +_000252_hash __i2400mu_send_barker 3 23652 _000252_hash NULL
82385 +_000253_hash i2cdev_read 3 1206 _000253_hash NULL
82386 +_000254_hash i2cdev_write 3 23310 _000254_hash NULL
82387 +_000255_hash i2o_parm_field_get 5 34477 _000255_hash NULL
82388 +_000256_hash i2o_parm_table_get 6 61635 _000256_hash NULL
82389 +_000257_hash ib_copy_from_udata 3 59502 _000257_hash NULL
82390 +_000258_hash ib_ucm_alloc_data 3 36885 _000258_hash NULL
82391 +_000259_hash ib_umad_write 3 47993 _000259_hash NULL
82392 +_000260_hash ib_uverbs_unmarshall_recv 5 12251 _000260_hash NULL
82393 +_000261_hash icn_writecmd 2 38629 _000261_hash NULL
82394 +_000262_hash ide_driver_proc_write 3 32493 _000262_hash NULL
82395 +_000263_hash ide_settings_proc_write 3 35110 _000263_hash NULL
82396 +_000264_hash idetape_chrdev_write 3 53976 _000264_hash NULL
82397 +_000265_hash idmap_pipe_downcall 3 14591 _000265_hash NULL
82398 +_000266_hash ieee80211_build_probe_req 7 27660 _000266_hash NULL
82399 +_000267_hash ieee80211_if_write 3 34894 _000267_hash NULL
82400 +_000268_hash if_write 3 51756 _000268_hash NULL
82401 +_000269_hash ilo_write 3 64378 _000269_hash NULL
82402 +_000270_hash ima_write_policy 3 40548 _000270_hash NULL
82403 +_000271_hash init_data_container 1 60709 _000271_hash NULL
82404 +_000272_hash init_send_hfcd 1 34586 _000272_hash NULL
82405 +_000273_hash insert_dent 7 65034 _000273_hash NULL
82406 +_000274_hash interpret_user_input 2 19393 _000274_hash NULL
82407 +_000275_hash int_proc_write 3 39542 _000275_hash NULL
82408 +_000276_hash ioctl_private_iw_point 7 1273 _000276_hash NULL
82409 +_000277_hash iov_iter_copy_from_user 4 31942 _000277_hash NULL
82410 +_000278_hash iov_iter_copy_from_user_atomic 4 56368 _000278_hash NULL
82411 +_000279_hash iowarrior_write 3 18604 _000279_hash NULL
82412 +_000280_hash ipc_alloc 1 1192 _000280_hash NULL
82413 +_000281_hash ipc_rcu_alloc 1 21208 _000281_hash NULL
82414 +_000282_hash ip_options_get_from_user 4 64958 _000282_hash NULL
82415 +_000283_hash ipv6_renew_option 3 38813 _000283_hash NULL
82416 +_000284_hash ip_vs_conn_fill_param_sync 6 29771 _002404_hash NULL nohasharray
82417 +_000285_hash ip_vs_create_timeout_table 2 64478 _000285_hash NULL
82418 +_000286_hash ipw_queue_tx_init 3 49161 _000286_hash NULL
82419 +_000287_hash irda_setsockopt 5 19824 _000287_hash NULL
82420 +_000288_hash irias_new_octseq_value 2 13596 _000288_hash NULL
82421 +_000289_hash ir_lirc_transmit_ir 3 64403 _000289_hash NULL
82422 +_000290_hash irnet_ctrl_write 3 24139 _000290_hash NULL
82423 +_000291_hash isdn_add_channels 3 40905 _000291_hash NULL
82424 +_000292_hash isdn_ppp_fill_rq 2 41428 _000292_hash NULL
82425 +_000293_hash isdn_ppp_write 4 29109 _000293_hash NULL
82426 +_000294_hash isdn_read 3 50021 _000294_hash NULL
82427 +_000295_hash isdn_v110_open 3 2418 _000295_hash NULL
82428 +_000296_hash isdn_writebuf_stub 4 52383 _000296_hash NULL
82429 +_000297_hash islpci_mgt_transmit 5 34133 _000297_hash NULL
82430 +_000298_hash iso_callback 3 43208 _000298_hash NULL
82431 +_000299_hash iso_packets_buffer_init 3 29061 _000299_hash NULL
82432 +_000300_hash it821x_firmware_command 3 8628 _000300_hash NULL
82433 +_000301_hash ivtv_buf_copy_from_user 4 25502 _000301_hash NULL
82434 +_000302_hash iwch_alloc_fastreg_pbl 2 40153 _000302_hash NULL
82435 +_000303_hash iwl_calib_set 3 34400 _002188_hash NULL nohasharray
82436 +_000304_hash jbd2_journal_init_revoke_table 1 36336 _000304_hash NULL
82437 +_000305_hash jffs2_alloc_full_dirent 1 60179 _001111_hash NULL nohasharray
82438 +_000306_hash journal_init_revoke_table 1 56331 _000306_hash NULL
82439 +_000307_hash kcalloc 1-2 27770 _000307_hash NULL
82440 +_000309_hash keyctl_instantiate_key_common 4 47889 _000309_hash NULL
82441 +_000310_hash keyctl_update_key 3 26061 _000310_hash NULL
82442 +_000311_hash __kfifo_alloc 2-3 22173 _000311_hash NULL
82443 +_000313_hash kfifo_copy_from_user 3 5091 _000313_hash NULL
82444 +_000314_hash kmalloc_node 1 50163 _000314_hash NULL
82445 +_000315_hash kmalloc_parameter 1 65279 _000315_hash NULL
82446 +_000316_hash kmem_alloc 1 31920 _000316_hash NULL
82447 +_000317_hash kobj_map 2-3 9566 _000317_hash NULL
82448 +_000319_hash kone_receive 4 4690 _000319_hash NULL
82449 +_000320_hash kone_send 4 63435 _000320_hash NULL
82450 +_000321_hash krealloc 2 14908 _000321_hash NULL
82451 +_000322_hash kvmalloc 1 32646 _000322_hash NULL
82452 +_000323_hash kvm_read_guest_atomic 4 10765 _000323_hash NULL
82453 +_000324_hash kvm_read_guest_cached 4 39666 _000324_hash NULL
82454 +_000325_hash kvm_read_guest_page 5 18074 _000325_hash NULL
82455 +_000326_hash kzalloc 1 54740 _000326_hash NULL
82456 +_000327_hash l2cap_sock_setsockopt 5 50207 _000327_hash NULL
82457 +_000328_hash l2cap_sock_setsockopt_old 4 29346 _000328_hash NULL
82458 +_000329_hash lane2_associate_req 4 45398 _000329_hash NULL
82459 +_000330_hash lbs_debugfs_write 3 48413 _000330_hash NULL
82460 +_000331_hash lcd_write 3 14857 _000331_hash &_000014_hash
82461 +_000332_hash ldm_frag_add 2 5611 _000332_hash NULL
82462 +_000333_hash __lgread 4 31668 _000333_hash NULL
82463 +_000334_hash libipw_alloc_txb 1 27579 _000334_hash NULL
82464 +_000335_hash link_send_sections_long 4 46556 _000335_hash NULL
82465 +_000336_hash listxattr 3 12769 _000336_hash NULL
82466 +_000337_hash LoadBitmap 2 19658 _000337_hash NULL
82467 +_000338_hash load_msg 2 95 _000338_hash NULL
82468 +_000339_hash lpfc_debugfs_dif_err_write 3 17424 _000339_hash NULL
82469 +_000340_hash lp_write 3 9511 _000340_hash NULL
82470 +_000341_hash mb_cache_create 2 17307 _000341_hash NULL
82471 +_000342_hash mce_write 3 26201 _000342_hash NULL
82472 +_000343_hash mcs7830_get_reg 3 33308 _000343_hash NULL
82473 +_000344_hash mcs7830_set_reg 3 31413 _000344_hash NULL
82474 +_000345_hash memcpy_fromiovec 3 55247 _000345_hash NULL
82475 +_000346_hash memcpy_fromiovecend 3-4 2707 _000346_hash NULL
82476 +_000348_hash mempool_kmalloc 2 53831 _000348_hash NULL
82477 +_000349_hash mempool_resize 2 47983 _001821_hash NULL nohasharray
82478 +_000350_hash mem_rw 3 22085 _000350_hash NULL
82479 +_000351_hash mgmt_control 3 7349 _000351_hash NULL
82480 +_000352_hash mgmt_pending_add 5 46976 _000352_hash NULL
82481 +_000353_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000353_hash NULL
82482 +_000354_hash mmc_alloc_sg 1 21504 _000354_hash NULL
82483 +_000355_hash mmc_send_bus_test 4 18285 _000355_hash NULL
82484 +_000356_hash mmc_send_cxd_data 5 38655 _000356_hash NULL
82485 +_000357_hash module_alloc_update_bounds 1 47205 _000357_hash NULL
82486 +_000358_hash move_addr_to_kernel 2 32673 _000358_hash NULL
82487 +_000359_hash mpi_alloc_limb_space 1 23190 _000359_hash NULL
82488 +_000360_hash mpi_resize 2 44674 _000360_hash NULL
82489 +_000361_hash mptctl_getiocinfo 2 28545 _000361_hash NULL
82490 +_000362_hash mtdchar_readoob 4 31200 _000362_hash NULL
82491 +_000363_hash mtdchar_write 3 56831 _000363_hash NULL
82492 +_000364_hash mtdchar_writeoob 4 3393 _000364_hash NULL
82493 +_000365_hash mtd_device_parse_register 5 5024 _000365_hash NULL
82494 +_000366_hash mtf_test_write 3 18844 _000366_hash NULL
82495 +_000367_hash mtrr_write 3 59622 _000367_hash NULL
82496 +_000368_hash musb_test_mode_write 3 33518 _000368_hash NULL
82497 +_000369_hash mwifiex_get_common_rates 3 17131 _000369_hash NULL
82498 +_000370_hash mwifiex_update_curr_bss_params 5 16908 _000370_hash NULL
82499 +_000371_hash nand_bch_init 2-3 16280 _001341_hash NULL nohasharray
82500 +_000373_hash ncp_file_write 3 3813 _000373_hash NULL
82501 +_000374_hash ncp__vol2io 5 4804 _000374_hash NULL
82502 +_000375_hash nes_alloc_fast_reg_page_list 2 33523 _000375_hash NULL
82503 +_000376_hash nfc_targets_found 3 29886 _000376_hash NULL
82504 +_000377_hash nfs4_acl_new 1 49806 _000377_hash NULL
82505 +_000378_hash nfs4_write_cached_acl 4 15070 _000378_hash NULL
82506 +_000379_hash nfsd_cache_update 3 59574 _000379_hash NULL
82507 +_000380_hash nfsd_symlink 6 63442 _000380_hash NULL
82508 +_000381_hash nfs_idmap_get_desc 2-4 42990 _000381_hash NULL
82509 +_000383_hash nfs_readdir_make_qstr 3 12509 _000383_hash NULL
82510 +_000384_hash note_last_dentry 3 12285 _000384_hash NULL
82511 +_000385_hash ntfs_copy_from_user 3-5 15072 _000385_hash NULL
82512 +_000387_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000387_hash NULL
82513 +_000389_hash ntfs_ucstonls 3 23097 _000389_hash NULL
82514 +_000390_hash nvme_alloc_iod 1 56027 _000390_hash NULL
82515 +_000391_hash nvram_write 3 3894 _000391_hash NULL
82516 +_000392_hash o2hb_debug_create 4 18744 _000392_hash NULL
82517 +_000393_hash o2net_send_message_vec 4 879 _001792_hash NULL nohasharray
82518 +_000394_hash ocfs2_control_cfu 2 37750 _000394_hash NULL
82519 +_000395_hash oom_adjust_write 3 41116 _000395_hash NULL
82520 +_000396_hash oom_score_adj_write 3 42594 _000396_hash NULL
82521 +_000397_hash opera1_xilinx_rw 5 31453 _000397_hash NULL
82522 +_000398_hash oprofilefs_ulong_from_user 3 57251 _000398_hash NULL
82523 +_000399_hash opticon_write 4 60775 _000399_hash NULL
82524 +_000400_hash orig_node_add_if 2 32833 _000400_hash NULL
82525 +_000401_hash orig_node_del_if 2 28371 _000401_hash NULL
82526 +_000402_hash p9_check_zc_errors 4 15534 _000402_hash NULL
82527 +_000403_hash packet_buffer_init 2 1607 _000403_hash NULL
82528 +_000404_hash packet_setsockopt 5 17662 _000404_hash NULL
82529 +_000405_hash parse_command 2 37079 _000405_hash NULL
82530 +_000406_hash pcbit_writecmd 2 12332 _000406_hash NULL
82531 +_000407_hash pcmcia_replace_cis 3 57066 _000407_hash NULL
82532 +_000408_hash pgctrl_write 3 50453 _000408_hash NULL
82533 +_000409_hash pg_write 3 40766 _000409_hash NULL
82534 +_000410_hash pidlist_allocate 1 64404 _000410_hash NULL
82535 +_000411_hash pipe_iov_copy_from_user 3 23102 _000411_hash NULL
82536 +_000412_hash pipe_iov_copy_to_user 3 3447 _000412_hash NULL
82537 +_000413_hash pkt_add 3 39897 _000413_hash NULL
82538 +_000414_hash pktgen_if_write 3 55628 _000414_hash NULL
82539 +_000415_hash platform_device_add_data 3 310 _000415_hash NULL
82540 +_000416_hash platform_device_add_resources 3 13289 _000416_hash NULL
82541 +_000417_hash pm_qos_power_write 3 52513 _000417_hash NULL
82542 +_000418_hash pnpbios_proc_write 3 19758 _000418_hash NULL
82543 +_000419_hash pool_allocate 3 42012 _000419_hash NULL
82544 +_000420_hash posix_acl_alloc 1 48063 _000420_hash NULL
82545 +_000421_hash ppp_cp_parse_cr 4 5214 _000421_hash NULL
82546 +_000422_hash ppp_write 3 34034 _000422_hash NULL
82547 +_000423_hash pp_read 3 33210 _000423_hash NULL
82548 +_000424_hash pp_write 3 39554 _000424_hash NULL
82549 +_000425_hash printer_req_alloc 2 62687 _001807_hash NULL nohasharray
82550 +_000426_hash printer_write 3 60276 _000426_hash NULL
82551 +_000427_hash prism2_set_genericelement 3 29277 _000427_hash NULL
82552 +_000428_hash __probe_kernel_read 3 61119 _000428_hash NULL
82553 +_000429_hash __probe_kernel_write 3 29842 _000429_hash NULL
82554 +_000430_hash proc_coredump_filter_write 3 25625 _000430_hash NULL
82555 +_000431_hash _proc_do_string 2 6376 _000431_hash NULL
82556 +_000432_hash process_vm_rw_pages 5-6 15954 _000432_hash NULL
82557 +_000434_hash proc_loginuid_write 3 63648 _000434_hash NULL
82558 +_000435_hash proc_pid_attr_write 3 63845 _000435_hash NULL
82559 +_000436_hash proc_scsi_devinfo_write 3 32064 _000436_hash NULL
82560 +_000437_hash proc_scsi_write 3 29142 _000437_hash NULL
82561 +_000438_hash proc_scsi_write_proc 3 267 _000438_hash NULL
82562 +_000439_hash pstore_mkfile 5 50830 _000439_hash NULL
82563 +_000440_hash pti_char_write 3 60960 _000440_hash NULL
82564 +_000441_hash ptrace_writedata 4 45021 _000441_hash NULL
82565 +_000442_hash pt_write 3 40159 _000442_hash NULL
82566 +_000443_hash pvr2_ioread_set_sync_key 3 59882 _000443_hash NULL
82567 +_000444_hash pvr2_stream_buffer_count 2 33719 _000444_hash NULL
82568 +_000445_hash qdisc_class_hash_alloc 1 18262 _000445_hash NULL
82569 +_000446_hash r3964_write 4 57662 _000446_hash NULL
82570 +_000447_hash raw_seticmpfilter 3 6888 _000447_hash NULL
82571 +_000448_hash raw_setsockopt 5 45800 _000448_hash NULL
82572 +_000449_hash rawv6_seticmpfilter 5 12137 _000449_hash NULL
82573 +_000450_hash ray_cs_essid_proc_write 3 17875 _000450_hash NULL
82574 +_000451_hash rbd_add 3 16366 _000451_hash NULL
82575 +_000452_hash rbd_snap_add 4 19678 _000452_hash NULL
82576 +_000453_hash rdma_set_ib_paths 3 45592 _000453_hash NULL
82577 +_000454_hash rds_page_copy_user 4 35691 _000454_hash NULL
82578 +_000455_hash read 3 9397 _000455_hash NULL
82579 +_000456_hash read_buf 2 20469 _000456_hash NULL
82580 +_000457_hash read_cis_cache 4 29735 _000457_hash NULL
82581 +_000458_hash realloc_buffer 2 25816 _000458_hash NULL
82582 +_000459_hash realloc_packet_buffer 2 25569 _000459_hash NULL
82583 +_000460_hash receive_DataRequest 3 9904 _000460_hash NULL
82584 +_000461_hash recent_mt_proc_write 3 8206 _000461_hash NULL
82585 +_000462_hash regmap_access_read_file 3 37223 _000462_hash NULL
82586 +_000463_hash regmap_bulk_write 4 59049 _000463_hash NULL
82587 +_000464_hash regmap_map_read_file 3 37685 _000464_hash NULL
82588 +_000465_hash regset_tls_set 4 18459 _000465_hash NULL
82589 +_000466_hash reg_w_buf 3 27724 _000466_hash NULL
82590 +_000467_hash reg_w_ixbuf 4 34736 _000467_hash NULL
82591 +_000468_hash remote_settings_file_write 3 22987 _000468_hash NULL
82592 +_000469_hash request_key_auth_new 3 38092 _000469_hash NULL
82593 +_000470_hash restore_i387_fxsave 2 17528 _000470_hash NULL
82594 +_000471_hash revalidate 2 19043 _000471_hash NULL
82595 +_000472_hash rfcomm_sock_setsockopt 5 18254 _000472_hash NULL
82596 +_000473_hash rndis_add_response 2 58544 _000473_hash NULL
82597 +_000474_hash rndis_set_oid 4 6547 _000474_hash NULL
82598 +_000475_hash rngapi_reset 3 34366 _000475_hash NULL
82599 +_000476_hash roccat_common_receive 4 53407 _000476_hash NULL
82600 +_000477_hash roccat_common_send 4 12284 _000477_hash NULL
82601 +_000478_hash rpc_malloc 2 43573 _000478_hash NULL
82602 +_000479_hash rt2x00debug_write_bbp 3 8212 _000479_hash NULL
82603 +_000480_hash rt2x00debug_write_csr 3 64753 _000480_hash NULL
82604 +_000481_hash rt2x00debug_write_eeprom 3 23091 _000481_hash NULL
82605 +_000482_hash rt2x00debug_write_rf 3 38195 _000482_hash NULL
82606 +_000483_hash rts51x_read_mem 4 26577 _000483_hash NULL
82607 +_000484_hash rts51x_read_status 4 11830 _000484_hash NULL
82608 +_000485_hash rts51x_write_mem 4 17598 _000485_hash NULL
82609 +_000486_hash rw_copy_check_uvector 3 34271 _000486_hash NULL
82610 +_000487_hash rxrpc_request_key 3 27235 _000487_hash NULL
82611 +_000488_hash rxrpc_server_keyring 3 16431 _000488_hash NULL
82612 +_000489_hash savemem 3 58129 _000489_hash NULL
82613 +_000490_hash sb16_copy_from_user 10-7-6 55836 _000490_hash NULL
82614 +_000493_hash sched_autogroup_write 3 10984 _000493_hash NULL
82615 +_000494_hash scsi_mode_select 6 37330 _000494_hash NULL
82616 +_000495_hash scsi_tgt_copy_sense 3 26933 _000495_hash NULL
82617 +_000496_hash sctp_auth_create_key 1 51641 _000496_hash NULL
82618 +_000497_hash sctp_getsockopt_delayed_ack 2 9232 _000497_hash NULL
82619 +_000498_hash sctp_getsockopt_local_addrs 2 25178 _000498_hash NULL
82620 +_000499_hash sctp_make_abort_user 3 29654 _000499_hash NULL
82621 +_000500_hash sctp_setsockopt_active_key 3 43755 _000500_hash NULL
82622 +_000501_hash sctp_setsockopt_adaptation_layer 3 26935 _001925_hash NULL nohasharray
82623 +_000502_hash sctp_setsockopt_associnfo 3 51684 _000502_hash NULL
82624 +_000503_hash sctp_setsockopt_auth_chunk 3 30843 _000503_hash NULL
82625 +_000504_hash sctp_setsockopt_auth_key 3 3793 _000504_hash NULL
82626 +_000505_hash sctp_setsockopt_autoclose 3 5775 _000505_hash NULL
82627 +_000506_hash sctp_setsockopt_bindx 3 49870 _000506_hash NULL
82628 +_000507_hash __sctp_setsockopt_connectx 3 46949 _000507_hash NULL
82629 +_000508_hash sctp_setsockopt_context 3 31091 _000508_hash NULL
82630 +_000509_hash sctp_setsockopt_default_send_param 3 49578 _000509_hash &_000247_hash
82631 +_000510_hash sctp_setsockopt_delayed_ack 3 40129 _000510_hash NULL
82632 +_000511_hash sctp_setsockopt_del_key 3 42304 _002281_hash NULL nohasharray
82633 +_000512_hash sctp_setsockopt_events 3 18862 _000512_hash NULL
82634 +_000513_hash sctp_setsockopt_hmac_ident 3 11687 _000513_hash NULL
82635 +_000514_hash sctp_setsockopt_initmsg 3 1383 _000514_hash NULL
82636 +_000515_hash sctp_setsockopt_maxburst 3 28041 _000515_hash NULL
82637 +_000516_hash sctp_setsockopt_maxseg 3 11829 _000516_hash NULL
82638 +_000517_hash sctp_setsockopt_peer_addr_params 3 734 _000517_hash NULL
82639 +_000518_hash sctp_setsockopt_peer_primary_addr 3 13440 _000518_hash NULL
82640 +_000519_hash sctp_setsockopt_rtoinfo 3 30941 _000519_hash NULL
82641 +_000520_hash security_context_to_sid_core 2 29248 _000520_hash NULL
82642 +_000521_hash sel_commit_bools_write 3 46077 _000521_hash NULL
82643 +_000522_hash sel_write_avc_cache_threshold 3 2256 _000522_hash NULL
82644 +_000523_hash sel_write_bool 3 46996 _000523_hash NULL
82645 +_000524_hash sel_write_checkreqprot 3 60774 _000524_hash NULL
82646 +_000525_hash sel_write_disable 3 10511 _000525_hash NULL
82647 +_000526_hash sel_write_enforce 3 48998 _000526_hash NULL
82648 +_000527_hash sel_write_load 3 63830 _000527_hash NULL
82649 +_000528_hash send_bulk_static_data 3 61932 _000528_hash NULL
82650 +_000529_hash send_control_msg 6 48498 _000529_hash NULL
82651 +_000530_hash set_aoe_iflist 2 42737 _000530_hash NULL
82652 +_000531_hash setkey_unaligned 3 39474 _000531_hash NULL
82653 +_000532_hash set_registers 3 53582 _000532_hash NULL
82654 +_000533_hash setsockopt 5 54539 _000533_hash NULL
82655 +_000534_hash setup_req 3 5848 _000534_hash NULL
82656 +_000535_hash setup_window 7 59178 _000535_hash NULL
82657 +_000536_hash setxattr 4 37006 _000536_hash NULL
82658 +_000537_hash sfq_alloc 1 2861 _000537_hash NULL
82659 +_000538_hash sg_kmalloc 1 50240 _000538_hash NULL
82660 +_000539_hash sgl_map_user_pages 2 30610 _000539_hash NULL
82661 +_000540_hash shash_setkey_unaligned 3 8620 _000540_hash NULL
82662 +_000541_hash shmem_xattr_alloc 2 61190 _000541_hash NULL
82663 +_000542_hash sierra_setup_urb 5 46029 _000542_hash NULL
82664 +_000543_hash simple_transaction_get 3 50633 _000543_hash NULL
82665 +_000544_hash simple_write_to_buffer 2-5 3122 _000544_hash NULL
82666 +_000546_hash sisusb_send_bulk_msg 3 17864 _000546_hash NULL
82667 +_000547_hash skb_add_data 3 48363 _000547_hash NULL
82668 +_000548_hash skb_do_copy_data_nocache 5 12465 _000548_hash NULL
82669 +_000549_hash sl_alloc_bufs 2 50380 _000549_hash NULL
82670 +_000550_hash sl_realloc_bufs 2 64086 _000550_hash NULL
82671 +_000551_hash smk_write_ambient 3 45691 _000551_hash NULL
82672 +_000552_hash smk_write_cipso 3 17989 _000552_hash NULL
82673 +_000553_hash smk_write_direct 3 46363 _000553_hash NULL
82674 +_000554_hash smk_write_doi 3 49621 _000554_hash NULL
82675 +_000555_hash smk_write_load_list 3 52280 _000555_hash NULL
82676 +_000556_hash smk_write_logging 3 2618 _000556_hash NULL
82677 +_000557_hash smk_write_netlbladdr 3 42525 _000557_hash NULL
82678 +_000558_hash smk_write_onlycap 3 14400 _000558_hash NULL
82679 +_000559_hash snd_ctl_elem_user_tlv 3 11695 _000559_hash NULL
82680 +_000560_hash snd_emu10k1_fx8010_read 5 9605 _000560_hash NULL
82681 +_000561_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000561_hash NULL
82682 +_000563_hash snd_gus_dram_poke 4 18525 _000563_hash NULL
82683 +_000564_hash snd_hdsp_playback_copy 5 20676 _000564_hash NULL
82684 +_000565_hash snd_info_entry_write 3 63474 _000565_hash NULL
82685 +_000566_hash snd_korg1212_copy_from 6 36169 _000566_hash NULL
82686 +_000567_hash snd_mem_proc_write 3 9786 _000567_hash NULL
82687 +_000568_hash snd_midi_channel_init_set 1 30092 _000568_hash NULL
82688 +_000569_hash snd_midi_event_new 1 9893 _000750_hash NULL nohasharray
82689 +_000570_hash snd_opl4_mem_proc_write 5 9670 _000570_hash NULL
82690 +_000571_hash snd_pcm_aio_read 3 13900 _000571_hash NULL
82691 +_000572_hash snd_pcm_aio_write 3 28738 _000572_hash NULL
82692 +_000573_hash snd_pcm_oss_write1 3 10872 _000573_hash NULL
82693 +_000574_hash snd_pcm_oss_write2 3 27332 _000574_hash NULL
82694 +_000575_hash snd_rawmidi_kernel_write1 4 56847 _000575_hash NULL
82695 +_000576_hash snd_rme9652_playback_copy 5 20970 _000576_hash NULL
82696 +_000577_hash snd_sb_csp_load_user 3 45190 _000577_hash NULL
82697 +_000578_hash snd_usb_ctl_msg 8 8436 _000578_hash NULL
82698 +_000579_hash sock_bindtodevice 3 50942 _000579_hash NULL
82699 +_000580_hash sock_kmalloc 2 62205 _000580_hash NULL
82700 +_000581_hash spidev_write 3 44510 _000581_hash NULL
82701 +_000582_hash squashfs_read_table 3 16945 _000582_hash NULL
82702 +_000583_hash srpt_alloc_ioctx 2-3 51042 _000583_hash NULL
82703 +_000585_hash srpt_alloc_ioctx_ring 2 49330 _000585_hash NULL
82704 +_000586_hash st5481_setup_isocpipes 6-4 61340 _000586_hash NULL
82705 +_000587_hash sta_agg_status_write 3 45164 _000587_hash NULL
82706 +_000588_hash svc_setsockopt 5 36876 _000588_hash NULL
82707 +_000589_hash sys_add_key 4 61288 _000589_hash NULL
82708 +_000590_hash sys_modify_ldt 3 18824 _000590_hash NULL
82709 +_000591_hash sys_semtimedop 3 4486 _000591_hash NULL
82710 +_000592_hash sys_setdomainname 2 4373 _000592_hash NULL
82711 +_000593_hash sys_sethostname 2 42962 _000593_hash NULL
82712 +_000594_hash tda10048_writeregbulk 4 11050 _000594_hash NULL
82713 +_000595_hash tipc_log_resize 1 34803 _000595_hash NULL
82714 +_000596_hash tomoyo_write_self 3 45161 _000596_hash NULL
82715 +_000597_hash tower_write 3 8580 _000597_hash NULL
82716 +_000598_hash tpm_write 3 50798 _000598_hash NULL
82717 +_000599_hash trusted_instantiate 3 4710 _000599_hash NULL
82718 +_000600_hash trusted_update 3 12664 _000600_hash NULL
82719 +_000601_hash tt_changes_fill_buffer 3 62649 _000601_hash NULL
82720 +_000602_hash tty_buffer_alloc 2 45437 _000602_hash NULL
82721 +_000603_hash __tun_chr_ioctl 4 22300 _000603_hash NULL
82722 +_000604_hash ubi_more_leb_change_data 4 63534 _000604_hash NULL
82723 +_000605_hash ubi_more_update_data 4 39189 _000605_hash NULL
82724 +_000606_hash ubi_resize_volume 2 50172 _000606_hash NULL
82725 +_000607_hash udf_alloc_i_data 2 35786 _000607_hash NULL
82726 +_000608_hash uea_idma_write 3 64139 _000608_hash NULL
82727 +_000609_hash uea_request 4 47613 _000609_hash NULL
82728 +_000610_hash uea_send_modem_cmd 3 3888 _000610_hash NULL
82729 +_000611_hash uio_write 3 43202 _000611_hash NULL
82730 +_000612_hash um_idi_write 3 18293 _000612_hash NULL
82731 +_000613_hash us122l_ctl_msg 8 13330 _000613_hash NULL
82732 +_000614_hash usb_alloc_urb 1 43436 _000614_hash NULL
82733 +_000615_hash usblp_new_writeurb 2 22894 _000615_hash NULL
82734 +_000616_hash usblp_write 3 23178 _000616_hash NULL
82735 +_000617_hash usbtest_alloc_urb 3-5 34446 _000617_hash NULL
82736 +_000619_hash usbtmc_write 3 64340 _000619_hash NULL
82737 +_000620_hash user_instantiate 3 26131 _000620_hash NULL
82738 +_000621_hash user_update 3 41332 _000621_hash NULL
82739 +_000622_hash uvc_simplify_fraction 3 31303 _000622_hash NULL
82740 +_000623_hash uwb_rc_cmd_done 4 35892 _000623_hash NULL
82741 +_000624_hash uwb_rc_neh_grok_event 3 55799 _000624_hash NULL
82742 +_000625_hash v9fs_alloc_rdir_buf 2 42150 _000625_hash NULL
82743 +_000626_hash __vb2_perform_fileio 3 63033 _000626_hash NULL
82744 +_000627_hash vc_do_resize 3-4 48842 _000627_hash NULL
82745 +_000629_hash vcs_write 3 3910 _000629_hash NULL
82746 +_000630_hash vfd_write 3 14717 _000630_hash NULL
82747 +_000631_hash vga_arb_write 3 36112 _000631_hash NULL
82748 +_000632_hash vga_switcheroo_debugfs_write 3 33984 _000632_hash NULL
82749 +_000633_hash vhci_get_user 3 45039 _000633_hash NULL
82750 +_000634_hash video_proc_write 3 6724 _000634_hash NULL
82751 +_000635_hash vlsi_alloc_ring 3-4 57003 _000635_hash NULL
82752 +_000637_hash __vmalloc 1 61168 _000637_hash NULL
82753 +_000638_hash vmalloc_32 1 1135 _000638_hash NULL
82754 +_000639_hash vmalloc_32_user 1 37519 _000639_hash NULL
82755 +_000640_hash vmalloc_exec 1 36132 _000640_hash NULL
82756 +_000641_hash vmalloc_node 1 58700 _000641_hash NULL
82757 +_000642_hash __vmalloc_node_flags 1 30352 _000642_hash NULL
82758 +_000643_hash vmalloc_user 1 32308 _000643_hash NULL
82759 +_000644_hash vol_cdev_direct_write 3 20751 _000644_hash NULL
82760 +_000645_hash vp_request_msix_vectors 2 28849 _000645_hash NULL
82761 +_000646_hash vring_add_indirect 3-4 20737 _000646_hash NULL
82762 +_000648_hash vring_new_virtqueue 1 9671 _000648_hash NULL
82763 +_000649_hash vxge_os_dma_malloc 2 46184 _000649_hash NULL
82764 +_000650_hash vxge_os_dma_malloc_async 3 56348 _000650_hash NULL
82765 +_000651_hash wdm_write 3 53735 _000651_hash NULL
82766 +_000652_hash wiimote_hid_send 3 48528 _000652_hash NULL
82767 +_000653_hash wl1273_fm_fops_write 3 60621 _000653_hash NULL
82768 +_000654_hash wlc_phy_loadsampletable_nphy 3 64367 _000654_hash NULL
82769 +_000655_hash write 3 62671 _000655_hash NULL
82770 +_000656_hash write_flush 3 50803 _000656_hash NULL
82771 +_000657_hash write_rio 3 54837 _000657_hash NULL
82772 +_000658_hash x25_asy_change_mtu 2 26928 _000658_hash NULL
82773 +_000659_hash xdi_copy_from_user 4 8395 _000659_hash NULL
82774 +_000660_hash xfrm_dst_alloc_copy 3 3034 _000660_hash NULL
82775 +_000661_hash xfrm_user_policy 4 62573 _000661_hash NULL
82776 +_000662_hash xfs_attrmulti_attr_set 4 59346 _000662_hash NULL
82777 +_000663_hash xfs_handle_to_dentry 3 12135 _000663_hash NULL
82778 +_000664_hash __xip_file_write 3 2733 _000664_hash NULL
82779 +_000665_hash xprt_rdma_allocate 2 31372 _000665_hash NULL
82780 +_000666_hash zd_usb_iowrite16v_async 3 23984 _000666_hash NULL
82781 +_000667_hash zd_usb_read_fw 4 22049 _000667_hash NULL
82782 +_000668_hash zerocopy_sg_from_iovec 3 11828 _000668_hash NULL
82783 +_000669_hash zoran_write 3 22404 _000669_hash NULL
82784 +_000671_hash acpi_ex_allocate_name_string 2 7685 _000671_hash NULL
82785 +_000672_hash acpi_os_allocate_zeroed 1 37422 _000672_hash NULL
82786 +_000673_hash acpi_ut_initialize_buffer 2 47143 _002314_hash NULL nohasharray
82787 +_000674_hash ad7879_spi_xfer 3 36311 _000674_hash NULL
82788 +_000675_hash add_new_gdb 3 27643 _000675_hash NULL
82789 +_000676_hash add_numbered_child 5 14273 _000676_hash NULL
82790 +_000677_hash add_res_range 4 21310 _000677_hash NULL
82791 +_000678_hash addtgt 3 54703 _000678_hash NULL
82792 +_000679_hash add_uuid 4 49831 _000679_hash NULL
82793 +_000680_hash afs_cell_alloc 2 24052 _000680_hash NULL
82794 +_000681_hash aggr_recv_addba_req_evt 4 38037 _000681_hash NULL
82795 +_000682_hash agp_create_memory 1 1075 _000682_hash NULL
82796 +_000683_hash agp_create_user_memory 1 62955 _000683_hash NULL
82797 +_000684_hash alg_setsockopt 5 20985 _000684_hash NULL
82798 +_000685_hash alloc_async 1 14208 _000685_hash NULL
82799 +_000686_hash ___alloc_bootmem_nopanic 1 53626 _000686_hash NULL
82800 +_000687_hash alloc_buf 1 34532 _000687_hash NULL
82801 +_000688_hash alloc_chunk 1 49575 _000688_hash NULL
82802 +_000689_hash alloc_context 1 41283 _000689_hash NULL
82803 +_000690_hash alloc_ctrl_packet 1 44667 _000690_hash NULL
82804 +_000691_hash alloc_data_packet 1 46698 _000691_hash NULL
82805 +_000692_hash alloc_dca_provider 2 59670 _000692_hash NULL
82806 +_000693_hash __alloc_dev_table 2 54343 _000693_hash NULL
82807 +_000694_hash alloc_ep 1 17269 _000694_hash NULL
82808 +_000695_hash __alloc_extent_buffer 3 15093 _000695_hash NULL
82809 +_000696_hash alloc_group_attrs 2 9194 _000719_hash NULL nohasharray
82810 +_000697_hash alloc_large_system_hash 2 64490 _000697_hash NULL
82811 +_000698_hash alloc_netdev_mqs 1 30030 _000698_hash NULL
82812 +_000699_hash __alloc_objio_seg 1 7203 _000699_hash NULL
82813 +_000700_hash alloc_ring 2-4 15345 _000700_hash NULL
82814 +_000701_hash alloc_ring 2-4 39151 _000701_hash NULL
82815 +_000704_hash alloc_session 1-2 64171 _000704_hash NULL
82816 +_000708_hash alloc_smp_req 1 51337 _000708_hash NULL
82817 +_000709_hash alloc_smp_resp 1 3566 _000709_hash NULL
82818 +_000710_hash alloc_ts_config 1 45775 _000710_hash NULL
82819 +_000711_hash alloc_upcall 2 62186 _000711_hash NULL
82820 +_000712_hash altera_drscan 2 48698 _000712_hash NULL
82821 +_000713_hash altera_irscan 2 62396 _000713_hash NULL
82822 +_000714_hash altera_set_dr_post 2 54291 _000714_hash NULL
82823 +_000715_hash altera_set_dr_pre 2 64862 _000715_hash NULL
82824 +_000716_hash altera_set_ir_post 2 20948 _000716_hash NULL
82825 +_000717_hash altera_set_ir_pre 2 54103 _000717_hash NULL
82826 +_000718_hash altera_swap_dr 2 50090 _000718_hash NULL
82827 +_000719_hash altera_swap_ir 2 9194 _000719_hash &_000696_hash
82828 +_000720_hash amd_create_gatt_pages 1 20537 _000720_hash NULL
82829 +_000721_hash aoechr_write 3 62883 _001352_hash NULL nohasharray
82830 +_000722_hash applesmc_create_nodes 2 49392 _000722_hash NULL
82831 +_000723_hash array_zalloc 1-2 7519 _000723_hash NULL
82832 +_000725_hash arvo_sysfs_read 6 31617 _000725_hash NULL
82833 +_000726_hash arvo_sysfs_write 6 3311 _000726_hash NULL
82834 +_000727_hash asd_store_update_bios 4 10165 _000727_hash NULL
82835 +_000728_hash ata_host_alloc 2 46094 _000728_hash NULL
82836 +_000729_hash atalk_sendmsg 4 21677 _000729_hash NULL
82837 +_000730_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000730_hash NULL
82838 +_000731_hash ath6kl_mgmt_tx 9 21153 _000731_hash NULL
82839 +_000732_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000732_hash NULL
82840 +_000733_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000733_hash NULL
82841 +_000734_hash ath_descdma_setup 5 12257 _000734_hash NULL
82842 +_000735_hash ath_rx_edma_init 2 65483 _000735_hash NULL
82843 +_000736_hash ati_create_gatt_pages 1 4722 _000736_hash NULL
82844 +_000737_hash au0828_init_isoc 2-3 61917 _000737_hash NULL
82845 +_000739_hash audit_init_entry 1 38644 _000739_hash NULL
82846 +_000740_hash ax25_sendmsg 4 62770 _000740_hash NULL
82847 +_000741_hash b1_alloc_card 1 36155 _000741_hash NULL
82848 +_000742_hash b43_nphy_load_samples 3 36481 _000742_hash NULL
82849 +_000743_hash bio_copy_user_iov 4 37660 _000743_hash NULL
82850 +_000744_hash __bio_map_kern 2-3 47379 _000744_hash NULL
82851 +_000746_hash blk_register_region 1-2 51424 _000746_hash NULL
82852 +_000748_hash bm_entry_write 3 28338 _000748_hash NULL
82853 +_000749_hash bm_realloc_pages 2 9431 _000749_hash NULL
82854 +_000750_hash bm_register_write 3 9893 _000750_hash &_000569_hash
82855 +_000751_hash bm_status_write 3 12964 _000751_hash NULL
82856 +_000752_hash br_mdb_rehash 2 42643 _000752_hash NULL
82857 +_000753_hash btrfs_copy_from_user 3 43806 _000753_hash NULL
82858 +_000754_hash btrfs_insert_delayed_dir_index 4 63720 _000754_hash NULL
82859 +_000755_hash __btrfs_map_block 3 49839 _000755_hash NULL
82860 +_000756_hash __c4iw_init_resource_fifo 3 8334 _000756_hash NULL
82861 +_000757_hash cache_downcall 3 13666 _000757_hash NULL
82862 +_000758_hash cache_slow_downcall 2 8570 _000758_hash NULL
82863 +_000759_hash ca_extend 2 64541 _000759_hash NULL
82864 +_000760_hash caif_seqpkt_sendmsg 4 22961 _000760_hash NULL
82865 +_000761_hash caif_stream_sendmsg 4 9110 _000761_hash NULL
82866 +_000762_hash carl9170_cmd_buf 3 950 _000762_hash NULL
82867 +_000763_hash cdev_add 2-3 38176 _000763_hash NULL
82868 +_000765_hash cdrom_read_cdda 4 50478 _000765_hash NULL
82869 +_000766_hash ceph_dns_resolve_name 1 62488 _000766_hash NULL
82870 +_000767_hash ceph_msgpool_get 2 54258 _000767_hash NULL
82871 +_000768_hash cfg80211_connect_result 4-6 56515 _000768_hash NULL
82872 +_000770_hash cfg80211_disconnected 4 57 _000770_hash NULL
82873 +_000771_hash cfg80211_inform_bss 8 19332 _000771_hash NULL
82874 +_000772_hash cfg80211_inform_bss_frame 4 41078 _000772_hash NULL
82875 +_000773_hash cfg80211_mlme_register_mgmt 5 19852 _000773_hash NULL
82876 +_000774_hash cfg80211_roamed_bss 4-6 50198 _000774_hash NULL
82877 +_000776_hash cifs_readdata_alloc 1 50318 _000776_hash NULL
82878 +_000777_hash cifs_readv_from_socket 3 19109 _000777_hash NULL
82879 +_000778_hash cifs_writedata_alloc 1 32880 _000778_hash NULL
82880 +_000779_hash cnic_alloc_dma 3 34641 _000779_hash NULL
82881 +_000780_hash configfs_write_file 3 61621 _000780_hash NULL
82882 +_000781_hash construct_key 3 11329 _000781_hash NULL
82883 +_000782_hash context_alloc 3 24645 _000782_hash NULL
82884 +_000783_hash copy_to_user 3 57835 _000783_hash NULL
82885 +_000784_hash create_attr_set 1 22861 _000784_hash NULL
82886 +_000785_hash create_bounce_buffer 3 39155 _000785_hash NULL
82887 +_000786_hash create_gpadl_header 2 19064 _000786_hash NULL
82888 +_000787_hash _create_sg_bios 4 31244 _000787_hash NULL
82889 +_000788_hash cryptd_alloc_instance 2-3 18048 _000788_hash NULL
82890 +_000790_hash crypto_ahash_setkey 3 55134 _000790_hash NULL
82891 +_000791_hash crypto_alloc_instance2 3 25277 _000791_hash NULL
82892 +_000792_hash crypto_shash_setkey 3 60483 _000792_hash NULL
82893 +_000793_hash cx231xx_init_bulk 3-2 47024 _000793_hash NULL
82894 +_000794_hash cx231xx_init_isoc 2-3 56453 _000794_hash NULL
82895 +_000796_hash cx231xx_init_vbi_isoc 2-3 28053 _000796_hash NULL
82896 +_000798_hash cxgb_alloc_mem 1 24007 _000798_hash NULL
82897 +_000799_hash cxgbi_device_portmap_create 3 25747 _000799_hash NULL
82898 +_000800_hash cxgbi_device_register 1-2 36746 _000800_hash NULL
82899 +_000802_hash __cxio_init_resource_fifo 3 23447 _000802_hash NULL
82900 +_000803_hash dccp_sendmsg 4 56058 _000803_hash NULL
82901 +_000804_hash ddp_make_gl 1 12179 _000804_hash NULL
82902 +_000805_hash depth_write 3 3021 _000805_hash NULL
82903 +_000806_hash dev_irnet_write 3 11398 _000806_hash NULL
82904 +_000807_hash dev_set_alias 3 50084 _000807_hash NULL
82905 +_000808_hash dev_write 3 7708 _000808_hash NULL
82906 +_000809_hash dfs_global_file_write 3 6112 _000809_hash NULL
82907 +_000810_hash dgram_sendmsg 4 45679 _000810_hash NULL
82908 +_000811_hash disconnect 4 32521 _000811_hash NULL
82909 +_000812_hash dma_attach 6-7 50831 _000812_hash NULL
82910 +_000814_hash dn_sendmsg 4 38390 _000814_hash NULL
82911 +_000815_hash do_dccp_setsockopt 5 54377 _000815_hash NULL
82912 +_000816_hash do_jffs2_setxattr 5 25910 _000816_hash NULL
82913 +_000817_hash do_msgsnd 4 1387 _000817_hash NULL
82914 +_000818_hash do_raw_setsockopt 5 55215 _000818_hash NULL
82915 +_000819_hash do_readv_writev 4 51849 _000819_hash NULL
82916 +_000820_hash do_sync 1 9604 _000820_hash NULL
82917 +_000821_hash dup_array 3 33551 _000821_hash NULL
82918 +_000822_hash dvb_audio_write 3 51275 _000822_hash NULL
82919 +_000823_hash dvb_ca_en50221_init 4 45718 _000823_hash NULL
82920 +_000824_hash dvb_video_write 3 754 _000824_hash NULL
82921 +_000825_hash econet_sendmsg 4 51430 _000825_hash NULL
82922 +_000826_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000826_hash NULL
82923 +_000827_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000827_hash NULL
82924 +_000828_hash ecryptfs_send_message_locked 2 31801 _000828_hash NULL
82925 +_000829_hash edac_device_alloc_ctl_info 1 5941 _000829_hash NULL
82926 +_000830_hash edac_mc_alloc 1 54846 _000830_hash NULL
82927 +_000831_hash edac_pci_alloc_ctl_info 1 63388 _000831_hash NULL
82928 +_000832_hash efivar_create_sysfs_entry 2 19485 _000832_hash NULL
82929 +_000833_hash em28xx_alloc_isoc 4 46892 _000833_hash NULL
82930 +_000834_hash enable_write 3 30456 _000834_hash NULL
82931 +_000835_hash enclosure_register 3 57412 _000835_hash NULL
82932 +_000836_hash ext4_kvzalloc 1 47605 _000836_hash NULL
82933 +_000837_hash extend_netdev_table 2 31680 _000837_hash NULL
82934 +_000838_hash __feat_register_sp 6 64712 _000838_hash NULL
82935 +_000839_hash __ffs_ep0_read_events 3 48868 _000839_hash NULL
82936 +_000840_hash ffs_ep0_write 3 9438 _000840_hash NULL
82937 +_000841_hash ffs_epfile_read 3 18775 _000841_hash NULL
82938 +_000842_hash ffs_epfile_write 3 48014 _000842_hash NULL
82939 +_000843_hash fib_info_hash_alloc 1 9075 _000843_hash NULL
82940 +_000844_hash fillonedir 3 41746 _000844_hash NULL
82941 +_000845_hash flexcop_device_kmalloc 1 54793 _000845_hash NULL
82942 +_000846_hash frame_alloc 4 15981 _000846_hash NULL
82943 +_000847_hash fw_node_create 2 9559 _000847_hash NULL
82944 +_000848_hash garmin_read_process 3 27509 _000848_hash NULL
82945 +_000849_hash garp_request_join 4 7471 _000849_hash NULL
82946 +_000850_hash get_derived_key 4 61100 _000850_hash NULL
82947 +_000851_hash get_entry 4 16003 _000851_hash NULL
82948 +_000852_hash get_free_de 2 33714 _000852_hash NULL
82949 +_000853_hash get_new_cssid 2 51665 _000853_hash NULL
82950 +_000854_hash getxattr 4 24398 _000854_hash NULL
82951 +_000855_hash gspca_dev_probe2 4 59833 _000855_hash NULL
82952 +_000856_hash hcd_alloc_coherent 5 55862 _000856_hash NULL
82953 +_000857_hash hci_sock_sendmsg 4 37420 _000857_hash NULL
82954 +_000858_hash hid_register_field 2-3 4874 _000858_hash NULL
82955 +_000860_hash hid_report_raw_event 4 7024 _000860_hash NULL
82956 +_000861_hash hpi_alloc_control_cache 1 35351 _000861_hash NULL
82957 +_000862_hash hugetlbfs_read_actor 2-5-4 34547 _000862_hash NULL
82958 +_000865_hash hvc_alloc 4 12579 _000865_hash NULL
82959 +_000866_hash __hwahc_dev_set_key 5 46328 _000866_hash NULL
82960 +_000867_hash i2400m_zrealloc_2x 3 54166 _001430_hash NULL nohasharray
82961 +_000868_hash ib_alloc_device 1 26483 _000868_hash NULL
82962 +_000869_hash ib_create_send_mad 5 1196 _000869_hash NULL
82963 +_000870_hash ibmasm_new_command 2 25714 _000870_hash NULL
82964 +_000871_hash ib_send_cm_drep 3 50186 _000871_hash NULL
82965 +_000872_hash ib_send_cm_mra 4 60202 _000872_hash NULL
82966 +_000873_hash ib_send_cm_rtu 3 63138 _000873_hash NULL
82967 +_000874_hash ieee80211_key_alloc 3 19065 _000874_hash NULL
82968 +_000875_hash ieee80211_mgmt_tx 9 46860 _000875_hash NULL
82969 +_000876_hash ieee80211_send_probe_req 6 6924 _000876_hash NULL
82970 +_000877_hash if_writecmd 2 815 _000877_hash NULL
82971 +_000878_hash init_bch 1-2 64130 _000878_hash NULL
82972 +_000880_hash init_ipath 1 48187 _000880_hash NULL
82973 +_000881_hash init_list_set 2-3 39188 _000881_hash NULL
82974 +_000883_hash init_q 4 132 _000883_hash NULL
82975 +_000884_hash init_state 2 60165 _000884_hash NULL
82976 +_000885_hash init_tag_map 3 57515 _000885_hash NULL
82977 +_000886_hash input_ff_create 2 21240 _000886_hash NULL
82978 +_000887_hash input_mt_init_slots 2 31183 _000887_hash NULL
82979 +_000888_hash interfaces 2 38859 _000888_hash NULL
82980 +_000889_hash ioat2_alloc_ring 2 11172 _000889_hash NULL
82981 +_000890_hash ip_generic_getfrag 3-4 12187 _000890_hash NULL
82982 +_000892_hash ipr_alloc_ucode_buffer 1 40199 _000892_hash NULL
82983 +_000893_hash ip_set_alloc 1 57953 _000893_hash NULL
82984 +_000894_hash ipv6_flowlabel_opt 3 58135 _001125_hash NULL nohasharray
82985 +_000895_hash ipv6_renew_options 5 28867 _000895_hash NULL
82986 +_000896_hash ipxrtr_route_packet 4 54036 _000896_hash NULL
82987 +_000897_hash irda_sendmsg 4 4388 _000897_hash NULL
82988 +_000898_hash irda_sendmsg_dgram 4 38563 _000898_hash NULL
82989 +_000899_hash irda_sendmsg_ultra 4 42047 _000899_hash NULL
82990 +_000900_hash irias_add_octseq_attrib 4 29983 _000900_hash NULL
82991 +_000901_hash irq_alloc_generic_chip 2 26650 _000901_hash NULL
82992 +_000902_hash irq_domain_add_linear 2 29236 _000902_hash NULL
82993 +_000903_hash iscsi_alloc_session 3 49390 _000903_hash NULL
82994 +_000904_hash iscsi_create_conn 2 50425 _000904_hash NULL
82995 +_000905_hash iscsi_create_endpoint 1 15193 _000905_hash NULL
82996 +_000906_hash iscsi_create_iface 5 38510 _000906_hash NULL
82997 +_000907_hash iscsi_decode_text_input 4 58292 _000907_hash NULL
82998 +_000908_hash iscsi_pool_init 2-4 54913 _000908_hash NULL
82999 +_000910_hash iscsit_dump_data_payload 2 38683 _000910_hash NULL
83000 +_000911_hash isdn_write 3 45863 _000911_hash NULL
83001 +_000912_hash isku_receive 4 54130 _000912_hash NULL
83002 +_000913_hash isku_send 4 41542 _000913_hash NULL
83003 +_000914_hash islpci_mgt_transaction 5 23610 _000914_hash NULL
83004 +_000915_hash iso_sched_alloc 1 13377 _002079_hash NULL nohasharray
83005 +_000916_hash ivtv_v4l2_write 3 39226 _000916_hash NULL
83006 +_000917_hash iwl_trans_txq_alloc 3 36147 _000917_hash NULL
83007 +_000918_hash iwmct_fw_parser_init 4 37876 _000918_hash NULL
83008 +_000919_hash iwm_notif_send 6 12295 _000919_hash NULL
83009 +_000920_hash iwm_ntf_calib_res 3 11686 _000920_hash NULL
83010 +_000921_hash iwm_umac_set_config_var 4 17320 _000921_hash NULL
83011 +_000922_hash ixgbe_alloc_q_vector 3-5 45428 _000922_hash NULL
83012 +_000924_hash jbd2_journal_init_revoke 2 51088 _000924_hash NULL
83013 +_000925_hash jffs2_write_dirent 5 37311 _000925_hash NULL
83014 +_000926_hash journal_init_revoke 2 56933 _000926_hash NULL
83015 +_000927_hash keyctl_instantiate_key 3 41855 _000927_hash NULL
83016 +_000928_hash keyctl_instantiate_key_iov 3 16969 _000928_hash NULL
83017 +_000929_hash __kfifo_from_user 3 20399 _000929_hash NULL
83018 +_000930_hash kimage_crash_alloc 3 3233 _000930_hash NULL
83019 +_000931_hash kimage_normal_alloc 3 31140 _000931_hash NULL
83020 +_000932_hash kmem_realloc 2 37489 _000932_hash NULL
83021 +_000933_hash kmem_zalloc 1 11510 _000933_hash NULL
83022 +_000934_hash koneplus_send 4 18226 _000934_hash NULL
83023 +_000935_hash koneplus_sysfs_read 6 42792 _000935_hash NULL
83024 +_000936_hash kovaplus_send 4 10009 _000936_hash NULL
83025 +_000937_hash kvm_read_guest_page_mmu 6 37611 _000937_hash NULL
83026 +_000938_hash kvm_set_irq_routing 3 48704 _000938_hash NULL
83027 +_000939_hash kvm_write_guest_cached 4 11106 _000939_hash NULL
83028 +_000940_hash kvm_write_guest_page 5 63555 _000940_hash NULL
83029 +_000941_hash l2cap_skbuff_fromiovec 3-4 35003 _000941_hash NULL
83030 +_000943_hash l2tp_ip_sendmsg 4 50411 _000943_hash NULL
83031 +_000944_hash l2tp_session_create 1 25286 _000944_hash NULL
83032 +_000945_hash lc_create 3 48662 _000945_hash NULL
83033 +_000946_hash leaf_dealloc 3 29566 _000946_hash NULL
83034 +_000947_hash linear_conf 2 23485 _000947_hash NULL
83035 +_000948_hash lirc_buffer_init 2-3 53282 _000948_hash NULL
83036 +_000950_hash llc_ui_sendmsg 4 24987 _000950_hash NULL
83037 +_000951_hash lpfc_sli4_queue_alloc 3 62646 _000951_hash NULL
83038 +_000952_hash mce_request_packet 3 1073 _000952_hash NULL
83039 +_000953_hash mdiobus_alloc_size 1 52259 _000953_hash NULL
83040 +_000954_hash media_entity_init 2-4 15870 _001556_hash NULL nohasharray
83041 +_000956_hash memstick_alloc_host 1 142 _000956_hash NULL
83042 +_000957_hash mesh_table_alloc 1 22305 _000957_hash NULL
83043 +_000958_hash mfd_add_devices 4 56753 _000958_hash NULL
83044 +_000959_hash mISDN_sock_sendmsg 4 41035 _000959_hash NULL
83045 +_000960_hash mmc_alloc_host 1 48097 _000960_hash NULL
83046 +_000961_hash mmc_test_alloc_mem 3 28102 _000961_hash NULL
83047 +_000962_hash mpi_alloc 1 18094 _000962_hash NULL
83048 +_000963_hash mpihelp_mul_karatsuba_case 5-3 23918 _000963_hash NULL
83049 +_000964_hash mpihelp_mul_n 4 16405 _000964_hash NULL
83050 +_000965_hash mpi_set_bit 2 15104 _000965_hash NULL
83051 +_000966_hash mpi_set_highbit 2 37327 _001420_hash NULL nohasharray
83052 +_000967_hash mtd_concat_create 2 14416 _000967_hash NULL
83053 +_000968_hash mvumi_alloc_mem_resource 3 47750 _000968_hash NULL
83054 +_000969_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _000969_hash NULL
83055 +_000970_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _000970_hash NULL
83056 +_000972_hash mwl8k_cmd_set_beacon 4 23110 _000972_hash NULL
83057 +_000973_hash neigh_hash_alloc 1 17595 _000973_hash NULL
83058 +_000974_hash netlink_sendmsg 4 33708 _001172_hash NULL nohasharray
83059 +_000975_hash netxen_alloc_sds_rings 2 13417 _000975_hash NULL
83060 +_000976_hash new_bind_ctl 2 35324 _000976_hash NULL
83061 +_000977_hash new_dir 3 31919 _000977_hash NULL
83062 +_000978_hash new_tape_buffer 2 32866 _000978_hash NULL
83063 +_000979_hash nfc_llcp_build_tlv 3 19536 _000979_hash NULL
83064 +_000980_hash nfc_llcp_send_i_frame 3 59130 _000980_hash NULL
83065 +_000981_hash nfs4_alloc_slots 1 2454 _000981_hash NULL
83066 +_000982_hash nfsctl_transaction_write 3 64800 _000982_hash NULL
83067 +_000983_hash nfs_idmap_request_key 3 30208 _000983_hash NULL
83068 +_000984_hash nfs_readdata_alloc 1 9990 _000984_hash NULL
83069 +_000985_hash nfs_writedata_alloc 1 62868 _000985_hash NULL
83070 +_000986_hash nl_pid_hash_zalloc 1 23314 _000986_hash NULL
83071 +_000987_hash nr_sendmsg 4 53656 _000987_hash NULL
83072 +_000988_hash nsm_create_handle 4 38060 _000988_hash NULL
83073 +_000989_hash ntfs_copy_from_user_iovec 3-6 49829 _000989_hash NULL
83074 +_000991_hash ntfs_file_buffered_write 4-6 41442 _000991_hash NULL
83075 +_000993_hash __ntfs_malloc 1 34022 _000993_hash NULL
83076 +_000994_hash nvme_alloc_queue 3 46865 _000994_hash NULL
83077 +_000995_hash ocfs2_acl_from_xattr 2 21604 _000995_hash NULL
83078 +_000996_hash ocfs2_control_message 3 19564 _000996_hash NULL
83079 +_000997_hash opera1_usb_i2c_msgxfer 4 64521 _000997_hash NULL
83080 +_000998_hash _ore_get_io_state 3 2166 _000998_hash NULL
83081 +_000999_hash orig_hash_add_if 2 53676 _000999_hash NULL
83082 +_001000_hash orig_hash_del_if 2 45080 _001000_hash NULL
83083 +_001001_hash orinoco_set_key 5-7 17878 _001001_hash NULL
83084 +_001003_hash osdmap_set_max_osd 2 57630 _001003_hash NULL
83085 +_001004_hash _osd_realloc_seg 3 54352 _001004_hash NULL
83086 +_001005_hash OSDSetBlock 2-4 38986 _001005_hash NULL
83087 +_001007_hash osst_execute 7-6 17607 _001007_hash NULL
83088 +_001008_hash osst_write 3 31581 _001008_hash NULL
83089 +_001009_hash otp_read 2-5-4 10594 _001009_hash NULL
83090 +_001012_hash ovs_vport_alloc 1 33475 _001012_hash NULL
83091 +_001013_hash packet_sendmsg_spkt 4 28885 _001013_hash NULL
83092 +_001014_hash pair_device 4 61175 _001708_hash NULL nohasharray
83093 +_001015_hash pccard_store_cis 6 18176 _001015_hash NULL
83094 +_001016_hash pci_add_cap_save_buffer 3 3426 _001016_hash NULL
83095 +_001017_hash pcnet32_realloc_rx_ring 3 36598 _001017_hash NULL
83096 +_001018_hash pcnet32_realloc_tx_ring 3 38428 _001018_hash NULL
83097 +_001019_hash pcpu_mem_zalloc 1 22948 _001019_hash NULL
83098 +_001020_hash pep_sendmsg 4 62524 _001020_hash NULL
83099 +_001021_hash pfkey_sendmsg 4 47394 _001021_hash NULL
83100 +_001022_hash pidlist_resize 2 496 _001022_hash NULL
83101 +_001023_hash pin_code_reply 4 46510 _001023_hash NULL
83102 +_001024_hash ping_getfrag 3-4 8360 _001024_hash NULL
83103 +_001026_hash pipe_set_size 2 5204 _001026_hash NULL
83104 +_001027_hash pkt_bio_alloc 1 48284 _001027_hash NULL
83105 +_001028_hash platform_create_bundle 4-6 12785 _001028_hash NULL
83106 +_001030_hash play_iframe 3 8219 _001030_hash NULL
83107 +_001031_hash pm8001_store_update_fw 4 55716 _001031_hash NULL
83108 +_001032_hash pmcraid_alloc_sglist 1 9864 _001032_hash NULL
83109 +_001033_hash pn533_dep_link_up 5 7659 _001033_hash NULL
83110 +_001034_hash pnp_alloc 1 24869 _001419_hash NULL nohasharray
83111 +_001035_hash pn_sendmsg 4 12640 _001035_hash NULL
83112 +_001036_hash pppoe_sendmsg 4 48039 _001036_hash NULL
83113 +_001037_hash pppol2tp_sendmsg 4 56420 _001037_hash NULL
83114 +_001038_hash process_vm_rw 3-5 47533 _001038_hash NULL
83115 +_001040_hash process_vm_rw_single_vec 1-2 26213 _001040_hash NULL
83116 +_001042_hash proc_write 3 51003 _001042_hash NULL
83117 +_001043_hash profile_load 3 58267 _001043_hash NULL
83118 +_001044_hash profile_remove 3 8556 _001044_hash NULL
83119 +_001045_hash profile_replace 3 14652 _001045_hash NULL
83120 +_001046_hash pscsi_get_bio 1 56103 _001046_hash NULL
83121 +_001047_hash pyra_send 4 12061 _001047_hash NULL
83122 +_001048_hash qc_capture 3 19298 _001048_hash NULL
83123 +_001049_hash qla4xxx_alloc_work 2 44813 _001049_hash NULL
83124 +_001050_hash qlcnic_alloc_msix_entries 2 46160 _001050_hash NULL
83125 +_001051_hash qlcnic_alloc_sds_rings 2 26795 _001051_hash NULL
83126 +_001052_hash queue_received_packet 5 9657 _001052_hash NULL
83127 +_001053_hash raw_send_hdrinc 4 58803 _001053_hash NULL
83128 +_001054_hash raw_sendmsg 4 23078 _001054_hash &_000022_hash
83129 +_001055_hash rawsock_sendmsg 4 60010 _001055_hash NULL
83130 +_001056_hash rawv6_send_hdrinc 3 35425 _001056_hash NULL
83131 +_001057_hash rb_alloc 1 3102 _001057_hash NULL
83132 +_001058_hash rbd_alloc_coll 1 33678 _001058_hash NULL
83133 +_001059_hash rbd_create_rw_ops 2 4605 _001059_hash NULL
83134 +_001060_hash rds_ib_inc_copy_to_user 3 55007 _001060_hash NULL
83135 +_001061_hash rds_iw_inc_copy_to_user 3 29214 _001061_hash NULL
83136 +_001062_hash rds_message_alloc 1 10517 _001062_hash NULL
83137 +_001063_hash rds_message_copy_from_user 3 45510 _001063_hash NULL
83138 +_001064_hash rds_message_inc_copy_to_user 3 26540 _001064_hash NULL
83139 +_001065_hash redrat3_transmit_ir 3 64244 _001065_hash NULL
83140 +_001066_hash regcache_rbtree_insert_to_block 5 58009 _001066_hash NULL
83141 +_001067_hash _regmap_raw_write 4 42652 _001067_hash NULL
83142 +_001068_hash regmap_register_patch 3 21681 _001068_hash NULL
83143 +_001069_hash relay_alloc_page_array 1 52735 _001069_hash NULL
83144 +_001070_hash remove_uuid 4 64505 _001070_hash NULL
83145 +_001071_hash reshape_ring 2 29147 _001071_hash NULL
83146 +_001072_hash RESIZE_IF_NEEDED 2 56286 _001072_hash NULL
83147 +_001073_hash resize_stripes 2 61650 _001073_hash NULL
83148 +_001074_hash rfcomm_sock_sendmsg 4 37661 _001074_hash NULL
83149 +_001075_hash rose_sendmsg 4 20249 _001075_hash NULL
83150 +_001076_hash rxrpc_send_data 5 21553 _001076_hash NULL
83151 +_001077_hash rxrpc_setsockopt 5 50286 _001077_hash NULL
83152 +_001078_hash saa7146_vmalloc_build_pgtable 2 19780 _001078_hash NULL
83153 +_001079_hash saa7164_buffer_alloc_user 2 9627 _001079_hash NULL
83154 +_001081_hash sco_send_frame 3 41815 _001081_hash NULL
83155 +_001082_hash scsi_host_alloc 2 63041 _001082_hash NULL
83156 +_001083_hash scsi_tgt_kspace_exec 8 9522 _001083_hash NULL
83157 +_001084_hash sctp_sendmsg 4 61919 _001084_hash NULL
83158 +_001085_hash sctp_setsockopt 5 44788 _001085_hash NULL
83159 +_001086_hash sctp_setsockopt_connectx 3 6073 _001086_hash NULL
83160 +_001087_hash sctp_setsockopt_connectx_old 3 22631 _001087_hash NULL
83161 +_001088_hash sctp_tsnmap_init 2 36446 _001088_hash NULL
83162 +_001089_hash sctp_user_addto_chunk 2-3 62047 _001089_hash NULL
83163 +_001091_hash security_context_to_sid 2 19839 _001091_hash NULL
83164 +_001092_hash security_context_to_sid_default 2 3492 _001092_hash NULL
83165 +_001093_hash security_context_to_sid_force 2 20724 _001093_hash NULL
83166 +_001094_hash selinux_transaction_write 3 59038 _001094_hash NULL
83167 +_001095_hash sel_write_access 3 51704 _001095_hash NULL
83168 +_001096_hash sel_write_create 3 11353 _001096_hash NULL
83169 +_001097_hash sel_write_member 3 28800 _001097_hash NULL
83170 +_001098_hash sel_write_relabel 3 55195 _001098_hash NULL
83171 +_001099_hash sel_write_user 3 45060 _001099_hash NULL
83172 +_001100_hash __seq_open_private 3 40715 _001100_hash NULL
83173 +_001101_hash serverworks_create_gatt_pages 1 46582 _001101_hash NULL
83174 +_001102_hash set_connectable 4 56458 _001102_hash NULL
83175 +_001103_hash set_dev_class 4 39645 _001697_hash NULL nohasharray
83176 +_001104_hash set_discoverable 4 48141 _001104_hash NULL
83177 +_001105_hash setkey 3 14987 _001105_hash NULL
83178 +_001106_hash set_le 4 30581 _001106_hash NULL
83179 +_001107_hash set_link_security 4 4502 _001107_hash NULL
83180 +_001108_hash set_local_name 4 55757 _001108_hash NULL
83181 +_001109_hash set_powered 4 12129 _001109_hash NULL
83182 +_001110_hash set_ssp 4 62411 _001110_hash NULL
83183 +_001111_hash sg_build_sgat 3 60179 _001111_hash &_000305_hash
83184 +_001112_hash sg_read_oxfer 3 51724 _001112_hash NULL
83185 +_001113_hash shmem_xattr_set 4 11843 _001113_hash NULL
83186 +_001114_hash simple_alloc_urb 3 60420 _001114_hash NULL
83187 +_001115_hash sisusb_send_bridge_packet 2 11649 _001115_hash NULL
83188 +_001116_hash sisusb_send_packet 2 20891 _001116_hash NULL
83189 +_001117_hash skb_add_data_nocache 4 4682 _001117_hash NULL
83190 +_001118_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001118_hash NULL
83191 +_001121_hash skb_copy_to_page_nocache 6 58624 _001121_hash NULL
83192 +_001122_hash sk_chk_filter 2 42095 _001122_hash NULL
83193 +_001123_hash skcipher_sendmsg 4 30290 _001123_hash NULL
83194 +_001124_hash sl_change_mtu 2 7396 _001124_hash NULL
83195 +_001125_hash slhc_init 1-2 58135 _001125_hash &_000894_hash
83196 +_001127_hash sm501_create_subdev 3-4 48668 _001127_hash NULL
83197 +_001129_hash smk_write_access 3 49561 _001129_hash NULL
83198 +_001130_hash snapshot_write 3 28351 _001130_hash NULL
83199 +_001131_hash snd_ac97_pcm_assign 2 30218 _001131_hash NULL
83200 +_001132_hash snd_card_create 4 64418 _001411_hash NULL nohasharray
83201 +_001133_hash snd_emux_create_port 3 42533 _001133_hash NULL
83202 +_001134_hash snd_gus_dram_write 4 38784 _001134_hash NULL
83203 +_001135_hash snd_midi_channel_alloc_set 1 28153 _001135_hash NULL
83204 +_001136_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001136_hash NULL
83205 +_001137_hash snd_pcm_oss_sync1 2 45298 _001137_hash NULL
83206 +_001138_hash snd_pcm_oss_write 3 38108 _001138_hash NULL
83207 +_001139_hash snd_pcm_plugin_build 5 25505 _001139_hash NULL
83208 +_001140_hash snd_rawmidi_kernel_write 3 25106 _001140_hash NULL
83209 +_001141_hash snd_rawmidi_write 3 28008 _001141_hash NULL
83210 +_001142_hash snd_rme32_playback_copy 5 43732 _001142_hash NULL
83211 +_001143_hash snd_rme96_playback_copy 5 13111 _001143_hash NULL
83212 +_001144_hash snd_seq_device_new 4 31753 _001144_hash NULL
83213 +_001145_hash snd_seq_oss_readq_new 2 14283 _001145_hash NULL
83214 +_001146_hash snd_vx_create 4 40948 _001146_hash NULL
83215 +_001147_hash sock_setsockopt 5 50088 _001147_hash NULL
83216 +_001148_hash sound_write 3 5102 _001148_hash NULL
83217 +_001149_hash _sp2d_alloc 1 16944 _001149_hash NULL
83218 +_001150_hash spi_alloc_master 2 45223 _001150_hash NULL
83219 +_001151_hash spidev_message 3 5518 _001151_hash NULL
83220 +_001152_hash spi_register_board_info 2 35651 _001152_hash NULL
83221 +_001153_hash squashfs_cache_init 2 41656 _001153_hash NULL
83222 +_001154_hash squashfs_read_data 6 59440 _001154_hash NULL
83223 +_001155_hash srp_alloc_iu 2 44227 _001155_hash NULL
83224 +_001156_hash srp_iu_pool_alloc 2 17920 _001156_hash NULL
83225 +_001157_hash srp_ring_alloc 2 26760 _001157_hash NULL
83226 +_001159_hash start_isoc_chain 2 565 _001159_hash NULL
83227 +_001160_hash stk_prepare_sio_buffers 2 57168 _001160_hash NULL
83228 +_001161_hash store_iwmct_log_level 4 60209 _001161_hash NULL
83229 +_001162_hash store_iwmct_log_level_fw 4 1974 _001162_hash NULL
83230 +_001163_hash st_write 3 16874 _001163_hash NULL
83231 +_001164_hash svc_pool_map_alloc_arrays 2 47181 _001164_hash NULL
83232 +_001165_hash symtab_init 2 61050 _001165_hash NULL
83233 +_001166_hash sys_bind 3 10799 _001166_hash NULL
83234 +_001167_hash sys_connect 3 15291 _001167_hash NULL
83235 +_001168_hash sys_flistxattr 3 41407 _001168_hash NULL
83236 +_001169_hash sys_fsetxattr 4 49736 _001169_hash NULL
83237 +_001170_hash sysfs_write_file 3 57116 _001170_hash NULL
83238 +_001171_hash sys_ipc 3 4889 _001171_hash NULL
83239 +_001172_hash sys_keyctl 4 33708 _001172_hash &_000974_hash
83240 +_001173_hash sys_listxattr 3 27833 _001173_hash NULL
83241 +_001174_hash sys_llistxattr 3 4532 _001174_hash NULL
83242 +_001175_hash sys_lsetxattr 4 61177 _001175_hash NULL
83243 +_001176_hash sys_mq_timedsend 3 57661 _001176_hash NULL
83244 +_001177_hash sys_sched_setaffinity 2 32046 _001177_hash NULL
83245 +_001178_hash sys_semop 3 39457 _001178_hash NULL
83246 +_001179_hash sys_sendto 6 20809 _001179_hash NULL
83247 +_001180_hash sys_setxattr 4 37880 _001180_hash NULL
83248 +_001181_hash t4_alloc_mem 1 32342 _001181_hash NULL
83249 +_001182_hash tcf_hash_create 4 54360 _001182_hash NULL
83250 +_001183_hash __team_options_register 3 63941 _001183_hash NULL
83251 +_001184_hash test_unaligned_bulk 3 52333 _001184_hash NULL
83252 +_001185_hash tifm_alloc_adapter 1 10903 _001185_hash NULL
83253 +_001186_hash timeout_write 3 50991 _001186_hash NULL
83254 +_001187_hash tipc_link_send_sections_fast 4 37920 _001187_hash NULL
83255 +_001188_hash tipc_subseq_alloc 1 5957 _001188_hash NULL
83256 +_001189_hash tm6000_read_write_usb 7 50774 _001189_hash NULL
83257 +_001190_hash tnode_alloc 1 49407 _001190_hash NULL
83258 +_001191_hash tomoyo_commit_ok 2 20167 _001191_hash NULL
83259 +_001192_hash tomoyo_scan_bprm 2-4 15642 _001192_hash NULL
83260 +_001194_hash tps65910_i2c_write 3 39531 _001194_hash NULL
83261 +_001195_hash ts_write 3 64336 _001195_hash NULL
83262 +_001196_hash ttusb2_msg 4 3100 _001196_hash NULL
83263 +_001197_hash tty_write 3 5494 _001197_hash NULL
83264 +_001198_hash ubi_dbg_check_all_ff 4 59810 _001198_hash NULL
83265 +_001199_hash ubi_dbg_check_write 5 48525 _001199_hash NULL
83266 +_001200_hash ubifs_setxattr 4 59650 _001370_hash NULL nohasharray
83267 +_001201_hash udf_sb_alloc_partition_maps 2 62313 _001201_hash NULL
83268 +_001202_hash udplite_getfrag 3-4 14479 _001202_hash NULL
83269 +_001204_hash ulong_write_file 3 26485 _001204_hash NULL
83270 +_001205_hash unix_dgram_sendmsg 4 45699 _001205_hash NULL
83271 +_001206_hash unix_stream_sendmsg 4 61455 _001206_hash NULL
83272 +_001207_hash unlink_queued 3-4 645 _001207_hash NULL
83273 +_001208_hash update_pmkid 4 2481 _001208_hash NULL
83274 +_001209_hash usb_alloc_coherent 2 65444 _001209_hash NULL
83275 +_001210_hash uvc_alloc_buffers 2 9656 _001210_hash NULL
83276 +_001211_hash uvc_alloc_entity 3 20836 _001211_hash NULL
83277 +_001212_hash v4l2_ctrl_new 7 38725 _001212_hash NULL
83278 +_001213_hash v4l2_event_subscribe 3 19510 _001213_hash NULL
83279 +_001214_hash vb2_read 3 42703 _001214_hash NULL
83280 +_001215_hash vb2_write 3 31948 _001215_hash NULL
83281 +_001216_hash vc_resize 2-3 3585 _001216_hash NULL
83282 +_001218_hash __vhost_add_used_n 3 26554 _001218_hash NULL
83283 +_001219_hash __videobuf_alloc_vb 1 27062 _001219_hash NULL
83284 +_001220_hash videobuf_dma_init_kernel 3 6963 _001220_hash NULL
83285 +_001221_hash virtqueue_add_buf 3-4 59470 _001221_hash NULL
83286 +_001223_hash vmalloc 1 15464 _001223_hash NULL
83287 +_001224_hash vmalloc_to_sg 2 58354 _001224_hash NULL
83288 +_001225_hash vol_cdev_write 3 40915 _001225_hash NULL
83289 +_001226_hash vxge_device_register 4 7752 _001226_hash NULL
83290 +_001227_hash __vxge_hw_channel_allocate 3 55462 _001227_hash NULL
83291 +_001228_hash vzalloc 1 47421 _001228_hash NULL
83292 +_001229_hash vzalloc_node 1 23424 _001229_hash NULL
83293 +_001230_hash wa_nep_queue 2 8858 _001230_hash NULL
83294 +_001231_hash __wa_xfer_setup_segs 2 56725 _001231_hash NULL
83295 +_001232_hash wiphy_new 2 2482 _001232_hash NULL
83296 +_001233_hash wpan_phy_alloc 1 48056 _001233_hash NULL
83297 +_001234_hash wusb_ccm_mac 7 32199 _001234_hash NULL
83298 +_001235_hash x25_sendmsg 4 12487 _001235_hash NULL
83299 +_001236_hash xfrm_hash_alloc 1 10997 _001236_hash NULL
83300 +_001237_hash _xfs_buf_get_pages 2 46811 _001237_hash NULL
83301 +_001238_hash xfs_da_buf_make 1 55845 _001238_hash NULL
83302 +_001239_hash xfs_da_grow_inode_int 3 21785 _001239_hash NULL
83303 +_001240_hash xfs_dir_cilookup_result 3 64288 _001240_hash NULL
83304 +_001241_hash xfs_iext_add_indirect_multi 3 32400 _001241_hash NULL
83305 +_001242_hash xfs_iext_inline_to_direct 2 12384 _001242_hash NULL
83306 +_001243_hash xfs_iroot_realloc 2 46826 _001243_hash NULL
83307 +_001244_hash xhci_alloc_stream_info 3 63902 _001244_hash NULL
83308 +_001245_hash xlog_recover_add_to_trans 4 62839 _001245_hash NULL
83309 +_001246_hash xprt_alloc 2 1475 _001246_hash NULL
83310 +_001247_hash xt_alloc_table_info 1 57903 _001247_hash NULL
83311 +_001248_hash _zd_iowrite32v_async_locked 3 39034 _001248_hash NULL
83312 +_001249_hash zd_usb_iowrite16v 3 49744 _001249_hash NULL
83313 +_001250_hash acpi_ds_build_internal_package_obj 3 58271 _001250_hash NULL
83314 +_001251_hash acpi_system_read_event 3 55362 _001251_hash NULL
83315 +_001252_hash acpi_ut_create_buffer_object 1 42030 _001252_hash NULL
83316 +_001253_hash acpi_ut_create_package_object 1 17594 _001253_hash NULL
83317 +_001254_hash acpi_ut_create_string_object 1 15360 _001254_hash NULL
83318 +_001255_hash ad7879_spi_multi_read 3 8218 _001255_hash NULL
83319 +_001256_hash add_child 4 45201 _001256_hash NULL
83320 +_001257_hash add_port 2 54941 _001257_hash NULL
83321 +_001258_hash adu_read 3 24177 _001258_hash NULL
83322 +_001259_hash afs_cell_create 2 27346 _001259_hash NULL
83323 +_001260_hash agp_generic_alloc_user 1 9470 _001260_hash NULL
83324 +_001261_hash alloc_agpphysmem_i8xx 1 39427 _001261_hash NULL
83325 +_001262_hash allocate_cnodes 1 5329 _001262_hash NULL
83326 +_001263_hash ___alloc_bootmem 1 11410 _001263_hash NULL
83327 +_001264_hash __alloc_bootmem_nopanic 1 65397 _001264_hash NULL
83328 +_001265_hash alloc_bulk_urbs_generic 5 12127 _001265_hash NULL
83329 +_001266_hash alloc_candev 1-2 7776 _001266_hash NULL
83330 +_001268_hash ____alloc_ei_netdev 1 51475 _001268_hash NULL
83331 +_001269_hash alloc_etherdev_mqs 1 36450 _001269_hash NULL
83332 +_001270_hash alloc_extent_buffer 3 52824 _001270_hash NULL
83333 +_001271_hash alloc_fcdev 1 18780 _001271_hash NULL
83334 +_001272_hash alloc_fddidev 1 15382 _001272_hash NULL
83335 +_001273_hash alloc_hippi_dev 1 51320 _001273_hash NULL
83336 +_001274_hash alloc_irdadev 1 19140 _001274_hash NULL
83337 +_001275_hash alloc_ltalkdev 1 38071 _001275_hash NULL
83338 +_001276_hash alloc_one_pg_vec_page 1 10747 _001276_hash NULL
83339 +_001277_hash alloc_orinocodev 1 21371 _001277_hash NULL
83340 +_001279_hash alloc_trdev 1 16399 _001279_hash NULL
83341 +_001280_hash async_setkey 3 35521 _001280_hash NULL
83342 +_001281_hash ata_host_alloc_pinfo 3 17325 _001281_hash NULL
83343 +_001284_hash ath6kl_connect_event 7-9-8 14267 _001284_hash NULL
83344 +_001285_hash ath6kl_fwlog_block_read 3 49836 _001285_hash NULL
83345 +_001286_hash ath6kl_fwlog_read 3 32101 _001286_hash NULL
83346 +_001287_hash ath_rx_init 2 43564 _001287_hash NULL
83347 +_001288_hash ath_tx_init 2 60515 _001288_hash NULL
83348 +_001289_hash atm_get_addr 3 31221 _001289_hash NULL
83349 +_001290_hash av7110_ipack_init 2 46655 _001290_hash NULL
83350 +_001291_hash bdx_rxdb_create 1 46525 _001291_hash NULL
83351 +_001292_hash bdx_tx_db_init 2 41719 _001292_hash NULL
83352 +_001293_hash bio_map_kern 3 64751 _001293_hash NULL
83353 +_001294_hash bits_to_user 3 47733 _001294_hash NULL
83354 +_001295_hash __blk_queue_init_tags 2 9778 _001295_hash NULL
83355 +_001296_hash blk_queue_resize_tags 2 28670 _001296_hash NULL
83356 +_001297_hash blk_rq_map_user_iov 5 16772 _001297_hash NULL
83357 +_001298_hash bm_init 2 13529 _001298_hash NULL
83358 +_001299_hash brcmf_alloc_wdev 1 60347 _001299_hash NULL
83359 +_001300_hash btrfs_insert_dir_item 4 59304 _001300_hash NULL
83360 +_001301_hash btrfs_map_block 3 64379 _001301_hash NULL
83361 +_001302_hash c4_add_card 3 54968 _001302_hash NULL
83362 +_001303_hash cache_read 3 24790 _001303_hash NULL
83363 +_001304_hash cache_write 3 13589 _001304_hash NULL
83364 +_001305_hash calc_hmac 3 32010 _001305_hash NULL
83365 +_001306_hash ccid_getsockopt_builtin_ccids 2 53634 _001306_hash NULL
83366 +_001307_hash ceph_copy_page_vector_to_user 4 31270 _001307_hash NULL
83367 +_001308_hash ceph_read_dir 3 17005 _001308_hash NULL
83368 +_001309_hash cfg80211_roamed 5-7 32632 _001309_hash NULL
83369 +_001311_hash ci_ll_init 3 12930 _001311_hash NULL
83370 +_001312_hash coda_psdev_read 3 35029 _001312_hash NULL
83371 +_001313_hash construct_key_and_link 4 8321 _001313_hash NULL
83372 +_001314_hash copy_counters_to_user 5 17027 _001824_hash NULL nohasharray
83373 +_001315_hash copy_entries_to_user 1 52367 _001315_hash NULL
83374 +_001316_hash copy_from_buf 4 27308 _001316_hash NULL
83375 +_001317_hash copy_oldmem_page 3 26164 _001317_hash NULL
83376 +_001318_hash copy_to_user_fromio 3 57432 _001318_hash NULL
83377 +_001319_hash cryptd_hash_setkey 3 42781 _001319_hash NULL
83378 +_001320_hash crypto_authenc_esn_setkey 3 6985 _001320_hash NULL
83379 +_001321_hash crypto_authenc_setkey 3 80 _001321_hash NULL
83380 +_001322_hash cx18_copy_buf_to_user 4 22735 _001322_hash NULL
83381 +_001324_hash cxgbi_ddp_reserve 4 30091 _001324_hash NULL
83382 +_001325_hash datablob_hmac_append 3 40038 _001325_hash NULL
83383 +_001326_hash datablob_hmac_verify 4 24786 _001326_hash NULL
83384 +_001327_hash dataflash_read_fact_otp 3-2 33204 _001327_hash NULL
83385 +_001328_hash dataflash_read_user_otp 3-2 14536 _001328_hash &_000201_hash
83386 +_001329_hash dccp_feat_register_sp 5 17914 _001329_hash NULL
83387 +_001330_hash ddb_input_read 3 9743 _001330_hash NULL
83388 +_001331_hash dev_read 3 56369 _001331_hash NULL
83389 +_001332_hash diva_os_copy_to_user 4 48508 _001332_hash NULL
83390 +_001333_hash diva_os_malloc 2 16406 _001333_hash NULL
83391 +_001334_hash dlm_dir_lookup 4 56662 _001334_hash NULL
83392 +_001335_hash dm_vcalloc 1-2 16814 _001335_hash NULL
83393 +_001337_hash do_proc_readlink 3 14096 _001337_hash NULL
83394 +_001338_hash do_readlink 2 43518 _001338_hash NULL
83395 +_001339_hash __do_replace 5 37227 _001339_hash NULL
83396 +_001340_hash do_sigpending 2 9766 _001340_hash NULL
83397 +_001341_hash drbd_setsockopt 5 16280 _001341_hash &_000371_hash
83398 +_001342_hash dsp_buffer_alloc 2 11684 _001342_hash NULL
83399 +_001343_hash dump_midi 3 51040 _001343_hash NULL
83400 +_001344_hash dvb_dmxdev_set_buffer_size 2 55643 _001344_hash NULL
83401 +_001345_hash dvb_dvr_set_buffer_size 2 9840 _001345_hash NULL
83402 +_001346_hash dvb_ringbuffer_pkt_read_user 3-5 4303 _001346_hash NULL
83403 +_001348_hash dvb_ringbuffer_read_user 3 56702 _001348_hash NULL
83404 +_001349_hash ecryptfs_filldir 3 6622 _001349_hash NULL
83405 +_001350_hash ecryptfs_readlink 3 40775 _001350_hash NULL
83406 +_001351_hash ecryptfs_send_message 2 18322 _001351_hash NULL
83407 +_001352_hash em28xx_init_isoc 4 62883 _001352_hash &_000721_hash
83408 +_001353_hash et61x251_read 3 25420 _001353_hash NULL
83409 +_001354_hash ext4_add_new_descs 3 19509 _001354_hash NULL
83410 +_001355_hash fat_ioctl_filldir 3 36621 _001355_hash NULL
83411 +_001356_hash fd_copyout 3 59323 _001356_hash NULL
83412 +_001357_hash f_hidg_read 3 6238 _001357_hash NULL
83413 +_001358_hash filldir 3 55137 _001358_hash NULL
83414 +_001359_hash filldir64 3 46469 _001359_hash NULL
83415 +_001360_hash fops_read 3 40672 _001360_hash NULL
83416 +_001361_hash from_buffer 3 18625 _001361_hash NULL
83417 +_001362_hash fsm_init 2 16134 _001362_hash NULL
83418 +_001363_hash get_subdir 3 62581 _001363_hash NULL
83419 +_001364_hash gspca_dev_probe 4 2570 _001364_hash NULL
83420 +_001365_hash handle_received_packet 3 22457 _001365_hash NULL
83421 +_001366_hash hash_setkey 3 48310 _001366_hash NULL
83422 +_001367_hash hdlcdrv_register 2 6792 _001367_hash NULL
83423 +_001368_hash hdpvr_read 3 9273 _001368_hash NULL
83424 +_001369_hash hid_input_report 4 32458 _001369_hash NULL
83425 +_001370_hash hidraw_read 3 59650 _001370_hash &_001200_hash
83426 +_001371_hash HiSax_readstatus 2 15752 _001371_hash NULL
83427 +_001373_hash __hwahc_op_set_gtk 4 42038 _001373_hash NULL
83428 +_001374_hash __hwahc_op_set_ptk 5 36510 _001374_hash NULL
83429 +_001375_hash ib_copy_to_udata 3 27525 _001375_hash NULL
83430 +_001376_hash idetape_chrdev_read 3 2097 _001376_hash NULL
83431 +_001377_hash ieee80211_alloc_hw 1 43829 _001377_hash NULL
83432 +_001378_hash ieee80211_bss_info_update 4 13991 _001378_hash NULL
83433 +_001379_hash ilo_read 3 32531 _001379_hash NULL
83434 +_001380_hash init_map_ipmac 3-4 63896 _001380_hash NULL
83435 +_001382_hash init_tid_tabs 2-4-3 13252 _001382_hash NULL
83436 +_001385_hash iowarrior_read 3 53483 _001385_hash NULL
83437 +_001386_hash ipv6_getsockopt_sticky 5 56711 _001386_hash NULL
83438 +_001387_hash ipwireless_send_packet 4 8328 _001387_hash NULL
83439 +_001388_hash ipx_sendmsg 4 1362 _001388_hash NULL
83440 +_001389_hash iscsi_conn_setup 2 35159 _001389_hash NULL
83441 +_001390_hash iscsi_create_session 3 51647 _001390_hash NULL
83442 +_001391_hash iscsi_host_alloc 2 36671 _001391_hash NULL
83443 +_001392_hash iscsi_session_setup 4-5 196 _001392_hash NULL
83444 +_001394_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _001701_hash NULL nohasharray
83445 +_001395_hash isdn_ppp_read 4 50356 _001395_hash NULL
83446 +_001396_hash isku_sysfs_read 6 58806 _001396_hash NULL
83447 +_001397_hash isku_sysfs_write 6 49767 _001397_hash NULL
83448 +_001398_hash iso_alloc_urb 4-5 45206 _001398_hash NULL
83449 +_001400_hash ivtv_copy_buf_to_user 4 6159 _001400_hash NULL
83450 +_001401_hash iwm_rx_handle 3 24899 _001401_hash NULL
83451 +_001402_hash iwm_wdev_alloc 1 38415 _001402_hash NULL
83452 +_001403_hash jbd2_alloc 1 41359 _001403_hash NULL
83453 +_001404_hash jffs2_do_link 6 42048 _001404_hash NULL
83454 +_001405_hash jffs2_do_unlink 4 62020 _001405_hash NULL
83455 +_001406_hash jffs2_security_setxattr 4 62107 _001406_hash NULL
83456 +_001407_hash jffs2_trusted_setxattr 4 17048 _001407_hash NULL
83457 +_001408_hash jffs2_user_setxattr 4 10182 _001408_hash NULL
83458 +_001409_hash kernel_setsockopt 5 35913 _001409_hash NULL
83459 +_001410_hash keyctl_describe_key 3 36853 _001410_hash NULL
83460 +_001411_hash keyctl_get_security 3 64418 _001411_hash &_001132_hash
83461 +_001412_hash keyring_read 3 13438 _001412_hash NULL
83462 +_001413_hash kfifo_copy_to_user 3 20646 _001413_hash NULL
83463 +_001414_hash kmem_zalloc_large 1 56128 _001414_hash NULL
83464 +_001415_hash kmp_init 2 41373 _001415_hash NULL
83465 +_001416_hash koneplus_sysfs_write 6 35993 _001416_hash NULL
83466 +_001417_hash kvm_clear_guest_page 4 2308 _001417_hash NULL
83467 +_001418_hash kvm_read_nested_guest_page 5 13337 _001418_hash NULL
83468 +_001419_hash l2cap_create_basic_pdu 3 24869 _001419_hash &_001034_hash
83469 +_001420_hash l2cap_create_connless_pdu 3 37327 _001420_hash &_000966_hash
83470 +_001421_hash l2cap_create_iframe_pdu 3 51801 _001421_hash NULL
83471 +_001422_hash __lgwrite 4 57669 _001422_hash NULL
83472 +_001423_hash libfc_host_alloc 2 7917 _001423_hash NULL
83473 +_001424_hash llcp_sock_sendmsg 4 1092 _001424_hash NULL
83474 +_001425_hash macvtap_get_user 4 28185 _001425_hash NULL
83475 +_001426_hash mcam_v4l_read 3 36513 _001426_hash NULL
83476 +_001427_hash mce_async_out 3 58056 _001427_hash NULL
83477 +_001428_hash mce_flush_rx_buffer 2 14976 _001428_hash NULL
83478 +_001429_hash mdc800_device_read 3 22896 _001429_hash NULL
83479 +_001430_hash memcpy_toiovec 3 54166 _001430_hash &_000867_hash
83480 +_001431_hash memcpy_toiovecend 3-4 19736 _001431_hash NULL
83481 +_001433_hash mgt_set_varlen 4 60916 _001433_hash NULL
83482 +_001434_hash mlx4_en_create_rx_ring 3 62498 _001434_hash NULL
83483 +_001435_hash mlx4_en_create_tx_ring 4 48501 _001435_hash NULL
83484 +_001436_hash mon_bin_get_event 4 52863 _001436_hash NULL
83485 +_001437_hash mousedev_read 3 47123 _001437_hash NULL
83486 +_001438_hash move_addr_to_user 2 2868 _001438_hash NULL
83487 +_001439_hash mpihelp_mul 5-3 27805 _001439_hash NULL
83488 +_001441_hash mpi_lshift_limbs 2 9337 _001441_hash NULL
83489 +_001442_hash msnd_fifo_alloc 2 23179 _001442_hash NULL
83490 +_001443_hash mtdswap_init 2 55719 _001443_hash NULL
83491 +_001444_hash neigh_hash_grow 2 17283 _001444_hash NULL
83492 +_001445_hash nfs4_realloc_slot_table 2 22859 _001445_hash NULL
83493 +_001446_hash nfs_idmap_get_key 2 39616 _001446_hash NULL
83494 +_001447_hash nsm_get_handle 4 52089 _001447_hash NULL
83495 +_001448_hash ntfs_malloc_nofs 1 49572 _001448_hash NULL
83496 +_001449_hash ntfs_malloc_nofs_nofail 1 63631 _001449_hash NULL
83497 +_001450_hash nvme_create_queue 3 170 _001450_hash NULL
83498 +_001451_hash ocfs2_control_write 3 54737 _001451_hash NULL
83499 +_001452_hash orinoco_add_extscan_result 3 18207 _001452_hash NULL
83500 +_001454_hash override_release 2 52032 _001454_hash NULL
83501 +_001455_hash packet_snd 3 13634 _001455_hash NULL
83502 +_001456_hash pcbit_stat 2 27364 _001456_hash NULL
83503 +_001457_hash pcpu_extend_area_map 2 12589 _001457_hash NULL
83504 +_001458_hash pg_read 3 17276 _001458_hash NULL
83505 +_001459_hash picolcd_debug_eeprom_read 3 14549 _001459_hash NULL
83506 +_001460_hash pkt_alloc_packet_data 1 37928 _001460_hash NULL
83507 +_001461_hash pmcraid_build_passthrough_ioadls 2 62034 _001461_hash NULL
83508 +_001462_hash pms_capture 4 27142 _001462_hash NULL
83509 +_001463_hash posix_clock_register 2 5662 _001463_hash NULL
83510 +_001464_hash printer_read 3 54851 _001464_hash NULL
83511 +_001465_hash __proc_file_read 3 54978 _001465_hash NULL
83512 +_001466_hash pt_read 3 49136 _001466_hash NULL
83513 +_001467_hash put_cmsg 4 36589 _001467_hash NULL
83514 +_001468_hash pvr2_ioread_read 3 10720 _001505_hash NULL nohasharray
83515 +_001469_hash pwc_video_read 3 51735 _001469_hash NULL
83516 +_001470_hash px_raw_event 4 49371 _001470_hash NULL
83517 +_001471_hash qcam_read 3 13977 _001471_hash NULL
83518 +_001472_hash rawv6_sendmsg 4 20080 _001472_hash NULL
83519 +_001473_hash rds_sendmsg 4 40976 _001473_hash NULL
83520 +_001474_hash read_flush 3 43851 _001474_hash NULL
83521 +_001475_hash read_profile 3 27859 _001475_hash NULL
83522 +_001476_hash read_vmcore 3 26501 _001476_hash NULL
83523 +_001477_hash redirected_tty_write 3 65297 _001477_hash NULL
83524 +_001478_hash __register_chrdev 2-3 54223 _001478_hash NULL
83525 +_001480_hash regmap_raw_write 4 53803 _001480_hash NULL
83526 +_001481_hash reiserfs_allocate_list_bitmaps 3 21732 _001481_hash NULL
83527 +_001482_hash reiserfs_resize 2 34377 _001482_hash NULL
83528 +_001483_hash request_key_auth_read 3 24109 _001483_hash NULL
83529 +_001484_hash rfkill_fop_read 3 54711 _001484_hash NULL
83530 +_001485_hash rng_dev_read 3 41581 _001485_hash NULL
83531 +_001486_hash roccat_read 3 41093 _001486_hash NULL
83532 +_001487_hash sco_sock_sendmsg 4 62542 _001487_hash NULL
83533 +_001488_hash scsi_register 2 49094 _001488_hash NULL
83534 +_001489_hash sctp_getsockopt_events 2 3607 _001489_hash NULL
83535 +_001490_hash sctp_getsockopt_maxburst 2 42941 _001490_hash NULL
83536 +_001491_hash sctp_getsockopt_maxseg 2 10737 _001491_hash NULL
83537 +_001492_hash sctpprobe_read 3 17741 _001492_hash NULL
83538 +_001493_hash sdhci_alloc_host 2 7509 _001493_hash NULL
83539 +_001494_hash selinux_inode_post_setxattr 4 26037 _001494_hash NULL
83540 +_001495_hash selinux_inode_setsecurity 4 18148 _001495_hash NULL
83541 +_001496_hash selinux_inode_setxattr 4 10708 _001496_hash NULL
83542 +_001497_hash selinux_secctx_to_secid 2 63744 _001497_hash NULL
83543 +_001498_hash selinux_setprocattr 4 55611 _001498_hash NULL
83544 +_001499_hash sel_write_context 3 25726 _002397_hash NULL nohasharray
83545 +_001500_hash seq_copy_in_user 3 18543 _001500_hash NULL
83546 +_001501_hash seq_open_net 4 8968 _001594_hash NULL nohasharray
83547 +_001502_hash seq_open_private 3 61589 _001502_hash NULL
83548 +_001503_hash set_arg 3 42824 _001503_hash NULL
83549 +_001504_hash sg_read 3 25799 _001504_hash NULL
83550 +_001505_hash shash_async_setkey 3 10720 _001505_hash &_001468_hash
83551 +_001506_hash shash_compat_setkey 3 12267 _001506_hash NULL
83552 +_001507_hash shmem_setxattr 4 55867 _001507_hash NULL
83553 +_001508_hash simple_read_from_buffer 2-5 55957 _001508_hash NULL
83554 +_001511_hash sm_checker_extend 2 23615 _001511_hash NULL
83555 +_001512_hash sn9c102_read 3 29305 _001512_hash NULL
83556 +_001513_hash snd_es1938_capture_copy 5 25930 _001513_hash NULL
83557 +_001514_hash snd_gus_dram_peek 4 9062 _001514_hash NULL
83558 +_001515_hash snd_hdsp_capture_copy 5 4011 _001515_hash NULL
83559 +_001516_hash snd_korg1212_copy_to 6 92 _001516_hash NULL
83560 +_001517_hash snd_opl4_mem_proc_read 5 63774 _001517_hash NULL
83561 +_001518_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _001518_hash NULL
83562 +_001519_hash snd_pcm_oss_read1 3 63771 _001519_hash NULL
83563 +_001520_hash snd_rawmidi_kernel_read1 4 36740 _001520_hash NULL
83564 +_001521_hash snd_rme9652_capture_copy 5 10287 _001521_hash NULL
83565 +_001522_hash srp_target_alloc 3 37288 _001522_hash NULL
83566 +_001523_hash stk_allocate_buffers 2 16291 _001523_hash NULL
83567 +_001524_hash store_ifalias 4 35088 _001524_hash NULL
83568 +_001525_hash store_msg 3 56417 _001525_hash NULL
83569 +_001526_hash str_to_user 2 11411 _001526_hash NULL
83570 +_001527_hash subbuf_read_actor 3 2071 _001527_hash NULL
83571 +_001528_hash sys_fgetxattr 4 25166 _001528_hash NULL
83572 +_001529_hash sys_gethostname 2 49698 _001529_hash NULL
83573 +_001530_hash sys_getxattr 4 37418 _001530_hash NULL
83574 +_001531_hash sys_kexec_load 2 14222 _001531_hash NULL
83575 +_001532_hash sys_msgsnd 3 44537 _001532_hash &_000129_hash
83576 +_001533_hash sys_process_vm_readv 3-5 19090 _001533_hash NULL
83577 +_001535_hash sys_process_vm_writev 3-5 4928 _001535_hash NULL
83578 +_001537_hash sys_sched_getaffinity 2 60033 _001537_hash NULL
83579 +_001538_hash sys_setsockopt 5 35320 _001538_hash NULL
83580 +_001539_hash t3_init_l2t 1 8261 _001539_hash NULL
83581 +_001540_hash team_options_register 3 20091 _001540_hash NULL
83582 +_001541_hash tipc_send2name 6 16809 _001541_hash NULL
83583 +_001542_hash tipc_send2port 5 63935 _001542_hash NULL
83584 +_001543_hash tipc_send 4 51238 _001543_hash NULL
83585 +_001544_hash tm6000_i2c_recv_regs16 5 2949 _001544_hash NULL
83586 +_001545_hash tm6000_i2c_recv_regs 5 46215 _001545_hash NULL
83587 +_001546_hash tm6000_i2c_send_regs 5 20250 _001546_hash NULL
83588 +_001547_hash tnode_new 3 44757 _001547_hash NULL
83589 +_001548_hash tomoyo_read_self 3 33539 _001548_hash NULL
83590 +_001549_hash tomoyo_update_domain 2 5498 _001549_hash NULL
83591 +_001550_hash tomoyo_update_policy 2 40458 _001550_hash NULL
83592 +_001551_hash tpm_read 3 50344 _001551_hash NULL
83593 +_001552_hash TSS_rawhmac 3 17486 _001552_hash NULL
83594 +_001553_hash tt3650_ci_msg 4 57219 _001553_hash NULL
83595 +_001554_hash tun_get_user 3 33178 _001554_hash NULL
83596 +_001555_hash ubi_dbg_dump_flash 4 3870 _001555_hash NULL
83597 +_001556_hash ubi_io_write 4-5 15870 _001556_hash &_000954_hash
83598 +_001558_hash uio_read 3 49300 _001558_hash NULL
83599 +_001559_hash unix_seqpacket_sendmsg 4 27893 _001559_hash NULL
83600 +_001560_hash unlink1 3 63059 _001560_hash NULL
83601 +_001562_hash usb_allocate_stream_buffers 3 8964 _001562_hash NULL
83602 +_001563_hash usbdev_read 3 45114 _001563_hash NULL
83603 +_001564_hash usblp_read 3 57342 _001564_hash NULL
83604 +_001565_hash usbtmc_read 3 32377 _001565_hash NULL
83605 +_001566_hash usbvision_v4l2_read 3 34386 _001566_hash NULL
83606 +_001567_hash _usb_writeN_sync 4 31682 _001567_hash NULL
83607 +_001568_hash user_read 3 51881 _001568_hash NULL
83608 +_001569_hash v4l_stk_read 3 39672 _001569_hash NULL
83609 +_001570_hash vcs_read 3 8017 _001570_hash NULL
83610 +_001571_hash vdma_mem_alloc 1 6171 _001571_hash NULL
83611 +_001572_hash venus_create 4 20555 _001572_hash NULL
83612 +_001573_hash venus_link 5 32165 _001573_hash NULL
83613 +_001574_hash venus_lookup 4 8121 _001574_hash NULL
83614 +_001575_hash venus_mkdir 4 8967 _001575_hash NULL
83615 +_001576_hash venus_remove 4 59781 _001576_hash NULL
83616 +_001577_hash venus_rename 4-5 17707 _001577_hash NULL
83617 +_001579_hash venus_rmdir 4 45564 _001579_hash NULL
83618 +_001580_hash venus_symlink 4-6 23570 _001580_hash NULL
83619 +_001582_hash vfs_readlink 3 54368 _001582_hash NULL
83620 +_001583_hash vfs_readv 3 38011 _001583_hash NULL
83621 +_001584_hash vfs_writev 3 25278 _001584_hash NULL
83622 +_001585_hash vga_arb_read 3 4886 _001585_hash NULL
83623 +_001586_hash vhci_put_user 4 12604 _001586_hash NULL
83624 +_001587_hash vhost_add_used_n 3 10760 _001587_hash NULL
83625 +_001588_hash __videobuf_copy_to_user 4 15423 _001588_hash NULL
83626 +_001589_hash videobuf_pages_to_sg 2 3708 _001589_hash NULL
83627 +_001590_hash videobuf_vmalloc_to_sg 2 4548 _001590_hash NULL
83628 +_001591_hash virtnet_send_command 5-6 61993 _001591_hash NULL
83629 +_001593_hash vmbus_establish_gpadl 3 4495 _001593_hash NULL
83630 +_001594_hash vol_cdev_read 3 8968 _001594_hash &_001501_hash
83631 +_001595_hash w9966_v4l_read 3 31148 _001595_hash NULL
83632 +_001596_hash wdm_read 3 6549 _001596_hash NULL
83633 +_001597_hash wusb_prf 7 54261 _001597_hash &_000063_hash
83634 +_001598_hash xdi_copy_to_user 4 48900 _001598_hash NULL
83635 +_001599_hash xfs_buf_get_uncached 2 51477 _001599_hash NULL
83636 +_001600_hash xfs_efd_init 3 5463 _001600_hash NULL
83637 +_001601_hash xfs_efi_init 2 5476 _001601_hash NULL
83638 +_001602_hash xfs_iext_realloc_direct 2 20521 _001602_hash NULL
83639 +_001603_hash xfs_iext_realloc_indirect 2 59211 _001603_hash NULL
83640 +_001604_hash xfs_inumbers_fmt 3 12817 _001604_hash NULL
83641 +_001605_hash xlog_recover_add_to_cont_trans 4 44102 _001605_hash NULL
83642 +_001606_hash xz_dec_lzma2_create 2 36353 _001606_hash NULL
83643 +_001607_hash _zd_iowrite32v_locked 3 44725 _001607_hash NULL
83644 +_001608_hash aat2870_reg_read_file 3 12221 _001608_hash NULL
83645 +_001609_hash add_sctp_bind_addr 3 12269 _001609_hash NULL
83646 +_001610_hash aes_decrypt_fail_read 3 54815 _001610_hash NULL
83647 +_001611_hash aes_decrypt_interrupt_read 3 19910 _001611_hash NULL
83648 +_001612_hash aes_decrypt_packets_read 3 10155 _001612_hash NULL
83649 +_001613_hash aes_encrypt_fail_read 3 32562 _001613_hash NULL
83650 +_001614_hash aes_encrypt_interrupt_read 3 39919 _001614_hash NULL
83651 +_001615_hash aes_encrypt_packets_read 3 48666 _001615_hash NULL
83652 +_001616_hash afs_cell_lookup 2 8482 _001616_hash NULL
83653 +_001617_hash agp_allocate_memory 2 58761 _001617_hash NULL
83654 +_001618_hash __alloc_bootmem 1 31498 _001618_hash NULL
83655 +_001619_hash __alloc_bootmem_low 1 43423 _001619_hash NULL
83656 +_001620_hash __alloc_bootmem_node_nopanic 2 6432 _001620_hash NULL
83657 +_001621_hash alloc_cc770dev 1 48186 _001621_hash NULL
83658 +_001622_hash __alloc_ei_netdev 1 29338 _001622_hash NULL
83659 +_001623_hash __alloc_eip_netdev 1 51549 _001623_hash NULL
83660 +_001624_hash alloc_libipw 1 22708 _001624_hash NULL
83661 +_001625_hash alloc_pg_vec 2 8533 _001625_hash NULL
83662 +_001626_hash alloc_sja1000dev 1 17868 _001626_hash NULL
83663 +_001627_hash alloc_targets 2 8074 _001627_hash NULL
83664 +_001630_hash ath6kl_disconnect_timeout_read 3 3650 _001630_hash NULL
83665 +_001631_hash ath6kl_endpoint_stats_read 3 41554 _001631_hash NULL
83666 +_001632_hash ath6kl_fwlog_mask_read 3 2050 _001632_hash NULL
83667 +_001633_hash ath6kl_keepalive_read 3 44303 _001633_hash NULL
83668 +_001634_hash ath6kl_listen_int_read 3 10355 _001634_hash NULL
83669 +_001635_hash ath6kl_lrssi_roam_read 3 61022 _001635_hash NULL
83670 +_001636_hash ath6kl_regdump_read 3 14393 _001636_hash NULL
83671 +_001637_hash ath6kl_regread_read 3 25884 _001637_hash NULL
83672 +_001638_hash ath6kl_regwrite_read 3 48747 _001638_hash NULL
83673 +_001639_hash ath6kl_roam_table_read 3 26166 _001639_hash NULL
83674 +_001640_hash ath9k_debugfs_read_buf 3 25316 _001640_hash NULL
83675 +_001641_hash atk_debugfs_ggrp_read 3 29522 _001641_hash NULL
83676 +_001642_hash b43_debugfs_read 3 24425 _001642_hash NULL
83677 +_001643_hash b43legacy_debugfs_read 3 2473 _001643_hash NULL
83678 +_001644_hash bcm_recvmsg 4 43992 _001644_hash NULL
83679 +_001645_hash bfad_debugfs_read 3 13119 _001645_hash NULL
83680 +_001646_hash bfad_debugfs_read_regrd 3 57830 _001646_hash NULL
83681 +_001647_hash blk_init_tags 1 30592 _001647_hash NULL
83682 +_001648_hash blk_queue_init_tags 2 44355 _001648_hash NULL
83683 +_001649_hash blk_rq_map_kern 4 47004 _001649_hash NULL
83684 +_001650_hash bm_entry_read 3 10976 _001650_hash NULL
83685 +_001651_hash bm_status_read 3 19583 _001651_hash NULL
83686 +_001652_hash bnad_debugfs_read 3 50665 _001652_hash NULL
83687 +_001653_hash bnad_debugfs_read_regrd 3 51308 _001653_hash NULL
83688 +_001654_hash btmrvl_curpsmode_read 3 46939 _001654_hash NULL
83689 +_001655_hash btmrvl_gpiogap_read 3 4718 _001655_hash NULL
83690 +_001656_hash btmrvl_hscfgcmd_read 3 56303 _001656_hash NULL
83691 +_001657_hash btmrvl_hscmd_read 3 1614 _001657_hash NULL
83692 +_001658_hash btmrvl_hsmode_read 3 1647 _001658_hash NULL
83693 +_001659_hash btmrvl_hsstate_read 3 920 _001659_hash NULL
83694 +_001660_hash btmrvl_pscmd_read 3 24308 _001660_hash NULL
83695 +_001661_hash btmrvl_psmode_read 3 22395 _001661_hash NULL
83696 +_001662_hash btmrvl_psstate_read 3 50683 _001662_hash NULL
83697 +_001663_hash btmrvl_txdnldready_read 3 413 _001663_hash NULL
83698 +_001664_hash btrfs_add_link 5 9973 _001664_hash NULL
83699 +_001665_hash btrfs_discard_extent 2 38547 _001665_hash NULL
83700 +_001666_hash btrfs_find_create_tree_block 3 55812 _001666_hash NULL
83701 +_001667_hash btrfsic_map_block 2 56751 _001667_hash NULL
83702 +_001668_hash caif_stream_recvmsg 4 13173 _001668_hash NULL
83703 +_001669_hash carl9170_alloc 1 27 _001669_hash NULL
83704 +_001670_hash carl9170_debugfs_read 3 47738 _001670_hash NULL
83705 +_001671_hash cgroup_read_s64 5 19570 _001671_hash NULL
83706 +_001672_hash cgroup_read_u64 5 45532 _001672_hash NULL
83707 +_001673_hash channel_type_read 3 47308 _001673_hash NULL
83708 +_001674_hash codec_list_read_file 3 24910 _001674_hash NULL
83709 +_001675_hash configfs_read_file 3 1683 _001675_hash NULL
83710 +_001676_hash cpuset_common_file_read 5 8800 _001676_hash NULL
83711 +_001677_hash create_subvol 4 2347 _001677_hash NULL
83712 +_001678_hash cx18_copy_mdl_to_user 4 45549 _001678_hash NULL
83713 +_001679_hash dai_list_read_file 3 25421 _001679_hash NULL
83714 +_001680_hash dapm_bias_read_file 3 64715 _001680_hash NULL
83715 +_001681_hash dapm_widget_power_read_file 3 59950 _001754_hash NULL nohasharray
83716 +_001684_hash dbgfs_frame 3 45917 _001684_hash NULL
83717 +_001685_hash dbgfs_state 3 38894 _001685_hash NULL
83718 +_001686_hash debugfs_read 3 62535 _001686_hash NULL
83719 +_001687_hash debug_output 3 18575 _001687_hash NULL
83720 +_001688_hash debug_read 3 19322 _001688_hash NULL
83721 +_001689_hash dfs_file_read 3 18116 _001689_hash NULL
83722 +_001690_hash dma_memcpy_pg_to_iovec 6 1725 _001690_hash NULL
83723 +_001691_hash dma_memcpy_to_iovec 5 12173 _001691_hash NULL
83724 +_001692_hash dma_rx_errors_read 3 52045 _001692_hash NULL
83725 +_001693_hash dma_rx_requested_read 3 65354 _001693_hash NULL
83726 +_001694_hash dma_show_regs 3 35266 _001694_hash NULL
83727 +_001695_hash dma_tx_errors_read 3 46060 _001695_hash NULL
83728 +_001696_hash dma_tx_requested_read 3 16110 _001775_hash NULL nohasharray
83729 +_001697_hash dm_exception_table_init 2 39645 _001697_hash &_001103_hash
83730 +_001698_hash dn_recvmsg 4 17213 _001698_hash NULL
83731 +_001699_hash dns_resolver_read 3 54658 _001699_hash NULL
83732 +_001700_hash do_msgrcv 4 5590 _001700_hash NULL
83733 +_001701_hash driver_state_read 3 17194 _001701_hash &_001394_hash
83734 +_001702_hash dvb_demux_do_ioctl 3 34871 _001702_hash NULL
83735 +_001703_hash dvb_dmxdev_buffer_read 4 20682 _001703_hash NULL
83736 +_001704_hash dvb_dvr_do_ioctl 3 43355 _001704_hash NULL
83737 +_001705_hash econet_recvmsg 4 40978 _001705_hash NULL
83738 +_001706_hash event_calibration_read 3 21083 _001706_hash NULL
83739 +_001707_hash event_heart_beat_read 3 48961 _001707_hash NULL
83740 +_001708_hash event_oom_late_read 3 61175 _001708_hash &_001014_hash
83741 +_001709_hash event_phy_transmit_error_read 3 10471 _001709_hash NULL
83742 +_001710_hash event_rx_mem_empty_read 3 40363 _001710_hash NULL
83743 +_001711_hash event_rx_mismatch_read 3 38518 _001711_hash NULL
83744 +_001712_hash event_rx_pool_read 3 25792 _001712_hash NULL
83745 +_001713_hash event_tx_stuck_read 3 19305 _001713_hash NULL
83746 +_001714_hash excessive_retries_read 3 60425 _001714_hash NULL
83747 +_001715_hash fallback_on_nodma_alloc 2 35332 _001715_hash NULL
83748 +_001716_hash filter_read 3 61692 _001716_hash NULL
83749 +_001717_hash format_devstat_counter 3 32550 _001717_hash NULL
83750 +_001718_hash fragmentation_threshold_read 3 61718 _001718_hash NULL
83751 +_001719_hash fuse_conn_limit_read 3 20084 _001719_hash NULL
83752 +_001720_hash fuse_conn_waiting_read 3 49762 _001720_hash NULL
83753 +_001721_hash generic_readlink 3 32654 _001721_hash NULL
83754 +_001722_hash gpio_power_read 3 36059 _001722_hash NULL
83755 +_001723_hash hash_recvmsg 4 50924 _001723_hash NULL
83756 +_001724_hash ht40allow_map_read 3 55209 _001724_hash NULL
83757 +_001725_hash hwflags_read 3 52318 _001725_hash NULL
83758 +_001726_hash hysdn_conf_read 3 42324 _001726_hash NULL
83759 +_001727_hash i2400m_rx_stats_read 3 57706 _001727_hash NULL
83760 +_001728_hash i2400m_tx_stats_read 3 28527 _001728_hash NULL
83761 +_001729_hash idmouse_read 3 63374 _001729_hash NULL
83762 +_001730_hash ieee80211_if_read 3 6785 _001730_hash NULL
83763 +_001731_hash ieee80211_rx_bss_info 3 61630 _001731_hash NULL
83764 +_001732_hash ikconfig_read_current 3 1658 _001732_hash NULL
83765 +_001733_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001733_hash NULL
83766 +_001734_hash il3945_ucode_general_stats_read 3 46111 _001734_hash NULL
83767 +_001735_hash il3945_ucode_rx_stats_read 3 3048 _001735_hash NULL
83768 +_001736_hash il3945_ucode_tx_stats_read 3 36016 _001736_hash NULL
83769 +_001737_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001737_hash NULL
83770 +_001738_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001738_hash NULL
83771 +_001739_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001739_hash NULL
83772 +_001740_hash il4965_ucode_general_stats_read 3 56277 _001740_hash NULL
83773 +_001741_hash il4965_ucode_rx_stats_read 3 61948 _001741_hash NULL
83774 +_001742_hash il4965_ucode_tx_stats_read 3 12064 _001742_hash NULL
83775 +_001743_hash il_dbgfs_chain_noise_read 3 38044 _001743_hash NULL
83776 +_001744_hash il_dbgfs_channels_read 3 25005 _001744_hash NULL
83777 +_001745_hash il_dbgfs_disable_ht40_read 3 42386 _001745_hash NULL
83778 +_001746_hash il_dbgfs_fh_reg_read 3 40993 _001746_hash NULL
83779 +_001747_hash il_dbgfs_force_reset_read 3 57517 _001747_hash NULL
83780 +_001748_hash il_dbgfs_interrupt_read 3 3351 _001748_hash NULL
83781 +_001749_hash il_dbgfs_missed_beacon_read 3 59956 _001749_hash NULL
83782 +_001750_hash il_dbgfs_nvm_read 3 12288 _001750_hash NULL
83783 +_001751_hash il_dbgfs_power_save_status_read 3 43165 _001751_hash NULL
83784 +_001752_hash il_dbgfs_qos_read 3 33615 _001752_hash NULL
83785 +_001753_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001753_hash NULL
83786 +_001754_hash il_dbgfs_rxon_flags_read 3 59950 _001754_hash &_001681_hash
83787 +_001755_hash il_dbgfs_rx_queue_read 3 11221 _001755_hash NULL
83788 +_001756_hash il_dbgfs_rx_stats_read 3 15243 _001756_hash NULL
83789 +_001757_hash il_dbgfs_sensitivity_read 3 2370 _001757_hash NULL
83790 +_001758_hash il_dbgfs_sram_read 3 62296 _001758_hash NULL
83791 +_001759_hash il_dbgfs_stations_read 3 21532 _001759_hash NULL
83792 +_001760_hash il_dbgfs_status_read 3 58388 _001760_hash NULL
83793 +_001761_hash il_dbgfs_tx_queue_read 3 55668 _001761_hash NULL
83794 +_001762_hash il_dbgfs_tx_stats_read 3 32913 _001762_hash NULL
83795 +_001763_hash ima_show_htable_value 2 57136 _001763_hash NULL
83796 +_001765_hash ipw_write 3 59807 _001765_hash NULL
83797 +_001766_hash irda_recvmsg_stream 4 35280 _001766_hash NULL
83798 +_001767_hash iscsi_tcp_conn_setup 2 16376 _001767_hash NULL
83799 +_001768_hash isr_cmd_cmplt_read 3 53439 _001768_hash NULL
83800 +_001769_hash isr_commands_read 3 41398 _001769_hash NULL
83801 +_001770_hash isr_decrypt_done_read 3 49490 _001770_hash NULL
83802 +_001771_hash isr_dma0_done_read 3 8574 _001771_hash NULL
83803 +_001772_hash isr_dma1_done_read 3 48159 _001772_hash NULL
83804 +_001773_hash isr_fiqs_read 3 34687 _001773_hash NULL
83805 +_001774_hash isr_host_acknowledges_read 3 54136 _001774_hash NULL
83806 +_001775_hash isr_hw_pm_mode_changes_read 3 16110 _001775_hash &_001696_hash
83807 +_001776_hash isr_irqs_read 3 9181 _001776_hash NULL
83808 +_001777_hash isr_low_rssi_read 3 64789 _001777_hash NULL
83809 +_001778_hash isr_pci_pm_read 3 30271 _001778_hash NULL
83810 +_001779_hash isr_rx_headers_read 3 38325 _001779_hash NULL
83811 +_001780_hash isr_rx_mem_overflow_read 3 43025 _001780_hash NULL
83812 +_001781_hash isr_rx_procs_read 3 31804 _001781_hash NULL
83813 +_001782_hash isr_rx_rdys_read 3 35283 _001782_hash NULL
83814 +_001783_hash isr_tx_exch_complete_read 3 16103 _001783_hash NULL
83815 +_001784_hash isr_tx_procs_read 3 23084 _001784_hash NULL
83816 +_001785_hash isr_wakeups_read 3 49607 _001785_hash NULL
83817 +_001786_hash ivtv_read 3 57796 _001786_hash NULL
83818 +_001787_hash iwl_dbgfs_bt_traffic_read 3 35534 _001787_hash NULL
83819 +_001788_hash iwl_dbgfs_chain_noise_read 3 46355 _001788_hash NULL
83820 +_001789_hash iwl_dbgfs_channels_read 3 6784 _001789_hash NULL
83821 +_001790_hash iwl_dbgfs_current_sleep_command_read 3 2081 _001790_hash NULL
83822 +_001791_hash iwl_dbgfs_disable_ht40_read 3 35761 _001791_hash NULL
83823 +_001792_hash iwl_dbgfs_fh_reg_read 3 879 _001792_hash &_000393_hash
83824 +_001793_hash iwl_dbgfs_force_reset_read 3 62628 _001793_hash NULL
83825 +_001794_hash iwl_dbgfs_interrupt_read 3 23574 _001794_hash NULL
83826 +_001795_hash iwl_dbgfs_log_event_read 3 2107 _001795_hash NULL
83827 +_001796_hash iwl_dbgfs_missed_beacon_read 3 50584 _001796_hash NULL
83828 +_001797_hash iwl_dbgfs_nvm_read 3 23845 _001797_hash NULL
83829 +_001798_hash iwl_dbgfs_plcp_delta_read 3 55407 _001798_hash NULL
83830 +_001799_hash iwl_dbgfs_power_save_status_read 3 54392 _001799_hash NULL
83831 +_001800_hash iwl_dbgfs_protection_mode_read 3 13943 _001800_hash NULL
83832 +_001801_hash iwl_dbgfs_qos_read 3 11753 _001801_hash NULL
83833 +_001802_hash iwl_dbgfs_reply_tx_error_read 3 19205 _001802_hash NULL
83834 +_001803_hash iwl_dbgfs_rx_handlers_read 3 18708 _001803_hash NULL
83835 +_001804_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _001804_hash NULL
83836 +_001805_hash iwl_dbgfs_rxon_flags_read 3 20795 _001805_hash NULL
83837 +_001806_hash iwl_dbgfs_rx_queue_read 3 19943 _001806_hash NULL
83838 +_001807_hash iwl_dbgfs_rx_statistics_read 3 62687 _001807_hash &_000425_hash
83839 +_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _001808_hash NULL
83840 +_001809_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001809_hash NULL
83841 +_001810_hash iwl_dbgfs_sram_read 3 44505 _001810_hash NULL
83842 +_001811_hash iwl_dbgfs_stations_read 3 9309 _001811_hash NULL
83843 +_001812_hash iwl_dbgfs_status_read 3 5171 _001812_hash NULL
83844 +_001813_hash iwl_dbgfs_temperature_read 3 29224 _001813_hash NULL
83845 +_001814_hash iwl_dbgfs_thermal_throttling_read 3 38779 _001814_hash NULL
83846 +_001815_hash iwl_dbgfs_traffic_log_read 3 58870 _001815_hash NULL
83847 +_001816_hash iwl_dbgfs_tx_queue_read 3 4635 _001816_hash NULL
83848 +_001817_hash iwl_dbgfs_tx_statistics_read 3 314 _001817_hash NULL
83849 +_001818_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _001818_hash NULL
83850 +_001819_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _001819_hash NULL
83851 +_001820_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _001820_hash NULL
83852 +_001821_hash iwl_dbgfs_ucode_tracing_read 3 47983 _001821_hash &_000349_hash
83853 +_001822_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _001822_hash NULL
83854 +_001823_hash iwl_dbgfs_wowlan_sram_read 3 540 _001823_hash NULL
83855 +_001824_hash iwm_if_alloc 1 17027 _001824_hash &_001314_hash
83856 +_001825_hash kernel_readv 3 35617 _001825_hash NULL
83857 +_001826_hash key_algorithm_read 3 57946 _001826_hash NULL
83858 +_001827_hash key_icverrors_read 3 20895 _001827_hash NULL
83859 +_001828_hash key_key_read 3 3241 _001828_hash NULL
83860 +_001829_hash key_replays_read 3 62746 _001829_hash NULL
83861 +_001830_hash key_rx_spec_read 3 12736 _001830_hash NULL
83862 +_001831_hash key_tx_spec_read 3 4862 _001831_hash NULL
83863 +_001832_hash __kfifo_to_user 3 36555 _002199_hash NULL nohasharray
83864 +_001833_hash __kfifo_to_user_r 3 39123 _001833_hash NULL
83865 +_001834_hash kmem_zalloc_greedy 2-3 65268 _001834_hash NULL
83866 +_001836_hash l2cap_chan_send 3 49995 _001836_hash NULL
83867 +_001837_hash l2cap_sar_segment_sdu 3 27701 _001837_hash NULL
83868 +_001838_hash lbs_debugfs_read 3 30721 _001838_hash NULL
83869 +_001839_hash lbs_dev_info 3 51023 _001839_hash NULL
83870 +_001840_hash lbs_host_sleep_read 3 31013 _001840_hash NULL
83871 +_001841_hash lbs_rdbbp_read 3 45805 _001841_hash NULL
83872 +_001842_hash lbs_rdmac_read 3 418 _001842_hash NULL
83873 +_001843_hash lbs_rdrf_read 3 41431 _001843_hash NULL
83874 +_001844_hash lbs_sleepparams_read 3 10840 _001844_hash NULL
83875 +_001845_hash lbs_threshold_read 5 21046 _001845_hash NULL
83876 +_001846_hash libfc_vport_create 2 4415 _001846_hash NULL
83877 +_001847_hash lkdtm_debugfs_read 3 45752 _001847_hash NULL
83878 +_001848_hash llcp_sock_recvmsg 4 13556 _001848_hash NULL
83879 +_001849_hash long_retry_limit_read 3 59766 _001849_hash NULL
83880 +_001850_hash lpfc_debugfs_dif_err_read 3 36303 _001850_hash NULL
83881 +_001851_hash lpfc_debugfs_read 3 16566 _001851_hash NULL
83882 +_001852_hash lpfc_idiag_baracc_read 3 58466 _002447_hash NULL nohasharray
83883 +_001853_hash lpfc_idiag_ctlacc_read 3 33943 _001853_hash NULL
83884 +_001854_hash lpfc_idiag_drbacc_read 3 15948 _001854_hash NULL
83885 +_001855_hash lpfc_idiag_extacc_read 3 48301 _001855_hash NULL
83886 +_001856_hash lpfc_idiag_mbxacc_read 3 28061 _001856_hash NULL
83887 +_001857_hash lpfc_idiag_pcicfg_read 3 50334 _001857_hash NULL
83888 +_001858_hash lpfc_idiag_queacc_read 3 13950 _001858_hash NULL
83889 +_001859_hash lpfc_idiag_queinfo_read 3 55662 _001859_hash NULL
83890 +_001860_hash mac80211_format_buffer 2 41010 _001860_hash NULL
83891 +_001861_hash macvtap_put_user 4 55609 _001861_hash NULL
83892 +_001862_hash macvtap_sendmsg 4 30629 _001862_hash NULL
83893 +_001863_hash mic_calc_failure_read 3 59700 _001863_hash NULL
83894 +_001864_hash mic_rx_pkts_read 3 27972 _001864_hash NULL
83895 +_001865_hash minstrel_stats_read 3 17290 _001865_hash NULL
83896 +_001866_hash mmc_ext_csd_read 3 13205 _001866_hash NULL
83897 +_001867_hash mon_bin_read 3 6841 _001867_hash NULL
83898 +_001868_hash mon_stat_read 3 25238 _001868_hash NULL
83899 +_001870_hash mqueue_read_file 3 6228 _001870_hash NULL
83900 +_001871_hash mwifiex_debug_read 3 53074 _001871_hash NULL
83901 +_001872_hash mwifiex_getlog_read 3 54269 _001872_hash NULL
83902 +_001873_hash mwifiex_info_read 3 53447 _001873_hash NULL
83903 +_001874_hash mwifiex_rdeeprom_read 3 51429 _001874_hash NULL
83904 +_001875_hash mwifiex_regrdwr_read 3 34472 _001875_hash NULL
83905 +_001876_hash nfsd_vfs_read 6 62605 _001876_hash NULL
83906 +_001877_hash nfsd_vfs_write 6 54577 _001877_hash NULL
83907 +_001878_hash nfs_idmap_lookup_id 2 10660 _001878_hash NULL
83908 +_001879_hash o2hb_debug_read 3 37851 _001879_hash NULL
83909 +_001880_hash o2net_debug_read 3 52105 _001880_hash NULL
83910 +_001881_hash ocfs2_control_read 3 56405 _001881_hash NULL
83911 +_001882_hash ocfs2_debug_read 3 14507 _001882_hash NULL
83912 +_001883_hash ocfs2_readlink 3 50656 _001883_hash NULL
83913 +_001884_hash oom_adjust_read 3 25127 _001884_hash NULL
83914 +_001885_hash oom_score_adj_read 3 39921 _002116_hash NULL nohasharray
83915 +_001886_hash oprofilefs_str_to_user 3 42182 _001886_hash NULL
83916 +_001887_hash oprofilefs_ulong_to_user 3 11582 _001887_hash NULL
83917 +_001888_hash _osd_req_list_objects 6 4204 _001888_hash NULL
83918 +_001889_hash osd_req_read_kern 5 59990 _001889_hash NULL
83919 +_001890_hash osd_req_write_kern 5 53486 _001890_hash NULL
83920 +_001891_hash p54_init_common 1 23850 _001891_hash NULL
83921 +_001892_hash packet_sendmsg 4 24954 _001892_hash NULL
83922 +_001893_hash page_readlink 3 23346 _001893_hash NULL
83923 +_001894_hash pcf50633_write_block 3 2124 _001894_hash NULL
83924 +_001895_hash platform_list_read_file 3 34734 _001895_hash NULL
83925 +_001896_hash pm860x_bulk_write 3 43875 _001896_hash NULL
83926 +_001897_hash pm_qos_power_read 3 55891 _001897_hash NULL
83927 +_001898_hash pms_read 3 53873 _001898_hash NULL
83928 +_001899_hash port_show_regs 3 5904 _001899_hash NULL
83929 +_001900_hash proc_coredump_filter_read 3 39153 _001900_hash NULL
83930 +_001901_hash proc_fdinfo_read 3 62043 _001901_hash NULL
83931 +_001902_hash proc_info_read 3 63344 _001902_hash NULL
83932 +_001903_hash proc_loginuid_read 3 15631 _001903_hash NULL
83933 +_001904_hash proc_pid_attr_read 3 10173 _001904_hash NULL
83934 +_001905_hash proc_pid_readlink 3 52186 _001905_hash NULL
83935 +_001906_hash proc_read 3 43614 _001906_hash NULL
83936 +_001907_hash proc_self_readlink 3 38094 _001907_hash NULL
83937 +_001908_hash proc_sessionid_read 3 6911 _002038_hash NULL nohasharray
83938 +_001909_hash provide_user_output 3 41105 _001909_hash NULL
83939 +_001910_hash ps_pspoll_max_apturn_read 3 6699 _001910_hash NULL
83940 +_001911_hash ps_pspoll_timeouts_read 3 11776 _001911_hash NULL
83941 +_001912_hash ps_pspoll_utilization_read 3 5361 _001912_hash NULL
83942 +_001913_hash pstore_file_read 3 57288 _001913_hash NULL
83943 +_001914_hash ps_upsd_max_apturn_read 3 19918 _001914_hash NULL
83944 +_001915_hash ps_upsd_max_sptime_read 3 63362 _001915_hash NULL
83945 +_001916_hash ps_upsd_timeouts_read 3 28924 _001916_hash NULL
83946 +_001917_hash ps_upsd_utilization_read 3 51669 _001917_hash NULL
83947 +_001918_hash pvr2_v4l2_read 3 18006 _001918_hash NULL
83948 +_001919_hash pwr_disable_ps_read 3 13176 _001919_hash NULL
83949 +_001920_hash pwr_elp_enter_read 3 5324 _001920_hash NULL
83950 +_001921_hash pwr_enable_ps_read 3 17686 _001921_hash NULL
83951 +_001922_hash pwr_fix_tsf_ps_read 3 26627 _001922_hash NULL
83952 +_001923_hash pwr_missing_bcns_read 3 25824 _001923_hash NULL
83953 +_001924_hash pwr_power_save_off_read 3 18355 _001924_hash NULL
83954 +_001925_hash pwr_ps_enter_read 3 26935 _001925_hash &_000501_hash
83955 +_001926_hash pwr_rcvd_awake_beacons_read 3 50505 _001926_hash NULL
83956 +_001927_hash pwr_rcvd_beacons_read 3 52836 _001927_hash NULL
83957 +_001928_hash pwr_tx_without_ps_read 3 48423 _001928_hash NULL
83958 +_001929_hash pwr_tx_with_ps_read 3 60851 _001929_hash NULL
83959 +_001930_hash pwr_wake_on_host_read 3 26321 _001930_hash NULL
83960 +_001931_hash pwr_wake_on_timer_exp_read 3 22640 _001931_hash NULL
83961 +_001932_hash queues_read 3 24877 _001932_hash NULL
83962 +_001933_hash raw_recvmsg 4 17277 _001933_hash NULL
83963 +_001934_hash rcname_read 3 25919 _001934_hash NULL
83964 +_001935_hash read_4k_modal_eeprom 3 30212 _001935_hash NULL
83965 +_001936_hash read_9287_modal_eeprom 3 59327 _001936_hash NULL
83966 +_001937_hash reada_find_extent 2 63486 _001937_hash NULL
83967 +_001938_hash read_def_modal_eeprom 3 14041 _001938_hash NULL
83968 +_001939_hash read_enabled_file_bool 3 37744 _001939_hash NULL
83969 +_001940_hash read_file_ani 3 23161 _001940_hash NULL
83970 +_001941_hash read_file_antenna 3 13574 _001941_hash NULL
83971 +_001942_hash read_file_base_eeprom 3 42168 _001942_hash NULL
83972 +_001943_hash read_file_beacon 3 32595 _001943_hash NULL
83973 +_001944_hash read_file_blob 3 57406 _001944_hash NULL
83974 +_001945_hash read_file_bool 3 4180 _001945_hash NULL
83975 +_001946_hash read_file_credit_dist_stats 3 54367 _001946_hash NULL
83976 +_001947_hash read_file_debug 3 58256 _001947_hash NULL
83977 +_001948_hash read_file_disable_ani 3 6536 _001948_hash NULL
83978 +_001949_hash read_file_dma 3 9530 _001949_hash NULL
83979 +_001950_hash read_file_dump_nfcal 3 18766 _001950_hash NULL
83980 +_001951_hash read_file_frameerrors 3 64001 _001951_hash NULL
83981 +_001952_hash read_file_interrupt 3 61742 _001959_hash NULL nohasharray
83982 +_001953_hash read_file_misc 3 9948 _001953_hash NULL
83983 +_001954_hash read_file_modal_eeprom 3 39909 _001954_hash NULL
83984 +_001955_hash read_file_queue 3 40895 _001955_hash NULL
83985 +_001956_hash read_file_rcstat 3 22854 _001956_hash NULL
83986 +_001957_hash read_file_recv 3 48232 _001957_hash NULL
83987 +_001958_hash read_file_regidx 3 33370 _001958_hash NULL
83988 +_001959_hash read_file_regval 3 61742 _001959_hash &_001952_hash
83989 +_001960_hash read_file_reset 3 52310 _001960_hash NULL
83990 +_001961_hash read_file_rx_chainmask 3 41605 _001961_hash NULL
83991 +_001962_hash read_file_slot 3 50111 _001962_hash NULL
83992 +_001963_hash read_file_stations 3 35795 _001963_hash NULL
83993 +_001964_hash read_file_tgt_int_stats 3 20697 _001964_hash NULL
83994 +_001965_hash read_file_tgt_rx_stats 3 33944 _001965_hash NULL
83995 +_001966_hash read_file_tgt_stats 3 8959 _001966_hash NULL
83996 +_001967_hash read_file_tgt_tx_stats 3 51847 _001967_hash NULL
83997 +_001968_hash read_file_tx_chainmask 3 3829 _001968_hash NULL
83998 +_001969_hash read_file_war_stats 3 292 _001969_hash NULL
83999 +_001970_hash read_file_xmit 3 21487 _001970_hash NULL
84000 +_001971_hash read_from_oldmem 2 3337 _001971_hash NULL
84001 +_001972_hash read_oldmem 3 55658 _001972_hash NULL
84002 +_001973_hash regmap_name_read_file 3 39379 _001973_hash NULL
84003 +_001974_hash repair_io_failure 4 4815 _001974_hash NULL
84004 +_001975_hash request_key_and_link 4 42693 _001975_hash NULL
84005 +_001976_hash res_counter_read 4 33499 _001976_hash NULL
84006 +_001977_hash retry_count_read 3 52129 _001977_hash NULL
84007 +_001978_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _001978_hash NULL
84008 +_001979_hash rs_sta_dbgfs_scale_table_read 3 40262 _001979_hash NULL
84009 +_001980_hash rs_sta_dbgfs_stats_table_read 3 56573 _001980_hash NULL
84010 +_001981_hash rts_threshold_read 3 44384 _001981_hash NULL
84011 +_001982_hash rx_dropped_read 3 44799 _001982_hash NULL
84012 +_001983_hash rx_fcs_err_read 3 62844 _001983_hash NULL
84013 +_001984_hash rx_hdr_overflow_read 3 64407 _001984_hash NULL
84014 +_001985_hash rx_hw_stuck_read 3 57179 _001985_hash NULL
84015 +_001986_hash rx_out_of_mem_read 3 10157 _001986_hash NULL
84016 +_001987_hash rx_path_reset_read 3 23801 _001987_hash NULL
84017 +_001988_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _001988_hash NULL
84018 +_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _001989_hash NULL
84019 +_001990_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _001990_hash NULL
84020 +_001991_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _001991_hash NULL
84021 +_001992_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _001992_hash NULL
84022 +_001993_hash rx_reset_counter_read 3 58001 _001993_hash NULL
84023 +_001994_hash rx_xfr_hint_trig_read 3 40283 _001994_hash NULL
84024 +_001995_hash s5m_bulk_write 3 4833 _001995_hash NULL
84025 +_001996_hash scrub_setup_recheck_block 3-4 56245 _001996_hash NULL
84026 +_001998_hash scsi_adjust_queue_depth 3 12802 _001998_hash NULL
84027 +_001999_hash selinux_inode_notifysecctx 3 36896 _001999_hash NULL
84028 +_002000_hash sel_read_avc_cache_threshold 3 33942 _002000_hash NULL
84029 +_002001_hash sel_read_avc_hash_stats 3 1984 _002001_hash NULL
84030 +_002002_hash sel_read_bool 3 24236 _002002_hash NULL
84031 +_002003_hash sel_read_checkreqprot 3 33068 _002003_hash NULL
84032 +_002004_hash sel_read_class 3 12669 _002541_hash NULL nohasharray
84033 +_002005_hash sel_read_enforce 3 2828 _002005_hash NULL
84034 +_002006_hash sel_read_handle_status 3 56139 _002006_hash NULL
84035 +_002007_hash sel_read_handle_unknown 3 57933 _002007_hash NULL
84036 +_002008_hash sel_read_initcon 3 32362 _002008_hash NULL
84037 +_002009_hash sel_read_mls 3 25369 _002009_hash NULL
84038 +_002010_hash sel_read_perm 3 42302 _002010_hash NULL
84039 +_002011_hash sel_read_policy 3 55947 _002011_hash NULL
84040 +_002012_hash sel_read_policycap 3 28544 _002012_hash NULL
84041 +_002013_hash sel_read_policyvers 3 55 _002013_hash NULL
84042 +_002014_hash send_msg 4 37323 _002014_hash NULL
84043 +_002015_hash send_packet 4 52960 _002015_hash NULL
84044 +_002016_hash short_retry_limit_read 3 4687 _002016_hash NULL
84045 +_002017_hash simple_attr_read 3 24738 _002017_hash NULL
84046 +_002018_hash simple_transaction_read 3 17076 _002018_hash NULL
84047 +_002019_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002019_hash NULL
84048 +_002022_hash skb_copy_datagram_iovec 2-4 5806 _002022_hash NULL
84049 +_002024_hash smk_read_ambient 3 61220 _002024_hash NULL
84050 +_002025_hash smk_read_direct 3 15803 _002025_hash NULL
84051 +_002026_hash smk_read_doi 3 30813 _002026_hash NULL
84052 +_002027_hash smk_read_logging 3 37804 _002027_hash NULL
84053 +_002028_hash smk_read_onlycap 3 3855 _002028_hash NULL
84054 +_002029_hash snapshot_read 3 22601 _002029_hash NULL
84055 +_002030_hash snd_cs4281_BA0_read 5 6847 _002030_hash NULL
84056 +_002031_hash snd_cs4281_BA1_read 5 20323 _002031_hash NULL
84057 +_002032_hash snd_cs46xx_io_read 5 45734 _002032_hash NULL
84058 +_002033_hash snd_gus_dram_read 4 56686 _002033_hash NULL
84059 +_002034_hash snd_pcm_oss_read 3 28317 _002034_hash NULL
84060 +_002035_hash snd_rme32_capture_copy 5 39653 _002035_hash NULL
84061 +_002036_hash snd_rme96_capture_copy 5 58484 _002036_hash NULL
84062 +_002037_hash snd_soc_hw_bulk_write_raw 4 14245 _002037_hash NULL
84063 +_002038_hash spi_show_regs 3 6911 _002038_hash &_001908_hash
84064 +_002039_hash sta_agg_status_read 3 14058 _002039_hash NULL
84065 +_002040_hash sta_connected_time_read 3 17435 _002040_hash NULL
84066 +_002041_hash sta_flags_read 3 56710 _002041_hash NULL
84067 +_002042_hash sta_ht_capa_read 3 10366 _002042_hash NULL
84068 +_002043_hash sta_last_seq_ctrl_read 3 19106 _002043_hash NULL
84069 +_002044_hash sta_num_ps_buf_frames_read 3 1488 _002044_hash NULL
84070 +_002045_hash st_read 3 51251 _002045_hash NULL
84071 +_002046_hash supply_map_read_file 3 10608 _002046_hash NULL
84072 +_002047_hash sysfs_read_file 3 42113 _002047_hash NULL
84073 +_002048_hash sys_lgetxattr 4 45531 _002048_hash NULL
84074 +_002049_hash sys_preadv 3 17100 _002049_hash NULL
84075 +_002050_hash sys_pwritev 3 41722 _002050_hash NULL
84076 +_002051_hash sys_readv 3 50664 _002051_hash NULL
84077 +_002052_hash sys_rt_sigpending 2 24961 _002052_hash NULL
84078 +_002053_hash sys_writev 3 28384 _002053_hash NULL
84079 +_002054_hash test_iso_queue 5 62534 _002054_hash NULL
84080 +_002055_hash ts_read 3 44687 _002055_hash NULL
84081 +_002056_hash TSS_authhmac 3 12839 _002056_hash NULL
84082 +_002057_hash TSS_checkhmac1 5 31429 _002057_hash NULL
84083 +_002058_hash TSS_checkhmac2 5-7 40520 _002058_hash NULL
84084 +_002060_hash tt3650_ci_msg_locked 4 8013 _002060_hash NULL
84085 +_002061_hash tun_sendmsg 4 10337 _002061_hash NULL
84086 +_002062_hash tx_internal_desc_overflow_read 3 47300 _002062_hash NULL
84087 +_002063_hash tx_queue_len_read 3 1463 _002063_hash NULL
84088 +_002064_hash tx_queue_status_read 3 44978 _002064_hash NULL
84089 +_002065_hash ubi_io_write_data 4-5 40305 _002065_hash NULL
84090 +_002067_hash uhci_debug_read 3 5911 _002067_hash NULL
84091 +_002068_hash unix_stream_recvmsg 4 35210 _002068_hash NULL
84092 +_002069_hash uvc_debugfs_stats_read 3 56651 _002069_hash NULL
84093 +_002070_hash vhost_add_used_and_signal_n 4 8038 _002070_hash NULL
84094 +_002071_hash vifs_state_read 3 33762 _002071_hash NULL
84095 +_002072_hash vmbus_open 2-3 12154 _002072_hash NULL
84096 +_002074_hash waiters_read 3 40902 _002074_hash NULL
84097 +_002075_hash wep_addr_key_count_read 3 20174 _002075_hash NULL
84098 +_002076_hash wep_decrypt_fail_read 3 58567 _002076_hash NULL
84099 +_002077_hash wep_default_key_count_read 3 43035 _002077_hash NULL
84100 +_002078_hash wep_interrupt_read 3 41492 _002078_hash NULL
84101 +_002079_hash wep_key_not_found_read 3 13377 _002079_hash &_000915_hash
84102 +_002080_hash wep_packets_read 3 18751 _002080_hash NULL
84103 +_002081_hash wl1271_format_buffer 2 20834 _002081_hash NULL
84104 +_002082_hash wm8994_bulk_write 3 13615 _002082_hash NULL
84105 +_002083_hash wusb_prf_256 7 29203 _002083_hash NULL
84106 +_002084_hash wusb_prf_64 7 51065 _002084_hash NULL
84107 +_002085_hash xfs_buf_read_uncached 4 27519 _002085_hash NULL
84108 +_002086_hash xfs_iext_add 3 41422 _002086_hash NULL
84109 +_002087_hash xfs_iext_remove_direct 3 40744 _002087_hash NULL
84110 +_002088_hash xfs_trans_get_efd 3 51148 _002088_hash NULL
84111 +_002089_hash xfs_trans_get_efi 2 7898 _002089_hash NULL
84112 +_002090_hash xlog_get_bp 2 23229 _002090_hash NULL
84113 +_002091_hash xz_dec_init 2 29029 _002091_hash NULL
84114 +_002092_hash aac_change_queue_depth 2 825 _002092_hash NULL
84115 +_002093_hash agp_allocate_memory_wrap 1 16576 _002093_hash NULL
84116 +_002094_hash arcmsr_adjust_disk_queue_depth 2 16756 _002094_hash NULL
84117 +_002095_hash atalk_recvmsg 4 22053 _002095_hash NULL
84118 +_002097_hash atomic_read_file 3 16227 _002097_hash NULL
84119 +_002098_hash ax25_recvmsg 4 64441 _002098_hash NULL
84120 +_002099_hash beacon_interval_read 3 7091 _002099_hash NULL
84121 +_002100_hash btrfs_init_new_buffer 4 55761 _002100_hash NULL
84122 +_002101_hash btrfs_mksubvol 3 39479 _002101_hash NULL
84123 +_002102_hash bt_sock_recvmsg 4 12316 _002102_hash NULL
84124 +_002103_hash bt_sock_stream_recvmsg 4 52518 _002103_hash NULL
84125 +_002104_hash caif_seqpkt_recvmsg 4 32241 _002104_hash NULL
84126 +_002105_hash cpu_type_read 3 36540 _002105_hash NULL
84127 +_002106_hash cx18_read 3 23699 _002106_hash NULL
84128 +_002107_hash dccp_recvmsg 4 16056 _002107_hash NULL
84129 +_002108_hash depth_read 3 31112 _002108_hash NULL
84130 +_002109_hash dfs_global_file_read 3 7787 _002109_hash NULL
84131 +_002110_hash dgram_recvmsg 4 23104 _002110_hash NULL
84132 +_002111_hash dma_skb_copy_datagram_iovec 3-5 21516 _002111_hash NULL
84133 +_002113_hash dtim_interval_read 3 654 _002113_hash NULL
84134 +_002114_hash dynamic_ps_timeout_read 3 10110 _002114_hash NULL
84135 +_002115_hash enable_read 3 2117 _002115_hash NULL
84136 +_002116_hash exofs_read_kern 6 39921 _002116_hash &_001885_hash
84137 +_002117_hash fc_change_queue_depth 2 36841 _002117_hash NULL
84138 +_002118_hash forced_ps_read 3 31685 _002118_hash NULL
84139 +_002119_hash frequency_read 3 64031 _002119_hash NULL
84140 +_002120_hash get_alua_req 3 4166 _002120_hash NULL
84141 +_002121_hash get_rdac_req 3 45882 _002121_hash NULL
84142 +_002122_hash hci_sock_recvmsg 4 7072 _002122_hash NULL
84143 +_002123_hash hpsa_change_queue_depth 2 15449 _002123_hash NULL
84144 +_002124_hash hptiop_adjust_disk_queue_depth 2 20122 _002124_hash NULL
84145 +_002125_hash ide_queue_pc_tail 5 11673 _002125_hash NULL
84146 +_002126_hash ide_raw_taskfile 4 42355 _002126_hash NULL
84147 +_002127_hash idetape_queue_rw_tail 3 29562 _002127_hash NULL
84148 +_002128_hash ieee80211_if_read_aid 3 9705 _002128_hash NULL
84149 +_002129_hash ieee80211_if_read_auto_open_plinks 3 38268 _002129_hash NULL
84150 +_002130_hash ieee80211_if_read_ave_beacon 3 64924 _002130_hash NULL
84151 +_002131_hash ieee80211_if_read_bssid 3 35161 _002131_hash NULL
84152 +_002132_hash ieee80211_if_read_channel_type 3 23884 _002132_hash NULL
84153 +_002133_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002133_hash NULL
84154 +_002134_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002134_hash NULL
84155 +_002135_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002135_hash NULL
84156 +_002136_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002136_hash NULL
84157 +_002137_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002137_hash NULL
84158 +_002138_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002138_hash NULL
84159 +_002139_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002139_hash NULL
84160 +_002140_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002140_hash NULL
84161 +_002141_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002141_hash NULL
84162 +_002142_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002142_hash NULL
84163 +_002143_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002143_hash NULL
84164 +_002144_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002144_hash NULL
84165 +_002145_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002145_hash NULL
84166 +_002146_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002146_hash NULL
84167 +_002147_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002147_hash NULL
84168 +_002148_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002148_hash NULL
84169 +_002149_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002149_hash NULL
84170 +_002150_hash ieee80211_if_read_drop_unencrypted 3 37053 _002150_hash NULL
84171 +_002151_hash ieee80211_if_read_dtim_count 3 38419 _002151_hash NULL
84172 +_002152_hash ieee80211_if_read_element_ttl 3 18869 _002152_hash NULL
84173 +_002153_hash ieee80211_if_read_estab_plinks 3 32533 _002153_hash NULL
84174 +_002154_hash ieee80211_if_read_flags 3 57470 _002389_hash NULL nohasharray
84175 +_002155_hash ieee80211_if_read_fwded_frames 3 36520 _002155_hash NULL
84176 +_002156_hash ieee80211_if_read_fwded_mcast 3 39571 _002156_hash &_000151_hash
84177 +_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002157_hash NULL
84178 +_002158_hash ieee80211_if_read_last_beacon 3 31257 _002158_hash NULL
84179 +_002159_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002159_hash NULL
84180 +_002160_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002160_hash NULL
84181 +_002161_hash ieee80211_if_read_num_sta_authorized 3 56177 _002161_hash NULL
84182 +_002162_hash ieee80211_if_read_num_sta_ps 3 34722 _002162_hash NULL
84183 +_002163_hash ieee80211_if_read_path_refresh_time 3 25545 _002163_hash NULL
84184 +_002164_hash ieee80211_if_read_peer 3 45233 _002164_hash NULL
84185 +_002165_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002165_hash NULL
84186 +_002166_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002166_hash NULL
84187 +_002167_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002167_hash NULL
84188 +_002168_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002168_hash NULL
84189 +_002169_hash ieee80211_if_read_rssi_threshold 3 49260 _002169_hash NULL
84190 +_002170_hash ieee80211_if_read_smps 3 27416 _002170_hash NULL
84191 +_002171_hash ieee80211_if_read_state 3 9813 _002280_hash NULL nohasharray
84192 +_002172_hash ieee80211_if_read_tkip_mic_test 3 19565 _002172_hash NULL
84193 +_002173_hash ieee80211_if_read_tsf 3 16420 _002173_hash NULL
84194 +_002174_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002174_hash NULL
84195 +_002175_hash ieee80211_if_read_uapsd_queues 3 55150 _002175_hash NULL
84196 +_002176_hash ieee80211_rx_mgmt_beacon 3 24430 _002176_hash NULL
84197 +_002177_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002177_hash NULL
84198 +_002178_hash ima_show_htable_violations 3 10619 _002178_hash NULL
84199 +_002179_hash ima_show_measurements_count 3 23536 _002179_hash NULL
84200 +_002180_hash insert_one_name 7 61668 _002180_hash NULL
84201 +_002181_hash ipr_change_queue_depth 2 6431 _002181_hash NULL
84202 +_002182_hash ip_recv_error 3 23109 _002182_hash NULL
84203 +_002183_hash ipv6_recv_error 3 56347 _002183_hash NULL
84204 +_002184_hash ipv6_recv_rxpmtu 3 7142 _002184_hash NULL
84205 +_002185_hash ipx_recvmsg 4 44366 _002185_hash NULL
84206 +_002186_hash irda_recvmsg_dgram 4 32631 _002186_hash NULL
84207 +_002187_hash iscsi_change_queue_depth 2 23416 _002187_hash NULL
84208 +_002188_hash ivtv_read_pos 3 34400 _002188_hash &_000303_hash
84209 +_002189_hash key_conf_hw_key_idx_read 3 25003 _002189_hash NULL
84210 +_002190_hash key_conf_keyidx_read 3 42443 _002190_hash NULL
84211 +_002191_hash key_conf_keylen_read 3 49758 _002191_hash NULL
84212 +_002192_hash key_flags_read 3 25931 _002192_hash NULL
84213 +_002193_hash key_ifindex_read 3 31411 _002193_hash NULL
84214 +_002194_hash key_tx_rx_count_read 3 44742 _002194_hash NULL
84215 +_002195_hash l2cap_sock_sendmsg 4 63427 _002195_hash NULL
84216 +_002196_hash l2tp_ip_recvmsg 4 22681 _002196_hash NULL
84217 +_002197_hash llc_ui_recvmsg 4 3826 _002197_hash NULL
84218 +_002198_hash lpfc_change_queue_depth 2 25905 _002198_hash NULL
84219 +_002199_hash macvtap_do_read 4 36555 _002199_hash &_001832_hash
84220 +_002200_hash megaraid_change_queue_depth 2 64815 _002200_hash NULL
84221 +_002201_hash megasas_change_queue_depth 2 32747 _002201_hash NULL
84222 +_002202_hash mptscsih_change_queue_depth 2 26036 _002202_hash NULL
84223 +_002203_hash NCR_700_change_queue_depth 2 31742 _002203_hash NULL
84224 +_002204_hash netlink_recvmsg 4 61600 _002204_hash NULL
84225 +_002205_hash nfsctl_transaction_read 3 48250 _002205_hash NULL
84226 +_002206_hash nfs_map_group_to_gid 3 15892 _002206_hash NULL
84227 +_002207_hash nfs_map_name_to_uid 3 51132 _002207_hash NULL
84228 +_002208_hash nr_recvmsg 4 12649 _002208_hash NULL
84229 +_002209_hash osd_req_list_collection_objects 5 36664 _002209_hash NULL
84230 +_002210_hash osd_req_list_partition_objects 5 56464 _002210_hash NULL
84231 +_002212_hash packet_recv_error 3 16669 _002212_hash NULL
84232 +_002213_hash packet_recvmsg 4 47700 _002213_hash NULL
84233 +_002214_hash pep_recvmsg 4 19402 _002214_hash NULL
84234 +_002215_hash pfkey_recvmsg 4 53604 _002215_hash NULL
84235 +_002216_hash ping_recvmsg 4 25597 _002216_hash NULL
84236 +_002217_hash pmcraid_change_queue_depth 2 9116 _002217_hash NULL
84237 +_002218_hash pn_recvmsg 4 30887 _002218_hash NULL
84238 +_002219_hash pointer_size_read 3 51863 _002219_hash NULL
84239 +_002220_hash power_read 3 15939 _002220_hash NULL
84240 +_002221_hash pppoe_recvmsg 4 15073 _002221_hash NULL
84241 +_002222_hash pppol2tp_recvmsg 4 57742 _002222_hash NULL
84242 +_002223_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002223_hash NULL
84243 +_002224_hash qla2x00_change_queue_depth 2 24742 _002224_hash NULL
84244 +_002225_hash raw_recvmsg 4 52529 _002225_hash NULL
84245 +_002226_hash rawsock_recvmsg 4 12144 _002226_hash NULL
84246 +_002227_hash rawv6_recvmsg 4 30265 _002227_hash NULL
84247 +_002228_hash reada_add_block 2 54247 _002228_hash NULL
84248 +_002229_hash readahead_tree_block 3 36285 _002229_hash NULL
84249 +_002230_hash reada_tree_block_flagged 3 18402 _002230_hash NULL
84250 +_002231_hash read_tree_block 3 841 _002231_hash NULL
84251 +_002232_hash recover_peb 6-7 29238 _002232_hash NULL
84252 +_002234_hash recv_msg 4 48709 _002234_hash NULL
84253 +_002235_hash recv_stream 4 30138 _002235_hash NULL
84254 +_002236_hash _req_append_segment 2 41031 _002236_hash NULL
84255 +_002237_hash request_key_async 4 6990 _002237_hash NULL
84256 +_002238_hash request_key_async_with_auxdata 4 46624 _002238_hash NULL
84257 +_002239_hash request_key_with_auxdata 4 24515 _002239_hash NULL
84258 +_002240_hash rose_recvmsg 4 2368 _002240_hash NULL
84259 +_002241_hash rxrpc_recvmsg 4 26233 _002241_hash NULL
84260 +_002242_hash rx_streaming_always_read 3 49401 _002242_hash NULL
84261 +_002243_hash rx_streaming_interval_read 3 55291 _002243_hash NULL
84262 +_002244_hash sas_change_queue_depth 2 18555 _002244_hash NULL
84263 +_002245_hash scsi_activate_tcq 2 42640 _002245_hash NULL
84264 +_002246_hash scsi_deactivate_tcq 2 47086 _002246_hash NULL
84265 +_002247_hash scsi_execute 5 33596 _002247_hash NULL
84266 +_002248_hash _scsih_adjust_queue_depth 2 1083 _002248_hash NULL
84267 +_002249_hash scsi_init_shared_tag_map 2 59812 _002249_hash NULL
84268 +_002250_hash scsi_track_queue_full 2 44239 _002250_hash NULL
84269 +_002251_hash sctp_recvmsg 4 23265 _002251_hash NULL
84270 +_002252_hash send_stream 4 3397 _002252_hash NULL
84271 +_002253_hash skb_copy_and_csum_datagram_iovec 2 24466 _002253_hash NULL
84272 +_002255_hash snd_gf1_mem_proc_dump 5 16926 _002255_hash NULL
84273 +_002256_hash split_scan_timeout_read 3 20029 _002256_hash NULL
84274 +_002257_hash sta_dev_read 3 14782 _002257_hash NULL
84275 +_002258_hash sta_inactive_ms_read 3 25690 _002258_hash NULL
84276 +_002259_hash sta_last_signal_read 3 31818 _002259_hash NULL
84277 +_002260_hash stats_dot11ACKFailureCount_read 3 45558 _002260_hash NULL
84278 +_002261_hash stats_dot11FCSErrorCount_read 3 28154 _002261_hash NULL
84279 +_002262_hash stats_dot11RTSFailureCount_read 3 43948 _002262_hash NULL
84280 +_002263_hash stats_dot11RTSSuccessCount_read 3 33065 _002263_hash NULL
84281 +_002264_hash storvsc_connect_to_vsp 2 22 _002264_hash NULL
84282 +_002265_hash suspend_dtim_interval_read 3 64971 _002265_hash NULL
84283 +_002266_hash sys_msgrcv 3 959 _002266_hash NULL
84284 +_002267_hash tcm_loop_change_queue_depth 2 42454 _002267_hash NULL
84285 +_002268_hash tcp_copy_to_iovec 3 28344 _002268_hash NULL
84286 +_002269_hash tcp_recvmsg 4 31238 _002269_hash NULL
84287 +_002270_hash timeout_read 3 47915 _002270_hash NULL
84288 +_002271_hash total_ps_buffered_read 3 16365 _002271_hash NULL
84289 +_002272_hash tun_put_user 4 59849 _002272_hash NULL
84290 +_002273_hash twa_change_queue_depth 2 48808 _002273_hash NULL
84291 +_002274_hash tw_change_queue_depth 2 11116 _002274_hash NULL
84292 +_002275_hash twl_change_queue_depth 2 41342 _002275_hash NULL
84293 +_002276_hash ubi_eba_write_leb 5-6 19826 _002276_hash NULL
84294 +_002278_hash ubi_eba_write_leb_st 5 27896 _002278_hash NULL
84295 +_002279_hash udp_recvmsg 4 42558 _002279_hash NULL
84296 +_002280_hash udpv6_recvmsg 4 9813 _002280_hash &_002171_hash
84297 +_002281_hash ulong_read_file 3 42304 _002281_hash &_000511_hash
84298 +_002282_hash unix_dgram_recvmsg 4 14952 _002282_hash NULL
84299 +_002283_hash user_power_read 3 39414 _002283_hash NULL
84300 +_002284_hash vcc_recvmsg 4 37198 _002284_hash NULL
84301 +_002285_hash wep_iv_read 3 54744 _002285_hash NULL
84302 +_002286_hash x25_recvmsg 4 42777 _002286_hash NULL
84303 +_002287_hash xfs_iext_insert 3 18667 _002287_hash NULL
84304 +_002288_hash xfs_iext_remove 3 50909 _002288_hash NULL
84305 +_002289_hash xlog_find_verify_log_record 2 18870 _002289_hash NULL
84306 +_002290_hash btrfs_alloc_free_block 3 29982 _002290_hash NULL
84307 +_002291_hash cx18_read_pos 3 4683 _002291_hash NULL
84308 +_002292_hash l2cap_sock_recvmsg 4 59886 _002292_hash NULL
84309 +_002293_hash osd_req_list_dev_partitions 4 60027 _002293_hash NULL
84310 +_002294_hash osd_req_list_partition_collections 5 38223 _002294_hash NULL
84311 +_002295_hash osst_do_scsi 4 44410 _002295_hash NULL
84312 +_002296_hash qla2x00_handle_queue_full 2 24365 _002296_hash NULL
84313 +_002297_hash rfcomm_sock_recvmsg 4 22227 _002297_hash NULL
84314 +_002298_hash scsi_execute_req 5 42088 _002298_hash NULL
84315 +_002299_hash _scsih_change_queue_depth 2 26230 _002299_hash NULL
84316 +_002300_hash spi_execute 5 28736 _002300_hash NULL
84317 +_002301_hash submit_inquiry 3 42108 _002301_hash NULL
84318 +_002302_hash tcp_dma_try_early_copy 3 37651 _002302_hash NULL
84319 +_002303_hash tun_do_read 4 50800 _002303_hash NULL
84320 +_002304_hash ubi_eba_atomic_leb_change 5 13041 _002304_hash NULL
84321 +_002305_hash ubi_leb_write 4-5 41691 _002305_hash NULL
84322 +_002307_hash unix_seqpacket_recvmsg 4 23062 _002307_hash NULL
84323 +_002308_hash write_leb 5 36957 _002308_hash NULL
84324 +_002309_hash ch_do_scsi 4 31171 _002309_hash NULL
84325 +_002310_hash dbg_leb_write 4-5 20478 _002310_hash NULL
84326 +_002312_hash scsi_mode_sense 5 16835 _002312_hash NULL
84327 +_002313_hash scsi_vpd_inquiry 4 30040 _002313_hash NULL
84328 +_002314_hash ses_recv_diag 4 47143 _002314_hash &_000673_hash
84329 +_002315_hash ses_send_diag 4 64527 _002315_hash NULL
84330 +_002316_hash spi_dv_device_echo_buffer 2-3 39846 _002316_hash NULL
84331 +_002318_hash ubifs_leb_write 4-5 61226 _002318_hash NULL
84332 +_002320_hash ubi_leb_change 4 14899 _002320_hash NULL
84333 +_002321_hash ubi_write 4-5 30809 _002321_hash NULL
84334 +_002322_hash dbg_leb_change 4 19969 _002322_hash NULL
84335 +_002323_hash gluebi_write 3 27905 _002323_hash NULL
84336 +_002324_hash scsi_get_vpd_page 4 51951 _002324_hash NULL
84337 +_002325_hash sd_do_mode_sense 5 11507 _002325_hash NULL
84338 +_002326_hash ubifs_leb_change 4 22399 _002436_hash NULL nohasharray
84339 +_002327_hash ubifs_write_node 5 15088 _002327_hash NULL
84340 +_002328_hash fixup_leb 3 43256 _002328_hash NULL
84341 +_002329_hash recover_head 3 17904 _002329_hash NULL
84342 +_002330_hash alloc_cpu_rmap 1 65363 _002330_hash NULL
84343 +_002331_hash alloc_ebda_hpc 1-2 50046 _002331_hash NULL
84344 +_002333_hash alloc_sched_domains 1 28972 _002333_hash NULL
84345 +_002334_hash amthi_read 4 45831 _002334_hash NULL
84346 +_002335_hash bcm_char_read 3 31750 _002335_hash NULL
84347 +_002336_hash BcmCopySection 5 2035 _002336_hash NULL
84348 +_002337_hash buffer_from_user 3 51826 _002337_hash NULL
84349 +_002338_hash buffer_to_user 3 35439 _002338_hash NULL
84350 +_002339_hash c4iw_init_resource_fifo 3 48090 _002339_hash NULL
84351 +_002340_hash c4iw_init_resource_fifo_random 3 25547 _002340_hash NULL
84352 +_002341_hash card_send_command 3 40757 _002341_hash NULL
84353 +_002342_hash chd_dec_fetch_cdata 3 50926 _002342_hash NULL
84354 +_002343_hash crystalhd_create_dio_pool 2 3427 _002343_hash NULL
84355 +_002344_hash crystalhd_user_data 3 18407 _002344_hash NULL
84356 +_002345_hash cxio_init_resource_fifo 3 28764 _002345_hash NULL
84357 +_002346_hash cxio_init_resource_fifo_random 3 47151 _002346_hash NULL
84358 +_002347_hash do_pages_stat 2 4437 _002347_hash NULL
84359 +_002348_hash do_read_log_to_user 4 3236 _002348_hash NULL
84360 +_002349_hash do_write_log_from_user 3 39362 _002349_hash NULL
84361 +_002350_hash dt3155_read 3 59226 _002350_hash NULL
84362 +_002351_hash easycap_alsa_vmalloc 2 14426 _002351_hash NULL
84363 +_002352_hash evm_read_key 3 54674 _002352_hash NULL
84364 +_002353_hash evm_write_key 3 27715 _002353_hash NULL
84365 +_002354_hash fir16_create 3 5574 _002354_hash NULL
84366 +_002355_hash iio_allocate_device 1 18821 _002355_hash NULL
84367 +_002356_hash __iio_allocate_kfifo 2-3 55738 _002356_hash NULL
84368 +_002358_hash __iio_allocate_sw_ring_buffer 3 4843 _002358_hash NULL
84369 +_002359_hash iio_debugfs_read_reg 3 60908 _002359_hash NULL
84370 +_002360_hash iio_debugfs_write_reg 3 22742 _002360_hash NULL
84371 +_002361_hash iio_event_chrdev_read 3 54757 _002361_hash NULL
84372 +_002362_hash iio_read_first_n_kfifo 2 57910 _002362_hash NULL
84373 +_002363_hash iio_read_first_n_sw_rb 2 51911 _002363_hash NULL
84374 +_002364_hash ioapic_setup_resources 1 35255 _002364_hash NULL
84375 +_002365_hash keymap_store 4 45406 _002365_hash NULL
84376 +_002366_hash kzalloc_node 1 24352 _002366_hash NULL
84377 +_002367_hash line6_alloc_sysex_buffer 4 28225 _002367_hash NULL
84378 +_002368_hash line6_dumpreq_initbuf 3 53123 _002368_hash NULL
84379 +_002369_hash line6_midibuf_init 2 52425 _002369_hash NULL
84380 +_002370_hash lirc_write 3 20604 _002370_hash NULL
84381 +_002371_hash _malloc 1 54077 _002371_hash NULL
84382 +_002372_hash mei_read 3 6507 _002372_hash NULL
84383 +_002373_hash mei_write 3 4005 _002373_hash NULL
84384 +_002374_hash mempool_create_node 1 44715 _002374_hash NULL
84385 +_002375_hash msg_set 3 51725 _002375_hash NULL
84386 +_002376_hash newpart 6 47485 _002376_hash NULL
84387 +_002377_hash OS_kmalloc 1 36909 _002377_hash NULL
84388 +_002378_hash pcpu_alloc_bootmem 2 62074 _002378_hash NULL
84389 +_002379_hash pcpu_get_vm_areas 3 50085 _002379_hash NULL
84390 +_002380_hash resource_from_user 3 30341 _002380_hash NULL
84391 +_002381_hash sca3000_read_data 4 57064 _002381_hash NULL
84392 +_002382_hash sca3000_read_first_n_hw_rb 2 11479 _002382_hash NULL
84393 +_002383_hash send_midi_async 3 57463 _002383_hash NULL
84394 +_002384_hash sep_create_dcb_dmatables_context 6 37551 _002384_hash NULL
84395 +_002385_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002385_hash NULL
84396 +_002386_hash sep_create_msgarea_context 4 33829 _002386_hash NULL
84397 +_002387_hash sep_lli_table_secure_dma 2-3 64042 _002387_hash NULL
84398 +_002389_hash sep_lock_user_pages 2-3 57470 _002389_hash &_002154_hash
84399 +_002391_hash sep_prepare_input_output_dma_table_in_dcb 4-5 63087 _002391_hash NULL
84400 +_002393_hash sep_read 3 17161 _002393_hash NULL
84401 +_002394_hash TransmitTcb 4 12989 _002394_hash NULL
84402 +_002395_hash ValidateDSDParamsChecksum 3 63654 _002395_hash NULL
84403 +_002396_hash Wb35Reg_BurstWrite 4 62327 _002396_hash NULL
84404 +_002397_hash __alloc_bootmem_low_node 2 25726 _002397_hash &_001499_hash
84405 +_002398_hash __alloc_bootmem_node 2 1992 _002398_hash NULL
84406 +_002399_hash alloc_irq_cpu_rmap 1 28459 _002399_hash NULL
84407 +_002400_hash alloc_ring 2-4 18278 _002400_hash NULL
84408 +_002402_hash c4iw_init_resource 2-3 30393 _002402_hash NULL
84409 +_002404_hash cxio_hal_init_resource 2-7-6 29771 _002404_hash &_000284_hash
84410 +_002407_hash cxio_hal_init_rhdl_resource 1 25104 _002407_hash NULL
84411 +_002408_hash disk_expand_part_tbl 2 30561 _002408_hash NULL
84412 +_002409_hash InterfaceTransmitPacket 3 42058 _002409_hash NULL
84413 +_002410_hash line6_dumpreq_init 3 34473 _002410_hash NULL
84414 +_002411_hash mempool_create 1 29437 _002411_hash NULL
84415 +_002412_hash pcpu_fc_alloc 2 11818 _002412_hash NULL
84416 +_002413_hash pod_alloc_sysex_buffer 3 31651 _002413_hash NULL
84417 +_002414_hash r8712_usbctrl_vendorreq 6 48489 _002414_hash NULL
84418 +_002415_hash r871x_set_wpa_ie 3 7000 _002415_hash NULL
84419 +_002416_hash sys_move_pages 2 42626 _002416_hash NULL
84420 +_002417_hash variax_alloc_sysex_buffer 3 15237 _002417_hash NULL
84421 +_002418_hash vme_user_write 3 15587 _002418_hash NULL
84422 +_002419_hash add_partition 2 55588 _002419_hash NULL
84423 +_002420_hash __alloc_bootmem_node_high 2 65076 _002420_hash NULL
84424 +_002421_hash ceph_msgpool_init 3 33312 _002421_hash NULL
84425 +_002423_hash mempool_create_kmalloc_pool 1 41650 _002423_hash NULL
84426 +_002424_hash mempool_create_page_pool 1 30189 _002424_hash NULL
84427 +_002425_hash mempool_create_slab_pool 1 62907 _002425_hash NULL
84428 +_002426_hash variax_set_raw2 4 32374 _002426_hash NULL
84429 +_002427_hash bioset_create 1 5580 _002427_hash NULL
84430 +_002428_hash bioset_integrity_create 2 62708 _002428_hash NULL
84431 +_002429_hash biovec_create_pools 2 9575 _002429_hash NULL
84432 +_002430_hash i2o_pool_alloc 4 55485 _002430_hash NULL
84433 +_002431_hash prison_create 1 43623 _002431_hash NULL
84434 +_002432_hash unlink_simple 3 47506 _002432_hash NULL
84435 +_002433_hash alloc_ieee80211 1 20063 _002433_hash NULL
84436 +_002434_hash alloc_ieee80211_rsl 1 34564 _002434_hash NULL
84437 +_002435_hash alloc_page_cgroup 1 2919 _002435_hash NULL
84438 +_002436_hash alloc_private 2 22399 _002436_hash &_002326_hash
84439 +_002437_hash alloc_rtllib 1 51136 _002437_hash NULL
84440 +_002438_hash alloc_rx_desc_ring 2 18016 _002438_hash NULL
84441 +_002439_hash alloc_subdevices 2 43300 _002439_hash NULL
84442 +_002440_hash atomic_counters_read 3 48827 _002440_hash NULL
84443 +_002441_hash atomic_stats_read 3 36228 _002441_hash NULL
84444 +_002442_hash capabilities_read 3 58457 _002442_hash NULL
84445 +_002443_hash comedi_read 3 13199 _002443_hash NULL
84446 +_002444_hash comedi_write 3 47926 _002444_hash NULL
84447 +_002445_hash compat_do_arpt_set_ctl 4 12184 _002445_hash NULL
84448 +_002446_hash compat_do_ip6t_set_ctl 4 3184 _002446_hash NULL
84449 +_002447_hash compat_do_ipt_set_ctl 4 58466 _002447_hash &_001852_hash
84450 +_002448_hash compat_filldir 3 32999 _002448_hash NULL
84451 +_002449_hash compat_filldir64 3 35354 _002449_hash NULL
84452 +_002450_hash compat_fillonedir 3 15620 _002450_hash NULL
84453 +_002451_hash compat_rw_copy_check_uvector 3 25242 _002451_hash NULL
84454 +_002452_hash compat_sock_setsockopt 5 23 _002452_hash NULL
84455 +_002453_hash compat_sys_kexec_load 2 35674 _002453_hash NULL
84456 +_002454_hash compat_sys_keyctl 4 9639 _002454_hash NULL
84457 +_002455_hash compat_sys_move_pages 2 5861 _002455_hash NULL
84458 +_002456_hash compat_sys_mq_timedsend 3 31060 _002456_hash NULL
84459 +_002457_hash compat_sys_msgrcv 2 7482 _002457_hash NULL
84460 +_002458_hash compat_sys_msgsnd 2 10738 _002458_hash NULL
84461 +_002459_hash compat_sys_semtimedop 3 3606 _002459_hash NULL
84462 +_002460_hash __copy_in_user 3 34790 _002460_hash NULL
84463 +_002461_hash copy_in_user 3 57502 _002461_hash NULL
84464 +_002462_hash dev_counters_read 3 19216 _002462_hash NULL
84465 +_002463_hash dev_names_read 3 38509 _002463_hash NULL
84466 +_002464_hash do_arpt_set_ctl 4 51053 _002464_hash NULL
84467 +_002465_hash do_ip6t_set_ctl 4 60040 _002465_hash NULL
84468 +_002466_hash do_ipt_set_ctl 4 56238 _002466_hash NULL
84469 +_002467_hash drbd_bm_resize 2 20522 _002467_hash NULL
84470 +_002468_hash driver_names_read 3 60399 _002468_hash NULL
84471 +_002469_hash driver_stats_read 3 8944 _002469_hash NULL
84472 +_002470_hash __earlyonly_bootmem_alloc 2 23824 _002470_hash NULL
84473 +_002471_hash evtchn_read 3 3569 _002471_hash NULL
84474 +_002472_hash ext_sd_execute_read_data 9 48589 _002472_hash NULL
84475 +_002473_hash ext_sd_execute_write_data 9 8175 _002473_hash NULL
84476 +_002474_hash fat_compat_ioctl_filldir 3 36328 _002474_hash NULL
84477 +_002475_hash firmwareUpload 3 32794 _002475_hash NULL
84478 +_002476_hash flash_read 3 57843 _002476_hash NULL
84479 +_002477_hash flash_write 3 62354 _002477_hash NULL
84480 +_002478_hash gather_array 3 56641 _002478_hash NULL
84481 +_002479_hash ghash_async_setkey 3 60001 _002479_hash NULL
84482 +_002480_hash gntdev_alloc_map 2 35145 _002480_hash NULL
84483 +_002481_hash gnttab_map 2 56439 _002481_hash NULL
84484 +_002482_hash gru_alloc_gts 2-3 60056 _002482_hash NULL
84485 +_002484_hash handle_eviocgbit 3 44193 _002484_hash NULL
84486 +_002485_hash hid_parse_report 3 51737 _002485_hash NULL
84487 +_002486_hash ieee80211_alloc_txb 1 52477 _002486_hash NULL
84488 +_002487_hash ieee80211_wx_set_gen_ie 3 51399 _002487_hash NULL
84489 +_002488_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _002488_hash NULL
84490 +_002489_hash init_cdev 1 8274 _002489_hash NULL
84491 +_002490_hash init_per_cpu 1 17880 _002490_hash NULL
84492 +_002491_hash ipath_create_cq 2 45586 _002491_hash NULL
84493 +_002492_hash ipath_get_base_info 3 7043 _002492_hash NULL
84494 +_002493_hash ipath_init_qp_table 2 25167 _002493_hash NULL
84495 +_002494_hash ipath_resize_cq 2 712 _002494_hash NULL
84496 +_002495_hash ni_gpct_device_construct 5 610 _002495_hash NULL
84497 +_002496_hash options_write 3 47243 _002496_hash NULL
84498 +_002497_hash portcntrs_1_read 3 47253 _002497_hash NULL
84499 +_002498_hash portcntrs_2_read 3 56586 _002498_hash NULL
84500 +_002499_hash portnames_read 3 41958 _002499_hash NULL
84501 +_002500_hash ptc_proc_write 3 12076 _002500_hash NULL
84502 +_002501_hash put_cmsg_compat 4 35937 _002501_hash NULL
84503 +_002502_hash qib_alloc_devdata 2 51819 _002502_hash NULL
84504 +_002503_hash qib_alloc_fast_reg_page_list 2 10507 _002503_hash NULL
84505 +_002504_hash qib_cdev_init 1 34778 _002504_hash NULL
84506 +_002505_hash qib_create_cq 2 27497 _002505_hash NULL
84507 +_002506_hash qib_diag_write 3 62133 _002506_hash NULL
84508 +_002507_hash qib_get_base_info 3 11369 _002507_hash NULL
84509 +_002508_hash qib_resize_cq 2 53090 _002508_hash NULL
84510 +_002509_hash qsfp_1_read 3 21915 _002509_hash NULL
84511 +_002510_hash qsfp_2_read 3 31491 _002510_hash NULL
84512 +_002511_hash queue_reply 3 22416 _002511_hash NULL
84513 +_002512_hash Realloc 2 34961 _002512_hash NULL
84514 +_002513_hash rfc4106_set_key 3 54519 _002513_hash NULL
84515 +_002514_hash rtllib_alloc_txb 1 21687 _002514_hash NULL
84516 +_002515_hash rtllib_wx_set_gen_ie 3 59808 _002515_hash NULL
84517 +_002516_hash rts51x_transfer_data_partial 6 5735 _002516_hash NULL
84518 +_002517_hash sparse_early_usemaps_alloc_node 4 9269 _002517_hash NULL
84519 +_002518_hash split 2 11691 _002518_hash NULL
84520 +_002519_hash stats_read_ul 3 32751 _002519_hash NULL
84521 +_002520_hash store_debug_level 3 35652 _002520_hash NULL
84522 +_002521_hash sys32_ipc 3 7238 _002521_hash NULL
84523 +_002522_hash sys32_rt_sigpending 2 25814 _002522_hash NULL
84524 +_002523_hash tunables_read 3 36385 _002523_hash NULL
84525 +_002524_hash tunables_write 3 59563 _002524_hash NULL
84526 +_002525_hash u32_array_read 3 2219 _002525_hash NULL
84527 +_002526_hash usb_buffer_alloc 2 36276 _002526_hash NULL
84528 +_002527_hash xenbus_file_write 3 6282 _002527_hash NULL
84529 +_002528_hash xpc_kmalloc_cacheline_aligned 1 42895 _002528_hash NULL
84530 +_002529_hash xpc_kzalloc_cacheline_aligned 1 65433 _002529_hash NULL
84531 +_002530_hash xsd_read 3 15653 _002530_hash NULL
84532 +_002531_hash compat_do_readv_writev 4 49102 _002531_hash NULL
84533 +_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _002532_hash NULL
84534 +_002533_hash compat_process_vm_rw 3-5 22254 _002533_hash NULL
84535 +_002535_hash compat_sys_setsockopt 5 3326 _002535_hash NULL
84536 +_002536_hash ipath_cdev_init 1 37752 _002536_hash NULL
84537 +_002537_hash ms_read_multiple_pages 4-5 8052 _002537_hash NULL
84538 +_002539_hash ms_write_multiple_pages 5-6 10362 _002539_hash NULL
84539 +_002541_hash sparse_mem_maps_populate_node 4 12669 _002541_hash &_002004_hash
84540 +_002542_hash vmemmap_alloc_block 1 43245 _002542_hash NULL
84541 +_002543_hash xd_read_multiple_pages 4-5 11422 _002543_hash NULL
84542 +_002545_hash xd_write_multiple_pages 5-6 53633 _002545_hash NULL
84543 +_002546_hash compat_readv 3 30273 _002546_hash NULL
84544 +_002547_hash compat_sys_process_vm_readv 3-5 15374 _002547_hash NULL
84545 +_002549_hash compat_sys_process_vm_writev 3-5 41194 _002549_hash NULL
84546 +_002551_hash compat_writev 3 60063 _002551_hash NULL
84547 +_002552_hash ms_rw_multi_sector 4 7459 _002552_hash NULL
84548 +_002553_hash sparse_early_mem_maps_alloc_node 4 36971 _002553_hash NULL
84549 +_002554_hash vmemmap_alloc_block_buf 1 61126 _002554_hash NULL
84550 +_002555_hash xd_rw 4 49020 _002555_hash NULL
84551 +_002556_hash compat_sys_preadv64 3 24283 _002556_hash NULL
84552 +_002557_hash compat_sys_pwritev64 3 51151 _002557_hash NULL
84553 +_002558_hash compat_sys_readv 3 20911 _002558_hash NULL
84554 +_002559_hash compat_sys_writev 3 5784 _002559_hash NULL
84555 +_002560_hash ms_rw 4 17220 _002560_hash NULL
84556 +_002561_hash compat_sys_preadv 3 583 _002561_hash NULL
84557 +_002562_hash compat_sys_pwritev 3 17886 _002562_hash NULL
84558 +_002563_hash alloc_apertures 1 56561 _002563_hash NULL
84559 +_002564_hash bin_uuid 3 28999 _002564_hash NULL
84560 +_002565_hash __copy_from_user_inatomic_nocache 3 49921 _002565_hash NULL
84561 +_002566_hash do_dmabuf_dirty_sou 7 3017 _002566_hash NULL
84562 +_002567_hash do_surface_dirty_sou 7 39678 _002567_hash NULL
84563 +_002568_hash drm_agp_bind_pages 3 56748 _002568_hash NULL
84564 +_002569_hash drm_calloc_large 1-2 65421 _002569_hash NULL
84565 +_002571_hash drm_fb_helper_init 3-4 19044 _002571_hash NULL
84566 +_002573_hash drm_ht_create 2 18853 _002573_hash NULL
84567 +_002574_hash drm_malloc_ab 1-2 16831 _002574_hash NULL
84568 +_002576_hash drm_mode_crtc_set_gamma_size 2 31881 _002576_hash NULL
84569 +_002577_hash drm_plane_init 6 28731 _002577_hash NULL
84570 +_002578_hash drm_property_create 4 51239 _002578_hash NULL
84571 +_002579_hash drm_property_create_blob 2 7414 _002579_hash NULL
84572 +_002580_hash drm_vblank_init 2 11362 _002580_hash NULL
84573 +_002581_hash drm_vmalloc_dma 1 14550 _002581_hash NULL
84574 +_002582_hash fb_alloc_cmap_gfp 2 20792 _002582_hash NULL
84575 +_002583_hash fbcon_prepare_logo 5 6246 _002583_hash NULL
84576 +_002584_hash fb_read 3 33506 _002584_hash NULL
84577 +_002585_hash fb_write 3 46924 _002585_hash NULL
84578 +_002586_hash framebuffer_alloc 1 59145 _002586_hash NULL
84579 +_002587_hash i915_cache_sharing_read 3 24775 _002587_hash NULL
84580 +_002588_hash i915_cache_sharing_write 3 57961 _002588_hash NULL
84581 +_002589_hash i915_max_freq_read 3 20581 _002589_hash NULL
84582 +_002590_hash i915_max_freq_write 3 11350 _002590_hash NULL
84583 +_002591_hash i915_wedged_read 3 35474 _002591_hash NULL
84584 +_002592_hash i915_wedged_write 3 47771 _002592_hash NULL
84585 +_002593_hash p9_client_read 5 19750 _002593_hash NULL
84586 +_002594_hash probe_kernel_write 3 17481 _002594_hash NULL
84587 +_002595_hash sched_feat_write 3 55202 _002595_hash NULL
84588 +_002596_hash sd_alloc_ctl_entry 1 29708 _002596_hash NULL
84589 +_002597_hash tstats_write 3 60432 _002597_hash &_000009_hash
84590 +_002598_hash ttm_bo_fbdev_io 4 9805 _002598_hash NULL
84591 +_002599_hash ttm_bo_io 5 47000 _002599_hash NULL
84592 +_002600_hash ttm_dma_page_pool_free 2 34135 _002600_hash NULL
84593 +_002601_hash ttm_page_pool_free 2 61661 _002601_hash NULL
84594 +_002602_hash vmw_execbuf_process 5 22885 _002602_hash NULL
84595 +_002603_hash vmw_fifo_reserve 2 12141 _002603_hash NULL
84596 +_002604_hash vmw_kms_present 9 38130 _002604_hash NULL
84597 +_002605_hash vmw_kms_readback 6 5727 _002605_hash NULL
84598 +_002606_hash do_dmabuf_dirty_ldu 6 52241 _002606_hash NULL
84599 +_002607_hash drm_mode_create_tv_properties 2 23122 _002607_hash NULL
84600 +_002608_hash drm_property_create_enum 5 29201 _002608_hash NULL
84601 +_002609_hash fast_user_write 5 20494 _002609_hash NULL
84602 +_002610_hash fb_alloc_cmap 2 6554 _002610_hash NULL
84603 +_002611_hash i915_gem_execbuffer_relocate_slow 7 25355 _002611_hash NULL
84604 +_002612_hash kgdb_hex2mem 3 24755 _002612_hash NULL
84605 +_002613_hash ttm_object_device_init 2 10321 _002613_hash NULL
84606 +_002614_hash ttm_object_file_init 2 27804 _002614_hash NULL
84607 +_002615_hash vmw_cursor_update_image 3-4 16332 _002615_hash NULL
84608 +_002617_hash vmw_gmr2_bind 3 21305 _002617_hash NULL
84609 +_002618_hash vmw_cursor_update_dmabuf 3-4 32045 _002618_hash NULL
84610 +_002620_hash vmw_gmr_bind 3 44130 _002620_hash NULL
84611 +_002621_hash vmw_du_crtc_cursor_set 4-5 28479 _002621_hash NULL
84612 +_002622_hash __module_alloc 1 50004 _002622_hash NULL
84613 +_002623_hash module_alloc_update_bounds_rw 1 63233 _002623_hash NULL
84614 +_002624_hash module_alloc_update_bounds_rx 1 58634 _002624_hash NULL
84615 +_002625_hash acpi_system_write_alarm 3 40205 _002625_hash NULL
84616 +_002626_hash create_table 2 16213 _002626_hash NULL
84617 +_002627_hash mem_read 3 57631 _002627_hash NULL
84618 +_002628_hash mem_write 3 22232 _002628_hash NULL
84619 +_002629_hash proc_fault_inject_read 3 36802 _002629_hash NULL
84620 +_002630_hash proc_fault_inject_write 3 21058 _002630_hash NULL
84621 +_002631_hash v9fs_fid_readn 4 60544 _002631_hash NULL
84622 +_002632_hash v9fs_file_read 3 40858 _002632_hash NULL
84623 +_002633_hash __devres_alloc 2 25598 _002633_hash NULL
84624 +_002634_hash acl_alloc 1 35979 _002634_hash NULL
84625 +_002635_hash acl_alloc_stack_init 1 60630 _002635_hash NULL
84626 +_002636_hash acl_alloc_num 1-2 60778 _002636_hash NULL
84627 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
84628 new file mode 100644
84629 index 0000000..cc96254
84630 --- /dev/null
84631 +++ b/tools/gcc/size_overflow_plugin.c
84632 @@ -0,0 +1,1204 @@
84633 +/*
84634 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
84635 + * Licensed under the GPL v2, or (at your option) v3
84636 + *
84637 + * Homepage:
84638 + * http://www.grsecurity.net/~ephox/overflow_plugin/
84639 + *
84640 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
84641 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
84642 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
84643 + *
84644 + * Usage:
84645 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
84646 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
84647 + */
84648 +
84649 +#include "gcc-plugin.h"
84650 +#include "config.h"
84651 +#include "system.h"
84652 +#include "coretypes.h"
84653 +#include "tree.h"
84654 +#include "tree-pass.h"
84655 +#include "intl.h"
84656 +#include "plugin-version.h"
84657 +#include "tm.h"
84658 +#include "toplev.h"
84659 +#include "function.h"
84660 +#include "tree-flow.h"
84661 +#include "plugin.h"
84662 +#include "gimple.h"
84663 +#include "c-common.h"
84664 +#include "diagnostic.h"
84665 +#include "cfgloop.h"
84666 +
84667 +struct size_overflow_hash {
84668 + struct size_overflow_hash *next;
84669 + const char *name;
84670 + unsigned int param;
84671 +};
84672 +
84673 +#include "size_overflow_hash.h"
84674 +
84675 +#define __unused __attribute__((__unused__))
84676 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
84677 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
84678 +#define BEFORE_STMT true
84679 +#define AFTER_STMT false
84680 +#define CREATE_NEW_VAR NULL_TREE
84681 +#define CODES_LIMIT 32
84682 +#define MAX_PARAM 10
84683 +
84684 +#if BUILDING_GCC_VERSION == 4005
84685 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
84686 +#endif
84687 +
84688 +int plugin_is_GPL_compatible;
84689 +void debug_gimple_stmt(gimple gs);
84690 +
84691 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
84692 +static tree signed_size_overflow_type;
84693 +static tree unsigned_size_overflow_type;
84694 +static tree report_size_overflow_decl;
84695 +static tree const_char_ptr_type_node;
84696 +static unsigned int handle_function(void);
84697 +
84698 +static struct plugin_info size_overflow_plugin_info = {
84699 + .version = "20120618beta",
84700 + .help = "no-size-overflow\tturn off size overflow checking\n",
84701 +};
84702 +
84703 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
84704 +{
84705 + unsigned int arg_count = type_num_arguments(*node);
84706 +
84707 + for (; args; args = TREE_CHAIN(args)) {
84708 + tree position = TREE_VALUE(args);
84709 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
84710 + error("handle_size_overflow_attribute: overflow parameter outside range.");
84711 + *no_add_attrs = true;
84712 + }
84713 + }
84714 + return NULL_TREE;
84715 +}
84716 +
84717 +static struct attribute_spec no_size_overflow_attr = {
84718 + .name = "size_overflow",
84719 + .min_length = 1,
84720 + .max_length = -1,
84721 + .decl_required = false,
84722 + .type_required = true,
84723 + .function_type_required = true,
84724 + .handler = handle_size_overflow_attribute,
84725 +#if BUILDING_GCC_VERSION >= 4007
84726 + .affects_type_identity = false
84727 +#endif
84728 +};
84729 +
84730 +static void register_attributes(void __unused *event_data, void __unused *data)
84731 +{
84732 + register_attribute(&no_size_overflow_attr);
84733 +}
84734 +
84735 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
84736 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
84737 +{
84738 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
84739 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
84740 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
84741 +
84742 + const unsigned int m = 0x57559429;
84743 + const unsigned int n = 0x5052acdb;
84744 + const unsigned int *key4 = (const unsigned int *)key;
84745 + unsigned int h = len;
84746 + unsigned int k = len + seed + n;
84747 + unsigned long long p;
84748 +
84749 + while (len >= 8) {
84750 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
84751 + len -= 8;
84752 + }
84753 + if (len >= 4) {
84754 + cwmixb(key4[0]) key4 += 1;
84755 + len -= 4;
84756 + }
84757 + if (len)
84758 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
84759 + cwmixb(h ^ (k + n));
84760 + return k ^ h;
84761 +
84762 +#undef cwfold
84763 +#undef cwmixa
84764 +#undef cwmixb
84765 +}
84766 +
84767 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
84768 +{
84769 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
84770 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
84771 + return fn ^ codes;
84772 +}
84773 +
84774 +static inline tree get_original_function_decl(tree fndecl)
84775 +{
84776 + if (DECL_ABSTRACT_ORIGIN(fndecl))
84777 + return DECL_ABSTRACT_ORIGIN(fndecl);
84778 + return fndecl;
84779 +}
84780 +
84781 +static inline gimple get_def_stmt(tree node)
84782 +{
84783 + gcc_assert(TREE_CODE(node) == SSA_NAME);
84784 + return SSA_NAME_DEF_STMT(node);
84785 +}
84786 +
84787 +static unsigned char get_tree_code(tree type)
84788 +{
84789 + switch (TREE_CODE(type)) {
84790 + case ARRAY_TYPE:
84791 + return 0;
84792 + case BOOLEAN_TYPE:
84793 + return 1;
84794 + case ENUMERAL_TYPE:
84795 + return 2;
84796 + case FUNCTION_TYPE:
84797 + return 3;
84798 + case INTEGER_TYPE:
84799 + return 4;
84800 + case POINTER_TYPE:
84801 + return 5;
84802 + case RECORD_TYPE:
84803 + return 6;
84804 + case UNION_TYPE:
84805 + return 7;
84806 + case VOID_TYPE:
84807 + return 8;
84808 + case REAL_TYPE:
84809 + return 9;
84810 + case VECTOR_TYPE:
84811 + return 10;
84812 + case REFERENCE_TYPE:
84813 + return 11;
84814 + default:
84815 + debug_tree(type);
84816 + gcc_unreachable();
84817 + }
84818 +}
84819 +
84820 +static size_t add_type_codes(tree type, unsigned char *tree_codes, size_t len)
84821 +{
84822 + gcc_assert(type != NULL_TREE);
84823 +
84824 + while (type && len < CODES_LIMIT) {
84825 + tree_codes[len] = get_tree_code(type);
84826 + len++;
84827 + type = TREE_TYPE(type);
84828 + }
84829 + return len;
84830 +}
84831 +
84832 +static unsigned int get_function_decl(tree fndecl, unsigned char *tree_codes)
84833 +{
84834 + tree arg, result, type = TREE_TYPE(fndecl);
84835 + enum tree_code code = TREE_CODE(type);
84836 + size_t len = 0;
84837 +
84838 + gcc_assert(code == FUNCTION_TYPE);
84839 +
84840 + arg = TYPE_ARG_TYPES(type);
84841 + // skip builtins __builtin_constant_p
84842 + if (!arg && DECL_BUILT_IN(fndecl))
84843 + return 0;
84844 + gcc_assert(arg != NULL_TREE);
84845 +
84846 + if (TREE_CODE_CLASS(code) == tcc_type)
84847 + result = type;
84848 + else
84849 + result = DECL_RESULT(fndecl);
84850 +
84851 + gcc_assert(result != NULL_TREE);
84852 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
84853 +
84854 + while (arg && len < CODES_LIMIT) {
84855 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
84856 + arg = TREE_CHAIN(arg);
84857 + }
84858 +
84859 + gcc_assert(len != 0);
84860 + return len;
84861 +}
84862 +
84863 +static struct size_overflow_hash *get_function_hash(tree fndecl)
84864 +{
84865 + unsigned int hash;
84866 + struct size_overflow_hash *entry;
84867 + unsigned char tree_codes[CODES_LIMIT];
84868 + size_t len;
84869 + const char *func_name = NAME(fndecl);
84870 +
84871 + len = get_function_decl(fndecl, tree_codes);
84872 + if (len == 0)
84873 + return NULL;
84874 +
84875 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
84876 +
84877 + entry = size_overflow_hash[hash];
84878 + while (entry) {
84879 + if (!strcmp(entry->name, func_name))
84880 + return entry;
84881 + entry = entry->next;
84882 + }
84883 +
84884 + return NULL;
84885 +}
84886 +
84887 +static void check_arg_type(tree var)
84888 +{
84889 + tree type = TREE_TYPE(var);
84890 + enum tree_code code = TREE_CODE(type);
84891 +
84892 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
84893 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
84894 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
84895 +}
84896 +
84897 +static int find_arg_number(tree arg, tree func)
84898 +{
84899 + tree var;
84900 + bool match = false;
84901 + unsigned int argnum = 1;
84902 +
84903 + if (TREE_CODE(arg) == SSA_NAME)
84904 + arg = SSA_NAME_VAR(arg);
84905 +
84906 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
84907 + if (strcmp(NAME(arg), NAME(var))) {
84908 + argnum++;
84909 + continue;
84910 + }
84911 + check_arg_type(var);
84912 +
84913 + match = true;
84914 + break;
84915 + }
84916 + if (!match) {
84917 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
84918 + return 0;
84919 + }
84920 + return argnum;
84921 +}
84922 +
84923 +static void print_missing_msg(tree func, unsigned int argnum)
84924 +{
84925 + unsigned int new_hash;
84926 + size_t len;
84927 + unsigned char tree_codes[CODES_LIMIT];
84928 + location_t loc = DECL_SOURCE_LOCATION(func);
84929 + const char *curfunc = NAME(func);
84930 +
84931 + len = get_function_decl(func, tree_codes);
84932 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
84933 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+", curfunc, curfunc, argnum, new_hash);
84934 +}
84935 +
84936 +static void check_missing_attribute(tree arg)
84937 +{
84938 + tree type, func = get_original_function_decl(current_function_decl);
84939 + unsigned int argnum;
84940 + struct size_overflow_hash *hash;
84941 +
84942 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
84943 +
84944 + type = TREE_TYPE(arg);
84945 + // skip function pointers
84946 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
84947 + return;
84948 +
84949 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
84950 + return;
84951 +
84952 + argnum = find_arg_number(arg, func);
84953 + if (argnum == 0)
84954 + return;
84955 +
84956 + hash = get_function_hash(func);
84957 + if (!hash || !(hash->param & (1U << argnum)))
84958 + print_missing_msg(func, argnum);
84959 +}
84960 +
84961 +static tree create_new_var(tree type)
84962 +{
84963 + tree new_var = create_tmp_var(type, "cicus");
84964 +
84965 + add_referenced_var(new_var);
84966 + mark_sym_for_renaming(new_var);
84967 + return new_var;
84968 +}
84969 +
84970 +static bool is_bool(tree node)
84971 +{
84972 + tree type;
84973 +
84974 + if (node == NULL_TREE)
84975 + return false;
84976 +
84977 + type = TREE_TYPE(node);
84978 + if (!INTEGRAL_TYPE_P(type))
84979 + return false;
84980 + if (TREE_CODE(type) == BOOLEAN_TYPE)
84981 + return true;
84982 + if (TYPE_PRECISION(type) == 1)
84983 + return true;
84984 + return false;
84985 +}
84986 +
84987 +static tree cast_a_tree(tree type, tree var)
84988 +{
84989 + gcc_assert(type != NULL_TREE && var != NULL_TREE);
84990 + gcc_assert(fold_convertible_p(type, var));
84991 +
84992 + return fold_convert(type, var);
84993 +}
84994 +
84995 +static tree signed_cast(tree var)
84996 +{
84997 + return cast_a_tree(signed_size_overflow_type, var);
84998 +}
84999 +
85000 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
85001 +{
85002 + gimple assign;
85003 +
85004 + if (new_var == CREATE_NEW_VAR)
85005 + new_var = create_new_var(type);
85006 +
85007 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
85008 + gimple_set_location(assign, loc);
85009 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
85010 +
85011 + return assign;
85012 +}
85013 +
85014 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
85015 +{
85016 + tree oldstmt_rhs1;
85017 + enum tree_code code;
85018 + gimple stmt;
85019 + gimple_stmt_iterator gsi;
85020 +
85021 + if (!*potentionally_overflowed)
85022 + return NULL_TREE;
85023 +
85024 + if (rhs1 == NULL_TREE) {
85025 + debug_gimple_stmt(oldstmt);
85026 + error("create_assign: rhs1 is NULL_TREE");
85027 + gcc_unreachable();
85028 + }
85029 +
85030 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
85031 + code = TREE_CODE(oldstmt_rhs1);
85032 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
85033 + check_missing_attribute(oldstmt_rhs1);
85034 +
85035 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
85036 + gsi = gsi_for_stmt(oldstmt);
85037 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
85038 + basic_block next_bb, cur_bb;
85039 + edge e;
85040 +
85041 + gcc_assert(before == false);
85042 + gcc_assert(stmt_can_throw_internal(oldstmt));
85043 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
85044 + gcc_assert(!gsi_end_p(gsi));
85045 +
85046 + cur_bb = gimple_bb(oldstmt);
85047 + next_bb = cur_bb->next_bb;
85048 + e = find_edge(cur_bb, next_bb);
85049 + gcc_assert(e != NULL);
85050 + gcc_assert(e->flags & EDGE_FALLTHRU);
85051 +
85052 + gsi = gsi_after_labels(next_bb);
85053 + gcc_assert(!gsi_end_p(gsi));
85054 + before = true;
85055 + }
85056 + if (before)
85057 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
85058 + else
85059 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
85060 + update_stmt(stmt);
85061 + pointer_set_insert(visited, oldstmt);
85062 + return gimple_get_lhs(stmt);
85063 +}
85064 +
85065 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
85066 +{
85067 + tree new_var, lhs = gimple_get_lhs(oldstmt);
85068 + gimple stmt;
85069 + gimple_stmt_iterator gsi;
85070 +
85071 + if (!*potentionally_overflowed)
85072 + return NULL_TREE;
85073 +
85074 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
85075 + rhs1 = gimple_assign_rhs1(oldstmt);
85076 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
85077 + }
85078 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
85079 + rhs2 = gimple_assign_rhs2(oldstmt);
85080 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
85081 + }
85082 +
85083 + stmt = gimple_copy(oldstmt);
85084 + gimple_set_location(stmt, gimple_location(oldstmt));
85085 +
85086 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
85087 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
85088 +
85089 + if (is_bool(lhs))
85090 + new_var = SSA_NAME_VAR(lhs);
85091 + else
85092 + new_var = create_new_var(signed_size_overflow_type);
85093 + new_var = make_ssa_name(new_var, stmt);
85094 + gimple_set_lhs(stmt, new_var);
85095 +
85096 + if (rhs1 != NULL_TREE) {
85097 + if (!gimple_assign_cast_p(oldstmt))
85098 + rhs1 = signed_cast(rhs1);
85099 + gimple_assign_set_rhs1(stmt, rhs1);
85100 + }
85101 +
85102 + if (rhs2 != NULL_TREE)
85103 + gimple_assign_set_rhs2(stmt, rhs2);
85104 +#if BUILDING_GCC_VERSION >= 4007
85105 + if (rhs3 != NULL_TREE)
85106 + gimple_assign_set_rhs3(stmt, rhs3);
85107 +#endif
85108 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
85109 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
85110 +
85111 + gsi = gsi_for_stmt(oldstmt);
85112 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
85113 + update_stmt(stmt);
85114 + pointer_set_insert(visited, oldstmt);
85115 + return gimple_get_lhs(stmt);
85116 +}
85117 +
85118 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
85119 +{
85120 + basic_block bb;
85121 + gimple phi;
85122 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
85123 +
85124 + bb = gsi_bb(gsi);
85125 +
85126 + phi = create_phi_node(var, bb);
85127 + gsi = gsi_last(phi_nodes(bb));
85128 + gsi_remove(&gsi, false);
85129 +
85130 + gsi = gsi_for_stmt(oldstmt);
85131 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
85132 + gimple_set_bb(phi, bb);
85133 + return phi;
85134 +}
85135 +
85136 +static basic_block create_a_first_bb(void)
85137 +{
85138 + basic_block first_bb;
85139 +
85140 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
85141 + if (dom_info_available_p(CDI_DOMINATORS))
85142 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
85143 + return first_bb;
85144 +}
85145 +
85146 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
85147 +{
85148 + basic_block bb;
85149 + gimple newstmt, def_stmt;
85150 + gimple_stmt_iterator gsi;
85151 +
85152 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
85153 + if (TREE_CODE(arg) == SSA_NAME) {
85154 + def_stmt = get_def_stmt(arg);
85155 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
85156 + gsi = gsi_for_stmt(def_stmt);
85157 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
85158 + return newstmt;
85159 + }
85160 + }
85161 +
85162 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
85163 + if (bb->index == 0)
85164 + bb = create_a_first_bb();
85165 + gsi = gsi_after_labels(bb);
85166 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
85167 + return newstmt;
85168 +}
85169 +
85170 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
85171 +{
85172 + gimple newstmt;
85173 + gimple_stmt_iterator gsi;
85174 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
85175 + gimple def_newstmt = get_def_stmt(new_rhs);
85176 +
85177 + gsi_insert = gsi_insert_after;
85178 + gsi = gsi_for_stmt(def_newstmt);
85179 +
85180 + switch (gimple_code(get_def_stmt(arg))) {
85181 + case GIMPLE_PHI:
85182 + newstmt = gimple_build_assign(new_var, new_rhs);
85183 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
85184 + gsi_insert = gsi_insert_before;
85185 + break;
85186 + case GIMPLE_ASM:
85187 + case GIMPLE_CALL:
85188 + newstmt = gimple_build_assign(new_var, new_rhs);
85189 + break;
85190 + case GIMPLE_ASSIGN:
85191 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
85192 + break;
85193 + default:
85194 + /* unknown gimple_code (handle_build_new_phi_arg) */
85195 + gcc_unreachable();
85196 + }
85197 +
85198 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
85199 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
85200 + update_stmt(newstmt);
85201 + return newstmt;
85202 +}
85203 +
85204 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
85205 +{
85206 + gimple newstmt;
85207 + tree new_rhs;
85208 +
85209 + new_rhs = expand(visited, potentionally_overflowed, arg);
85210 +
85211 + if (new_rhs == NULL_TREE)
85212 + return NULL_TREE;
85213 +
85214 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
85215 + return gimple_get_lhs(newstmt);
85216 +}
85217 +
85218 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
85219 +{
85220 + gimple phi;
85221 + tree new_var = create_new_var(signed_size_overflow_type);
85222 + unsigned int i, n = gimple_phi_num_args(oldstmt);
85223 +
85224 + pointer_set_insert(visited, oldstmt);
85225 + phi = overflow_create_phi_node(oldstmt, new_var);
85226 + for (i = 0; i < n; i++) {
85227 + tree arg, lhs;
85228 +
85229 + arg = gimple_phi_arg_def(oldstmt, i);
85230 + if (is_gimple_constant(arg))
85231 + arg = signed_cast(arg);
85232 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
85233 + if (lhs == NULL_TREE)
85234 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
85235 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
85236 + }
85237 +
85238 + update_stmt(phi);
85239 + return gimple_phi_result(phi);
85240 +}
85241 +
85242 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85243 +{
85244 + gimple def_stmt = get_def_stmt(var);
85245 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
85246 +
85247 + *potentionally_overflowed = true;
85248 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85249 + if (new_rhs1 == NULL_TREE) {
85250 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
85251 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85252 + else
85253 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
85254 + }
85255 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
85256 +}
85257 +
85258 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85259 +{
85260 + gimple def_stmt = get_def_stmt(var);
85261 + tree rhs1 = gimple_assign_rhs1(def_stmt);
85262 +
85263 + if (is_gimple_constant(rhs1))
85264 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast(rhs1), NULL_TREE, NULL_TREE);
85265 +
85266 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
85267 + switch (TREE_CODE(rhs1)) {
85268 + case SSA_NAME:
85269 + return handle_unary_rhs(visited, potentionally_overflowed, var);
85270 +
85271 + case ARRAY_REF:
85272 + case BIT_FIELD_REF:
85273 + case ADDR_EXPR:
85274 + case COMPONENT_REF:
85275 + case INDIRECT_REF:
85276 +#if BUILDING_GCC_VERSION >= 4006
85277 + case MEM_REF:
85278 +#endif
85279 + case PARM_DECL:
85280 + case TARGET_MEM_REF:
85281 + case VAR_DECL:
85282 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85283 +
85284 + default:
85285 + debug_gimple_stmt(def_stmt);
85286 + debug_tree(rhs1);
85287 + gcc_unreachable();
85288 + }
85289 +}
85290 +
85291 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
85292 +{
85293 + gimple cond_stmt;
85294 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
85295 +
85296 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
85297 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
85298 + update_stmt(cond_stmt);
85299 +}
85300 +
85301 +static tree create_string_param(tree string)
85302 +{
85303 + tree i_type, a_type;
85304 + int length = TREE_STRING_LENGTH(string);
85305 +
85306 + gcc_assert(length > 0);
85307 +
85308 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
85309 + a_type = build_array_type(char_type_node, i_type);
85310 +
85311 + TREE_TYPE(string) = a_type;
85312 + TREE_CONSTANT(string) = 1;
85313 + TREE_READONLY(string) = 1;
85314 +
85315 + return build1(ADDR_EXPR, ptr_type_node, string);
85316 +}
85317 +
85318 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
85319 +{
85320 + gimple func_stmt, def_stmt;
85321 + tree current_func, loc_file, loc_line;
85322 + expanded_location xloc;
85323 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
85324 +
85325 + def_stmt = get_def_stmt(arg);
85326 + xloc = expand_location(gimple_location(def_stmt));
85327 +
85328 + if (!gimple_has_location(def_stmt)) {
85329 + xloc = expand_location(gimple_location(stmt));
85330 + if (!gimple_has_location(stmt))
85331 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
85332 + }
85333 +
85334 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
85335 +
85336 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
85337 + loc_file = create_string_param(loc_file);
85338 +
85339 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
85340 + current_func = create_string_param(current_func);
85341 +
85342 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
85343 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
85344 +
85345 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
85346 +}
85347 +
85348 +static void __unused print_the_code_insertions(gimple stmt)
85349 +{
85350 + location_t loc = gimple_location(stmt);
85351 +
85352 + inform(loc, "Integer size_overflow check applied here.");
85353 +}
85354 +
85355 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
85356 +{
85357 + basic_block cond_bb, join_bb, bb_true;
85358 + edge e;
85359 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85360 +
85361 + cond_bb = gimple_bb(stmt);
85362 + gsi_prev(&gsi);
85363 + if (gsi_end_p(gsi))
85364 + e = split_block_after_labels(cond_bb);
85365 + else
85366 + e = split_block(cond_bb, gsi_stmt(gsi));
85367 + cond_bb = e->src;
85368 + join_bb = e->dest;
85369 + e->flags = EDGE_FALSE_VALUE;
85370 + e->probability = REG_BR_PROB_BASE;
85371 +
85372 + bb_true = create_empty_bb(cond_bb);
85373 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
85374 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
85375 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
85376 +
85377 + if (dom_info_available_p(CDI_DOMINATORS)) {
85378 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
85379 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
85380 + }
85381 +
85382 + if (current_loops != NULL) {
85383 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
85384 + add_bb_to_loop(bb_true, cond_bb->loop_father);
85385 + }
85386 +
85387 + insert_cond(cond_bb, arg, cond_code, type_value);
85388 + insert_cond_result(bb_true, stmt, arg);
85389 +
85390 +// print_the_code_insertions(stmt);
85391 +}
85392 +
85393 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
85394 +{
85395 + gimple ucast_stmt;
85396 + gimple_stmt_iterator gsi;
85397 + location_t loc = gimple_location(stmt);
85398 +
85399 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
85400 + gsi = gsi_for_stmt(stmt);
85401 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
85402 + return ucast_stmt;
85403 +}
85404 +
85405 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
85406 +{
85407 + tree type_max, type_min, rhs_type = TREE_TYPE(rhs);
85408 + gimple ucast_stmt;
85409 +
85410 + if (!*potentionally_overflowed)
85411 + return;
85412 +
85413 + if (TYPE_UNSIGNED(rhs_type)) {
85414 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
85415 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85416 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
85417 + } else {
85418 + type_max = signed_cast(TYPE_MAX_VALUE(rhs_type));
85419 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
85420 +
85421 + type_min = signed_cast(TYPE_MIN_VALUE(rhs_type));
85422 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
85423 + }
85424 +}
85425 +
85426 +static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
85427 +{
85428 + gimple assign;
85429 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85430 + tree origtype = TREE_TYPE(orig_rhs);
85431 +
85432 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
85433 +
85434 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
85435 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85436 + update_stmt(assign);
85437 + return gimple_get_lhs(assign);
85438 +}
85439 +
85440 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree orig_rhs, tree var_rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
85441 +{
85442 + tree new_rhs;
85443 +
85444 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
85445 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85446 +
85447 + if (var_rhs == NULL_TREE)
85448 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85449 +
85450 + new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
85451 + gimple_assign_set_rhs(def_stmt, new_rhs);
85452 + update_stmt(def_stmt);
85453 +
85454 + check_size_overflow(def_stmt, var_rhs, orig_rhs, potentionally_overflowed);
85455 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85456 +}
85457 +
85458 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85459 +{
85460 + tree rhs1, rhs2;
85461 + gimple def_stmt = get_def_stmt(var);
85462 + tree new_rhs1 = NULL_TREE;
85463 + tree new_rhs2 = NULL_TREE;
85464 +
85465 + rhs1 = gimple_assign_rhs1(def_stmt);
85466 + rhs2 = gimple_assign_rhs2(def_stmt);
85467 +
85468 + /* no DImode/TImode division in the 32/64 bit kernel */
85469 + switch (gimple_assign_rhs_code(def_stmt)) {
85470 + case RDIV_EXPR:
85471 + case TRUNC_DIV_EXPR:
85472 + case CEIL_DIV_EXPR:
85473 + case FLOOR_DIV_EXPR:
85474 + case ROUND_DIV_EXPR:
85475 + case TRUNC_MOD_EXPR:
85476 + case CEIL_MOD_EXPR:
85477 + case FLOOR_MOD_EXPR:
85478 + case ROUND_MOD_EXPR:
85479 + case EXACT_DIV_EXPR:
85480 + case POINTER_PLUS_EXPR:
85481 + case BIT_AND_EXPR:
85482 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85483 + default:
85484 + break;
85485 + }
85486 +
85487 + *potentionally_overflowed = true;
85488 +
85489 + if (TREE_CODE(rhs1) == SSA_NAME)
85490 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85491 + if (TREE_CODE(rhs2) == SSA_NAME)
85492 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
85493 +
85494 + if (is_gimple_constant(rhs2))
85495 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, new_rhs1, signed_cast(rhs2), &gimple_assign_set_rhs1);
85496 +
85497 + if (is_gimple_constant(rhs1))
85498 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, new_rhs2, signed_cast(rhs1), new_rhs2, &gimple_assign_set_rhs2);
85499 +
85500 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85501 +}
85502 +
85503 +#if BUILDING_GCC_VERSION >= 4007
85504 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
85505 +{
85506 + if (is_gimple_constant(rhs))
85507 + return signed_cast(rhs);
85508 + if (TREE_CODE(rhs) != SSA_NAME)
85509 + return NULL_TREE;
85510 + return expand(visited, potentionally_overflowed, rhs);
85511 +}
85512 +
85513 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85514 +{
85515 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
85516 + gimple def_stmt = get_def_stmt(var);
85517 +
85518 + *potentionally_overflowed = true;
85519 +
85520 + rhs1 = gimple_assign_rhs1(def_stmt);
85521 + rhs2 = gimple_assign_rhs2(def_stmt);
85522 + rhs3 = gimple_assign_rhs3(def_stmt);
85523 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
85524 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
85525 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
85526 +
85527 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
85528 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
85529 + error("handle_ternary_ops: unknown rhs");
85530 + gcc_unreachable();
85531 +}
85532 +#endif
85533 +
85534 +static void set_size_overflow_type(tree node)
85535 +{
85536 + switch (TYPE_MODE(TREE_TYPE(node))) {
85537 + case SImode:
85538 + signed_size_overflow_type = intDI_type_node;
85539 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85540 + break;
85541 + case DImode:
85542 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
85543 + signed_size_overflow_type = intDI_type_node;
85544 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85545 + } else {
85546 + signed_size_overflow_type = intTI_type_node;
85547 + unsigned_size_overflow_type = unsigned_intTI_type_node;
85548 + }
85549 + break;
85550 + default:
85551 + error("set_size_overflow_type: unsupported gcc configuration.");
85552 + gcc_unreachable();
85553 + }
85554 +}
85555 +
85556 +static tree expand_visited(gimple def_stmt)
85557 +{
85558 + gimple tmp;
85559 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
85560 +
85561 + gsi_next(&gsi);
85562 + tmp = gsi_stmt(gsi);
85563 + switch (gimple_code(tmp)) {
85564 + case GIMPLE_ASSIGN:
85565 + return gimple_get_lhs(tmp);
85566 + case GIMPLE_PHI:
85567 + return gimple_phi_result(tmp);
85568 + case GIMPLE_CALL:
85569 + return gimple_call_lhs(tmp);
85570 + default:
85571 + return NULL_TREE;
85572 + }
85573 +}
85574 +
85575 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85576 +{
85577 + gimple def_stmt;
85578 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
85579 +
85580 + if (is_gimple_constant(var))
85581 + return NULL_TREE;
85582 +
85583 + if (TREE_CODE(var) == ADDR_EXPR)
85584 + return NULL_TREE;
85585 +
85586 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
85587 + if (code != INTEGER_TYPE)
85588 + return NULL_TREE;
85589 +
85590 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
85591 + check_missing_attribute(var);
85592 + return NULL_TREE;
85593 + }
85594 +
85595 + def_stmt = get_def_stmt(var);
85596 +
85597 + if (!def_stmt)
85598 + return NULL_TREE;
85599 +
85600 + if (pointer_set_contains(visited, def_stmt))
85601 + return expand_visited(def_stmt);
85602 +
85603 + switch (gimple_code(def_stmt)) {
85604 + case GIMPLE_NOP:
85605 + check_missing_attribute(var);
85606 + return NULL_TREE;
85607 + case GIMPLE_PHI:
85608 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
85609 + case GIMPLE_CALL:
85610 + case GIMPLE_ASM:
85611 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85612 + case GIMPLE_ASSIGN:
85613 + switch (gimple_num_ops(def_stmt)) {
85614 + case 2:
85615 + return handle_unary_ops(visited, potentionally_overflowed, var);
85616 + case 3:
85617 + return handle_binary_ops(visited, potentionally_overflowed, var);
85618 +#if BUILDING_GCC_VERSION >= 4007
85619 + case 4:
85620 + return handle_ternary_ops(visited, potentionally_overflowed, var);
85621 +#endif
85622 + }
85623 + default:
85624 + debug_gimple_stmt(def_stmt);
85625 + error("expand: unknown gimple code");
85626 + gcc_unreachable();
85627 + }
85628 +}
85629 +
85630 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
85631 +{
85632 + gimple assign;
85633 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85634 + tree origtype = TREE_TYPE(origarg);
85635 +
85636 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
85637 +
85638 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
85639 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85640 + update_stmt(assign);
85641 +
85642 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
85643 + update_stmt(stmt);
85644 +}
85645 +
85646 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
85647 +{
85648 + const char *origid;
85649 + tree arg, origarg;
85650 +
85651 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
85652 + gcc_assert(gimple_call_num_args(stmt) > argnum);
85653 + return gimple_call_arg(stmt, argnum);
85654 + }
85655 +
85656 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
85657 + while (origarg && argnum) {
85658 + argnum--;
85659 + origarg = TREE_CHAIN(origarg);
85660 + }
85661 +
85662 + gcc_assert(argnum == 0);
85663 +
85664 + gcc_assert(origarg != NULL_TREE);
85665 + origid = NAME(origarg);
85666 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
85667 + if (!strcmp(origid, NAME(arg)))
85668 + return arg;
85669 + }
85670 + return NULL_TREE;
85671 +}
85672 +
85673 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
85674 +{
85675 + struct pointer_set_t *visited;
85676 + tree arg, newarg;
85677 + bool potentionally_overflowed;
85678 +
85679 + arg = get_function_arg(argnum, stmt, fndecl);
85680 + if (arg == NULL_TREE)
85681 + return;
85682 +
85683 + if (is_gimple_constant(arg))
85684 + return;
85685 + if (TREE_CODE(arg) != SSA_NAME)
85686 + return;
85687 +
85688 + check_arg_type(arg);
85689 +
85690 + set_size_overflow_type(arg);
85691 +
85692 + visited = pointer_set_create();
85693 + potentionally_overflowed = false;
85694 + newarg = expand(visited, &potentionally_overflowed, arg);
85695 + pointer_set_destroy(visited);
85696 +
85697 + if (newarg == NULL_TREE || !potentionally_overflowed)
85698 + return;
85699 +
85700 + change_function_arg(stmt, arg, argnum, newarg);
85701 +
85702 + check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
85703 +}
85704 +
85705 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
85706 +{
85707 + tree p = TREE_VALUE(attr);
85708 + do {
85709 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
85710 + p = TREE_CHAIN(p);
85711 + } while (p);
85712 +}
85713 +
85714 +static void handle_function_by_hash(gimple stmt, tree fndecl)
85715 +{
85716 + tree orig_fndecl;
85717 + unsigned int num;
85718 + struct size_overflow_hash *hash;
85719 +
85720 + orig_fndecl = get_original_function_decl(fndecl);
85721 + hash = get_function_hash(orig_fndecl);
85722 + if (!hash)
85723 + return;
85724 +
85725 + for (num = 1; num <= MAX_PARAM; num++)
85726 + if (hash->param & (1U << num))
85727 + handle_function_arg(stmt, fndecl, num - 1);
85728 +}
85729 +
85730 +static unsigned int handle_function(void)
85731 +{
85732 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
85733 + int saved_last_basic_block = last_basic_block;
85734 +
85735 + do {
85736 + gimple_stmt_iterator gsi;
85737 + basic_block next = bb->next_bb;
85738 +
85739 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85740 + tree fndecl, attr;
85741 + gimple stmt = gsi_stmt(gsi);
85742 +
85743 + if (!(is_gimple_call(stmt)))
85744 + continue;
85745 + fndecl = gimple_call_fndecl(stmt);
85746 + if (fndecl == NULL_TREE)
85747 + continue;
85748 + if (gimple_call_num_args(stmt) == 0)
85749 + continue;
85750 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
85751 + if (!attr || !TREE_VALUE(attr))
85752 + handle_function_by_hash(stmt, fndecl);
85753 + else
85754 + handle_function_by_attribute(stmt, attr, fndecl);
85755 + gsi = gsi_for_stmt(stmt);
85756 + }
85757 + bb = next;
85758 + } while (bb && bb->index <= saved_last_basic_block);
85759 + return 0;
85760 +}
85761 +
85762 +static struct gimple_opt_pass size_overflow_pass = {
85763 + .pass = {
85764 + .type = GIMPLE_PASS,
85765 + .name = "size_overflow",
85766 + .gate = NULL,
85767 + .execute = handle_function,
85768 + .sub = NULL,
85769 + .next = NULL,
85770 + .static_pass_number = 0,
85771 + .tv_id = TV_NONE,
85772 + .properties_required = PROP_cfg | PROP_referenced_vars,
85773 + .properties_provided = 0,
85774 + .properties_destroyed = 0,
85775 + .todo_flags_start = 0,
85776 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
85777 + }
85778 +};
85779 +
85780 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
85781 +{
85782 + tree fntype;
85783 +
85784 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
85785 +
85786 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
85787 + fntype = build_function_type_list(void_type_node,
85788 + const_char_ptr_type_node,
85789 + unsigned_type_node,
85790 + const_char_ptr_type_node,
85791 + NULL_TREE);
85792 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
85793 +
85794 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
85795 + TREE_PUBLIC(report_size_overflow_decl) = 1;
85796 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
85797 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
85798 +}
85799 +
85800 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85801 +{
85802 + int i;
85803 + const char * const plugin_name = plugin_info->base_name;
85804 + const int argc = plugin_info->argc;
85805 + const struct plugin_argument * const argv = plugin_info->argv;
85806 + bool enable = true;
85807 +
85808 + struct register_pass_info size_overflow_pass_info = {
85809 + .pass = &size_overflow_pass.pass,
85810 + .reference_pass_name = "ssa",
85811 + .ref_pass_instance_number = 1,
85812 + .pos_op = PASS_POS_INSERT_AFTER
85813 + };
85814 +
85815 + if (!plugin_default_version_check(version, &gcc_version)) {
85816 + error(G_("incompatible gcc/plugin versions"));
85817 + return 1;
85818 + }
85819 +
85820 + for (i = 0; i < argc; ++i) {
85821 + if (!strcmp(argv[i].key, "no-size-overflow")) {
85822 + enable = false;
85823 + continue;
85824 + }
85825 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85826 + }
85827 +
85828 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
85829 + if (enable) {
85830 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
85831 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
85832 + }
85833 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
85834 +
85835 + return 0;
85836 +}
85837 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
85838 new file mode 100644
85839 index 0000000..38d2014
85840 --- /dev/null
85841 +++ b/tools/gcc/stackleak_plugin.c
85842 @@ -0,0 +1,313 @@
85843 +/*
85844 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85845 + * Licensed under the GPL v2
85846 + *
85847 + * Note: the choice of the license means that the compilation process is
85848 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85849 + * but for the kernel it doesn't matter since it doesn't link against
85850 + * any of the gcc libraries
85851 + *
85852 + * gcc plugin to help implement various PaX features
85853 + *
85854 + * - track lowest stack pointer
85855 + *
85856 + * TODO:
85857 + * - initialize all local variables
85858 + *
85859 + * BUGS:
85860 + * - none known
85861 + */
85862 +#include "gcc-plugin.h"
85863 +#include "config.h"
85864 +#include "system.h"
85865 +#include "coretypes.h"
85866 +#include "tree.h"
85867 +#include "tree-pass.h"
85868 +#include "flags.h"
85869 +#include "intl.h"
85870 +#include "toplev.h"
85871 +#include "plugin.h"
85872 +//#include "expr.h" where are you...
85873 +#include "diagnostic.h"
85874 +#include "plugin-version.h"
85875 +#include "tm.h"
85876 +#include "function.h"
85877 +#include "basic-block.h"
85878 +#include "gimple.h"
85879 +#include "rtl.h"
85880 +#include "emit-rtl.h"
85881 +
85882 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85883 +
85884 +int plugin_is_GPL_compatible;
85885 +
85886 +static int track_frame_size = -1;
85887 +static const char track_function[] = "pax_track_stack";
85888 +static const char check_function[] = "pax_check_alloca";
85889 +static bool init_locals;
85890 +
85891 +static struct plugin_info stackleak_plugin_info = {
85892 + .version = "201203140940",
85893 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
85894 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
85895 +};
85896 +
85897 +static bool gate_stackleak_track_stack(void);
85898 +static unsigned int execute_stackleak_tree_instrument(void);
85899 +static unsigned int execute_stackleak_final(void);
85900 +
85901 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
85902 + .pass = {
85903 + .type = GIMPLE_PASS,
85904 + .name = "stackleak_tree_instrument",
85905 + .gate = gate_stackleak_track_stack,
85906 + .execute = execute_stackleak_tree_instrument,
85907 + .sub = NULL,
85908 + .next = NULL,
85909 + .static_pass_number = 0,
85910 + .tv_id = TV_NONE,
85911 + .properties_required = PROP_gimple_leh | PROP_cfg,
85912 + .properties_provided = 0,
85913 + .properties_destroyed = 0,
85914 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
85915 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
85916 + }
85917 +};
85918 +
85919 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
85920 + .pass = {
85921 + .type = RTL_PASS,
85922 + .name = "stackleak_final",
85923 + .gate = gate_stackleak_track_stack,
85924 + .execute = execute_stackleak_final,
85925 + .sub = NULL,
85926 + .next = NULL,
85927 + .static_pass_number = 0,
85928 + .tv_id = TV_NONE,
85929 + .properties_required = 0,
85930 + .properties_provided = 0,
85931 + .properties_destroyed = 0,
85932 + .todo_flags_start = 0,
85933 + .todo_flags_finish = TODO_dump_func
85934 + }
85935 +};
85936 +
85937 +static bool gate_stackleak_track_stack(void)
85938 +{
85939 + return track_frame_size >= 0;
85940 +}
85941 +
85942 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
85943 +{
85944 + gimple check_alloca;
85945 + tree fntype, fndecl, alloca_size;
85946 +
85947 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
85948 + fndecl = build_fn_decl(check_function, fntype);
85949 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85950 +
85951 + // insert call to void pax_check_alloca(unsigned long size)
85952 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
85953 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
85954 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
85955 +}
85956 +
85957 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
85958 +{
85959 + gimple track_stack;
85960 + tree fntype, fndecl;
85961 +
85962 + fntype = build_function_type_list(void_type_node, NULL_TREE);
85963 + fndecl = build_fn_decl(track_function, fntype);
85964 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85965 +
85966 + // insert call to void pax_track_stack(void)
85967 + track_stack = gimple_build_call(fndecl, 0);
85968 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
85969 +}
85970 +
85971 +#if BUILDING_GCC_VERSION == 4005
85972 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
85973 +{
85974 + tree fndecl;
85975 +
85976 + if (!is_gimple_call(stmt))
85977 + return false;
85978 + fndecl = gimple_call_fndecl(stmt);
85979 + if (!fndecl)
85980 + return false;
85981 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
85982 + return false;
85983 +// print_node(stderr, "pax", fndecl, 4);
85984 + return DECL_FUNCTION_CODE(fndecl) == code;
85985 +}
85986 +#endif
85987 +
85988 +static bool is_alloca(gimple stmt)
85989 +{
85990 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
85991 + return true;
85992 +
85993 +#if BUILDING_GCC_VERSION >= 4007
85994 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
85995 + return true;
85996 +#endif
85997 +
85998 + return false;
85999 +}
86000 +
86001 +static unsigned int execute_stackleak_tree_instrument(void)
86002 +{
86003 + basic_block bb, entry_bb;
86004 + bool prologue_instrumented = false, is_leaf = true;
86005 +
86006 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
86007 +
86008 + // 1. loop through BBs and GIMPLE statements
86009 + FOR_EACH_BB(bb) {
86010 + gimple_stmt_iterator gsi;
86011 +
86012 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86013 + gimple stmt;
86014 +
86015 + stmt = gsi_stmt(gsi);
86016 +
86017 + if (is_gimple_call(stmt))
86018 + is_leaf = false;
86019 +
86020 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
86021 + if (!is_alloca(stmt))
86022 + continue;
86023 +
86024 + // 2. insert stack overflow check before each __builtin_alloca call
86025 + stackleak_check_alloca(&gsi);
86026 +
86027 + // 3. insert track call after each __builtin_alloca call
86028 + stackleak_add_instrumentation(&gsi);
86029 + if (bb == entry_bb)
86030 + prologue_instrumented = true;
86031 + }
86032 + }
86033 +
86034 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
86035 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
86036 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
86037 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
86038 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
86039 + return 0;
86040 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
86041 + return 0;
86042 +
86043 + // 4. insert track call at the beginning
86044 + if (!prologue_instrumented) {
86045 + gimple_stmt_iterator gsi;
86046 +
86047 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
86048 + if (dom_info_available_p(CDI_DOMINATORS))
86049 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
86050 + gsi = gsi_start_bb(bb);
86051 + stackleak_add_instrumentation(&gsi);
86052 + }
86053 +
86054 + return 0;
86055 +}
86056 +
86057 +static unsigned int execute_stackleak_final(void)
86058 +{
86059 + rtx insn;
86060 +
86061 + if (cfun->calls_alloca)
86062 + return 0;
86063 +
86064 + // keep calls only if function frame is big enough
86065 + if (get_frame_size() >= track_frame_size)
86066 + return 0;
86067 +
86068 + // 1. find pax_track_stack calls
86069 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
86070 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
86071 + rtx body;
86072 +
86073 + if (!CALL_P(insn))
86074 + continue;
86075 + body = PATTERN(insn);
86076 + if (GET_CODE(body) != CALL)
86077 + continue;
86078 + body = XEXP(body, 0);
86079 + if (GET_CODE(body) != MEM)
86080 + continue;
86081 + body = XEXP(body, 0);
86082 + if (GET_CODE(body) != SYMBOL_REF)
86083 + continue;
86084 + if (strcmp(XSTR(body, 0), track_function))
86085 + continue;
86086 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86087 + // 2. delete call
86088 + insn = delete_insn_and_edges(insn);
86089 +#if BUILDING_GCC_VERSION >= 4007
86090 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
86091 + insn = delete_insn_and_edges(insn);
86092 +#endif
86093 + }
86094 +
86095 +// print_simple_rtl(stderr, get_insns());
86096 +// print_rtl(stderr, get_insns());
86097 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86098 +
86099 + return 0;
86100 +}
86101 +
86102 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86103 +{
86104 + const char * const plugin_name = plugin_info->base_name;
86105 + const int argc = plugin_info->argc;
86106 + const struct plugin_argument * const argv = plugin_info->argv;
86107 + int i;
86108 + struct register_pass_info stackleak_tree_instrument_pass_info = {
86109 + .pass = &stackleak_tree_instrument_pass.pass,
86110 +// .reference_pass_name = "tree_profile",
86111 + .reference_pass_name = "optimized",
86112 + .ref_pass_instance_number = 1,
86113 + .pos_op = PASS_POS_INSERT_BEFORE
86114 + };
86115 + struct register_pass_info stackleak_final_pass_info = {
86116 + .pass = &stackleak_final_rtl_opt_pass.pass,
86117 + .reference_pass_name = "final",
86118 + .ref_pass_instance_number = 1,
86119 + .pos_op = PASS_POS_INSERT_BEFORE
86120 + };
86121 +
86122 + if (!plugin_default_version_check(version, &gcc_version)) {
86123 + error(G_("incompatible gcc/plugin versions"));
86124 + return 1;
86125 + }
86126 +
86127 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
86128 +
86129 + for (i = 0; i < argc; ++i) {
86130 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
86131 + if (!argv[i].value) {
86132 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86133 + continue;
86134 + }
86135 + track_frame_size = atoi(argv[i].value);
86136 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
86137 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86138 + continue;
86139 + }
86140 + if (!strcmp(argv[i].key, "initialize-locals")) {
86141 + if (argv[i].value) {
86142 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86143 + continue;
86144 + }
86145 + init_locals = true;
86146 + continue;
86147 + }
86148 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86149 + }
86150 +
86151 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
86152 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
86153 +
86154 + return 0;
86155 +}
86156 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
86157 index 6789d78..4afd019 100644
86158 --- a/tools/perf/util/include/asm/alternative-asm.h
86159 +++ b/tools/perf/util/include/asm/alternative-asm.h
86160 @@ -5,4 +5,7 @@
86161
86162 #define altinstruction_entry #
86163
86164 + .macro pax_force_retaddr rip=0, reload=0
86165 + .endm
86166 +
86167 #endif
86168 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
86169 index af0f22f..9a7d479 100644
86170 --- a/usr/gen_init_cpio.c
86171 +++ b/usr/gen_init_cpio.c
86172 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
86173 int retval;
86174 int rc = -1;
86175 int namesize;
86176 - int i;
86177 + unsigned int i;
86178
86179 mode |= S_IFREG;
86180
86181 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
86182 *env_var = *expanded = '\0';
86183 strncat(env_var, start + 2, end - start - 2);
86184 strncat(expanded, new_location, start - new_location);
86185 - strncat(expanded, getenv(env_var), PATH_MAX);
86186 - strncat(expanded, end + 1, PATH_MAX);
86187 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
86188 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
86189 strncpy(new_location, expanded, PATH_MAX);
86190 + new_location[PATH_MAX] = 0;
86191 } else
86192 break;
86193 }
86194 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
86195 index 9739b53..6d457e3 100644
86196 --- a/virt/kvm/kvm_main.c
86197 +++ b/virt/kvm/kvm_main.c
86198 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
86199
86200 static cpumask_var_t cpus_hardware_enabled;
86201 static int kvm_usage_count = 0;
86202 -static atomic_t hardware_enable_failed;
86203 +static atomic_unchecked_t hardware_enable_failed;
86204
86205 struct kmem_cache *kvm_vcpu_cache;
86206 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
86207 @@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void *junk)
86208
86209 if (r) {
86210 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
86211 - atomic_inc(&hardware_enable_failed);
86212 + atomic_inc_unchecked(&hardware_enable_failed);
86213 printk(KERN_INFO "kvm: enabling virtualization on "
86214 "CPU%d failed\n", cpu);
86215 }
86216 @@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
86217
86218 kvm_usage_count++;
86219 if (kvm_usage_count == 1) {
86220 - atomic_set(&hardware_enable_failed, 0);
86221 + atomic_set_unchecked(&hardware_enable_failed, 0);
86222 on_each_cpu(hardware_enable_nolock, NULL, 1);
86223
86224 - if (atomic_read(&hardware_enable_failed)) {
86225 + if (atomic_read_unchecked(&hardware_enable_failed)) {
86226 hardware_disable_all_nolock();
86227 r = -EBUSY;
86228 }
86229 @@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
86230 kvm_arch_vcpu_put(vcpu);
86231 }
86232
86233 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86234 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86235 struct module *module)
86236 {
86237 int r;
86238 @@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86239 if (!vcpu_align)
86240 vcpu_align = __alignof__(struct kvm_vcpu);
86241 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
86242 - 0, NULL);
86243 + SLAB_USERCOPY, NULL);
86244 if (!kvm_vcpu_cache) {
86245 r = -ENOMEM;
86246 goto out_free_3;
86247 @@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86248 if (r)
86249 goto out_free;
86250
86251 - kvm_chardev_ops.owner = module;
86252 - kvm_vm_fops.owner = module;
86253 - kvm_vcpu_fops.owner = module;
86254 + pax_open_kernel();
86255 + *(void **)&kvm_chardev_ops.owner = module;
86256 + *(void **)&kvm_vm_fops.owner = module;
86257 + *(void **)&kvm_vcpu_fops.owner = module;
86258 + pax_close_kernel();
86259
86260 r = misc_register(&kvm_dev);
86261 if (r) {